path: root/tools/perf/scripts/python/libxed.py
author		Trond Myklebust <trond.myklebust@hammerspace.com>	2023-09-17 09:06:05 -0400
committer	Trond Myklebust <trond.myklebust@hammerspace.com>	2023-10-22 19:47:56 -0400
commit		4b09ca1508a60be30b2e3940264e93d7aeb5c97e (patch)
tree		3468904a8bb22626a96f789dea7a3b61be844acd /tools/perf/scripts/python/libxed.py
parent		6e7434abcd07e8beb67fdc4af6207ae0490d5274 (diff)
SUNRPC: ECONNRESET might require a rebind
If connect() is returning ECONNRESET, it usually means that nothing is
listening on that port. If so, a rebind might be required in order to
obtain the new port on which the RPC service is listening.

Fixes: fd01b2597941 ("SUNRPC: ECONNREFUSED should cause a rebind.")
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
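The patch body is not shown on this page, so the following is only a minimal
stand-alone sketch of the decision the commit message describes: treat
ECONNRESET from connect() the same way ECONNREFUSED is treated, i.e. as a
hint that nothing is listening and that the client should re-query rpcbind
for the service's port. The helper name connect_error_needs_rebind is
hypothetical and is not the kernel's SUNRPC code.

/* Illustrative sketch only: classify connect() errors that should
 * trigger an RPC rebind.  Both ECONNREFUSED and (per this commit)
 * ECONNRESET are taken to mean "nothing is listening on that port".
 * The helper name is hypothetical, not taken from net/sunrpc/. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool connect_error_needs_rebind(int err)
{
	switch (err) {
	case ECONNREFUSED:	/* classic "no listener" error */
	case ECONNRESET:	/* may equally mean no listener (this fix) */
		return true;	/* re-query rpcbind for the new port */
	default:
		return false;	/* other errors: retry/fail as before */
	}
}

int main(void)
{
	int errs[] = { ECONNREFUSED, ECONNRESET, ETIMEDOUT };
	size_t i;

	for (i = 0; i < sizeof(errs) / sizeof(errs[0]); i++)
		printf("err=%d rebind=%d\n", errs[i],
		       connect_error_needs_rebind(errs[i]));
	return 0;
}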
Diffstat (limited to 'tools/perf/scripts/python/libxed.py')
0 files changed, 0 insertions, 0 deletions
Diffstat (limited to 'tools/testing')
-rw-r--r--tools/testing/crypto/chacha20-s390/Makefile12
-rw-r--r--tools/testing/crypto/chacha20-s390/run-tests.sh34
-rw-r--r--tools/testing/crypto/chacha20-s390/test-cipher.c369
-rw-r--r--tools/testing/cxl/Kbuild72
-rw-r--r--tools/testing/cxl/config_check.c18
-rw-r--r--tools/testing/cxl/cxl_acpi_test.c6
-rw-r--r--tools/testing/cxl/cxl_core_exports.c29
-rw-r--r--tools/testing/cxl/cxl_core_test.c6
-rw-r--r--tools/testing/cxl/cxl_mem_test.c6
-rw-r--r--tools/testing/cxl/cxl_pmem_test.c6
-rw-r--r--tools/testing/cxl/cxl_port_test.c6
-rw-r--r--tools/testing/cxl/exports.h13
-rw-r--r--tools/testing/cxl/mock_acpi.c35
-rw-r--r--tools/testing/cxl/test/Kbuild13
-rw-r--r--tools/testing/cxl/test/cxl.c1628
-rw-r--r--tools/testing/cxl/test/cxl_translate.c445
-rw-r--r--tools/testing/cxl/test/mem.c1895
-rw-r--r--tools/testing/cxl/test/mock.c279
-rw-r--r--tools/testing/cxl/test/mock.h35
-rw-r--r--tools/testing/cxl/watermark.h25
-rwxr-xr-x[-rw-r--r--]tools/testing/fault-injection/failcmd.sh12
-rwxr-xr-xtools/testing/ktest/config-bisect.pl4
-rw-r--r--tools/testing/ktest/examples/bootconfigs/boottrace.bconf59
-rw-r--r--tools/testing/ktest/examples/bootconfigs/config-bootconfig1
-rw-r--r--tools/testing/ktest/examples/bootconfigs/functiongraph.bconf15
-rw-r--r--tools/testing/ktest/examples/bootconfigs/tracing.bconf33
-rwxr-xr-xtools/testing/ktest/examples/bootconfigs/verify-boottrace.sh84
-rwxr-xr-xtools/testing/ktest/examples/bootconfigs/verify-functiongraph.sh61
-rwxr-xr-xtools/testing/ktest/examples/bootconfigs/verify-tracing.sh72
-rw-r--r--tools/testing/ktest/examples/include/bootconfig.conf69
-rw-r--r--tools/testing/ktest/examples/include/defaults.conf2
-rw-r--r--tools/testing/ktest/examples/kvm.conf1
-rw-r--r--tools/testing/ktest/examples/vmware.conf137
-rwxr-xr-xtools/testing/ktest/ktest.pl807
-rw-r--r--tools/testing/ktest/sample.conf15
-rw-r--r--tools/testing/kunit/configs/all_tests.config56
-rw-r--r--tools/testing/kunit/configs/arch_uml.config7
-rw-r--r--tools/testing/kunit/configs/broken_on_uml.config42
-rw-r--r--tools/testing/kunit/configs/coverage_uml.config11
-rw-r--r--tools/testing/kunit/configs/default.config3
-rwxr-xr-xtools/testing/kunit/kunit.py700
-rw-r--r--tools/testing/kunit/kunit_config.py131
-rw-r--r--tools/testing/kunit/kunit_json.py104
-rw-r--r--tools/testing/kunit/kunit_kernel.py396
-rw-r--r--tools/testing/kunit/kunit_parser.py1130
-rw-r--r--tools/testing/kunit/kunit_printer.py54
-rwxr-xr-xtools/testing/kunit/kunit_tool_test.py904
-rw-r--r--tools/testing/kunit/mypy.ini6
-rw-r--r--tools/testing/kunit/qemu_config.py20
-rw-r--r--tools/testing/kunit/qemu_configs/alpha.py10
-rw-r--r--tools/testing/kunit/qemu_configs/arm.py13
-rw-r--r--tools/testing/kunit/qemu_configs/arm64.py12
-rw-r--r--tools/testing/kunit/qemu_configs/i386.py10
-rw-r--r--tools/testing/kunit/qemu_configs/loongarch.py21
-rw-r--r--tools/testing/kunit/qemu_configs/m68k.py10
-rw-r--r--tools/testing/kunit/qemu_configs/mips.py18
-rw-r--r--tools/testing/kunit/qemu_configs/mips64.py19
-rw-r--r--tools/testing/kunit/qemu_configs/mips64el.py19
-rw-r--r--tools/testing/kunit/qemu_configs/mipsel.py18
-rw-r--r--tools/testing/kunit/qemu_configs/powerpc.py13
-rw-r--r--tools/testing/kunit/qemu_configs/powerpc32.py17
-rw-r--r--tools/testing/kunit/qemu_configs/powerpcle.py14
-rw-r--r--tools/testing/kunit/qemu_configs/riscv.py28
-rw-r--r--tools/testing/kunit/qemu_configs/riscv32.py17
-rw-r--r--tools/testing/kunit/qemu_configs/s390.py14
-rw-r--r--tools/testing/kunit/qemu_configs/sh.py19
-rw-r--r--tools/testing/kunit/qemu_configs/sparc.py13
-rw-r--r--tools/testing/kunit/qemu_configs/sparc64.py16
-rw-r--r--tools/testing/kunit/qemu_configs/x86_64.py12
-rwxr-xr-xtools/testing/kunit/run_checks.py81
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log34
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-crash.log70
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-kselftest.log15
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log31
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log7
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log (renamed from tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log)0
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log2
-rw-r--r--tools/testing/kunit/test_data/test_parse_attributes.log9
-rw-r--r--tools/testing/kunit/test_data/test_parse_ktap_output.log8
-rw-r--r--tools/testing/kunit/test_data/test_parse_subtest_header.log7
-rw-r--r--tools/testing/kunit/test_data/test_skip_all_tests.log15
-rw-r--r--tools/testing/kunit/test_data/test_skip_tests.log15
-rw-r--r--tools/testing/kunit/test_data/test_strip_hyphen.log16
-rw-r--r--tools/testing/memblock/.gitignore5
-rw-r--r--tools/testing/memblock/Makefile55
-rw-r--r--tools/testing/memblock/README118
-rw-r--r--tools/testing/memblock/TODO5
-rw-r--r--tools/testing/memblock/asm/dma.h5
-rw-r--r--tools/testing/memblock/internal.h33
-rw-r--r--tools/testing/memblock/lib/slab.c9
-rw-r--r--tools/testing/memblock/linux/kernel.h14
-rw-r--r--tools/testing/memblock/linux/kmemleak.h18
-rw-r--r--tools/testing/memblock/linux/memory_hotplug.h17
-rw-r--r--tools/testing/memblock/linux/mmzone.h38
-rw-r--r--tools/testing/memblock/linux/mutex.h14
-rw-r--r--tools/testing/memblock/linux/printk.h25
-rw-r--r--tools/testing/memblock/main.c19
-rw-r--r--tools/testing/memblock/mmzone.c20
-rw-r--r--tools/testing/memblock/scripts/Makefile.include19
-rw-r--r--tools/testing/memblock/tests/alloc_api.c884
-rw-r--r--tools/testing/memblock/tests/alloc_api.h9
-rw-r--r--tools/testing/memblock/tests/alloc_exact_nid_api.c1113
-rw-r--r--tools/testing/memblock/tests/alloc_exact_nid_api.h25
-rw-r--r--tools/testing/memblock/tests/alloc_helpers_api.c414
-rw-r--r--tools/testing/memblock/tests/alloc_helpers_api.h9
-rw-r--r--tools/testing/memblock/tests/alloc_nid_api.c2733
-rw-r--r--tools/testing/memblock/tests/alloc_nid_api.h26
-rw-r--r--tools/testing/memblock/tests/basic_api.c2551
-rw-r--r--tools/testing/memblock/tests/basic_api.h9
-rw-r--r--tools/testing/memblock/tests/common.c209
-rw-r--r--tools/testing/memblock/tests/common.h177
-rw-r--r--tools/testing/nvdimm/Kbuild15
-rw-r--r--tools/testing/nvdimm/config_check.c4
-rw-r--r--tools/testing/nvdimm/dax_pmem_compat_test.c8
-rw-r--r--tools/testing/nvdimm/dax_pmem_core_test.c8
-rw-r--r--tools/testing/nvdimm/dimm_devs.c30
-rw-r--r--tools/testing/nvdimm/pmem-dax.c8
-rw-r--r--tools/testing/nvdimm/test/Kbuild7
-rw-r--r--tools/testing/nvdimm/test/iomap.c69
-rw-r--r--tools/testing/nvdimm/test/ndtest.c1003
-rw-r--r--tools/testing/nvdimm/test/ndtest.h78
-rw-r--r--tools/testing/nvdimm/test/nfit.c110
-rw-r--r--tools/testing/nvdimm/test/nfit_test.h28
-rw-r--r--tools/testing/radix-tree/.gitignore4
-rw-r--r--tools/testing/radix-tree/Makefile61
-rw-r--r--tools/testing/radix-tree/bitmap.c23
-rw-r--r--tools/testing/radix-tree/idr-test.c38
-rw-r--r--tools/testing/radix-tree/linux.c120
-rw-r--r--tools/testing/radix-tree/linux/gfp.h33
-rw-r--r--tools/testing/radix-tree/linux/idr.h1
-rw-r--r--tools/testing/radix-tree/linux/init.h1
-rw-r--r--tools/testing/radix-tree/linux/slab.h27
-rw-r--r--tools/testing/radix-tree/maple.c36227
-rw-r--r--tools/testing/radix-tree/multiorder.c74
-rw-r--r--tools/testing/radix-tree/regression1.c2
-rw-r--r--tools/testing/radix-tree/xarray.c11
-rw-r--r--tools/testing/rbtree/Makefile33
-rw-r--r--tools/testing/rbtree/interval_tree_test.c58
-rw-r--r--tools/testing/rbtree/rbtree_test.c48
-rw-r--r--tools/testing/rbtree/test.h4
-rw-r--r--tools/testing/scatterlist/linux/mm.h5
-rw-r--r--tools/testing/scatterlist/main.c39
-rw-r--r--tools/testing/selftests/.gitignore2
-rw-r--r--tools/testing/selftests/Makefile182
-rw-r--r--tools/testing/selftests/acct/.gitignore3
-rw-r--r--tools/testing/selftests/acct/Makefile5
-rw-r--r--tools/testing/selftests/acct/acct_syscall.c78
-rw-r--r--tools/testing/selftests/alsa/.gitignore5
-rw-r--r--tools/testing/selftests/alsa/Makefile31
-rw-r--r--tools/testing/selftests/alsa/alsa-local.h37
-rw-r--r--tools/testing/selftests/alsa/conf.c475
-rw-r--r--tools/testing/selftests/alsa/conf.d/Lenovo_ThinkPad_P1_Gen2.conf84
-rw-r--r--tools/testing/selftests/alsa/global-timer.c87
-rw-r--r--tools/testing/selftests/alsa/mixer-test.c1145
-rw-r--r--tools/testing/selftests/alsa/pcm-test.c671
-rw-r--r--tools/testing/selftests/alsa/pcm-test.conf63
-rw-r--r--tools/testing/selftests/alsa/test-pcmtest-driver.c330
-rw-r--r--tools/testing/selftests/alsa/utimer-test.c165
-rw-r--r--tools/testing/selftests/amd-pstate/Makefile18
-rwxr-xr-xtools/testing/selftests/amd-pstate/basic.sh38
-rw-r--r--tools/testing/selftests/amd-pstate/config1
-rwxr-xr-xtools/testing/selftests/amd-pstate/gitsource.sh359
-rwxr-xr-xtools/testing/selftests/amd-pstate/run.sh396
-rwxr-xr-xtools/testing/selftests/amd-pstate/tbench.sh339
-rw-r--r--tools/testing/selftests/arm64/Makefile17
-rw-r--r--tools/testing/selftests/arm64/abi/.gitignore4
-rw-r--r--tools/testing/selftests/arm64/abi/Makefile15
-rw-r--r--tools/testing/selftests/arm64/abi/hwcap.c1295
-rw-r--r--tools/testing/selftests/arm64/abi/ptrace.c271
-rw-r--r--tools/testing/selftests/arm64/abi/syscall-abi-asm.S360
-rw-r--r--tools/testing/selftests/arm64/abi/syscall-abi.c565
-rw-r--r--tools/testing/selftests/arm64/abi/syscall-abi.h15
-rw-r--r--tools/testing/selftests/arm64/abi/tpidr2.c246
-rw-r--r--tools/testing/selftests/arm64/bti/.gitignore2
-rw-r--r--tools/testing/selftests/arm64/bti/Makefile56
-rw-r--r--tools/testing/selftests/arm64/bti/assembler.h79
-rw-r--r--tools/testing/selftests/arm64/bti/btitest.h23
-rw-r--r--tools/testing/selftests/arm64/bti/signal.c37
-rw-r--r--tools/testing/selftests/arm64/bti/signal.h21
-rw-r--r--tools/testing/selftests/arm64/bti/start.S14
-rw-r--r--tools/testing/selftests/arm64/bti/syscall.S23
-rw-r--r--tools/testing/selftests/arm64/bti/system.c20
-rw-r--r--tools/testing/selftests/arm64/bti/system.h28
-rw-r--r--tools/testing/selftests/arm64/bti/test.c229
-rw-r--r--tools/testing/selftests/arm64/bti/teststubs.S39
-rw-r--r--tools/testing/selftests/arm64/bti/trampoline.S29
-rw-r--r--tools/testing/selftests/arm64/fp/.gitignore13
-rw-r--r--tools/testing/selftests/arm64/fp/Makefile55
-rw-r--r--tools/testing/selftests/arm64/fp/TODO7
-rw-r--r--tools/testing/selftests/arm64/fp/asm-offsets.h1
-rw-r--r--tools/testing/selftests/arm64/fp/asm-utils.S172
-rw-r--r--tools/testing/selftests/arm64/fp/assembler.h26
-rw-r--r--tools/testing/selftests/arm64/fp/fp-pidbench.S70
-rw-r--r--tools/testing/selftests/arm64/fp/fp-ptrace-asm.S292
-rw-r--r--tools/testing/selftests/arm64/fp/fp-ptrace.c1692
-rw-r--r--tools/testing/selftests/arm64/fp/fp-ptrace.h25
-rw-r--r--tools/testing/selftests/arm64/fp/fp-stress.c660
-rw-r--r--tools/testing/selftests/arm64/fp/fpsimd-test.S224
-rw-r--r--tools/testing/selftests/arm64/fp/kernel-test.c326
-rw-r--r--tools/testing/selftests/arm64/fp/rdvl-sme.c14
-rw-r--r--tools/testing/selftests/arm64/fp/rdvl-sve.c14
-rw-r--r--tools/testing/selftests/arm64/fp/rdvl.S20
-rw-r--r--tools/testing/selftests/arm64/fp/rdvl.h9
-rw-r--r--tools/testing/selftests/arm64/fp/sme-inst.h73
-rw-r--r--tools/testing/selftests/arm64/fp/ssve-stress59
-rw-r--r--tools/testing/selftests/arm64/fp/sve-probe-vls.c9
-rw-r--r--tools/testing/selftests/arm64/fp/sve-ptrace-asm.S33
-rw-r--r--tools/testing/selftests/arm64/fp/sve-ptrace.c891
-rw-r--r--tools/testing/selftests/arm64/fp/sve-test.S294
-rw-r--r--tools/testing/selftests/arm64/fp/vec-syscfg.c796
-rw-r--r--tools/testing/selftests/arm64/fp/vlset.c10
-rw-r--r--tools/testing/selftests/arm64/fp/za-fork-asm.S61
-rw-r--r--tools/testing/selftests/arm64/fp/za-fork.c100
-rw-r--r--tools/testing/selftests/arm64/fp/za-ptrace.c368
-rw-r--r--tools/testing/selftests/arm64/fp/za-stress59
-rw-r--r--tools/testing/selftests/arm64/fp/za-test.S400
-rw-r--r--tools/testing/selftests/arm64/fp/zt-ptrace.c366
-rw-r--r--tools/testing/selftests/arm64/fp/zt-test.S318
-rw-r--r--tools/testing/selftests/arm64/gcs/.gitignore7
-rw-r--r--tools/testing/selftests/arm64/gcs/Makefile30
-rw-r--r--tools/testing/selftests/arm64/gcs/asm-offsets.h (renamed from tools/testing/radix-tree/linux/compiler_types.h)0
-rw-r--r--tools/testing/selftests/arm64/gcs/basic-gcs.c420
-rw-r--r--tools/testing/selftests/arm64/gcs/gcs-locking.c199
-rw-r--r--tools/testing/selftests/arm64/gcs/gcs-stress-thread.S311
-rw-r--r--tools/testing/selftests/arm64/gcs/gcs-stress.c530
-rw-r--r--tools/testing/selftests/arm64/gcs/gcs-util.h100
-rw-r--r--tools/testing/selftests/arm64/gcs/gcspushm.S96
-rw-r--r--tools/testing/selftests/arm64/gcs/gcsstr.S99
-rw-r--r--tools/testing/selftests/arm64/gcs/libc-gcs.c728
-rw-r--r--tools/testing/selftests/arm64/mte/.gitignore2
-rw-r--r--tools/testing/selftests/arm64/mte/Makefile28
-rw-r--r--tools/testing/selftests/arm64/mte/check_buffer_fill.c30
-rw-r--r--tools/testing/selftests/arm64/mte/check_child_memory.c16
-rw-r--r--tools/testing/selftests/arm64/mte/check_gcr_el1_cswitch.c11
-rw-r--r--tools/testing/selftests/arm64/mte/check_hugetlb_options.c296
-rw-r--r--tools/testing/selftests/arm64/mte/check_ksm_options.c11
-rw-r--r--tools/testing/selftests/arm64/mte/check_mmap_options.c905
-rw-r--r--tools/testing/selftests/arm64/mte/check_prctl.c132
-rw-r--r--tools/testing/selftests/arm64/mte/check_tags_inclusion.c62
-rw-r--r--tools/testing/selftests/arm64/mte/check_user_mem.c200
-rw-r--r--tools/testing/selftests/arm64/mte/mte_common_util.c180
-rw-r--r--tools/testing/selftests/arm64/mte/mte_common_util.h28
-rw-r--r--tools/testing/selftests/arm64/mte/mte_def.h8
-rw-r--r--tools/testing/selftests/arm64/mte/mte_helper.S2
-rw-r--r--tools/testing/selftests/arm64/pauth/Makefile6
-rw-r--r--tools/testing/selftests/arm64/pauth/exec_target.c7
-rw-r--r--tools/testing/selftests/arm64/pauth/pac.c17
-rw-r--r--tools/testing/selftests/arm64/signal/.gitignore9
-rw-r--r--tools/testing/selftests/arm64/signal/Makefile11
-rw-r--r--tools/testing/selftests/arm64/signal/sve_helpers.c56
-rw-r--r--tools/testing/selftests/arm64/signal/sve_helpers.h34
-rw-r--r--tools/testing/selftests/arm64/signal/test_signals.c19
-rw-r--r--tools/testing/selftests/arm64/signal/test_signals.h19
-rw-r--r--tools/testing/selftests/arm64/signal/test_signals_utils.c161
-rw-r--r--tools/testing/selftests/arm64/signal/test_signals_utils.h73
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/TODO1
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_magic.c2
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size.c2
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size_for_magic0.c2
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_duplicated_fpsimd.c2
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_misaligned_sp.c2
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_missing_fpsimd.c2
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c76
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c76
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fpmr_siginfo.c82
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/gcs_exception_fault.c62
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/gcs_frame.c88
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/gcs_write_fault.c67
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/poe_siginfo.c86
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/sme_trap_no_sm.c38
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/sme_trap_non_streaming.c45
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/sme_trap_za.c36
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/sme_vl.c68
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/ssve_regs.c117
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c141
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/sve_regs.c105
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/sve_vl.c68
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/testcases.c209
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/testcases.h41
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/tpidr2_restore.c86
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/tpidr2_siginfo.c90
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/za_no_regs.c103
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/za_regs.c123
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/zt_no_regs.c51
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/zt_regs.c86
-rw-r--r--tools/testing/selftests/arm64/tags/Makefile3
-rwxr-xr-xtools/testing/selftests/arm64/tags/run_tags_test.sh12
-rw-r--r--tools/testing/selftests/arm64/tags/tags_test.c12
-rw-r--r--tools/testing/selftests/bpf/.gitignore40
-rw-r--r--tools/testing/selftests/bpf/DENYLIST7
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.riscv643
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.s390x4
-rw-r--r--tools/testing/selftests/bpf/Makefile807
-rw-r--r--tools/testing/selftests/bpf/Makefile.docs77
-rw-r--r--tools/testing/selftests/bpf/README.rst244
-rw-r--r--tools/testing/selftests/bpf/autoconf_helper.h9
-rw-r--r--tools/testing/selftests/bpf/bench.c412
-rw-r--r--tools/testing/selftests/bpf/bench.h40
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c477
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_bpf_crypto.c185
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c89
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c277
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_bpf_loop.c100
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_count.c16
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_htab_mem.c350
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_local_storage.c282
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_local_storage_create.c258
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c263
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c555
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_rename.c33
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_ringbufs.c101
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_sockmap.c599
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_strncmp.c156
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_trigger.c611
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh45
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh11
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_bpf_loop.sh15
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_htab_mem.sh40
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_local_storage.sh24
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh11
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_rename.sh2
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh58
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_strncmp.sh12
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_trigger.sh22
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_uprobes.sh9
-rw-r--r--tools/testing/selftests/bpf/benchs/run_common.sh92
-rw-r--r--tools/testing/selftests/bpf/bpf_arena_alloc.h67
-rw-r--r--tools/testing/selftests/bpf/bpf_arena_common.h75
-rw-r--r--tools/testing/selftests/bpf/bpf_arena_htab.h100
-rw-r--r--tools/testing/selftests/bpf/bpf_arena_list.h90
-rw-r--r--tools/testing/selftests/bpf/bpf_arena_strsearch.h128
-rw-r--r--tools/testing/selftests/bpf/bpf_atomic.h140
-rw-r--r--tools/testing/selftests/bpf/bpf_experimental.h656
-rw-r--r--tools/testing/selftests/bpf/bpf_kfuncs.h98
-rw-r--r--tools/testing/selftests/bpf/bpf_legacy.h28
-rw-r--r--tools/testing/selftests/bpf/bpf_rlimit.h28
-rw-r--r--tools/testing/selftests/bpf/bpf_sockopt_helpers.h21
-rw-r--r--tools/testing/selftests/bpf/bpf_tcp_helpers.h233
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/Makefile20
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h36
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c52
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h14
-rw-r--r--tools/testing/selftests/bpf/bpf_util.h34
-rw-r--r--tools/testing/selftests/bpf/btf_helpers.c53
-rw-r--r--tools/testing/selftests/bpf/cap_helpers.c67
-rw-r--r--tools/testing/selftests/bpf/cap_helpers.h20
-rw-r--r--tools/testing/selftests/bpf/cgroup_getset_retval_hooks.h25
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.c554
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.h37
-rw-r--r--tools/testing/selftests/bpf/cgroup_tcp_skb.h35
-rw-r--r--tools/testing/selftests/bpf/config161
-rw-r--r--tools/testing/selftests/bpf/config.aarch64154
-rw-r--r--tools/testing/selftests/bpf/config.ppc64el92
-rw-r--r--tools/testing/selftests/bpf/config.riscv6483
-rw-r--r--tools/testing/selftests/bpf/config.s390x125
-rw-r--r--tools/testing/selftests/bpf/config.vm15
-rw-r--r--tools/testing/selftests/bpf/config.x86_64227
l---------tools/testing/selftests/bpf/disasm.c1
l---------tools/testing/selftests/bpf/disasm.h1
-rw-r--r--tools/testing/selftests/bpf/disasm_helpers.c69
-rw-r--r--tools/testing/selftests/bpf/disasm_helpers.h12
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.c18
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.h13
-rwxr-xr-xtools/testing/selftests/bpf/generate_udp_fragments.py90
-rw-r--r--tools/testing/selftests/bpf/get_cgroup_id_user.c139
-rw-r--r--tools/testing/selftests/bpf/gnu/stubs.h2
-rwxr-xr-xtools/testing/selftests/bpf/ima_setup.sh35
-rw-r--r--tools/testing/selftests/bpf/io_helpers.c21
-rw-r--r--tools/testing/selftests/bpf/io_helpers.h7
-rw-r--r--tools/testing/selftests/bpf/ip_check_defrag_frags.h57
-rw-r--r--tools/testing/selftests/bpf/jit_disasm_helpers.c245
-rw-r--r--tools/testing/selftests/bpf/jit_disasm_helpers.h10
l---------tools/testing/selftests/bpf/json_writer.c1
l---------tools/testing/selftests/bpf/json_writer.h1
-rw-r--r--tools/testing/selftests/bpf/liburandom_read.map15
-rw-r--r--tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c118
-rw-r--r--tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c17
-rw-r--r--tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c (renamed from tools/testing/selftests/bpf/test_lpm_map.c)490
-rw-r--r--tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c155
-rw-r--r--tools/testing/selftests/bpf/map_tests/lpm_trie_map_get_next_key.c109
-rw-r--r--tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c278
-rw-r--r--tools/testing/selftests/bpf/map_tests/map_percpu_stats.c488
-rw-r--r--tools/testing/selftests/bpf/map_tests/sk_storage_map.c90
-rw-r--r--tools/testing/selftests/bpf/map_tests/task_storage_map.c128
-rw-r--r--tools/testing/selftests/bpf/netcnt_common.h38
-rw-r--r--tools/testing/selftests/bpf/netlink_helpers.c358
-rw-r--r--tools/testing/selftests/bpf/netlink_helpers.h46
-rw-r--r--tools/testing/selftests/bpf/network_helpers.c1106
-rw-r--r--tools/testing/selftests/bpf/network_helpers.h251
-rw-r--r--tools/testing/selftests/bpf/prog_tests/access_variable_array.c16
-rw-r--r--tools/testing/selftests/bpf/prog_tests/align.c296
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arena_atomics.c268
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arena_htab.c90
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arena_list.c71
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c123
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arena_strsearch.c30
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arg_parsing.c181
-rw-r--r--tools/testing/selftests/bpf/prog_tests/assign_reuse.c199
-rw-r--r--tools/testing/selftests/bpf/prog_tests/async_stack_depth.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/atomic_bounds.c17
-rw-r--r--tools/testing/selftests/bpf/prog_tests/atomics.c204
-rw-r--r--tools/testing/selftests/bpf/prog_tests/attach_probe.c468
-rw-r--r--tools/testing/selftests/bpf/prog_tests/autoattach.c30
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bad_struct_ops.c67
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bind_perm.c93
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c224
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_cookie.c763
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_gotox.c292
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c504
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter.c1110
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c226
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt_unix.c100
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_loop.c207
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c206
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_nf.c198
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c230
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c269
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c231
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c695
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c278
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf.c1929
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c267
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_distill.c692
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dump.c990
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_endian.c24
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_field_iter.c161
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c61
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_module.c34
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c299
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_split.c145
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_sysfs.c81
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_tag.c249
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_write.c239
-rw-r--r--tools/testing/selftests/bpf/prog_tests/build_id.c118
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cb_refs.c48
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c96
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c163
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c141
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c36
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_dev.c125
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_get_current_cgroup_id.c46
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c549
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c339
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_iter.c333
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_link.c29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_mprog_opts.c617
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_mprog_ordering.c77
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_preorder.c128
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_skb_direct_packet_access.c28
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_storage.c96
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_tcp_skb.c344
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c86
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c72
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c180
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c361
-rw-r--r--tools/testing/selftests/bpf/prog_tests/check_mtu.c227
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cls_redirect.c192
-rw-r--r--tools/testing/selftests/bpf/prog_tests/compute_live_registers.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/connect_force_port.c35
-rw-r--r--tools/testing/selftests/bpf/prog_tests/connect_ping.c178
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_autosize.c28
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_extern.c17
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_kern.c28
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_kern_overflow.c13
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_read_macros.c64
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_reloc.c444
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_reloc_raw.c125
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_retro.c17
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cpumask.c86
-rw-r--r--tools/testing/selftests/bpf/prog_tests/crypto_sanity.c196
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c821
-rw-r--r--tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c176
-rw-r--r--tools/testing/selftests/bpf/prog_tests/d_path.c55
-rw-r--r--tools/testing/selftests/bpf/prog_tests/decap_sanity.c76
-rw-r--r--tools/testing/selftests/bpf/prog_tests/deny_namespace.c102
-rw-r--r--tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c285
-rw-r--r--tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c191
-rw-r--r--tools/testing/selftests/bpf/prog_tests/dynptr.c193
-rw-r--r--tools/testing/selftests/bpf/prog_tests/empty_skb.c151
-rw-r--r--tools/testing/selftests/bpf/prog_tests/enable_stats.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/exceptions.c409
-rw-r--r--tools/testing/selftests/bpf/prog_tests/exhandler.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fd_array.c441
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fd_htab_lookup.c192
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fentry_fexit.c57
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fentry_test.c103
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c407
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_sleep.c88
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_stress.c84
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_test.c103
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fib_lookup.c377
-rw-r--r--tools/testing/selftests/bpf/prog_tests/file_reader.c117
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fill_link_info.c652
-rw-r--r--tools/testing/selftests/bpf/prog_tests/find_vma.c127
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector.c437
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector_classification.c797
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c28
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c26
-rw-r--r--tools/testing/selftests/bpf/prog_tests/for_each.c253
-rw-r--r--tools/testing/selftests/bpf/prog_tests/free_timer.c169
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c291
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c130
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_func_args_test.c44
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c139
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c42
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c12
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_data.c49
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_data_init.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_func_args.c62
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_func_dead_code.c60
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_map_resize.c235
-rw-r--r--tools/testing/selftests/bpf/prog_tests/hashmap.c199
-rw-r--r--tools/testing/selftests/bpf/prog_tests/helper_restricted.c31
-rw-r--r--tools/testing/selftests/bpf/prog_tests/htab_reuse.c101
-rw-r--r--tools/testing/selftests/bpf/prog_tests/htab_update.c145
-rw-r--r--tools/testing/selftests/bpf/prog_tests/inner_array_lookup.c31
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c283
-rw-r--r--tools/testing/selftests/bpf/prog_tests/iters.c319
-rw-r--r--tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/jit_probe_mem.c28
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kernel_flag.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfree_skb.c93
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_call.c324
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c119
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_module_order.c55
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c11
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c127
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c609
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c95
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c52
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ksyms.c30
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ksyms_btf.c86
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ksyms_module.c69
-rw-r--r--tools/testing/selftests/bpf/prog_tests/l4lb_all.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/legacy_printk.c65
-rw-r--r--tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c87
-rw-r--r--tools/testing/selftests/bpf/prog_tests/libbpf_probes.c128
-rw-r--r--tools/testing/selftests/bpf/prog_tests/libbpf_str.c225
-rw-r--r--tools/testing/selftests/bpf/prog_tests/link_pinning.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_funcs.c48
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_list.c813
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_maps.c30
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_vars.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c107
-rw-r--r--tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c147
-rw-r--r--tools/testing/selftests/bpf/prog_tests/log_buf.c278
-rw-r--r--tools/testing/selftests/bpf/prog_tests/log_fixup.c181
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c291
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lookup_key.c112
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lru_bug.c21
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c319
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_helpers.h109
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_ip_encap.c540
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_redirect.c331
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_reroute.c264
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_seg6local.c176
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_btf.c98
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_excl.c54
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_in_map.c271
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_kptr.c163
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_lock.c21
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c58
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_ops.c162
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_ptr.c37
-rw-r--r--tools/testing/selftests/bpf/prog_tests/mem_rdonly_untrusted.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/metadata.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c559
-rw-r--r--tools/testing/selftests/bpf/prog_tests/missed.c139
-rw-r--r--tools/testing/selftests/bpf/prog_tests/mmap.c26
-rw-r--r--tools/testing/selftests/bpf/prog_tests/modify_return.c44
-rw-r--r--tools/testing/selftests/bpf/prog_tests/module_attach.c63
-rw-r--r--tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c134
-rw-r--r--tools/testing/selftests/bpf/prog_tests/mptcp.c587
-rw-r--r--tools/testing/selftests/bpf/prog_tests/nested_trust.c16
-rw-r--r--tools/testing/selftests/bpf/prog_tests/net_timestamping.c239
-rw-r--r--tools/testing/selftests/bpf/prog_tests/netcnt.c82
-rw-r--r--tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c124
-rw-r--r--tools/testing/selftests/bpf/prog_tests/netns_cookie.c102
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c244
-rw-r--r--tools/testing/selftests/bpf/prog_tests/obj_name.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c94
-rw-r--r--tools/testing/selftests/bpf/prog_tests/percpu_alloc.c128
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_branches.c30
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_buffer.c32
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_link.c101
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_skip.c137
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pinning.c19
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pinning_devmap_reuse.c50
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pinning_htab.c36
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pkt_access.c30
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pkt_md_access.c18
-rw-r--r--tools/testing/selftests/bpf/prog_tests/preempt_lock.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/preempted_bpf_ma_op.c89
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prepare.c99
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pro_epilogue.c62
-rw-r--r--tools/testing/selftests/bpf/prog_tests/probe_user.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prog_array_init.c32
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prog_run_opts.c77
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c50
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c181
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ptr_untrusted.c36
-rw-r--r--tools/testing/selftests/bpf/prog_tests/queue_stack_map.c58
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_null.c28
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c64
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c15
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c49
-rw-r--r--tools/testing/selftests/bpf/prog_tests/rbtree.c195
-rw-r--r--tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c159
-rw-r--r--tools/testing/selftests/bpf/prog_tests/rdonly_maps.c11
-rw-r--r--tools/testing/selftests/bpf/prog_tests/read_vsyscall.c59
-rw-r--r--tools/testing/selftests/bpf/prog_tests/recursion.c41
-rw-r--r--tools/testing/selftests/bpf/prog_tests/recursive_attach.c218
-rw-r--r--tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c102
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reference_tracking.c47
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reg_bounds.c2161
-rw-r--r--tools/testing/selftests/bpf/prog_tests/res_spin_lock.c117
-rw-r--r--tools/testing/selftests/bpf/prog_tests/resolve_btfids.c27
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ringbuf.c356
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c51
-rw-r--r--tools/testing/selftests/bpf/prog_tests/section_names.c45
-rw-r--r--tools/testing/selftests/bpf/prog_tests/select_reuseport.c185
-rw-r--r--tools/testing/selftests/bpf/prog_tests/send_signal.c201
-rw-r--r--tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/setget_sockopt.c245
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sha256.c52
-rw-r--r--tools/testing/selftests/bpf/prog_tests/signal_pending.c25
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_assign.c84
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_bypass_prot_mem.c292
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_lookup.c298
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_storage_omem_uncharge.c56
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skb_ctx.c81
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skb_helpers.c16
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c45
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c54
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skeleton.c52
-rw-r--r--tools/testing/selftests/bpf/prog_tests/snprintf.c129
-rw-r--r--tools/testing/selftests/bpf/prog_tests/snprintf_btf.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_addr.c2660
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_create.c348
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_destroy.c221
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_fields.c70
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c994
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_post_bind.c426
-rw-r--r--tools/testing/selftests/bpf/prog_tests/socket_cookie.c76
-rw-r--r--tools/testing/selftests/bpf/prog_tests/socket_helpers.h473
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_basic.c922
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h83
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c452
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_listen.c609
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_redir.c465
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_strp.c454
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt.c291
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c148
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_multi.c120
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c72
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_sk.c117
-rw-r--r--tools/testing/selftests/bpf/prog_tests/spin_lock.c174
-rw-r--r--tools/testing/selftests/bpf/prog_tests/spinlock.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stack_var_off.c35
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c52
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c150
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map.c71
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c15
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c63
-rw-r--r--tools/testing/selftests/bpf/prog_tests/static_linked.c35
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stream.c108
-rw-r--r--tools/testing/selftests/bpf/prog_tests/string_kfuncs.c68
-rw-r--r--tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c159
-rw-r--r--tools/testing/selftests/bpf/prog_tests/struct_ops_private_stack.c106
-rw-r--r--tools/testing/selftests/bpf/prog_tests/subprogs.c77
-rw-r--r--tools/testing/selftests/bpf/prog_tests/subprogs_extable.c29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/subskeleton.c152
-rw-r--r--tools/testing/selftests/bpf/prog_tests/summarization.c144
-rw-r--r--tools/testing/selftests/bpf/prog_tests/syscall.c81
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tailcalls.c1311
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c15
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_kfunc.c178
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_local_data.h386
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_local_storage.c518
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_pt_regs.c50
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c60
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_work_stress.c130
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_bpf.c429
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_change_tail.c62
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_helpers.h50
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_links.c1962
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_netkit.c870
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_opts.c2814
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_redirect.c1303
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c150
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_estats.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c119
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_rtt.c49
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c32
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c74
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bpf_smc.c390
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c86
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bpffs.c98
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c12
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_btf_ext.c64
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_csum_diff.c408
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_global_funcs.c211
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_ima.c186
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c139
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_local_storage.c172
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_lsm.c107
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_mmap_inner_array.c57
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_overhead.c34
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_profiler.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c16
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_strncmp.c148
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_id_ops_mapping.c74
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_kptr_return.c16
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c46
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c317
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_pages.c30
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c35
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_refcounted.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_sysctl.c (renamed from tools/testing/selftests/bpf/test_sysctl.c)68
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_task_local_data.c297
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_task_work.c157
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_tc_edt.c145
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c714
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_tunnel.c1074
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_veristat.c261
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c599
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_xsk.c2596
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_xsk.h298
-rw-r--r--tools/testing/selftests/bpf/prog_tests/time_tai.c74
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer.c135
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer_crash.c36
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer_lockup.c101
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer_mim.c80
-rw-r--r--tools/testing/selftests/bpf/prog_tests/token.c1197
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tp_attach_query.c20
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trace_ext.c28
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trace_printk.c75
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trace_vprintk.c54
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tracing_failure.c89
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tracing_struct.c150
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trampoline_count.c155
-rw-r--r--tools/testing/selftests/bpf/prog_tests/type_cast.c114
-rw-r--r--tools/testing/selftests/bpf/prog_tests/udp_limit.c25
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uninit_stack.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c317
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uprobe.c249
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c74
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c1376
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c803
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c186
-rw-r--r--tools/testing/selftests/bpf/prog_tests/usdt.c526
-rw-r--r--tools/testing/selftests/bpf/prog_tests/user_ringbuf.c701
-rw-r--r--tools/testing/selftests/bpf/prog_tests/varlen.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verif_stats.c29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier.c298
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier_kfunc_prog_types.c11
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier_log.c450
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c565
-rw-r--r--tools/testing/selftests/bpf/prog_tests/vmlinux.c16
-rw-r--r--tools/testing/selftests/bpf/prog_tests/vrf_socket_lookup.c312
-rw-r--r--tools/testing/selftests/bpf/prog_tests/wq.c96
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c146
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c369
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_attach.c115
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_bonding.c691
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c146
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c512
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c135
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_dev_bound_only.c61
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c254
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c421
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c168
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_info.c30
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_link.c107
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_metadata.c545
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_noinline.c44
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_perf.c23
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_pull_data.c179
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c178
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_vlan.c175
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdpwall.c15
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xfrm_info.c347
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xsk.c151
-rw-r--r--tools/testing/selftests/bpf/progs/access_map_in_map.c93
-rw-r--r--tools/testing/selftests/bpf/progs/arena_atomics.c397
-rw-r--r--tools/testing/selftests/bpf/progs/arena_htab.c59
-rw-r--r--tools/testing/selftests/bpf/progs/arena_htab_asm.c5
-rw-r--r--tools/testing/selftests/bpf/progs/arena_list.c88
-rw-r--r--tools/testing/selftests/bpf/progs/arena_spin_lock.c54
-rw-r--r--tools/testing/selftests/bpf/progs/arena_strsearch.c146
-rw-r--r--tools/testing/selftests/bpf/progs/async_stack_depth.c61
-rw-r--r--tools/testing/selftests/bpf/progs/atomic_bounds.c24
-rw-r--r--tools/testing/selftests/bpf/progs/atomics.c170
-rw-r--r--tools/testing/selftests/bpf/progs/bad_struct_ops.c25
-rw-r--r--tools/testing/selftests/bpf/progs/bad_struct_ops2.c14
-rw-r--r--tools/testing/selftests/bpf/progs/bench_local_storage_create.c83
-rw-r--r--tools/testing/selftests/bpf/progs/bench_sockmap_prog.c65
-rw-r--r--tools/testing/selftests/bpf/progs/bind4_prog.c93
-rw-r--r--tools/testing/selftests/bpf/progs/bind6_prog.c93
-rw-r--r--tools/testing/selftests/bpf/progs/bind_perm.c45
-rw-r--r--tools/testing/selftests/bpf/progs/bind_prog.h19
-rw-r--r--tools/testing/selftests/bpf/progs/bloom_filter_bench.c154
-rw-r--r--tools/testing/selftests/bpf/progs/bloom_filter_map.c83
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h542
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_cc_cubic.c180
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_compiler.h33
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_cubic.c106
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_dctcp.c140
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_dctcp_release.c22
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_flow.c20
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_gotox.c448
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c40
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c63
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter.h130
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c29
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c12
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c21
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c3
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c8
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c4
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c24
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c3
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_ksym.c71
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c22
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_netlink.c3
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c58
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c60
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task.c26
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c3
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_file.c12
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c35
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c62
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_tasks.c198
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c13
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c11
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c8
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_udp4.c6
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_udp6.c7
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_unix.c80
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c37
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_loop.c225
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_loop_bench.c30
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_misc.h273
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_mod_race.c100
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_qdisc_common.h27
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_qdisc_fail__incompl_ops.c41
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c126
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c756
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_smc.c117
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_syscall_macro.c108
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c17
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_test_utils.h18
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_tracing_net.h143
-rw-r--r--tools/testing/selftests/bpf/progs/bprm_opts.c2
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_signed_arr_elem_sz.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___diff.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___err_missing.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___val3_missing.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_offs.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c12
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c4
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c84
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c173
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c59
-rw-r--r--tools/testing/selftests/bpf/progs/btf_type_tag.c25
-rw-r--r--tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c73
-rw-r--r--tools/testing/selftests/bpf/progs/btf_type_tag_user.c40
-rw-r--r--tools/testing/selftests/bpf/progs/cb_refs.c114
-rw-r--r--tools/testing/selftests/bpf/progs/cg_storage_multi.h2
-rw-r--r--tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c4
-rw-r--r--tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c4
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_ancestor.c40
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c58
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_getset_retval_hooks.c16
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c69
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c156
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_iter.c38
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_mprog.c30
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_preorder.c41
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_read_xattr.c158
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c15
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c2
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_storage.c24
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_tcp_skb.c382
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h79
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c247
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c235
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c100
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_negative.c26
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c94
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c125
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c126
-rw-r--r--tools/testing/selftests/bpf/progs/compute_live_registers.c440
-rw-r--r--tools/testing/selftests/bpf/progs/connect4_dropper.c28
-rw-r--r--tools/testing/selftests/bpf/progs/connect4_prog.c36
-rw-r--r--tools/testing/selftests/bpf/progs/connect6_prog.c8
-rw-r--r--tools/testing/selftests/bpf/progs/connect_force_port4.c9
-rw-r--r--tools/testing/selftests/bpf/progs/connect_force_port6.c9
-rw-r--r--tools/testing/selftests/bpf/progs/connect_ping.c53
-rw-r--r--tools/testing/selftests/bpf/progs/connect_unix_prog.c45
-rw-r--r--tools/testing/selftests/bpf/progs/core_kern.c120
-rw-r--r--tools/testing/selftests/bpf/progs/core_kern_overflow.c22
-rw-r--r--tools/testing/selftests/bpf/progs/core_reloc_types.h243
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_common.h129
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_failure.c262
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_success.c890
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_basic.c68
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_bench.c107
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_common.h66
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_sanity.c179
-rw-r--r--tools/testing/selftests/bpf/progs/csum_diff_test.c42
-rw-r--r--tools/testing/selftests/bpf/progs/decap_sanity.c68
-rw-r--r--tools/testing/selftests/bpf/progs/dev_cgroup.c5
-rw-r--r--tools/testing/selftests/bpf/progs/dmabuf_iter.c101
-rw-r--r--tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c27
-rw-r--r--tools/testing/selftests/bpf/progs/dummy_st_ops_success.c56
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_fail.c1995
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_success.c1137
-rw-r--r--tools/testing/selftests/bpf/progs/empty_skb.c37
-rw-r--r--tools/testing/selftests/bpf/progs/epilogue_exit.c82
-rw-r--r--tools/testing/selftests/bpf/progs/epilogue_tailcall.c58
-rw-r--r--tools/testing/selftests/bpf/progs/err.h28
-rw-r--r--tools/testing/selftests/bpf/progs/exceptions.c368
-rw-r--r--tools/testing/selftests/bpf/progs/exceptions_assert.c135
-rw-r--r--tools/testing/selftests/bpf/progs/exceptions_ext.c72
-rw-r--r--tools/testing/selftests/bpf/progs/exceptions_fail.c349
-rw-r--r--tools/testing/selftests/bpf/progs/exhandler_kern.c52
-rw-r--r--tools/testing/selftests/bpf/progs/fd_htab_lookup.c25
-rw-r--r--tools/testing/selftests/bpf/progs/fentry_many_args.c39
-rw-r--r--tools/testing/selftests/bpf/progs/fentry_recursive.c14
-rw-r--r--tools/testing/selftests/bpf/progs/fentry_recursive_target.c25
-rw-r--r--tools/testing/selftests/bpf/progs/fentry_test.c2
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c10
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_many_args.c40
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_sleep.c32
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_test.c4
-rw-r--r--tools/testing/selftests/bpf/progs/fib_lookup.c22
-rw-r--r--tools/testing/selftests/bpf/progs/file_reader.c145
-rw-r--r--tools/testing/selftests/bpf/progs/file_reader_fail.c52
-rw-r--r--tools/testing/selftests/bpf/progs/find_vma.c69
-rw-r--r--tools/testing/selftests/bpf/progs/find_vma_fail1.c30
-rw-r--r--tools/testing/selftests/bpf/progs/find_vma_fail2.c29
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_array_map_elem.c73
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c95
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_hash_modify.c30
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c27
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_multi_maps.c49
-rw-r--r--tools/testing/selftests/bpf/progs/free_timer.c71
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_attach_probe.c2
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_cls_redirect.c12
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c2
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_dead_global_func.c11
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_global_func.c18
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_progmap.c24
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_unreliable_prog.c20
-rw-r--r--tools/testing/selftests/bpf/progs/get_branch_snapshot.c40
-rw-r--r--tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c27
-rw-r--r--tools/testing/selftests/bpf/progs/get_func_args_test.c123
-rw-r--r--tools/testing/selftests/bpf/progs/get_func_ip_test.c105
-rw-r--r--tools/testing/selftests/bpf/progs/get_func_ip_uprobe_test.c18
-rw-r--r--tools/testing/selftests/bpf/progs/getpeername4_prog.c24
-rw-r--r--tools/testing/selftests/bpf/progs/getpeername6_prog.c31
-rw-r--r--tools/testing/selftests/bpf/progs/getpeername_unix_prog.c38
-rw-r--r--tools/testing/selftests/bpf/progs/getsockname4_prog.c24
-rw-r--r--tools/testing/selftests/bpf/progs/getsockname6_prog.c31
-rw-r--r--tools/testing/selftests/bpf/progs/getsockname_unix_prog.c38
-rw-r--r--tools/testing/selftests/bpf/progs/htab_mem_bench.c105
-rw-r--r--tools/testing/selftests/bpf/progs/htab_reuse.c19
-rw-r--r--tools/testing/selftests/bpf/progs/htab_update.c36
-rw-r--r--tools/testing/selftests/bpf/progs/ima.c89
-rw-r--r--tools/testing/selftests/bpf/progs/inner_array_lookup.c45
-rw-r--r--tools/testing/selftests/bpf/progs/ip_check_defrag.c99
-rw-r--r--tools/testing/selftests/bpf/progs/irq.c566
-rw-r--r--tools/testing/selftests/bpf/progs/iters.c1929
-rw-r--r--tools/testing/selftests/bpf/progs/iters_css.c72
-rw-r--r--tools/testing/selftests/bpf/progs/iters_css_task.c102
-rw-r--r--tools/testing/selftests/bpf/progs/iters_looping.c216
-rw-r--r--tools/testing/selftests/bpf/progs/iters_num.c242
-rw-r--r--tools/testing/selftests/bpf/progs/iters_state_safety.c426
-rw-r--r--tools/testing/selftests/bpf/progs/iters_task.c51
-rw-r--r--tools/testing/selftests/bpf/progs/iters_task_failure.c105
-rw-r--r--tools/testing/selftests/bpf/progs/iters_task_vma.c43
-rw-r--r--tools/testing/selftests/bpf/progs/iters_testmod.c171
-rw-r--r--tools/testing/selftests/bpf/progs/iters_testmod_seq.c129
-rw-r--r--tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c46
-rw-r--r--tools/testing/selftests/bpf/progs/jit_probe_mem.c59
-rw-r--r--tools/testing/selftests/bpf/progs/kfree_skb.c8
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_destructive.c13
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_fail.c160
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_race.c13
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_test.c217
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c37
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_module_order.c30
-rw-r--r--tools/testing/selftests/bpf/progs/kmem_cache_iter.c108
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi.c162
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_empty.c12
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_override.c13
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_session.c78
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c58
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c31
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_write_ctx.c22
-rw-r--r--tools/testing/selftests/bpf/progs/kptr_xchg_inline.c48
-rw-r--r--tools/testing/selftests/bpf/progs/ksym_race.c13
-rw-r--r--tools/testing/selftests/bpf/progs/linked_funcs1.c97
-rw-r--r--tools/testing/selftests/bpf/progs/linked_funcs2.c97
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list.c420
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list.h56
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list_fail.c611
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list_peek.c113
-rw-r--r--tools/testing/selftests/bpf/progs/linked_maps1.c82
-rw-r--r--tools/testing/selftests/bpf/progs/linked_maps2.c76
-rw-r--r--tools/testing/selftests/bpf/progs/linked_vars1.c54
-rw-r--r--tools/testing/selftests/bpf/progs/linked_vars2.c55
-rw-r--r--tools/testing/selftests/bpf/progs/livepatch_trampoline.c30
-rw-r--r--tools/testing/selftests/bpf/progs/local_kptr_stash.c285
-rw-r--r--tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c85
-rw-r--r--tools/testing/selftests/bpf/progs/local_storage.c173
-rw-r--r--tools/testing/selftests/bpf/progs/local_storage_bench.c104
-rw-r--r--tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c67
-rw-r--r--tools/testing/selftests/bpf/progs/loop1.c7
-rw-r--r--tools/testing/selftests/bpf/progs/loop2.c7
-rw-r--r--tools/testing/selftests/bpf/progs/loop3.c11
-rw-r--r--tools/testing/selftests/bpf/progs/loop4.c4
-rw-r--r--tools/testing/selftests/bpf/progs/loop5.c1
-rw-r--r--tools/testing/selftests/bpf/progs/loop6.c95
-rw-r--r--tools/testing/selftests/bpf/progs/lpm_trie.h30
-rw-r--r--tools/testing/selftests/bpf/progs/lpm_trie_bench.c230
-rw-r--r--tools/testing/selftests/bpf/progs/lpm_trie_map.c19
-rw-r--r--tools/testing/selftests/bpf/progs/lru_bug.c49
-rw-r--r--tools/testing/selftests/bpf/progs/lsm.c89
-rw-r--r--tools/testing/selftests/bpf/progs/lsm_cgroup.c192
-rw-r--r--tools/testing/selftests/bpf/progs/lsm_cgroup_nonvoid.c14
-rw-r--r--tools/testing/selftests/bpf/progs/lsm_tailcall.c34
-rw-r--r--tools/testing/selftests/bpf/progs/map_excl.c34
-rw-r--r--tools/testing/selftests/bpf/progs/map_in_map_btf.c73
-rw-r--r--tools/testing/selftests/bpf/progs/map_kptr.c540
-rw-r--r--tools/testing/selftests/bpf/progs/map_kptr_fail.c388
-rw-r--r--tools/testing/selftests/bpf/progs/map_percpu_stats.c24
-rw-r--r--tools/testing/selftests/bpf/progs/map_ptr_kern.c28
-rw-r--r--tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c229
-rw-r--r--tools/testing/selftests/bpf/progs/missed_kprobe.c30
-rw-r--r--tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c54
-rw-r--r--tools/testing/selftests/bpf/progs/missed_tp_recursion.c41
-rw-r--r--tools/testing/selftests/bpf/progs/mmap_inner_array.c57
-rw-r--r--tools/testing/selftests/bpf/progs/modify_return.c40
-rw-r--r--tools/testing/selftests/bpf/progs/mptcp_bpf.h42
-rw-r--r--tools/testing/selftests/bpf/progs/mptcp_sock.c88
-rw-r--r--tools/testing/selftests/bpf/progs/mptcp_sockmap.c43
-rw-r--r--tools/testing/selftests/bpf/progs/mptcp_subflow.c128
-rw-r--r--tools/testing/selftests/bpf/progs/mptcpify.c24
-rw-r--r--tools/testing/selftests/bpf/progs/nested_acquire.c33
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_common.h12
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_failure.c41
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_success.c42
-rw-r--r--tools/testing/selftests/bpf/progs/net_timestamping.c248
-rw-r--r--tools/testing/selftests/bpf/progs/netcnt_prog.c10
-rw-r--r--tools/testing/selftests/bpf/progs/netif_receive_skb.c15
-rw-r--r--tools/testing/selftests/bpf/progs/netns_cookie_prog.c103
-rw-r--r--tools/testing/selftests/bpf/progs/normal_map_btf.c56
-rw-r--r--tools/testing/selftests/bpf/progs/percpu_alloc_array.c190
-rw-r--r--tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c109
-rw-r--r--tools/testing/selftests/bpf/progs/percpu_alloc_fail.c182
-rw-r--r--tools/testing/selftests/bpf/progs/perf_event_stackmap.c8
-rw-r--r--tools/testing/selftests/bpf/progs/perfbuf_bench.c4
-rw-r--r--tools/testing/selftests/bpf/progs/preempt_lock.c212
-rw-r--r--tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c106
-rw-r--r--tools/testing/selftests/bpf/progs/prepare.c27
-rw-r--r--tools/testing/selftests/bpf/progs/priv_freplace_prog.c13
-rw-r--r--tools/testing/selftests/bpf/progs/priv_map.c13
-rw-r--r--tools/testing/selftests/bpf/progs/priv_prog.c13
-rw-r--r--tools/testing/selftests/bpf/progs/pro_epilogue.c154
-rw-r--r--tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c149
-rw-r--r--tools/testing/selftests/bpf/progs/pro_epilogue_with_kfunc.c88
-rw-r--r--tools/testing/selftests/bpf/progs/profiler.inc.h162
-rw-r--r--tools/testing/selftests/bpf/progs/profiler1.c1
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf.h88
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf180.c22
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf600.c11
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c6
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf600_iter.c7
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf600_nounroll.c3
-rw-r--r--tools/testing/selftests/bpf/progs/raw_tp_null.c31
-rw-r--r--tools/testing/selftests/bpf/progs/raw_tp_null_fail.c24
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree.c323
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c52
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c38
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_fail.c304
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_search.c206
-rw-r--r--tools/testing/selftests/bpf/progs/rcu_read_lock.c541
-rw-r--r--tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c36
-rw-r--r--tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c38
-rw-r--r--tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c60
-rw-r--r--tools/testing/selftests/bpf/progs/read_vsyscall.c59
-rw-r--r--tools/testing/selftests/bpf/progs/recursion.c43
-rw-r--r--tools/testing/selftests/bpf/progs/recvmsg4_prog.c40
-rw-r--r--tools/testing/selftests/bpf/progs/recvmsg6_prog.c46
-rw-r--r--tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c38
-rw-r--r--tools/testing/selftests/bpf/progs/refcounted_kptr.c631
-rw-r--r--tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c121
-rw-r--r--tools/testing/selftests/bpf/progs/res_spin_lock.c147
-rw-r--r--tools/testing/selftests/bpf/progs/res_spin_lock_fail.c244
-rw-r--r--tools/testing/selftests/bpf/progs/ringbuf_bench.c14
-rw-r--r--tools/testing/selftests/bpf/progs/security_bpf_map.c69
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg4_prog.c13
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg6_prog.c64
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c45
-rw-r--r--tools/testing/selftests/bpf/progs/set_global_vars.c106
-rw-r--r--tools/testing/selftests/bpf/progs/setget_sockopt.c434
-rw-r--r--tools/testing/selftests/bpf/progs/sk_bypass_prot_mem.c104
-rw-r--r--tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c59
-rw-r--r--tools/testing/selftests/bpf/progs/skb_load_bytes.c19
-rw-r--r--tools/testing/selftests/bpf/progs/skb_pkt_end.c16
-rw-r--r--tools/testing/selftests/bpf/progs/sock_addr_kern.c65
-rw-r--r--tools/testing/selftests/bpf/progs/sock_destroy_prog.c145
-rw-r--r--tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c22
-rw-r--r--tools/testing/selftests/bpf/progs/sock_iter_batch.c133
-rw-r--r--tools/testing/selftests/bpf/progs/socket_cookie_prog.c47
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_parse_prog.c4
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c14
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c18
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_inherit.c19
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_multi.c31
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c43
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_sk.c108
-rw-r--r--tools/testing/selftests/bpf/progs/stacktrace_ips.c49
-rw-r--r--tools/testing/selftests/bpf/progs/stacktrace_map.c (renamed from tools/testing/selftests/bpf/progs/test_stacktrace_map.c)14
-rw-r--r--tools/testing/selftests/bpf/progs/stacktrace_map_skip.c68
-rw-r--r--tools/testing/selftests/bpf/progs/stream.c237
-rw-r--r--tools/testing/selftests/bpf/progs/stream_fail.c33
-rw-r--r--tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c105
-rw-r--r--tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c26
-rw-r--r--tools/testing/selftests/bpf/progs/string_kfuncs_success.c56
-rw-r--r--tools/testing/selftests/bpf/progs/strncmp_bench.c53
-rw-r--r--tools/testing/selftests/bpf/progs/strncmp_test.c54
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta.h172
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c9
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_autocreate.c52
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c32
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_detach.c22
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c19
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping1.c59
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping2.c59
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c30
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__invalid_scalar.c26
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__local_kptr.c34
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__nonzero_offset.c25
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__wrong_type.c30
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c29
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c24
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_module.c90
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c102
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c22
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_private_stack.c62
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c62
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c50
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_refcounted.c31
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__global_subprog.c39
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__ref_leak.c22
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__tail_call.c36
-rw-r--r--tools/testing/selftests/bpf/progs/summarization.c78
-rw-r--r--tools/testing/selftests/bpf/progs/summarization_freplace.c33
-rw-r--r--tools/testing/selftests/bpf/progs/syscall.c208
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall1.c7
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall2.c23
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall3.c9
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall4.c9
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall5.c9
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall6.c34
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c7
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c9
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c22
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c35
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c45
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c18
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c18
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c37
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c73
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c65
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c38
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_fail.c64
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_freplace.c23
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_poke.c32
-rw-r--r--tools/testing/selftests/bpf/progs/task_kfunc_common.h77
-rw-r--r--tools/testing/selftests/bpf/progs/task_kfunc_failure.c340
-rw-r--r--tools/testing/selftests/bpf/progs/task_kfunc_success.c419
-rw-r--r--tools/testing/selftests/bpf/progs/task_local_data.bpf.h237
-rw-r--r--tools/testing/selftests/bpf/progs/task_local_storage.c64
-rw-r--r--tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c35
-rw-r--r--tools/testing/selftests/bpf/progs/task_ls_recursion.c90
-rw-r--r--tools/testing/selftests/bpf/progs/task_ls_uptr.c63
-rw-r--r--tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c47
-rw-r--r--tools/testing/selftests/bpf/progs/task_work.c107
-rw-r--r--tools/testing/selftests/bpf/progs/task_work_fail.c96
-rw-r--r--tools/testing/selftests/bpf/progs/task_work_stress.c73
-rw-r--r--tools/testing/selftests/bpf/progs/tc_bpf2bpf.c25
-rw-r--r--tools/testing/selftests/bpf/progs/tc_dummy.c12
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c29
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_kfunc.c121
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_unsupp_cong_op.c21
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_update.c74
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c63
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_rtt.c7
-rw-r--r--tools/testing/selftests/bpf/progs/test_access_variable_array.c19
-rw-r--r--tools/testing/selftests/bpf/progs/test_assign_reuse.c142
-rw-r--r--tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_attach_probe.c177
-rw-r--r--tools/testing/selftests/bpf/progs/test_attach_probe_manual.c53
-rw-r--r--tools/testing/selftests/bpf/progs/test_autoattach.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_cookie.c137
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_ma.c285
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_nf.c346
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c149
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_decl_tag.c50
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_ext.c22
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_haskv.c50
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_map_in_map.c14
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_newkv.c17
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_nokv.c14
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c100
-rw-r--r--tools/testing/selftests/bpf/progs/test_build_id.c31
-rw-r--r--tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c71
-rw-r--r--tools/testing/selftests/bpf/progs/test_cgroup_link.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_check_mtu.c302
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.c34
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.h11
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c981
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_autosize.c20
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_extern.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_read_macros.c50
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c5
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_enum64val.c70
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_existence.c11
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c21
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_mods.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_module.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_size.c32
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c49
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c63
-rw-r--r--tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c28
-rw-r--r--tools/testing/selftests/bpf/progs/test_d_path_check_types.c32
-rw-r--r--tools/testing/selftests/bpf/progs/test_deny_namespace.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_enable_stats.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_fill_link_info.c61
-rw-r--r--tools/testing/selftests/bpf/progs/test_fsverity.c48
-rw-r--r--tools/testing/selftests/bpf/progs/test_get_xattr.c84
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_data.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func1.c20
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func10.c35
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func11.c21
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func12.c25
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func13.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func14.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func15.c56
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func16.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func17.c19
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func2.c47
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func3.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func4.c55
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func5.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func6.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func7.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func8.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func9.c134
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func_args.c91
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c172
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_map_resize.c94
-rw-r--r--tools/testing/selftests/bpf/progs/test_hash_large_key.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_helper_restricted.c123
-rw-r--r--tools/testing/selftests/bpf/progs/test_jhash.h31
-rw-r--r--tools/testing/selftests/bpf/progs/test_kernel_flag.c28
-rw-r--r--tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c86
-rw-r--r--tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c43
-rw-r--r--tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c44
-rw-r--r--tools/testing/selftests/bpf/progs/test_ksyms_module.c50
-rw-r--r--tools/testing/selftests/bpf/progs/test_ksyms_weak.c71
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb_noinline.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c487
-rw-r--r--tools/testing/selftests/bpf/progs/test_ldsx_insn.c125
-rw-r--r--tools/testing/selftests/bpf/progs/test_legacy_printk.c73
-rw-r--r--tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c37
-rw-r--r--tools/testing/selftests/bpf/progs/test_log_buf.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_log_fixup.c74
-rw-r--r--tools/testing/selftests/bpf/progs/test_lookup_and_delete.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_lookup_key.c46
-rw-r--r--tools/testing/selftests/bpf/progs/test_lwt_redirect.c90
-rw-r--r--tools/testing/selftests/bpf/progs/test_lwt_reroute.c36
-rw-r--r--tools/testing/selftests/bpf/progs/test_lwt_seg6local.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_in_map.c39
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_lock.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c76
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_ops.c138
-rw-r--r--tools/testing/selftests/bpf/progs/test_migrate_reuseport.c135
-rw-r--r--tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_mmap.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_module_attach.c69
-rw-r--r--tools/testing/selftests/bpf/progs/test_netfilter_link_attach.c14
-rw-r--r--tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c55
-rw-r--r--tools/testing/selftests/bpf/progs/test_obj_id.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_overhead.c5
-rw-r--r--tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c118
-rw-r--r--tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c114
-rw-r--r--tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_perf_branches.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_perf_buffer.c22
-rw-r--r--tools/testing/selftests/bpf/progs/test_perf_link.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_perf_skip.c15
-rw-r--r--tools/testing/selftests/bpf/progs/test_pinning.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_pinning_devmap.c20
-rw-r--r--tools/testing/selftests/bpf/progs/test_pinning_htab.c25
-rw-r--r--tools/testing/selftests/bpf/progs/test_pinning_invalid.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_access.c10
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_md_access.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_probe_user.c43
-rw-r--r--tools/testing/selftests/bpf/progs/test_prog_array_init.c39
-rw-r--r--tools/testing/selftests/bpf/progs/test_ptr_untrusted.c29
-rw-r--r--tools/testing/selftests/bpf/progs/test_queue_stack_map.h4
-rw-r--r--tools/testing/selftests/bpf/progs/test_rdonly_maps.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf.c7
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c71
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf_multi.c15
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf_n.c47
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf_overwrite.c98
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf_write.c46
-rw-r--r--tools/testing/selftests/bpf/progs/test_seg6_loop.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_send_signal_kern.c37
-rw-r--r--tools/testing/selftests/bpf/progs/test_set_remove_xattr.c133
-rw-r--r--tools/testing/selftests/bpf/progs/test_sig_in_xattr.c87
-rw-r--r--tools/testing/selftests/bpf/progs/test_siphash.h64
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_assign.c22
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup.c135
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c41
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c20
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c47
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_ctx.c13
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c40
-rw-r--r--tools/testing/selftests/bpf/progs/test_skeleton.c41
-rw-r--r--tools/testing/selftests/bpf/progs/test_skmsg_load_helpers.c27
-rw-r--r--tools/testing/selftests/bpf/progs/test_snprintf.c77
-rw-r--r--tools/testing/selftests/bpf/progs/test_snprintf_single.c20
-rw-r--r--tools/testing/selftests/bpf/progs/test_sock_fields.c73
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c45
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c32
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_kern.h67
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_ktls.c40
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_listen.c50
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c47
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_progs_query.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_redir.c68
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_strp.c53
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_update.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_spin_lock.c72
-rw-r--r--tools/testing/selftests/bpf/progs/test_spin_lock_fail.c317
-rw-r--r--tools/testing/selftests/bpf/progs/test_stack_var_off.c51
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c17
-rw-r--r--tools/testing/selftests/bpf/progs/test_static_linked1.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_static_linked2.c31
-rw-r--r--tools/testing/selftests/bpf/progs/test_subprogs.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_subprogs_extable.c51
-rw-r--r--tools/testing/selftests/bpf/progs/test_subskeleton.c28
-rw-r--r--tools/testing/selftests/bpf/progs/test_subskeleton_lib.c61
-rw-r--r--tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop1.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop2.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_prog.c11
-rw-r--r--tools/testing/selftests/bpf/progs/test_task_local_data.c65
-rw-r--r--tools/testing/selftests/bpf/progs/test_task_pt_regs.c36
-rw-r--r--tools/testing/selftests/bpf/progs/test_task_under_cgroup.c77
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_bpf.c25
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_change_tail.c106
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_dtime.c392
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_edt.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_link.c129
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_neigh.c37
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_peer.c56
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_tunnel.c265
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c167
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c591
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h138
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_estats.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c7
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c7
-rw-r--r--tools/testing/selftests/bpf/progs/test_time_tai.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_tracepoint.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_trampoline_count.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_tunnel_kern.c747
-rw-r--r--tools/testing/selftests/bpf/progs/test_unpriv_bpf_disabled.c83
-rw-r--r--tools/testing/selftests/bpf/progs/test_uprobe.c99
-rw-r--r--tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c117
-rw-r--r--tools/testing/selftests/bpf/progs/test_urandom_usdt.c70
-rw-r--r--tools/testing/selftests/bpf/progs/test_usdt.c141
-rw-r--r--tools/testing/selftests/bpf/progs/test_usdt_multispec.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_user_ringbuf.h35
-rw-r--r--tools/testing/selftests/bpf/progs/test_varlen.c13
-rw-r--r--tools/testing/selftests/bpf/progs/test_verif_scale1.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_verif_scale2.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_verif_scale3.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c92
-rw-r--r--tools/testing/selftests/bpf/progs/test_vmlinux.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp.c7
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c36
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_attach_fail.c54
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_context_test_run.c20
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_devmap_tailcall.c29
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c128
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_dynptr.c256
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_link.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_loop.c7
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_meta.c658
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_noinline.c111
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_pull_data.c48
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_redirect.c28
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_update_frags.c42
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_vlan.c39
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_frags_helpers.c27
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c19
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_with_devmap_frags_helpers.c27
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c15
-rw-r--r--tools/testing/selftests/bpf/progs/timer.c425
-rw-r--r--tools/testing/selftests/bpf/progs/timer_crash.c54
-rw-r--r--tools/testing/selftests/bpf/progs/timer_failure.c68
-rw-r--r--tools/testing/selftests/bpf/progs/timer_interrupt.c48
-rw-r--r--tools/testing/selftests/bpf/progs/timer_lockup.c87
-rw-r--r--tools/testing/selftests/bpf/progs/timer_mim.c88
-rw-r--r--tools/testing/selftests/bpf/progs/timer_mim_reject.c74
-rw-r--r--tools/testing/selftests/bpf/progs/token_lsm.c32
-rw-r--r--tools/testing/selftests/bpf/progs/trace_dummy_st_ops.c21
-rw-r--r--tools/testing/selftests/bpf/progs/trace_printk.c7
-rw-r--r--tools/testing/selftests/bpf/progs/trace_vprintk.c34
-rw-r--r--tools/testing/selftests/bpf/progs/tracing_failure.c32
-rw-r--r--tools/testing/selftests/bpf/progs/tracing_struct.c166
-rw-r--r--tools/testing/selftests/bpf/progs/tracing_struct_many_args.c95
-rw-r--r--tools/testing/selftests/bpf/progs/trigger_bench.c136
-rw-r--r--tools/testing/selftests/bpf/progs/twfw.c58
-rw-r--r--tools/testing/selftests/bpf/progs/type_cast.c79
-rw-r--r--tools/testing/selftests/bpf/progs/udp_limit.c2
-rw-r--r--tools/testing/selftests/bpf/progs/uninit_stack.c88
-rw-r--r--tools/testing/selftests/bpf/progs/unsupported_ops.c22
-rw-r--r--tools/testing/selftests/bpf/progs/update_map_in_htab.c30
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi.c143
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_bench.c15
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c39
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c40
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_session.c71
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_session_cookie.c48
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_session_recursive.c44
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_session_single.c44
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c16
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c31
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_syscall.c15
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c73
-rw-r--r--tools/testing/selftests/bpf/progs/uptr_failure.c105
-rw-r--r--tools/testing/selftests/bpf/progs/uptr_map_failure.c27
-rw-r--r--tools/testing/selftests/bpf/progs/uptr_update_failure.c42
-rw-r--r--tools/testing/selftests/bpf/progs/uretprobe_stack.c96
-rw-r--r--tools/testing/selftests/bpf/progs/user_ringbuf_fail.c245
-rw-r--r--tools/testing/selftests/bpf/progs/user_ringbuf_success.c212
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_and.c113
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_arena.c257
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_arena_large.c274
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_array_access.c731
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_async_cb_context.c181
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_basic_stack.c100
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bitfield_write.c100
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bits_iter.c232
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bounds.c1866
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c174
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c639
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c554
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c888
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c124
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bpf_trap.c71
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bswap.c63
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c80
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_btf_unreliable_prog.c20
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_cfg.c162
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c89
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c227
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_cgroup_storage.c308
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_const.c98
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_const_or.c82
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ctx.c295
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c228
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_d_path.c48
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c862
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_direct_stack_access_wraparound.c56
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_div0.c213
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_div_overflow.c144
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c310
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_global_subprogs.c391
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_gotol.c63
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_gotox.c389
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c825
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c550
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_helper_restricted.c279
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_helper_value_access.c1282
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_int_ptr.c155
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c786
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c213
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_jit_convergence.c114
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c170
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ld_ind.c110
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ldsx.c447
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_leak_ptr.c92
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_linked_scalars.c34
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_live_stack.c344
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_load_acquire.c234
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_loops1.c307
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_lsm.c162
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_lwt.c234
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_map_in_map.c260
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_map_ptr.c162
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c265
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_map_ret_val.c110
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_masking.c410
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_may_goto_1.c109
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_may_goto_2.c28
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_meta_access.c284
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_movsx.c354
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_mtu.c20
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_mul.c38
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c116
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c49
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_or_jmp32_k.c41
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_precision.c304
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c61
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_private_stack.c359
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_raw_stack.c372
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_raw_tp_writable.c50
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ref_tracking.c1495
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_reg_equal.c58
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_regalloc.c364
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ringbuf.c131
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_runtime_jit.c360
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_scalar_ids.c830
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_sdiv.c1224
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_search_pruning.c362
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_sock.c1169
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_sock_addr.c331
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_sockmap_mutate.c187
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_spill_fill.c1282
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_spin_lock.c559
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_stack_ptr.c536
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_store_release.c301
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_subprog_precision.c849
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_subreg.c673
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_tailcall.c31
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c105
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_typedef.c23
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_uninit.c61
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_unpriv.c953
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c34
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_value.c158
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_value_adj_spill.c78
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c215
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_value_or_null.c288
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c1441
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_var_off.c418
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_vfs_accept.c103
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_vfs_reject.c176
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_xadd.c124
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_xdp.c24
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c1722
-rw-r--r--tools/testing/selftests/bpf/progs/vrf_socket_lookup.c89
-rw-r--r--tools/testing/selftests/bpf/progs/wq.c206
-rw-r--r--tools/testing/selftests/bpf/progs/wq_failures.c167
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_dummy.c2
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_features.c268
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_flowtable.c148
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_hw_metadata.c117
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_metadata.c111
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_metadata2.c24
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_redirect_map.c94
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c111
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c865
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_tx.c2
-rw-r--r--tools/testing/selftests/bpf/progs/xdping_kern.c9
-rw-r--r--tools/testing/selftests/bpf/progs/xdpwall.c364
-rw-r--r--tools/testing/selftests/bpf/progs/xfrm_info.c41
-rw-r--r--tools/testing/selftests/bpf/progs/xsk_xdp_progs.c123
-rw-r--r--tools/testing/selftests/bpf/sdt-config.h6
-rw-r--r--tools/testing/selftests/bpf/sdt.h515
-rw-r--r--tools/testing/selftests/bpf/task_local_storage_helpers.h22
-rw-r--r--tools/testing/selftests/bpf/test_bpftool.py22
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool.sh6
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool_build.sh23
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool_map.sh398
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool_metadata.sh7
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool_synctypes.py627
-rw-r--r--tools/testing/selftests/bpf/test_btf.h18
-rw-r--r--tools/testing/selftests/bpf/test_cgroup_storage.c170
-rw-r--r--tools/testing/selftests/bpf/test_cpp.cpp113
-rw-r--r--tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c160
-rw-r--r--tools/testing/selftests/bpf/test_dev_cgroup.c82
-rwxr-xr-xtools/testing/selftests/bpf/test_doc_build.sh20
-rw-r--r--tools/testing/selftests/bpf/test_flow_dissector.c780
-rwxr-xr-xtools/testing/selftests/bpf/test_flow_dissector.sh168
-rwxr-xr-xtools/testing/selftests/bpf/test_ftrace.sh7
-rwxr-xr-xtools/testing/selftests/bpf/test_kmod.sh20
-rw-r--r--tools/testing/selftests/bpf/test_kmods/.gitignore (renamed from tools/testing/selftests/bpf/bpf_testmod/.gitignore)0
-rw-r--r--tools/testing/selftests/bpf/test_kmods/Makefile21
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_x.c39
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_y.c39
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_test_no_cfi.c84
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c393
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h71
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_testmod.c1787
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_testmod.h125
-rw-r--r--tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h166
-rwxr-xr-xtools/testing/selftests/bpf/test_lirc_mode2.sh5
-rw-r--r--tools/testing/selftests/bpf/test_lirc_mode2_user.c9
-rw-r--r--tools/testing/selftests/bpf/test_loader.c1406
-rw-r--r--tools/testing/selftests/bpf/test_lru_map.c226
-rwxr-xr-xtools/testing/selftests/bpf/test_lwt_ip_encap.sh464
-rwxr-xr-xtools/testing/selftests/bpf/test_lwt_seg6local.sh149
-rw-r--r--tools/testing/selftests/bpf/test_maps.c536
-rw-r--r--tools/testing/selftests/bpf/test_maps.h7
-rw-r--r--tools/testing/selftests/bpf/test_netcnt.c148
-rw-r--r--tools/testing/selftests/bpf/test_progs.c1904
-rw-r--r--tools/testing/selftests/bpf/test_progs.h381
-rwxr-xr-xtools/testing/selftests/bpf/test_skb_cgroup_id.sh63
-rw-r--r--tools/testing/selftests/bpf/test_skb_cgroup_id_user.c181
-rw-r--r--tools/testing/selftests/bpf/test_sock.c481
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c1483
-rwxr-xr-xtools/testing/selftests/bpf/test_sock_addr.sh58
-rw-r--r--tools/testing/selftests/bpf/test_socket_cookie.c208
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c463
-rw-r--r--tools/testing/selftests/bpf/test_stub.c44
-rw-r--r--tools/testing/selftests/bpf/test_tag.c14
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_edt.sh99
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_redirect.sh216
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_tunnel.sh295
-rwxr-xr-xtools/testing/selftests/bpf/test_tcp_check_syncookie.sh84
-rw-r--r--tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c257
-rw-r--r--tools/testing/selftests/bpf/test_tcp_hdr_options.h1
-rw-r--r--tools/testing/selftests/bpf/test_tcpnotify_user.c35
-rwxr-xr-xtools/testing/selftests/bpf/test_tunnel.sh798
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c950
-rw-r--r--tools/testing/selftests/bpf/test_verifier_log.c174
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_features.sh107
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_meta.sh52
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_redirect.sh81
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_veth.sh118
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_vlan.sh228
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_vlan_mode_generic.sh9
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_vlan_mode_native.sh9
-rwxr-xr-xtools/testing/selftests/bpf/test_xdping.sh4
-rwxr-xr-xtools/testing/selftests/bpf/test_xsk.sh277
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.c436
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.h54
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c699
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.h26
-rw-r--r--tools/testing/selftests/bpf/unpriv_helpers.c144
-rw-r--r--tools/testing/selftests/bpf/unpriv_helpers.h7
-rw-r--r--tools/testing/selftests/bpf/uprobe_multi.c136
-rw-r--r--tools/testing/selftests/bpf/uprobe_multi.ld11
-rw-r--r--tools/testing/selftests/bpf/uptr_test_common.h63
-rw-r--r--tools/testing/selftests/bpf/urandom_read.c78
-rw-r--r--tools/testing/selftests/bpf/urandom_read_aux.c9
-rw-r--r--tools/testing/selftests/bpf/urandom_read_lib1.c35
-rw-r--r--tools/testing/selftests/bpf/urandom_read_lib2.c8
-rw-r--r--tools/testing/selftests/bpf/usdt.h545
-rw-r--r--tools/testing/selftests/bpf/verifier/and.c66
-rw-r--r--tools/testing/selftests/bpf/verifier/array_access.c378
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_and.c100
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_bounds.c27
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c235
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_fetch.c151
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_fetch_add.c106
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_invalid.c25
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_or.c102
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_xchg.c46
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_xor.c77
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_instr.c6
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_stack.c64
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds.c746
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds_deduction.c124
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c406
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_get_stack.c44
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_loop_inline.c270
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_st_mem.c99
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c445
-rw-r--r--tools/testing/selftests/bpf/verifier/cfg.c73
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c72
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_skb.c197
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_storage.c220
-rw-r--r--tools/testing/selftests/bpf/verifier/const_or.c60
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx.c198
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c33
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_sk_msg.c181
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_skb.c76
-rw-r--r--tools/testing/selftests/bpf/verifier/d_path.c37
-rw-r--r--tools/testing/selftests/bpf/verifier/dead_code.c23
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_packet_access.c656
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c40
-rw-r--r--tools/testing/selftests/bpf/verifier/div0.c184
-rw-r--r--tools/testing/selftests/bpf/verifier/div_overflow.c110
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_access_var_len.c616
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_packet_access.c460
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_value_access.c953
-rw-r--r--tools/testing/selftests/bpf/verifier/int_ptr.c160
-rw-r--r--tools/testing/selftests/bpf/verifier/jit.c117
-rw-r--r--tools/testing/selftests/bpf/verifier/jmp32.c40
-rw-r--r--tools/testing/selftests/bpf/verifier/jset.c8
-rw-r--r--tools/testing/selftests/bpf/verifier/jump.c22
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_imm64.c8
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_ind.c72
-rw-r--r--tools/testing/selftests/bpf/verifier/leak_ptr.c67
-rw-r--r--tools/testing/selftests/bpf/verifier/loops1.c206
-rw-r--r--tools/testing/selftests/bpf/verifier/lwt.c189
-rw-r--r--tools/testing/selftests/bpf/verifier/map_in_map.c62
-rw-r--r--tools/testing/selftests/bpf/verifier/map_kptr.c444
-rw-r--r--tools/testing/selftests/bpf/verifier/map_ptr.c95
-rw-r--r--tools/testing/selftests/bpf/verifier/map_ptr_mixing.c100
-rw-r--r--tools/testing/selftests/bpf/verifier/map_ret_val.c65
-rw-r--r--tools/testing/selftests/bpf/verifier/masking.c322
-rw-r--r--tools/testing/selftests/bpf/verifier/meta_access.c235
-rw-r--r--tools/testing/selftests/bpf/verifier/perf_event_sample_period.c6
-rw-r--r--tools/testing/selftests/bpf/verifier/precise.c180
-rw-r--r--tools/testing/selftests/bpf/verifier/prevent_map_lookup.c29
-rw-r--r--tools/testing/selftests/bpf/verifier/raw_stack.c305
-rw-r--r--tools/testing/selftests/bpf/verifier/raw_tp_writable.c35
-rw-r--r--tools/testing/selftests/bpf/verifier/ref_tracking.c907
-rw-r--r--tools/testing/selftests/bpf/verifier/regalloc.c277
-rw-r--r--tools/testing/selftests/bpf/verifier/runtime_jit.c231
-rw-r--r--tools/testing/selftests/bpf/verifier/search_pruning.c156
-rw-r--r--tools/testing/selftests/bpf/verifier/sleepable.c91
-rw-r--r--tools/testing/selftests/bpf/verifier/sock.c658
-rw-r--r--tools/testing/selftests/bpf/verifier/spill_fill.c76
-rw-r--r--tools/testing/selftests/bpf/verifier/spin_lock.c333
-rw-r--r--tools/testing/selftests/bpf/verifier/stack_ptr.c357
-rw-r--r--tools/testing/selftests/bpf/verifier/subreg.c533
-rw-r--r--tools/testing/selftests/bpf/verifier/uninit.c39
-rw-r--r--tools/testing/selftests/bpf/verifier/unpriv.c523
-rw-r--r--tools/testing/selftests/bpf/verifier/value.c104
-rw-r--r--tools/testing/selftests/bpf/verifier/value_adj_spill.c43
-rw-r--r--tools/testing/selftests/bpf/verifier/value_illegal_alu.c95
-rw-r--r--tools/testing/selftests/bpf/verifier/value_or_null.c171
-rw-r--r--tools/testing/selftests/bpf/verifier/value_ptr_arith.c876
-rw-r--r--tools/testing/selftests/bpf/verifier/var_off.c248
-rw-r--r--tools/testing/selftests/bpf/verifier/xadd.c97
-rw-r--r--tools/testing/selftests/bpf/verifier/xdp.c14
-rw-r--r--tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c900
-rwxr-xr-xtools/testing/selftests/bpf/verify_sig_setup.sh136
-rw-r--r--tools/testing/selftests/bpf/veristat.c3384
-rw-r--r--tools/testing/selftests/bpf/veristat.cfg18
-rwxr-xr-xtools/testing/selftests/bpf/vmtest.sh484
-rwxr-xr-xtools/testing/selftests/bpf/with_addr.sh54
-rwxr-xr-xtools/testing/selftests/bpf/with_tunnels.sh36
-rw-r--r--tools/testing/selftests/bpf/xdp_features.c718
-rw-r--r--tools/testing/selftests/bpf/xdp_features.h20
-rw-r--r--tools/testing/selftests/bpf/xdp_hw_metadata.c894
-rw-r--r--tools/testing/selftests/bpf/xdp_metadata.h52
-rw-r--r--tools/testing/selftests/bpf/xdp_synproxy.c471
-rw-r--r--tools/testing/selftests/bpf/xdping.c26
-rw-r--r--tools/testing/selftests/bpf/xdpxceiver.c1074
-rw-r--r--tools/testing/selftests/bpf/xdpxceiver.h160
-rw-r--r--tools/testing/selftests/bpf/xsk.c781
-rw-r--r--tools/testing/selftests/bpf/xsk.h249
-rwxr-xr-xtools/testing/selftests/bpf/xsk_prereqs.sh108
-rw-r--r--tools/testing/selftests/bpf/xsk_xdp_common.h13
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.c456
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.h34
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test.c6
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test_arm64.c6
-rw-r--r--tools/testing/selftests/breakpoints/step_after_suspend_test.c47
-rw-r--r--tools/testing/selftests/cachestat/.gitignore3
-rw-r--r--tools/testing/selftests/cachestat/Makefile8
-rw-r--r--tools/testing/selftests/cachestat/test_cachestat.c365
-rw-r--r--tools/testing/selftests/capabilities/Makefile2
-rw-r--r--tools/testing/selftests/capabilities/test_execve.c28
-rw-r--r--tools/testing/selftests/capabilities/validate_cap.c17
-rw-r--r--tools/testing/selftests/cgroup/.gitignore11
-rw-r--r--tools/testing/selftests/cgroup/Makefile35
-rw-r--r--tools/testing/selftests/cgroup/config6
-rw-r--r--tools/testing/selftests/cgroup/lib/cgroup_util.c (renamed from tools/testing/selftests/cgroup/cgroup_util.c)265
-rw-r--r--tools/testing/selftests/cgroup/lib/include/cgroup_util.h (renamed from tools/testing/selftests/cgroup/cgroup_util.h)55
-rw-r--r--tools/testing/selftests/cgroup/lib/libcgroup.mk19
-rw-r--r--tools/testing/selftests/cgroup/memcg_protection.m89
-rw-r--r--tools/testing/selftests/cgroup/test_core.c265
-rw-r--r--tools/testing/selftests/cgroup/test_cpu.c825
-rw-r--r--tools/testing/selftests/cgroup/test_cpuset.c276
-rwxr-xr-xtools/testing/selftests/cgroup/test_cpuset_prs.sh1193
-rwxr-xr-xtools/testing/selftests/cgroup/test_cpuset_v1_base.sh77
-rwxr-xr-xtools/testing/selftests/cgroup/test_cpuset_v1_hp.sh46
-rw-r--r--tools/testing/selftests/cgroup/test_freezer.c733
-rw-r--r--tools/testing/selftests/cgroup/test_hugetlb_memcg.c234
-rw-r--r--tools/testing/selftests/cgroup/test_kill.c298
-rw-r--r--tools/testing/selftests/cgroup/test_kmem.c57
-rw-r--r--tools/testing/selftests/cgroup/test_memcontrol.c872
-rw-r--r--tools/testing/selftests/cgroup/test_pids.c181
-rwxr-xr-xtools/testing/selftests/cgroup/test_stress.sh2
-rw-r--r--tools/testing/selftests/cgroup/test_zswap.c636
-rw-r--r--tools/testing/selftests/cgroup/wait_inotify.c87
-rw-r--r--tools/testing/selftests/clone3/Makefile2
-rw-r--r--tools/testing/selftests/clone3/clone3.c285
-rw-r--r--tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c6
-rw-r--r--tools/testing/selftests/clone3/clone3_clear_sighand.c8
-rw-r--r--tools/testing/selftests/clone3/clone3_selftests.h17
-rw-r--r--tools/testing/selftests/clone3/clone3_set_tid.c125
-rw-r--r--tools/testing/selftests/connector/.gitignore1
-rw-r--r--tools/testing/selftests/connector/Makefile6
-rw-r--r--tools/testing/selftests/connector/proc_filter.c310
-rw-r--r--tools/testing/selftests/core/.gitignore1
-rw-r--r--tools/testing/selftests/core/Makefile4
-rw-r--r--tools/testing/selftests/core/close_range_test.c154
-rw-r--r--tools/testing/selftests/core/unshare_test.c94
-rw-r--r--tools/testing/selftests/coredump/.gitignore4
-rw-r--r--tools/testing/selftests/coredump/Makefile13
-rw-r--r--tools/testing/selftests/coredump/README.rst50
-rw-r--r--tools/testing/selftests/coredump/config3
-rw-r--r--tools/testing/selftests/coredump/coredump_socket_protocol_test.c1568
-rw-r--r--tools/testing/selftests/coredump/coredump_socket_test.c742
-rw-r--r--tools/testing/selftests/coredump/coredump_test.h59
-rw-r--r--tools/testing/selftests/coredump/coredump_test_helpers.c383
-rwxr-xr-xtools/testing/selftests/coredump/stackdump14
-rw-r--r--tools/testing/selftests/coredump/stackdump_test.c169
-rw-r--r--tools/testing/selftests/cpu-hotplug/Makefile2
-rw-r--r--tools/testing/selftests/cpu-hotplug/config1
-rwxr-xr-xtools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh142
-rw-r--r--tools/testing/selftests/cpufreq/.gitignore2
-rw-r--r--tools/testing/selftests/cpufreq/Makefile1
-rw-r--r--tools/testing/selftests/cpufreq/config8
-rwxr-xr-xtools/testing/selftests/cpufreq/cpufreq.sh36
-rwxr-xr-xtools/testing/selftests/cpufreq/main.sh62
-rwxr-xr-xtools/testing/selftests/cpufreq/module.sh6
-rw-r--r--tools/testing/selftests/damon/.gitignore3
-rw-r--r--tools/testing/selftests/damon/Makefile26
-rw-r--r--tools/testing/selftests/damon/_common.sh11
-rw-r--r--tools/testing/selftests/damon/_damon_sysfs.py837
-rw-r--r--tools/testing/selftests/damon/access_memory.c41
-rw-r--r--tools/testing/selftests/damon/access_memory_even.c39
-rw-r--r--tools/testing/selftests/damon/config6
-rwxr-xr-xtools/testing/selftests/damon/damon_nr_regions.py147
-rwxr-xr-xtools/testing/selftests/damon/damos_apply_interval.py67
-rwxr-xr-xtools/testing/selftests/damon/damos_quota.py70
-rwxr-xr-xtools/testing/selftests/damon/damos_quota_goal.py80
-rwxr-xr-xtools/testing/selftests/damon/damos_tried_regions.py65
-rwxr-xr-xtools/testing/selftests/damon/drgn_dump_damon_status.py223
-rwxr-xr-xtools/testing/selftests/damon/lru_sort.sh39
-rwxr-xr-xtools/testing/selftests/damon/reclaim.sh40
-rwxr-xr-xtools/testing/selftests/damon/sysfs.py303
-rwxr-xr-xtools/testing/selftests/damon/sysfs.sh370
-rwxr-xr-xtools/testing/selftests/damon/sysfs_memcg_path_leak.sh43
-rwxr-xr-xtools/testing/selftests/damon/sysfs_no_op_commit_break.py72
-rwxr-xr-xtools/testing/selftests/damon/sysfs_update_removed_scheme_dir.sh56
-rwxr-xr-xtools/testing/selftests/damon/sysfs_update_schemes_tried_regions_hang.py33
-rwxr-xr-xtools/testing/selftests/damon/sysfs_update_schemes_tried_regions_wss_estimation.py55
-rw-r--r--tools/testing/selftests/devices/error_logs/Makefile3
-rwxr-xr-xtools/testing/selftests/devices/error_logs/test_device_error_logs.py85
-rw-r--r--tools/testing/selftests/devices/probe/Makefile4
-rw-r--r--tools/testing/selftests/devices/probe/boards/Dell Inc.,XPS 13 9300.yaml40
-rw-r--r--tools/testing/selftests/devices/probe/boards/google,spherion.yaml54
-rwxr-xr-xtools/testing/selftests/devices/probe/test_discoverable_devices.py358
-rw-r--r--tools/testing/selftests/dma/Makefile6
-rw-r--r--tools/testing/selftests/dma/config1
-rw-r--r--tools/testing/selftests/dma/dma_map_benchmark.c123
-rw-r--r--tools/testing/selftests/dmabuf-heaps/.gitignore1
-rw-r--r--tools/testing/selftests/dmabuf-heaps/Makefile2
-rw-r--r--tools/testing/selftests/dmabuf-heaps/config3
-rw-r--r--tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c277
-rw-r--r--tools/testing/selftests/drivers/.gitignore1
-rw-r--r--tools/testing/selftests/drivers/dma-buf/Makefile2
-rw-r--r--tools/testing/selftests/drivers/dma-buf/udmabuf.c236
-rwxr-xr-xtools/testing/selftests/drivers/gpu/drm_mm.sh4
-rw-r--r--tools/testing/selftests/drivers/net/.gitignore4
-rw-r--r--tools/testing/selftests/drivers/net/Makefile43
-rw-r--r--tools/testing/selftests/drivers/net/README.rst136
-rw-r--r--tools/testing/selftests/drivers/net/bonding/Makefile32
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh46
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh80
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh84
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-lladdr-target.sh80
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond_ipsec_offload.sh156
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond_lacp_prio.sh108
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond_macvlan_ipvlan.sh97
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond_options.sh578
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond_passive_lacp.sh105
-rw-r--r--tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh161
-rw-r--r--tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh45
-rw-r--r--tools/testing/selftests/drivers/net/bonding/config21
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh109
-rw-r--r--tools/testing/selftests/drivers/net/bonding/lag_lib.sh177
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh45
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh45
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/netcons_over_bonding.sh361
-rw-r--r--tools/testing/selftests/drivers/net/bonding/settings1
-rw-r--r--tools/testing/selftests/drivers/net/config10
-rw-r--r--tools/testing/selftests/drivers/net/dsa/Makefile36
l---------tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh1
l---------tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh1
l---------tools/testing/selftests/drivers/net/dsa/bridge_mld.sh1
l---------tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh1
l---------tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh1
l---------tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh1
-rw-r--r--tools/testing/selftests/drivers/net/dsa/forwarding.config2
l---------tools/testing/selftests/drivers/net/dsa/local_termination.sh1
l---------tools/testing/selftests/drivers/net/dsa/no_forwarding.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/dsa/run_net_forwarding_test.sh9
l---------tools/testing/selftests/drivers/net/dsa/tc_actions.sh1
l---------tools/testing/selftests/drivers/net/dsa/tc_taprio.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh47
-rw-r--r--tools/testing/selftests/drivers/net/gro.c1369
-rwxr-xr-xtools/testing/selftests/drivers/net/gro.py164
-rwxr-xr-xtools/testing/selftests/drivers/net/hds.py329
-rw-r--r--tools/testing/selftests/drivers/net/hw/.gitignore4
-rw-r--r--tools/testing/selftests/drivers/net/hw/Makefile79
-rw-r--r--tools/testing/selftests/drivers/net/hw/config11
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/csum.py110
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/devlink_port_split.py (renamed from tools/testing/selftests/net/devlink_port_split.py)32
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/devlink_rate_tc_bw.py439
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/devmem.py77
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/ethtool.sh (renamed from tools/testing/selftests/net/forwarding/ethtool.sh)18
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/ethtool_extended_state.sh (renamed from tools/testing/selftests/net/forwarding/ethtool_extended_state.sh)46
-rw-r--r--tools/testing/selftests/drivers/net/hw/ethtool_lib.sh (renamed from tools/testing/selftests/net/forwarding/ethtool_lib.sh)34
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/ethtool_mm.sh341
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/ethtool_rmon.sh145
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/hw_stats_l3.sh334
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/hw_stats_l3_gre.sh111
-rw-r--r--tools/testing/selftests/drivers/net/hw/iou-zcrx.c464
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/iou-zcrx.py145
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/irq.py99
-rw-r--r--tools/testing/selftests/drivers/net/hw/lib/py/__init__.py52
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/loopback.sh (renamed from tools/testing/selftests/net/forwarding/loopback.sh)5
-rw-r--r--tools/testing/selftests/drivers/net/hw/ncdevmem.c1524
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/nic_timestamp.py113
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/pp_alloc_fail.py144
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/rss_api.py476
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/rss_ctx.py832
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/rss_flow_label.py167
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/rss_input_xfrm.py92
-rw-r--r--tools/testing/selftests/drivers/net/hw/settings1
-rw-r--r--tools/testing/selftests/drivers/net/hw/toeplitz.c655
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/toeplitz.py211
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/tso.py261
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/xsk_reconfig.py60
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/__init__.py55
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/env.py287
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/load.py139
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/remote.py15
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/remote_netns.py21
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/remote_ssh.py39
-rw-r--r--tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh419
-rwxr-xr-xtools/testing/selftests/drivers/net/microchip/ksz9477_qos.sh668
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh334
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_control.sh29
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh105
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh7
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh46
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh120
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh54
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip6.sh250
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan.sh11
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan_ipv6.sh342
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/egress_vid_classification.sh272
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/ethtool_lanes.sh190
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/extack.sh24
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/fib.sh14
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/hw_stats_l3.sh31
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/ingress_rif_conf_1d.sh263
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/ingress_rif_conf_1q.sh264
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/ingress_rif_conf_vxlan.sh311
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh71
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh22
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh64
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/one_armed_router.sh3
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/pci_reset.sh58
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/port_range_occ.sh111
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/port_range_scale.sh95
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/port_scale.sh62
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/q_in_q_veto.sh8
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh68
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh26
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh27
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh182
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh74
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh42
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh243
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh151
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh46
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rif_bridge.sh184
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/rif_counter_scale.sh107
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rif_lag.sh137
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rif_lag_vlan.sh147
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/rif_mac_profile_scale.sh72
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles.sh213
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh147
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/router_bridge_lag.sh50
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/router_scale.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh258
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_ets.sh40
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_offload.sh290
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh342
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh98
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh33
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_tbf_ets.sh6
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_tbf_prio.sh6
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_tbf_root.sh6
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh69
l---------tools/testing/selftests/drivers/net/mlxsw/spectrum-2/port_range_scale.sh1
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum-2/port_scale.sh16
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/q_in_vni_veto.sh77
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh44
l---------tools/testing/selftests/drivers/net/mlxsw/spectrum-2/rif_counter_scale.sh1
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum-2/rif_mac_profile_scale.sh16
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh161
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh15
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh339
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh6
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/port_range_scale.sh16
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/port_scale.sh16
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/q_in_vni_veto.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh35
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/rif_counter_scale.sh34
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/rif_mac_profile_scale.sh16
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/vxlan_flooding_ipv6.sh334
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh23
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh3
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh22
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/tc_sample.sh658
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan.sh283
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh39
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto_ipv6.sh12
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh17
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan_ipv6.sh65
-rwxr-xr-xtools/testing/selftests/drivers/net/napi_id.py23
-rw-r--r--tools/testing/selftests/drivers/net/napi_id_helper.c100
-rwxr-xr-xtools/testing/selftests/drivers/net/napi_threaded.py143
-rwxr-xr-xtools/testing/selftests/drivers/net/netcons_basic.sh74
-rwxr-xr-xtools/testing/selftests/drivers/net/netcons_cmdline.sh65
-rwxr-xr-xtools/testing/selftests/drivers/net/netcons_fragmented_msg.sh122
-rwxr-xr-xtools/testing/selftests/drivers/net/netcons_overflow.sh67
-rwxr-xr-xtools/testing/selftests/drivers/net/netcons_sysdata.sh272
-rwxr-xr-xtools/testing/selftests/drivers/net/netcons_torture.sh130
-rw-r--r--tools/testing/selftests/drivers/net/netdevsim/Makefile26
-rw-r--r--tools/testing/selftests/drivers/net/netdevsim/config11
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink.sh381
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink_trap.sh27
-rw-r--r--tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh8
-rw-r--r--tools/testing/selftests/drivers/net/netdevsim/ethtool-features.sh31
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh114
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/ethtool-ring.sh85
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/fib.sh65
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/fib_notifications.sh430
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/hw_stats_l3.sh421
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/macsec-offload.sh117
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/nexthop.sh626
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/peer.sh144
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/psample.sh183
-rw-r--r--tools/testing/selftests/drivers/net/netdevsim/settings1
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh80
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh87
-rwxr-xr-xtools/testing/selftests/drivers/net/netpoll_basic.py396
-rwxr-xr-xtools/testing/selftests/drivers/net/ocelot/basic_qos.sh253
-rwxr-xr-xtools/testing/selftests/drivers/net/ocelot/psfp.sh323
-rwxr-xr-xtools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh212
-rwxr-xr-xtools/testing/selftests/drivers/net/ping.py241
-rwxr-xr-xtools/testing/selftests/drivers/net/psp.py640
-rw-r--r--tools/testing/selftests/drivers/net/psp_responder.c483
-rwxr-xr-xtools/testing/selftests/drivers/net/queues.py125
-rwxr-xr-xtools/testing/selftests/drivers/net/ring_reconfig.py167
-rwxr-xr-xtools/testing/selftests/drivers/net/shaper.py461
-rwxr-xr-xtools/testing/selftests/drivers/net/stats.py321
-rw-r--r--tools/testing/selftests/drivers/net/team/Makefile18
-rw-r--r--tools/testing/selftests/drivers/net/team/config7
-rwxr-xr-xtools/testing/selftests/drivers/net/team/dev_addr_lists.sh51
-rwxr-xr-xtools/testing/selftests/drivers/net/team/options.sh188
-rwxr-xr-xtools/testing/selftests/drivers/net/team/propagation.sh80
-rw-r--r--tools/testing/selftests/drivers/net/virtio_net/Makefile12
-rwxr-xr-xtools/testing/selftests/drivers/net/virtio_net/basic_features.sh131
-rw-r--r--tools/testing/selftests/drivers/net/virtio_net/config8
-rw-r--r--tools/testing/selftests/drivers/net/virtio_net/virtio_net_common.sh99
-rwxr-xr-xtools/testing/selftests/drivers/net/xdp.py779
-rw-r--r--tools/testing/selftests/drivers/ntsync/.gitignore1
-rw-r--r--tools/testing/selftests/drivers/ntsync/Makefile7
-rw-r--r--tools/testing/selftests/drivers/ntsync/config1
-rw-r--r--tools/testing/selftests/drivers/ntsync/ntsync.c1343
-rw-r--r--tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile6
-rwxr-xr-xtools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh494
-rw-r--r--tools/testing/selftests/drivers/s390x/uvdevice/Makefile20
-rw-r--r--tools/testing/selftests/drivers/s390x/uvdevice/config1
-rw-r--r--tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c270
-rwxr-xr-xtools/testing/selftests/drivers/sdsi/sdsi.sh25
-rw-r--r--tools/testing/selftests/drivers/sdsi/sdsi_test.py226
-rw-r--r--tools/testing/selftests/dt/.gitignore1
-rw-r--r--tools/testing/selftests/dt/Makefile21
-rw-r--r--tools/testing/selftests/dt/compatible_ignore_list1
-rwxr-xr-xtools/testing/selftests/dt/test_unprobed_devices.sh93
-rw-r--r--tools/testing/selftests/efivarfs/create-read.c2
-rwxr-xr-xtools/testing/selftests/efivarfs/efivarfs.sh173
-rw-r--r--tools/testing/selftests/exec/.gitignore9
-rw-r--r--tools/testing/selftests/exec/Makefile46
-rwxr-xr-xtools/testing/selftests/exec/binfmt_script.py (renamed from tools/testing/selftests/exec/binfmt_script)10
-rwxr-xr-xtools/testing/selftests/exec/check-exec-tests.sh205
-rw-r--r--tools/testing/selftests/exec/check-exec.c463
-rw-r--r--tools/testing/selftests/exec/config2
-rw-r--r--tools/testing/selftests/exec/execveat.c172
-rw-r--r--tools/testing/selftests/exec/false.c5
-rw-r--r--tools/testing/selftests/exec/load_address.c83
-rw-r--r--tools/testing/selftests/exec/non-regular.c2
-rw-r--r--tools/testing/selftests/exec/null-argv.c78
-rw-r--r--tools/testing/selftests/exec/recursion-depth.c53
-rw-r--r--tools/testing/selftests/fchmodat2/.gitignore (renamed from tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/.gitignore)2
-rw-r--r--tools/testing/selftests/fchmodat2/Makefile15
-rw-r--r--tools/testing/selftests/fchmodat2/fchmodat2_test.c142
-rw-r--r--tools/testing/selftests/filelock/Makefile5
-rw-r--r--tools/testing/selftests/filelock/ofdlocks.c132
-rw-r--r--tools/testing/selftests/filesystems/.gitignore4
-rw-r--r--tools/testing/selftests/filesystems/Makefile4
-rw-r--r--tools/testing/selftests/filesystems/anon_inode_test.c69
-rw-r--r--tools/testing/selftests/filesystems/binderfs/Makefile4
-rw-r--r--tools/testing/selftests/filesystems/binderfs/binderfs_test.c26
-rw-r--r--tools/testing/selftests/filesystems/binderfs/config1
-rw-r--r--tools/testing/selftests/filesystems/devpts_pts.c2
-rw-r--r--tools/testing/selftests/filesystems/epoll/Makefile2
-rw-r--r--tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c46
-rw-r--r--tools/testing/selftests/filesystems/eventfd/.gitignore (renamed from tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/.gitignore)2
-rw-r--r--tools/testing/selftests/filesystems/eventfd/Makefile7
-rw-r--r--tools/testing/selftests/filesystems/eventfd/eventfd_test.c311
-rw-r--r--tools/testing/selftests/filesystems/fat/.gitignore2
-rw-r--r--tools/testing/selftests/filesystems/fat/Makefile7
-rw-r--r--tools/testing/selftests/filesystems/fat/config2
-rw-r--r--tools/testing/selftests/filesystems/fat/rename_exchange.c37
-rwxr-xr-xtools/testing/selftests/filesystems/fat/run_fat_tests.sh82
-rw-r--r--tools/testing/selftests/filesystems/fclog.c130
-rw-r--r--tools/testing/selftests/filesystems/file_stressor.c194
-rw-r--r--tools/testing/selftests/filesystems/fuse/.gitignore3
-rw-r--r--tools/testing/selftests/filesystems/fuse/Makefile21
-rw-r--r--tools/testing/selftests/filesystems/fuse/fuse_mnt.c146
-rw-r--r--tools/testing/selftests/filesystems/fuse/fusectl_test.c140
-rw-r--r--tools/testing/selftests/filesystems/kernfs_test.c38
-rw-r--r--tools/testing/selftests/filesystems/mount-notify/.gitignore3
-rw-r--r--tools/testing/selftests/filesystems/mount-notify/Makefile11
-rw-r--r--tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c528
-rw-r--r--tools/testing/selftests/filesystems/mount-notify/mount-notify_test_ns.c555
-rw-r--r--tools/testing/selftests/filesystems/nsfs/.gitignore (renamed from tools/testing/selftests/nsfs/.gitignore)1
-rw-r--r--tools/testing/selftests/filesystems/nsfs/Makefile (renamed from tools/testing/selftests/nsfs/Makefile)4
-rw-r--r--tools/testing/selftests/filesystems/nsfs/config (renamed from tools/testing/selftests/nsfs/config)0
-rw-r--r--tools/testing/selftests/filesystems/nsfs/iterate_mntns.c163
-rw-r--r--tools/testing/selftests/filesystems/nsfs/owner.c (renamed from tools/testing/selftests/nsfs/owner.c)0
-rw-r--r--tools/testing/selftests/filesystems/nsfs/pidns.c (renamed from tools/testing/selftests/nsfs/pidns.c)0
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/.gitignore3
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/Makefile14
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c160
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/log.h26
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c720
-rw-r--r--tools/testing/selftests/filesystems/statmount/.gitignore3
-rw-r--r--tools/testing/selftests/filesystems/statmount/Makefile10
-rw-r--r--tools/testing/selftests/filesystems/statmount/listmount_test.c66
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount.h82
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount_test.c702
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount_test_ns.c291
-rw-r--r--tools/testing/selftests/filesystems/utils.c589
-rw-r--r--tools/testing/selftests/filesystems/utils.h48
-rw-r--r--tools/testing/selftests/filesystems/wrappers.h108
-rw-r--r--tools/testing/selftests/firmware/Makefile2
-rw-r--r--tools/testing/selftests/firmware/config1
-rwxr-xr-xtools/testing/selftests/firmware/fw_filesystem.sh170
-rwxr-xr-xtools/testing/selftests/firmware/fw_lib.sh19
-rw-r--r--tools/testing/selftests/firmware/fw_namespace.c9
-rwxr-xr-xtools/testing/selftests/firmware/fw_run_tests.sh4
-rwxr-xr-xtools/testing/selftests/firmware/fw_upload.sh214
-rw-r--r--tools/testing/selftests/ftrace/.gitignore1
-rw-r--r--tools/testing/selftests/ftrace/Makefile5
-rw-r--r--tools/testing/selftests/ftrace/config27
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest115
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest-ktap8
-rw-r--r--tools/testing/selftests/ftrace/poll.c74
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc101
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/ringbuffer_subbuf_size.tc95
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/snapshot1.tc31
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/test_ownership.tc122
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/trace_marker.tc82
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/trace_marker_raw.tc107
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc78
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc97
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc82
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe_repeat.tc19
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc7
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe.tc41
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe_module.tc61
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_uprobe.tc32
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/dynevent_limitations.tc63
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/enable_disable_tprobe.tc40
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/eprobes_syntax_errors.tc29
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/fprobe_args_vfs.tc41
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/fprobe_entry_arg.tc18
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/fprobe_syntax_errors.tc122
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc38
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc81
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/event-mod.tc191
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc7
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc30
-rw-r--r--tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc109
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi-filter.tc177
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi.tc103
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/fgraph-profiler.tc31
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/fgraph-retval.tc44
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc29
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc17
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_hotplug.tc42
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc9
-rw-r--r--tools/testing/selftests/ftrace/test.d/functions79
-rw-r--r--tools/testing/selftests/ftrace/test.d/instances/instance-event.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc60
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc25
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc16
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_vfs.tc40
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc3
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_insn_boundary.tc19
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc13
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_opt_types.tc34
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc20
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_entry_arg.tc18
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/profile.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/preemptirq/irqsoff_tracer.tc8
-rw-r--r--tools/testing/selftests/ftrace/test.d/tracer/wakeup.tc7
-rw-r--r--tools/testing/selftests/ftrace/test.d/tracer/wakeup_rt.tc7
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-eprobe.tc53
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-stack-legacy.tc24
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-stack.tc23
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc10
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc35
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-expressions.tc64
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-poll.tc74
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc18
-rw-r--r--tools/testing/selftests/futex/Makefile6
-rw-r--r--tools/testing/selftests/futex/functional/.gitignore6
-rw-r--r--tools/testing/selftests/futex/functional/Makefile28
-rw-r--r--tools/testing/selftests/futex/functional/futex_numa.c263
-rw-r--r--tools/testing/selftests/futex/functional/futex_numa_mpol.c219
-rw-r--r--tools/testing/selftests/futex/functional/futex_priv_hash.c270
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue.c106
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi.c259
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c86
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c131
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait.c144
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c83
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_timeout.c209
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c76
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_wouldblock.c81
-rw-r--r--tools/testing/selftests/futex/functional/futex_waitv.c226
-rwxr-xr-xtools/testing/selftests/futex/functional/run.sh60
-rw-r--r--tools/testing/selftests/futex/include/futex2test.h98
-rw-r--r--tools/testing/selftests/futex/include/futextest.h22
-rw-r--r--tools/testing/selftests/futex/include/logging.h148
-rw-r--r--tools/testing/selftests/gpio/.gitignore4
-rw-r--r--tools/testing/selftests/gpio/Makefile29
-rw-r--r--tools/testing/selftests/gpio/config3
-rwxr-xr-xtools/testing/selftests/gpio/gpio-aggregator.sh727
-rw-r--r--tools/testing/selftests/gpio/gpio-chip-info.c57
-rw-r--r--tools/testing/selftests/gpio/gpio-line-name.c55
-rw-r--r--tools/testing/selftests/gpio/gpio-mockup-cdev.c198
-rw-r--r--tools/testing/selftests/gpio/gpio-mockup-chardev.c323
-rwxr-xr-xtools/testing/selftests/gpio/gpio-mockup-sysfs.sh168
-rwxr-xr-xtools/testing/selftests/gpio/gpio-mockup.sh494
-rwxr-xr-xtools/testing/selftests/gpio/gpio-sim.sh418
-rw-r--r--tools/testing/selftests/hid/.gitignore7
-rw-r--r--tools/testing/selftests/hid/Makefile242
-rw-r--r--tools/testing/selftests/hid/config32
-rw-r--r--tools/testing/selftests/hid/config.common241
-rw-r--r--tools/testing/selftests/hid/config.x86_644
-rwxr-xr-xtools/testing/selftests/hid/hid-apple.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-core.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-gamepad.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-ite.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-keyboard.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-mouse.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-multitouch.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-sony.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-tablet.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-usb_crash.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-wacom.sh7
-rw-r--r--tools/testing/selftests/hid/hid_bpf.c911
-rw-r--r--tools/testing/selftests/hid/hid_common.h480
-rw-r--r--tools/testing/selftests/hid/hidraw.c694
-rw-r--r--tools/testing/selftests/hid/progs/hid.c600
-rw-r--r--tools/testing/selftests/hid/progs/hid_bpf_helpers.h125
-rwxr-xr-xtools/testing/selftests/hid/run-hid-tools-tests.sh30
-rw-r--r--tools/testing/selftests/hid/settings3
-rw-r--r--tools/testing/selftests/hid/tests/__init__.py2
-rw-r--r--tools/testing/selftests/hid/tests/base.py432
-rw-r--r--tools/testing/selftests/hid/tests/base_device.py448
-rw-r--r--tools/testing/selftests/hid/tests/base_gamepad.py238
-rw-r--r--tools/testing/selftests/hid/tests/conftest.py81
-rw-r--r--tools/testing/selftests/hid/tests/descriptors_wacom.py1360
-rw-r--r--tools/testing/selftests/hid/tests/test_apple_keyboard.py441
-rw-r--r--tools/testing/selftests/hid/tests/test_gamepad.py665
-rw-r--r--tools/testing/selftests/hid/tests/test_hid_core.py154
-rw-r--r--tools/testing/selftests/hid/tests/test_ite_keyboard.py167
-rw-r--r--tools/testing/selftests/hid/tests/test_keyboard.py485
-rw-r--r--tools/testing/selftests/hid/tests/test_mouse.py1047
-rw-r--r--tools/testing/selftests/hid/tests/test_multitouch.py2143
-rw-r--r--tools/testing/selftests/hid/tests/test_sony.py343
-rw-r--r--tools/testing/selftests/hid/tests/test_tablet.py1577
-rw-r--r--tools/testing/selftests/hid/tests/test_usb_crash.py103
-rw-r--r--tools/testing/selftests/hid/tests/test_wacom_generic.py1407
-rwxr-xr-xtools/testing/selftests/hid/vmtest.sh474
-rw-r--r--tools/testing/selftests/intel_pstate/Makefile8
-rw-r--r--tools/testing/selftests/intel_pstate/aperf.c2
-rwxr-xr-xtools/testing/selftests/intel_pstate/run.sh9
-rw-r--r--tools/testing/selftests/iommu/.gitignore3
-rw-r--r--tools/testing/selftests/iommu/Makefile10
-rw-r--r--tools/testing/selftests/iommu/config5
-rw-r--r--tools/testing/selftests/iommu/iommufd.c3544
-rw-r--r--tools/testing/selftests/iommu/iommufd_fail_nth.c748
-rw-r--r--tools/testing/selftests/iommu/iommufd_utils.h1259
-rw-r--r--tools/testing/selftests/ipc/Makefile2
-rw-r--r--tools/testing/selftests/ipc/msgque.c68
-rw-r--r--tools/testing/selftests/ir/ir_loopback.c13
-rwxr-xr-xtools/testing/selftests/ir/ir_loopback.sh2
-rw-r--r--tools/testing/selftests/kcmp/Makefile2
-rw-r--r--tools/testing/selftests/kcmp/kcmp_test.c10
-rw-r--r--tools/testing/selftests/kexec/.gitignore2
-rw-r--r--tools/testing/selftests/kexec/Makefile13
-rwxr-xr-xtools/testing/selftests/kexec/kexec_common_lib.sh67
-rwxr-xr-xtools/testing/selftests/kexec/test_kexec_file_load.sh13
-rw-r--r--tools/testing/selftests/kexec/test_kexec_jump.c72
-rwxr-xr-xtools/testing/selftests/kexec/test_kexec_jump.sh42
-rw-r--r--tools/testing/selftests/kho/arm64.conf9
-rw-r--r--tools/testing/selftests/kho/init.c95
-rwxr-xr-xtools/testing/selftests/kho/vmtest.sh186
-rw-r--r--tools/testing/selftests/kho/x86.conf7
-rw-r--r--tools/testing/selftests/kmod/config5
-rwxr-xr-xtools/testing/selftests/kmod/kmod.sh13
-rw-r--r--tools/testing/selftests/kselftest.h230
-rw-r--r--tools/testing/selftests/kselftest/ksft.py93
-rw-r--r--tools/testing/selftests/kselftest/ktap_helpers.sh126
-rwxr-xr-xtools/testing/selftests/kselftest/module.sh2
-rw-r--r--tools/testing/selftests/kselftest/runner.sh122
-rwxr-xr-xtools/testing/selftests/kselftest_deps.sh92
-rw-r--r--tools/testing/selftests/kselftest_harness.h633
-rw-r--r--tools/testing/selftests/kselftest_harness/.gitignore2
-rw-r--r--tools/testing/selftests/kselftest_harness/Makefile8
-rw-r--r--tools/testing/selftests/kselftest_harness/harness-selftest.c136
-rw-r--r--tools/testing/selftests/kselftest_harness/harness-selftest.expected64
-rwxr-xr-xtools/testing/selftests/kselftest_harness/harness-selftest.sh13
-rw-r--r--tools/testing/selftests/kselftest_module.h22
-rw-r--r--tools/testing/selftests/kvm/.gitignore43
-rw-r--r--tools/testing/selftests/kvm/Makefile149
-rw-r--r--tools/testing/selftests/kvm/Makefile.kvm356
-rw-r--r--tools/testing/selftests/kvm/aarch64/get-reg-list-sve.c3
-rw-r--r--tools/testing/selftests/kvm/access_tracking_perf_test.c609
-rw-r--r--tools/testing/selftests/kvm/arch_timer.c252
-rw-r--r--tools/testing/selftests/kvm/arm64/aarch32_id_regs.c167
-rw-r--r--tools/testing/selftests/kvm/arm64/arch_timer.c215
-rw-r--r--tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c1059
-rw-r--r--tools/testing/selftests/kvm/arm64/at.c166
-rw-r--r--tools/testing/selftests/kvm/arm64/debug-exceptions.c607
-rw-r--r--tools/testing/selftests/kvm/arm64/external_aborts.c415
-rw-r--r--tools/testing/selftests/kvm/arm64/get-reg-list.c (renamed from tools/testing/selftests/kvm/aarch64/get-reg-list.c)829
-rw-r--r--tools/testing/selftests/kvm/arm64/hello_el2.c71
-rw-r--r--tools/testing/selftests/kvm/arm64/host_sve.c127
-rw-r--r--tools/testing/selftests/kvm/arm64/hypercalls.c334
-rw-r--r--tools/testing/selftests/kvm/arm64/kvm-uuid.c70
-rw-r--r--tools/testing/selftests/kvm/arm64/no-vgic-v3.c177
-rw-r--r--tools/testing/selftests/kvm/arm64/page_fault_test.c1135
-rw-r--r--tools/testing/selftests/kvm/arm64/psci_test.c291
-rw-r--r--tools/testing/selftests/kvm/arm64/sea_to_user.c331
-rw-r--r--tools/testing/selftests/kvm/arm64/set_id_regs.c813
-rw-r--r--tools/testing/selftests/kvm/arm64/smccc_filter.c281
-rw-r--r--tools/testing/selftests/kvm/arm64/vcpu_width_config.c121
-rw-r--r--tools/testing/selftests/kvm/arm64/vgic_init.c1021
-rw-r--r--tools/testing/selftests/kvm/arm64/vgic_irq.c1084
-rw-r--r--tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c413
-rw-r--r--tools/testing/selftests/kvm/arm64/vpmu_counter_access.c643
-rw-r--r--tools/testing/selftests/kvm/coalesced_io_test.c236
-rw-r--r--tools/testing/selftests/kvm/config3
-rw-r--r--tools/testing/selftests/kvm/demand_paging_test.c562
-rw-r--r--tools/testing/selftests/kvm/dirty_log_perf_test.c412
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c862
-rw-r--r--tools/testing/selftests/kvm/get-reg-list.c405
-rw-r--r--tools/testing/selftests/kvm/guest_memfd_test.c492
-rw-r--r--tools/testing/selftests/kvm/guest_print_test.c235
-rw-r--r--tools/testing/selftests/kvm/hardware_disable_test.c180
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/processor.h59
-rw-r--r--tools/testing/selftests/kvm/include/arm64/arch_timer.h182
-rw-r--r--tools/testing/selftests/kvm/include/arm64/delay.h25
-rw-r--r--tools/testing/selftests/kvm/include/arm64/gic.h65
-rw-r--r--tools/testing/selftests/kvm/include/arm64/gic_v3.h604
-rw-r--r--tools/testing/selftests/kvm/include/arm64/gic_v3_its.h20
-rw-r--r--tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h10
-rw-r--r--tools/testing/selftests/kvm/include/arm64/processor.h387
-rw-r--r--tools/testing/selftests/kvm/include/arm64/spinlock.h13
-rw-r--r--tools/testing/selftests/kvm/include/arm64/ucall.h20
-rw-r--r--tools/testing/selftests/kvm/include/arm64/vgic.h40
-rw-r--r--tools/testing/selftests/kvm/include/guest_modes.h21
-rw-r--r--tools/testing/selftests/kvm/include/kvm_syscalls.h81
-rw-r--r--tools/testing/selftests/kvm/include/kvm_test_harness.h36
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h1370
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util_types.h20
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/arch_timer.h85
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/kvm_util_arch.h7
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/processor.h220
-rw-r--r--tools/testing/selftests/kvm/include/loongarch/ucall.h20
-rw-r--r--tools/testing/selftests/kvm/include/lru_gen_util.h51
-rw-r--r--tools/testing/selftests/kvm/include/memstress.h82
-rw-r--r--tools/testing/selftests/kvm/include/numaif.h83
-rw-r--r--tools/testing/selftests/kvm/include/perf_test_util.h194
-rw-r--r--tools/testing/selftests/kvm/include/riscv/arch_timer.h71
-rw-r--r--tools/testing/selftests/kvm/include/riscv/kvm_util_arch.h7
-rw-r--r--tools/testing/selftests/kvm/include/riscv/processor.h195
-rw-r--r--tools/testing/selftests/kvm/include/riscv/sbi.h141
-rw-r--r--tools/testing/selftests/kvm/include/riscv/ucall.h21
-rw-r--r--tools/testing/selftests/kvm/include/s390/debug_print.h69
-rw-r--r--tools/testing/selftests/kvm/include/s390/diag318_test_handler.h (renamed from tools/testing/selftests/kvm/include/s390x/diag318_test_handler.h)0
-rw-r--r--tools/testing/selftests/kvm/include/s390/facility.h50
-rw-r--r--tools/testing/selftests/kvm/include/s390/kvm_util_arch.h7
-rw-r--r--tools/testing/selftests/kvm/include/s390/processor.h (renamed from tools/testing/selftests/kvm/include/s390x/processor.h)19
-rw-r--r--tools/testing/selftests/kvm/include/s390/sie.h240
-rw-r--r--tools/testing/selftests/kvm/include/s390/ucall.h19
-rw-r--r--tools/testing/selftests/kvm/include/sparsebit.h56
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h203
-rw-r--r--tools/testing/selftests/kvm/include/timer_test.h45
-rw-r--r--tools/testing/selftests/kvm/include/ucall_common.h116
-rw-r--r--tools/testing/selftests/kvm/include/userfaultfd_util.h52
-rw-r--r--tools/testing/selftests/kvm/include/x86/apic.h118
-rw-r--r--tools/testing/selftests/kvm/include/x86/evmcs.h (renamed from tools/testing/selftests/kvm/include/evmcs.h)248
-rw-r--r--tools/testing/selftests/kvm/include/x86/hyperv.h361
-rw-r--r--tools/testing/selftests/kvm/include/x86/kvm_util_arch.h51
-rw-r--r--tools/testing/selftests/kvm/include/x86/mce.h23
-rw-r--r--tools/testing/selftests/kvm/include/x86/pmu.h123
-rw-r--r--tools/testing/selftests/kvm/include/x86/processor.h1497
-rw-r--r--tools/testing/selftests/kvm/include/x86/sev.h147
-rw-r--r--tools/testing/selftests/kvm/include/x86/svm.h (renamed from tools/testing/selftests/kvm/include/x86_64/svm.h)41
-rw-r--r--tools/testing/selftests/kvm/include/x86/svm_util.h62
-rw-r--r--tools/testing/selftests/kvm/include/x86/ucall.h13
-rw-r--r--tools/testing/selftests/kvm/include/x86/vmx.h (renamed from tools/testing/selftests/kvm/include/x86_64/vmx.h)101
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h439
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/svm_util.h49
-rw-r--r--tools/testing/selftests/kvm/irqfd_test.c143
-rw-r--r--tools/testing/selftests/kvm/kvm_binary_stats_test.c273
-rw-r--r--tools/testing/selftests/kvm/kvm_create_max_vcpus.c10
-rw-r--r--tools/testing/selftests/kvm/kvm_page_table_test.c477
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/processor.c339
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/ucall.c115
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/gic.c163
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/gic_private.h33
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/gic_v3.c449
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c265
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/handlers.S126
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/processor.c732
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/spinlock.c27
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/ucall.c34
-rw-r--r--tools/testing/selftests/kvm/lib/arm64/vgic.c212
-rw-r--r--tools/testing/selftests/kvm/lib/assert.c27
-rw-r--r--tools/testing/selftests/kvm/lib/elf.c17
-rw-r--r--tools/testing/selftests/kvm/lib/guest_modes.c129
-rw-r--r--tools/testing/selftests/kvm/lib/guest_sprintf.c314
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c2361
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util_internal.h117
-rw-r--r--tools/testing/selftests/kvm/lib/loongarch/exception.S65
-rw-r--r--tools/testing/selftests/kvm/lib/loongarch/processor.c389
-rw-r--r--tools/testing/selftests/kvm/lib/loongarch/ucall.c38
-rw-r--r--tools/testing/selftests/kvm/lib/lru_gen_util.c387
-rw-r--r--tools/testing/selftests/kvm/lib/memstress.c391
-rw-r--r--tools/testing/selftests/kvm/lib/rbtree.c1
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/handlers.S104
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/processor.c517
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/ucall.c32
-rw-r--r--tools/testing/selftests/kvm/lib/s390/diag318_test_handler.c (renamed from tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c)14
-rw-r--r--tools/testing/selftests/kvm/lib/s390/facility.c14
-rw-r--r--tools/testing/selftests/kvm/lib/s390/processor.c (renamed from tools/testing/selftests/kvm/lib/s390x/processor.c)85
-rw-r--r--tools/testing/selftests/kvm/lib/s390/ucall.c22
-rw-r--r--tools/testing/selftests/kvm/lib/s390x/ucall.c59
-rw-r--r--tools/testing/selftests/kvm/lib/sparsebit.c56
-rw-r--r--tools/testing/selftests/kvm/lib/string_override.c48
-rw-r--r--tools/testing/selftests/kvm/lib/test_util.c337
-rw-r--r--tools/testing/selftests/kvm/lib/ucall_common.c163
-rw-r--r--tools/testing/selftests/kvm/lib/userfaultfd_util.c206
-rw-r--r--tools/testing/selftests/kvm/lib/x86/apic.c43
-rw-r--r--tools/testing/selftests/kvm/lib/x86/handlers.S (renamed from tools/testing/selftests/kvm/lib/x86_64/handlers.S)4
-rw-r--r--tools/testing/selftests/kvm/lib/x86/hyperv.c113
-rw-r--r--tools/testing/selftests/kvm/lib/x86/memstress.c112
-rw-r--r--tools/testing/selftests/kvm/lib/x86/pmu.c80
-rw-r--r--tools/testing/selftests/kvm/lib/x86/processor.c1325
-rw-r--r--tools/testing/selftests/kvm/lib/x86/sev.c199
-rw-r--r--tools/testing/selftests/kvm/lib/x86/svm.c (renamed from tools/testing/selftests/kvm/lib/x86_64/svm.c)48
-rw-r--r--tools/testing/selftests/kvm/lib/x86/ucall.c56
-rw-r--r--tools/testing/selftests/kvm/lib/x86/vmx.c (renamed from tools/testing/selftests/kvm/lib/x86_64/vmx.c)276
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c1226
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/ucall.c59
-rw-r--r--tools/testing/selftests/kvm/loongarch/arch_timer.c200
-rw-r--r--tools/testing/selftests/kvm/memslot_modification_stress_test.c191
-rw-r--r--tools/testing/selftests/kvm/memslot_perf_test.c1146
-rw-r--r--tools/testing/selftests/kvm/mmu_stress_test.c425
-rw-r--r--tools/testing/selftests/kvm/pre_fault_memory_test.c239
-rw-r--r--tools/testing/selftests/kvm/riscv/arch_timer.c109
-rw-r--r--tools/testing/selftests/kvm/riscv/ebreak_test.c83
-rw-r--r--tools/testing/selftests/kvm/riscv/get-reg-list.c1302
-rw-r--r--tools/testing/selftests/kvm/riscv/sbi_pmu_test.c731
-rw-r--r--tools/testing/selftests/kvm/rseq_test.c325
-rw-r--r--tools/testing/selftests/kvm/s390/cmma_test.c695
-rw-r--r--tools/testing/selftests/kvm/s390/config2
-rw-r--r--tools/testing/selftests/kvm/s390/cpumodel_subfuncs_test.c301
-rw-r--r--tools/testing/selftests/kvm/s390/debug_test.c160
-rw-r--r--tools/testing/selftests/kvm/s390/memop.c1187
-rw-r--r--tools/testing/selftests/kvm/s390/resets.c (renamed from tools/testing/selftests/kvm/s390x/resets.c)184
-rw-r--r--tools/testing/selftests/kvm/s390/shared_zeropage_test.c111
-rw-r--r--tools/testing/selftests/kvm/s390/sync_regs_test.c (renamed from tools/testing/selftests/kvm/s390x/sync_regs_test.c)155
-rw-r--r--tools/testing/selftests/kvm/s390/tprot.c244
-rw-r--r--tools/testing/selftests/kvm/s390/ucontrol_test.c798
-rw-r--r--tools/testing/selftests/kvm/s390/user_operexec.c140
-rw-r--r--tools/testing/selftests/kvm/s390x/memop.c166
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c392
-rw-r--r--tools/testing/selftests/kvm/settings1
-rw-r--r--tools/testing/selftests/kvm/steal_time.c318
-rw-r--r--tools/testing/selftests/kvm/system_counter_offset_test.c127
-rw-r--r-- tools/testing/selftests/kvm/x86/amx_test.c | 315
-rw-r--r-- tools/testing/selftests/kvm/x86/aperfmperf_test.c | 213
-rw-r--r-- tools/testing/selftests/kvm/x86/apic_bus_clock_test.c | 194
-rw-r--r-- tools/testing/selftests/kvm/x86/cpuid_test.c | 225
-rw-r--r-- tools/testing/selftests/kvm/x86/cr4_cpuid_sync_test.c | 100
-rw-r--r-- tools/testing/selftests/kvm/x86/debug_regs.c (renamed from tools/testing/selftests/kvm/x86_64/debug_regs.c) | 112
-rw-r--r-- tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c | 263
-rw-r--r-- tools/testing/selftests/kvm/x86/exit_on_emulation_failure_test.c | 39
-rw-r--r-- tools/testing/selftests/kvm/x86/fastops_test.c | 209
-rw-r--r-- tools/testing/selftests/kvm/x86/feature_msrs_test.c | 113
-rw-r--r-- tools/testing/selftests/kvm/x86/fix_hypercall_test.c | 142
-rw-r--r-- tools/testing/selftests/kvm/x86/flds_emulation.h | 52
-rw-r--r-- tools/testing/selftests/kvm/x86/hwcr_msr_test.c | 45
-rw-r--r-- tools/testing/selftests/kvm/x86/hyperv_clock.c | 263
-rw-r--r-- tools/testing/selftests/kvm/x86/hyperv_cpuid.c | 170
-rw-r--r-- tools/testing/selftests/kvm/x86/hyperv_evmcs.c | 307
-rw-r--r-- tools/testing/selftests/kvm/x86/hyperv_extended_hypercalls.c | 98
-rw-r--r-- tools/testing/selftests/kvm/x86/hyperv_features.c | 695
-rw-r--r-- tools/testing/selftests/kvm/x86/hyperv_ipi.c | 310
-rw-r--r-- tools/testing/selftests/kvm/x86/hyperv_svm_test.c | 199
-rw-r--r-- tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c | 680
-rw-r--r-- tools/testing/selftests/kvm/x86/kvm_buslock_test.c | 135
-rw-r--r-- tools/testing/selftests/kvm/x86/kvm_clock_test.c | 156
-rw-r--r-- tools/testing/selftests/kvm/x86/kvm_pv_test.c | 218
-rw-r--r-- tools/testing/selftests/kvm/x86/max_vcpuid_cap_test.c | 62
-rw-r--r-- tools/testing/selftests/kvm/x86/monitor_mwait_test.c | 136
-rw-r--r-- tools/testing/selftests/kvm/x86/msrs_test.c | 489
-rw-r--r-- tools/testing/selftests/kvm/x86/nested_close_kvm_test.c | 104
-rw-r--r-- tools/testing/selftests/kvm/x86/nested_emulation_test.c | 146
-rw-r--r-- tools/testing/selftests/kvm/x86/nested_exceptions_test.c | 290
-rw-r--r-- tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c | 116
-rw-r--r-- tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c) | 102
-rw-r--r-- tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c | 244
-rw-r--r-- tools/testing/selftests/kvm/x86/nx_huge_pages_test.c | 266
-rwxr-xr-x tools/testing/selftests/kvm/x86/nx_huge_pages_test.sh | 69
-rw-r--r-- tools/testing/selftests/kvm/x86/platform_info_test.c | 78
-rw-r--r-- tools/testing/selftests/kvm/x86/pmu_counters_test.c | 697
-rw-r--r-- tools/testing/selftests/kvm/x86/pmu_event_filter_test.c | 878
-rw-r--r-- tools/testing/selftests/kvm/x86/private_mem_conversions_test.c | 480
-rw-r--r-- tools/testing/selftests/kvm/x86/private_mem_kvm_exits_test.c | 120
-rw-r--r-- tools/testing/selftests/kvm/x86/recalc_apic_map_test.c | 74
-rw-r--r-- tools/testing/selftests/kvm/x86/set_boot_cpu_id.c | 146
-rw-r--r-- tools/testing/selftests/kvm/x86/set_sregs_test.c | 158
-rw-r--r-- tools/testing/selftests/kvm/x86/sev_init2_tests.c | 165
-rw-r--r-- tools/testing/selftests/kvm/x86/sev_migrate_tests.c | 397
-rw-r--r-- tools/testing/selftests/kvm/x86/sev_smoke_test.c | 229
-rw-r--r-- tools/testing/selftests/kvm/x86/smaller_maxphyaddr_emulation_test.c | 105
-rw-r--r-- tools/testing/selftests/kvm/x86/smm_test.c (renamed from tools/testing/selftests/kvm/x86_64/smm_test.c) | 117
-rw-r--r-- tools/testing/selftests/kvm/x86/state_test.c (renamed from tools/testing/selftests/kvm/x86_64/state_test.c) | 149
-rw-r--r-- tools/testing/selftests/kvm/x86/svm_int_ctl_test.c | 115
-rw-r--r-- tools/testing/selftests/kvm/x86/svm_nested_shutdown_test.c | 59
-rw-r--r-- tools/testing/selftests/kvm/x86/svm_nested_soft_inject_test.c | 210
-rw-r--r-- tools/testing/selftests/kvm/x86/svm_vmcall_test.c (renamed from tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c) | 24
-rw-r--r-- tools/testing/selftests/kvm/x86/sync_regs_test.c | 411
-rw-r--r-- tools/testing/selftests/kvm/x86/triple_fault_event_test.c | 124
-rw-r--r-- tools/testing/selftests/kvm/x86/tsc_msrs_test.c (renamed from tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c) | 94
-rw-r--r-- tools/testing/selftests/kvm/x86/tsc_scaling_sync.c | 110
-rw-r--r-- tools/testing/selftests/kvm/x86/ucna_injection_test.c | 295
-rw-r--r-- tools/testing/selftests/kvm/x86/userspace_io_test.c | 103
-rw-r--r-- tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c (renamed from tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c) | 341
-rw-r--r-- tools/testing/selftests/kvm/x86/vmx_apic_access_test.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c) | 47
-rw-r--r-- tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c) | 107
-rw-r--r-- tools/testing/selftests/kvm/x86/vmx_exception_with_invalid_guest_state.c | 142
-rw-r--r-- tools/testing/selftests/kvm/x86/vmx_invalid_nested_guest_state.c | 103
-rw-r--r-- tools/testing/selftests/kvm/x86/vmx_msrs_test.c | 131
-rw-r--r-- tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c | 132
-rw-r--r-- tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c | 248
-rw-r--r-- tools/testing/selftests/kvm/x86/vmx_preemption_timer_test.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c) | 45
-rw-r--r-- tools/testing/selftests/kvm/x86/vmx_set_nested_state_test.c (renamed from tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c) | 117
-rw-r--r-- tools/testing/selftests/kvm/x86/xapic_ipi_test.c | 500
-rw-r--r-- tools/testing/selftests/kvm/x86/xapic_state_test.c | 262
-rw-r--r-- tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c | 139
-rw-r--r-- tools/testing/selftests/kvm/x86/xen_shinfo_test.c | 1145
-rw-r--r-- tools/testing/selftests/kvm/x86/xen_vmcall_test.c | 143
-rw-r--r-- tools/testing/selftests/kvm/x86/xss_msr_test.c | 54
-rw-r--r-- tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c | 116
-rw-r--r-- tools/testing/selftests/kvm/x86_64/evmcs_test.c | 164
-rw-r--r-- tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c | 201
-rw-r--r-- tools/testing/selftests/kvm/x86_64/kvm_pv_test.c | 234
-rw-r--r-- tools/testing/selftests/kvm/x86_64/mmio_warning_test.c | 126
-rw-r--r-- tools/testing/selftests/kvm/x86_64/platform_info_test.c | 107
-rw-r--r-- tools/testing/selftests/kvm/x86_64/set_sregs_test.c | 142
-rw-r--r-- tools/testing/selftests/kvm/x86_64/sync_regs_test.c | 243
-rw-r--r-- tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c | 86
-rw-r--r-- tools/testing/selftests/kvm/x86_64/xss_msr_test.c | 76
-rw-r--r-- tools/testing/selftests/landlock/.gitignore | 5
-rw-r--r-- tools/testing/selftests/landlock/Makefile | 27
-rw-r--r-- tools/testing/selftests/landlock/audit.h | 478
-rw-r--r-- tools/testing/selftests/landlock/audit_test.c | 672
-rw-r--r-- tools/testing/selftests/landlock/base_test.c | 529
-rw-r--r-- tools/testing/selftests/landlock/common.h | 252
-rw-r--r-- tools/testing/selftests/landlock/config | 19
-rw-r--r-- tools/testing/selftests/landlock/config.um | 1
-rw-r--r-- tools/testing/selftests/landlock/fs_test.c | 7650
-rw-r--r-- tools/testing/selftests/landlock/net_test.c | 2003
-rw-r--r-- tools/testing/selftests/landlock/ptrace_test.c | 577
-rw-r--r-- tools/testing/selftests/landlock/sandbox-and-launch.c | 82
-rw-r--r-- tools/testing/selftests/landlock/scoped_abstract_unix_test.c | 1152
-rw-r--r-- tools/testing/selftests/landlock/scoped_base_variants.h | 156
-rw-r--r-- tools/testing/selftests/landlock/scoped_common.h | 28
-rw-r--r-- tools/testing/selftests/landlock/scoped_multiple_domain_variants.h | 152
-rw-r--r-- tools/testing/selftests/landlock/scoped_signal_test.c | 562
-rw-r--r-- tools/testing/selftests/landlock/scoped_test.c | 33
-rw-r--r-- tools/testing/selftests/landlock/true.c | 5
-rw-r--r-- tools/testing/selftests/landlock/wait-pipe-sandbox.c | 131
-rw-r--r-- tools/testing/selftests/landlock/wait-pipe.c | 42
-rw-r--r-- tools/testing/selftests/landlock/wrappers.h | 47
-rw-r--r-- tools/testing/selftests/lib.mk | 184
-rw-r--r-- tools/testing/selftests/lib/Makefile | 3
-rw-r--r-- tools/testing/selftests/lib/config | 2
-rwxr-xr-x tools/testing/selftests/lib/prime_numbers.sh | 4
-rwxr-xr-x tools/testing/selftests/lib/printf.sh | 4
-rwxr-xr-x tools/testing/selftests/lib/strscpy.sh | 3
-rw-r--r-- tools/testing/selftests/livepatch/.gitignore | 1
-rw-r--r-- tools/testing/selftests/livepatch/Makefile | 7
-rw-r--r-- tools/testing/selftests/livepatch/README | 25
-rw-r--r-- tools/testing/selftests/livepatch/config | 1
-rw-r--r-- tools/testing/selftests/livepatch/functions.sh | 217
-rwxr-xr-x tools/testing/selftests/livepatch/test-callbacks.sh | 76
-rwxr-xr-x tools/testing/selftests/livepatch/test-ftrace.sh | 45
-rwxr-xr-x tools/testing/selftests/livepatch/test-kprobe.sh | 64
-rwxr-xr-x tools/testing/selftests/livepatch/test-livepatch.sh | 143
-rwxr-xr-x tools/testing/selftests/livepatch/test-shadow-vars.sh | 2
-rwxr-xr-x tools/testing/selftests/livepatch/test-state.sh | 26
-rwxr-xr-x tools/testing/selftests/livepatch/test-syscall.sh | 56
-rwxr-xr-x tools/testing/selftests/livepatch/test-sysfs.sh | 205
-rw-r--r-- tools/testing/selftests/livepatch/test_klp-call_getpid.c | 44
-rw-r--r-- tools/testing/selftests/livepatch/test_modules/Makefile | 27
-rw-r--r-- tools/testing/selftests/livepatch/test_modules/test_klp_atomic_replace.c | 57
-rw-r--r-- tools/testing/selftests/livepatch/test_modules/test_klp_callbacks_busy.c | 70
-rw-r--r-- tools/testing/selftests/livepatch/test_modules/test_klp_callbacks_demo.c | 121
-rw-r--r-- tools/testing/selftests/livepatch/test_modules/test_klp_callbacks_demo2.c | 93
-rw-r--r-- tools/testing/selftests/livepatch/test_modules/test_klp_callbacks_mod.c | 24
-rw-r--r-- tools/testing/selftests/livepatch/test_modules/test_klp_kprobe.c | 38
-rw-r--r-- tools/testing/selftests/livepatch/test_modules/test_klp_livepatch.c | 51
-rw-r--r-- tools/testing/selftests/livepatch/test_modules/test_klp_shadow_vars.c | 301
-rw-r--r-- tools/testing/selftests/livepatch/test_modules/test_klp_state.c | 162
-rw-r--r-- tools/testing/selftests/livepatch/test_modules/test_klp_state2.c | 191
-rw-r--r-- tools/testing/selftests/livepatch/test_modules/test_klp_state3.c | 5
-rw-r--r-- tools/testing/selftests/livepatch/test_modules/test_klp_syscall.c | 116
-rw-r--r-- tools/testing/selftests/liveupdate/.gitignore | 9
-rw-r--r-- tools/testing/selftests/liveupdate/Makefile | 34
-rw-r--r-- tools/testing/selftests/liveupdate/config | 11
-rwxr-xr-x tools/testing/selftests/liveupdate/do_kexec.sh | 16
-rw-r--r-- tools/testing/selftests/liveupdate/liveupdate.c | 348
-rw-r--r-- tools/testing/selftests/liveupdate/luo_kexec_simple.c | 89
-rw-r--r-- tools/testing/selftests/liveupdate/luo_multi_session.c | 162
-rw-r--r-- tools/testing/selftests/liveupdate/luo_test_utils.c | 266
-rw-r--r-- tools/testing/selftests/liveupdate/luo_test_utils.h | 44
-rw-r--r-- tools/testing/selftests/lkdtm/.gitignore | 1
-rw-r--r-- tools/testing/selftests/lkdtm/Makefile | 1
-rw-r--r-- tools/testing/selftests/lkdtm/config | 13
-rwxr-xr-x tools/testing/selftests/lkdtm/run.sh | 20
-rwxr-xr-x tools/testing/selftests/lkdtm/stack-entropy.sh | 51
-rw-r--r-- tools/testing/selftests/lkdtm/tests.txt | 31
-rw-r--r-- tools/testing/selftests/lsm/.gitignore | 1
-rw-r--r-- tools/testing/selftests/lsm/Makefile | 17
-rw-r--r-- tools/testing/selftests/lsm/common.c | 89
-rw-r--r-- tools/testing/selftests/lsm/common.h | 33
-rw-r--r-- tools/testing/selftests/lsm/config | 3
-rw-r--r-- tools/testing/selftests/lsm/lsm_get_self_attr_test.c | 275
-rw-r--r-- tools/testing/selftests/lsm/lsm_list_modules_test.c | 146
-rw-r--r-- tools/testing/selftests/lsm/lsm_set_self_attr_test.c | 73
-rw-r--r-- tools/testing/selftests/media_tests/Makefile | 2
-rw-r--r-- tools/testing/selftests/media_tests/media_device_open.c | 2
-rw-r--r-- tools/testing/selftests/media_tests/media_device_test.c | 2
-rw-r--r-- tools/testing/selftests/media_tests/regression_test.txt | 8
-rw-r--r-- tools/testing/selftests/media_tests/video_device_test.c | 111
-rw-r--r-- tools/testing/selftests/membarrier/Makefile | 2
-rw-r--r-- tools/testing/selftests/membarrier/membarrier_test_impl.h | 35
-rw-r--r-- tools/testing/selftests/membarrier/membarrier_test_multi_thread.c | 4
-rw-r--r-- tools/testing/selftests/membarrier/membarrier_test_single_thread.c | 8
-rw-r--r-- tools/testing/selftests/memfd/Makefile | 4
-rw-r--r-- tools/testing/selftests/memfd/fuse_test.c | 3
-rw-r--r-- tools/testing/selftests/memfd/memfd_test.c | 606
-rw-r--r-- tools/testing/selftests/memory-hotplug/config | 1
-rwxr-xr-x tools/testing/selftests/memory-hotplug/mem-on-off-test.sh | 35
-rw-r--r-- tools/testing/selftests/mincore/mincore_selftest.c | 48
-rw-r--r-- tools/testing/selftests/mm/.gitignore | 62
-rw-r--r-- tools/testing/selftests/mm/Makefile | 257
-rwxr-xr-x[-rw-r--r--] tools/testing/selftests/mm/charge_reserved_hugetlb.sh (renamed from tools/testing/selftests/vm/charge_reserved_hugetlb.sh) | 47
-rwxr-xr-x[-rw-r--r--] tools/testing/selftests/mm/check_config.sh (renamed from tools/testing/selftests/vm/check_config.sh) | 14
-rw-r--r-- tools/testing/selftests/mm/compaction_test.c | 276
-rw-r--r-- tools/testing/selftests/mm/config | 13
-rw-r--r-- tools/testing/selftests/mm/cow.c | 1897
-rw-r--r-- tools/testing/selftests/mm/droppable.c | 53
-rw-r--r-- tools/testing/selftests/mm/guard-regions.c | 2326
-rw-r--r-- tools/testing/selftests/mm/gup_longterm.c | 524
-rw-r--r-- tools/testing/selftests/mm/gup_test.c | 260
-rw-r--r-- tools/testing/selftests/mm/hmm-tests.c | 2855
-rw-r--r-- tools/testing/selftests/mm/hugepage-mmap.c (renamed from tools/testing/selftests/vm/hugepage-mmap.c) | 49
-rw-r--r-- tools/testing/selftests/mm/hugepage-mremap.c | 178
-rw-r--r-- tools/testing/selftests/mm/hugepage-shm.c (renamed from tools/testing/selftests/vm/hugepage-shm.c) | 22
-rw-r--r-- tools/testing/selftests/mm/hugepage-vmemmap.c | 134
-rw-r--r-- tools/testing/selftests/mm/hugetlb-madvise.c | 367
-rw-r--r-- tools/testing/selftests/mm/hugetlb-read-hwpoison.c | 322
-rw-r--r-- tools/testing/selftests/mm/hugetlb-soft-offline.c | 228
-rw-r--r-- tools/testing/selftests/mm/hugetlb_dio.c | 125
-rw-r--r-- tools/testing/selftests/mm/hugetlb_fault_after_madv.c | 109
-rw-r--r-- tools/testing/selftests/mm/hugetlb_madv_vs_map.c | 126
-rwxr-xr-x[-rw-r--r--] tools/testing/selftests/mm/hugetlb_reparenting_test.sh (renamed from tools/testing/selftests/vm/hugetlb_reparenting_test.sh) | 131
-rw-r--r-- tools/testing/selftests/mm/khugepaged.c | 1290
-rw-r--r-- tools/testing/selftests/mm/ksm_functional_tests.c | 759
-rw-r--r-- tools/testing/selftests/mm/ksm_tests.c | 926
-rw-r--r-- tools/testing/selftests/mm/madv_populate.c | 294
-rw-r--r-- tools/testing/selftests/mm/map_fixed_noreplace.c | 201
-rw-r--r-- tools/testing/selftests/mm/map_hugetlb.c | 88
-rw-r--r-- tools/testing/selftests/mm/map_populate.c (renamed from tools/testing/selftests/vm/map_populate.c) | 48
-rw-r--r-- tools/testing/selftests/mm/mdwe_test.c | 303
-rw-r--r-- tools/testing/selftests/mm/memfd_secret.c | 346
-rw-r--r-- tools/testing/selftests/mm/merge.c | 1174
-rw-r--r-- tools/testing/selftests/mm/migration.c | 313
-rw-r--r-- tools/testing/selftests/mm/mkdirty.c | 380
-rw-r--r-- tools/testing/selftests/mm/mlock-random-test.c (renamed from tools/testing/selftests/vm/mlock-random-test.c) | 137
-rw-r--r-- tools/testing/selftests/mm/mlock2-tests.c (renamed from tools/testing/selftests/vm/mlock2-tests.c) | 296
-rw-r--r-- tools/testing/selftests/mm/mlock2.h (renamed from tools/testing/selftests/vm/mlock2.h) | 27
-rw-r--r-- tools/testing/selftests/mm/mrelease_test.c | 184
-rw-r--r-- tools/testing/selftests/mm/mremap_dontunmap.c (renamed from tools/testing/selftests/vm/mremap_dontunmap.c) | 91
-rw-r--r-- tools/testing/selftests/mm/mremap_test.c | 1485
-rw-r--r-- tools/testing/selftests/mm/mseal_helpers.h | 41
-rw-r--r-- tools/testing/selftests/mm/mseal_test.c | 1989
-rw-r--r-- tools/testing/selftests/mm/on-fault-limit.c | 42
-rw-r--r-- tools/testing/selftests/mm/page_frag/Makefile | 18
-rw-r--r-- tools/testing/selftests/mm/page_frag/page_frag_test.c | 198
-rw-r--r-- tools/testing/selftests/mm/pagemap_ioctl.c | 1738
-rw-r--r-- tools/testing/selftests/mm/pfnmap.c | 269
-rw-r--r-- tools/testing/selftests/mm/pkey-arm64.h | 140
-rw-r--r-- tools/testing/selftests/mm/pkey-helpers.h (renamed from tools/testing/selftests/vm/pkey-helpers.h) | 80
-rw-r--r-- tools/testing/selftests/mm/pkey-powerpc.h (renamed from tools/testing/selftests/vm/pkey-powerpc.h) | 24
-rw-r--r-- tools/testing/selftests/mm/pkey-x86.h (renamed from tools/testing/selftests/vm/pkey-x86.h) | 66
-rw-r--r-- tools/testing/selftests/mm/pkey_sighandler_tests.c | 546
-rw-r--r-- tools/testing/selftests/mm/pkey_util.c | 41
-rw-r--r-- tools/testing/selftests/mm/prctl_thp_disable.c | 291
-rw-r--r-- tools/testing/selftests/mm/process_madv.c | 344
-rw-r--r-- tools/testing/selftests/mm/protection_keys.c (renamed from tools/testing/selftests/vm/protection_keys.c) | 556
-rw-r--r-- tools/testing/selftests/mm/rmap.c | 433
-rwxr-xr-x tools/testing/selftests/mm/run_vmtests.sh | 553
-rw-r--r-- tools/testing/selftests/mm/settings | 1
-rw-r--r-- tools/testing/selftests/mm/soft-dirty.c | 348
-rw-r--r-- tools/testing/selftests/mm/split_huge_page_test.c | 837
-rwxr-xr-x tools/testing/selftests/mm/test_hmm.sh (renamed from tools/testing/selftests/vm/test_hmm.sh) | 24
-rwxr-xr-x tools/testing/selftests/mm/test_page_frag.sh | 175
-rwxr-xr-x tools/testing/selftests/mm/test_vmalloc.sh (renamed from tools/testing/selftests/vm/test_vmalloc.sh) | 27
-rw-r--r-- tools/testing/selftests/mm/thp_settings.c | 401
-rw-r--r-- tools/testing/selftests/mm/thp_settings.h | 90
-rw-r--r-- tools/testing/selftests/mm/thuge-gen.c (renamed from tools/testing/selftests/vm/thuge-gen.c) | 215
-rw-r--r-- tools/testing/selftests/mm/transhuge-stress.c | 138
-rw-r--r-- tools/testing/selftests/mm/uffd-common.c | 745
-rw-r--r-- tools/testing/selftests/mm/uffd-common.h | 139
-rw-r--r-- tools/testing/selftests/mm/uffd-stress.c | 521
-rw-r--r-- tools/testing/selftests/mm/uffd-unit-tests.c | 1813
-rw-r--r-- tools/testing/selftests/mm/uffd-wp-mremap.c | 380
-rw-r--r-- tools/testing/selftests/mm/va_high_addr_switch.c | 336
-rwxr-xr-x tools/testing/selftests/mm/va_high_addr_switch.sh | 117
-rw-r--r-- tools/testing/selftests/mm/virtual_address_range.c | 260
-rw-r--r-- tools/testing/selftests/mm/vm_util.c | 725
-rw-r--r-- tools/testing/selftests/mm/vm_util.h | 158
-rwxr-xr-x[-rw-r--r--] tools/testing/selftests/mm/write_hugetlb_memory.sh (renamed from tools/testing/selftests/vm/write_hugetlb_memory.sh) | 4
-rw-r--r-- tools/testing/selftests/mm/write_to_hugetlbfs.c (renamed from tools/testing/selftests/vm/write_to_hugetlbfs.c) | 23
-rw-r--r-- tools/testing/selftests/module/Makefile (renamed from tools/testing/selftests/user/Makefile) | 7
-rw-r--r-- tools/testing/selftests/module/config | 3
-rwxr-xr-x tools/testing/selftests/module/find_symbol.sh | 81
-rw-r--r-- tools/testing/selftests/mount/unprivileged-remount-test.c | 4
-rw-r--r-- tools/testing/selftests/mount_setattr/.gitignore | 1
-rw-r--r-- tools/testing/selftests/mount_setattr/Makefile | 9
-rw-r--r-- tools/testing/selftests/mount_setattr/config | 1
-rw-r--r-- tools/testing/selftests/mount_setattr/mount_setattr_test.c | 2140
-rw-r--r-- tools/testing/selftests/move_mount_set_group/.gitignore | 1
-rw-r--r-- tools/testing/selftests/move_mount_set_group/Makefile | 7
-rw-r--r-- tools/testing/selftests/move_mount_set_group/config | 1
-rw-r--r-- tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c | 375
-rw-r--r-- tools/testing/selftests/mqueue/mq_open_tests.c | 2
-rw-r--r-- tools/testing/selftests/mqueue/mq_perf_tests.c | 37
-rw-r--r-- tools/testing/selftests/mqueue/setting | 1
-rw-r--r-- tools/testing/selftests/mseal_system_mappings/.gitignore | 2
-rw-r--r-- tools/testing/selftests/mseal_system_mappings/Makefile | 6
-rw-r--r-- tools/testing/selftests/mseal_system_mappings/config | 1
-rw-r--r-- tools/testing/selftests/mseal_system_mappings/sysmap_is_sealed.c | 119
-rw-r--r-- tools/testing/selftests/namespaces/.gitignore | 12
-rw-r--r-- tools/testing/selftests/namespaces/Makefile | 29
-rw-r--r-- tools/testing/selftests/namespaces/config | 7
-rw-r--r-- tools/testing/selftests/namespaces/cred_change_test.c | 814
-rw-r--r-- tools/testing/selftests/namespaces/file_handle_test.c | 1429
-rw-r--r-- tools/testing/selftests/namespaces/init_ino_test.c | 61
-rw-r--r-- tools/testing/selftests/namespaces/listns_efault_test.c | 530
-rw-r--r-- tools/testing/selftests/namespaces/listns_pagination_bug.c | 138
-rw-r--r-- tools/testing/selftests/namespaces/listns_permissions_test.c | 759
-rw-r--r-- tools/testing/selftests/namespaces/listns_test.c | 679
-rw-r--r-- tools/testing/selftests/namespaces/ns_active_ref_test.c | 2672
-rw-r--r-- tools/testing/selftests/namespaces/nsid_test.c | 981
-rw-r--r-- tools/testing/selftests/namespaces/regression_pidfd_setns_test.c | 113
-rw-r--r-- tools/testing/selftests/namespaces/siocgskns_test.c | 1824
-rw-r--r-- tools/testing/selftests/namespaces/stress_test.c | 626
-rw-r--r-- tools/testing/selftests/namespaces/wrappers.h | 35
-rw-r--r-- tools/testing/selftests/nci/.gitignore | 1
-rw-r--r-- tools/testing/selftests/nci/Makefile | 6
-rw-r--r-- tools/testing/selftests/nci/config | 3
-rw-r--r-- tools/testing/selftests/nci/nci_dev.c | 904
-rw-r--r-- tools/testing/selftests/net/.gitignore | 61
-rw-r--r-- tools/testing/selftests/net/Makefile | 246
-rw-r--r-- tools/testing/selftests/net/af_unix/.gitignore | 8
-rw-r--r-- tools/testing/selftests/net/af_unix/Makefile | 14
-rw-r--r-- tools/testing/selftests/net/af_unix/config | 3
-rw-r--r-- tools/testing/selftests/net/af_unix/diag_uid.c | 177
-rw-r--r-- tools/testing/selftests/net/af_unix/msg_oob.c | 891
-rw-r--r-- tools/testing/selftests/net/af_unix/scm_inq.c | 123
-rw-r--r-- tools/testing/selftests/net/af_unix/scm_pidfd.c | 556
-rw-r--r-- tools/testing/selftests/net/af_unix/scm_rights.c | 381
-rw-r--r-- tools/testing/selftests/net/af_unix/so_peek_off.c | 162
-rw-r--r-- tools/testing/selftests/net/af_unix/unix_connect.c | 148
-rw-r--r-- tools/testing/selftests/net/af_unix/unix_connreset.c | 180
-rwxr-xr-x tools/testing/selftests/net/altnames.sh | 2
-rwxr-xr-x tools/testing/selftests/net/amt.sh | 298
-rwxr-xr-x tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh | 213
-rwxr-xr-x tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh | 283
-rwxr-xr-x tools/testing/selftests/net/bareudp.sh | 51
-rw-r--r-- tools/testing/selftests/net/bench/Makefile | 7
-rw-r--r-- tools/testing/selftests/net/bench/page_pool/Makefile | 17
-rw-r--r-- tools/testing/selftests/net/bench/page_pool/bench_page_pool_simple.c | 267
-rw-r--r-- tools/testing/selftests/net/bench/page_pool/time_bench.c | 394
-rw-r--r-- tools/testing/selftests/net/bench/page_pool/time_bench.h | 238
-rwxr-xr-x tools/testing/selftests/net/bench/test_bench_page_pool.sh | 32
-rwxr-xr-x tools/testing/selftests/net/big_tcp.sh | 182
-rw-r--r-- tools/testing/selftests/net/bind_bhash.c | 144
-rwxr-xr-x tools/testing/selftests/net/bind_bhash.sh | 68
-rw-r--r-- tools/testing/selftests/net/bind_timewait.c | 92
-rw-r--r-- tools/testing/selftests/net/bind_wildcard.c | 809
-rw-r--r-- tools/testing/selftests/net/bpf.mk | 53
-rwxr-xr-x tools/testing/selftests/net/bpf_offload.py (renamed from tools/testing/selftests/bpf/test_offload.py) | 223
-rwxr-xr-x tools/testing/selftests/net/broadcast_ether_dst.sh | 83
-rwxr-xr-x tools/testing/selftests/net/broadcast_pmtu.sh | 47
-rwxr-xr-x tools/testing/selftests/net/busy_poll_test.sh | 187
-rw-r--r-- tools/testing/selftests/net/busy_poller.c | 368
-rw-r--r-- tools/testing/selftests/net/can/.gitignore | 2
-rw-r--r-- tools/testing/selftests/net/can/Makefile | 11
-rw-r--r-- tools/testing/selftests/net/can/config | 3
-rw-r--r-- tools/testing/selftests/net/can/test_raw_filter.c | 405
-rwxr-xr-x tools/testing/selftests/net/can/test_raw_filter.sh | 45
-rwxr-xr-x tools/testing/selftests/net/cmsg_ip.sh | 187
-rw-r--r-- tools/testing/selftests/net/cmsg_sender.c | 583
-rwxr-xr-x tools/testing/selftests/net/cmsg_so_mark.sh | 78
-rwxr-xr-x tools/testing/selftests/net/cmsg_so_priority.sh | 151
-rwxr-xr-x tools/testing/selftests/net/cmsg_time.sh | 102
-rw-r--r-- tools/testing/selftests/net/config | 147
-rwxr-xr-x tools/testing/selftests/net/drop_monitor_tests.sh | 23
-rw-r--r-- tools/testing/selftests/net/epoll_busy_poll.c | 320
-rwxr-xr-x tools/testing/selftests/net/fcnal-ipv4.sh | 2
-rwxr-xr-x tools/testing/selftests/net/fcnal-ipv6.sh | 2
-rwxr-xr-x tools/testing/selftests/net/fcnal-other.sh | 2
-rwxr-xr-x tools/testing/selftests/net/fcnal-test.sh | 1260
-rwxr-xr-x tools/testing/selftests/net/fdb_flush.sh | 813
-rwxr-xr-x tools/testing/selftests/net/fdb_notify.sh | 96
-rwxr-xr-x tools/testing/selftests/net/fib-onlink-tests.sh | 9
-rwxr-xr-x tools/testing/selftests/net/fib_nexthop_multiprefix.sh | 98
-rwxr-xr-x tools/testing/selftests/net/fib_nexthop_nongw.sh | 115
-rwxr-xr-x tools/testing/selftests/net/fib_nexthops.sh | 1070
-rwxr-xr-x tools/testing/selftests/net/fib_rule_tests.sh | 644
-rwxr-xr-x tools/testing/selftests/net/fib_tests.sh | 1222
-rw-r--r-- tools/testing/selftests/net/forwarding/Makefile | 96
-rw-r--r-- tools/testing/selftests/net/forwarding/README | 50
-rwxr-xr-x tools/testing/selftests/net/forwarding/bridge_activity_notify.sh | 170
-rwxr-xr-x tools/testing/selftests/net/forwarding/bridge_fdb_learning_limit.sh | 301
-rwxr-xr-x tools/testing/selftests/net/forwarding/bridge_fdb_local_vlan_0.sh | 387
-rwxr-xr-x tools/testing/selftests/net/forwarding/bridge_igmp.sh | 99
-rwxr-xr-x tools/testing/selftests/net/forwarding/bridge_locked_port.sh | 365
-rwxr-xr-x tools/testing/selftests/net/forwarding/bridge_mdb.sh | 1467
-rwxr-xr-x tools/testing/selftests/net/forwarding/bridge_mdb_host.sh | 103
-rwxr-xr-x tools/testing/selftests/net/forwarding/bridge_mdb_max.sh | 1347
-rwxr-xr-x tools/testing/selftests/net/forwarding/bridge_mdb_port_down.sh | 118
-rwxr-xr-x tools/testing/selftests/net/forwarding/bridge_mld.sh | 97
-rwxr-xr-x tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh | 153
-rwxr-xr-x tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh | 546
-rwxr-xr-x tools/testing/selftests/net/forwarding/bridge_vlan_unaware.sh | 30
-rw-r--r-- tools/testing/selftests/net/forwarding/config | 54
-rwxr-xr-x tools/testing/selftests/net/forwarding/custom_multipath_hash.sh | 372
-rw-r--r-- tools/testing/selftests/net/forwarding/devlink_lib.sh | 79
-rwxr-xr-x tools/testing/selftests/net/forwarding/dual_vxlan_bridge.sh | 367
-rw-r--r-- tools/testing/selftests/net/forwarding/fib_offload_lib.sh | 14
-rw-r--r-- tools/testing/selftests/net/forwarding/forwarding.config.sample | 41
-rwxr-xr-x tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh | 464
-rwxr-xr-x tools/testing/selftests/net/forwarding/gre_inner_v4_multipath.sh | 2
-rwxr-xr-x tools/testing/selftests/net/forwarding/gre_inner_v6_multipath.sh | 6
-rwxr-xr-x tools/testing/selftests/net/forwarding/gre_multipath.sh | 2
-rwxr-xr-x tools/testing/selftests/net/forwarding/gre_multipath_nh.sh | 41
-rwxr-xr-x tools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh | 323
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh | 174
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh | 466
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6gre_flat.sh | 79
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6gre_flat_key.sh | 79
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6gre_flat_keys.sh | 79
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6gre_hier.sh | 79
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6gre_hier_key.sh | 79
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6gre_hier_keys.sh | 79
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6gre_inner_v4_multipath.sh | 2
-rwxr-xr-x tools/testing/selftests/net/forwarding/ip6gre_inner_v6_multipath.sh | 6
-rw-r--r-- tools/testing/selftests/net/forwarding/ip6gre_lib.sh | 518
-rw-r--r-- tools/testing/selftests/net/forwarding/ipip_lib.sh | 1
-rw-r--r-- tools/testing/selftests/net/forwarding/lib.sh | 1286
-rwxr-xr-x tools/testing/selftests/net/forwarding/lib_sh_test.sh | 215
-rwxr-xr-x tools/testing/selftests/net/forwarding/local_termination.sh | 597
-rwxr-xr-x tools/testing/selftests/net/forwarding/min_max_mtu.sh | 283
-rwxr-xr-x tools/testing/selftests/net/forwarding/mirror_gre.sh | 45
-rwxr-xr-x tools/testing/selftests/net/forwarding/mirror_gre_bound.sh | 24
-rwxr-xr-x tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh | 28
-rwxr-xr-x tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh | 35
-rwxr-xr-x tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh | 28
-rwxr-xr-x tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh | 42
-rwxr-xr-x tools/testing/selftests/net/forwarding/mirror_gre_changes.sh | 76
-rwxr-xr-x tools/testing/selftests/net/forwarding/mirror_gre_flower.sh | 43
-rwxr-xr-x tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh | 66
-rw-r--r-- tools/testing/selftests/net/forwarding/mirror_gre_lib.sh | 92
-rwxr-xr-x tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh | 39
-rwxr-xr-x tools/testing/selftests/net/forwarding/mirror_gre_nh.sh | 35
-rw-r--r-- tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh | 2
-rwxr-xr-x tools/testing/selftests/net/forwarding/mirror_gre_vlan.sh | 21
-rwxr-xr-x tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh | 82
-rw-r--r-- tools/testing/selftests/net/forwarding/mirror_lib.sh | 101
-rw-r--r-- tools/testing/selftests/net/forwarding/mirror_topo_lib.sh | 1
-rwxr-xr-x tools/testing/selftests/net/forwarding/mirror_vlan.sh | 43
-rwxr-xr-x tools/testing/selftests/net/forwarding/no_forwarding.sh | 264
-rwxr-xr-x tools/testing/selftests/net/forwarding/pedit_dsfield.sh | 6
-rwxr-xr-x tools/testing/selftests/net/forwarding/pedit_ip.sh | 201
-rwxr-xr-x tools/testing/selftests/net/forwarding/pedit_l4port.sh | 2
-rwxr-xr-x tools/testing/selftests/net/forwarding/q_in_vni.sh | 1
-rwxr-xr-x tools/testing/selftests/net/forwarding/q_in_vni_ipv6.sh | 347
-rwxr-xr-x tools/testing/selftests/net/forwarding/router.sh | 47
-rwxr-xr-x tools/testing/selftests/net/forwarding/router_bridge.sh | 79
-rwxr-xr-x tools/testing/selftests/net/forwarding/router_bridge_1d.sh | 185
-rwxr-xr-x tools/testing/selftests/net/forwarding/router_bridge_1d_lag.sh | 409
-rwxr-xr-x tools/testing/selftests/net/forwarding/router_bridge_lag.sh | 324
-rwxr-xr-x tools/testing/selftests/net/forwarding/router_bridge_pvid_vlan_upper.sh | 155
-rwxr-xr-x tools/testing/selftests/net/forwarding/router_bridge_vlan.sh | 102
-rwxr-xr-x tools/testing/selftests/net/forwarding/router_bridge_vlan_upper.sh | 169
-rwxr-xr-x tools/testing/selftests/net/forwarding/router_bridge_vlan_upper_pvid.sh | 171
-rwxr-xr-x tools/testing/selftests/net/forwarding/router_mpath_nh.sh | 125
-rw-r--r-- tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh | 132
-rwxr-xr-x tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh | 490
-rwxr-xr-x tools/testing/selftests/net/forwarding/router_mpath_seed.sh | 333
-rwxr-xr-x tools/testing/selftests/net/forwarding/router_multicast.sh | 127
-rwxr-xr-x tools/testing/selftests/net/forwarding/router_multipath.sh | 47
-rwxr-xr-x tools/testing/selftests/net/forwarding/router_nh.sh | 14
-rwxr-xr-x tools/testing/selftests/net/forwarding/router_vid_1.sh | 27
-rwxr-xr-x tools/testing/selftests/net/forwarding/sch_ets.sh | 8
-rw-r--r-- tools/testing/selftests/net/forwarding/sch_ets_core.sh | 84
-rw-r--r-- tools/testing/selftests/net/forwarding/sch_ets_tests.sh | 41
-rwxr-xr-x tools/testing/selftests/net/forwarding/sch_red.sh | 118
-rw-r--r-- tools/testing/selftests/net/forwarding/sch_tbf_core.sh | 95
-rw-r--r-- tools/testing/selftests/net/forwarding/sch_tbf_etsprio.sh | 35
-rwxr-xr-x tools/testing/selftests/net/forwarding/sch_tbf_root.sh | 7
-rw-r--r-- tools/testing/selftests/net/forwarding/settings | 1
-rwxr-xr-x tools/testing/selftests/net/forwarding/skbedit_priority.sh | 6
-rwxr-xr-x tools/testing/selftests/net/forwarding/tc_actions.sh | 143
-rwxr-xr-x tools/testing/selftests/net/forwarding/tc_chains.sh | 4
-rw-r--r-- tools/testing/selftests/net/forwarding/tc_common.sh | 12
-rwxr-xr-x tools/testing/selftests/net/forwarding/tc_flower.sh | 402
-rwxr-xr-x tools/testing/selftests/net/forwarding/tc_flower_cfm.sh | 206
-rwxr-xr-x tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh | 357
-rwxr-xr-x tools/testing/selftests/net/forwarding/tc_flower_port_range.sh | 274
-rwxr-xr-x tools/testing/selftests/net/forwarding/tc_police.sh | 130
-rwxr-xr-x tools/testing/selftests/net/forwarding/tc_taprio.sh | 421
-rwxr-xr-x tools/testing/selftests/net/forwarding/tc_tunnel_key.sh | 162
-rw-r--r-- tools/testing/selftests/net/forwarding/tsn_lib.sh | 275
-rwxr-xr-x tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh | 2
-rwxr-xr-x tools/testing/selftests/net/forwarding/vxlan_asymmetric_ipv6.sh | 504
-rwxr-xr-x tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh | 35
-rwxr-xr-x tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh | 804
-rwxr-xr-x tools/testing/selftests/net/forwarding/vxlan_bridge_1d_port_8472_ipv6.sh | 11
-rwxr-xr-x tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh | 45
-rwxr-xr-x tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh | 837
-rwxr-xr-x tools/testing/selftests/net/forwarding/vxlan_bridge_1q_mc_ul.sh | 766
-rwxr-xr-x tools/testing/selftests/net/forwarding/vxlan_bridge_1q_port_8472_ipv6.sh | 11
-rwxr-xr-x tools/testing/selftests/net/forwarding/vxlan_reserved.sh | 347
-rwxr-xr-x tools/testing/selftests/net/forwarding/vxlan_symmetric_ipv6.sh | 563
-rwxr-xr-x tools/testing/selftests/net/fq_band_pktlimit.sh | 59
-rwxr-xr-x tools/testing/selftests/net/gre_gso.sh | 235
-rwxr-xr-x tools/testing/selftests/net/gre_ipv6_lladdr.sh | 184
-rw-r--r-- tools/testing/selftests/net/hsr/Makefile | 12
-rw-r--r-- tools/testing/selftests/net/hsr/config | 6
-rw-r--r-- tools/testing/selftests/net/hsr/hsr_common.sh | 84
-rwxr-xr-x tools/testing/selftests/net/hsr/hsr_ping.sh | 289
-rwxr-xr-x tools/testing/selftests/net/hsr/hsr_redbox.sh | 136
-rw-r--r-- tools/testing/selftests/net/hsr/settings | 1
-rw-r--r-- tools/testing/selftests/net/hwtstamp_config.c | 6
-rwxr-xr-x tools/testing/selftests/net/icmp.sh | 72
-rwxr-xr-x tools/testing/selftests/net/icmp_redirect.sh | 195
-rw-r--r-- tools/testing/selftests/net/io_uring_zerocopy_tx.c | 320
-rwxr-xr-x tools/testing/selftests/net/io_uring_zerocopy_tx.sh | 126
-rwxr-xr-x tools/testing/selftests/net/ioam6.sh | 1683
-rw-r--r-- tools/testing/selftests/net/ioam6_parser.c | 1101
-rw-r--r-- tools/testing/selftests/net/ip_local_port_range.c | 463
-rwxr-xr-x tools/testing/selftests/net/ip_local_port_range.sh | 7
-rw-r--r-- tools/testing/selftests/net/ipsec.c | 281
-rw-r--r-- tools/testing/selftests/net/ipv6_flowlabel.c | 75
-rwxr-xr-x tools/testing/selftests/net/ipv6_flowlabel.sh | 16
-rwxr-xr-x tools/testing/selftests/net/ipv6_force_forwarding.sh | 105
-rw-r--r-- tools/testing/selftests/net/ipv6_fragmentation.c | 114
-rwxr-xr-x tools/testing/selftests/net/ipv6_route_update_soft_lockup.sh | 261
-rwxr-xr-x tools/testing/selftests/net/l2_tos_ttl_inherit.sh | 446
-rwxr-xr-x tools/testing/selftests/net/l2tp.sh | 130
-rw-r--r-- tools/testing/selftests/net/lib.sh | 671
-rw-r--r-- tools/testing/selftests/net/lib/.gitignore | 3
-rw-r--r-- tools/testing/selftests/net/lib/Makefile | 24
-rw-r--r-- tools/testing/selftests/net/lib/csum.c | 1010
-rw-r--r-- tools/testing/selftests/net/lib/ksft.h | 56
-rwxr-xr-x tools/testing/selftests/net/lib/ksft_setup_loopback.sh | 111
-rw-r--r-- tools/testing/selftests/net/lib/py/__init__.py | 33
-rw-r--r-- tools/testing/selftests/net/lib/py/consts.py | 9
-rw-r--r-- tools/testing/selftests/net/lib/py/ksft.py | 370
-rw-r--r-- tools/testing/selftests/net/lib/py/netns.py | 49
-rw-r--r-- tools/testing/selftests/net/lib/py/nsim.py | 135
-rw-r--r-- tools/testing/selftests/net/lib/py/utils.py | 278
-rw-r--r-- tools/testing/selftests/net/lib/py/ynl.py | 68
-rw-r--r-- tools/testing/selftests/net/lib/sh/defer.sh | 131
-rw-r--r-- tools/testing/selftests/net/lib/xdp_dummy.bpf.c | 19
-rw-r--r-- tools/testing/selftests/net/lib/xdp_helper.c | 131
-rw-r--r-- tools/testing/selftests/net/lib/xdp_native.bpf.c | 680
-rwxr-xr-x tools/testing/selftests/net/link_netns.py | 141
-rwxr-xr-x tools/testing/selftests/net/lwt_dst_cache_ref_loop.sh | 246
-rw-r--r-- tools/testing/selftests/net/mptcp/.gitignore | 3
-rw-r--r-- tools/testing/selftests/net/mptcp/Makefile | 32
-rw-r--r-- tools/testing/selftests/net/mptcp/config | 39
-rwxr-xr-x tools/testing/selftests/net/mptcp/diag.sh | 418
-rw-r--r-- tools/testing/selftests/net/mptcp/mptcp_connect.c | 904
-rwxr-xr-x tools/testing/selftests/net/mptcp/mptcp_connect.sh | 668
-rwxr-xr-x tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh | 5
-rwxr-xr-x tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh | 5
-rwxr-xr-x tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh | 5
-rw-r--r-- tools/testing/selftests/net/mptcp/mptcp_diag.c | 435
-rw-r--r-- tools/testing/selftests/net/mptcp/mptcp_inq.c | 613
-rwxr-xr-x tools/testing/selftests/net/mptcp/mptcp_join.sh | 4771
-rw-r--r-- tools/testing/selftests/net/mptcp/mptcp_lib.sh | 760
-rw-r--r-- tools/testing/selftests/net/mptcp/mptcp_sockopt.c | 884
-rwxr-xr-x tools/testing/selftests/net/mptcp/mptcp_sockopt.sh | 371
-rwxr-xr-x tools/testing/selftests/net/mptcp/pm_netlink.sh | 304
-rw-r--r-- tools/testing/selftests/net/mptcp/pm_nl_ctl.c | 1010
-rw-r--r-- tools/testing/selftests/net/mptcp/settings | 2
-rwxr-xr-x tools/testing/selftests/net/mptcp/simult_flows.sh | 197
-rwxr-xr-x tools/testing/selftests/net/mptcp/userspace_pm.sh | 945
-rw-r--r-- tools/testing/selftests/net/msg_zerocopy.c | 36
-rwxr-xr-x tools/testing/selftests/net/msg_zerocopy.sh | 93
-rw-r--r-- tools/testing/selftests/net/nat6to4.bpf.c | 285
-rwxr-xr-x tools/testing/selftests/net/nat6to4.sh | 15
-rwxr-xr-x tools/testing/selftests/net/ndisc_unsolicited_na_test.sh | 249
-rwxr-xr-x tools/testing/selftests/net/netdev-l2addr.sh | 59
-rwxr-xr-x tools/testing/selftests/net/netdevice.sh | 60
-rw-r--r-- tools/testing/selftests/net/netfilter/.gitignore | 8
-rw-r--r-- tools/testing/selftests/net/netfilter/Makefile | 73
-rw-r--r-- tools/testing/selftests/net/netfilter/audit_logread.c | 165
-rwxr-xr-x tools/testing/selftests/net/netfilter/br_netfilter.sh | 175
-rwxr-xr-x tools/testing/selftests/net/netfilter/br_netfilter_queue.sh | 85
-rwxr-xr-x tools/testing/selftests/net/netfilter/bridge_brouter.sh | 120
-rw-r--r-- tools/testing/selftests/net/netfilter/config | 101
-rw-r--r-- tools/testing/selftests/net/netfilter/connect_close.c | 136
-rwxr-xr-x tools/testing/selftests/net/netfilter/conntrack_clash.sh | 174
-rw-r--r-- tools/testing/selftests/net/netfilter/conntrack_dump_flush.c | 476
-rwxr-xr-x tools/testing/selftests/net/netfilter/conntrack_dump_flush.sh | 3
-rwxr-xr-x tools/testing/selftests/net/netfilter/conntrack_icmp_related.sh | 278
-rwxr-xr-x tools/testing/selftests/net/netfilter/conntrack_ipip_mtu.sh | 191
-rwxr-xr-x tools/testing/selftests/net/netfilter/conntrack_resize.sh | 515
-rw-r--r-- tools/testing/selftests/net/netfilter/conntrack_reverse_clash.c | 125
-rwxr-xr-x tools/testing/selftests/net/netfilter/conntrack_reverse_clash.sh | 51
-rwxr-xr-x tools/testing/selftests/net/netfilter/conntrack_sctp_collision.sh | 87
-rwxr-xr-x tools/testing/selftests/net/netfilter/conntrack_tcp_unreplied.sh | 164
-rwxr-xr-x tools/testing/selftests/net/netfilter/conntrack_vrf.sh | 216
-rwxr-xr-x tools/testing/selftests/net/netfilter/ipvs.sh | 205
-rw-r--r-- tools/testing/selftests/net/netfilter/lib.sh | 10
-rwxr-xr-x tools/testing/selftests/net/netfilter/nf_conntrack_packetdrill.sh | 71
-rwxr-xr-x tools/testing/selftests/net/netfilter/nf_nat_edemux.sh | 121
-rw-r--r-- tools/testing/selftests/net/netfilter/nf_queue.c (renamed from tools/testing/selftests/netfilter/nf-queue.c) | 0
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_audit.sh | 269
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_concat_range.sh (renamed from tools/testing/selftests/netfilter/nft_concat_range.sh) | 713
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_concat_range_perf.sh | 9
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_conntrack_helper.sh | 171
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_fib.sh | 850
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_flowtable.sh | 811
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_interface_stress.sh | 157
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_meta.sh (renamed from tools/testing/selftests/netfilter/nft_meta.sh) | 6
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_nat.sh | 1227
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_nat_zones.sh | 265
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_queue.sh | 672
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_synproxy.sh | 96
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_tproxy_tcp.sh | 358
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_tproxy_udp.sh | 262
-rwxr-xr-x tools/testing/selftests/net/netfilter/nft_zones_many.sh | 164
-rwxr-xr-x tools/testing/selftests/net/netfilter/packetdrill/common.sh | 33
-rw-r--r-- tools/testing/selftests/net/netfilter/packetdrill/conntrack_ack_loss_stall.pkt | 118
-rw-r--r-- tools/testing/selftests/net/netfilter/packetdrill/conntrack_inexact_rst.pkt | 62
-rw-r--r-- tools/testing/selftests/net/netfilter/packetdrill/conntrack_rst_invalid.pkt | 59
-rw-r--r-- tools/testing/selftests/net/netfilter/packetdrill/conntrack_syn_challenge_ack.pkt | 44
-rw-r--r-- tools/testing/selftests/net/netfilter/packetdrill/conntrack_synack_old.pkt | 51
-rw-r--r-- tools/testing/selftests/net/netfilter/packetdrill/conntrack_synack_reuse.pkt | 34
-rwxr-xr-x tools/testing/selftests/net/netfilter/rpath.sh | 181
-rw-r--r-- tools/testing/selftests/net/netfilter/sctp_collision.c | 100
-rw-r--r-- tools/testing/selftests/net/netfilter/settings | 1
-rw-r--r-- tools/testing/selftests/net/netfilter/udpclash.c | 158
-rwxr-xr-x tools/testing/selftests/net/netfilter/vxlan_mtu_frag.sh | 121
-rwxr-xr-x tools/testing/selftests/net/netfilter/xt_string.sh | 133
-rw-r--r-- tools/testing/selftests/net/netlink-dumps.c | 263
-rwxr-xr-x tools/testing/selftests/net/netns-name.sh | 102
-rwxr-xr-x tools/testing/selftests/net/netns-sysctl.sh | 40
-rw-r--r-- tools/testing/selftests/net/nettest.c | 815
-rwxr-xr-x tools/testing/selftests/net/nl_netdev.py | 254
-rw-r--r-- tools/testing/selftests/net/openvswitch/Makefile | 13
-rwxr-xr-x tools/testing/selftests/net/openvswitch/openvswitch.sh | 948
-rw-r--r-- tools/testing/selftests/net/openvswitch/ovs-dpctl.py | 2775
-rw-r--r-- tools/testing/selftests/net/openvswitch/settings | 1
-rw-r--r-- tools/testing/selftests/net/ovpn/.gitignore | 2
-rw-r--r-- tools/testing/selftests/net/ovpn/Makefile | 34
-rw-r--r-- tools/testing/selftests/net/ovpn/common.sh | 108
-rw-r--r-- tools/testing/selftests/net/ovpn/config | 10
-rw-r--r-- tools/testing/selftests/net/ovpn/data64.key | 5
-rw-r--r-- tools/testing/selftests/net/ovpn/ovpn-cli.c | 2387
-rw-r--r-- tools/testing/selftests/net/ovpn/tcp_peers.txt | 5
-rwxr-xr-x tools/testing/selftests/net/ovpn/test-chachapoly.sh | 9
-rwxr-xr-x tools/testing/selftests/net/ovpn/test-close-socket-tcp.sh | 9
-rwxr-xr-x tools/testing/selftests/net/ovpn/test-close-socket.sh | 45
-rwxr-xr-x tools/testing/selftests/net/ovpn/test-float.sh | 9
-rwxr-xr-x tools/testing/selftests/net/ovpn/test-large-mtu.sh | 9
-rwxr-xr-x tools/testing/selftests/net/ovpn/test-tcp.sh | 9
-rwxr-xr-x tools/testing/selftests/net/ovpn/test.sh | 117
-rw-r--r-- tools/testing/selftests/net/ovpn/udp_peers.txt | 6
-rw-r--r-- tools/testing/selftests/net/packetdrill/Makefile | 12
-rw-r--r-- tools/testing/selftests/net/packetdrill/config | 11
-rwxr-xr-x tools/testing/selftests/net/packetdrill/defaults.sh | 64
-rwxr-xr-x tools/testing/selftests/net/packetdrill/ksft_runner.sh | 62
-rwxr-xr-x tools/testing/selftests/net/packetdrill/set_sysctls.py | 38
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_blocking_blocking-accept.pkt | 18
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_blocking_blocking-connect.pkt | 13
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_blocking_blocking-read.pkt | 31
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_blocking_blocking-write.pkt | 35
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_close_close-local-close-then-remote-fin.pkt | 23
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_close_close-on-syn-sent.pkt | 21
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_close_close-remote-fin-then-close.pkt | 36
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_close_no_rst.pkt | 32
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_dsack_mult.pkt | 45
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_ecn_ecn-uses-ect0.pkt | 21
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_eor_no-coalesce-large.pkt | 38
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_eor_no-coalesce-retrans.pkt | 72
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_eor_no-coalesce-small.pkt | 36
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_eor_no-coalesce-subsequent.pkt | 66
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fast_recovery_prr-ss-10pkt-lost-1.pkt | 72
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fast_recovery_prr-ss-30pkt-lost-1_4-11_16.pkt | 50
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fast_recovery_prr-ss-30pkt-lost1_4.pkt | 43
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fast_recovery_prr-ss-ack-below-snd_una-cubic.pkt | 41
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-cookie-not-reqd.pkt | 32
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-no-setsockopt.pkt | 21
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-non-tfo-listener.pkt | 26
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-pure-syn-data.pkt | 50
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-rw.pkt | 23
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-zero-payload.pkt | 26
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_client-ack-dropped-then-recovery-ms-timestamps.pkt | 46
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_experimental_option.pkt | 37
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_fin-close-socket.pkt | 30
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_icmp-before-accept.pkt | 49
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-after-accept.pkt | 37
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-before-accept.pkt | 32
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-close-with-unread-data.pkt | 32
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-non-tfo-socket.pkt | 37
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_sockopt-fastopen-key.pkt | 74
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_trigger-rst-listener-closed.pkt | 21
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_trigger-rst-reconnect.pkt | 30
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_fastopen_server_trigger-rst-unread-data-closed.pkt | 23
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_inq_client.pkt | 54
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_inq_server.pkt | 54
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_limited_transmit_limited-transmit-no-sack.pkt | 53
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_limited_transmit_limited-transmit-sack.pkt | 50
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_md5_md5-only-on-client-ack.pkt | 28
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_nagle_https_client.pkt | 40
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_nagle_sendmsg_msg_more.pkt | 66
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_nagle_sockopt_cork_nodelay.pkt | 43
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_ooo-before-and-after-accept.pkt | 53
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_ooo_rcv_mss.pkt | 27
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_rcv_big_endseq.pkt | 44
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_rcv_toobig.pkt | 33
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_rto_synack_rto_max.pkt | 54
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_sack_sack-route-refresh-ip-tos.pkt | 37
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_sack_sack-shift-sacked-2-6-8-3-9-nofack.pkt | 64
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_sack_sack-shift-sacked-7-3-4-8-9-fack.pkt | 66
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_sack_sack-shift-sacked-7-5-6-8-9-fack.pkt | 62
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_sendfile_sendfile-simple.pkt | 26
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-1pkt.pkt | 56
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt-send-5pkt.pkt | 33
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt-send-6pkt.pkt | 34
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt.pkt | 42
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-4pkt.pkt | 35
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-after-idle.pkt | 39
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-after-win-update.pkt | 50
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-app-limited-9-packets-out.pkt | 38
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-app-limited.pkt | 36
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-fq-ack-per-2pkt.pkt | 63
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_splice_tcp_splice_loop_test.pkt | 20
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_syscall_bad_arg_fastopen-invalid-buf-ptr.pkt | 42
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_syscall_bad_arg_sendmsg-empty-iov.pkt | 34
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_syscall_bad_arg_syscall-invalid-buf-ptr.pkt | 25
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_tcp_info_tcp-info-last_data_recv.pkt | 20
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_tcp_info_tcp-info-rwnd-limited.pkt | 54
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_tcp_info_tcp-info-sndbuf-limited.pkt | 38
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_timestamping_client-only-last-byte.pkt | 92
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_timestamping_partial.pkt | 91
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_timestamping_server.pkt | 145
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_ts_recent_fin_tsval.pkt | 23
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_ts_recent_invalid_ack.pkt | 25
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_ts_recent_reset_tsval.pkt | 25
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_user_timeout_user-timeout-probe.pkt | 39
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_user_timeout_user_timeout.pkt | 32
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_validate_validate-established-no-flags.pkt | 24
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt | 57
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt | 43
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt | 32
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt | 46
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt | 64
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt | 66
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt | 69
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt | 58
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt | 46
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt | 120
-rw-r--r-- tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt | 59
-rwxr-xr-x tools/testing/selftests/net/pmtu.sh | 753
-rw-r--r-- tools/testing/selftests/net/proc_net_pktgen.c | 690
-rw-r--r-- tools/testing/selftests/net/psock_fanout.c | 93
-rw-r--r-- tools/testing/selftests/net/psock_lib.h | 8
-rw-r--r-- tools/testing/selftests/net/psock_snd.c | 2
-rwxr-xr-x tools/testing/selftests/net/psock_snd.sh | 3
-rw-r--r-- tools/testing/selftests/net/psock_tpacket.c | 8
-rw-r--r-- tools/testing/selftests/net/rds/.gitignore | 1
-rw-r--r-- tools/testing/selftests/net/rds/Makefile | 18
-rw-r--r-- tools/testing/selftests/net/rds/README.txt | 41
-rwxr-xr-x tools/testing/selftests/net/rds/config.sh | 53
-rwxr-xr-x tools/testing/selftests/net/rds/run.sh | 224
-rwxr-xr-x tools/testing/selftests/net/rds/test.py | 265
-rw-r--r-- tools/testing/selftests/net/reuseaddr_conflict.c | 2
-rw-r--r-- tools/testing/selftests/net/reuseaddr_ports_exhausted.c | 36
-rw-r--r-- tools/testing/selftests/net/reuseport_addr_any.c | 36
-rw-r--r-- tools/testing/selftests/net/reuseport_bpf.c | 6
-rw-r--r-- tools/testing/selftests/net/reuseport_bpf_numa.c | 8
-rwxr-xr-x tools/testing/selftests/net/route_hint.sh | 79
-rwxr-xr-x tools/testing/selftests/net/route_localnet.sh | 6
-rwxr-xr-x tools/testing/selftests/net/rps_default_mask.sh | 79
-rwxr-xr-x tools/testing/selftests/net/rtnetlink.py | 30
-rwxr-xr-x tools/testing/selftests/net/rtnetlink.sh | 1378
-rwxr-xr-x tools/testing/selftests/net/rtnetlink_notification.sh | 112
-rwxr-xr-x tools/testing/selftests/net/run_afpackettests | 5
-rw-r--r-- tools/testing/selftests/net/rxtimestamp.c | 20
-rw-r--r-- tools/testing/selftests/net/sample_map_ret0.bpf.c (renamed from tools/testing/selftests/bpf/progs/sample_map_ret0.c) | 26
-rw-r--r-- tools/testing/selftests/net/sample_ret0.bpf.c (renamed from tools/testing/selftests/bpf/progs/sample_ret0.c) | 3
-rw-r--r-- tools/testing/selftests/net/sctp_hello.c | 124
-rwxr-xr-x tools/testing/selftests/net/sctp_vrf.sh | 189
-rw-r--r-- tools/testing/selftests/net/settings | 1
-rw-r--r-- tools/testing/selftests/net/sk_bind_sendto_listen.c | 80
-rw-r--r-- tools/testing/selftests/net/sk_connect_zero_addr.c | 62
-rw-r--r-- tools/testing/selftests/net/sk_so_peek_off.c | 202
-rw-r--r-- tools/testing/selftests/net/skf_net_off.c | 244
-rwxr-xr-x tools/testing/selftests/net/skf_net_off.sh | 30
-rw-r--r-- tools/testing/selftests/net/so_incoming_cpu.c | 274
-rw-r--r-- tools/testing/selftests/net/so_netns_cookie.c | 61
-rw-r--r-- tools/testing/selftests/net/so_rcv_listener.c | 168
-rw-r--r-- tools/testing/selftests/net/so_txtime.c | 258
-rwxr-xr-x tools/testing/selftests/net/so_txtime.sh | 120
-rw-r--r-- tools/testing/selftests/net/socket.c | 14
-rwxr-xr-x tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh | 568
-rwxr-xr-x tools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh | 59
-rwxr-xr-x tools/testing/selftests/net/srv6_end_dt6_l3vpn_test.sh | 51
-rwxr-xr-x tools/testing/selftests/net/srv6_end_dx4_netfilter_test.sh | 335
-rwxr-xr-x tools/testing/selftests/net/srv6_end_dx6_netfilter_test.sh | 340
-rwxr-xr-x tools/testing/selftests/net/srv6_end_flavors_test.sh | 869
-rwxr-xr-x tools/testing/selftests/net/srv6_end_next_csid_l3vpn_test.sh | 1104
-rwxr-xr-x tools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh | 1220
-rwxr-xr-x tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh | 837
-rwxr-xr-x tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh | 780
-rw-r--r-- tools/testing/selftests/net/stress_reuseport_listen.c | 105
-rwxr-xr-x tools/testing/selftests/net/stress_reuseport_listen.sh | 25
-rw-r--r-- tools/testing/selftests/net/tap.c | 434
-rw-r--r-- tools/testing/selftests/net/tcp_ao/.gitignore | 2
-rw-r--r-- tools/testing/selftests/net/tcp_ao/Makefile | 57
-rw-r--r-- tools/testing/selftests/net/tcp_ao/bench-lookups.c | 360
-rw-r--r-- tools/testing/selftests/net/tcp_ao/config | 11
-rw-r--r-- tools/testing/selftests/net/tcp_ao/connect-deny.c | 291
-rw-r--r-- tools/testing/selftests/net/tcp_ao/connect.c | 90
l--------- tools/testing/selftests/net/tcp_ao/icmps-accept.c | 1
-rw-r--r-- tools/testing/selftests/net/tcp_ao/icmps-discard.c | 448
-rw-r--r-- tools/testing/selftests/net/tcp_ao/key-management.c | 1198
-rw-r--r-- tools/testing/selftests/net/tcp_ao/lib/aolib.h | 832
-rw-r--r-- tools/testing/selftests/net/tcp_ao/lib/ftrace-tcp.c | 556
-rw-r--r-- tools/testing/selftests/net/tcp_ao/lib/ftrace.c | 543
-rw-r--r-- tools/testing/selftests/net/tcp_ao/lib/kconfig.c | 157
-rw-r--r-- tools/testing/selftests/net/tcp_ao/lib/netlink.c | 413
-rw-r--r-- tools/testing/selftests/net/tcp_ao/lib/proc.c | 273
-rw-r--r-- tools/testing/selftests/net/tcp_ao/lib/repair.c | 254
-rw-r--r-- tools/testing/selftests/net/tcp_ao/lib/setup.c | 368
-rw-r--r-- tools/testing/selftests/net/tcp_ao/lib/sock.c | 730
-rw-r--r-- tools/testing/selftests/net/tcp_ao/lib/utils.c | 56
-rw-r--r-- tools/testing/selftests/net/tcp_ao/restore.c | 251
-rw-r--r-- tools/testing/selftests/net/tcp_ao/rst.c | 459
-rw-r--r-- tools/testing/selftests/net/tcp_ao/self-connect.c | 191
-rw-r--r-- tools/testing/selftests/net/tcp_ao/seq-ext.c | 255
-rw-r--r-- tools/testing/selftests/net/tcp_ao/setsockopt-closed.c | 1011
-rw-r--r-- tools/testing/selftests/net/tcp_ao/settings | 1
-rw-r--r-- tools/testing/selftests/net/tcp_ao/unsigned-md5.c | 772
-rw-r--r-- tools/testing/selftests/net/tcp_fastopen_backup_key.c | 6
-rw-r--r-- tools/testing/selftests/net/tcp_mmap.c | 123
-rw-r--r-- tools/testing/selftests/net/tcp_port_share.c | 258
-rwxr-xr-x tools/testing/selftests/net/test_blackhole_dev.sh | 11
-rwxr-xr-x tools/testing/selftests/net/test_bridge_backup_port.sh | 798
-rwxr-xr-x tools/testing/selftests/net/test_bridge_neigh_suppress.sh | 972
-rw-r--r-- tools/testing/selftests/net/test_ingress_egress_chaining.sh | 79
-rwxr-xr-x tools/testing/selftests/net/test_neigh.sh | 366
-rwxr-xr-x tools/testing/selftests/net/test_so_rcv.sh | 73
-rwxr-xr-x tools/testing/selftests/net/test_vxlan_fdb_changelink.sh | 111
-rwxr-xr-x tools/testing/selftests/net/test_vxlan_mdb.sh | 2562
-rwxr-xr-x tools/testing/selftests/net/test_vxlan_nh.sh | 223
-rwxr-xr-x tools/testing/selftests/net/test_vxlan_nolocalbypass.sh | 238
-rwxr-xr-x tools/testing/selftests/net/test_vxlan_under_vrf.sh | 72
-rwxr-xr-x tools/testing/selftests/net/test_vxlan_vnifiltering.sh | 606
-rw-r--r-- tools/testing/selftests/net/tfo.c | 171
-rwxr-xr-x tools/testing/selftests/net/tfo_passive.sh | 112
-rw-r--r-- tools/testing/selftests/net/timestamping.c | 55
-rw-r--r-- tools/testing/selftests/net/tls.c | 2278
-rwxr-xr-x tools/testing/selftests/net/traceroute.sh | 645
-rw-r--r-- tools/testing/selftests/net/tun.c | 162
-rw-r--r-- tools/testing/selftests/net/txtimestamp.c | 67
-rwxr-xr-x tools/testing/selftests/net/txtimestamp.sh | 24
-rwxr-xr-x tools/testing/selftests/net/udpgro.sh | 94
-rwxr-xr-x tools/testing/selftests/net/udpgro_bench.sh | 13
-rwxr-xr-x tools/testing/selftests/net/udpgro_frglist.sh | 104
-rwxr-xr-x tools/testing/selftests/net/udpgro_fwd.sh | 265
-rw-r--r-- tools/testing/selftests/net/udpgso.c | 214
-rwxr-xr-x tools/testing/selftests/net/udpgso.sh | 92
-rwxr-xr-x tools/testing/selftests/net/udpgso_bench.sh | 29
-rw-r--r-- tools/testing/selftests/net/udpgso_bench_rx.c | 23
-rw-r--r-- tools/testing/selftests/net/udpgso_bench_tx.c | 46
-rwxr-xr-x tools/testing/selftests/net/unicast_extensions.sh | 218
-rwxr-xr-x tools/testing/selftests/net/veth.sh | 390
-rwxr-xr-x tools/testing/selftests/net/vlan_bridge_binding.sh | 258
-rwxr-xr-x tools/testing/selftests/net/vlan_hw_filter.sh | 103
-rwxr-xr-x tools/testing/selftests/net/vrf-xfrm-tests.sh | 109
-rwxr-xr-x tools/testing/selftests/net/vrf_route_leaking.sh | 299
-rwxr-xr-x tools/testing/selftests/net/vrf_strict_mode_test.sh | 98
-rwxr-xr-x tools/testing/selftests/net/xfrm_policy.sh | 181
-rwxr-xr-x tools/testing/selftests/net/xfrm_policy_add_speed.sh | 83
-rw-r--r-- tools/testing/selftests/net/ynl.mk | 40
-rw-r--r-- tools/testing/selftests/netfilter/Makefile | 12
-rwxr-xr-x tools/testing/selftests/netfilter/bridge_brouter.sh | 146
-rw-r--r-- tools/testing/selftests/netfilter/config | 8
-rwxr-xr-x tools/testing/selftests/netfilter/conntrack_icmp_related.sh | 283
-rwxr-xr-x tools/testing/selftests/netfilter/ipvs.sh | 228
-rwxr-xr-x tools/testing/selftests/netfilter/nft_conntrack_helper.sh | 175
-rwxr-xr-x tools/testing/selftests/netfilter/nft_flowtable.sh | 421
-rwxr-xr-x tools/testing/selftests/netfilter/nft_nat.sh | 869
-rwxr-xr-x tools/testing/selftests/netfilter/nft_queue.sh | 376
-rwxr-xr-x tools/testing/selftests/netfilter/nft_trans_stress.sh | 78
-rw-r--r--tools/testing/selftests/nolibc/.gitignore7
-rw-r--r--tools/testing/selftests/nolibc/Makefile26
-rw-r--r--tools/testing/selftests/nolibc/Makefile.include10
-rw-r--r--tools/testing/selftests/nolibc/Makefile.nolibc382
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test-linkage.c24
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test-linkage.h9
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test.c2027
-rwxr-xr-xtools/testing/selftests/nolibc/run-tests.sh212
-rw-r--r--tools/testing/selftests/openat2/Makefile10
-rw-r--r--tools/testing/selftests/openat2/helpers.h14
-rw-r--r--tools/testing/selftests/openat2/openat2_test.c28
-rw-r--r--tools/testing/selftests/openat2/rename_attack_test.c2
-rw-r--r--tools/testing/selftests/openat2/resolve_test.c2
-rw-r--r--tools/testing/selftests/pci_endpoint/.gitignore2
-rw-r--r--tools/testing/selftests/pci_endpoint/Makefile7
-rw-r--r--tools/testing/selftests/pci_endpoint/config4
-rw-r--r--tools/testing/selftests/pci_endpoint/pci_endpoint_test.c264
-rw-r--r--tools/testing/selftests/pcie_bwctrl/Makefile3
-rwxr-xr-xtools/testing/selftests/pcie_bwctrl/set_pcie_cooling_state.sh122
-rwxr-xr-xtools/testing/selftests/pcie_bwctrl/set_pcie_speed.sh67
-rw-r--r--tools/testing/selftests/perf_events/.gitignore5
-rw-r--r--tools/testing/selftests/perf_events/Makefile6
-rw-r--r--tools/testing/selftests/perf_events/config1
-rw-r--r--tools/testing/selftests/perf_events/mmap.c236
-rw-r--r--tools/testing/selftests/perf_events/remove_on_exec.c260
-rw-r--r--tools/testing/selftests/perf_events/settings1
-rw-r--r--tools/testing/selftests/perf_events/sigtrap_threads.c240
-rw-r--r--tools/testing/selftests/perf_events/watermark_signal.c144
-rw-r--r--tools/testing/selftests/pid_namespace/.gitignore1
-rw-r--r--tools/testing/selftests/pid_namespace/Makefile8
-rw-r--r--tools/testing/selftests/pid_namespace/pid_max.c359
-rw-r--r--tools/testing/selftests/pid_namespace/regression_enomem.c2
-rw-r--r--tools/testing/selftests/pidfd/.gitignore6
-rw-r--r--tools/testing/selftests/pidfd/Makefile8
-rw-r--r--tools/testing/selftests/pidfd/config1
-rw-r--r--tools/testing/selftests/pidfd/pidfd.h217
-rw-r--r--tools/testing/selftests/pidfd/pidfd_bind_mount.c116
-rw-r--r--tools/testing/selftests/pidfd/pidfd_exec_helper.c12
-rw-r--r--tools/testing/selftests/pidfd/pidfd_fdinfo_test.c30
-rw-r--r--tools/testing/selftests/pidfd/pidfd_file_handle_test.c563
-rw-r--r--tools/testing/selftests/pidfd/pidfd_getfd_test.c33
-rw-r--r--tools/testing/selftests/pidfd/pidfd_info_test.c766
-rw-r--r--tools/testing/selftests/pidfd/pidfd_open_test.c62
-rw-r--r--tools/testing/selftests/pidfd/pidfd_poll_test.c4
-rw-r--r--tools/testing/selftests/pidfd/pidfd_setattr_test.c69
-rw-r--r--tools/testing/selftests/pidfd/pidfd_setns_test.c264
-rw-r--r--tools/testing/selftests/pidfd/pidfd_test.c107
-rw-r--r--tools/testing/selftests/pidfd/pidfd_wait.c67
-rw-r--r--tools/testing/selftests/pidfd/pidfd_xattr_test.c132
-rw-r--r--tools/testing/selftests/power_supply/Makefile4
-rw-r--r--tools/testing/selftests/power_supply/helpers.sh178
-rwxr-xr-xtools/testing/selftests/power_supply/test_power_supply_properties.sh114
-rw-r--r--tools/testing/selftests/powerpc/Makefile31
-rw-r--r--tools/testing/selftests/powerpc/alignment/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/alignment/alignment_handler.c11
-rw-r--r--tools/testing/selftests/powerpc/alignment/settings1
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/Makefile7
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/exec_target.c16
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/gettimeofday.c6
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/null_syscall.c3
-rw-r--r--tools/testing/selftests/powerpc/cache_shape/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/cache_shape/settings1
-rw-r--r--tools/testing/selftests/powerpc/copyloops/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/copyloops/Makefile30
-rw-r--r--tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h14
-rw-r--r--tools/testing/selftests/powerpc/copyloops/linux/export.h (renamed from tools/testing/selftests/powerpc/copyloops/asm/export.h)0
l---------tools/testing/selftests/powerpc/copyloops/mem_64.S1
-rw-r--r--tools/testing/selftests/powerpc/copyloops/memcpy_stubs.S8
-rw-r--r--tools/testing/selftests/powerpc/copyloops/memmove_validate.c58
-rw-r--r--tools/testing/selftests/powerpc/copyloops/settings1
-rw-r--r--tools/testing/selftests/powerpc/dexcr/.gitignore4
-rw-r--r--tools/testing/selftests/powerpc/dexcr/Makefile12
-rw-r--r--tools/testing/selftests/powerpc/dexcr/chdexcr.c112
-rw-r--r--tools/testing/selftests/powerpc/dexcr/dexcr.c172
-rw-r--r--tools/testing/selftests/powerpc/dexcr/dexcr.h106
-rw-r--r--tools/testing/selftests/powerpc/dexcr/dexcr_test.c215
-rw-r--r--tools/testing/selftests/powerpc/dexcr/hashchk_test.c233
-rw-r--r--tools/testing/selftests/powerpc/dexcr/lsdexcr.c172
-rw-r--r--tools/testing/selftests/powerpc/dexcr/settings1
-rw-r--r--tools/testing/selftests/powerpc/dscr/Makefile4
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr.h55
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_default_test.c207
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c169
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c4
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c39
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_user_test.c4
-rw-r--r--tools/testing/selftests/powerpc/dscr/settings2
-rw-r--r--tools/testing/selftests/powerpc/eeh/Makefile1
-rwxr-xr-xtools/testing/selftests/powerpc/eeh/eeh-basic.sh41
-rw-r--r--[-rwxr-xr-x]tools/testing/selftests/powerpc/eeh/eeh-functions.sh168
-rwxr-xr-xtools/testing/selftests/powerpc/eeh/eeh-vf-aware.sh45
-rwxr-xr-xtools/testing/selftests/powerpc/eeh/eeh-vf-unaware.sh35
-rw-r--r--tools/testing/selftests/powerpc/flags.mk9
-rw-r--r--tools/testing/selftests/powerpc/harness.c4
-rw-r--r--tools/testing/selftests/powerpc/include/basic_asm.h63
-rw-r--r--tools/testing/selftests/powerpc/include/instructions.h2
-rw-r--r--tools/testing/selftests/powerpc/include/pkeys.h15
-rw-r--r--tools/testing/selftests/powerpc/include/reg.h81
-rw-r--r--tools/testing/selftests/powerpc/include/subunit.h16
-rw-r--r--tools/testing/selftests/powerpc/include/utils.h73
-rw-r--r--tools/testing/selftests/powerpc/lib/reg.S107
-rw-r--r--tools/testing/selftests/powerpc/lib/settings1
-rw-r--r--tools/testing/selftests/powerpc/math/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/math/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/math/fpu.h25
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_asm.S48
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_preempt.c30
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_signal.c16
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_syscall.c8
-rw-r--r--tools/testing/selftests/powerpc/math/mma.S36
-rw-r--r--tools/testing/selftests/powerpc/math/mma.c48
-rw-r--r--tools/testing/selftests/powerpc/math/settings1
-rw-r--r--tools/testing/selftests/powerpc/math/vmx_preempt.c10
-rw-r--r--tools/testing/selftests/powerpc/math/vmx_signal.c1
-rw-r--r--tools/testing/selftests/powerpc/mce/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/mce/Makefile8
-rw-r--r--tools/testing/selftests/powerpc/mce/inject-ra-err.c65
-rw-r--r--tools/testing/selftests/powerpc/mce/settings1
l---------tools/testing/selftests/powerpc/mce/vas-api.h1
-rw-r--r--tools/testing/selftests/powerpc/mm/.gitignore16
-rw-r--r--tools/testing/selftests/powerpc/mm/Makefile11
-rw-r--r--tools/testing/selftests/powerpc/mm/exec_prot.c231
-rw-r--r--tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c158
-rw-r--r--tools/testing/selftests/powerpc/mm/pkey_exec_prot.c2
-rw-r--r--tools/testing/selftests/powerpc/mm/pkey_siginfo.c2
-rw-r--r--tools/testing/selftests/powerpc/mm/settings1
-rw-r--r--tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c2
-rwxr-xr-xtools/testing/selftests/powerpc/mm/stress_code_patching.sh49
-rw-r--r--tools/testing/selftests/powerpc/mm/subpage_prot.c4
-rw-r--r--tools/testing/selftests/powerpc/mm/tlbie_test.c11
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/Makefile7
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c71
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/settings1
-rw-r--r--tools/testing/selftests/powerpc/papr_attributes/.gitignore (renamed from tools/testing/selftests/netfilter/.gitignore)2
-rw-r--r--tools/testing/selftests/powerpc/papr_attributes/Makefile8
-rw-r--r--tools/testing/selftests/powerpc/papr_attributes/attr_test.c113
-rw-r--r--tools/testing/selftests/powerpc/papr_attributes/settings1
-rw-r--r--tools/testing/selftests/powerpc/papr_sysparm/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/papr_sysparm/Makefile13
-rw-r--r--tools/testing/selftests/powerpc/papr_sysparm/papr_sysparm.c196
-rw-r--r--tools/testing/selftests/powerpc/papr_sysparm/settings1
-rw-r--r--tools/testing/selftests/powerpc/papr_vpd/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/papr_vpd/Makefile13
-rw-r--r--tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c352
-rw-r--r--tools/testing/selftests/powerpc/papr_vpd/settings1
-rw-r--r--tools/testing/selftests/powerpc/pmu/Makefile46
-rw-r--r--tools/testing/selftests/powerpc/pmu/branch_loops.S28
-rw-r--r--tools/testing/selftests/powerpc/pmu/count_stcx_fail.c3
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/Makefile25
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c3
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c3
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c1
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb.h2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c3
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S43
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c6
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/regs_access_pmccext_test.c63
-rw-r--r--tools/testing/selftests/powerpc/pmu/event.c19
-rw-r--r--tools/testing/selftests/powerpc/pmu/event.h6
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/.gitignore20
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/Makefile16
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/blacklisted_events_test.c132
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p10.c110
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p9.c116
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/generic_events_valid_test.c131
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_cache_test.c60
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_l2l3_sel_test.c64
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_mmcra_sample_test.c54
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc56_test.c63
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc_count_test.c70
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_radix_scope_qual_test.c56
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_repeat_test.c56
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_cmp_test.c96
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_ctl_test.c64
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_sel_test.c63
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_unit_test.c74
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_pmc56_exclude_constraints_test.c64
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/hw_cache_event_type_test.c88
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/invalid_event_code_test.c67
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_sample_elig_mode_test.c78
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_thresh_ctl_test.c44
-rw-r--r--tools/testing/selftests/powerpc/pmu/lib.c53
-rw-r--r--tools/testing/selftests/powerpc/pmu/lib.h1
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/.gitignore21
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/Makefile17
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c117
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_no_crash_wo_pmu_test.c59
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/check_extended_reg_test.c35
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/intr_regs_no_crash_wo_pmu_test.c57
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c553
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/misc.h236
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_cc56run_test.c59
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_exceptionbits_test.c59
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_fc56_pmc1ce_test.c59
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_fc56_pmc56_test.c58
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_pmccext_test.c59
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_pmcjce_test.c58
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_comb_test.c66
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_sel_unit_cache_test.c77
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr2_fcs_fch_test.c85
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr2_l2l3_test.c74
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr3_src_test.c67
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_any_test.c65
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_cond_test.c69
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_no_branch_test.c64
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_test.c66
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_ind_call_test.c69
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_cmp_test.c74
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c80
-rw-r--r--tools/testing/selftests/powerpc/pmu/settings1
-rw-r--r--tools/testing/selftests/powerpc/primitives/Makefile5
l---------tools/testing/selftests/powerpc/primitives/asm/extable.h1
-rw-r--r--tools/testing/selftests/powerpc/primitives/linux/bitops.h (renamed from tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/delay.h)0
l---------tools/testing/selftests/powerpc/primitives/linux/wordpart.h1
-rw-r--r--tools/testing/selftests/powerpc/primitives/settings1
-rw-r--r--tools/testing/selftests/powerpc/ptrace/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/ptrace/Makefile42
-rw-r--r--tools/testing/selftests/powerpc/ptrace/child.h4
-rw-r--r--tools/testing/selftests/powerpc/ptrace/core-pkey.c69
-rw-r--r--tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c671
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-gpr.S52
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c125
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h14
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c111
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-perf-asm.S33
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-perf-hwbreak.c445
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c34
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tar.c2
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c23
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c25
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c3
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c3
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c3
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c3
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c3
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c2
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace.h84
-rw-r--r--tools/testing/selftests/powerpc/ptrace/settings1
-rwxr-xr-xtools/testing/selftests/powerpc/scripts/hmi.sh2
-rw-r--r--tools/testing/selftests/powerpc/scripts/settings1
-rw-r--r--tools/testing/selftests/powerpc/security/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/security/Makefile10
-rw-r--r--tools/testing/selftests/powerpc/security/entry_flush.c14
-rw-r--r--tools/testing/selftests/powerpc/security/flush_utils.c16
-rw-r--r--tools/testing/selftests/powerpc/security/flush_utils.h7
-rwxr-xr-xtools/testing/selftests/powerpc/security/mitigation-patching.sh78
-rw-r--r--tools/testing/selftests/powerpc/security/rfi_flush.c14
-rw-r--r--tools/testing/selftests/powerpc/security/settings1
-rw-r--r--tools/testing/selftests/powerpc/security/spectre_v2.c34
-rw-r--r--tools/testing/selftests/powerpc/security/uaccess_flush.c158
-rw-r--r--tools/testing/selftests/powerpc/signal/.gitignore2
-rw-r--r--tools/testing/selftests/powerpc/signal/Makefile6
-rw-r--r--tools/testing/selftests/powerpc/signal/sigfuz.c2
-rw-r--r--tools/testing/selftests/powerpc/signal/signal_tm.c1
-rw-r--r--tools/testing/selftests/powerpc/signal/sigreturn_kernel.c132
-rw-r--r--tools/testing/selftests/powerpc/signal/sigreturn_unaligned.c43
-rw-r--r--tools/testing/selftests/powerpc/stringloops/Makefile11
-rw-r--r--tools/testing/selftests/powerpc/stringloops/asm/ppc_asm.h1
-rw-r--r--tools/testing/selftests/powerpc/stringloops/linux/export.h (renamed from tools/testing/selftests/powerpc/stringloops/asm/export.h)0
-rw-r--r--tools/testing/selftests/powerpc/stringloops/settings1
-rw-r--r--tools/testing/selftests/powerpc/switch_endian/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/switch_endian/settings1
-rw-r--r--tools/testing/selftests/powerpc/syscalls/Makefile7
-rw-r--r--tools/testing/selftests/powerpc/syscalls/rtas_filter.c81
-rw-r--r--tools/testing/selftests/powerpc/syscalls/settings1
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile3
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-exec.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-fork.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-poison.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-resched-dscr.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-pagefault.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c4
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-stack.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-sigreturn.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-syscall-asm.S37
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-syscall.c38
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-tar.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-tmspr.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-trap.c5
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-unavailable.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-vmxcopy.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm.h36
-rw-r--r--tools/testing/selftests/powerpc/utils.c459
-rw-r--r--tools/testing/selftests/powerpc/vphn/Makefile5
l---------tools/testing/selftests/powerpc/vphn/asm/lppaca.h1
l---------tools/testing/selftests/powerpc/vphn/asm/vphn.h1
-rw-r--r--tools/testing/selftests/powerpc/vphn/settings1
-rw-r--r--tools/testing/selftests/powerpc/vphn/test-vphn.c2
-rw-r--r--tools/testing/selftests/prctl/.gitignore2
-rw-r--r--tools/testing/selftests/prctl/Makefile4
-rw-r--r--tools/testing/selftests/prctl/config1
-rw-r--r--tools/testing/selftests/prctl/disable-tsc-ctxt-sw-stress-test.c2
-rw-r--r--tools/testing/selftests/prctl/disable-tsc-on-off-stress-test.c2
-rw-r--r--tools/testing/selftests/prctl/set-anon-vma-name-test.c104
-rw-r--r--tools/testing/selftests/prctl/set-process-name.c94
-rw-r--r--tools/testing/selftests/proc/.gitignore8
-rw-r--r--tools/testing/selftests/proc/Makefile11
-rw-r--r--tools/testing/selftests/proc/proc-2-is-kthread.c53
-rw-r--r--tools/testing/selftests/proc/proc-empty-vm.c541
-rw-r--r--tools/testing/selftests/proc/proc-maps-race.c806
-rw-r--r--tools/testing/selftests/proc/proc-net-dev-lseek.c68
-rw-r--r--tools/testing/selftests/proc/proc-pid-vm.c154
-rw-r--r--tools/testing/selftests/proc/proc-pidns.c211
-rw-r--r--tools/testing/selftests/proc/proc-self-isnt-kthread.c37
-rw-r--r--tools/testing/selftests/proc/proc-subset-pid.c121
-rw-r--r--tools/testing/selftests/proc/proc-tid0.c81
-rw-r--r--tools/testing/selftests/proc/proc-uptime-001.c25
-rw-r--r--tools/testing/selftests/proc/proc-uptime-002.c30
-rw-r--r--tools/testing/selftests/proc/proc-uptime.h28
-rw-r--r--tools/testing/selftests/proc/read.c4
-rw-r--r--tools/testing/selftests/ptp/Makefile9
-rw-r--r--tools/testing/selftests/ptp/ptpchmaskfmt.sh14
-rw-r--r--tools/testing/selftests/ptp/testptp.c255
-rw-r--r--tools/testing/selftests/ptrace/.gitignore2
-rw-r--r--tools/testing/selftests/ptrace/Makefile4
-rw-r--r--tools/testing/selftests/ptrace/get_set_sud.c72
-rw-r--r--tools/testing/selftests/ptrace/get_syscall_info.c2
-rw-r--r--tools/testing/selftests/ptrace/peeksiginfo.c16
-rw-r--r--tools/testing/selftests/ptrace/set_syscall_info.c519
-rw-r--r--tools/testing/selftests/ptrace/vmaccess.c2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/config2csv.sh66
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/config_override.sh3
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configcheck.sh65
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh3
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/console-badness.sh3
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/cpus2use.sh1
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/rcutorture/bin/functions.sh77
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/jitter.sh49
-rw-r--r--tools/testing/selftests/rcutorture/bin/jitterstart.sh37
-rw-r--r--tools/testing/selftests/rcutorture/bin/jitterstop.sh23
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kcsan-collapse.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-again.sh245
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-assign-cpus.sh105
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh13
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-check-branches.sh15
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-end-run-stats.sh39
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-find-errors.sh21
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-get-cpus-script.sh88
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh4
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcuscale.sh8
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-scf.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh55
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-remote-noreap.sh30
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-remote.sh297
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-series.sh116
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run-batch.sh93
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run-qemu.sh184
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh237
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-transform.sh102
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh303
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/mkinitrd.sh22
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/mktestid.sh29
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-build.sh8
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh29
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/srcu_lockdep.sh118
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/torture.sh831
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/CFLIST2
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK086
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK08.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK096
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK09.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh5
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/BUSTED3
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST17
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST.boot8
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFLIST4
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFcommon2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFcommon.i6862
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFcommon.ppc64le1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFcommon.x86_642
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/RUDE014
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/RUDE01.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-N2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-N.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot3
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-T (renamed from tools/testing/selftests/rcutorture/configs/rcu/SRCU-t)1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-T.boot (renamed from tools/testing/selftests/rcutorture/configs/rcu/SRCU-t.boot)0
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-U (renamed from tools/testing/selftests/rcutorture/configs/rcu/SRCU-u)1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-U.boot (renamed from tools/testing/selftests/rcutorture/configs/rcu/SRCU-u.boot)0
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS013
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS024
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS02.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS035
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TINY011
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TINY021
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TRACE015
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TRACE01.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TRACE023
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TRACE02.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE013
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot6
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE021
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE02.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE048
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE051
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot6
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE061
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE075
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE07.boot4
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE081
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE097
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE107
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE10.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh21
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/CFcommon9
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/TINY2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/TRACE013
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/TREE5
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/TREE543
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/ver_functions.sh5
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/CFcommon4
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT6
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/PREEMPT3
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/TINY20
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/ver_functions.sh5
-rw-r--r--tools/testing/selftests/rcutorture/configs/scf/NOPREEMPT4
-rw-r--r--tools/testing/selftests/rcutorture/configs/scf/PREEMPT1
-rw-r--r--tools/testing/selftests/rcutorture/configs/scf/ver_functions.sh4
-rw-r--r--tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt5
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/Makefile17
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/export.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/mutex.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/percpu.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/preempt.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/rcupdate.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/sched.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/smp.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/workqueue.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/uapi/linux/types.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/kconfig.h1
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h152
-rwxr-xr-xtools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk376
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/assume.h17
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h41
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/bug_on.h14
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/combined_source.c14
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/config.h28
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/include_srcu.c32
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/int_typedefs.h34
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h221
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.c12
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h58
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h93
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c79
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.h59
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/simple_sync_srcu.c51
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h103
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/Makefile12
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/assert_end.fail1
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force.fail1
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force2.fail1
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force3.fail1
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/main.pass0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/test.c73
-rwxr-xr-xtools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/test_script.sh103
-rw-r--r--tools/testing/selftests/resctrl/.gitignore2
-rw-r--r--tools/testing/selftests/resctrl/Makefile20
-rw-r--r--tools/testing/selftests/resctrl/README43
-rw-r--r--tools/testing/selftests/resctrl/cache.c265
-rw-r--r--tools/testing/selftests/resctrl/cat_test.c466
-rw-r--r--tools/testing/selftests/resctrl/cmt_test.c190
-rw-r--r--tools/testing/selftests/resctrl/config2
-rw-r--r--tools/testing/selftests/resctrl/cqm_test.c176
-rw-r--r--tools/testing/selftests/resctrl/fill_buf.c189
-rw-r--r--tools/testing/selftests/resctrl/mba_test.c176
-rw-r--r--tools/testing/selftests/resctrl/mbm_test.c155
-rw-r--r--tools/testing/selftests/resctrl/resctrl.h255
-rw-r--r--tools/testing/selftests/resctrl/resctrl_tests.c388
-rw-r--r--tools/testing/selftests/resctrl/resctrl_val.c765
-rw-r--r--tools/testing/selftests/resctrl/resctrlfs.c783
-rw-r--r--tools/testing/selftests/resctrl/settings3
-rw-r--r--tools/testing/selftests/ring-buffer/.gitignore1
-rw-r--r--tools/testing/selftests/ring-buffer/Makefile7
-rw-r--r--tools/testing/selftests/ring-buffer/config2
-rw-r--r--tools/testing/selftests/ring-buffer/map_test.c324
-rw-r--r--tools/testing/selftests/riscv/Makefile58
-rw-r--r--tools/testing/selftests/riscv/README24
-rw-r--r--tools/testing/selftests/riscv/abi/.gitignore1
-rw-r--r--tools/testing/selftests/riscv/abi/Makefile10
-rw-r--r--tools/testing/selftests/riscv/abi/pointer_masking.c348
-rw-r--r--tools/testing/selftests/riscv/hwprobe/.gitignore3
-rw-r--r--tools/testing/selftests/riscv/hwprobe/Makefile18
-rw-r--r--tools/testing/selftests/riscv/hwprobe/cbo.c377
-rw-r--r--tools/testing/selftests/riscv/hwprobe/hwprobe.c66
-rw-r--r--tools/testing/selftests/riscv/hwprobe/hwprobe.h25
-rw-r--r--tools/testing/selftests/riscv/hwprobe/sys_hwprobe.S12
-rw-r--r--tools/testing/selftests/riscv/hwprobe/which-cpus.c154
-rw-r--r--tools/testing/selftests/riscv/mm/.gitignore2
-rw-r--r--tools/testing/selftests/riscv/mm/Makefile15
-rw-r--r--tools/testing/selftests/riscv/mm/mmap_bottomup.c12
-rw-r--r--tools/testing/selftests/riscv/mm/mmap_default.c12
-rw-r--r--tools/testing/selftests/riscv/mm/mmap_test.h23
-rwxr-xr-xtools/testing/selftests/riscv/mm/run_mmap.sh12
-rw-r--r--tools/testing/selftests/riscv/sigreturn/.gitignore1
-rw-r--r--tools/testing/selftests/riscv/sigreturn/Makefile12
-rw-r--r--tools/testing/selftests/riscv/sigreturn/sigreturn.c82
-rw-r--r--tools/testing/selftests/riscv/vector/.gitignore4
-rw-r--r--tools/testing/selftests/riscv/vector/Makefile31
-rw-r--r--tools/testing/selftests/riscv/vector/v_exec_initval_nolibc.c90
-rw-r--r--tools/testing/selftests/riscv/vector/v_helpers.c68
-rw-r--r--tools/testing/selftests/riscv/vector/v_helpers.h8
-rw-r--r--tools/testing/selftests/riscv/vector/v_initval.c22
-rw-r--r--tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c118
-rw-r--r--tools/testing/selftests/riscv/vector/vstate_prctl.c246
-rw-r--r--tools/testing/selftests/riscv/vector/vstate_ptrace.c134
-rw-r--r--tools/testing/selftests/rlimits/.gitignore2
-rw-r--r--tools/testing/selftests/rlimits/Makefile6
-rw-r--r--tools/testing/selftests/rlimits/config1
-rw-r--r--tools/testing/selftests/rlimits/rlimits-per-userns.c161
-rw-r--r--tools/testing/selftests/rseq/.gitignore5
-rw-r--r--tools/testing/selftests/rseq/Makefile35
-rw-r--r--tools/testing/selftests/rseq/basic_percpu_ops_test.c63
-rw-r--r--tools/testing/selftests/rseq/basic_test.c4
-rw-r--r--tools/testing/selftests/rseq/compiler.h62
-rw-r--r--tools/testing/selftests/rseq/param_test.c254
-rw-r--r--tools/testing/selftests/rseq/rseq-abi.h173
-rw-r--r--tools/testing/selftests/rseq/rseq-arm-bits.h505
-rw-r--r--tools/testing/selftests/rseq/rseq-arm.h703
-rw-r--r--tools/testing/selftests/rseq/rseq-arm64-bits.h392
-rw-r--r--tools/testing/selftests/rseq/rseq-arm64.h545
-rw-r--r--tools/testing/selftests/rseq/rseq-bits-reset.h11
-rw-r--r--tools/testing/selftests/rseq/rseq-bits-template.h41
-rw-r--r--tools/testing/selftests/rseq/rseq-generic-thread-pointer.h25
-rw-r--r--tools/testing/selftests/rseq/rseq-mips-bits.h462
-rw-r--r--tools/testing/selftests/rseq/rseq-mips.h687
-rw-r--r--tools/testing/selftests/rseq/rseq-or1k-bits.h412
-rw-r--r--tools/testing/selftests/rseq/rseq-or1k-thread-pointer.h13
-rw-r--r--tools/testing/selftests/rseq/rseq-or1k.h181
-rw-r--r--tools/testing/selftests/rseq/rseq-ppc-bits.h454
-rw-r--r--tools/testing/selftests/rseq/rseq-ppc-thread-pointer.h30
-rw-r--r--tools/testing/selftests/rseq/rseq-ppc.h637
-rw-r--r--tools/testing/selftests/rseq/rseq-riscv-bits.h410
-rw-r--r--tools/testing/selftests/rseq/rseq-riscv.h197
-rw-r--r--tools/testing/selftests/rseq/rseq-s390-bits.h474
-rw-r--r--tools/testing/selftests/rseq/rseq-s390.h509
-rw-r--r--tools/testing/selftests/rseq/rseq-skip.h65
-rw-r--r--tools/testing/selftests/rseq/rseq-thread-pointer.h21
-rw-r--r--tools/testing/selftests/rseq/rseq-x86-bits.h993
-rw-r--r--tools/testing/selftests/rseq/rseq-x86-thread-pointer.h40
-rw-r--r--tools/testing/selftests/rseq/rseq-x86.h1133
-rw-r--r--tools/testing/selftests/rseq/rseq.c302
-rw-r--r--tools/testing/selftests/rseq/rseq.h247
-rwxr-xr-xtools/testing/selftests/rseq/run_param_test.sh5
-rwxr-xr-xtools/testing/selftests/rseq/run_syscall_errors_test.sh5
-rw-r--r--tools/testing/selftests/rseq/syscall_errors_test.c124
-rw-r--r--tools/testing/selftests/rtc/.gitignore1
-rw-r--r--tools/testing/selftests/rtc/Makefile4
-rw-r--r--tools/testing/selftests/rtc/rtctest.c192
-rw-r--r--tools/testing/selftests/rtc/setdate.c77
-rw-r--r--tools/testing/selftests/rtc/settings2
-rwxr-xr-xtools/testing/selftests/run_kselftest.sh40
-rw-r--r--tools/testing/selftests/rust/Makefile4
-rw-r--r--tools/testing/selftests/rust/config6
-rwxr-xr-xtools/testing/selftests/rust/test_probe_samples.sh41
-rw-r--r--tools/testing/selftests/safesetid/Makefile2
-rw-r--r--tools/testing/selftests/safesetid/safesetid-test.c295
-rw-r--r--tools/testing/selftests/sched/.gitignore1
-rw-r--r--tools/testing/selftests/sched/Makefile14
-rw-r--r--tools/testing/selftests/sched/config1
-rw-r--r--tools/testing/selftests/sched/cs_prctl_test.c359
-rw-r--r--tools/testing/selftests/sched_ext/.gitignore6
-rw-r--r--tools/testing/selftests/sched_ext/Makefile214
-rw-r--r--tools/testing/selftests/sched_ext/allowed_cpus.bpf.c144
-rw-r--r--tools/testing/selftests/sched_ext/allowed_cpus.c84
-rw-r--r--tools/testing/selftests/sched_ext/config8
-rw-r--r--tools/testing/selftests/sched_ext/create_dsq.bpf.c58
-rw-r--r--tools/testing/selftests/sched_ext/create_dsq.c57
-rw-r--r--tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c42
-rw-r--r--tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.c60
-rw-r--r--tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c39
-rw-r--r--tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.c59
-rw-r--r--tools/testing/selftests/sched_ext/dsp_local_on.bpf.c68
-rw-r--r--tools/testing/selftests/sched_ext/dsp_local_on.c60
-rw-r--r--tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c29
-rw-r--r--tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c64
-rw-r--r--tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c74
-rw-r--r--tools/testing/selftests/sched_ext/enq_select_cpu.c88
-rw-r--r--tools/testing/selftests/sched_ext/exit.bpf.c86
-rw-r--r--tools/testing/selftests/sched_ext/exit.c64
-rw-r--r--tools/testing/selftests/sched_ext/exit_test.h20
-rw-r--r--tools/testing/selftests/sched_ext/hotplug.bpf.c61
-rw-r--r--tools/testing/selftests/sched_ext/hotplug.c169
-rw-r--r--tools/testing/selftests/sched_ext/hotplug_test.h15
-rw-r--r--tools/testing/selftests/sched_ext/init_enable_count.bpf.c53
-rw-r--r--tools/testing/selftests/sched_ext/init_enable_count.c157
-rw-r--r--tools/testing/selftests/sched_ext/maximal.bpf.c171
-rw-r--r--tools/testing/selftests/sched_ext/maximal.c54
-rw-r--r--tools/testing/selftests/sched_ext/maybe_null.bpf.c36
-rw-r--r--tools/testing/selftests/sched_ext/maybe_null.c49
-rw-r--r--tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c25
-rw-r--r--tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c28
-rw-r--r--tools/testing/selftests/sched_ext/minimal.bpf.c21
-rw-r--r--tools/testing/selftests/sched_ext/minimal.c58
-rw-r--r--tools/testing/selftests/sched_ext/numa.bpf.c100
-rw-r--r--tools/testing/selftests/sched_ext/numa.c59
-rw-r--r--tools/testing/selftests/sched_ext/peek_dsq.bpf.c251
-rw-r--r--tools/testing/selftests/sched_ext/peek_dsq.c224
-rw-r--r--tools/testing/selftests/sched_ext/prog_run.bpf.c33
-rw-r--r--tools/testing/selftests/sched_ext/prog_run.c78
-rw-r--r--tools/testing/selftests/sched_ext/reload_loop.c74
-rw-r--r--tools/testing/selftests/sched_ext/runner.c212
-rw-r--r--tools/testing/selftests/sched_ext/scx_test.h131
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c40
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dfl.c75
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c89
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.c75
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c41
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch.c73
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c37
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.c59
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c38
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.c59
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c92
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_vtime.c62
-rw-r--r--tools/testing/selftests/sched_ext/test_example.c49
-rw-r--r--tools/testing/selftests/sched_ext/util.c71
-rw-r--r--tools/testing/selftests/sched_ext/util.h13
-rw-r--r--tools/testing/selftests/seccomp/Makefile3
-rw-r--r--tools/testing/selftests/seccomp/seccomp_benchmark.c154
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c1257
-rw-r--r--tools/testing/selftests/seccomp/settings2
-rw-r--r--tools/testing/selftests/sgx/Makefile17
-rw-r--r--tools/testing/selftests/sgx/call.S6
-rw-r--r--tools/testing/selftests/sgx/defines.h62
-rw-r--r--tools/testing/selftests/sgx/load.c145
-rw-r--r--tools/testing/selftests/sgx/main.c1941
-rw-r--r--tools/testing/selftests/sgx/main.h11
-rw-r--r--tools/testing/selftests/sgx/sigstruct.c64
-rw-r--r--tools/testing/selftests/sgx/test_encl.c146
-rw-r--r--tools/testing/selftests/sgx/test_encl.lds13
-rw-r--r--tools/testing/selftests/sgx/test_encl_bootstrap.S53
-rw-r--r--tools/testing/selftests/signal/.gitignore3
-rw-r--r--tools/testing/selftests/signal/Makefile (renamed from tools/testing/selftests/sigaltstack/Makefile)3
-rw-r--r--tools/testing/selftests/signal/current_stack_pointer.h23
-rw-r--r--tools/testing/selftests/signal/mangle_uc_sigmask.c184
-rw-r--r--tools/testing/selftests/signal/sas.c (renamed from tools/testing/selftests/sigaltstack/sas.c)29
-rw-r--r--tools/testing/selftests/sparc64/drivers/adi-test.c6
-rwxr-xr-xtools/testing/selftests/splice/short_splice_read.sh119
-rwxr-xr-xtools/testing/selftests/static_keys/test_static_keys.sh8
-rw-r--r--tools/testing/selftests/sync/Makefile2
-rw-r--r--tools/testing/selftests/sync/config2
-rw-r--r--tools/testing/selftests/sync/sync_test.c5
-rw-r--r--tools/testing/selftests/syscall_user_dispatch/sud_benchmark.c8
-rw-r--r--tools/testing/selftests/syscall_user_dispatch/sud_test.c170
-rwxr-xr-xtools/testing/selftests/sysctl/sysctl.sh308
-rw-r--r--tools/testing/selftests/tc-testing/.gitignore1
-rw-r--r--tools/testing/selftests/tc-testing/Makefile31
-rw-r--r--tools/testing/selftests/tc-testing/README67
-rw-r--r--tools/testing/selftests/tc-testing/TdcPlugin.py4
-rw-r--r--tools/testing/selftests/tc-testing/TdcResults.py3
-rw-r--r--tools/testing/selftests/tc-testing/action-ebpf (binary) 0 -> 856 bytes
-rw-r--r--tools/testing/selftests/tc-testing/config54
-rw-r--r--tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt2
-rw-r--r--tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py67
-rw-r--r--tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py242
-rw-r--r--tools/testing/selftests/tc-testing/plugin-lib/rootPlugin.py4
-rw-r--r--tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py42
-rw-r--r--tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py5
-rwxr-xr-xtools/testing/selftests/tc-testing/scripts/sfq_rejects_limit_1.py21
-rwxr-xr-xtools/testing/selftests/tc-testing/scripts/taprio_wait_for_admin.sh16
-rw-r--r--tools/testing/selftests/tc-testing/settings1
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json14
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json95
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/csum.json69
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ct.json99
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ctinfo.json352
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/gact.json152
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/gate.json351
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ife.json194
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json475
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/mpls.json159
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/nat.json145
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json198
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/police.json184
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/sample.json137
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/simple.json110
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json90
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json78
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json192
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json136
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/basic.json53
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/bpf.json175
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/cgroup.json1236
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/flow.json623
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/flower.json (renamed from tools/testing/selftests/tc-testing/tc-tests/filters/concurrency.json)98
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/fw.json315
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/matchall.json222
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/route.json206
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/tests.json129
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/u32.json152
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/infra/actions.json438
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/infra/filter.json26
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json1037
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/cake.json445
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbs.json214
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/choke.json172
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/codel.json217
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/drr.json90
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/dualpi2.json254
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/etf.json107
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/ets.json284
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/fifo.json121
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json403
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_codel.json320
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json35
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/gred.json150
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/hfsc.json173
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/hhf.json214
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/htb.json261
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/ingress.json50
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json182
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/mqprio.json114
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/multiq.json114
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/netem.json421
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/pfifo_fast.json109
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/pie.json24
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/plug.json172
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/prio.json99
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/qfq.json280
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/red.json51
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json255
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json304
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/skbprio.json87
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json306
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/tbf.json193
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/teql.json85
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py397
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.sh69
-rw-r--r--tools/testing/selftests/tc-testing/tdc_config.py3
-rw-r--r--tools/testing/selftests/tdx/.gitignore1
-rw-r--r--tools/testing/selftests/tdx/Makefile7
-rw-r--r--tools/testing/selftests/tdx/config1
-rw-r--r--tools/testing/selftests/tdx/tdx_guest_test.c163
-rw-r--r--tools/testing/selftests/thermal/intel/power_floor/.gitignore1
-rw-r--r--tools/testing/selftests/thermal/intel/power_floor/Makefile12
-rw-r--r--tools/testing/selftests/thermal/intel/power_floor/power_floor_test.c108
-rw-r--r--tools/testing/selftests/thermal/intel/workload_hint/.gitignore1
-rw-r--r--tools/testing/selftests/thermal/intel/workload_hint/Makefile12
-rw-r--r--tools/testing/selftests/thermal/intel/workload_hint/workload_hint_test.c157
-rw-r--r--tools/testing/selftests/timens/.gitignore2
-rw-r--r--tools/testing/selftests/timens/Makefile2
-rw-r--r--tools/testing/selftests/timens/clock_nanosleep.c4
-rw-r--r--tools/testing/selftests/timens/exec.c8
-rw-r--r--tools/testing/selftests/timens/futex.c2
-rw-r--r--tools/testing/selftests/timens/gettime_perf.c10
-rw-r--r--tools/testing/selftests/timens/procfs.c4
-rw-r--r--tools/testing/selftests/timens/timens.c4
-rw-r--r--tools/testing/selftests/timens/timens.h2
-rw-r--r--tools/testing/selftests/timens/timer.c6
-rw-r--r--tools/testing/selftests/timens/timerfd.c8
-rw-r--r--tools/testing/selftests/timens/vfork_exec.c141
-rw-r--r--tools/testing/selftests/timers/Makefile2
-rw-r--r--tools/testing/selftests/timers/adjtick.c14
-rw-r--r--tools/testing/selftests/timers/alarmtimer-suspend.c34
-rw-r--r--tools/testing/selftests/timers/change_skew.c11
-rw-r--r--tools/testing/selftests/timers/clocksource-switch.c79
-rw-r--r--tools/testing/selftests/timers/freq-step.c6
-rw-r--r--tools/testing/selftests/timers/inconsistency-check.c57
-rw-r--r--tools/testing/selftests/timers/leap-a-day.c16
-rw-r--r--tools/testing/selftests/timers/leapcrash.c10
-rw-r--r--tools/testing/selftests/timers/mqueue-lat.c8
-rw-r--r--tools/testing/selftests/timers/nanosleep.c96
-rw-r--r--tools/testing/selftests/timers/nsleep-lat.c48
-rw-r--r--tools/testing/selftests/timers/posix_timers.c673
-rw-r--r--tools/testing/selftests/timers/raw_skew.c17
-rw-r--r--tools/testing/selftests/timers/rtcpie.c13
-rw-r--r--tools/testing/selftests/timers/set-2038.c9
-rw-r--r--tools/testing/selftests/timers/set-tai.c6
-rw-r--r--tools/testing/selftests/timers/set-timer-lat.c29
-rw-r--r--tools/testing/selftests/timers/set-tz.c6
-rw-r--r--tools/testing/selftests/timers/skew_consistency.c12
-rw-r--r--tools/testing/selftests/timers/threadtest.c10
-rw-r--r--tools/testing/selftests/timers/valid-adjtimex.c87
-rw-r--r--tools/testing/selftests/tmpfs/Makefile1
-rw-r--r--tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c41
-rw-r--r--tools/testing/selftests/tpm2/.gitignore3
-rw-r--r--tools/testing/selftests/tpm2/Makefile2
-rw-r--r--tools/testing/selftests/tpm2/settings1
-rwxr-xr-xtools/testing/selftests/tpm2/test_async.sh10
-rwxr-xr-xtools/testing/selftests/tpm2/test_smoke.sh5
-rwxr-xr-xtools/testing/selftests/tpm2/test_space.sh2
-rw-r--r--tools/testing/selftests/tpm2/tpm2.py41
-rw-r--r--tools/testing/selftests/tpm2/tpm2_tests.py45
-rw-r--r--tools/testing/selftests/tty/.gitignore3
-rw-r--r--tools/testing/selftests/tty/Makefile9
-rw-r--r--tools/testing/selftests/tty/config1
-rw-r--r--tools/testing/selftests/tty/tty_tiocsti_test.c650
-rw-r--r--tools/testing/selftests/tty/tty_tstamp_update.c106
-rwxr-xr-xtools/testing/selftests/turbostat/added_perf_counters.py178
-rwxr-xr-xtools/testing/selftests/turbostat/defcolumns.py60
-rwxr-xr-xtools/testing/selftests/turbostat/smi_aperf_mperf.py157
-rw-r--r--tools/testing/selftests/ublk/.gitignore3
-rw-r--r--tools/testing/selftests/ublk/Makefile51
-rw-r--r--tools/testing/selftests/ublk/common.c55
-rw-r--r--tools/testing/selftests/ublk/config1
-rw-r--r--tools/testing/selftests/ublk/fault_inject.c106
-rw-r--r--tools/testing/selftests/ublk/file_backed.c182
-rw-r--r--tools/testing/selftests/ublk/kublk.c1729
-rw-r--r--tools/testing/selftests/ublk/kublk.h421
-rw-r--r--tools/testing/selftests/ublk/null.c152
-rw-r--r--tools/testing/selftests/ublk/stripe.c391
-rwxr-xr-xtools/testing/selftests/ublk/test_common.sh384
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_01.sh48
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_02.sh48
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_03.sh28
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_04.sh40
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_05.sh44
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_06.sh41
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_07.sh28
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_08.sh32
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_09.sh28
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_10.sh30
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_11.sh44
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_12.sh59
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_13.sh20
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_01.sh26
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_02.sh20
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_03.sh25
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_04.sh21
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_05.sh26
-rwxr-xr-xtools/testing/selftests/ublk/test_null_01.sh24
-rwxr-xr-xtools/testing/selftests/ublk/test_null_02.sh24
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_01.sh34
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_02.sh36
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_03.sh54
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_04.sh51
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_05.sh84
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_01.sh26
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_02.sh21
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_03.sh26
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_04.sh21
-rw-r--r--tools/testing/selftests/ublk/trace/count_ios_per_tid.bt11
-rw-r--r--tools/testing/selftests/ublk/trace/seq_io.bt25
-rw-r--r--tools/testing/selftests/ublk/ublk_dep.h18
-rw-r--r--tools/testing/selftests/ublk/utils.h68
-rw-r--r--tools/testing/selftests/uevent/.gitignore1
-rw-r--r--tools/testing/selftests/uevent/uevent_filtering.c10
-rw-r--r--tools/testing/selftests/user/config1
-rwxr-xr-xtools/testing/selftests/user/test_user_copy.sh18
-rw-r--r--tools/testing/selftests/user_events/.gitignore4
-rw-r--r--tools/testing/selftests/user_events/Makefile9
-rw-r--r--tools/testing/selftests/user_events/abi_test.c423
-rw-r--r--tools/testing/selftests/user_events/config1
-rw-r--r--tools/testing/selftests/user_events/dyn_test.c296
-rw-r--r--tools/testing/selftests/user_events/ftrace_test.c597
-rw-r--r--tools/testing/selftests/user_events/perf_test.c254
-rw-r--r--tools/testing/selftests/user_events/settings1
-rw-r--r--tools/testing/selftests/user_events/user_events_selftests.h114
-rw-r--r--tools/testing/selftests/vDSO/.gitignore4
-rw-r--r--tools/testing/selftests/vDSO/Makefile57
-rw-r--r--tools/testing/selftests/vDSO/parse_vdso.c159
-rw-r--r--tools/testing/selftests/vDSO/parse_vdso.h1
-rw-r--r--tools/testing/selftests/vDSO/vdso_call.h69
-rw-r--r--tools/testing/selftests/vDSO/vdso_config.h32
l---------[-rw-r--r--]tools/testing/selftests/vDSO/vdso_standalone_test_x86.c127
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_abi.c254
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_chacha.c133
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_clock_getres.c124
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_correctness.c34
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_getcpu.c17
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_getrandom.c322
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_gettimeofday.c32
-rw-r--r--tools/testing/selftests/vDSO/vgetrandom-chacha.S20
-rw-r--r--tools/testing/selftests/verification/.gitignore (renamed from tools/testing/selftests/sigaltstack/.gitignore)2
-rw-r--r--tools/testing/selftests/verification/Makefile8
-rw-r--r--tools/testing/selftests/verification/config1
-rw-r--r--tools/testing/selftests/verification/settings1
-rw-r--r--tools/testing/selftests/verification/test.d/functions39
-rw-r--r--tools/testing/selftests/verification/test.d/rv_monitor_enable_disable.tc75
-rw-r--r--tools/testing/selftests/verification/test.d/rv_monitor_reactor.tc68
-rw-r--r--tools/testing/selftests/verification/test.d/rv_monitors_available.tc18
-rw-r--r--tools/testing/selftests/verification/test.d/rv_wwnr_printk.tc30
-rwxr-xr-xtools/testing/selftests/verification/verificationtest-ktap8
-rw-r--r--tools/testing/selftests/vfio/.gitignore10
-rw-r--r--tools/testing/selftests/vfio/Makefile29
-rw-r--r--tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c416
l---------tools/testing/selftests/vfio/lib/drivers/dsa/registers.h1
l---------tools/testing/selftests/vfio/lib/drivers/ioat/hw.h1
-rw-r--r--tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c235
l---------tools/testing/selftests/vfio/lib/drivers/ioat/registers.h1
-rw-r--r--tools/testing/selftests/vfio/lib/include/libvfio.h26
-rw-r--r--tools/testing/selftests/vfio/lib/include/libvfio/assert.h54
-rw-r--r--tools/testing/selftests/vfio/lib/include/libvfio/iommu.h76
-rw-r--r--tools/testing/selftests/vfio/lib/include/libvfio/iova_allocator.h23
-rw-r--r--tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_device.h125
-rw-r--r--tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_driver.h97
-rw-r--r--tools/testing/selftests/vfio/lib/iommu.c465
-rw-r--r--tools/testing/selftests/vfio/lib/iova_allocator.c94
-rw-r--r--tools/testing/selftests/vfio/lib/libvfio.c78
-rw-r--r--tools/testing/selftests/vfio/lib/libvfio.mk29
-rw-r--r--tools/testing/selftests/vfio/lib/vfio_pci_device.c378
-rw-r--r--tools/testing/selftests/vfio/lib/vfio_pci_driver.c112
-rwxr-xr-xtools/testing/selftests/vfio/scripts/cleanup.sh41
-rwxr-xr-xtools/testing/selftests/vfio/scripts/lib.sh42
-rwxr-xr-xtools/testing/selftests/vfio/scripts/run.sh16
-rwxr-xr-xtools/testing/selftests/vfio/scripts/setup.sh48
-rw-r--r--tools/testing/selftests/vfio/vfio_dma_mapping_test.c312
-rw-r--r--tools/testing/selftests/vfio/vfio_iommufd_setup_test.c127
-rw-r--r--tools/testing/selftests/vfio/vfio_pci_device_init_perf_test.c168
-rw-r--r--tools/testing/selftests/vfio/vfio_pci_device_test.c182
-rw-r--r--tools/testing/selftests/vfio/vfio_pci_driver_test.c263
-rw-r--r--tools/testing/selftests/vm/.gitignore24
-rw-r--r--tools/testing/selftests/vm/Makefile157
-rw-r--r--tools/testing/selftests/vm/compaction_test.c231
-rw-r--r--tools/testing/selftests/vm/config6
-rw-r--r--tools/testing/selftests/vm/gup_test.c194
-rw-r--r--tools/testing/selftests/vm/hmm-tests.c1488
-rw-r--r--tools/testing/selftests/vm/khugepaged.c1035
-rw-r--r--tools/testing/selftests/vm/map_fixed_noreplace.c206
-rw-r--r--tools/testing/selftests/vm/map_hugetlb.c109
-rw-r--r--tools/testing/selftests/vm/mremap_test.c344
-rw-r--r--tools/testing/selftests/vm/on-fault-limit.c48
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests349
-rw-r--r--tools/testing/selftests/vm/transhuge-stress.c144
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c1548
-rw-r--r--tools/testing/selftests/vm/va_128TBswitch.c289
-rw-r--r--tools/testing/selftests/vm/virtual_address_range.c139
-rw-r--r--tools/testing/selftests/vsock/.gitignore2
-rw-r--r--tools/testing/selftests/vsock/Makefile17
-rw-r--r--tools/testing/selftests/vsock/config111
-rw-r--r--tools/testing/selftests/vsock/settings1
-rwxr-xr-xtools/testing/selftests/vsock/vmtest.sh607
-rw-r--r--tools/testing/selftests/watchdog/watchdog-test.c118
-rwxr-xr-xtools/testing/selftests/wireguard/netns.sh118
-rw-r--r--tools/testing/selftests/wireguard/qemu/.gitignore1
-rw-r--r--tools/testing/selftests/wireguard/qemu/Makefile255
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/aarch64.config5
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/aarch64_be.config5
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/arm.config6
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/armeb.config6
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/i686.config7
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/m68k.config10
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/mips.config3
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/mips64.config2
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/mips64el.config2
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/mipsel.config3
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/powerpc.config3
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/powerpc64.config13
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config2
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/riscv32.config14
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/riscv64.config13
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/s390x.config6
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/um.config3
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/x86_64.config6
-rw-r--r--tools/testing/selftests/wireguard/qemu/debug.config11
-rw-r--r--tools/testing/selftests/wireguard/qemu/init.c46
-rw-r--r--tools/testing/selftests/wireguard/qemu/kernel.config15
-rw-r--r--tools/testing/selftests/x86/Makefile61
-rw-r--r--tools/testing/selftests/x86/amx.c518
-rw-r--r--tools/testing/selftests/x86/apx.c10
-rw-r--r--tools/testing/selftests/x86/avx.c12
-rw-r--r--tools/testing/selftests/x86/bugs/Makefile3
-rwxr-xr-xtools/testing/selftests/x86/bugs/common.py164
-rwxr-xr-xtools/testing/selftests/x86/bugs/its_indirect_alignment.py150
-rwxr-xr-xtools/testing/selftests/x86/bugs/its_permutations.py109
-rwxr-xr-xtools/testing/selftests/x86/bugs/its_ret_alignment.py139
-rwxr-xr-xtools/testing/selftests/x86/bugs/its_sysfs.py65
-rwxr-xr-xtools/testing/selftests/x86/check_cc.sh2
-rw-r--r--tools/testing/selftests/x86/clang_helpers_32.S11
-rw-r--r--tools/testing/selftests/x86/clang_helpers_64.S28
-rw-r--r--tools/testing/selftests/x86/corrupt_xstate_header.c90
-rw-r--r--tools/testing/selftests/x86/entry_from_vm86.c24
-rw-r--r--tools/testing/selftests/x86/fsgsbase.c30
-rw-r--r--tools/testing/selftests/x86/fsgsbase_restore.c11
-rw-r--r--tools/testing/selftests/x86/helpers.h52
-rw-r--r--tools/testing/selftests/x86/ioperm.c25
-rw-r--r--tools/testing/selftests/x86/iopl.c103
-rw-r--r--tools/testing/selftests/x86/lam.c1371
-rw-r--r--tools/testing/selftests/x86/ldt_gdt.c20
-rw-r--r--tools/testing/selftests/x86/mov_ss_trap.c18
-rw-r--r--tools/testing/selftests/x86/nx_stack.c212
-rw-r--r--tools/testing/selftests/x86/ptrace_syscall.c24
-rw-r--r--tools/testing/selftests/x86/sigaltstack.c106
-rw-r--r--tools/testing/selftests/x86/sigreturn.c33
-rw-r--r--tools/testing/selftests/x86/sigtrap_loop.c101
-rw-r--r--tools/testing/selftests/x86/single_step_syscall.c26
-rw-r--r--tools/testing/selftests/x86/srso.c70
-rw-r--r--tools/testing/selftests/x86/syscall_arg_fault.c20
-rw-r--r--tools/testing/selftests/x86/syscall_nt.c12
-rw-r--r--tools/testing/selftests/x86/syscall_numbering.c492
-rw-r--r--tools/testing/selftests/x86/sysret_rip.c42
-rw-r--r--tools/testing/selftests/x86/test_FISTTP.c8
-rw-r--r--tools/testing/selftests/x86/test_mremap_vdso.c86
-rw-r--r--tools/testing/selftests/x86/test_shadow_stack.c1088
-rw-r--r--tools/testing/selftests/x86/test_vsyscall.c506
-rw-r--r--tools/testing/selftests/x86/thunks_32.S2
-rw-r--r--tools/testing/selftests/x86/unwind_vdso.c12
-rw-r--r--tools/testing/selftests/x86/vdso_restorer.c2
-rw-r--r--tools/testing/selftests/x86/xstate.c478
-rw-r--r--tools/testing/selftests/x86/xstate.h197
-rw-r--r--tools/testing/selftests/zram/.gitignore (renamed from tools/testing/selftests/rcutorture/formal/srcu-cbmc/.gitignore)2
-rw-r--r--tools/testing/selftests/zram/README1
-rwxr-xr-xtools/testing/selftests/zram/zram.sh15
-rwxr-xr-xtools/testing/selftests/zram/zram01.sh33
-rwxr-xr-xtools/testing/selftests/zram/zram02.sh1
-rwxr-xr-xtools/testing/selftests/zram/zram_lib.sh134
-rw-r--r--tools/testing/shared/autoconf.h (renamed from tools/testing/radix-tree/generated/autoconf.h)1
-rw-r--r--tools/testing/shared/interval_tree-shim.c5
-rw-r--r--tools/testing/shared/linux.c379
-rw-r--r--tools/testing/shared/linux/bug.h (renamed from tools/testing/radix-tree/linux/bug.h)0
-rw-r--r--tools/testing/shared/linux/cleanup.h2
-rw-r--r--tools/testing/shared/linux/cpu.h (renamed from tools/testing/radix-tree/linux/cpu.h)0
-rw-r--r--tools/testing/shared/linux/idr.h5
-rw-r--r--tools/testing/shared/linux/interval_tree.h7
-rw-r--r--tools/testing/shared/linux/interval_tree_generic.h2
-rw-r--r--tools/testing/shared/linux/kconfig.h (renamed from tools/testing/radix-tree/linux/kconfig.h)0
-rw-r--r--tools/testing/shared/linux/kernel.h (renamed from tools/testing/radix-tree/linux/kernel.h)3
-rw-r--r--tools/testing/shared/linux/kmemleak.h (renamed from tools/testing/radix-tree/linux/kmemleak.h)0
-rw-r--r--tools/testing/shared/linux/local_lock.h (renamed from tools/testing/radix-tree/linux/local_lock.h)0
-rw-r--r--tools/testing/shared/linux/lockdep.h (renamed from tools/testing/radix-tree/linux/lockdep.h)5
-rw-r--r--tools/testing/shared/linux/maple_tree.h5
-rw-r--r--tools/testing/shared/linux/percpu.h (renamed from tools/testing/radix-tree/linux/percpu.h)0
-rw-r--r--tools/testing/shared/linux/preempt.h (renamed from tools/testing/radix-tree/linux/preempt.h)0
-rw-r--r--tools/testing/shared/linux/radix-tree.h (renamed from tools/testing/radix-tree/linux/radix-tree.h)0
-rw-r--r--tools/testing/shared/linux/rbtree.h8
-rw-r--r--tools/testing/shared/linux/rbtree_augmented.h7
-rw-r--r--tools/testing/shared/linux/rbtree_types.h8
-rw-r--r--tools/testing/shared/linux/rcupdate.h (renamed from tools/testing/radix-tree/linux/rcupdate.h)0
-rw-r--r--tools/testing/shared/linux/xarray.h (renamed from tools/testing/radix-tree/linux/xarray.h)0
-rw-r--r--tools/testing/shared/maple-shared.h24
-rw-r--r--tools/testing/shared/maple-shim.c14
-rw-r--r--tools/testing/shared/rbtree-shim.c6
-rw-r--r--tools/testing/shared/shared.h37
-rw-r--r--tools/testing/shared/shared.mk79
-rw-r--r--tools/testing/shared/trace/events/maple_tree.h5
-rw-r--r--tools/testing/shared/xarray-shared.c5
-rw-r--r--tools/testing/shared/xarray-shared.h8
-rw-r--r--tools/testing/vma/.gitignore7
-rw-r--r--tools/testing/vma/Makefile18
-rw-r--r--tools/testing/vma/linux/mmzone.h38
-rw-r--r--tools/testing/vma/vma.c1785
-rw-r--r--tools/testing/vma/vma_internal.h1863
-rw-r--r--tools/testing/vsock/.gitignore2
-rw-r--r--tools/testing/vsock/Makefile26
-rw-r--r--tools/testing/vsock/README49
-rw-r--r--tools/testing/vsock/control.c37
-rw-r--r--tools/testing/vsock/control.h2
-rw-r--r--tools/testing/vsock/msg_zerocopy_common.c77
-rw-r--r--tools/testing/vsock/msg_zerocopy_common.h17
-rw-r--r--tools/testing/vsock/timeout.c18
-rw-r--r--tools/testing/vsock/timeout.h1
-rw-r--r--tools/testing/vsock/util.c732
-rw-r--r--tools/testing/vsock/util.h70
-rw-r--r--tools/testing/vsock/vsock_diag_test.c25
-rw-r--r--tools/testing/vsock/vsock_perf.c499
-rw-r--r--tools/testing/vsock/vsock_test.c2148
-rw-r--r--tools/testing/vsock/vsock_test_zerocopy.c358
-rw-r--r--tools/testing/vsock/vsock_test_zerocopy.h15
-rw-r--r--tools/testing/vsock/vsock_uring_test.c353
4474 files changed, 692032 insertions, 77390 deletions
diff --git a/tools/testing/crypto/chacha20-s390/Makefile b/tools/testing/crypto/chacha20-s390/Makefile
new file mode 100644
index 000000000000..db81cd2fb9c5
--- /dev/null
+++ b/tools/testing/crypto/chacha20-s390/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2022 Red Hat, Inc.
+# Author: Vladis Dronov <vdronoff@gmail.com>
+
+obj-m += test_cipher.o
+test_cipher-y := test-cipher.o
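+
+# Typical usage (assuming headers for the running kernel are installed):
+#   make && bash run-tests.sh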
+
+all:
+ make -C /lib/modules/$(shell uname -r)/build/ M=$(PWD) modules
+clean:
+ make -C /lib/modules/$(shell uname -r)/build/ M=$(PWD) clean
diff --git a/tools/testing/crypto/chacha20-s390/run-tests.sh b/tools/testing/crypto/chacha20-s390/run-tests.sh
new file mode 100644
index 000000000000..43108794b996
--- /dev/null
+++ b/tools/testing/crypto/chacha20-s390/run-tests.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2022 Red Hat, Inc.
+# Author: Vladis Dronov <vdronoff@gmail.com>
+#
+# This script runs (via insmod) the test_cipher.ko module, which invokes
+# the generic and s390-native ChaCha20 encryption algorithms with
+# different sizes of data. Check 'dmesg' for the results.
+#
+# The insmod error is expected:
+# insmod: ERROR: could not insert module test_cipher.ko: Operation not permitted
+
+lsmod | grep chacha | cut -f1 -d' ' | xargs rmmod
+modprobe chacha_generic
+modprobe chacha_s390
+
+# run encryption for different data sizes, including whole block(s) +/- 1
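+# (ChaCha20 works on 64-byte blocks, so 63/64/65 and 127/128/129 probe the
+# one-short, exact, and one-over block boundaries.)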
+insmod test_cipher.ko size=63
+insmod test_cipher.ko size=64
+insmod test_cipher.ko size=65
+insmod test_cipher.ko size=127
+insmod test_cipher.ko size=128
+insmod test_cipher.ko size=129
+insmod test_cipher.ko size=511
+insmod test_cipher.ko size=512
+insmod test_cipher.ko size=513
+insmod test_cipher.ko size=4096
+insmod test_cipher.ko size=65611
+insmod test_cipher.ko size=6291456
+insmod test_cipher.ko size=62914560
+
+# print test logs
+dmesg | tail -170
diff --git a/tools/testing/crypto/chacha20-s390/test-cipher.c b/tools/testing/crypto/chacha20-s390/test-cipher.c
new file mode 100644
index 000000000000..827507844e8f
--- /dev/null
+++ b/tools/testing/crypto/chacha20-s390/test-cipher.c
@@ -0,0 +1,369 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2022 Red Hat, Inc.
+ * Author: Vladis Dronov <vdronoff@gmail.com>
+ */
+
+#include <asm/elf.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <crypto/skcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/acompress.h>
+#include <crypto/rng.h>
+#include <crypto/drbg.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/simd.h>
+#include <crypto/chacha.h>
+#include <crypto/aead.h>
+#include <crypto/hash.h>
+#include <linux/crypto.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/fips.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/time.h>
+#include <linux/vmalloc.h>
+#include <linux/zlib.h>
+#include <linux/once.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+static unsigned int data_size __read_mostly = 256;
+static unsigned int debug __read_mostly;
+
+/* tie all skcipher structures together */
+struct skcipher_def {
+ struct scatterlist sginp, sgout;
+ struct crypto_skcipher *tfm;
+ struct skcipher_request *req;
+ struct crypto_wait wait;
+};
+
+/* Perform cipher operations with the chacha lib */
+static int test_lib_chacha(u8 *revert, u8 *cipher, u8 *plain)
+{
+ struct chacha_state chacha_state;
+ u8 iv[16], key[32];
+ u64 start, end;
+
+ memset(key, 'X', sizeof(key));
+ memset(iv, 'I', sizeof(iv));
+
+ if (debug) {
+ print_hex_dump(KERN_INFO, "key: ", DUMP_PREFIX_OFFSET,
+ 16, 1, key, 32, 1);
+
+ print_hex_dump(KERN_INFO, "iv: ", DUMP_PREFIX_OFFSET,
+ 16, 1, iv, 16, 1);
+ }
+
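+ /* the last chacha_crypt_arch() argument is the round count: 20 = ChaCha20 */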
+ /* Encrypt */
+ chacha_init(&chacha_state, (u32 *)key, iv);
+
+ start = ktime_get_ns();
+ chacha_crypt_arch(&chacha_state, cipher, plain, data_size, 20);
+ end = ktime_get_ns();
+
+ if (debug)
+ print_hex_dump(KERN_INFO, "encr:", DUMP_PREFIX_OFFSET,
+ 16, 1, cipher,
+ (data_size > 64 ? 64 : data_size), 1);
+
+ pr_info("lib encryption took: %lld nsec", end - start);
+
+ /* Decrypt */
+ chacha_init(&chacha_state, (u32 *)key, iv);
+
+ start = ktime_get_ns();
+ chacha_crypt_arch(&chacha_state, revert, cipher, data_size, 20);
+ end = ktime_get_ns();
+
+ if (debug)
+ print_hex_dump(KERN_INFO, "decr:", DUMP_PREFIX_OFFSET,
+ 16, 1, revert,
+ (data_size > 64 ? 64 : data_size), 1);
+
+ pr_info("lib decryption took: %lld nsec", end - start);
+
+ return 0;
+}
+
+/* Perform cipher operations with skcipher */
+static int test_skcipher_encdec(struct skcipher_def *sk, int enc)
+{
+ int rc;
+
+ if (enc) {
+ rc = crypto_wait_req(crypto_skcipher_encrypt(sk->req),
+ &sk->wait);
+ if (rc)
+ pr_info("skcipher encrypt returned with result %d\n",
+ rc);
+ } else {
+ rc = crypto_wait_req(crypto_skcipher_decrypt(sk->req),
+ &sk->wait);
+ if (rc)
+ pr_info("skcipher decrypt returned with result %d\n",
+ rc);
+ }
+
+ return rc;
+}
+
+/* Initialize and trigger cipher operations */
+static int test_skcipher(char *name, u8 *revert, u8 *cipher, u8 *plain)
+{
+ struct skcipher_def sk;
+ struct crypto_skcipher *skcipher = NULL;
+ struct skcipher_request *req = NULL;
+ u8 iv[16], key[32];
+ u64 start, end;
+ int ret = -EFAULT;
+
+ skcipher = crypto_alloc_skcipher(name, 0, 0);
+ if (IS_ERR(skcipher)) {
+ pr_info("could not allocate skcipher %s handle\n", name);
+ return PTR_ERR(skcipher);
+ }
+
+ req = skcipher_request_alloc(skcipher, GFP_KERNEL);
+ if (!req) {
+ pr_info("could not allocate skcipher request\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done,
+ &sk.wait);
+
+ memset(key, 'X', sizeof(key));
+ memset(iv, 'I', sizeof(iv));
+
+ if (crypto_skcipher_setkey(skcipher, key, 32)) {
+ pr_info("key could not be set\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (debug) {
+ print_hex_dump(KERN_INFO, "key: ", DUMP_PREFIX_OFFSET,
+ 16, 1, key, 32, 1);
+
+ print_hex_dump(KERN_INFO, "iv: ", DUMP_PREFIX_OFFSET,
+ 16, 1, iv, 16, 1);
+ }
+
+ sk.tfm = skcipher;
+ sk.req = req;
+
+ /* Encrypt in one pass */
+ sg_init_one(&sk.sginp, plain, data_size);
+ sg_init_one(&sk.sgout, cipher, data_size);
+ skcipher_request_set_crypt(req, &sk.sginp, &sk.sgout,
+ data_size, iv);
+ crypto_init_wait(&sk.wait);
+
+ /* Encrypt data */
+ start = ktime_get_ns();
+ ret = test_skcipher_encdec(&sk, 1);
+ end = ktime_get_ns();
+
+ if (ret)
+ goto out;
+
+ pr_info("%s tfm encryption successful, took %lld nsec\n", name, end - start);
+
+ if (debug)
+ print_hex_dump(KERN_INFO, "encr:", DUMP_PREFIX_OFFSET,
+ 16, 1, cipher,
+ (data_size > 64 ? 64 : data_size), 1);
+
+ /* Prepare for decryption */
+ memset(iv, 'I', sizeof(iv));
+
+ sg_init_one(&sk.sginp, cipher, data_size);
+ sg_init_one(&sk.sgout, revert, data_size);
+ skcipher_request_set_crypt(req, &sk.sginp, &sk.sgout,
+ data_size, iv);
+ crypto_init_wait(&sk.wait);
+
+ /* Decrypt data */
+ start = ktime_get_ns();
+ ret = test_skcipher_encdec(&sk, 0);
+ end = ktime_get_ns();
+
+ if (ret)
+ goto out;
+
+ pr_info("%s tfm decryption successful, took %lld nsec\n", name, end - start);
+
+ if (debug)
+ print_hex_dump(KERN_INFO, "decr:", DUMP_PREFIX_OFFSET,
+ 16, 1, revert,
+ (data_size > 64 ? 64 : data_size), 1);
+
+ /* Dump some internal skcipher data */
+ if (debug)
+ pr_info("skcipher %s: cryptlen %d blksize %d stride %d "
+ "ivsize %d alignmask 0x%x\n",
+ name, sk.req->cryptlen,
+ crypto_skcipher_blocksize(sk.tfm),
+ crypto_skcipher_alg(sk.tfm)->walksize,
+ crypto_skcipher_ivsize(sk.tfm),
+ crypto_skcipher_alignmask(sk.tfm));
+
+out:
+ if (skcipher)
+ crypto_free_skcipher(skcipher);
+ if (req)
+ skcipher_request_free(req);
+ return ret;
+}
+
+static int __init chacha_s390_test_init(void)
+{
+ u8 *plain = NULL, *revert = NULL;
+ u8 *cipher_generic = NULL, *cipher_s390 = NULL;
+ int ret = -1;
+
+ pr_info("s390 ChaCha20 test module: size=%d debug=%d\n",
+ data_size, debug);
+
+ /* Allocate and fill buffers */
+ plain = vmalloc(data_size);
+ if (!plain) {
+ pr_info("could not allocate plain buffer\n");
+ ret = -2;
+ goto out;
+ }
+ memset(plain, 'a', data_size);
+ get_random_bytes(plain, (data_size > 256 ? 256 : data_size));
+
+ cipher_generic = vzalloc(data_size);
+ if (!cipher_generic) {
+ pr_info("could not allocate cipher_generic buffer\n");
+ ret = -2;
+ goto out;
+ }
+
+ cipher_s390 = vzalloc(data_size);
+ if (!cipher_s390) {
+ pr_info("could not allocate cipher_s390 buffer\n");
+ ret = -2;
+ goto out;
+ }
+
+ revert = vzalloc(data_size);
+ if (!revert) {
+ pr_info("could not allocate revert buffer\n");
+ ret = -2;
+ goto out;
+ }
+
+ if (debug)
+ print_hex_dump(KERN_INFO, "src: ", DUMP_PREFIX_OFFSET,
+ 16, 1, plain,
+ (data_size > 64 ? 64 : data_size), 1);
+
+ /* Use chacha20 generic */
+ ret = test_skcipher("chacha20-generic", revert, cipher_generic, plain);
+ if (ret)
+ goto out;
+
+ if (memcmp(plain, revert, data_size)) {
+ pr_info("generic en/decryption check FAILED\n");
+ ret = -2;
+ goto out;
+ }
+ pr_info("generic en/decryption check OK\n");
+
+ memset(revert, 0, data_size);
+
+ /* Use chacha20 s390 */
+ ret = test_skcipher("chacha20-s390", revert, cipher_s390, plain);
+ if (ret)
+ goto out;
+
+ if (memcmp(plain, revert, data_size)) {
+ pr_info("s390 en/decryption check FAILED\n");
+ ret = -2;
+ goto out;
+ }
+ pr_info("s390 en/decryption check OK\n");
+
+ if (memcmp(cipher_generic, cipher_s390, data_size)) {
+ pr_info("s390 vs generic check FAILED\n");
+ ret = -2;
+ goto out;
+ }
+ pr_info("s390 vs generic check OK\n");
+
+ memset(cipher_s390, 0, data_size);
+ memset(revert, 0, data_size);
+
+ /* Use chacha20 lib */
+ test_lib_chacha(revert, cipher_s390, plain);
+
+ if (memcmp(plain, revert, data_size)) {
+ pr_info("lib en/decryption check FAILED\n");
+ ret = -2;
+ goto out;
+ }
+ pr_info("lib en/decryption check OK\n");
+
+ if (memcmp(cipher_generic, cipher_s390, data_size)) {
+ pr_info("lib vs generic check FAILED\n");
+ ret = -2;
+ goto out;
+ }
+ pr_info("lib vs generic check OK\n");
+
+ pr_info("--- chacha20 s390 test end ---\n");
+
+out:
+ /* vfree() tolerates NULL pointers */
+ vfree(plain);
+ vfree(cipher_generic);
+ vfree(cipher_s390);
+ vfree(revert);
+
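+ /*
+ * Always return an error so the module never stays loaded; the
+ * resulting "Operation not permitted" insmod error is expected
+ * (see run-tests.sh).
+ */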
+ return -1;
+}
+
+static void __exit chacha_s390_test_exit(void)
+{
+ pr_info("s390 ChaCha20 test module exit\n");
+}
+
+module_param_named(size, data_size, uint, 0660);
+module_param(debug, uint, 0660);
+MODULE_PARM_DESC(size, "Size of the plaintext in bytes");
+MODULE_PARM_DESC(debug, "Debug level (0=off,1=on)");
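+/* Example: insmod test_cipher.ko size=4096 debug=1 */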
+
+module_init(chacha_s390_test_init);
+module_exit(chacha_s390_test_exit);
+
+MODULE_DESCRIPTION("s390 ChaCha20 self-test");
+MODULE_AUTHOR("Vladis Dronov <vdronoff@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
new file mode 100644
index 000000000000..0e151d0572d1
--- /dev/null
+++ b/tools/testing/cxl/Kbuild
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0
+ldflags-y += --wrap=acpi_table_parse_cedt
+ldflags-y += --wrap=is_acpi_device_node
+ldflags-y += --wrap=acpi_evaluate_integer
+ldflags-y += --wrap=acpi_pci_find_root
+ldflags-y += --wrap=nvdimm_bus_register
+ldflags-y += --wrap=cxl_await_media_ready
+ldflags-y += --wrap=devm_cxl_add_rch_dport
+ldflags-y += --wrap=cxl_endpoint_parse_cdat
+ldflags-y += --wrap=cxl_dport_init_ras_reporting
+ldflags-y += --wrap=devm_cxl_endpoint_decoders_setup
+ldflags-y += --wrap=hmat_get_extended_linear_cache_size
+
+DRIVERS := ../../../drivers
+CXL_SRC := $(DRIVERS)/cxl
+CXL_CORE_SRC := $(DRIVERS)/cxl/core
+ccflags-y := -I$(srctree)/drivers/cxl/
+ccflags-y += -D__mock=__weak
+ccflags-y += -DCXL_TEST_ENABLE=1
+ccflags-y += -DTRACE_INCLUDE_PATH=$(CXL_CORE_SRC) -I$(srctree)/drivers/cxl/core/
+
+obj-m += cxl_acpi.o
+
+cxl_acpi-y := $(CXL_SRC)/acpi.o
+cxl_acpi-y += mock_acpi.o
+cxl_acpi-y += config_check.o
+cxl_acpi-y += cxl_acpi_test.o
+
+obj-m += cxl_pmem.o
+
+cxl_pmem-y := $(CXL_SRC)/pmem.o
+cxl_pmem-y += $(CXL_SRC)/security.o
+cxl_pmem-y += config_check.o
+cxl_pmem-y += cxl_pmem_test.o
+
+obj-m += cxl_port.o
+
+cxl_port-y := $(CXL_SRC)/port.o
+cxl_port-y += config_check.o
+cxl_port-y += cxl_port_test.o
+
+obj-m += cxl_mem.o
+
+cxl_mem-y := $(CXL_SRC)/mem.o
+cxl_mem-y += config_check.o
+cxl_mem-y += cxl_mem_test.o
+
+obj-m += cxl_core.o
+
+cxl_core-y := $(CXL_CORE_SRC)/port.o
+cxl_core-y += $(CXL_CORE_SRC)/pmem.o
+cxl_core-y += $(CXL_CORE_SRC)/regs.o
+cxl_core-y += $(CXL_CORE_SRC)/memdev.o
+cxl_core-y += $(CXL_CORE_SRC)/mbox.o
+cxl_core-y += $(CXL_CORE_SRC)/pci.o
+cxl_core-y += $(CXL_CORE_SRC)/hdm.o
+cxl_core-y += $(CXL_CORE_SRC)/pmu.o
+cxl_core-y += $(CXL_CORE_SRC)/cdat.o
+cxl_core-y += $(CXL_CORE_SRC)/ras.o
+cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
+cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
+cxl_core-$(CONFIG_CXL_MCE) += $(CXL_CORE_SRC)/mce.o
+cxl_core-$(CONFIG_CXL_FEATURES) += $(CXL_CORE_SRC)/features.o
+cxl_core-$(CONFIG_CXL_EDAC_MEM_FEATURES) += $(CXL_CORE_SRC)/edac.o
+cxl_core-y += config_check.o
+cxl_core-y += cxl_core_test.o
+cxl_core-y += cxl_core_exports.o
+
+KBUILD_CFLAGS := $(filter-out -Wmissing-prototypes -Wmissing-declarations, $(KBUILD_CFLAGS))
+
+obj-m += test/
diff --git a/tools/testing/cxl/config_check.c b/tools/testing/cxl/config_check.c
new file mode 100644
index 000000000000..a80bc2c062fe
--- /dev/null
+++ b/tools/testing/cxl/config_check.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bug.h>
+
+void check(void)
+{
+ /*
+ * These kconfig symbols must be set to "m" for cxl_test to load
+ * and operate.
+ */
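+ /* BUILD_BUG_ON() makes any misconfiguration a compile-time error. */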
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT));
+ BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_BUS));
+ BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_ACPI));
+ BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_PMEM));
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST));
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST));
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_DEBUG_FS));
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_MEMORY_HOTPLUG));
+}
diff --git a/tools/testing/cxl/cxl_acpi_test.c b/tools/testing/cxl/cxl_acpi_test.c
new file mode 100644
index 000000000000..8602dc27c81c
--- /dev/null
+++ b/tools/testing/cxl/cxl_acpi_test.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+
+#include "watermark.h"
+
+cxl_test_watermark(cxl_acpi);
diff --git a/tools/testing/cxl/cxl_core_exports.c b/tools/testing/cxl/cxl_core_exports.c
new file mode 100644
index 000000000000..6754de35598d
--- /dev/null
+++ b/tools/testing/cxl/cxl_core_exports.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+
+#include "cxl.h"
+#include "exports.h"
+
+/* Export cxl_core symbols that are only used by cxl_test */
+EXPORT_SYMBOL_NS_GPL(cxl_num_decoders_committed, "CXL");
+
+cxl_add_dport_by_dev_fn _devm_cxl_add_dport_by_dev =
+ __devm_cxl_add_dport_by_dev;
+EXPORT_SYMBOL_NS_GPL(_devm_cxl_add_dport_by_dev, "CXL");
+
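+/*
+ * Call through the writable pointer above so that the cxl_test mocks can
+ * substitute their own implementation.
+ */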
+struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ return _devm_cxl_add_dport_by_dev(port, dport_dev);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport_by_dev, "CXL");
+
+cxl_switch_decoders_setup_fn _devm_cxl_switch_port_decoders_setup =
+ __devm_cxl_switch_port_decoders_setup;
+EXPORT_SYMBOL_NS_GPL(_devm_cxl_switch_port_decoders_setup, "CXL");
+
+int devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
+{
+ return _devm_cxl_switch_port_decoders_setup(port);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_switch_port_decoders_setup, "CXL");
diff --git a/tools/testing/cxl/cxl_core_test.c b/tools/testing/cxl/cxl_core_test.c
new file mode 100644
index 000000000000..464a9255e4d6
--- /dev/null
+++ b/tools/testing/cxl/cxl_core_test.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+
+#include "watermark.h"
+
+cxl_test_watermark(cxl_core);
diff --git a/tools/testing/cxl/cxl_mem_test.c b/tools/testing/cxl/cxl_mem_test.c
new file mode 100644
index 000000000000..ba7fb8a44288
--- /dev/null
+++ b/tools/testing/cxl/cxl_mem_test.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+
+#include "watermark.h"
+
+cxl_test_watermark(cxl_mem);
diff --git a/tools/testing/cxl/cxl_pmem_test.c b/tools/testing/cxl/cxl_pmem_test.c
new file mode 100644
index 000000000000..3fd884fae537
--- /dev/null
+++ b/tools/testing/cxl/cxl_pmem_test.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+
+#include "watermark.h"
+
+cxl_test_watermark(cxl_pmem);
diff --git a/tools/testing/cxl/cxl_port_test.c b/tools/testing/cxl/cxl_port_test.c
new file mode 100644
index 000000000000..be183917a9f6
--- /dev/null
+++ b/tools/testing/cxl/cxl_port_test.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+
+#include "watermark.h"
+
+cxl_test_watermark(cxl_port);
diff --git a/tools/testing/cxl/exports.h b/tools/testing/cxl/exports.h
new file mode 100644
index 000000000000..7ebee7c0bd67
--- /dev/null
+++ b/tools/testing/cxl/exports.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef __MOCK_CXL_EXPORTS_H_
+#define __MOCK_CXL_EXPORTS_H_
+
+typedef struct cxl_dport *(*cxl_add_dport_by_dev_fn)(struct cxl_port *port,
+ struct device *dport_dev);
+extern cxl_add_dport_by_dev_fn _devm_cxl_add_dport_by_dev;
+
+typedef int (*cxl_switch_decoders_setup_fn)(struct cxl_port *port);
+extern cxl_switch_decoders_setup_fn _devm_cxl_switch_port_decoders_setup;
+
+#endif
diff --git a/tools/testing/cxl/mock_acpi.c b/tools/testing/cxl/mock_acpi.c
new file mode 100644
index 000000000000..8da94378ccec
--- /dev/null
+++ b/tools/testing/cxl/mock_acpi.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/acpi.h>
+#include <cxl.h>
+#include "test/mock.h"
+
+struct acpi_device *to_cxl_host_bridge(struct device *host, struct device *dev)
+{
+ int index;
+ struct acpi_device *adev, *found = NULL;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops && ops->is_mock_bridge(dev)) {
+ found = ACPI_COMPANION(dev);
+ goto out;
+ }
+
+ if (dev_is_platform(dev))
+ goto out;
+
+ adev = to_acpi_device(dev);
+ if (!acpi_pci_find_root(adev->handle))
+ goto out;
+
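+ /* "ACPI0016" is the ACPI _HID of a CXL host bridge */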
+ if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0) {
+ found = adev;
+ dev_dbg(host, "found host bridge %s\n", dev_name(&adev->dev));
+ }
+out:
+ put_cxl_mock_ops(index);
+ return found;
+}
diff --git a/tools/testing/cxl/test/Kbuild b/tools/testing/cxl/test/Kbuild
new file mode 100644
index 000000000000..af50972c8b6d
--- /dev/null
+++ b/tools/testing/cxl/test/Kbuild
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-y := -I$(srctree)/drivers/cxl/ -I$(srctree)/drivers/cxl/core
+
+obj-m += cxl_test.o
+obj-m += cxl_mock.o
+obj-m += cxl_mock_mem.o
+obj-m += cxl_translate.o
+
+cxl_test-y := cxl.o
+cxl_mock-y := mock.o
+cxl_mock_mem-y := mem.o
+
+KBUILD_CFLAGS := $(filter-out -Wmissing-prototypes -Wmissing-declarations, $(KBUILD_CFLAGS))
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
new file mode 100644
index 000000000000..81e2aef3627a
--- /dev/null
+++ b/tools/testing/cxl/test/cxl.c
@@ -0,0 +1,1628 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright(c) 2021 Intel Corporation. All rights reserved.
+
+#include <linux/platform_device.h>
+#include <linux/memory_hotplug.h>
+#include <linux/genalloc.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <cxlmem.h>
+
+#include "../watermark.h"
+#include "mock.h"
+
+static int interleave_arithmetic;
+static bool extended_linear_cache;
+
+#define FAKE_QTG_ID 42
+
+#define NR_CXL_HOST_BRIDGES 2
+#define NR_CXL_SINGLE_HOST 1
+#define NR_CXL_RCH 1
+#define NR_CXL_ROOT_PORTS 2
+#define NR_CXL_SWITCH_PORTS 2
+#define NR_CXL_PORT_DECODERS 8
+#define NR_BRIDGES (NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + NR_CXL_RCH)
+
+#define MOCK_AUTO_REGION_SIZE_DEFAULT SZ_512M
+static int mock_auto_region_size = MOCK_AUTO_REGION_SIZE_DEFAULT;
+
+static struct platform_device *cxl_acpi;
+static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
+#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
+static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
+static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
+#define NR_MEM_MULTI \
+ (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
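+/* i.e. 2 host bridges x 2 root ports x 2 switch ports = 8 endpoints */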
+static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];
+
+static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
+static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
+static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
+#define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
+static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];
+
+struct platform_device *cxl_mem[NR_MEM_MULTI];
+struct platform_device *cxl_mem_single[NR_MEM_SINGLE];
+
+static struct platform_device *cxl_rch[NR_CXL_RCH];
+static struct platform_device *cxl_rcd[NR_CXL_RCH];
+
+static inline bool is_multi_bridge(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
+ if (&cxl_host_bridge[i]->dev == dev)
+ return true;
+ return false;
+}
+
+static inline bool is_single_bridge(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
+ if (&cxl_hb_single[i]->dev == dev)
+ return true;
+ return false;
+}
+
+static struct acpi_device acpi0017_mock;
+static struct acpi_device host_bridge[NR_BRIDGES] = {
+ [0] = {
+ .handle = &host_bridge[0],
+ .pnp.unique_id = "0",
+ },
+ [1] = {
+ .handle = &host_bridge[1],
+ .pnp.unique_id = "1",
+ },
+ [2] = {
+ .handle = &host_bridge[2],
+ .pnp.unique_id = "2",
+ },
+ [3] = {
+ .handle = &host_bridge[3],
+ .pnp.unique_id = "3",
+ },
+};
+
+static bool is_mock_dev(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
+ if (dev == &cxl_mem[i]->dev)
+ return true;
+ for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
+ if (dev == &cxl_mem_single[i]->dev)
+ return true;
+ for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++)
+ if (dev == &cxl_rcd[i]->dev)
+ return true;
+ if (dev == &cxl_acpi->dev)
+ return true;
+ return false;
+}
+
+static bool is_mock_adev(struct acpi_device *adev)
+{
+ int i;
+
+ if (adev == &acpi0017_mock)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
+ if (adev == &host_bridge[i])
+ return true;
+
+ return false;
+}
+
+static struct {
+ struct acpi_table_cedt cedt;
+ struct acpi_cedt_chbs chbs[NR_BRIDGES];
+ struct {
+ struct acpi_cedt_cfmws cfmws;
+ u32 target[1];
+ } cfmws0;
+ struct {
+ struct acpi_cedt_cfmws cfmws;
+ u32 target[2];
+ } cfmws1;
+ struct {
+ struct acpi_cedt_cfmws cfmws;
+ u32 target[1];
+ } cfmws2;
+ struct {
+ struct acpi_cedt_cfmws cfmws;
+ u32 target[2];
+ } cfmws3;
+ struct {
+ struct acpi_cedt_cfmws cfmws;
+ u32 target[1];
+ } cfmws4;
+ struct {
+ struct acpi_cedt_cfmws cfmws;
+ u32 target[1];
+ } cfmws5;
+ struct {
+ struct acpi_cedt_cfmws cfmws;
+ u32 target[1];
+ } cfmws6;
+ struct {
+ struct acpi_cedt_cfmws cfmws;
+ u32 target[2];
+ } cfmws7;
+ struct {
+ struct acpi_cedt_cfmws cfmws;
+ u32 target[3];
+ } cfmws8;
+ struct {
+ struct acpi_cedt_cxims cxims;
+ u64 xormap_list[2];
+ } cxims0;
+} __packed mock_cedt = {
+ .cedt = {
+ .header = {
+ .signature = "CEDT",
+ .length = sizeof(mock_cedt),
+ .revision = 1,
+ },
+ },
+ .chbs[0] = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CHBS,
+ .length = sizeof(mock_cedt.chbs[0]),
+ },
+ .uid = 0,
+ .cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
+ },
+ .chbs[1] = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CHBS,
+ .length = sizeof(mock_cedt.chbs[0]),
+ },
+ .uid = 1,
+ .cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
+ },
+ .chbs[2] = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CHBS,
+ .length = sizeof(mock_cedt.chbs[0]),
+ },
+ .uid = 2,
+ .cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
+ },
+ .chbs[3] = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CHBS,
+ .length = sizeof(mock_cedt.chbs[0]),
+ },
+ .uid = 3,
+ .cxl_version = ACPI_CEDT_CHBS_VERSION_CXL11,
+ },
+ .cfmws0 = {
+ .cfmws = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CFMWS,
+ .length = sizeof(mock_cedt.cfmws0),
+ },
+ .interleave_ways = 0,
+ .granularity = 4,
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
+ ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
+ .qtg_id = FAKE_QTG_ID,
+ .window_size = SZ_256M * 4UL,
+ },
+ .target = { 0 },
+ },
+ .cfmws1 = {
+ .cfmws = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CFMWS,
+ .length = sizeof(mock_cedt.cfmws1),
+ },
+ .interleave_ways = 1,
+ .granularity = 4,
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
+ ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
+ .qtg_id = FAKE_QTG_ID,
+ .window_size = SZ_256M * 8UL,
+ },
+ .target = { 0, 1, },
+ },
+ .cfmws2 = {
+ .cfmws = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CFMWS,
+ .length = sizeof(mock_cedt.cfmws2),
+ },
+ .interleave_ways = 0,
+ .granularity = 4,
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
+ ACPI_CEDT_CFMWS_RESTRICT_PMEM,
+ .qtg_id = FAKE_QTG_ID,
+ .window_size = SZ_256M * 4UL,
+ },
+ .target = { 0 },
+ },
+ .cfmws3 = {
+ .cfmws = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CFMWS,
+ .length = sizeof(mock_cedt.cfmws3),
+ },
+ .interleave_ways = 1,
+ .granularity = 4,
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
+ ACPI_CEDT_CFMWS_RESTRICT_PMEM,
+ .qtg_id = FAKE_QTG_ID,
+ .window_size = SZ_256M * 8UL,
+ },
+ .target = { 0, 1, },
+ },
+ .cfmws4 = {
+ .cfmws = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CFMWS,
+ .length = sizeof(mock_cedt.cfmws4),
+ },
+ .interleave_ways = 0,
+ .granularity = 4,
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
+ ACPI_CEDT_CFMWS_RESTRICT_PMEM,
+ .qtg_id = FAKE_QTG_ID,
+ .window_size = SZ_256M * 4UL,
+ },
+ .target = { 2 },
+ },
+ .cfmws5 = {
+ .cfmws = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CFMWS,
+ .length = sizeof(mock_cedt.cfmws5),
+ },
+ .interleave_ways = 0,
+ .granularity = 4,
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
+ ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
+ .qtg_id = FAKE_QTG_ID,
+ .window_size = SZ_256M,
+ },
+ .target = { 3 },
+ },
+ /* .cfmws6,7,8 use ACPI_CEDT_CFMWS_ARITHMETIC_XOR */
+ .cfmws6 = {
+ .cfmws = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CFMWS,
+ .length = sizeof(mock_cedt.cfmws6),
+ },
+ .interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
+ .interleave_ways = 0,
+ .granularity = 4,
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
+ ACPI_CEDT_CFMWS_RESTRICT_PMEM,
+ .qtg_id = FAKE_QTG_ID,
+ .window_size = SZ_256M * 8UL,
+ },
+ .target = { 0, },
+ },
+ .cfmws7 = {
+ .cfmws = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CFMWS,
+ .length = sizeof(mock_cedt.cfmws7),
+ },
+ .interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
+ .interleave_ways = 1,
+ .granularity = 0,
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
+ ACPI_CEDT_CFMWS_RESTRICT_PMEM,
+ .qtg_id = FAKE_QTG_ID,
+ .window_size = SZ_256M * 8UL,
+ },
+ .target = { 0, 1, },
+ },
+ .cfmws8 = {
+ .cfmws = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CFMWS,
+ .length = sizeof(mock_cedt.cfmws8),
+ },
+ .interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
+ .interleave_ways = 8,
+ .granularity = 1,
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
+ ACPI_CEDT_CFMWS_RESTRICT_PMEM,
+ .qtg_id = FAKE_QTG_ID,
+ .window_size = SZ_512M * 6UL,
+ },
+ .target = { 0, 1, 2, },
+ },
+ .cxims0 = {
+ .cxims = {
+ .header = {
+ .type = ACPI_CEDT_TYPE_CXIMS,
+ .length = sizeof(mock_cedt.cxims0),
+ },
+ .hbig = 0,
+ .nr_xormaps = 2,
+ },
+ .xormap_list = { 0x404100, 0x808200, },
+ },
+};
+
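+/*
+ * CEDT subtables above: CHBS = CXL Host Bridge Structure, CFMWS = CXL
+ * Fixed Memory Window Structure, CXIMS = CXL XOR Interleave Math
+ * Structure.
+ */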
+struct acpi_cedt_cfmws *mock_cfmws[] = {
+ [0] = &mock_cedt.cfmws0.cfmws,
+ [1] = &mock_cedt.cfmws1.cfmws,
+ [2] = &mock_cedt.cfmws2.cfmws,
+ [3] = &mock_cedt.cfmws3.cfmws,
+ [4] = &mock_cedt.cfmws4.cfmws,
+ [5] = &mock_cedt.cfmws5.cfmws,
+ /* Modulo Math above, XOR Math below */
+ [6] = &mock_cedt.cfmws6.cfmws,
+ [7] = &mock_cedt.cfmws7.cfmws,
+ [8] = &mock_cedt.cfmws8.cfmws,
+};
+
+static int cfmws_start;
+static int cfmws_end;
+#define CFMWS_MOD_ARRAY_START 0
+#define CFMWS_MOD_ARRAY_END 5
+#define CFMWS_XOR_ARRAY_START 6
+#define CFMWS_XOR_ARRAY_END 8
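+/*
+ * cfmws_start/cfmws_end select either the modulo entries ([0..5]) or the
+ * XOR entries ([6..8]) of mock_cfmws[], depending on the
+ * interleave_arithmetic setting declared above.
+ */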
+
+struct acpi_cedt_cxims *mock_cxims[1] = {
+ [0] = &mock_cedt.cxims0.cxims,
+};
+
+struct cxl_mock_res {
+ struct list_head list;
+ struct range range;
+};
+
+static LIST_HEAD(mock_res);
+static DEFINE_MUTEX(mock_res_lock);
+static struct gen_pool *cxl_mock_pool;
+
+static void depopulate_all_mock_resources(void)
+{
+ struct cxl_mock_res *res, *_res;
+
+ mutex_lock(&mock_res_lock);
+ list_for_each_entry_safe(res, _res, &mock_res, list) {
+ gen_pool_free(cxl_mock_pool, res->range.start,
+ range_len(&res->range));
+ list_del(&res->list);
+ kfree(res);
+ }
+ mutex_unlock(&mock_res_lock);
+}
+
+static struct cxl_mock_res *alloc_mock_res(resource_size_t size, int align)
+{
+ struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
+ struct genpool_data_align data = {
+ .align = align,
+ };
+ unsigned long phys;
+
+ if (!res)
+ return NULL;
+
+ INIT_LIST_HEAD(&res->list);
+ phys = gen_pool_alloc_algo(cxl_mock_pool, size,
+ gen_pool_first_fit_align, &data);
+ if (!phys) {
+ kfree(res);
+ return NULL;
+ }
+
+ res->range = (struct range) {
+ .start = phys,
+ .end = phys + size - 1,
+ };
+ mutex_lock(&mock_res_lock);
+ list_add(&res->list, &mock_res);
+ mutex_unlock(&mock_res_lock);
+
+ return res;
+}
+
+/* Only update CFMWS0 as this is used by the auto region. */
+static void cfmws_elc_update(struct acpi_cedt_cfmws *window, int index)
+{
+ if (!extended_linear_cache)
+ return;
+
+ if (index != 0)
+ return;
+
+ /*
+ * The window size should be 2x the CXL region size, where half is
+ * DRAM and half is CXL.
+ */
+ window->window_size = mock_auto_region_size * 2;
+}
+
+static int populate_cedt(void)
+{
+ struct cxl_mock_res *res;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
+ struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
+ resource_size_t size;
+
+ if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
+ size = ACPI_CEDT_CHBS_LENGTH_CXL20;
+ else
+ size = ACPI_CEDT_CHBS_LENGTH_CXL11;
+
+ res = alloc_mock_res(size, size);
+ if (!res)
+ return -ENOMEM;
+ chbs->base = res->range.start;
+ chbs->length = size;
+ }
+
+ for (i = cfmws_start; i <= cfmws_end; i++) {
+ struct acpi_cedt_cfmws *window = mock_cfmws[i];
+
+ cfmws_elc_update(window, i);
+ res = alloc_mock_res(window->window_size, SZ_256M);
+ if (!res)
+ return -ENOMEM;
+ window->base_hpa = res->range.start;
+ }
+
+ return 0;
+}
+
+static bool is_mock_port(struct device *dev);
+
+/*
+ * WARNING, this hack assumes the format of 'struct cxl_cfmws_context'
+ * and 'struct cxl_chbs_context' share the property that the first
+ * struct member is a cxl_test device being probed by the cxl_acpi
+ * driver.
+ */
+struct cxl_cedt_context {
+ struct device *dev;
+};
+
+static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
+ acpi_tbl_entry_handler_arg handler_arg,
+ void *arg)
+{
+ struct cxl_cedt_context *ctx = arg;
+ struct device *dev = ctx->dev;
+ union acpi_subtable_headers *h;
+ unsigned long end;
+ int i;
+
+ if (!is_mock_port(dev) && !is_mock_dev(dev))
+ return acpi_table_parse_cedt(id, handler_arg, arg);
+
+ if (id == ACPI_CEDT_TYPE_CHBS)
+ for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
+ h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
+ end = (unsigned long)&mock_cedt.chbs[i + 1];
+ handler_arg(h, arg, end);
+ }
+
+ if (id == ACPI_CEDT_TYPE_CFMWS)
+ for (i = cfmws_start; i <= cfmws_end; i++) {
+ h = (union acpi_subtable_headers *) mock_cfmws[i];
+ end = (unsigned long) h + mock_cfmws[i]->header.length;
+ handler_arg(h, arg, end);
+ }
+
+ if (id == ACPI_CEDT_TYPE_CXIMS)
+ for (i = 0; i < ARRAY_SIZE(mock_cxims); i++) {
+ h = (union acpi_subtable_headers *)mock_cxims[i];
+ end = (unsigned long)h + mock_cxims[i]->header.length;
+ handler_arg(h, arg, end);
+ }
+
+ return 0;
+}
+
+static bool is_mock_bridge(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
+ if (dev == &cxl_host_bridge[i]->dev)
+ return true;
+ for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
+ if (dev == &cxl_hb_single[i]->dev)
+ return true;
+ for (i = 0; i < ARRAY_SIZE(cxl_rch); i++)
+ if (dev == &cxl_rch[i]->dev)
+ return true;
+
+ return false;
+}
+
+static bool is_mock_port(struct device *dev)
+{
+ int i;
+
+ if (is_mock_bridge(dev))
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
+ if (dev == &cxl_root_port[i]->dev)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
+ if (dev == &cxl_switch_uport[i]->dev)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
+ if (dev == &cxl_switch_dport[i]->dev)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
+ if (dev == &cxl_root_single[i]->dev)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
+ if (dev == &cxl_swu_single[i]->dev)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
+ if (dev == &cxl_swd_single[i]->dev)
+ return true;
+
+ if (is_cxl_memdev(dev))
+ return is_mock_dev(dev->parent);
+
+ return false;
+}
+
+static int host_bridge_index(struct acpi_device *adev)
+{
+ return adev - host_bridge;
+}
+
+static struct acpi_device *find_host_bridge(acpi_handle handle)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
+ if (handle == host_bridge[i].handle)
+ return &host_bridge[i];
+ return NULL;
+}
+
+static acpi_status
+mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
+ struct acpi_object_list *arguments,
+ unsigned long long *data)
+{
+ struct acpi_device *adev = find_host_bridge(handle);
+
+ if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
+ return acpi_evaluate_integer(handle, pathname, arguments, data);
+
+ *data = host_bridge_index(adev);
+ return AE_OK;
+}
+
+static int
+mock_hmat_get_extended_linear_cache_size(struct resource *backing_res,
+ int nid, resource_size_t *cache_size)
+{
+ struct acpi_cedt_cfmws *window = mock_cfmws[0];
+ struct resource cfmws0_res =
+ DEFINE_RES_MEM(window->base_hpa, window->window_size);
+
+ if (!extended_linear_cache ||
+ !resource_contains(&cfmws0_res, backing_res)) {
+ return hmat_get_extended_linear_cache_size(backing_res,
+ nid, cache_size);
+ }
+
+ *cache_size = mock_auto_region_size;
+
+ return 0;
+}
+
+static struct pci_bus mock_pci_bus[NR_BRIDGES];
+static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = {
+ [0] = {
+ .bus = &mock_pci_bus[0],
+ },
+ [1] = {
+ .bus = &mock_pci_bus[1],
+ },
+ [2] = {
+ .bus = &mock_pci_bus[2],
+ },
+ [3] = {
+ .bus = &mock_pci_bus[3],
+ },
+};
+
+static bool is_mock_bus(struct pci_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
+ if (bus == &mock_pci_bus[i])
+ return true;
+ return false;
+}
+
+static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
+{
+ struct acpi_device *adev = find_host_bridge(handle);
+
+ if (!adev)
+ return acpi_pci_find_root(handle);
+ return &mock_pci_root[host_bridge_index(adev)];
+}
+
+static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
+ struct cxl_endpoint_dvsec_info *info)
+{
+ struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
+ struct device *dev = &port->dev;
+
+ if (!cxlhdm)
+ return ERR_PTR(-ENOMEM);
+
+ cxlhdm->port = port;
+ cxlhdm->interleave_mask = ~0U;
+ cxlhdm->iw_cap_mask = ~0UL;
+ dev_set_drvdata(dev, cxlhdm);
+ return cxlhdm;
+}
+
+struct target_map_ctx {
+ u32 *target_map;
+ int index;
+ int target_count;
+};
+
+static int map_targets(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct target_map_ctx *ctx = data;
+
+ if (ctx->index >= ctx->target_count) {
+ dev_WARN_ONCE(dev, 1, "too many targets found?\n");
+ return -ENXIO;
+ }
+
+ ctx->target_map[ctx->index++] = pdev->id;
+
+ return 0;
+}
+
+static int mock_decoder_commit(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ int id = cxld->id;
+
+ if (cxld->flags & CXL_DECODER_F_ENABLE)
+ return 0;
+
+ dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
+ if (cxl_num_decoders_committed(port) != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order commit, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id,
+ cxl_num_decoders_committed(port));
+ return -EBUSY;
+ }
+
+ port->commit_end++;
+ cxld->flags |= CXL_DECODER_F_ENABLE;
+
+ return 0;
+}
+
+static void mock_decoder_reset(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ int id = cxld->id;
+
+ if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
+ return;
+
+ dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
+ if (port->commit_end == id)
+ cxl_port_commit_reap(cxld);
+ else
+ dev_dbg(&port->dev,
+ "%s: out of order reset, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id, port->commit_end);
+ cxld->flags &= ~CXL_DECODER_F_ENABLE;
+}
+
+static void default_mock_decoder(struct cxl_decoder *cxld)
+{
+ cxld->hpa_range = (struct range){
+ .start = 0,
+ .end = -1,
+ };
+
+ cxld->interleave_ways = 1;
+ cxld->interleave_granularity = 256;
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
+ cxld->commit = mock_decoder_commit;
+ cxld->reset = mock_decoder_reset;
+}
+
+static int first_decoder(struct device *dev, const void *data)
+{
+ struct cxl_decoder *cxld;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+ cxld = to_cxl_decoder(dev);
+ if (cxld->id == 0)
+ return 1;
+ return 0;
+}
+
+static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
+{
+ struct acpi_cedt_cfmws *window = mock_cfmws[0];
+ struct platform_device *pdev = NULL;
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_port *port, *iter;
+ struct cxl_memdev *cxlmd;
+ struct cxl_dport *dport;
+ struct device *dev;
+ bool hb0 = false;
+ u64 base;
+ int i;
+
+ if (is_endpoint_decoder(&cxld->dev)) {
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
+ cxlmd = cxled_to_memdev(cxled);
+ WARN_ON(!dev_is_platform(cxlmd->dev.parent));
+ pdev = to_platform_device(cxlmd->dev.parent);
+
+ /* check if the endpoint is attached to host-bridge0 */
+ port = cxled_to_port(cxled);
+ do {
+ if (port->uport_dev == &cxl_host_bridge[0]->dev) {
+ hb0 = true;
+ break;
+ }
+ if (is_cxl_port(port->dev.parent))
+ port = to_cxl_port(port->dev.parent);
+ else
+ port = NULL;
+ } while (port);
+ port = cxled_to_port(cxled);
+ }
+
+ /*
+ * The first decoder on each of the first 2 devices on the first
+ * switch attached to host-bridge0 mocks a fake / static RAM
+ * region. All other decoders are disabled by default. Given the
+ * round robin assignment those devices are named cxl_mem.0 and
+ * cxl_mem.4.
+ *
+ * See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4'
+ */
+ if (!hb0 || pdev->id % 4 || pdev->id > 4 || cxld->id > 0) {
+ default_mock_decoder(cxld);
+ return;
+ }
+
+ base = window->base_hpa;
+ if (extended_linear_cache)
+ base += mock_auto_region_size;
+ cxld->hpa_range = (struct range) {
+ .start = base,
+ .end = base + mock_auto_region_size - 1,
+ };
+
+ cxld->interleave_ways = 2;
+ eig_to_granularity(window->granularity, &cxld->interleave_granularity);
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
+ cxld->flags = CXL_DECODER_F_ENABLE;
+ cxled->state = CXL_DECODER_STATE_AUTO;
+ port->commit_end = cxld->id;
+ devm_cxl_dpa_reserve(cxled, 0,
+ mock_auto_region_size / cxld->interleave_ways, 0);
+ cxld->commit = mock_decoder_commit;
+ cxld->reset = mock_decoder_reset;
+
+ /*
+ * Now that endpoint decoder is set up, walk up the hierarchy
+ * and setup the switch and root port decoders targeting @cxlmd.
+ */
+ iter = port;
+ for (i = 0; i < 2; i++) {
+ dport = iter->parent_dport;
+ iter = dport->port;
+ dev = device_find_child(&iter->dev, NULL, first_decoder);
+ /*
+ * Ancestor ports are guaranteed to be enumerated before
+ * @port, and all ports have at least one decoder.
+ */
+ if (WARN_ON(!dev))
+ continue;
+
+ cxlsd = to_cxl_switch_decoder(dev);
+ if (i == 0) {
+ /* put cxl_mem.4 second in the decode order */
+ if (pdev->id == 4) {
+ cxlsd->target[1] = dport;
+ cxld->target_map[1] = dport->port_id;
+ } else {
+ cxlsd->target[0] = dport;
+ cxld->target_map[0] = dport->port_id;
+ }
+ } else {
+ cxlsd->target[0] = dport;
+ cxld->target_map[0] = dport->port_id;
+ }
+ cxld = &cxlsd->cxld;
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
+ cxld->flags = CXL_DECODER_F_ENABLE;
+ iter->commit_end = 0;
+ /*
+ * Switch targets 2 endpoints, while host bridge targets
+ * one root port
+ */
+ if (i == 0)
+ cxld->interleave_ways = 2;
+ else
+ cxld->interleave_ways = 1;
+ cxld->interleave_granularity = 4096;
+ cxld->hpa_range = (struct range) {
+ .start = base,
+ .end = base + mock_auto_region_size - 1,
+ };
+ put_device(dev);
+ }
+}
+
+static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info)
+{
+ struct cxl_port *port = cxlhdm->port;
+ struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
+ int target_count, i;
+
+ if (is_cxl_endpoint(port))
+ target_count = 0;
+ else if (is_cxl_root(parent_port))
+ target_count = NR_CXL_ROOT_PORTS;
+ else
+ target_count = NR_CXL_SWITCH_PORTS;
+
+ for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
+ struct target_map_ctx ctx = {
+ .target_count = target_count,
+ };
+ struct cxl_decoder *cxld;
+ int rc;
+
+ if (target_count) {
+ struct cxl_switch_decoder *cxlsd;
+
+ cxlsd = cxl_switch_decoder_alloc(port, target_count);
+ if (IS_ERR(cxlsd)) {
+ dev_warn(&port->dev,
+ "Failed to allocate the decoder\n");
+ return PTR_ERR(cxlsd);
+ }
+ cxld = &cxlsd->cxld;
+ } else {
+ struct cxl_endpoint_decoder *cxled;
+
+ cxled = cxl_endpoint_decoder_alloc(port);
+
+ if (IS_ERR(cxled)) {
+ dev_warn(&port->dev,
+ "Failed to allocate the decoder\n");
+ return PTR_ERR(cxled);
+ }
+ cxld = &cxled->cxld;
+ }
+
+ ctx.target_map = cxld->target_map;
+
+ mock_init_hdm_decoder(cxld);
+
+ if (target_count) {
+ rc = device_for_each_child(port->uport_dev, &ctx,
+ map_targets);
+ if (rc) {
+ put_device(&cxld->dev);
+ return rc;
+ }
+ }
+
+ rc = cxl_decoder_add_locked(cxld);
+ if (rc) {
+ put_device(&cxld->dev);
+ dev_err(&port->dev, "Failed to add decoder\n");
+ return rc;
+ }
+
+ rc = cxl_decoder_autoremove(&port->dev, cxld);
+ if (rc)
+ return rc;
+ dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
+ }
+
+ return 0;
+}
+
+static int __mock_cxl_decoders_setup(struct cxl_port *port)
+{
+ struct cxl_hdm *cxlhdm;
+
+ cxlhdm = mock_cxl_setup_hdm(port, NULL);
+ if (IS_ERR(cxlhdm)) {
+ if (PTR_ERR(cxlhdm) != -ENODEV)
+ dev_err(&port->dev, "Failed to map HDM decoder capability\n");
+ return PTR_ERR(cxlhdm);
+ }
+
+ return mock_cxl_enumerate_decoders(cxlhdm, NULL);
+}
+
+static int mock_cxl_switch_port_decoders_setup(struct cxl_port *port)
+{
+ if (is_cxl_root(port) || is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ return __mock_cxl_decoders_setup(port);
+}
+
+static int mock_cxl_endpoint_decoders_setup(struct cxl_port *port)
+{
+ if (!is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ return __mock_cxl_decoders_setup(port);
+}
+
+static int get_port_array(struct cxl_port *port,
+ struct platform_device ***port_array,
+ int *port_array_size)
+{
+ struct platform_device **array;
+ int array_size;
+
+ if (port->depth == 1) {
+ if (is_multi_bridge(port->uport_dev)) {
+ array_size = ARRAY_SIZE(cxl_root_port);
+ array = cxl_root_port;
+ } else if (is_single_bridge(port->uport_dev)) {
+ array_size = ARRAY_SIZE(cxl_root_single);
+ array = cxl_root_single;
+ } else {
+ dev_dbg(&port->dev, "%s: unknown bridge type\n",
+ dev_name(port->uport_dev));
+ return -ENXIO;
+ }
+ } else if (port->depth == 2) {
+ struct cxl_port *parent = to_cxl_port(port->dev.parent);
+
+ if (is_multi_bridge(parent->uport_dev)) {
+ array_size = ARRAY_SIZE(cxl_switch_dport);
+ array = cxl_switch_dport;
+ } else if (is_single_bridge(parent->uport_dev)) {
+ array_size = ARRAY_SIZE(cxl_swd_single);
+ array = cxl_swd_single;
+ } else {
+ dev_dbg(&port->dev, "%s: unknown bridge type\n",
+ dev_name(port->uport_dev));
+ return -ENXIO;
+ }
+ } else {
+ dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
+ port->depth);
+ return -ENXIO;
+ }
+
+ *port_array = array;
+ *port_array_size = array_size;
+
+ return 0;
+}
+
+static struct cxl_dport *mock_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct platform_device **array;
+ int rc, i, array_size;
+
+ rc = get_port_array(port, &array, &array_size);
+ if (rc)
+ return ERR_PTR(rc);
+
+ for (i = 0; i < array_size; i++) {
+ struct platform_device *pdev = array[i];
+
+ if (pdev->dev.parent != port->uport_dev) {
+ dev_dbg(&port->dev, "%s: mismatch parent %s\n",
+ dev_name(port->uport_dev),
+ dev_name(pdev->dev.parent));
+ continue;
+ }
+
+ if (&pdev->dev != dport_dev)
+ continue;
+
+ return devm_cxl_add_dport(port, &pdev->dev, pdev->id,
+ CXL_RESOURCE_NONE);
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+/*
+ * Fake the cxl_dpa_perf for the memdev when appropriate.
+ */
+static void dpa_perf_setup(struct cxl_port *endpoint, struct range *range,
+ struct cxl_dpa_perf *dpa_perf)
+{
+ dpa_perf->qos_class = FAKE_QTG_ID;
+ dpa_perf->dpa_range = *range;
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ dpa_perf->coord[i].read_latency = 500;
+ dpa_perf->coord[i].write_latency = 500;
+ dpa_perf->coord[i].read_bandwidth = 1000;
+ dpa_perf->coord[i].write_bandwidth = 1000;
+ }
+}
+
+static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
+{
+ struct cxl_root *cxl_root __free(put_cxl_root) =
+ find_cxl_root(port);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
+
+ if (!cxl_root)
+ return;
+
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ struct resource *res = &cxlds->part[i].res;
+ struct cxl_dpa_perf *perf = &cxlds->part[i].perf;
+ struct range range = {
+ .start = res->start,
+ .end = res->end,
+ };
+
+ dpa_perf_setup(port, &range, perf);
+ }
+
+ cxl_memdev_update_perf(cxlmd);
+
+ /*
+	 * This call exists only to exercise the topology iterator; it
+	 * serves no other purpose.
+ */
+ cxl_endpoint_get_perf_coordinates(port, ep_c);
+}
+
+static struct cxl_mock_ops cxl_mock_ops = {
+ .is_mock_adev = is_mock_adev,
+ .is_mock_bridge = is_mock_bridge,
+ .is_mock_bus = is_mock_bus,
+ .is_mock_port = is_mock_port,
+ .is_mock_dev = is_mock_dev,
+ .acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
+ .acpi_evaluate_integer = mock_acpi_evaluate_integer,
+ .acpi_pci_find_root = mock_acpi_pci_find_root,
+ .devm_cxl_switch_port_decoders_setup = mock_cxl_switch_port_decoders_setup,
+ .devm_cxl_endpoint_decoders_setup = mock_cxl_endpoint_decoders_setup,
+ .cxl_endpoint_parse_cdat = mock_cxl_endpoint_parse_cdat,
+ .devm_cxl_add_dport_by_dev = mock_cxl_add_dport_by_dev,
+ .hmat_get_extended_linear_cache_size =
+ mock_hmat_get_extended_linear_cache_size,
+ .list = LIST_HEAD_INIT(cxl_mock_ops.list),
+};
+
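+/*
+ * Associate a mock acpi_device with @dev by wiring up the fwnode links
+ * that a real ACPI companion would provide.
+ */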
+static void mock_companion(struct acpi_device *adev, struct device *dev)
+{
+ device_initialize(&adev->dev);
+ fwnode_init(&adev->fwnode, NULL);
+ dev->fwnode = &adev->fwnode;
+ adev->fwnode.dev = dev;
+}
+
+#ifndef SZ_64G
+#define SZ_64G (SZ_32G * 2)
+#endif
+
+static __init int cxl_rch_topo_init(void)
+{
+ int rc, i;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_rch); i++) {
+ int idx = NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + i;
+ struct acpi_device *adev = &host_bridge[idx];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_host_bridge", idx);
+		if (!pdev) {
+			rc = -ENOMEM;
+			goto err_bridge;
+		}
+
+ mock_companion(adev, &pdev->dev);
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_bridge;
+ }
+
+ cxl_rch[i] = pdev;
+ mock_pci_bus[idx].bridge = &pdev->dev;
+ rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
+ "firmware_node");
+ if (rc)
+ goto err_bridge;
+ }
+
+ return 0;
+
+err_bridge:
+ for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
+ struct platform_device *pdev = cxl_rch[i];
+
+ if (!pdev)
+ continue;
+ sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
+ platform_device_unregister(cxl_rch[i]);
+ }
+
+ return rc;
+}
+
+static void cxl_rch_topo_exit(void)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
+ struct platform_device *pdev = cxl_rch[i];
+
+ if (!pdev)
+ continue;
+ sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
+ platform_device_unregister(cxl_rch[i]);
+ }
+}
+
+static __init int cxl_single_topo_init(void)
+{
+ int i, rc;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
+ struct acpi_device *adev =
+ &host_bridge[NR_CXL_HOST_BRIDGES + i];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_host_bridge",
+ NR_CXL_HOST_BRIDGES + i);
+		if (!pdev) {
+			rc = -ENOMEM;
+			goto err_bridge;
+		}
+
+ mock_companion(adev, &pdev->dev);
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_bridge;
+ }
+
+ cxl_hb_single[i] = pdev;
+ mock_pci_bus[i + NR_CXL_HOST_BRIDGES].bridge = &pdev->dev;
+ rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
+ "physical_node");
+ if (rc)
+ goto err_bridge;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
+ struct platform_device *bridge =
+ cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_root_port",
+ NR_MULTI_ROOT + i);
+		if (!pdev) {
+			rc = -ENOMEM;
+			goto err_port;
+		}
+ pdev->dev.parent = &bridge->dev;
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_port;
+ }
+ cxl_root_single[i] = pdev;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
+ struct platform_device *root_port = cxl_root_single[i];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_switch_uport",
+ NR_MULTI_ROOT + i);
+		if (!pdev) {
+			rc = -ENOMEM;
+			goto err_uport;
+		}
+ pdev->dev.parent = &root_port->dev;
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_uport;
+ }
+ cxl_swu_single[i] = pdev;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
+ struct platform_device *uport =
+ cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_switch_dport",
+ i + NR_MEM_MULTI);
+		if (!pdev) {
+			rc = -ENOMEM;
+			goto err_dport;
+		}
+ pdev->dev.parent = &uport->dev;
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_dport;
+ }
+ cxl_swd_single[i] = pdev;
+ }
+
+ return 0;
+
+err_dport:
+ for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_swd_single[i]);
+err_uport:
+ for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_swu_single[i]);
+err_port:
+ for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_root_single[i]);
+err_bridge:
+ for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
+ struct platform_device *pdev = cxl_hb_single[i];
+
+ if (!pdev)
+ continue;
+ sysfs_remove_link(&pdev->dev.kobj, "physical_node");
+ platform_device_unregister(cxl_hb_single[i]);
+ }
+
+ return rc;
+}
+
+static void cxl_single_topo_exit(void)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_swd_single[i]);
+ for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_swu_single[i]);
+ for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_root_single[i]);
+ for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
+ struct platform_device *pdev = cxl_hb_single[i];
+
+ if (!pdev)
+ continue;
+ sysfs_remove_link(&pdev->dev.kobj, "physical_node");
+ platform_device_unregister(cxl_hb_single[i]);
+ }
+}
+
+static void cxl_mem_exit(void)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_rcd[i]);
+ for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_mem_single[i]);
+ for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_mem[i]);
+}
+
+static int cxl_mem_init(void)
+{
+ int i, rc;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
+ struct platform_device *dport = cxl_switch_dport[i];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_mem", i);
+		if (!pdev) {
+			rc = -ENOMEM;
+			goto err_mem;
+		}
+ pdev->dev.parent = &dport->dev;
+ set_dev_node(&pdev->dev, i % 2);
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_mem;
+ }
+ cxl_mem[i] = pdev;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
+ struct platform_device *dport = cxl_swd_single[i];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
+		if (!pdev) {
+			rc = -ENOMEM;
+			goto err_single;
+		}
+ pdev->dev.parent = &dport->dev;
+ set_dev_node(&pdev->dev, i % 2);
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_single;
+ }
+ cxl_mem_single[i] = pdev;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) {
+ int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i;
+ struct platform_device *rch = cxl_rch[i];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_rcd", idx);
+		if (!pdev) {
+			rc = -ENOMEM;
+			goto err_rcd;
+		}
+ pdev->dev.parent = &rch->dev;
+ set_dev_node(&pdev->dev, i % 2);
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_rcd;
+ }
+ cxl_rcd[i] = pdev;
+ }
+
+ return 0;
+
+err_rcd:
+ for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_rcd[i]);
+err_single:
+ for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_mem_single[i]);
+err_mem:
+ for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_mem[i]);
+ return rc;
+}
+
+static __init int cxl_test_init(void)
+{
+ int rc, i;
+ struct range mappable;
+
+ cxl_acpi_test();
+ cxl_core_test();
+ cxl_mem_test();
+ cxl_pmem_test();
+ cxl_port_test();
+
+ register_cxl_mock_ops(&cxl_mock_ops);
+
+ cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
+ if (!cxl_mock_pool) {
+ rc = -ENOMEM;
+ goto err_gen_pool_create;
+ }
+ mappable = mhp_get_pluggable_range(true);
+
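+	/*
+	 * Reserve a 64GB mock range just below the lower of the top of
+	 * the iomem resource and the top of the hotplug-pluggable range.
+	 */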
+ rc = gen_pool_add(cxl_mock_pool,
+ min(iomem_resource.end + 1 - SZ_64G,
+ mappable.end + 1 - SZ_64G),
+ SZ_64G, NUMA_NO_NODE);
+ if (rc)
+ goto err_gen_pool_add;
+
+ if (interleave_arithmetic == 1) {
+ cfmws_start = CFMWS_XOR_ARRAY_START;
+ cfmws_end = CFMWS_XOR_ARRAY_END;
+ } else {
+ cfmws_start = CFMWS_MOD_ARRAY_START;
+ cfmws_end = CFMWS_MOD_ARRAY_END;
+ }
+
+ rc = populate_cedt();
+ if (rc)
+ goto err_populate;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
+ struct acpi_device *adev = &host_bridge[i];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_host_bridge", i);
+		if (!pdev) {
+			rc = -ENOMEM;
+			goto err_bridge;
+		}
+
+ mock_companion(adev, &pdev->dev);
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_bridge;
+ }
+
+ cxl_host_bridge[i] = pdev;
+ mock_pci_bus[i].bridge = &pdev->dev;
+ rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
+ "physical_node");
+ if (rc)
+ goto err_bridge;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
+ struct platform_device *bridge =
+ cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_root_port", i);
+		if (!pdev) {
+			rc = -ENOMEM;
+			goto err_port;
+		}
+ pdev->dev.parent = &bridge->dev;
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_port;
+ }
+ cxl_root_port[i] = pdev;
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
+ for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
+ struct platform_device *root_port = cxl_root_port[i];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_switch_uport", i);
+		if (!pdev) {
+			rc = -ENOMEM;
+			goto err_uport;
+		}
+ pdev->dev.parent = &root_port->dev;
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_uport;
+ }
+ cxl_switch_uport[i] = pdev;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
+ struct platform_device *uport =
+ cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc("cxl_switch_dport", i);
+		if (!pdev) {
+			rc = -ENOMEM;
+			goto err_dport;
+		}
+ pdev->dev.parent = &uport->dev;
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ platform_device_put(pdev);
+ goto err_dport;
+ }
+ cxl_switch_dport[i] = pdev;
+ }
+
+ rc = cxl_single_topo_init();
+ if (rc)
+ goto err_dport;
+
+ rc = cxl_rch_topo_init();
+ if (rc)
+ goto err_single;
+
+ cxl_acpi = platform_device_alloc("cxl_acpi", 0);
+	if (!cxl_acpi) {
+		rc = -ENOMEM;
+		goto err_rch;
+	}
+
+ mock_companion(&acpi0017_mock, &cxl_acpi->dev);
+ acpi0017_mock.dev.bus = &platform_bus_type;
+
+ rc = platform_device_add(cxl_acpi);
+ if (rc)
+ goto err_root;
+
+ rc = cxl_mem_init();
+ if (rc)
+ goto err_root;
+
+ return 0;
+
+err_root:
+ platform_device_put(cxl_acpi);
+err_rch:
+ cxl_rch_topo_exit();
+err_single:
+ cxl_single_topo_exit();
+err_dport:
+ for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_switch_dport[i]);
+err_uport:
+ for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_switch_uport[i]);
+err_port:
+ for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_root_port[i]);
+err_bridge:
+ for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
+ struct platform_device *pdev = cxl_host_bridge[i];
+
+ if (!pdev)
+ continue;
+ sysfs_remove_link(&pdev->dev.kobj, "physical_node");
+ platform_device_unregister(cxl_host_bridge[i]);
+ }
+err_populate:
+ depopulate_all_mock_resources();
+err_gen_pool_add:
+ gen_pool_destroy(cxl_mock_pool);
+err_gen_pool_create:
+ unregister_cxl_mock_ops(&cxl_mock_ops);
+ return rc;
+}
+
+static __exit void cxl_test_exit(void)
+{
+ int i;
+
+ cxl_mem_exit();
+ platform_device_unregister(cxl_acpi);
+ cxl_rch_topo_exit();
+ cxl_single_topo_exit();
+ for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_switch_dport[i]);
+ for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_switch_uport[i]);
+ for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
+ platform_device_unregister(cxl_root_port[i]);
+ for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
+ struct platform_device *pdev = cxl_host_bridge[i];
+
+ if (!pdev)
+ continue;
+ sysfs_remove_link(&pdev->dev.kobj, "physical_node");
+ platform_device_unregister(cxl_host_bridge[i]);
+ }
+ depopulate_all_mock_resources();
+ gen_pool_destroy(cxl_mock_pool);
+ unregister_cxl_mock_ops(&cxl_mock_ops);
+}
+
+module_param(interleave_arithmetic, int, 0444);
+MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1");
+module_param(extended_linear_cache, bool, 0444);
+MODULE_PARM_DESC(extended_linear_cache, "Enable extended linear cache support");
+module_init(cxl_test_init);
+module_exit(cxl_test_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("cxl_test: setup module");
+MODULE_IMPORT_NS("ACPI");
+MODULE_IMPORT_NS("CXL");
diff --git a/tools/testing/cxl/test/cxl_translate.c b/tools/testing/cxl/test/cxl_translate.c
new file mode 100644
index 000000000000..2200ae21795c
--- /dev/null
+++ b/tools/testing/cxl/test/cxl_translate.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright(c) 2025 Intel Corporation. All rights reserved.
+
+/* Preface all log entries with "cxl_translate" */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <cxlmem.h>
+#include <cxl.h>
+
+/* Maximum number of test vectors and entry length */
+#define MAX_TABLE_ENTRIES 128
+#define MAX_ENTRY_LEN 128
+
+/* Expected number of parameters in each test vector */
+#define EXPECTED_PARAMS 7
+
+/* Module parameters for test vectors */
+static char *table[MAX_TABLE_ENTRIES];
+static int table_num;
+
+/* Interleave Arithmetic */
+#define MODULO_MATH 0
+#define XOR_MATH 1
+
+/*
+ * XOR mapping configuration
+ * The test data sets all use the same set of xormaps. When additional
+ * data sets arrive for validation, this static setup will need to
+ * be changed to accept xormaps as additional parameters.
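+ *
+ * Each xormap entry is a bitmask of HPA bits that are XOR-reduced to
+ * produce one bit of the host bridge target selection, per the CXIMS
+ * structure in the CXL specification.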
+ */
+static struct cxl_cxims_data *cximsd;
+static u64 xormaps[] = {
+ 0x2020900,
+ 0x4041200,
+ 0x1010400,
+ 0x800,
+};
+
+static int nr_maps = ARRAY_SIZE(xormaps);
+
+#define HBIW_TO_NR_MAPS_SIZE (CXL_DECODER_MAX_INTERLEAVE + 1)
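+/*
+ * Number of xormaps applied for a given host bridge interleave ways
+ * (HBIW) value: log2 of the power-of-2 factor of HBIW. The modulo-3
+ * factor of 3-, 6-, and 12-way interleaves consumes no xormap.
+ */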
+static const int hbiw_to_nr_maps[HBIW_TO_NR_MAPS_SIZE] = {
+ [1] = 0, [2] = 1, [3] = 0, [4] = 2, [6] = 1, [8] = 3, [12] = 2, [16] = 4
+};
+
+/**
+ * to_hpa - calculate an HPA offset from a DPA offset and position
+ *
+ * @dpa_offset: device physical address offset
+ * @pos: device's position in the interleave
+ * @r_eiw: region encoded interleave ways
+ * @r_eig: region encoded interleave granularity
+ * @hb_ways: host bridge interleave ways
+ * @math: interleave arithmetic (MODULO_MATH or XOR_MATH)
+ *
+ * Returns: host physical address offset
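+ *
+ * Illustrative modulo-math example: with r_eiw=1 (2 ways) and r_eig=0
+ * (256B granularity), dpa_offset=0x100 at pos=1 decodes to
+ * hpa_offset = (0x100 / 0x100) * (0x100 * 2) + 1 * 0x100 = 0x300.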
+ */
+static u64 to_hpa(u64 dpa_offset, int pos, u8 r_eiw, u16 r_eig, u8 hb_ways,
+ u8 math)
+{
+ u64 hpa_offset;
+
+ /* Calculate base HPA offset from DPA and position */
+ hpa_offset = cxl_calculate_hpa_offset(dpa_offset, pos, r_eiw, r_eig);
+
+ if (math == XOR_MATH) {
+ cximsd->nr_maps = hbiw_to_nr_maps[hb_ways];
+ if (cximsd->nr_maps)
+ return cxl_do_xormap_calc(cximsd, hpa_offset, hb_ways);
+ }
+ return hpa_offset;
+}
+
+/**
+ * to_dpa - translate an HPA offset to DPA offset
+ *
+ * @hpa_offset: host physical address offset
+ * @r_eiw: region encoded interleave ways
+ * @r_eig: region encoded interleave granularity
+ * @hb_ways: host bridge interleave ways
+ * @math: interleave arithmetic (MODULO_MATH or XOR_MATH)
+ *
+ * Returns: device physical address offset
+ */
+static u64 to_dpa(u64 hpa_offset, u8 r_eiw, u16 r_eig, u8 hb_ways, u8 math)
+{
+ u64 offset = hpa_offset;
+
+ if (math == XOR_MATH) {
+ cximsd->nr_maps = hbiw_to_nr_maps[hb_ways];
+ if (cximsd->nr_maps)
+ offset =
+ cxl_do_xormap_calc(cximsd, hpa_offset, hb_ways);
+ }
+ return cxl_calculate_dpa_offset(offset, r_eiw, r_eig);
+}
+
+/**
+ * to_pos - extract an interleave position from an HPA offset
+ *
+ * @hpa_offset: host physical address offset
+ * @r_eiw: region encoded interleave ways
+ * @r_eig: region encoded interleave granularity
+ * @hb_ways: host bridge interleave ways
+ * @math: interleave arithmetic (MODULO_MATH or XOR_MATH)
+ *
+ * Returns: device's position in the region interleave
+ */
+static u64 to_pos(u64 hpa_offset, u8 r_eiw, u16 r_eig, u8 hb_ways, u8 math)
+{
+ u64 offset = hpa_offset;
+
+ /* Reverse XOR mapping if specified */
+ if (math == XOR_MATH)
+ offset = cxl_do_xormap_calc(cximsd, hpa_offset, hb_ways);
+
+ return cxl_calculate_position(offset, r_eiw, r_eig);
+}
+
+/**
+ * run_translation_test - execute forward and reverse translations
+ *
+ * @dpa: device physical address
+ * @pos: expected position in region interleave
+ * @r_eiw: region encoded interleave ways
+ * @r_eig: region encoded interleave granularity
+ * @hb_ways: host bridge interleave ways
+ * @math: interleave arithmetic (MODULO_MATH or XOR_MATH)
+ * @expect_hpa: expected host physical address
+ *
+ * Returns: 0 on success, -1 on failure
+ */
+static int run_translation_test(u64 dpa, int pos, u8 r_eiw, u16 r_eig,
+ u8 hb_ways, int math, u64 expect_hpa)
+{
+ u64 translated_spa, reverse_dpa;
+ int reverse_pos;
+
+ /* Test Device to Host translation: DPA + POS -> SPA */
+ translated_spa = to_hpa(dpa, pos, r_eiw, r_eig, hb_ways, math);
+ if (translated_spa != expect_hpa) {
+ pr_err("Device to host failed: expected HPA %llu, got %llu\n",
+ expect_hpa, translated_spa);
+ return -1;
+ }
+
+ /* Test Host to Device DPA translation: SPA -> DPA */
+ reverse_dpa = to_dpa(translated_spa, r_eiw, r_eig, hb_ways, math);
+ if (reverse_dpa != dpa) {
+ pr_err("Host to Device DPA failed: expected %llu, got %llu\n",
+ dpa, reverse_dpa);
+ return -1;
+ }
+
+ /* Test Host to Device Position translation: SPA -> POS */
+ reverse_pos = to_pos(translated_spa, r_eiw, r_eig, hb_ways, math);
+ if (reverse_pos != pos) {
+ pr_err("Position lookup failed: expected %d, got %d\n", pos,
+ reverse_pos);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * parse_test_vector - parse a single test vector string
+ *
+ * @entry: test vector string to parse
+ * @dpa: device physical address
+ * @pos: expected position in region interleave
+ * @r_eiw: region encoded interleave ways
+ * @r_eig: region encoded interleave granularity
+ * @hb_ways: host bridge interleave ways
+ * @math: interleave arithmetic (MODULO_MATH or XOR_MATH)
+ * @expect_hpa: expected host physical address
+ *
+ * Returns: 0 on success, negative error code on failure
+ */
+static int parse_test_vector(const char *entry, u64 *dpa, int *pos, u8 *r_eiw,
+ u16 *r_eig, u8 *hb_ways, int *math,
+ u64 *expect_hpa)
+{
+ unsigned int tmp_r_eiw, tmp_r_eig, tmp_hb_ways;
+ int parsed;
+
+ parsed = sscanf(entry, "%llu %d %u %u %u %d %llu", dpa, pos, &tmp_r_eiw,
+ &tmp_r_eig, &tmp_hb_ways, math, expect_hpa);
+
+ if (parsed != EXPECTED_PARAMS) {
+ pr_err("Parse error: expected %d parameters, got %d in '%s'\n",
+ EXPECTED_PARAMS, parsed, entry);
+ return -EINVAL;
+ }
+ if (tmp_r_eiw > U8_MAX || tmp_r_eig > U16_MAX || tmp_hb_ways > U8_MAX) {
+ pr_err("Parameter overflow in entry: '%s'\n", entry);
+ return -ERANGE;
+ }
+ if (*math != MODULO_MATH && *math != XOR_MATH) {
+ pr_err("Invalid math type %d in entry: '%s'\n", *math, entry);
+ return -EINVAL;
+ }
+ *r_eiw = tmp_r_eiw;
+ *r_eig = tmp_r_eig;
+ *hb_ways = tmp_hb_ways;
+
+ return 0;
+}
+
+/*
+ * setup_xor_mapping - Initialize XOR mapping data structure
+ *
+ * The test data sets all use the same HBIG so we can use one set
+ * of xormaps, and set the number to apply based on HBIW before
+ * calling cxl_do_xormap_calc().
+ *
+ * When additional data sets arrive for validation with different
+ * HBIGs, this static setup will need to be updated.
+ *
+ * Returns: 0 on success, negative error code on failure
+ */
+static int setup_xor_mapping(void)
+{
+ if (nr_maps <= 0)
+ return -EINVAL;
+
+ cximsd = kzalloc(struct_size(cximsd, xormaps, nr_maps), GFP_KERNEL);
+ if (!cximsd)
+ return -ENOMEM;
+
+ memcpy(cximsd->xormaps, xormaps, nr_maps * sizeof(*cximsd->xormaps));
+ cximsd->nr_maps = nr_maps;
+
+ return 0;
+}
+
+static int test_random_params(void)
+{
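+	/*
+	 * CXL spec encodings: eiw 0-4 encode 1/2/4/8/16 ways, 8-10 encode
+	 * 3/6/12 ways; eig 0-6 encode a granularity of 256B << eig
+	 * (256B through 16KB).
+	 */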
+ u8 valid_eiws[] = { 0, 1, 2, 3, 4, 8, 9, 10 };
+ u16 valid_eigs[] = { 0, 1, 2, 3, 4, 5, 6 };
+ int i, ways, pos, reverse_pos;
+ u64 dpa, hpa, reverse_dpa;
+ int iterations = 10000;
+ int failures = 0;
+
+ for (i = 0; i < iterations; i++) {
+ /* Generate valid random parameters for eiw, eig, pos, dpa */
+ u8 eiw = valid_eiws[get_random_u32() % ARRAY_SIZE(valid_eiws)];
+ u16 eig = valid_eigs[get_random_u32() % ARRAY_SIZE(valid_eigs)];
+
+ eiw_to_ways(eiw, &ways);
+ pos = get_random_u32() % ways;
+ dpa = get_random_u64() >> 12;
+
+ hpa = cxl_calculate_hpa_offset(dpa, pos, eiw, eig);
+ reverse_dpa = cxl_calculate_dpa_offset(hpa, eiw, eig);
+ reverse_pos = cxl_calculate_position(hpa, eiw, eig);
+
+ if (reverse_dpa != dpa || reverse_pos != pos) {
+ pr_err("test random iter %d FAIL hpa=%llu, dpa=%llu reverse_dpa=%llu, pos=%d reverse_pos=%d eiw=%u eig=%u\n",
+ i, hpa, dpa, reverse_dpa, pos, reverse_pos, eiw,
+ eig);
+
+ if (failures++ > 10) {
+ pr_err("test random too many failures, stop\n");
+ break;
+ }
+ }
+ }
+ pr_info("..... test random: PASS %d FAIL %d\n", i - failures, failures);
+
+ if (failures)
+ return -EINVAL;
+
+ return 0;
+}
+
+struct param_test {
+ u8 eiw;
+ u16 eig;
+ int pos;
+ bool expect; /* true: expect pass, false: expect fail */
+ const char *desc;
+};
+
+static struct param_test param_tests[] = {
+ { 0x0, 0, 0, true, "1-way, min eig=0, pos=0" },
+ { 0x0, 3, 0, true, "1-way, mid eig=3, pos=0" },
+ { 0x0, 6, 0, true, "1-way, max eig=6, pos=0" },
+ { 0x1, 0, 0, true, "2-way, eig=0, pos=0" },
+ { 0x1, 3, 1, true, "2-way, eig=3, max pos=1" },
+ { 0x1, 6, 1, true, "2-way, eig=6, max pos=1" },
+ { 0x2, 0, 0, true, "4-way, eig=0, pos=0" },
+ { 0x2, 3, 3, true, "4-way, eig=3, max pos=3" },
+ { 0x2, 6, 3, true, "4-way, eig=6, max pos=3" },
+ { 0x3, 0, 0, true, "8-way, eig=0, pos=0" },
+ { 0x3, 3, 7, true, "8-way, eig=3, max pos=7" },
+ { 0x3, 6, 7, true, "8-way, eig=6, max pos=7" },
+ { 0x4, 0, 0, true, "16-way, eig=0, pos=0" },
+ { 0x4, 3, 15, true, "16-way, eig=3, max pos=15" },
+ { 0x4, 6, 15, true, "16-way, eig=6, max pos=15" },
+ { 0x8, 0, 0, true, "3-way, eig=0, pos=0" },
+ { 0x8, 3, 2, true, "3-way, eig=3, max pos=2" },
+ { 0x8, 6, 2, true, "3-way, eig=6, max pos=2" },
+ { 0x9, 0, 0, true, "6-way, eig=0, pos=0" },
+ { 0x9, 3, 5, true, "6-way, eig=3, max pos=5" },
+ { 0x9, 6, 5, true, "6-way, eig=6, max pos=5" },
+ { 0xA, 0, 0, true, "12-way, eig=0, pos=0" },
+ { 0xA, 3, 11, true, "12-way, eig=3, max pos=11" },
+ { 0xA, 6, 11, true, "12-way, eig=6, max pos=11" },
+ { 0x5, 0, 0, false, "invalid eiw=5" },
+ { 0x7, 0, 0, false, "invalid eiw=7" },
+ { 0xB, 0, 0, false, "invalid eiw=0xB" },
+ { 0xFF, 0, 0, false, "invalid eiw=0xFF" },
+ { 0x1, 7, 0, false, "invalid eig=7 (out of range)" },
+ { 0x2, 0x10, 0, false, "invalid eig=0x10" },
+ { 0x3, 0xFFFF, 0, false, "invalid eig=0xFFFF" },
+ { 0x1, 0, -1, false, "pos < 0" },
+ { 0x1, 0, 2, false, "2-way, pos=2 (>= ways)" },
+ { 0x2, 0, 4, false, "4-way, pos=4 (>= ways)" },
+ { 0x3, 0, 8, false, "8-way, pos=8 (>= ways)" },
+ { 0x4, 0, 16, false, "16-way, pos=16 (>= ways)" },
+ { 0x8, 0, 3, false, "3-way, pos=3 (>= ways)" },
+ { 0x9, 0, 6, false, "6-way, pos=6 (>= ways)" },
+ { 0xA, 0, 12, false, "12-way, pos=12 (>= ways)" },
+};
+
+static int test_cxl_validate_translation_params(void)
+{
+ int i, rc, failures = 0;
+ bool valid;
+
+ for (i = 0; i < ARRAY_SIZE(param_tests); i++) {
+ struct param_test *t = &param_tests[i];
+
+ rc = cxl_validate_translation_params(t->eiw, t->eig, t->pos);
+ valid = (rc == 0);
+
+ if (valid != t->expect) {
+ pr_err("test params failed: %s\n", t->desc);
+ failures++;
+ }
+ }
+ pr_info("..... test params: PASS %d FAIL %d\n", i - failures, failures);
+
+ if (failures)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * cxl_translate_init
+ *
+ * Run the internal validation tests when no params are passed.
+ * Otherwise, parse the parameters (test vectors), and kick off
+ * the translation test.
+ *
+ * Returns: 0 on success, negative error code on failure
+ */
+static int __init cxl_translate_init(void)
+{
+ int rc, i;
+
+ /* If no tables are passed, validate module params only */
+ if (table_num == 0) {
+ pr_info("Internal validation test start...\n");
+ rc = test_cxl_validate_translation_params();
+ if (rc)
+ return rc;
+
+ rc = test_random_params();
+ if (rc)
+ return rc;
+
+ pr_info("Internal validation test completed successfully\n");
+
+ return 0;
+ }
+
+ pr_info("CXL translate test module loaded with %d test vectors\n",
+ table_num);
+
+ rc = setup_xor_mapping();
+ if (rc)
+ return rc;
+
+ /* Process each test vector */
+ for (i = 0; i < table_num; i++) {
+ u64 dpa, expect_spa;
+ int pos, math;
+ u8 r_eiw, hb_ways;
+ u16 r_eig;
+
+ pr_debug("Processing test vector %d: '%s'\n", i, table[i]);
+
+ /* Parse the test vector */
+ rc = parse_test_vector(table[i], &dpa, &pos, &r_eiw, &r_eig,
+ &hb_ways, &math, &expect_spa);
+ if (rc) {
+ pr_err("CXL Translate Test %d: FAIL\n"
+ " Failed to parse test vector '%s'\n",
+ i, table[i]);
+ continue;
+ }
+ /* Run the translation test */
+ rc = run_translation_test(dpa, pos, r_eiw, r_eig, hb_ways, math,
+ expect_spa);
+ if (rc) {
+ pr_err("CXL Translate Test %d: FAIL\n"
+ " dpa=%llu pos=%d r_eiw=%u r_eig=%u hb_ways=%u math=%s expect_spa=%llu\n",
+ i, dpa, pos, r_eiw, r_eig, hb_ways,
+ (math == XOR_MATH) ? "XOR" : "MODULO",
+ expect_spa);
+ } else {
+ pr_info("CXL Translate Test %d: PASS\n", i);
+ }
+ }
+
+ kfree(cximsd);
+ pr_info("CXL translate test completed\n");
+
+ return 0;
+}
+
+static void __exit cxl_translate_exit(void)
+{
+ pr_info("CXL translate test module unloaded\n");
+}
+
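+/*
+ * Illustrative modulo-math vector, in the field order parsed by
+ * parse_test_vector() ("dpa pos r_eiw r_eig hb_ways math expect_hpa"):
+ *
+ *   modprobe cxl_translate table="256 1 1 0 2 0 768"
+ *
+ * DPA offset 256 at position 1 of a 2-way (eiw=1), 256B granularity
+ * (eig=0) region is expected to decode to HPA offset 768; hb_ways is
+ * only consulted for XOR math.
+ */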
+module_param_array(table, charp, &table_num, 0444);
+MODULE_PARM_DESC(table, "Test vectors, each a string of 7 space-separated decimal fields");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("cxl_test: cxl address translation test module");
+MODULE_IMPORT_NS("CXL");
+
+module_init(cxl_translate_init);
+module_exit(cxl_translate_exit);
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
new file mode 100644
index 000000000000..176dcde570cd
--- /dev/null
+++ b/tools/testing/cxl/test/mem.c
@@ -0,0 +1,1895 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright(c) 2021 Intel Corporation. All rights reserved.
+
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/sizes.h>
+#include <linux/bits.h>
+#include <cxl/mailbox.h>
+#include <linux/unaligned.h>
+#include <crypto/sha2.h>
+#include <cxlmem.h>
+
+#include "trace.h"
+
+#define LSA_SIZE SZ_128K
+#define FW_SIZE SZ_64M
+#define FW_SLOTS 3
+#define DEV_SIZE SZ_2G
+#define EFFECT(x) (1U << (x))
+
+#define MOCK_INJECT_DEV_MAX 8
+#define MOCK_INJECT_TEST_MAX 128
+
+static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
+
+enum cxl_command_effects {
+ CONF_CHANGE_COLD_RESET = 0,
+ CONF_CHANGE_IMMEDIATE,
+ DATA_CHANGE_IMMEDIATE,
+ POLICY_CHANGE_IMMEDIATE,
+ LOG_CHANGE_IMMEDIATE,
+ SECURITY_CHANGE_IMMEDIATE,
+ BACKGROUND_OP,
+ SECONDARY_MBOX_SUPPORTED,
+};
+
+#define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
+
+static struct cxl_cel_entry mock_cel[] = {
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
+ .effect = CXL_CMD_EFFECT_NONE,
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_FEATURES),
+ .effect = CXL_CMD_EFFECT_NONE,
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_GET_FEATURE),
+ .effect = CXL_CMD_EFFECT_NONE,
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_SET_FEATURE),
+ .effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE)),
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
+ .effect = CXL_CMD_EFFECT_NONE,
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
+ .effect = CXL_CMD_EFFECT_NONE,
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
+ .effect = CXL_CMD_EFFECT_NONE,
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
+ .effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
+ EFFECT(DATA_CHANGE_IMMEDIATE)),
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
+ .effect = CXL_CMD_EFFECT_NONE,
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_SET_SHUTDOWN_STATE),
+		.effect = cpu_to_le16(EFFECT(POLICY_CHANGE_IMMEDIATE)),
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
+ .effect = CXL_CMD_EFFECT_NONE,
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
+ .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
+ .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
+ .effect = CXL_CMD_EFFECT_NONE,
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
+ .effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
+ EFFECT(BACKGROUND_OP)),
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
+ .effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
+ EFFECT(CONF_CHANGE_IMMEDIATE)),
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_SANITIZE),
+ .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE) |
+ EFFECT(SECURITY_CHANGE_IMMEDIATE) |
+ EFFECT(BACKGROUND_OP)),
+ },
+};
+
+/* See CXL 2.0 Table 181 Get Health Info Output Payload */
+struct cxl_mbox_health_info {
+ u8 health_status;
+ u8 media_status;
+ u8 ext_status;
+ u8 life_used;
+ __le16 temperature;
+ __le32 dirty_shutdowns;
+ __le32 volatile_errors;
+ __le32 pmem_errors;
+} __packed;
+
+static struct {
+ struct cxl_mbox_get_supported_logs gsl;
+ struct cxl_gsl_entry entry;
+} mock_gsl_payload = {
+ .gsl = {
+ .entries = cpu_to_le16(1),
+ },
+ .entry = {
+ .uuid = DEFINE_CXL_CEL_UUID,
+ .size = cpu_to_le32(sizeof(mock_cel)),
+ },
+};
+
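+/* Passphrase attempts allowed before the pass-limit (PLIMIT) state is set */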
+#define PASS_TRY_LIMIT 3
+
+#define CXL_TEST_EVENT_CNT_MAX 15
+
+/* Maximum number of events returned at a time, to simulate partial reads. */
+#define CXL_TEST_EVENT_RET_MAX 4
+
+struct mock_event_log {
+	u16 clear_idx;		/* next record expected by Clear Event Records */
+	u16 cur_idx;		/* next record returned by Get Event Records */
+	u16 nr_events;		/* number of records in the log */
+	u16 nr_overflow;	/* records dropped because the log was full */
+	u16 overflow_reset;	/* overflow count restored on log reset */
+ struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
+};
+
+struct mock_event_store {
+ struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
+ u32 ev_status;
+};
+
+struct vendor_test_feat {
+ __le32 data;
+} __packed;
+
+struct cxl_mockmem_data {
+ void *lsa;
+ void *fw;
+ int fw_slot;
+ int fw_staged;
+ size_t fw_size;
+ u32 security_state;
+ u8 user_pass[NVDIMM_PASSPHRASE_LEN];
+ u8 master_pass[NVDIMM_PASSPHRASE_LEN];
+ int user_limit;
+ int master_limit;
+ struct mock_event_store mes;
+ struct cxl_memdev_state *mds;
+ u8 event_buf[SZ_4K];
+ u64 timestamp;
+ unsigned long sanitize_timeout;
+ struct vendor_test_feat test_feat;
+ u8 shutdown_state;
+};
+
+static struct mock_event_log *event_find_log(struct device *dev, int log_type)
+{
+ struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+
+ if (log_type >= CXL_EVENT_TYPE_MAX)
+ return NULL;
+ return &mdata->mes.mock_logs[log_type];
+}
+
+static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
+{
+ return log->events[log->cur_idx];
+}
+
+static void event_reset_log(struct mock_event_log *log)
+{
+ log->cur_idx = 0;
+ log->clear_idx = 0;
+ log->nr_overflow = log->overflow_reset;
+}
+
+/* Handles can never be 0; use 1-based indexing for handles */
+static u16 event_get_clear_handle(struct mock_event_log *log)
+{
+ return log->clear_idx + 1;
+}
+
+/* Handles can never be 0; use 1-based indexing for handles */
+static __le16 event_get_cur_event_handle(struct mock_event_log *log)
+{
+ u16 cur_handle = log->cur_idx + 1;
+
+ return cpu_to_le16(cur_handle);
+}
+
+static bool event_log_empty(struct mock_event_log *log)
+{
+ return log->cur_idx == log->nr_events;
+}
+
+static void mes_add_event(struct mock_event_store *mes,
+ enum cxl_event_log_type log_type,
+ struct cxl_event_record_raw *event)
+{
+ struct mock_event_log *log;
+
+ if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
+ return;
+
+ log = &mes->mock_logs[log_type];
+
+ if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
+ log->nr_overflow++;
+ log->overflow_reset = log->nr_overflow;
+ return;
+ }
+
+ log->events[log->nr_events] = event;
+ log->nr_events++;
+}
+
+/*
+ * Vary the number of events returned to simulate events occurring while the
+ * logs are being read.
+ */
+static atomic_t event_counter = ATOMIC_INIT(0);
+
+static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_get_event_payload *pl;
+ struct mock_event_log *log;
+ int ret_limit;
+ u8 log_type;
+ int i;
+
+ if (cmd->size_in != sizeof(log_type))
+ return -EINVAL;
+
+ /* Vary return limit from 1 to CXL_TEST_EVENT_RET_MAX */
+ ret_limit = (atomic_inc_return(&event_counter) % CXL_TEST_EVENT_RET_MAX) + 1;
+
+ if (cmd->size_out < struct_size(pl, records, ret_limit))
+ return -EINVAL;
+
+ log_type = *((u8 *)cmd->payload_in);
+ if (log_type >= CXL_EVENT_TYPE_MAX)
+ return -EINVAL;
+
+ memset(cmd->payload_out, 0, struct_size(pl, records, 0));
+
+ log = event_find_log(dev, log_type);
+ if (!log || event_log_empty(log))
+ return 0;
+
+ pl = cmd->payload_out;
+
+ for (i = 0; i < ret_limit && !event_log_empty(log); i++) {
+ memcpy(&pl->records[i], event_get_current(log),
+ sizeof(pl->records[i]));
+ pl->records[i].event.generic.hdr.handle =
+ event_get_cur_event_handle(log);
+ log->cur_idx++;
+ }
+
+ cmd->size_out = struct_size(pl, records, i);
+ pl->record_count = cpu_to_le16(i);
+ if (!event_log_empty(log))
+ pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
+
+ if (log->nr_overflow) {
+ u64 ns;
+
+ pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
+ pl->overflow_err_count = cpu_to_le16(log->nr_overflow);
+ ns = ktime_get_real_ns();
+ ns -= 5000000000; /* 5s ago */
+ pl->first_overflow_timestamp = cpu_to_le64(ns);
+ ns = ktime_get_real_ns();
+ ns -= 1000000000; /* 1s ago */
+ pl->last_overflow_timestamp = cpu_to_le64(ns);
+ }
+
+ return 0;
+}
+
+static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
+ struct mock_event_log *log;
+ u8 log_type = pl->event_log;
+ u16 handle;
+ int nr;
+
+ if (log_type >= CXL_EVENT_TYPE_MAX)
+ return -EINVAL;
+
+ log = event_find_log(dev, log_type);
+ if (!log)
+ return 0; /* No mock data in this log */
+
+ /*
+	 * AFAICS the specification does not actually forbid this (the host
+	 * could 'guess' handles and clear them in order). However, it is
+	 * not good host behavior, so test for it.
+ */
+ if (log->clear_idx + pl->nr_recs > log->cur_idx) {
+ dev_err(dev,
+ "Attempting to clear more events than returned!\n");
+ return -EINVAL;
+ }
+
+ /* Check handle order prior to clearing events */
+ for (nr = 0, handle = event_get_clear_handle(log);
+ nr < pl->nr_recs;
+ nr++, handle++) {
+ if (handle != le16_to_cpu(pl->handles[nr])) {
+ dev_err(dev, "Clearing events out of order\n");
+ return -EINVAL;
+ }
+ }
+
+ if (log->nr_overflow)
+ log->nr_overflow = 0;
+
+ /* Clear events */
+ log->clear_idx += pl->nr_recs;
+ return 0;
+}
+
+static void cxl_mock_event_trigger(struct device *dev)
+{
+ struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+ struct mock_event_store *mes = &mdata->mes;
+ int i;
+
+ for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
+ struct mock_event_log *log;
+
+ log = event_find_log(dev, i);
+ if (log)
+ event_reset_log(log);
+ }
+
+ cxl_mem_get_event_records(mdata->mds, mes->ev_status);
+}
+
+struct cxl_event_record_raw maint_needed = {
+ .id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
+ 0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
+ .event.generic = {
+ .hdr = {
+ .length = sizeof(struct cxl_event_record_raw),
+ .flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
+ /* .handle = Set dynamically */
+ .related_handle = cpu_to_le16(0xa5b6),
+ },
+ .data = { 0xDE, 0xAD, 0xBE, 0xEF },
+ },
+};
+
+struct cxl_event_record_raw hardware_replace = {
+ .id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
+ 0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
+ .event.generic = {
+ .hdr = {
+ .length = sizeof(struct cxl_event_record_raw),
+ .flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
+ /* .handle = Set dynamically */
+ .related_handle = cpu_to_le16(0xb6a5),
+ },
+ .data = { 0xDE, 0xAD, 0xBE, 0xEF },
+ },
+};
+
+struct cxl_test_gen_media {
+ uuid_t id;
+ struct cxl_event_gen_media rec;
+} __packed;
+
+struct cxl_test_gen_media gen_media = {
+ .id = CXL_EVENT_GEN_MEDIA_UUID,
+ .rec = {
+ .media_hdr = {
+ .hdr = {
+ .length = sizeof(struct cxl_test_gen_media),
+ .flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
+ /* .handle = Set dynamically */
+ .related_handle = cpu_to_le16(0),
+ },
+ .phys_addr = cpu_to_le64(0x2000),
+ .descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
+ .type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
+ .transaction_type = CXL_GMER_TRANS_HOST_WRITE,
+ /* .validity_flags = <set below> */
+ .channel = 1,
+ .rank = 30,
+ },
+ .component_id = { 0x3, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
+ .cme_threshold_ev_flags = 3,
+ .cme_count = { 33, 0, 0 },
+ .sub_type = 0x2,
+ },
+};
+
+struct cxl_test_dram {
+ uuid_t id;
+ struct cxl_event_dram rec;
+} __packed;
+
+struct cxl_test_dram dram = {
+ .id = CXL_EVENT_DRAM_UUID,
+ .rec = {
+ .media_hdr = {
+ .hdr = {
+ .length = sizeof(struct cxl_test_dram),
+ .flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
+ /* .handle = Set dynamically */
+ .related_handle = cpu_to_le16(0),
+ },
+ .phys_addr = cpu_to_le64(0x8000),
+ .descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
+ .type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
+ .transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
+ /* .validity_flags = <set below> */
+ .channel = 1,
+ },
+ .bank_group = 5,
+ .bank = 2,
+ .column = {0xDE, 0xAD},
+ .component_id = { 0x1, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
+ .sub_channel = 8,
+ .cme_threshold_ev_flags = 2,
+ .cvme_count = { 14, 0, 0 },
+ .sub_type = 0x5,
+ },
+};
+
+struct cxl_test_mem_module {
+ uuid_t id;
+ struct cxl_event_mem_module rec;
+} __packed;
+
+struct cxl_test_mem_module mem_module = {
+ .id = CXL_EVENT_MEM_MODULE_UUID,
+ .rec = {
+ .hdr = {
+ .length = sizeof(struct cxl_test_mem_module),
+ /* .handle = Set dynamically */
+ .related_handle = cpu_to_le16(0),
+ },
+ .event_type = CXL_MMER_TEMP_CHANGE,
+ .info = {
+ .health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
+ .media_status = CXL_DHI_MS_ALL_DATA_LOST,
+ .add_status = (CXL_DHI_AS_CRITICAL << 2) |
+ (CXL_DHI_AS_WARNING << 4) |
+ (CXL_DHI_AS_WARNING << 5),
+ .device_temp = { 0xDE, 0xAD},
+ .dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
+ .cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
+ .cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
+ },
+ /* .validity_flags = <set below> */
+ .component_id = { 0x2, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
+ .event_sub_type = 0x3,
+ },
+};
+
+static int mock_set_timestamp(struct cxl_dev_state *cxlds,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
+ struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
+
+ if (cmd->size_in != sizeof(*ts))
+ return -EINVAL;
+
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ mdata->timestamp = le64_to_cpu(ts->timestamp);
+ return 0;
+}
+
+static void cxl_mock_add_event_logs(struct mock_event_store *mes)
+{
+ put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK |
+ CXL_GMER_VALID_COMPONENT | CXL_GMER_VALID_COMPONENT_ID_FORMAT,
+ &gen_media.rec.media_hdr.validity_flags);
+
+ put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
+ CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN | CXL_DER_VALID_SUB_CHANNEL |
+ CXL_DER_VALID_COMPONENT | CXL_DER_VALID_COMPONENT_ID_FORMAT,
+ &dram.rec.media_hdr.validity_flags);
+
+ put_unaligned_le16(CXL_MMER_VALID_COMPONENT | CXL_MMER_VALID_COMPONENT_ID_FORMAT,
+ &mem_module.rec.validity_flags);
+
+ mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
+ mes_add_event(mes, CXL_EVENT_TYPE_INFO,
+ (struct cxl_event_record_raw *)&gen_media);
+ mes_add_event(mes, CXL_EVENT_TYPE_INFO,
+ (struct cxl_event_record_raw *)&mem_module);
+ mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;
+
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
+ (struct cxl_event_record_raw *)&dram);
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
+ (struct cxl_event_record_raw *)&gen_media);
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
+ (struct cxl_event_record_raw *)&mem_module);
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
+ (struct cxl_event_record_raw *)&dram);
+ /* Overflow this log */
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
+ mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
+ mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;
+
+ mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
+ mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
+ (struct cxl_event_record_raw *)&dram);
+ mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
+}
+
+static int mock_gsl(struct cxl_mbox_cmd *cmd)
+{
+ if (cmd->size_out < sizeof(mock_gsl_payload))
+ return -EINVAL;
+
+ memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
+ cmd->size_out = sizeof(mock_gsl_payload);
+
+ return 0;
+}
+
+static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+ struct cxl_mbox_get_log *gl = cmd->payload_in;
+ u32 offset = le32_to_cpu(gl->offset);
+ u32 length = le32_to_cpu(gl->length);
+ uuid_t uuid = DEFINE_CXL_CEL_UUID;
+ void *data = &mock_cel;
+
+ if (cmd->size_in < sizeof(*gl))
+ return -EINVAL;
+ if (length > cxl_mbox->payload_size)
+ return -EINVAL;
+ if (offset + length > sizeof(mock_cel))
+ return -EINVAL;
+ if (!uuid_equal(&gl->uuid, &uuid))
+ return -EINVAL;
+ if (length > cmd->size_out)
+ return -EINVAL;
+
+ memcpy(cmd->payload_out, data + offset, length);
+
+ return 0;
+}
+
+static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_identify id = {
+ .fw_revision = { "mock fw v1 " },
+ .total_capacity =
+ cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
+ .volatile_capacity =
+ cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
+ };
+
+ if (cmd->size_out < sizeof(id))
+ return -EINVAL;
+
+ memcpy(cmd->payload_out, &id, sizeof(id));
+
+ return 0;
+}
+
+static int mock_id(struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_identify id = {
+ .fw_revision = { "mock fw v1 " },
+ .lsa_size = cpu_to_le32(LSA_SIZE),
+ .partition_align =
+ cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
+ .total_capacity =
+ cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
+ .inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
+ };
+
+ put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
+
+ if (cmd->size_out < sizeof(id))
+ return -EINVAL;
+
+ memcpy(cmd->payload_out, &id, sizeof(id));
+
+ return 0;
+}
+
+static int mock_partition_info(struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_get_partition_info pi = {
+ .active_volatile_cap =
+ cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
+ .active_persistent_cap =
+ cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
+ };
+
+ if (cmd->size_out < sizeof(pi))
+ return -EINVAL;
+
+ memcpy(cmd->payload_out, &pi, sizeof(pi));
+
+ return 0;
+}
+
+void cxl_mockmem_sanitize_work(struct work_struct *work)
+{
+ struct cxl_memdev_state *mds =
+ container_of(work, typeof(*mds), security.poll_dwork.work);
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+
+ mutex_lock(&cxl_mbox->mbox_mutex);
+ if (mds->security.sanitize_node)
+ sysfs_notify_dirent(mds->security.sanitize_node);
+ mds->security.sanitize_active = false;
+ mutex_unlock(&cxl_mbox->mbox_mutex);
+
+ dev_dbg(mds->cxlds.dev, "sanitize complete\n");
+}
+
+static int mock_sanitize(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_memdev_state *mds = mdata->mds;
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+ int rc = 0;
+
+ if (cmd->size_in != 0)
+ return -EINVAL;
+
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ mutex_lock(&cxl_mbox->mbox_mutex);
+ if (schedule_delayed_work(&mds->security.poll_dwork,
+ msecs_to_jiffies(mdata->sanitize_timeout))) {
+ mds->security.sanitize_active = true;
+ dev_dbg(mds->cxlds.dev, "sanitize issued\n");
+	} else {
+		rc = -EBUSY;
+	}
+ mutex_unlock(&cxl_mbox->mbox_mutex);
+
+ return rc;
+}
+
+static int mock_secure_erase(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ if (cmd->size_in != 0)
+ return -EINVAL;
+
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int mock_get_security_state(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ if (cmd->size_in)
+ return -EINVAL;
+
+ if (cmd->size_out != sizeof(u32))
+ return -EINVAL;
+
+ memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
+
+ return 0;
+}
+
+static void master_plimit_check(struct cxl_mockmem_data *mdata)
+{
+ if (mdata->master_limit == PASS_TRY_LIMIT)
+ return;
+ mdata->master_limit++;
+ if (mdata->master_limit == PASS_TRY_LIMIT)
+ mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
+}
+
+static void user_plimit_check(struct cxl_mockmem_data *mdata)
+{
+ if (mdata->user_limit == PASS_TRY_LIMIT)
+ return;
+ mdata->user_limit++;
+ if (mdata->user_limit == PASS_TRY_LIMIT)
+ mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
+}
+
+static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_set_pass *set_pass;
+
+ if (cmd->size_in != sizeof(*set_pass))
+ return -EINVAL;
+
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ set_pass = cmd->payload_in;
+ switch (set_pass->type) {
+ case CXL_PMEM_SEC_PASS_MASTER:
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+ /*
+		 * CXL spec rev 3.0 8.2.9.8.6.2: the master passphrase shall only
+		 * be set in the security disabled state when the user passphrase
+		 * is not set.
+ */
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+ if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
+ master_plimit_check(mdata);
+ cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
+ return -ENXIO;
+ }
+ memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
+ mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
+ return 0;
+
+ case CXL_PMEM_SEC_PASS_USER:
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+ if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
+ user_plimit_check(mdata);
+ cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
+ return -ENXIO;
+ }
+ memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
+ mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
+ return 0;
+
+ default:
+ cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
+ }
+ return -EINVAL;
+}
+
+static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_disable_pass *dis_pass;
+
+ if (cmd->size_in != sizeof(*dis_pass))
+ return -EINVAL;
+
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ dis_pass = cmd->payload_in;
+ switch (dis_pass->type) {
+ case CXL_PMEM_SEC_PASS_MASTER:
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
+ master_plimit_check(mdata);
+ cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
+ return -ENXIO;
+ }
+
+ mdata->master_limit = 0;
+ memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
+ mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
+ return 0;
+
+ case CXL_PMEM_SEC_PASS_USER:
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
+ user_plimit_check(mdata);
+ cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
+ return -ENXIO;
+ }
+
+ mdata->user_limit = 0;
+ memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
+ mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
+ CXL_PMEM_SEC_STATE_LOCKED);
+ return 0;
+
+ default:
+ cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mock_freeze_security(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ if (cmd->size_in != 0)
+ return -EINVAL;
+
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
+ return 0;
+
+ mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
+ return 0;
+}
+
+static int mock_unlock_security(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
+ return -EINVAL;
+
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
+ if (++mdata->user_limit == PASS_TRY_LIMIT)
+ mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
+ cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
+ return -ENXIO;
+ }
+
+ mdata->user_limit = 0;
+ mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
+ return 0;
+}
+
+static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_pass_erase *erase;
+
+ if (cmd->size_in != sizeof(*erase))
+ return -EINVAL;
+
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ erase = cmd->payload_in;
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
+ erase->type == CXL_PMEM_SEC_PASS_USER) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
+ erase->type == CXL_PMEM_SEC_PASS_MASTER) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ switch (erase->type) {
+ case CXL_PMEM_SEC_PASS_MASTER:
+ /*
+ * The spec does not clearly define the behavior of the scenario
+ * where a master passphrase is passed in while the master
+ * passphrase is not set and user passphrase is not set. The
+ * code will take the assumption that it will behave the same
+ * as a CXL secure erase command without passphrase (0x4401).
+ */
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
+ if (memcmp(mdata->master_pass, erase->pass,
+ NVDIMM_PASSPHRASE_LEN)) {
+ master_plimit_check(mdata);
+ cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
+ return -ENXIO;
+ }
+ mdata->master_limit = 0;
+ mdata->user_limit = 0;
+ mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
+ memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
+ mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
+ } else {
+ /*
+ * CXL rev3 8.2.9.8.6.3 Disable Passphrase
+ * When master passphrase is disabled, the device shall
+ * return Invalid Input for the Passphrase Secure Erase
+ * command with master passphrase.
+ */
+ return -EINVAL;
+ }
+ /* Scramble encryption keys so that data is effectively erased */
+ break;
+ case CXL_PMEM_SEC_PASS_USER:
+ /*
+ * The spec does not clearly define the behavior of the scenario
+ * where a user passphrase is passed in while the user
+ * passphrase is not set. The code will take the assumption that
+ * it will behave the same as a CXL secure erase command without
+ * passphrase (0x4401).
+ */
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
+ if (memcmp(mdata->user_pass, erase->pass,
+ NVDIMM_PASSPHRASE_LEN)) {
+ user_plimit_check(mdata);
+ cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
+ return -ENXIO;
+ }
+ mdata->user_limit = 0;
+ mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
+ memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
+ }
+
+ /*
+ * CXL rev3 Table 8-118
+ * If user passphrase is not set or supported by device, current
+ * passphrase value is ignored. Will make the assumption that
+ * the operation will proceed as secure erase w/o passphrase
+ * since spec is not explicit.
+ */
+
+ /* Scramble encryption keys so that data is effectively erased */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mock_get_lsa(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
+ void *lsa = mdata->lsa;
+ u32 offset, length;
+
+ if (sizeof(*get_lsa) > cmd->size_in)
+ return -EINVAL;
+ offset = le32_to_cpu(get_lsa->offset);
+ length = le32_to_cpu(get_lsa->length);
+ if (offset + length > LSA_SIZE)
+ return -EINVAL;
+ if (length > cmd->size_out)
+ return -EINVAL;
+
+ memcpy(cmd->payload_out, lsa + offset, length);
+ return 0;
+}
+
+static int mock_set_lsa(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
+ void *lsa = mdata->lsa;
+ u32 offset, length;
+
+ if (sizeof(*set_lsa) > cmd->size_in)
+ return -EINVAL;
+ offset = le32_to_cpu(set_lsa->offset);
+ length = cmd->size_in - sizeof(*set_lsa);
+ if (offset + length > LSA_SIZE)
+ return -EINVAL;
+
+ memcpy(lsa + offset, &set_lsa->data[0], length);
+ return 0;
+}
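Both LSA handlers window into one flat LSA_SIZE buffer; Set LSA carries its data inline after the header, which is why the handler derives the length from cmd->size_in rather than from a payload field. A hedged sketch of composing such a command (helper name and allocation strategy are illustrative):

	static int example_set_lsa(struct cxl_mailbox *cxl_mbox, u32 offset,
				   const void *data, u32 len)
	{
		struct cxl_mbox_set_lsa *set_lsa;
		struct cxl_mbox_cmd cmd;
		int rc;

		set_lsa = kzalloc(struct_size(set_lsa, data, len), GFP_KERNEL);
		if (!set_lsa)
			return -ENOMEM;

		set_lsa->offset = cpu_to_le32(offset);
		memcpy(set_lsa->data, data, len);

		cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_SET_LSA,
			.payload_in = set_lsa,
			/* the handler computes len as size_in - header */
			.size_in = struct_size(set_lsa, data, len),
		};
		rc = cxl_mock_mbox_send(cxl_mbox, &cmd);
		kfree(set_lsa);
		return rc;
	}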
+
+static int mock_health_info(struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_health_info health_info = {
+ /* set flags for maint needed, perf degraded, hw replacement */
+ .health_status = 0x7,
+ /* set media status to "All Data Lost" */
+ .media_status = 0x3,
+ /*
+ * set ext_status flags for:
+ * ext_life_used: normal,
+ * ext_temperature: critical,
+ * ext_corrected_volatile: warning,
+ * ext_corrected_persistent: normal,
+ */
+ .ext_status = 0x18,
+ .life_used = 15,
+ .temperature = cpu_to_le16(25),
+ .dirty_shutdowns = cpu_to_le32(10),
+ .volatile_errors = cpu_to_le32(20),
+ .pmem_errors = cpu_to_le32(30),
+ };
+
+ if (cmd->size_out < sizeof(health_info))
+ return -EINVAL;
+
+ memcpy(cmd->payload_out, &health_info, sizeof(health_info));
+ return 0;
+}
+
+static int mock_set_shutdown_state(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_set_shutdown_state_in *ss = cmd->payload_in;
+
+ if (cmd->size_in != sizeof(*ss))
+ return -EINVAL;
+
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ mdata->shutdown_state = ss->state;
+ return 0;
+}
+
+static struct mock_poison {
+ struct cxl_dev_state *cxlds;
+ u64 dpa;
+} mock_poison_list[MOCK_INJECT_TEST_MAX];
+
+static struct cxl_mbox_poison_out *
+cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
+{
+ struct cxl_mbox_poison_out *po;
+ int nr_records = 0;
+ u64 dpa;
+
+ po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
+ if (!po)
+ return NULL;
+
+ for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
+ if (mock_poison_list[i].cxlds != cxlds)
+ continue;
+ if (mock_poison_list[i].dpa < offset ||
+ mock_poison_list[i].dpa > offset + length - 1)
+ continue;
+
+ dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
+ po->record[nr_records].address = cpu_to_le64(dpa);
+ po->record[nr_records].length = cpu_to_le32(1);
+ nr_records++;
+ if (nr_records == poison_inject_dev_max)
+ break;
+ }
+
+ /* Always return count, even when zero */
+ po->count = cpu_to_le16(nr_records);
+
+ return po;
+}
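Worth noting in the loop above: the injected-source tag is folded into the low bits of the returned address, which the poison record format leaves free of DPA bits. A sketch of splitting a record back apart; the mask names assume the driver's cxlmem.h definitions and <linux/bitfield.h>:

	/* Sketch: split a returned poison record into DPA and source */
	static void example_decode_poison(const struct cxl_poison_record *rec)
	{
		u64 addr = le64_to_cpu(rec->address);
		u64 dpa = addr & CXL_POISON_START_MASK;
		unsigned int source = FIELD_GET(CXL_POISON_SOURCE_MASK, addr);

		pr_debug("dpa: %#llx source: %u len: %u\n", dpa, source,
			 le32_to_cpu(rec->length));
	}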
+
+static int mock_get_poison(struct cxl_dev_state *cxlds,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_poison_in *pi = cmd->payload_in;
+ struct cxl_mbox_poison_out *po;
+ u64 offset = le64_to_cpu(pi->offset);
+ u64 length = le64_to_cpu(pi->length);
+ int nr_records;
+
+ po = cxl_get_injected_po(cxlds, offset, length);
+ if (!po)
+ return -ENOMEM;
+ nr_records = le16_to_cpu(po->count);
+ memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
+ cmd->size_out = struct_size(po, record, nr_records);
+ kfree(po);
+
+ return 0;
+}
+
+static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
+{
+ int count = 0;
+
+ for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
+ if (mock_poison_list[i].cxlds == cxlds)
+ count++;
+ }
+ return (count >= poison_inject_dev_max);
+}
+
+static int mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
+{
+ /* Return EBUSY to match the CXL driver handling */
+ if (mock_poison_dev_max_injected(cxlds)) {
+ dev_dbg(cxlds->dev,
+ "Device poison injection limit has been reached: %d\n",
+ poison_inject_dev_max);
+ return -EBUSY;
+ }
+
+ for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
+ if (!mock_poison_list[i].cxlds) {
+ mock_poison_list[i].cxlds = cxlds;
+ mock_poison_list[i].dpa = dpa;
+ return 0;
+ }
+ }
+ dev_dbg(cxlds->dev,
+ "Mock test poison injection limit has been reached: %d\n",
+ MOCK_INJECT_TEST_MAX);
+
+ return -ENXIO;
+}
+
+static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
+{
+ for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
+ if (mock_poison_list[i].cxlds == cxlds &&
+ mock_poison_list[i].dpa == dpa)
+ return true;
+ }
+ return false;
+}
+
+static int mock_inject_poison(struct cxl_dev_state *cxlds,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_inject_poison *pi = cmd->payload_in;
+ u64 dpa = le64_to_cpu(pi->address);
+
+ if (mock_poison_found(cxlds, dpa)) {
+ /* Not an error to inject poison if already poisoned */
+ dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
+ return 0;
+ }
+
+ return mock_poison_add(cxlds, dpa);
+}
+
+static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
+{
+ for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
+ if (mock_poison_list[i].cxlds == cxlds &&
+ mock_poison_list[i].dpa == dpa) {
+ mock_poison_list[i].cxlds = NULL;
+ return true;
+ }
+ }
+ return false;
+}
+
+static int mock_clear_poison(struct cxl_dev_state *cxlds,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_clear_poison *pi = cmd->payload_in;
+ u64 dpa = le64_to_cpu(pi->address);
+
+ /*
+ * A real CXL device will write pi->write_data to the address
+ * being cleared. In this mock, just delete this address from
+ * the mock poison list.
+ */
+ if (!mock_poison_del(cxlds, dpa))
+ dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
+
+ return 0;
+}
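Taken together, the helpers above give the mock a symmetric poison lifecycle around the shared mock_poison_list. A small illustrative round trip (not part of the patch):

	static void example_poison_round_trip(struct cxl_dev_state *cxlds)
	{
		u64 dpa = SZ_2M;	/* arbitrary DPA for the example */

		WARN_ON(mock_poison_add(cxlds, dpa));		/* inject */
		WARN_ON(!mock_poison_found(cxlds, dpa));	/* report */
		WARN_ON(!mock_poison_del(cxlds, dpa));		/* clear */
	}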
+
+static bool mock_poison_list_empty(void)
+{
+ for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
+ if (mock_poison_list[i].cxlds)
+ return false;
+ }
+ return true;
+}
+
+static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
+}
+
+static ssize_t poison_inject_max_store(struct device_driver *drv,
+ const char *buf, size_t len)
+{
+ int val;
+
+ if (kstrtoint(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (!mock_poison_list_empty())
+ return -EBUSY;
+
+ if (val <= MOCK_INJECT_TEST_MAX)
+ poison_inject_dev_max = val;
+ else
+ return -EINVAL;
+
+ return len;
+}
+
+static DRIVER_ATTR_RW(poison_inject_max);
+
+static struct attribute *cxl_mock_mem_core_attrs[] = {
+ &driver_attr_poison_inject_max.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(cxl_mock_mem_core);
+
+static int mock_fw_info(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_get_fw_info fw_info = {
+ .num_slots = FW_SLOTS,
+ .slot_info = (mdata->fw_slot & 0x7) |
+ ((mdata->fw_staged & 0x7) << 3),
+ .activation_cap = 0,
+ };
+
+ strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
+ strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
+ strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
+ strcpy(fw_info.slot_4_revision, "");
+
+ if (cmd->size_out < sizeof(fw_info))
+ return -EINVAL;
+
+ memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
+ return 0;
+}
+
+static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
+ void *fw = mdata->fw;
+ size_t offset, length;
+
+ offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
+ length = cmd->size_in - sizeof(*transfer);
+ if (offset + length > FW_SIZE)
+ return -EINVAL;
+
+ switch (transfer->action) {
+ case CXL_FW_TRANSFER_ACTION_FULL:
+ if (offset != 0)
+ return -EINVAL;
+ fallthrough;
+ case CXL_FW_TRANSFER_ACTION_END:
+ if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
+ return -EINVAL;
+ mdata->fw_size = offset + length;
+ break;
+ case CXL_FW_TRANSFER_ACTION_INITIATE:
+ case CXL_FW_TRANSFER_ACTION_CONTINUE:
+ break;
+ case CXL_FW_TRANSFER_ACTION_ABORT:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(fw + offset, transfer->data, length);
+ usleep_range(1500, 2000);
+ return 0;
+}
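Note that transfer->offset above is not a byte offset: it counts CXL_FW_TRANSFER_ALIGNMENT-sized units, so the handler multiplies before indexing into the firmware buffer. A tiny illustration (the 128-byte alignment in the comment is an assumption; the driver header defines the authoritative value):

	/* Hypothetical helper mirroring the byte-offset math above */
	static size_t fw_packet_to_byte_offset(u32 packet_offset)
	{
		/* e.g. with 128-byte alignment, packet 4 maps to byte 512 */
		return (size_t)packet_offset * CXL_FW_TRANSFER_ALIGNMENT;
	}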
+
+static int mock_activate_fw(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_activate_fw *activate = cmd->payload_in;
+
+ if (activate->slot == 0 || activate->slot > FW_SLOTS)
+ return -EINVAL;
+
+ switch (activate->action) {
+ case CXL_FW_ACTIVATE_ONLINE:
+ mdata->fw_slot = activate->slot;
+ mdata->fw_staged = 0;
+ return 0;
+ case CXL_FW_ACTIVATE_OFFLINE:
+ mdata->fw_staged = activate->slot;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+#define CXL_VENDOR_FEATURE_TEST \
+ UUID_INIT(0xffffffff, 0xffff, 0xffff, 0xff, 0xff, 0xff, 0xff, 0xff, \
+ 0xff, 0xff, 0xff)
+
+static void fill_feature_vendor_test(struct cxl_feat_entry *feat)
+{
+ feat->uuid = CXL_VENDOR_FEATURE_TEST;
+ feat->id = 0;
+ feat->get_feat_size = cpu_to_le16(0x4);
+ feat->set_feat_size = cpu_to_le16(0x4);
+ feat->flags = cpu_to_le32(CXL_FEATURE_F_CHANGEABLE |
+ CXL_FEATURE_F_DEFAULT_SEL |
+ CXL_FEATURE_F_SAVED_SEL);
+ feat->get_feat_ver = 1;
+ feat->set_feat_ver = 1;
+ feat->effects = cpu_to_le16(CXL_CMD_CONFIG_CHANGE_COLD_RESET |
+ CXL_CMD_EFFECTS_VALID);
+}
+
+#define MAX_CXL_TEST_FEATS 1
+
+static int mock_get_test_feature(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct vendor_test_feat *output = cmd->payload_out;
+ struct cxl_mbox_get_feat_in *input = cmd->payload_in;
+ u16 offset = le16_to_cpu(input->offset);
+ u16 count = le16_to_cpu(input->count);
+ u8 *ptr;
+
+ if (offset > sizeof(*output)) {
+ cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
+ return -EINVAL;
+ }
+
+ if (offset + count > sizeof(*output)) {
+ cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
+ return -EINVAL;
+ }
+
+ ptr = (u8 *)&mdata->test_feat + offset;
+ memcpy((u8 *)output + offset, ptr, count);
+
+ return 0;
+}
+
+static int mock_get_feature(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_get_feat_in *input = cmd->payload_in;
+
+ if (uuid_equal(&input->uuid, &CXL_VENDOR_FEATURE_TEST))
+ return mock_get_test_feature(mdata, cmd);
+
+ cmd->return_code = CXL_MBOX_CMD_RC_UNSUPPORTED;
+
+ return -EOPNOTSUPP;
+}
+
+static int mock_set_test_feature(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_set_feat_in *input = cmd->payload_in;
+ struct vendor_test_feat *test =
+ (struct vendor_test_feat *)input->feat_data;
+ u32 action;
+
+ action = FIELD_GET(CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK,
+ le32_to_cpu(input->hdr.flags));
+ /*
+ * While it is spec compliant to support other set actions, it is not
+ * necessary to add the complication in the emulation currently. Reject
+ * anything besides full xfer.
+ */
+ if (action != CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER) {
+ cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
+ return -EINVAL;
+ }
+
+ /* Offset should be reserved when doing full transfer */
+ if (input->hdr.offset) {
+ cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
+ return -EINVAL;
+ }
+
+ memcpy(&mdata->test_feat.data, &test->data, sizeof(u32));
+
+ return 0;
+}
+
+static int mock_set_feature(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_set_feat_in *input = cmd->payload_in;
+
+ if (uuid_equal(&input->hdr.uuid, &CXL_VENDOR_FEATURE_TEST))
+ return mock_set_test_feature(mdata, cmd);
+
+ cmd->return_code = CXL_MBOX_CMD_RC_UNSUPPORTED;
+
+ return -EOPNOTSUPP;
+}
+
+static int mock_get_supported_features(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_get_sup_feats_in *in = cmd->payload_in;
+ struct cxl_mbox_get_sup_feats_out *out = cmd->payload_out;
+ struct cxl_feat_entry *feat;
+ u16 start_idx, count;
+
+ if (cmd->size_out < sizeof(*out)) {
+ cmd->return_code = CXL_MBOX_CMD_RC_PAYLOADLEN;
+ return -EINVAL;
+ }
+
+ /*
+ * Current emulation only supports 1 feature
+ */
+ start_idx = le16_to_cpu(in->start_idx);
+ if (start_idx != 0) {
+ cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
+ return -EINVAL;
+ }
+
+ count = le16_to_cpu(in->count);
+ if (count < struct_size(out, ents, 0)) {
+ cmd->return_code = CXL_MBOX_CMD_RC_PAYLOADLEN;
+ return -EINVAL;
+ }
+
+ out->supported_feats = cpu_to_le16(MAX_CXL_TEST_FEATS);
+ cmd->return_code = 0;
+ if (count < struct_size(out, ents, MAX_CXL_TEST_FEATS)) {
+ out->num_entries = 0;
+ return 0;
+ }
+
+ out->num_entries = cpu_to_le16(MAX_CXL_TEST_FEATS);
+ feat = out->ents;
+ fill_feature_vendor_test(feat);
+
+ return 0;
+}
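The handler above implements the usual two-step Get Supported Features contract: a caller whose buffer only covers the header still learns supported_feats, with num_entries left at zero, and can then retry with room for the entries. A sketch of the first step; the field widths follow the le16 accessors used above and should be treated as assumptions:

	static int example_probe_feature_count(struct cxl_mockmem_data *mdata)
	{
		struct cxl_mbox_get_sup_feats_in in = { 0 };
		struct cxl_mbox_get_sup_feats_out hdr = { 0 };
		struct cxl_mbox_cmd cmd = {
			.payload_in = &in,
			.size_in = sizeof(in),
			.payload_out = &hdr,
			.size_out = sizeof(hdr),
		};
		int rc;

		in.start_idx = cpu_to_le16(0);
		in.count = cpu_to_le16(sizeof(hdr));	/* header only */
		rc = mock_get_supported_features(mdata, &cmd);
		if (rc)
			return rc;
		/* num_entries is 0 here; supported_feats holds the total */
		return le16_to_cpu(hdr.supported_feats);
	}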
+
+static int cxl_mock_mbox_send(struct cxl_mailbox *cxl_mbox,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct device *dev = cxl_mbox->host;
+ struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+ struct cxl_memdev_state *mds = mdata->mds;
+ struct cxl_dev_state *cxlds = &mds->cxlds;
+ int rc = -EIO;
+
+ switch (cmd->opcode) {
+ case CXL_MBOX_OP_SET_TIMESTAMP:
+ rc = mock_set_timestamp(cxlds, cmd);
+ break;
+ case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
+ rc = mock_gsl(cmd);
+ break;
+ case CXL_MBOX_OP_GET_LOG:
+ rc = mock_get_log(mds, cmd);
+ break;
+ case CXL_MBOX_OP_IDENTIFY:
+ if (cxlds->rcd)
+ rc = mock_rcd_id(cmd);
+ else
+ rc = mock_id(cmd);
+ break;
+ case CXL_MBOX_OP_GET_LSA:
+ rc = mock_get_lsa(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_GET_PARTITION_INFO:
+ rc = mock_partition_info(cmd);
+ break;
+ case CXL_MBOX_OP_GET_EVENT_RECORD:
+ rc = mock_get_event(dev, cmd);
+ break;
+ case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
+ rc = mock_clear_event(dev, cmd);
+ break;
+ case CXL_MBOX_OP_SET_LSA:
+ rc = mock_set_lsa(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_GET_HEALTH_INFO:
+ rc = mock_health_info(cmd);
+ break;
+ case CXL_MBOX_OP_SANITIZE:
+ rc = mock_sanitize(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_SECURE_ERASE:
+ rc = mock_secure_erase(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_GET_SECURITY_STATE:
+ rc = mock_get_security_state(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_SET_PASSPHRASE:
+ rc = mock_set_passphrase(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_DISABLE_PASSPHRASE:
+ rc = mock_disable_passphrase(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_FREEZE_SECURITY:
+ rc = mock_freeze_security(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_UNLOCK:
+ rc = mock_unlock_security(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
+ rc = mock_passphrase_secure_erase(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_SET_SHUTDOWN_STATE:
+ rc = mock_set_shutdown_state(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_GET_POISON:
+ rc = mock_get_poison(cxlds, cmd);
+ break;
+ case CXL_MBOX_OP_INJECT_POISON:
+ rc = mock_inject_poison(cxlds, cmd);
+ break;
+ case CXL_MBOX_OP_CLEAR_POISON:
+ rc = mock_clear_poison(cxlds, cmd);
+ break;
+ case CXL_MBOX_OP_GET_FW_INFO:
+ rc = mock_fw_info(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_TRANSFER_FW:
+ rc = mock_transfer_fw(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_ACTIVATE_FW:
+ rc = mock_activate_fw(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
+ rc = mock_get_supported_features(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_GET_FEATURE:
+ rc = mock_get_feature(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_SET_FEATURE:
+ rc = mock_set_feature(mdata, cmd);
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
+ cmd->size_in, cmd->size_out, rc);
+
+ return rc;
+}
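The dispatcher above is a flat switch on the mailbox opcode; anything unhandled falls through to the initial -EIO. Extending the mock means writing one more handler in the same shape and adding a matching case. A hedged sketch of such a handler (entirely hypothetical, not part of the patch):

	/* Hypothetical handler; pair with a case in the switch above */
	static int mock_get_timestamp_example(struct cxl_dev_state *cxlds,
					      struct cxl_mbox_cmd *cmd)
	{
		__le64 ts = cpu_to_le64(0);	/* illustrative value */

		if (cmd->size_out < sizeof(ts))
			return -EINVAL;

		memcpy(cmd->payload_out, &ts, sizeof(ts));
		cmd->size_out = sizeof(ts);
		return 0;
	}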
+
+static void label_area_release(void *lsa)
+{
+ vfree(lsa);
+}
+
+static void fw_buf_release(void *buf)
+{
+ vfree(buf);
+}
+
+static bool is_rcd(struct platform_device *pdev)
+{
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+
+ return !!id->driver_data;
+}
+
+static ssize_t event_trigger_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ cxl_mock_event_trigger(dev);
+ return count;
+}
+static DEVICE_ATTR_WO(event_trigger);
+
+static int cxl_mock_mailbox_create(struct cxl_dev_state *cxlds)
+{
+ int rc;
+
+ rc = cxl_mailbox_init(&cxlds->cxl_mbox, cxlds->dev);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static void cxl_mock_test_feat_init(struct cxl_mockmem_data *mdata)
+{
+ mdata->test_feat.data = cpu_to_le32(0xdeadbeef);
+}
+
+static int cxl_mock_mem_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cxl_memdev *cxlmd;
+ struct cxl_memdev_state *mds;
+ struct cxl_dev_state *cxlds;
+ struct cxl_mockmem_data *mdata;
+ struct cxl_mailbox *cxl_mbox;
+ struct cxl_dpa_info range_info = { 0 };
+ int rc;
+
+ mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
+ if (!mdata)
+ return -ENOMEM;
+ dev_set_drvdata(dev, mdata);
+
+ mdata->lsa = vmalloc(LSA_SIZE);
+ if (!mdata->lsa)
+ return -ENOMEM;
+ mdata->fw = vmalloc(FW_SIZE);
+ if (!mdata->fw)
+ return -ENOMEM;
+ mdata->fw_slot = 2;
+
+ rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
+ if (rc)
+ return rc;
+
+ rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
+ if (rc)
+ return rc;
+
+ mds = cxl_memdev_state_create(dev);
+ if (IS_ERR(mds))
+ return PTR_ERR(mds);
+
+ cxlds = &mds->cxlds;
+ rc = cxl_mock_mailbox_create(cxlds);
+ if (rc)
+ return rc;
+
+ cxl_mbox = &mds->cxlds.cxl_mbox;
+ mdata->mds = mds;
+ cxl_mbox->mbox_send = cxl_mock_mbox_send;
+ cxl_mbox->payload_size = SZ_4K;
+ mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
+ INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);
+
+ cxlds->serial = pdev->id + 1;
+ if (is_rcd(pdev))
+ cxlds->rcd = true;
+
+ rc = cxl_enumerate_cmds(mds);
+ if (rc)
+ return rc;
+
+ rc = cxl_poison_state_init(mds);
+ if (rc)
+ return rc;
+
+ rc = cxl_set_timestamp(mds);
+ if (rc)
+ return rc;
+
+ cxlds->media_ready = true;
+ rc = cxl_dev_state_identify(mds);
+ if (rc)
+ return rc;
+
+ rc = cxl_mem_dpa_fetch(mds, &range_info);
+ if (rc)
+ return rc;
+
+ rc = cxl_dpa_setup(cxlds, &range_info);
+ if (rc)
+ return rc;
+
+ rc = devm_cxl_setup_features(cxlds);
+ if (rc)
+ dev_dbg(dev, "No CXL Features discovered\n");
+
+ cxl_mock_add_event_logs(&mdata->mes);
+
+ cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
+ if (IS_ERR(cxlmd))
+ return PTR_ERR(cxlmd);
+
+ rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
+ if (rc)
+ return rc;
+
+ rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
+ if (rc)
+ return rc;
+
+ rc = devm_cxl_setup_fwctl(&pdev->dev, cxlmd);
+ if (rc)
+ dev_dbg(dev, "No CXL FWCTL setup\n");
+
+ cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
+ cxl_mock_test_feat_init(mdata);
+
+ return 0;
+}
+
+static ssize_t security_lock_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n",
+ !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
+}
+
+static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+ u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
+ CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
+ int val;
+
+ if (kstrtoint(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val == 1) {
+ if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
+ return -ENXIO;
+ mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
+ mdata->security_state &= ~mask;
+ } else {
+ return -EINVAL;
+ }
+ return count;
+}
+
+static DEVICE_ATTR_RW(security_lock);
+
+static ssize_t fw_buf_checksum_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+ u8 hash[SHA256_DIGEST_SIZE];
+
+ sha256(mdata->fw, mdata->fw_size, hash);
+
+ return sysfs_emit(buf, "%*phN\n", SHA256_DIGEST_SIZE, hash);
+}
+
+static DEVICE_ATTR_RO(fw_buf_checksum);
+
+static ssize_t sanitize_timeout_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", mdata->sanitize_timeout);
+}
+
+static ssize_t sanitize_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ mdata->sanitize_timeout = val;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(sanitize_timeout);
+
+static struct attribute *cxl_mock_mem_attrs[] = {
+ &dev_attr_security_lock.attr,
+ &dev_attr_event_trigger.attr,
+ &dev_attr_fw_buf_checksum.attr,
+ &dev_attr_sanitize_timeout.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(cxl_mock_mem);
+
+static const struct platform_device_id cxl_mock_mem_ids[] = {
+ { .name = "cxl_mem", 0 },
+ { .name = "cxl_rcd", 1 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
+
+static struct platform_driver cxl_mock_mem_driver = {
+ .probe = cxl_mock_mem_probe,
+ .id_table = cxl_mock_mem_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .dev_groups = cxl_mock_mem_groups,
+ .groups = cxl_mock_mem_core_groups,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+module_platform_driver(cxl_mock_mem_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("cxl_test: mem device mock module");
+MODULE_IMPORT_NS("CXL");
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
new file mode 100644
index 000000000000..44bce80ef3ff
--- /dev/null
+++ b/tools/testing/cxl/test/mock.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright(c) 2021 Intel Corporation. All rights reserved.
+
+#include <linux/libnvdimm.h>
+#include <linux/rculist.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <cxlmem.h>
+#include <cxlpci.h>
+#include "mock.h"
+#include "../exports.h"
+
+static LIST_HEAD(mock);
+
+static struct cxl_dport *
+redirect_devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev);
+static int redirect_devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
+
+void register_cxl_mock_ops(struct cxl_mock_ops *ops)
+{
+ list_add_rcu(&ops->list, &mock);
+ _devm_cxl_add_dport_by_dev = redirect_devm_cxl_add_dport_by_dev;
+ _devm_cxl_switch_port_decoders_setup =
+ redirect_devm_cxl_switch_port_decoders_setup;
+}
+EXPORT_SYMBOL_GPL(register_cxl_mock_ops);
+
+DEFINE_STATIC_SRCU(cxl_mock_srcu);
+
+void unregister_cxl_mock_ops(struct cxl_mock_ops *ops)
+{
+ _devm_cxl_switch_port_decoders_setup =
+ __devm_cxl_switch_port_decoders_setup;
+ _devm_cxl_add_dport_by_dev = __devm_cxl_add_dport_by_dev;
+ list_del_rcu(&ops->list);
+ synchronize_srcu(&cxl_mock_srcu);
+}
+EXPORT_SYMBOL_GPL(unregister_cxl_mock_ops);
+
+struct cxl_mock_ops *get_cxl_mock_ops(int *index)
+{
+ *index = srcu_read_lock(&cxl_mock_srcu);
+ return list_first_or_null_rcu(&mock, struct cxl_mock_ops, list);
+}
+EXPORT_SYMBOL_GPL(get_cxl_mock_ops);
+
+void put_cxl_mock_ops(int index)
+{
+ srcu_read_unlock(&cxl_mock_srcu, index);
+}
+EXPORT_SYMBOL_GPL(put_cxl_mock_ops);
+
+bool __wrap_is_acpi_device_node(const struct fwnode_handle *fwnode)
+{
+ struct acpi_device *adev =
+ container_of(fwnode, struct acpi_device, fwnode);
+ int index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+ bool retval = false;
+
+ if (ops)
+ retval = ops->is_mock_adev(adev);
+
+ if (!retval)
+ retval = is_acpi_device_node(fwnode);
+
+ put_cxl_mock_ops(index);
+ return retval;
+}
+EXPORT_SYMBOL(__wrap_is_acpi_device_node);
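These __wrap_ symbols rely on the linker's --wrap option: when the cxl_test Kbuild passes --wrap=<symbol>, call sites of <symbol> in the covered objects resolve to __wrap_<symbol>, while __real_<symbol> still reaches the original. A generic sketch of the mechanism, assuming a symbol named foo (all names hypothetical):

	/* With "ldflags-y += --wrap=foo", callers of foo() land here */
	extern int __real_foo(int arg);

	int __wrap_foo(int arg)
	{
		if (arg_should_be_mocked(arg))	/* hypothetical predicate */
			return mock_foo(arg);	/* hypothetical mock */
		return __real_foo(arg);		/* the original symbol */
	}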
+
+int __wrap_acpi_table_parse_cedt(enum acpi_cedt_type id,
+ acpi_tbl_entry_handler_arg handler_arg,
+ void *arg)
+{
+ int index, rc;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops)
+ rc = ops->acpi_table_parse_cedt(id, handler_arg, arg);
+ else
+ rc = acpi_table_parse_cedt(id, handler_arg, arg);
+
+ put_cxl_mock_ops(index);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_acpi_table_parse_cedt, "ACPI");
+
+acpi_status __wrap_acpi_evaluate_integer(acpi_handle handle,
+ acpi_string pathname,
+ struct acpi_object_list *arguments,
+ unsigned long long *data)
+{
+ int index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+ acpi_status status;
+
+ if (ops)
+ status = ops->acpi_evaluate_integer(handle, pathname, arguments,
+ data);
+ else
+ status = acpi_evaluate_integer(handle, pathname, arguments,
+ data);
+ put_cxl_mock_ops(index);
+
+ return status;
+}
+EXPORT_SYMBOL(__wrap_acpi_evaluate_integer);
+
+int __wrap_hmat_get_extended_linear_cache_size(struct resource *backing_res,
+ int nid,
+ resource_size_t *cache_size)
+{
+ int index, rc;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops)
+ rc = ops->hmat_get_extended_linear_cache_size(backing_res, nid,
+ cache_size);
+ else
+ rc = hmat_get_extended_linear_cache_size(backing_res, nid,
+ cache_size);
+
+ put_cxl_mock_ops(index);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(__wrap_hmat_get_extended_linear_cache_size);
+
+struct acpi_pci_root *__wrap_acpi_pci_find_root(acpi_handle handle)
+{
+ int index;
+ struct acpi_pci_root *root;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops)
+ root = ops->acpi_pci_find_root(handle);
+ else
+ root = acpi_pci_find_root(handle);
+
+ put_cxl_mock_ops(index);
+
+ return root;
+}
+EXPORT_SYMBOL_GPL(__wrap_acpi_pci_find_root);
+
+struct nvdimm_bus *
+__wrap_nvdimm_bus_register(struct device *dev,
+ struct nvdimm_bus_descriptor *nd_desc)
+{
+ int index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops && ops->is_mock_dev(dev->parent->parent))
+ nd_desc->provider_name = "cxl_test";
+ put_cxl_mock_ops(index);
+
+ return nvdimm_bus_register(dev, nd_desc);
+}
+EXPORT_SYMBOL_GPL(__wrap_nvdimm_bus_register);
+
+int redirect_devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
+{
+ int rc, index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops && ops->is_mock_port(port->uport_dev))
+ rc = ops->devm_cxl_switch_port_decoders_setup(port);
+ else
+ rc = __devm_cxl_switch_port_decoders_setup(port);
+ put_cxl_mock_ops(index);
+
+ return rc;
+}
+
+int __wrap_devm_cxl_endpoint_decoders_setup(struct cxl_port *port)
+{
+ int rc, index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops && ops->is_mock_port(port->uport_dev))
+ rc = ops->devm_cxl_endpoint_decoders_setup(port);
+ else
+ rc = devm_cxl_endpoint_decoders_setup(port);
+ put_cxl_mock_ops(index);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_endpoint_decoders_setup, "CXL");
+
+int __wrap_cxl_await_media_ready(struct cxl_dev_state *cxlds)
+{
+ int rc, index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops && ops->is_mock_dev(cxlds->dev))
+ rc = 0;
+ else
+ rc = cxl_await_media_ready(cxlds);
+ put_cxl_mock_ops(index);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_cxl_await_media_ready, "CXL");
+
+struct cxl_dport *__wrap_devm_cxl_add_rch_dport(struct cxl_port *port,
+ struct device *dport_dev,
+ int port_id,
+ resource_size_t rcrb)
+{
+ int index;
+ struct cxl_dport *dport;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops && ops->is_mock_port(dport_dev)) {
+ dport = devm_cxl_add_dport(port, dport_dev, port_id,
+ CXL_RESOURCE_NONE);
+ if (!IS_ERR(dport)) {
+ dport->rcrb.base = rcrb;
+ dport->rch = true;
+ }
+ } else
+ dport = devm_cxl_add_rch_dport(port, dport_dev, port_id, rcrb);
+ put_cxl_mock_ops(index);
+
+ return dport;
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_add_rch_dport, "CXL");
+
+void __wrap_cxl_endpoint_parse_cdat(struct cxl_port *port)
+{
+ int index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+
+ if (ops && ops->is_mock_dev(cxlmd->dev.parent))
+ ops->cxl_endpoint_parse_cdat(port);
+ else
+ cxl_endpoint_parse_cdat(port);
+ put_cxl_mock_ops(index);
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_cxl_endpoint_parse_cdat, "CXL");
+
+void __wrap_cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host)
+{
+ int index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (!ops || !ops->is_mock_port(dport->dport_dev))
+ cxl_dport_init_ras_reporting(dport, host);
+
+ put_cxl_mock_ops(index);
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_cxl_dport_init_ras_reporting, "CXL");
+
+struct cxl_dport *redirect_devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ int index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+ struct cxl_dport *dport;
+
+ if (ops && ops->is_mock_port(port->uport_dev))
+ dport = ops->devm_cxl_add_dport_by_dev(port, dport_dev);
+ else
+ dport = __devm_cxl_add_dport_by_dev(port, dport_dev);
+ put_cxl_mock_ops(index);
+
+ return dport;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("cxl_test: emulation module");
+MODULE_IMPORT_NS("ACPI");
+MODULE_IMPORT_NS("CXL");
diff --git a/tools/testing/cxl/test/mock.h b/tools/testing/cxl/test/mock.h
new file mode 100644
index 000000000000..2684b89c8aa2
--- /dev/null
+++ b/tools/testing/cxl/test/mock.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/list.h>
+#include <linux/acpi.h>
+#include <cxl.h>
+
+struct cxl_mock_ops {
+ struct list_head list;
+ bool (*is_mock_adev)(struct acpi_device *dev);
+ int (*acpi_table_parse_cedt)(enum acpi_cedt_type id,
+ acpi_tbl_entry_handler_arg handler_arg,
+ void *arg);
+ bool (*is_mock_bridge)(struct device *dev);
+ acpi_status (*acpi_evaluate_integer)(acpi_handle handle,
+ acpi_string pathname,
+ struct acpi_object_list *arguments,
+ unsigned long long *data);
+ struct acpi_pci_root *(*acpi_pci_find_root)(acpi_handle handle);
+ bool (*is_mock_bus)(struct pci_bus *bus);
+ bool (*is_mock_port)(struct device *dev);
+ bool (*is_mock_dev)(struct device *dev);
+ int (*devm_cxl_switch_port_decoders_setup)(struct cxl_port *port);
+ int (*devm_cxl_endpoint_decoders_setup)(struct cxl_port *port);
+ void (*cxl_endpoint_parse_cdat)(struct cxl_port *port);
+ struct cxl_dport *(*devm_cxl_add_dport_by_dev)(struct cxl_port *port,
+ struct device *dport_dev);
+ int (*hmat_get_extended_linear_cache_size)(struct resource *backing_res,
+ int nid,
+ resource_size_t *cache_size);
+};
+
+void register_cxl_mock_ops(struct cxl_mock_ops *ops);
+void unregister_cxl_mock_ops(struct cxl_mock_ops *ops);
+struct cxl_mock_ops *get_cxl_mock_ops(int *index);
+void put_cxl_mock_ops(int index);
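A provider fills in this table and registers it for the duration of its tests. A minimal sketch of the expected pattern (callback names are hypothetical; the real table lives in the cxl_test topology module, and every callback the wrappers dereference must be populated):

	static bool my_is_mock_dev(struct device *dev);	/* hypothetical */

	static struct cxl_mock_ops mock_ops = {
		.list = LIST_HEAD_INIT(mock_ops.list),
		.is_mock_dev = my_is_mock_dev,
		/* ... fill in the remaining callbacks ... */
	};

	static int __init example_init(void)
	{
		register_cxl_mock_ops(&mock_ops);
		return 0;
	}
	module_init(example_init);

	static void __exit example_exit(void)
	{
		unregister_cxl_mock_ops(&mock_ops);
	}
	module_exit(example_exit);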
diff --git a/tools/testing/cxl/watermark.h b/tools/testing/cxl/watermark.h
new file mode 100644
index 000000000000..9d81d4a5f6be
--- /dev/null
+++ b/tools/testing/cxl/watermark.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#ifndef _TEST_CXL_WATERMARK_H_
+#define _TEST_CXL_WATERMARK_H_
+#include <linux/module.h>
+#include <linux/printk.h>
+
+int cxl_acpi_test(void);
+int cxl_core_test(void);
+int cxl_mem_test(void);
+int cxl_pmem_test(void);
+int cxl_port_test(void);
+
+/*
+ * dummy routine for cxl_test to validate it is linking to the properly
+ * mocked module and not the standard one from the base tree.
+ */
+#define cxl_test_watermark(x) \
+int x##_test(void) \
+{ \
+ pr_debug("%s for cxl_test\n", KBUILD_MODNAME); \
+ return 0; \
+} \
+EXPORT_SYMBOL(x##_test)
+#endif /* _TEST_CXL_WATERMARK_H_ */
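Each mocked module expands the macro once so that its *_test() symbol is exported from the mock tree; cxl_test then calls the prototype declared above to confirm it linked against the mocks rather than the base-tree modules. A usage sketch (the enclosing file would be one of the small cxl_*_test.c stubs):

	#include "watermark.h"

	/* Emits and exports cxl_mem_test() for cxl_test to call */
	cxl_test_watermark(cxl_mem);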
diff --git a/tools/testing/fault-injection/failcmd.sh b/tools/testing/fault-injection/failcmd.sh
index 78dac34264be..c4f2432750f4 100644..100755
--- a/tools/testing/fault-injection/failcmd.sh
+++ b/tools/testing/fault-injection/failcmd.sh
@@ -64,6 +64,14 @@ ENVIRONMENT
EOF
}
+exit_if_not_hex() {
+ local value="$1"
+ if ! [[ $value =~ ^0x[0-9a-fA-F]+$ ]]; then
+ echo "Error: The provided value '$value' is not a valid hexadecimal number." >&2
+ exit 1
+ fi
+}
+
if [ $UID != 0 ]; then
echo must be run as root >&2
exit 1
@@ -160,18 +168,22 @@ while true; do
shift 2
;;
--require-start)
+ exit_if_not_hex "$2"
echo $2 > $FAULTATTR/require-start
shift 2
;;
--require-end)
+ exit_if_not_hex "$2"
echo $2 > $FAULTATTR/require-end
shift 2
;;
--reject-start)
+ exit_if_not_hex "$2"
echo $2 > $FAULTATTR/reject-start
shift 2
;;
--reject-end)
+ exit_if_not_hex "$2"
echo $2 > $FAULTATTR/reject-end
shift 2
;;
diff --git a/tools/testing/ktest/config-bisect.pl b/tools/testing/ktest/config-bisect.pl
index 6fd864935319..bee7cb34a289 100755
--- a/tools/testing/ktest/config-bisect.pl
+++ b/tools/testing/ktest/config-bisect.pl
@@ -741,9 +741,9 @@ if ($start) {
die "Can not find file $bad\n";
}
if ($val eq "good") {
- run_command "cp $output_config $good" or die "failed to copy $config to $good\n";
+ run_command "cp $output_config $good" or die "failed to copy $output_config to $good\n";
} elsif ($val eq "bad") {
- run_command "cp $output_config $bad" or die "failed to copy $config to $bad\n";
+ run_command "cp $output_config $bad" or die "failed to copy $output_config to $bad\n";
}
}
diff --git a/tools/testing/ktest/examples/bootconfigs/boottrace.bconf b/tools/testing/ktest/examples/bootconfigs/boottrace.bconf
new file mode 100644
index 000000000000..7aa706cccb3b
--- /dev/null
+++ b/tools/testing/ktest/examples/bootconfigs/boottrace.bconf
@@ -0,0 +1,59 @@
+ftrace.event {
+ task.task_newtask {
+ filter = "pid < 128"
+ enable
+ }
+ kprobes.vfs_read {
+ probes = "vfs_read $arg1 $arg2"
+ filter = "common_pid < 200"
+ enable
+ }
+ synthetic.initcall_latency {
+ fields = "unsigned long func", "u64 lat"
+ hist {
+ keys = func.sym,lat
+ values = lat
+ sort = lat
+ }
+ }
+ initcall.initcall_start.hist {
+ keys = func;
+ var.ts0 = common_timestamp.usecs
+ }
+ initcall.initcall_finish.hist {
+ keys = func
+ var.lat = common_timestamp.usecs - $ts0
+ onmatch {
+ event = initcall.initcall_start
+ trace = initcall_latency, func, $lat
+ }
+ }
+}
+
+ftrace.instance {
+ foo {
+ tracer = "function"
+ ftrace.filters = "user_*"
+ cpumask = 1
+ options = nosym-addr
+ buffer_size = 512KB
+ trace_clock = mono
+ event.signal.signal_deliver.actions=snapshot
+ }
+ bar {
+ tracer = "function"
+ ftrace.filters = "kernel_*"
+ cpumask = 2
+ trace_clock = x86-tsc
+ }
+}
+
+ftrace.alloc_snapshot
+
+kernel {
+ trace_options = sym-addr
+ trace_event = "initcall:*"
+ trace_buf_size = 1M
+ ftrace = function
+ ftrace_filter = "vfs*"
+}
diff --git a/tools/testing/ktest/examples/bootconfigs/config-bootconfig b/tools/testing/ktest/examples/bootconfigs/config-bootconfig
new file mode 100644
index 000000000000..0685b6811388
--- /dev/null
+++ b/tools/testing/ktest/examples/bootconfigs/config-bootconfig
@@ -0,0 +1 @@
+CONFIG_CMDLINE="bootconfig"
diff --git a/tools/testing/ktest/examples/bootconfigs/functiongraph.bconf b/tools/testing/ktest/examples/bootconfigs/functiongraph.bconf
new file mode 100644
index 000000000000..68debfcbda76
--- /dev/null
+++ b/tools/testing/ktest/examples/bootconfigs/functiongraph.bconf
@@ -0,0 +1,15 @@
+ftrace {
+ tracing_on = 0 # off by default
+ tracer = function_graph
+ event.kprobes {
+ start_event {
+ probes = "pci_proc_init"
+ actions = "traceon"
+ }
+ end_event {
+ probes = "pci_proc_init%return"
+ actions = "traceoff"
+ }
+ }
+}
+
diff --git a/tools/testing/ktest/examples/bootconfigs/tracing.bconf b/tools/testing/ktest/examples/bootconfigs/tracing.bconf
new file mode 100644
index 000000000000..bf117c78115a
--- /dev/null
+++ b/tools/testing/ktest/examples/bootconfigs/tracing.bconf
@@ -0,0 +1,33 @@
+ftrace {
+ tracer = function_graph;
+ options = event-fork, sym-addr, stacktrace;
+ buffer_size = 1M;
+ alloc_snapshot;
+ trace_clock = global;
+ events = "task:task_newtask", "initcall:*";
+ event.sched.sched_process_exec {
+ filter = "pid < 128";
+ }
+ instance.bar {
+ event.kprobes {
+ myevent {
+ probes = "vfs_read $arg2 $arg3";
+ }
+ myevent2 {
+ probes = "vfs_write $arg2 +0($arg2):ustring $arg3";
+ }
+ myevent3 {
+ probes = "initrd_load";
+ }
+ enable
+ }
+ }
+ instance.foo {
+ tracer = function;
+ tracing_on = false;
+ };
+}
+kernel {
+ ftrace_dump_on_oops = "orig_cpu"
+ traceoff_on_warning
+}
diff --git a/tools/testing/ktest/examples/bootconfigs/verify-boottrace.sh b/tools/testing/ktest/examples/bootconfigs/verify-boottrace.sh
new file mode 100755
index 000000000000..233e95cfcf20
--- /dev/null
+++ b/tools/testing/ktest/examples/bootconfigs/verify-boottrace.sh
@@ -0,0 +1,84 @@
+#!/bin/sh
+
+cd /sys/kernel/tracing
+
+compare_file() {
+ file="$1"
+ val="$2"
+ content=`cat $file`
+ if [ "$content" != "$val" ]; then
+ echo "FAILED: $file has '$content', expected '$val'"
+ exit 1
+ fi
+}
+
+compare_file_partial() {
+ file="$1"
+ val="$2"
+ content=`cat $file | sed -ne "/^$val/p"`
+ if [ -z "$content" ]; then
+ echo "FAILED: $file does not contain '$val'"
+ cat $file
+ exit 1
+ fi
+}
+
+file_contains() {
+ file=$1
+ val="$2"
+
+ if ! grep -q "$val" $file ; then
+ echo "FAILED: $file does not contain $val"
+ cat $file
+ exit 1
+ fi
+}
+
+compare_mask() {
+ file=$1
+ val="$2"
+
+ content=`cat $file | sed -ne "/^[0 ]*$val/p"`
+ if [ -z "$content" ]; then
+ echo "FAILED: $file does not have mask '$val'"
+ cat $file
+ exit 1
+ fi
+}
+
+compare_file "events/task/task_newtask/filter" "pid < 128"
+compare_file "events/task/task_newtask/enable" "1"
+
+compare_file "events/kprobes/vfs_read/filter" "common_pid < 200"
+compare_file "events/kprobes/vfs_read/enable" "1"
+
+compare_file_partial "events/synthetic/initcall_latency/trigger" "hist:keys=func.sym,lat:vals=hitcount,lat:sort=lat"
+compare_file_partial "events/synthetic/initcall_latency/enable" "0"
+
+compare_file_partial "events/initcall/initcall_start/trigger" "hist:keys=func:vals=hitcount:ts0=common_timestamp.usecs"
+compare_file_partial "events/initcall/initcall_start/enable" "1"
+
+compare_file_partial "events/initcall/initcall_finish/trigger" 'hist:keys=func:vals=hitcount:lat=common_timestamp.usecs-\$ts0:sort=hitcount:size=2048:clock=global:onmatch(initcall.initcall_start).trace(initcall_latency,func,\$lat)'
+compare_file_partial "events/initcall/initcall_finish/enable" "1"
+
+compare_file "instances/foo/current_tracer" "function"
+file_contains "instances/foo/set_ftrace_filter" "^user"
+compare_file "instances/foo/buffer_size_kb" "512"
+compare_mask "instances/foo/tracing_cpumask" "1"
+compare_file "instances/foo/options/sym-addr" "0"
+file_contains "instances/foo/trace_clock" '\[mono\]'
+compare_file_partial "instances/foo/events/signal/signal_deliver/trigger" "snapshot"
+
+compare_file "instances/bar/current_tracer" "function"
+file_contains "instances/bar/set_ftrace_filter" "^kernel"
+compare_mask "instances/bar/tracing_cpumask" "2"
+file_contains "instances/bar/trace_clock" '\[x86-tsc\]'
+
+file_contains "snapshot" "Snapshot is allocated"
+compare_file "options/sym-addr" "1"
+compare_file "events/initcall/enable" "1"
+compare_file "buffer_size_kb" "1027"
+compare_file "current_tracer" "function"
+file_contains "set_ftrace_filter" '^vfs'
+
+exit 0
diff --git a/tools/testing/ktest/examples/bootconfigs/verify-functiongraph.sh b/tools/testing/ktest/examples/bootconfigs/verify-functiongraph.sh
new file mode 100755
index 000000000000..b50baa10fe97
--- /dev/null
+++ b/tools/testing/ktest/examples/bootconfigs/verify-functiongraph.sh
@@ -0,0 +1,61 @@
+#!/bin/sh
+
+cd /sys/kernel/tracing
+
+compare_file() {
+ file="$1"
+ val="$2"
+ content=`cat $file`
+ if [ "$content" != "$val" ]; then
+ echo "FAILED: $file has '$content', expected '$val'"
+ exit 1
+ fi
+}
+
+compare_file_partial() {
+ file="$1"
+ val="$2"
+ content=`cat $file | sed -ne "/^$val/p"`
+ if [ -z "$content" ]; then
+ echo "FAILED: $file does not contain '$val'"
+ cat $file
+ exit 1
+ fi
+}
+
+file_contains() {
+ file=$1
+ val="$2"
+
+ if ! grep -q "$val" $file ; then
+ echo "FAILED: $file does not contain $val"
+ cat $file
+ exit 1
+ fi
+}
+
+compare_mask() {
+ file=$1
+ val="$2"
+
+ content=`cat $file | sed -ne "/^[0 ]*$val/p"`
+ if [ -z "$content" ]; then
+ echo "FAILED: $file does not have mask '$val'"
+ cat $file
+ exit 1
+ fi
+}
+
+
+compare_file "tracing_on" "0"
+compare_file "current_tracer" "function_graph"
+
+compare_file_partial "events/kprobes/start_event/enable" "1"
+compare_file_partial "events/kprobes/start_event/trigger" "traceon"
+file_contains "kprobe_events" 'start_event.*pci_proc_init'
+
+compare_file_partial "events/kprobes/end_event/enable" "1"
+compare_file_partial "events/kprobes/end_event/trigger" "traceoff"
+file_contains "kprobe_events" '^r.*end_event.*pci_proc_init'
+
+exit 0
diff --git a/tools/testing/ktest/examples/bootconfigs/verify-tracing.sh b/tools/testing/ktest/examples/bootconfigs/verify-tracing.sh
new file mode 100755
index 000000000000..01e111e36e63
--- /dev/null
+++ b/tools/testing/ktest/examples/bootconfigs/verify-tracing.sh
@@ -0,0 +1,72 @@
+#!/bin/sh
+
+cd /sys/kernel/tracing
+
+compare_file() {
+ file="$1"
+ val="$2"
+ content=`cat $file`
+ if [ "$content" != "$val" ]; then
+ echo "FAILED: $file has '$content', expected '$val'"
+ exit 1
+ fi
+}
+
+compare_file_partial() {
+ file="$1"
+ val="$2"
+ content=`cat $file | sed -ne "/^$val/p"`
+ if [ -z "$content" ]; then
+ echo "FAILED: $file does not contain '$val'"
+ cat $file
+ exit 1
+ fi
+}
+
+file_contains() {
+ file=$1
+ val="$2"
+
+ if ! grep -q "$val" $file ; then
+ echo "FAILED: $file does not contain $val"
+ cat $file
+ exit 1
+ fi
+}
+
+compare_mask() {
+ file=$1
+ val="$2"
+
+ content=`cat $file | sed -ne "/^[0 ]*$val/p"`
+ if [ -z "$content" ]; then
+ echo "FAILED: $file does not have mask '$val'"
+ cat $file
+ exit 1
+ fi
+}
+
+compare_file "current_tracer" "function_graph"
+compare_file "options/event-fork" "1"
+compare_file "options/sym-addr" "1"
+compare_file "options/stacktrace" "1"
+compare_file "buffer_size_kb" "1024"
+file_contains "snapshot" "Snapshot is allocated"
+file_contains "trace_clock" '\[global\]'
+
+compare_file "events/initcall/enable" "1"
+compare_file "events/task/task_newtask/enable" "1"
+compare_file "events/sched/sched_process_exec/filter" "pid < 128"
+compare_file "events/kprobes/enable" "1"
+
+compare_file "instances/bar/events/kprobes/myevent/enable" "1"
+compare_file "instances/bar/events/kprobes/myevent2/enable" "1"
+compare_file "instances/bar/events/kprobes/myevent3/enable" "1"
+
+compare_file "instances/foo/current_tracer" "function"
+compare_file "instances/foo/tracing_on" "0"
+
+compare_file "/proc/sys/kernel/ftrace_dump_on_oops" "2"
+compare_file "/proc/sys/kernel/traceoff_on_warning" "1"
+
+exit 0
diff --git a/tools/testing/ktest/examples/include/bootconfig.conf b/tools/testing/ktest/examples/include/bootconfig.conf
new file mode 100644
index 000000000000..3b885de085bd
--- /dev/null
+++ b/tools/testing/ktest/examples/include/bootconfig.conf
@@ -0,0 +1,69 @@
+# bootconfig.conf
+#
+# Tests to test some bootconfig scripts
+
+# Location of the initrd to use on the target machine
+INITRD := /boot/initramfs-test.img
+
+# Install bootconfig on the target machine and define the path here.
+BOOTCONFIG := /usr/bin/bootconfig
+
+# Currently we just build the .config in the BUILD_DIR
+BUILD_TYPE := oldconfig
+
+# Helper macro to run bootconfig on the target
+# SSH is defined in include/defaults.conf
+ADD_BOOTCONFIG := ${SSH} "${BOOTCONFIG} -d ${INITRD} && ${BOOTCONFIG} -a /tmp/${BOOTCONFIG_FILE} ${INITRD}"
+
+# This copies a bootconfig script to the target and then will
+# add it to the initrd. SSH_USER is defined in include/defaults.conf
+# and MACHINE is defined in the example configs.
+BOOTCONFIG_TEST_PREP = scp ${BOOTCONFIG_PATH}${BOOTCONFIG_FILE} ${SSH_USER}@${MACHINE}:/tmp && ${ADD_BOOTCONFIG}
+
+# When a test is complete, remove the bootconfig from the initrd.
+CLEAR_BOOTCONFIG := ${SSH} "${BOOTCONFIG} -d ${INITRD}"
+
+# Run a verifier on the target after it had booted, to make sure that the
+# bootconfig script did what it was expected to do
+DO_TEST = scp ${BOOTCONFIG_PATH}${BOOTCONFIG_VERIFY} ${SSH_USER}@${MACHINE}:/tmp && ${SSH} /tmp/${BOOTCONFIG_VERIFY}
+
+# Comment this out to not run the boot configs
+RUN_BOOTCONFIG := 1
+
+TEST_START IF DEFINED RUN_BOOTCONFIG
+TEST_TYPE = test
+TEST_NAME = bootconfig boottrace
+# Just testing the bootconfig on initrd, no need to build the kernel
+BUILD_TYPE = nobuild
+BOOTCONFIG_FILE = boottrace.bconf
+BOOTCONFIG_VERIFY = verify-boottrace.sh
+ADD_CONFIG = ${ADD_CONFIG} ${BOOTCONFIG_PATH}/config-bootconfig
+PRE_TEST = ${BOOTCONFIG_TEST_PREP}
+PRE_TEST_DIE = 1
+TEST = ${DO_TEST}
+POST_TEST = ${CLEAR_BOOTCONFIG}
+
+TEST_START IF DEFINED RUN_BOOTCONFIG
+TEST_TYPE = test
+TEST_NAME = bootconfig function graph
+BUILD_TYPE = nobuild
+BOOTCONFIG_FILE = functiongraph.bconf
+BOOTCONFIG_VERIFY = verify-functiongraph.sh
+ADD_CONFIG = ${ADD_CONFIG} ${BOOTCONFIG_PATH}/config-bootconfig
+PRE_TEST = ${BOOTCONFIG_TEST_PREP}
+PRE_TEST_DIE = 1
+TEST = ${DO_TEST}
+POST_TEST = ${CLEAR_BOOTCONFIG}
+
+TEST_START IF DEFINED RUN_BOOTCONFIG
+TEST_TYPE = test
+TEST_NAME = bootconfig tracing
+BUILD_TYPE = nobuild
+BOOTCONFIG_FILE = tracing.bconf
+BOOTCONFIG_VERIFY = verify-tracing.sh
+ADD_CONFIG = ${ADD_CONFIG} ${BOOTCONFIG_PATH}/config-bootconfig
+PRE_TEST = ${BOOTCONFIG_TEST_PREP}
+PRE_TEST_DIE = 1
+TEST = ${DO_TEST}
+POST_TEST = ${CLEAR_BOOTCONFIG}
+
diff --git a/tools/testing/ktest/examples/include/defaults.conf b/tools/testing/ktest/examples/include/defaults.conf
index 63a1a83f4f0b..f6d8517a471e 100644
--- a/tools/testing/ktest/examples/include/defaults.conf
+++ b/tools/testing/ktest/examples/include/defaults.conf
@@ -46,7 +46,7 @@ CLEAR_LOG = 1
SSH_USER = root
-# For accesing the machine, we will ssh to root@machine.
+# For accessing the machine, we will ssh to root@machine.
SSH := ssh ${SSH_USER}@${MACHINE}
# Update this. The default here is ktest will ssh to the target box
diff --git a/tools/testing/ktest/examples/kvm.conf b/tools/testing/ktest/examples/kvm.conf
index fbc134f9ac6e..c700e8bb7fde 100644
--- a/tools/testing/ktest/examples/kvm.conf
+++ b/tools/testing/ktest/examples/kvm.conf
@@ -90,3 +90,4 @@ INCLUDE include/patchcheck.conf
INCLUDE include/tests.conf
INCLUDE include/bisect.conf
INCLUDE include/min-config.conf
+INCLUDE include/bootconfig.conf
\ No newline at end of file
diff --git a/tools/testing/ktest/examples/vmware.conf b/tools/testing/ktest/examples/vmware.conf
new file mode 100644
index 000000000000..61958163d242
--- /dev/null
+++ b/tools/testing/ktest/examples/vmware.conf
@@ -0,0 +1,137 @@
+#
+# This config is an example usage of ktest.pl with a vmware guest
+#
+# VMware Setup:
+# -------------
+# - Edit the Virtual Machine ("Edit virtual machine settings")
+# - Add a Serial Port
+# - You almost certainly want it set to "Connect at power on"
+# - Select "Use socket (named pipe)"
+# - Select a name that you'll recognize, like 'ktestserialpipe'
+# - From: Server
+# - To: A Virtual Machine
+# - Save
+# - Make sure you note the name, as it will be in the base directory of the
+# virtual machine (where the "disks" are stored). The default
+# is /var/lib/vmware/<virtual machine name>/<the name you entered above>
+#
+# - Make note of the path to the VM
+# </End VMware setup>
+#
+# The guest is called 'Guest' and this would be something that
+# could be run on the host to test a virtual machine target.
+
+MACHINE = Guest
+
+# Name of the serial pipe you set in the VMware settings
+VMWARE_SERIAL_NAME = <the name you entered above>
+
+# Define a variable with the name of the VM.
+# Note this needs to be the name of the .vmx file, and is usually also
+# the name of the directory that it's in. If the directory and name
+# differ, change VMWARE_VM_DIR accordingly.
+# Please omit the .vmx extension
+VMWARE_VM_NAME = <virtual machine name>
+
+# VM dir name. This is usually the same as the virtual machine's name,
+# but not always the case. Change if they differ
+VMWARE_VM_DIR = ${VMWARE_VM_NAME}
+
+# Base directory that the Virtual machine is contained in
+# /var/lib/vmware is the default on Linux
+VMWARE_VM_BASE_DIR = /var/lib/vmware/${VMWARE_VM_DIR}
+
+# Use ncat to read the Unix pipe. Anything that can read the Unix pipe
+# and output its contents to stdout will work
+CONSOLE = /usr/bin/ncat -U ${VMWARE_VM_BASE_DIR}/${VMWARE_SERIAL_NAME}
+
+# Define what version of Workstation you are using
+# This is used by vmrun to use the appropriate pieces to
+# test this. In all likelihood you want 'ws' or 'player'
+# Valid options:
+# ws - Workstation (Windows or Linux host)
+# fusion - Fusion (Mac host)
+# player - Using VMware Player (Windows or Linux host)
+# Note: vmrun has to run directly on the host machine
+VMWARE_HOST_TYPE = ws
+
+# VMware provides `vmrun` to allow you to do certain things to the virtual machine
+# This should hard reset the VM and force a boot
+VMWARE_POWER_CYCLE = /usr/bin/vmrun -T ${VMWARE_HOST_TYPE} reset ${VMWARE_VM_BASE_DIR}/${VMWARE_VM_NAME}.vmx nogui
+
+#*************************************#
+# This part is the same as test.conf #
+#*************************************#
+
+# The include files will set up the type of test to run. Just set TEST to
+# which test you want to run.
+#
+# TESTS = patchcheck, randconfig, boot, test, config-bisect, bisect, min-config
+#
+# See the include/*.conf files that define these tests
+#
+TEST := patchcheck
+
+# Some tests may have more than one test to run. Define MULTI := 1 to run
+# the extra tests.
+MULTI := 0
+
+# In case you want to differentiate which type of system you are testing
+BITS := 64
+
+# REBOOT = none, error, fail, empty
+# See include/defaults.conf
+REBOOT := empty
+
+
+# The defaults file will set up various settings that can be used by all
+# machine configs.
+INCLUDE include/defaults.conf
+
+
+#*************************************#
+# Now we are different from test.conf #
+#*************************************#
+
+
+# The example here assumes that Guest is running a Fedora release
+# that uses dracut for its initfs. The POST_INSTALL will be executed
+# after the install of the kernel and modules are complete.
+#
+POST_INSTALL = ${SSH} /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
+
+# Guests sometimes get stuck on reboot. We wait 3 seconds after running
+# the reboot command and then do a full power-cycle of the guest.
+# This forces the guest to restart.
+#
+POWERCYCLE_AFTER_REBOOT = 3
+
+# We do the same after the halt command, but this time we wait 20 seconds.
+POWEROFF_AFTER_HALT = 20
+
+
+# As the defaults.conf file has a POWER_CYCLE option already defined,
+# and options can not be defined in the same section more than once
+# (all DEFAULTS sections are considered the same), we use
+# DEFAULTS OVERRIDE to tell ktest.pl to ignore the previously defined
+# option in favor of the options set in the OVERRIDE section.
+#
+DEFAULTS OVERRIDE
+
+# Instead of using the default POWER_CYCLE option defined in
+# defaults.conf, we use vmrun to hard reset the guest.
+# Crude, but effective.
+#
+POWER_CYCLE = ${VMWARE_POWER_CYCLE}
+
+
+DEFAULTS
+
+# The following files each handle a different test case.
+# Having them included allows you to set up more than one machine and share
+# the same tests.
+INCLUDE include/patchcheck.conf
+INCLUDE include/tests.conf
+INCLUDE include/bisect.conf
+INCLUDE include/min-config.conf
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 4e2450964517..001c4df9f7df 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -21,10 +21,12 @@ my %opt;
my %repeat_tests;
my %repeats;
my %evals;
+my @command_vars;
+my %command_tmp_vars;
#default opts
my %default = (
- "MAILER" => "sendmail", # default mailer
+ "MAILER" => "sendmail", # default mailer
"EMAIL_ON_ERROR" => 1,
"EMAIL_WHEN_FINISHED" => 1,
"EMAIL_WHEN_CANCELED" => 0,
@@ -36,15 +38,15 @@ my %default = (
"CLOSE_CONSOLE_SIGNAL" => "INT",
"TIMEOUT" => 120,
"TMP_DIR" => "/tmp/ktest/\${MACHINE}",
- "SLEEP_TIME" => 60, # sleep time between tests
+ "SLEEP_TIME" => 60, # sleep time between tests
"BUILD_NOCLEAN" => 0,
"REBOOT_ON_ERROR" => 0,
"POWEROFF_ON_ERROR" => 0,
"REBOOT_ON_SUCCESS" => 1,
"POWEROFF_ON_SUCCESS" => 0,
"BUILD_OPTIONS" => "",
- "BISECT_SLEEP_TIME" => 60, # sleep time between bisects
- "PATCHCHECK_SLEEP_TIME" => 60, # sleep time between patch checks
+ "BISECT_SLEEP_TIME" => 60, # sleep time between bisects
+ "PATCHCHECK_SLEEP_TIME" => 60, # sleep time between patch checks
"CLEAR_LOG" => 0,
"BISECT_MANUAL" => 0,
"BISECT_SKIP" => 1,
@@ -178,6 +180,7 @@ my $store_failures;
my $store_successes;
my $test_name;
my $timeout;
+my $run_timeout;
my $connect_timeout;
my $config_bisect_exec;
my $booted_timeout;
@@ -215,12 +218,15 @@ my $patchcheck_type;
my $patchcheck_start;
my $patchcheck_cherry;
my $patchcheck_end;
+my $patchcheck_skip;
my $build_time;
my $install_time;
my $reboot_time;
my $test_time;
+my $warning_found = 0;
+
my $pwd;
my $dirname = $FindBin::Bin;
@@ -340,6 +346,7 @@ my %option_map = (
"STORE_SUCCESSES" => \$store_successes,
"TEST_NAME" => \$test_name,
"TIMEOUT" => \$timeout,
+ "RUN_TIMEOUT" => \$run_timeout,
"CONNECT_TIMEOUT" => \$connect_timeout,
"CONFIG_BISECT_EXEC" => \$config_bisect_exec,
"BOOTED_TIMEOUT" => \$booted_timeout,
@@ -376,6 +383,7 @@ my %option_map = (
"PATCHCHECK_START" => \$patchcheck_start,
"PATCHCHECK_CHERRY" => \$patchcheck_cherry,
"PATCHCHECK_END" => \$patchcheck_end,
+ "PATCHCHECK_SKIP" => \$patchcheck_skip,
);
# Options may be used by other options, record them.
@@ -512,6 +520,69 @@ $config_help{"REBOOT_SCRIPT"} = << "EOF"
EOF
;
+# used with process_expression()
+my $d = 0;
+
+# defined before get_test_name()
+my $in_die = 0;
+
+# defined before process_warning_line()
+my $check_build_re = ".*:.*(warning|error|Error):.*";
+my $utf8_quote = "\\x{e2}\\x{80}(\\x{98}|\\x{99})";
+
+# defined before child_finished()
+my $child_done;
+
+# config_ignore holds the configs that were set (or unset) for
+# a good config and we will ignore these configs for the rest
+# of a config bisect. These configs stay as they were.
+my %config_ignore;
+
+# config_set holds what all configs were set as.
+my %config_set;
+
+# config_off holds the set of configs that the bad config had disabled.
+# We need to record them and set them in the .config when running
+# olddefconfig, because olddefconfig keeps the defaults.
+my %config_off;
+
+# config_off_tmp holds a set of configs to turn off for now
+my @config_off_tmp;
+
+# config_list is the set of configs that are being tested
+my %config_list;
+my %null_config;
+
+my %dependency;
+
+# found above run_config_bisect()
+my $pass = 1;
+
+# found above add_dep()
+
+my %depends;
+my %depcount;
+my $iflevel = 0;
+my @ifdeps;
+
+# prevent recursion
+my %read_kconfigs;
+
+# found above test_this_config()
+my %min_configs;
+my %keep_configs;
+my %save_configs;
+my %processed_configs;
+my %nochange_config;
+
+#
+# These are first defined here, main function later on
+#
+sub run_command;
+sub start_monitor;
+sub end_monitor;
+sub wait_for_monitor;
+
sub _logit {
if (defined($opt{"LOG_FILE"})) {
print LOG @_;
@@ -537,7 +608,7 @@ sub read_prompt {
my $ans;
for (;;) {
- if ($cancel) {
+ if ($cancel) {
print "$prompt [y/n/C] ";
} else {
print "$prompt [Y/n] ";
@@ -664,11 +735,18 @@ sub print_times {
show_time($test_time);
doprint "\n";
}
+ if ($warning_found) {
+ doprint "\n*** WARNING";
+ doprint "S" if ($warning_found > 1);
+ doprint " found in build: $warning_found ***\n\n";
+ }
+
# reset for iterations like bisect
$build_time = 0;
$install_time = 0;
$reboot_time = 0;
$test_time = 0;
+ $warning_found = 0;
}
sub get_mandatory_configs {
@@ -727,40 +805,51 @@ sub process_variables {
my $retval = "";
# We want to check for '\', and it is just easier
- # to check the previous characet of '$' and not need
+ # to check the previous character of '$' and not need
# to worry if '$' is the first character. By adding
# a space to $value, we can just check [^\\]\$ and
# it will still work.
$value = " $value";
- while ($value =~ /(.*?[^\\])\$\{(.*?)\}(.*)/) {
+ while ($value =~ /(.*?[^\\])\$\{([^\{]*?)\}(.*)/) {
my $begin = $1;
my $var = $2;
my $end = $3;
# append beginning of value to retval
$retval = "$retval$begin";
- if (defined($variable{$var})) {
+ if ($var =~ s/^shell\s+//) {
+ $retval = `$var`;
+ if ($?) {
+ doprint "WARNING: $var returned an error\n";
+ } else {
+ chomp $retval;
+ }
+ } elsif (defined($variable{$var})) {
$retval = "$retval$variable{$var}";
} elsif (defined($remove_undef) && $remove_undef) {
# for if statements, any variable that is not defined,
# we simple convert to 0
$retval = "${retval}0";
} else {
- # put back the origin piece.
- $retval = "$retval\$\{$var\}";
+ # put back the origin piece, but with $#### to not reprocess it
+ $retval = "$retval\$####\{$var\}";
# This could be an option that is used later, save
# it so we don't warn if this option is not one of
# ktests options.
$used_options{$var} = 1;
}
- $value = $end;
+ $value = "$retval$end";
+ $retval = "";
}
- $retval = "$retval$value";
+ $retval = $value;
+
+ # Convert the saved variables with $####{var} back to ${var}
+ $retval =~ s/\$####/\$/g;
# remove the space added in the beginning
$retval =~ s/ //;
- return "$retval"
+ return "$retval";
}
sub set_value {
@@ -771,6 +860,7 @@ sub set_value {
if ($lvalue =~ /^(TEST|BISECT|CONFIG_BISECT)_TYPE(\[.*\])?$/ &&
$prvalue !~ /^(config_|)bisect$/ &&
$prvalue !~ /^build$/ &&
+ $prvalue !~ /^make_warnings_file$/ &&
$buildonly) {
# Note if a test is something other than build, then we
@@ -814,14 +904,22 @@ sub set_eval {
}
sub set_variable {
- my ($lvalue, $rvalue) = @_;
+ my ($lvalue, $rvalue, $command) = @_;
+ # Command line variables override all others
+ if (defined($command_tmp_vars{$lvalue})) {
+ return;
+ }
if ($rvalue =~ /^\s*$/) {
delete $variable{$lvalue};
} else {
$rvalue = process_variables($rvalue);
$variable{$lvalue} = $rvalue;
}
+
+ if (defined($command)) {
+ $command_tmp_vars{$lvalue} = 1;
+ }
}
sub process_compare {
@@ -863,7 +961,6 @@ sub value_defined {
defined($opt{$2});
}
-my $d = 0;
sub process_expression {
my ($name, $val) = @_;
@@ -978,7 +1075,6 @@ sub __read_config {
$override = 0;
if ($type eq "TEST_START") {
-
if ($num_tests_set) {
die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
}
@@ -1048,7 +1144,6 @@ sub __read_config {
$test_num = $old_test_num;
$repeat = $old_repeat;
}
-
} elsif (/^\s*ELSE\b(.*)$/) {
if (!$if) {
die "$name: $.: ELSE found with out matching IF section\n$_";
@@ -1095,7 +1190,7 @@ sub __read_config {
}
}
}
-
+
if ( ! -r $file ) {
die "$name: $.: Can't read file $file\n$_";
}
@@ -1162,7 +1257,7 @@ sub __read_config {
# Config variables are only active while reading the
# config and can be defined anywhere. They also ignore
# TEST_START and DEFAULTS, but are skipped if they are in
- # on of these sections that have SKIP defined.
+ # one of these sections that have SKIP defined.
# The save variable can be
# defined multiple times and the new one simply overrides
# the previous one.
@@ -1186,13 +1281,13 @@ sub __read_config {
}
sub get_test_case {
- print "What test case would you like to run?\n";
- print " (build, install or boot)\n";
- print " Other tests are available but require editing ktest.conf\n";
- print " (see tools/testing/ktest/sample.conf)\n";
- my $ans = <STDIN>;
- chomp $ans;
- $default{"TEST_TYPE"} = $ans;
+ print "What test case would you like to run?\n";
+ print " (build, install or boot)\n";
+ print " Other tests are available but require editing ktest.conf\n";
+ print " (see tools/testing/ktest/sample.conf)\n";
+ my $ans = <STDIN>;
+ chomp $ans;
+ $default{"TEST_TYPE"} = $ans;
}
sub read_config {
@@ -1203,6 +1298,19 @@ sub read_config {
$test_case = __read_config $config, \$test_num;
+ foreach my $val (@command_vars) {
+ chomp $val;
+ my %command_overrides;
+ if ($val =~ m/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) {
+ my $lvalue = $1;
+ my $rvalue = $2;
+
+ set_value($lvalue, $rvalue, 1, \%command_overrides, "COMMAND LINE");
+ } else {
+ die "Invalid option definition '$val'\n";
+ }
+ }
+
# make sure we have all mandatory configs
get_mandatory_configs;
@@ -1288,7 +1396,10 @@ sub __eval_option {
# If a variable contains itself, use the default var
if (($var eq $name) && defined($opt{$var})) {
$o = $opt{$var};
- $retval = "$retval$o";
+ # Only append if the default doesn't contain itself
+ if ($o !~ m/\$\{$var\}/) {
+ $retval = "$retval$o";
+ }
} elsif (defined($opt{$o})) {
$o = $opt{$o};
$retval = "$retval$o";
@@ -1368,11 +1479,6 @@ sub eval_option {
return $option;
}
-sub run_command;
-sub start_monitor;
-sub end_monitor;
-sub wait_for_monitor;
-
sub reboot {
my ($time) = @_;
my $powercycle = 0;
@@ -1433,7 +1539,8 @@ sub reboot {
# Still need to wait for the reboot to finish
wait_for_monitor($time, $reboot_success_line);
-
+ }
+ if ($powercycle || $time) {
end_monitor;
}
}
@@ -1457,8 +1564,6 @@ sub do_not_reboot {
($test_type eq "config_bisect" && $opt{"CONFIG_BISECT_TYPE[$i]"} eq "build");
}
-my $in_die = 0;
-
sub get_test_name() {
my $name;
@@ -1471,20 +1576,22 @@ sub get_test_name() {
}
sub dodie {
-
# avoid recursion
return if ($in_die);
$in_die = 1;
+ if ($monitor_cnt) {
+ # restore terminal settings
+ system("stty $stty_orig");
+ }
+
my $i = $iteration;
doprint "CRITICAL FAILURE... [TEST $i] ", @_, "\n";
if ($reboot_on_error && !do_not_reboot) {
-
doprint "REBOOTING\n";
reboot_to_good;
-
} elsif ($poweroff_on_error && defined($power_off)) {
doprint "POWERING OFF\n";
`$power_off`;
@@ -1519,13 +1626,9 @@ sub dodie {
close O;
close L;
}
- send_email("KTEST: critical failure for test $i [$name]",
- "Your test started at $script_start_time has failed with:\n@_\n", $log_file);
- }
- if ($monitor_cnt) {
- # restore terminal settings
- system("stty $stty_orig");
+ send_email("KTEST: critical failure for test $i [$name]",
+ "Your test started at $script_start_time has failed with:\n@_\n", $log_file);
}
if (defined($post_test)) {
@@ -1709,81 +1812,81 @@ sub wait_for_monitor {
}
sub save_logs {
- my ($result, $basedir) = @_;
- my @t = localtime;
- my $date = sprintf "%04d%02d%02d%02d%02d%02d",
- 1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0];
+ my ($result, $basedir) = @_;
+ my @t = localtime;
+ my $date = sprintf "%04d%02d%02d%02d%02d%02d",
+ 1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0];
- my $type = $build_type;
- if ($type =~ /useconfig/) {
- $type = "useconfig";
- }
+ my $type = $build_type;
+ if ($type =~ /useconfig/) {
+ $type = "useconfig";
+ }
- my $dir = "$machine-$test_type-$type-$result-$date";
+ my $dir = "$machine-$test_type-$type-$result-$date";
- $dir = "$basedir/$dir";
+ $dir = "$basedir/$dir";
- if (!-d $dir) {
- mkpath($dir) or
- dodie "can't create $dir";
- }
+ if (!-d $dir) {
+ mkpath($dir) or
+ dodie "can't create $dir";
+ }
- my %files = (
- "config" => $output_config,
- "buildlog" => $buildlog,
- "dmesg" => $dmesg,
- "testlog" => $testlog,
- );
+ my %files = (
+ "config" => $output_config,
+ "buildlog" => $buildlog,
+ "dmesg" => $dmesg,
+ "testlog" => $testlog,
+ );
- while (my ($name, $source) = each(%files)) {
- if (-f "$source") {
- cp "$source", "$dir/$name" or
- dodie "failed to copy $source";
- }
+ while (my ($name, $source) = each(%files)) {
+ if (-f "$source") {
+ cp "$source", "$dir/$name" or
+ dodie "failed to copy $source";
}
+ }
- doprint "*** Saved info to $dir ***\n";
+ doprint "*** Saved info to $dir ***\n";
}
sub fail {
- if ($die_on_failure) {
- dodie @_;
- }
+ if ($die_on_failure) {
+ dodie @_;
+ }
- doprint "FAILED\n";
+ doprint "FAILED\n";
- my $i = $iteration;
+ my $i = $iteration;
- # no need to reboot for just building.
- if (!do_not_reboot) {
- doprint "REBOOTING\n";
- reboot_to_good $sleep_time;
- }
+ # no need to reboot for just building.
+ if (!do_not_reboot) {
+ doprint "REBOOTING\n";
+ reboot_to_good $sleep_time;
+ }
- my $name = "";
+ my $name = "";
- if (defined($test_name)) {
- $name = " ($test_name)";
- }
+ if (defined($test_name)) {
+ $name = " ($test_name)";
+ }
- print_times;
+ print_times;
- doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
- doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
- doprint "KTEST RESULT: TEST $i$name Failed: ", @_, "\n";
- doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
- doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+ doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+ doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+ doprint "KTEST RESULT: TEST $i$name Failed: ", @_, "\n";
+ doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+ doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
- if (defined($store_failures)) {
- save_logs "fail", $store_failures;
- }
+ if (defined($store_failures)) {
+ save_logs "fail", $store_failures;
+ }
- if (defined($post_test)) {
- run_command $post_test;
- }
+ if (defined($post_test)) {
+ run_command $post_test;
+ }
- return 1;
+ return 1;
}
sub run_command {
@@ -1799,6 +1902,14 @@ sub run_command {
$command =~ s/\$SSH_USER/$ssh_user/g;
$command =~ s/\$MACHINE/$machine/g;
+ if (!defined($timeout)) {
+ $timeout = $run_timeout;
+ }
+
+ if (!defined($timeout)) {
+ $timeout = -1; # tell wait_for_input to wait indefinitely
+ }
+
doprint("$command ... ");
$start_time = time;
@@ -1825,13 +1936,10 @@ sub run_command {
while (1) {
my $fp = \*CMD;
- if (defined($timeout)) {
- doprint "timeout = $timeout\n";
- }
my $line = wait_for_input($fp, $timeout);
if (!defined($line)) {
my $now = time;
- if (defined($timeout) && (($now - $start_time) >= $timeout)) {
+ if ($timeout >= 0 && (($now - $start_time) >= $timeout)) {
doprint "Hit timeout of $timeout, killing process\n";
$hit_timeout = 1;
kill 9, $pid;
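In short, the timeout resolution added above is: an explicit per-call timeout wins, then the new RUN_TIMEOUT option applies, and with neither set the sentinel -1 tells wait_for_input to block indefinitely. A small Python sketch of that order (names hypothetical):

from typing import Optional

RUN_TIMEOUT: Optional[int] = None  # would come from the ktest config

def resolve_timeout(timeout: Optional[int]) -> int:
    """Mirror of the checks in run_command: explicit argument first,
    then RUN_TIMEOUT, then -1 meaning 'wait indefinitely'."""
    if timeout is None:
        timeout = RUN_TIMEOUT
    if timeout is None:
        timeout = -1  # wait_for_input treats negative values as no timeout
    return timeout

assert resolve_timeout(30) == 30
assert resolve_timeout(None) == -1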
@@ -1912,11 +2020,11 @@ sub run_scp_mod {
sub _get_grub_index {
- my ($command, $target, $skip) = @_;
+ my ($command, $target, $skip, $submenu) = @_;
return if (defined($grub_number) && defined($last_grub_menu) &&
- $last_grub_menu eq $grub_menu && defined($last_machine) &&
- $last_machine eq $machine);
+ $last_grub_menu eq $grub_menu && defined($last_machine) &&
+ $last_machine eq $machine);
doprint "Find $reboot_type menu ... ";
$grub_number = -1;
@@ -1924,16 +2032,21 @@ sub _get_grub_index {
my $ssh_grub = $ssh_exec;
$ssh_grub =~ s,\$SSH_COMMAND,$command,g;
- open(IN, "$ssh_grub |")
- or dodie "unable to execute $command";
+ open(IN, "$ssh_grub |") or
+ dodie "unable to execute $command";
my $found = 0;
+ my $submenu_number = 0;
+
while (<IN>) {
if (/$target/) {
$grub_number++;
$found = 1;
last;
+ } elsif (defined($submenu) && /$submenu/) {
+ $submenu_number++;
+ $grub_number = -1;
} elsif (/$skip/) {
$grub_number++;
}
@@ -1942,6 +2055,9 @@ sub _get_grub_index {
dodie "Could not find '$grub_menu' through $command on $machine"
if (!$found);
+ if ($submenu_number > 0) {
+ $grub_number = "$submenu_number>$grub_number";
+ }
doprint "$grub_number\n";
$last_grub_menu = $grub_menu;
$last_machine = $machine;
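The submenu counting above yields composite indexes such as "1>3" (third entry inside the first submenu), which is the form grub2-reboot accepts. A rough Python model of the scan, for illustration:

import re

def find_grub_entry(lines, target, skip, submenu):
    """Count menuentry lines, resetting the count when a submenu starts,
    as _get_grub_index does above."""
    grub_number = -1
    submenu_number = 0
    found = False
    for line in lines:
        if re.search(target, line):
            grub_number += 1
            found = True
            break
        if submenu and re.search(submenu, line):
            submenu_number += 1
            grub_number = -1
        elif re.search(skip, line):
            grub_number += 1
    if not found:
        raise LookupError("menu entry not found")
    if submenu_number > 0:
        return f"{submenu_number}>{grub_number}"
    return str(grub_number)

menu = ["menuentry 'Ubuntu'", "submenu 'Advanced'", "menuentry 'linux-a'",
        "menuentry 'linux-test'"]
print(find_grub_entry(menu, r"linux-test", r"^\s*menuentry\s", r"^\s*submenu\s"))
# -> "1>1"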
@@ -1952,6 +2068,7 @@ sub get_grub_index {
my $command;
my $target;
my $skip;
+ my $submenu;
my $grub_menu_qt;
if ($reboot_type !~ /^grub/) {
@@ -1966,21 +2083,21 @@ sub get_grub_index {
$skip = '^\s*title\s';
} elsif ($reboot_type eq "grub2") {
$command = "cat $grub_file";
- $target = '^menuentry.*' . $grub_menu_qt;
- $skip = '^menuentry\s|^submenu\s';
+ $target = '^\s*menuentry.*' . $grub_menu_qt;
+ $skip = '^\s*menuentry\s';
+ $submenu = '^\s*submenu\s';
} elsif ($reboot_type eq "grub2bls") {
- $command = $grub_bls_get;
- $target = '^title=.*' . $grub_menu_qt;
- $skip = '^title=';
+ $command = $grub_bls_get;
+ $target = '^title=.*' . $grub_menu_qt;
+ $skip = '^title=';
} else {
return;
}
- _get_grub_index($command, $target, $skip);
+ _get_grub_index($command, $target, $skip, $submenu);
}
-sub wait_for_input
-{
+sub wait_for_input {
my ($fp, $time) = @_;
my $start_time;
my $rin;
@@ -1994,6 +2111,11 @@ sub wait_for_input
$time = $timeout;
}
+ if ($time < 0) {
+ # Negative number means wait indefinitely
+ undef $time;
+ }
+
$rin = '';
vec($rin, fileno($fp), 1) = 1;
vec($rin, fileno(\*STDIN), 1) = 1;
@@ -2040,7 +2162,7 @@ sub reboot_to {
if ($reboot_type eq "grub") {
run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'";
} elsif (($reboot_type eq "grub2") or ($reboot_type eq "grub2bls")) {
- run_ssh "$grub_reboot $grub_number";
+ run_ssh "$grub_reboot \"'$grub_number'\"";
} elsif ($reboot_type eq "syslinux") {
run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path";
} elsif (defined $reboot_script) {
@@ -2096,7 +2218,6 @@ sub monitor {
my $version_found = 0;
while (!$done) {
-
if ($bug && defined($stop_after_failure) &&
$stop_after_failure >= 0) {
my $time = $stop_after_failure - (time - $failure_start);
@@ -2326,6 +2447,11 @@ sub get_version {
return if ($have_version);
doprint "$make kernelrelease ... ";
$version = `$make -s kernelrelease | tail -1`;
+ if (!length($version)) {
+ run_command "$make allnoconfig" or return 0;
+ doprint "$make kernelrelease ... ";
+ $version = `$make -s kernelrelease | tail -1`;
+ }
chomp($version);
doprint "$version\n";
$have_version = 1;
@@ -2349,9 +2475,6 @@ sub start_monitor_and_install {
return monitor;
}
-my $check_build_re = ".*:.*(warning|error|Error):.*";
-my $utf8_quote = "\\x{e2}\\x{80}(\\x{98}|\\x{99})";
-
sub process_warning_line {
my ($line) = @_;
@@ -2379,8 +2502,6 @@ sub process_warning_line {
# Returns 1 if OK
# 0 otherwise
sub check_buildlog {
- return 1 if (!defined $warnings_file);
-
my %warnings_list;
# Failed builds should not reboot the target
@@ -2394,25 +2515,28 @@ sub check_buildlog {
while (<IN>) {
if (/$check_build_re/) {
my $warning = process_warning_line $_;
-
+
$warnings_list{$warning} = 1;
}
}
close(IN);
}
- # If warnings file didn't exist, and WARNINGS_FILE exist,
- # then we fail on any warning!
-
open(IN, $buildlog) or dodie "Can't open $buildlog";
while (<IN>) {
if (/$check_build_re/) {
my $warning = process_warning_line $_;
if (!defined $warnings_list{$warning}) {
- fail "New warning found (not in $warnings_file)\n$_\n";
- $no_reboot = $save_no_reboot;
- return 0;
+ $warning_found++;
+
+ # If warnings file didn't exist, and WARNINGS_FILE exist,
+ # then we fail on any warning!
+ if (defined $warnings_file) {
+ fail "New warning found (not in $warnings_file)\n$_\n";
+ $no_reboot = $save_no_reboot;
+ return 0;
+ }
}
}
}
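Summarizing the rework above: every warning missing from the baseline is counted (feeding the WARNING total printed by print_times), but the test only fails when a WARNINGS_FILE is configured. A simplified Python sketch, with the process_warning_line normalization omitted:

import re

CHECK_BUILD_RE = re.compile(r".*:.*(warning|error|Error):.*")

def check_buildlog(buildlog_lines, baseline):
    """Return (new_warning_count, ok). baseline is the set of known
    warnings, or None when no WARNINGS_FILE was configured."""
    known = baseline or set()
    new = 0
    for line in buildlog_lines:
        if CHECK_BUILD_RE.match(line) and line not in known:
            new += 1
            if baseline is not None:
                return new, False  # fail on the first unknown warning
    return new, True

log = ["cc1: warning: unused variable 'x'"]
print(check_buildlog(log, baseline=None))   # (1, True): counted, not fatal
print(check_buildlog(log, baseline=set()))  # (1, False): fatal with a baseline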
@@ -2571,7 +2695,6 @@ sub build {
run_command "mv $outputdir/config_temp $output_config" or
dodie "moving config_temp";
}
-
} elsif (!$noclean) {
unlink "$output_config";
run_command "$make mrproper" or
@@ -2594,6 +2717,9 @@ sub build {
# Run old config regardless, to enforce min configurations
make_oldconfig;
+ if (not defined($build_options)) {
+ $build_options = "";
+ }
my $build_ret = run_command "$make $build_options", $buildlog;
if (defined($post_build)) {
@@ -2649,14 +2775,15 @@ sub success {
print_times;
- doprint "\n\n*******************************************\n";
- doprint "*******************************************\n";
- doprint "KTEST RESULT: TEST $i$name SUCCESS!!!! **\n";
- doprint "*******************************************\n";
- doprint "*******************************************\n";
+ doprint "\n\n";
+ doprint "*******************************************\n";
+ doprint "*******************************************\n";
+ doprint "KTEST RESULT: TEST $i$name SUCCESS!!!! **\n";
+ doprint "*******************************************\n";
+ doprint "*******************************************\n";
if (defined($store_successes)) {
- save_logs "success", $store_successes;
+ save_logs "success", $store_successes;
}
if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) {
@@ -2698,8 +2825,6 @@ sub child_run_test {
exit $run_command_status;
}
-my $child_done;
-
sub child_finished {
$child_done = 1;
}
@@ -2868,8 +2993,6 @@ sub run_bisect_test {
my $failed = 0;
my $result;
- my $output;
- my $ret;
$in_bisect = 1;
@@ -3031,7 +3154,6 @@ sub bisect {
}
if ($do_check) {
-
# get current HEAD
my $head = get_sha1("HEAD");
@@ -3071,13 +3193,11 @@ sub bisect {
run_command "git bisect replay $replay" or
dodie "failed to run replay";
} else {
-
run_command "git bisect good $good" or
dodie "could not set bisect good to $good";
run_git_bisect "git bisect bad $bad" or
dodie "could not set bisect bad to $bad";
-
}
if (defined($start)) {
@@ -3103,35 +3223,13 @@ sub bisect {
success $i;
}
-# config_ignore holds the configs that were set (or unset) for
-# a good config and we will ignore these configs for the rest
-# of a config bisect. These configs stay as they were.
-my %config_ignore;
-
-# config_set holds what all configs were set as.
-my %config_set;
-
-# config_off holds the set of configs that the bad config had disabled.
-# We need to record them and set them in the .config when running
-# olddefconfig, because olddefconfig keeps the defaults.
-my %config_off;
-
-# config_off_tmp holds a set of configs to turn off for now
-my @config_off_tmp;
-
-# config_list is the set of configs that are being tested
-my %config_list;
-my %null_config;
-
-my %dependency;
-
sub assign_configs {
my ($hash, $config) = @_;
doprint "Reading configs from $config\n";
- open (IN, $config)
- or dodie "Failed to read $config";
+ open (IN, $config) or
+ dodie "Failed to read $config";
while (<IN>) {
chomp;
@@ -3219,8 +3317,6 @@ sub config_bisect_end {
doprint "***************************************\n\n";
}
-my $pass = 1;
-
sub run_config_bisect {
my ($good, $bad, $last_result) = @_;
my $reset = "";
@@ -3243,13 +3339,13 @@ sub run_config_bisect {
$ret = run_config_bisect_test $config_bisect_type;
if ($ret) {
- doprint "NEW GOOD CONFIG ($pass)\n";
+ doprint "NEW GOOD CONFIG ($pass)\n";
system("cp $output_config $tmpdir/good_config.tmp.$pass");
$pass++;
# Return 3 for good config
return 3;
} else {
- doprint "NEW BAD CONFIG ($pass)\n";
+ doprint "NEW BAD CONFIG ($pass)\n";
system("cp $output_config $tmpdir/bad_config.tmp.$pass");
$pass++;
# Return 4 for bad config
@@ -3284,10 +3380,11 @@ sub config_bisect {
if (!defined($config_bisect_exec)) {
# First check the location that ktest.pl ran
- my @locations = ( "$pwd/config-bisect.pl",
- "$dirname/config-bisect.pl",
- "$builddir/tools/testing/ktest/config-bisect.pl",
- undef );
+ my @locations = (
+ "$pwd/config-bisect.pl",
+ "$dirname/config-bisect.pl",
+ "$builddir/tools/testing/ktest/config-bisect.pl",
+ undef );
foreach my $loc (@locations) {
doprint "loc = $loc\n";
$config_bisect_exec = $loc;
@@ -3368,7 +3465,7 @@ sub config_bisect {
} while ($ret == 3 || $ret == 4);
if ($ret == 2) {
- config_bisect_end "$good_config.tmp", "$bad_config.tmp";
+ config_bisect_end "$good_config.tmp", "$bad_config.tmp";
}
return $ret if ($ret < 0);
@@ -3442,11 +3539,37 @@ sub patchcheck {
@list = reverse @list;
}
+ my %skip_list;
+ my $will_skip = 0;
+
+ if (defined($patchcheck_skip)) {
+ foreach my $s (split /\s+/, $patchcheck_skip) {
+ $s = `git log --pretty=oneline $s~1..$s`;
+ $s =~ s/^(\S+).*/$1/;
+ chomp $s;
+ $skip_list{$s} = 1;
+ $will_skip++;
+ }
+ }
+
doprint("Going to test the following commits:\n");
foreach my $l (@list) {
+ my $sha1 = $l;
+ $sha1 =~ s/^([[:xdigit:]]+).*/$1/;
+ next if (defined($skip_list{$sha1}));
doprint "$l\n";
}
+ if ($will_skip) {
+ doprint("\nSkipping the following commits:\n");
+ foreach my $l (@list) {
+ my $sha1 = $l;
+ $sha1 =~ s/^([[:xdigit:]]+).*/$1/;
+ next if (!defined($skip_list{$sha1}));
+ doprint "$l\n";
+ }
+ }
+
my $save_clean = $noclean;
my %ignored_warnings;
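Each PATCHCHECK_SKIP entry is normalized above through `git log --pretty=oneline <rev>~1..<rev>`, so abbreviated revisions resolve to the same full SHA1s as the commits being tested. Roughly, in Python (a sketch; the helper name is hypothetical):

import subprocess

def resolve_full_sha1(rev: str) -> str:
    """Expand an abbreviated rev into the full SHA1 of that one commit."""
    out = subprocess.run(["git", "log", "--pretty=oneline", f"{rev}~1..{rev}"],
                         capture_output=True, text=True, check=True).stdout
    return out.split()[0]  # the one-line format starts with the SHA1

skip_list = {resolve_full_sha1(rev) for rev in ["HEAD"]}  # e.g. skip the tip commit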
@@ -3461,6 +3584,11 @@ sub patchcheck {
my $sha1 = $item;
$sha1 =~ s/^([[:xdigit:]]+).*/$1/;
+ if (defined($skip_list{$sha1})) {
+ doprint "\nSkipping \"$item\"\n\n";
+ next;
+ }
+
doprint "\nProcessing commit \"$item\"\n\n";
run_command "git checkout $sha1" or
@@ -3511,14 +3639,6 @@ sub patchcheck {
return 1;
}
-my %depends;
-my %depcount;
-my $iflevel = 0;
-my @ifdeps;
-
-# prevent recursion
-my %read_kconfigs;
-
sub add_dep {
# $config depends on $dep
my ($config, $dep) = @_;
@@ -3548,7 +3668,6 @@ sub read_kconfig {
my $cont = 0;
my $line;
-
if (! -f $kconfig) {
doprint "file $kconfig does not exist, skipping\n";
return;
@@ -3630,8 +3749,8 @@ sub read_kconfig {
sub read_depends {
# find out which arch this is by the kconfig file
- open (IN, $output_config)
- or dodie "Failed to read $output_config";
+ open (IN, $output_config) or
+ dodie "Failed to read $output_config";
my $arch;
while (<IN>) {
if (m,Linux/(\S+)\s+\S+\s+Kernel Configuration,) {
@@ -3657,7 +3776,7 @@ sub read_depends {
if (! -f $kconfig && $arch =~ /\d$/) {
my $orig = $arch;
- # some subarchs have numbers, truncate them
+ # some subarchs have numbers, truncate them
$arch =~ s/\d*$//;
$kconfig = "$builddir/arch/$arch/Kconfig";
if (! -f $kconfig) {
@@ -3706,7 +3825,6 @@ sub get_depends {
my @configs;
while ($dep =~ /[$valid]/) {
-
if ($dep =~ /^[^$valid]*([$valid]+)/) {
my $conf = "CONFIG_" . $1;
@@ -3721,12 +3839,6 @@ sub get_depends {
return @configs;
}
-my %min_configs;
-my %keep_configs;
-my %save_configs;
-my %processed_configs;
-my %nochange_config;
-
sub test_this_config {
my ($config) = @_;
@@ -3763,9 +3875,10 @@ sub test_this_config {
# .config to make sure it is missing the config that
# we had before
my %configs = %min_configs;
- delete $configs{$config};
+ $configs{$config} = "# $config is not set";
make_new_config ((values %configs), (values %keep_configs));
make_oldconfig;
+ delete $configs{$config};
undef %configs;
assign_configs \%configs, $output_config;
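The change above matters because `make olddefconfig` restores defaults: dropping an option from the candidate list is not enough, it has to be written out as explicitly disabled. A tiny Python sketch of the idea (hypothetical helper name):

def candidate_config_lines(min_configs: dict, config: str) -> list:
    """Return the min-config lines with `config` explicitly disabled.
    Merely dropping the line would let olddefconfig re-enable it."""
    configs = dict(min_configs)
    configs[config] = f"# {config} is not set"
    return list(configs.values())

print(candidate_config_lines({"CONFIG_SMP": "CONFIG_SMP=y",
                              "CONFIG_NET": "CONFIG_NET=y"}, "CONFIG_SMP"))
# -> ['# CONFIG_SMP is not set', 'CONFIG_NET=y']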
@@ -3852,7 +3965,7 @@ sub make_min_config {
foreach my $config (@config_keys) {
my $kconfig = chomp_config $config;
if (!defined $depcount{$kconfig}) {
- $depcount{$kconfig} = 0;
+ $depcount{$kconfig} = 0;
}
}
@@ -3887,7 +4000,6 @@ sub make_min_config {
my $take_two = 0;
while (!$done) {
-
my $config;
my $found;
@@ -3898,7 +4010,7 @@ sub make_min_config {
# Sort keys by who is most dependent on
@test_configs = sort { $depcount{chomp_config($b)} <=> $depcount{chomp_config($a)} }
- @test_configs ;
+ @test_configs ;
# Put configs that did not modify the config at the end.
my $reset = 1;
@@ -3954,13 +4066,13 @@ sub make_min_config {
my $failed = 0;
build "oldconfig" or $failed = 1;
if (!$failed) {
- start_monitor_and_install or $failed = 1;
+ start_monitor_and_install or $failed = 1;
- if ($type eq "test" && !$failed) {
- do_run_test or $failed = 1;
- }
+ if ($type eq "test" && !$failed) {
+ do_run_test or $failed = 1;
+ }
- end_monitor;
+ end_monitor;
}
$in_bisect = 0;
@@ -3974,8 +4086,8 @@ sub make_min_config {
# update new ignore configs
if (defined($ignore_config)) {
- open (OUT, ">$temp_config")
- or dodie "Can't write to $temp_config";
+ open (OUT, ">$temp_config") or
+ dodie "Can't write to $temp_config";
foreach my $config (keys %save_configs) {
print OUT "$save_configs{$config}\n";
}
@@ -4002,8 +4114,8 @@ sub make_min_config {
}
# Save off all the current mandatory configs
- open (OUT, ">$temp_config")
- or dodie "Can't write to $temp_config";
+ open (OUT, ">$temp_config") or
+ dodie "Can't write to $temp_config";
foreach my $config (keys %keep_configs) {
print OUT "$keep_configs{$config}\n";
}
@@ -4041,7 +4153,6 @@ sub make_warnings_file {
open(IN, $buildlog) or dodie "Can't open $buildlog";
while (<IN>) {
-
# Some compilers use UTF-8 extended for quotes
# for distcc heterogeneous systems, this causes issues
s/$utf8_quote/'/g;
@@ -4057,98 +4168,6 @@ sub make_warnings_file {
success $i;
}
-$#ARGV < 1 or die "ktest.pl version: $VERSION\n usage: ktest.pl [config-file]\n";
-
-if ($#ARGV == 0) {
- $ktest_config = $ARGV[0];
- if (! -f $ktest_config) {
- print "$ktest_config does not exist.\n";
- if (!read_yn "Create it?") {
- exit 0;
- }
- }
-}
-
-if (! -f $ktest_config) {
- $newconfig = 1;
- get_test_case;
- open(OUT, ">$ktest_config") or die "Can not create $ktest_config";
- print OUT << "EOF"
-# Generated by ktest.pl
-#
-
-# PWD is a ktest.pl variable that will result in the process working
-# directory that ktest.pl is executed in.
-
-# THIS_DIR is automatically assigned the PWD of the path that generated
-# the config file. It is best to use this variable when assigning other
-# directory paths within this directory. This allows you to easily
-# move the test cases to other locations or to other machines.
-#
-THIS_DIR := $variable{"PWD"}
-
-# Define each test with TEST_START
-# The config options below it will override the defaults
-TEST_START
-TEST_TYPE = $default{"TEST_TYPE"}
-
-DEFAULTS
-EOF
-;
- close(OUT);
-}
-read_config $ktest_config;
-
-if (defined($opt{"LOG_FILE"})) {
- $opt{"LOG_FILE"} = eval_option("LOG_FILE", $opt{"LOG_FILE"}, -1);
-}
-
-# Append any configs entered in manually to the config file.
-my @new_configs = keys %entered_configs;
-if ($#new_configs >= 0) {
- print "\nAppending entered in configs to $ktest_config\n";
- open(OUT, ">>$ktest_config") or die "Can not append to $ktest_config";
- foreach my $config (@new_configs) {
- print OUT "$config = $entered_configs{$config}\n";
- $opt{$config} = process_variables($entered_configs{$config});
- }
-}
-
-if (defined($opt{"LOG_FILE"})) {
- if ($opt{"CLEAR_LOG"}) {
- unlink $opt{"LOG_FILE"};
- }
- open(LOG, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}";
- LOG->autoflush(1);
-}
-
-doprint "\n\nSTARTING AUTOMATED TESTS\n\n";
-
-for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) {
-
- if (!$i) {
- doprint "DEFAULT OPTIONS:\n";
- } else {
- doprint "\nTEST $i OPTIONS";
- if (defined($repeat_tests{$i})) {
- $repeat = $repeat_tests{$i};
- doprint " ITERATE $repeat";
- }
- doprint "\n";
- }
-
- foreach my $option (sort keys %opt) {
-
- if ($option =~ /\[(\d+)\]$/) {
- next if ($i != $1);
- } else {
- next if ($i);
- }
-
- doprint "$option = $opt{$option}\n";
- }
-}
-
sub option_defined {
my ($option) = @_;
@@ -4261,7 +4280,6 @@ sub do_send_mail {
}
sub send_email {
-
if (defined($mailto)) {
if (!defined($mailer)) {
doprint "No email sent: email or mailer not specified in config.\n";
@@ -4272,14 +4290,163 @@ sub send_email {
}
sub cancel_test {
+ if ($monitor_cnt) {
+ end_monitor;
+ }
if ($email_when_canceled) {
my $name = get_test_name;
- send_email("KTEST: Your [$name] test was cancelled",
- "Your test started at $script_start_time was cancelled: sig int");
+ send_email("KTEST: Your [$name] test was cancelled",
+ "Your test started at $script_start_time was cancelled: sig int");
}
die "\nCaught Sig Int, test interrupted: $!\n"
}
+sub die_usage {
+ die << "EOF"
+ktest.pl version: $VERSION
+ usage: ktest.pl [options] [config-file]
+ [options]:
+ -D value: Where value can act as an option override.
+ -D BUILD_NOCLEAN=1
+ Sets global BUILD_NOCLEAN to 1
+ -D TEST_TYPE[2]=build
+ Sets TEST_TYPE of test 2 to "build"
+
+ It can also override all temp (":=") config variables.
+ -D USE_TEMP_DIR:=1
+ Will override any config file definition of the form
+ "USE_TEMP_DIR := ..."
+
+EOF
+;
+}
+
+while ( $#ARGV >= 0 ) {
+ if ( $ARGV[0] eq "-D" ) {
+ shift;
+ die_usage if ($#ARGV < 0);
+ my $val = shift;
+
+ if ($val =~ m/(.*?):=(.*)$/) {
+ set_variable($1, $2, 1);
+ } else {
+ $command_vars[$#command_vars + 1] = $val;
+ }
+
+ } elsif ( $ARGV[0] =~ m/^-D(.*)/) {
+ my $val = $1;
+ shift;
+
+ if ($val =~ m/(.*?):=(.*)$/) {
+ set_variable($1, $2, 1);
+ } else {
+ $command_vars[$#command_vars + 1] = $val;
+ }
+ } elsif ( $ARGV[0] eq "-h" ) {
+ die_usage;
+ } else {
+ last;
+ }
+}
+
+$#ARGV < 1 or die_usage;
+if ($#ARGV == 0) {
+ $ktest_config = $ARGV[0];
+ if (! -f $ktest_config) {
+ print "$ktest_config does not exist.\n";
+ if (!read_yn "Create it?") {
+ exit 0;
+ }
+ }
+}
+
+if (! -f $ktest_config) {
+ $newconfig = 1;
+ get_test_case;
+ open(OUT, ">$ktest_config") or die "Can not create $ktest_config";
+ print OUT << "EOF"
+# Generated by ktest.pl
+#
+
+# PWD is a ktest.pl variable that will result in the process working
+# directory that ktest.pl is executed in.
+
+# THIS_DIR is automatically assigned the PWD of the path that generated
+# the config file. It is best to use this variable when assigning other
+# directory paths within this directory. This allows you to easily
+# move the test cases to other locations or to other machines.
+#
+THIS_DIR := $variable{"PWD"}
+
+# Define each test with TEST_START
+# The config options below it will override the defaults
+TEST_START
+TEST_TYPE = $default{"TEST_TYPE"}
+
+DEFAULTS
+EOF
+;
+ close(OUT);
+}
+read_config $ktest_config;
+
+if (defined($opt{"LOG_FILE"})) {
+ $opt{"LOG_FILE"} = eval_option("LOG_FILE", $opt{"LOG_FILE"}, -1);
+}
+
+# Append any configs entered in manually to the config file.
+my @new_configs = keys %entered_configs;
+if ($#new_configs >= 0) {
+ print "\nAppending entered in configs to $ktest_config\n";
+ open(OUT, ">>$ktest_config") or die "Can not append to $ktest_config";
+ foreach my $config (@new_configs) {
+ print OUT "$config = $entered_configs{$config}\n";
+ $opt{$config} = process_variables($entered_configs{$config});
+ }
+}
+
+if (defined($opt{"LOG_FILE"})) {
+ if ($opt{"CLEAR_LOG"}) {
+ unlink $opt{"LOG_FILE"};
+ }
+
+ if (! -e $opt{"LOG_FILE"} && $opt{"LOG_FILE"} =~ m,^(.*/),) {
+ my $dir = $1;
+ if (! -d $dir) {
+ mkpath($dir) or die "Failed to create directories '$dir': $!";
+ print "\nThe log directory $dir did not exist, so it was created.\n";
+ }
+ }
+ open(LOG, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}";
+ LOG->autoflush(1);
+}
+
+doprint "\n\nSTARTING AUTOMATED TESTS\n\n";
+
+for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) {
+
+ if (!$i) {
+ doprint "DEFAULT OPTIONS:\n";
+ } else {
+ doprint "\nTEST $i OPTIONS";
+ if (defined($repeat_tests{$i})) {
+ $repeat = $repeat_tests{$i};
+ doprint " ITERATE $repeat";
+ }
+ doprint "\n";
+ }
+
+ foreach my $option (sort keys %opt) {
+ if ($option =~ /\[(\d+)\]$/) {
+ next if ($i != $1);
+ } else {
+ next if ($i);
+ }
+
+ doprint "$option = $opt{$option}\n";
+ }
+}
+
$SIG{INT} = qw(cancel_test);
# The first thing we need to do is the builds
@@ -4323,15 +4490,15 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
# The first test may override the PRE_KTEST option
if ($i == 1) {
- if (defined($pre_ktest)) {
- doprint "\n";
- run_command $pre_ktest;
- }
- if ($email_when_started) {
+ if (defined($pre_ktest)) {
+ doprint "\n";
+ run_command $pre_ktest;
+ }
+ if ($email_when_started) {
my $name = get_test_name;
- send_email("KTEST: Your [$name] test was started",
- "Your test was started on $script_start_time");
- }
+ send_email("KTEST: Your [$name] test was started",
+ "Your test was started on $script_start_time");
+ }
}
# Any test can override the POST_KTEST option
@@ -4405,11 +4572,15 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
doprint "RUNNING TEST $i of $opt{NUM_TESTS}$name with option $test_type $run_type$installme\n\n";
+ # Always show which build directory and output directory is being used
+ doprint "BUILD_DIR=$builddir\n";
+ doprint "OUTPUT_DIR=$outputdir\n\n";
+
if (defined($pre_test)) {
my $ret = run_command $pre_test;
if (!$ret && defined($pre_test_die) &&
$pre_test_die) {
- dodie "failed to pre_test\n";
+ dodie "failed to pre_test\n";
}
}
@@ -4503,12 +4674,11 @@ if ($opt{"POWEROFF_ON_SUCCESS"}) {
run_command $switch_to_good;
}
-
doprint "\n $successes of $opt{NUM_TESTS} tests were successful\n\n";
if ($email_when_finished) {
send_email("KTEST: Your test has finished!",
- "$successes of $opt{NUM_TESTS} tests started at $script_start_time were successful!");
+ "$successes of $opt{NUM_TESTS} tests started at $script_start_time were successful!");
}
if (defined($opt{"LOG_FILE"})) {
@@ -4517,3 +4687,12 @@ if (defined($opt{"LOG_FILE"})) {
}
exit 0;
+
+##
+# The following are here to standardize tabs/spaces/etc across the most likely editors
+##
+
+# Local Variables:
+# mode: perl
+# End:
+# vim: softtabstop=4
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index 5e7d1d729752..9c4c449a8f3e 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -259,6 +259,14 @@
# If PATH is not a config variable, then the ${PATH} in
# the MAKE_CMD option will be evaluated by the shell when
# the MAKE_CMD option is passed into shell processing.
+#
+# Shell commands can also be inserted with the ${shell <command>}
+# expression. Note, this is case sensitive, thus ${SHELL <command>}
+# will not work.
+#
+# HOSTNAME := ${shell hostname}
+# DEFAULTS IF "${HOSTNAME}" == "frodo"
+#
#### Using options in other options ####
#
@@ -809,6 +817,11 @@
# is issued instead of a reboot.
# CONNECT_TIMEOUT = 25
+# The timeout in seconds for how long to wait for any running command
+# to complete. If not defined, commands may run indefinitely.
+# (default undefined)
+#RUN_TIMEOUT = 600
+
# In between tests, a reboot of the box may occur, and this
# is the time to wait for the console after it stops producing
# output. Some machines may not produce a large lag on reboot
@@ -1004,6 +1017,8 @@
# Note, PATCHCHECK_CHERRY requires PATCHCHECK_END to be defined.
# (default 0)
#
+# PATCHCHECK_SKIP is an optional whitespace-separated list of commit SHA1s to skip testing
+#
# PATCHCHECK_TYPE is required and is the type of test to run:
# build, boot, test.
#
diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config
index 9235b7d42d38..422e186cf3cf 100644
--- a/tools/testing/kunit/configs/all_tests.config
+++ b/tools/testing/kunit/configs/all_tests.config
@@ -1,3 +1,57 @@
+# This config enables as many tests as possible under UML.
+# It is intended for use in continuous integration systems and similar for
+# automated testing of as much as possible.
+# The config is manually maintained, though it uses KUNIT_ALL_TESTS=y to enable
+# any tests whose dependencies are already satisfied. Please feel free to add
+# more options if they enable any new tests.
+
CONFIG_KUNIT=y
-CONFIG_KUNIT_TEST=y
CONFIG_KUNIT_EXAMPLE_TEST=y
+CONFIG_KUNIT_ALL_TESTS=y
+
+CONFIG_FORTIFY_SOURCE=y
+
+CONFIG_IIO=y
+
+CONFIG_EXT4_FS=y
+
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+
+CONFIG_PCI=y
+CONFIG_USB4=y
+CONFIG_I2C=y
+
+CONFIG_NET=y
+CONFIG_MCTP=y
+CONFIG_MCTP_FLOWS=y
+
+CONFIG_INET=y
+CONFIG_MPTCP=y
+
+CONFIG_NETDEVICES=y
+CONFIG_WLAN=y
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_WLAN_VENDOR_INTEL=y
+CONFIG_IWLWIFI=y
+
+CONFIG_DAMON=y
+CONFIG_DAMON_VADDR=y
+CONFIG_DAMON_PADDR=y
+
+CONFIG_REGMAP_BUILD=y
+
+CONFIG_AUDIT=y
+
+CONFIG_PRIME_NUMBERS=y
+
+CONFIG_SECURITY=y
+CONFIG_SECURITY_APPARMOR=y
+CONFIG_SECURITY_LANDLOCK=y
+
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_TOPOLOGY_BUILD=y
+CONFIG_SND_SOC_CS35L56_I2C=y
diff --git a/tools/testing/kunit/configs/arch_uml.config b/tools/testing/kunit/configs/arch_uml.config
new file mode 100644
index 000000000000..28edf816aa70
--- /dev/null
+++ b/tools/testing/kunit/configs/arch_uml.config
@@ -0,0 +1,7 @@
+# Config options which are added to UML builds by default
+
+# Enable pci, as a lot of tests require it.
+CONFIG_KUNIT_UML_PCI=y
+
+# Enable FORTIFY_SOURCE for wider checking.
+CONFIG_FORTIFY_SOURCE=y
diff --git a/tools/testing/kunit/configs/broken_on_uml.config b/tools/testing/kunit/configs/broken_on_uml.config
deleted file mode 100644
index a7f0603d33f6..000000000000
--- a/tools/testing/kunit/configs/broken_on_uml.config
+++ /dev/null
@@ -1,42 +0,0 @@
-# These are currently broken on UML and prevent allyesconfig from building
-# CONFIG_STATIC_LINK is not set
-# CONFIG_UML_NET_VECTOR is not set
-# CONFIG_UML_NET_VDE is not set
-# CONFIG_UML_NET_PCAP is not set
-# CONFIG_NET_PTP_CLASSIFY is not set
-# CONFIG_IP_VS is not set
-# CONFIG_BRIDGE_EBT_BROUTE is not set
-# CONFIG_BRIDGE_EBT_T_FILTER is not set
-# CONFIG_BRIDGE_EBT_T_NAT is not set
-# CONFIG_MTD_NAND_CADENCE is not set
-# CONFIG_MTD_NAND_NANDSIM is not set
-# CONFIG_BLK_DEV_NULL_BLK is not set
-# CONFIG_BLK_DEV_RAM is not set
-# CONFIG_SCSI_DEBUG is not set
-# CONFIG_NET_VENDOR_XILINX is not set
-# CONFIG_NULL_TTY is not set
-# CONFIG_PTP_1588_CLOCK is not set
-# CONFIG_PINCTRL_EQUILIBRIUM is not set
-# CONFIG_DMABUF_SELFTESTS is not set
-# CONFIG_COMEDI is not set
-# CONFIG_XIL_AXIS_FIFO is not set
-# CONFIG_EXFAT_FS is not set
-# CONFIG_STM_DUMMY is not set
-# CONFIG_FSI_MASTER_ASPEED is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_UBIFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_CRYPTO_DEV_SAFEXCEL is not set
-# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set
-# CONFIG_KCOV is not set
-# CONFIG_LKDTM is not set
-# CONFIG_REED_SOLOMON_TEST is not set
-# CONFIG_TEST_RHASHTABLE is not set
-# CONFIG_TEST_MEMINIT is not set
-# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
-# CONFIG_DEBUG_INFO_BTF is not set
-# CONFIG_PTP_1588_CLOCK_INES is not set
-# CONFIG_QCOM_CPR is not set
-# CONFIG_RESET_BRCMSTB_RESCAL is not set
-# CONFIG_RESET_INTEL_GW is not set
-# CONFIG_ADI_AXI_ADC is not set
diff --git a/tools/testing/kunit/configs/coverage_uml.config b/tools/testing/kunit/configs/coverage_uml.config
new file mode 100644
index 000000000000..bacb77664fa8
--- /dev/null
+++ b/tools/testing/kunit/configs/coverage_uml.config
@@ -0,0 +1,11 @@
+# This config fragment enables coverage on UML, which is different from the
+# normal gcov used in other arches (no debugfs).
+# Example usage:
+# ./tools/testing/kunit/kunit.py run \
+# --kunitconfig=tools/testing/kunit/configs/all_tests_uml.config \
+# --kunitconfig=tools/testing/kunit/configs/coverage_uml.config
+
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+CONFIG_GCOV=y
diff --git a/tools/testing/kunit/configs/default.config b/tools/testing/kunit/configs/default.config
new file mode 100644
index 000000000000..e67af7b9f1bb
--- /dev/null
+++ b/tools/testing/kunit/configs/default.config
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_KUNIT_EXAMPLE_TEST=y
+CONFIG_KUNIT_ALL_TESTS=y
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 21516e293d17..cd99c1956331 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -8,34 +8,22 @@
# Author: Brendan Higgins <brendanhiggins@google.com>
import argparse
-import sys
import os
+import re
+import shlex
+import sys
import time
-from collections import namedtuple
+assert sys.version_info >= (3, 7), "Python version is too old"
+
+from dataclasses import dataclass
from enum import Enum, auto
+from typing import Iterable, List, Optional, Sequence, Tuple
-import kunit_config
import kunit_json
import kunit_kernel
import kunit_parser
-
-KunitResult = namedtuple('KunitResult', ['status','result','elapsed_time'])
-
-KunitConfigRequest = namedtuple('KunitConfigRequest',
- ['build_dir', 'make_options'])
-KunitBuildRequest = namedtuple('KunitBuildRequest',
- ['jobs', 'build_dir', 'alltests',
- 'make_options'])
-KunitExecRequest = namedtuple('KunitExecRequest',
- ['timeout', 'build_dir', 'alltests'])
-KunitParseRequest = namedtuple('KunitParseRequest',
- ['raw_output', 'input_data', 'build_dir', 'json'])
-KunitRequest = namedtuple('KunitRequest', ['raw_output','timeout', 'jobs',
- 'build_dir', 'alltests', 'json',
- 'make_options'])
-
-KernelDirectoryPath = sys.argv[0].split('tools/testing/kunit/')[0]
+from kunit_printer import stdout, null_printer
class KunitStatus(Enum):
SUCCESS = auto()
@@ -43,172 +31,542 @@ class KunitStatus(Enum):
BUILD_FAILURE = auto()
TEST_FAILURE = auto()
-def get_kernel_root_path():
- parts = sys.argv[0] if not __file__ else __file__
- parts = os.path.realpath(parts).split('tools/testing/kunit')
+@dataclass
+class KunitResult:
+ status: KunitStatus
+ elapsed_time: float
+
+@dataclass
+class KunitConfigRequest:
+ build_dir: str
+ make_options: Optional[List[str]]
+
+@dataclass
+class KunitBuildRequest(KunitConfigRequest):
+ jobs: int
+
+@dataclass
+class KunitParseRequest:
+ raw_output: Optional[str]
+ json: Optional[str]
+ summary: bool
+ failed: bool
+
+@dataclass
+class KunitExecRequest(KunitParseRequest):
+ build_dir: str
+ timeout: int
+ filter_glob: str
+ filter: str
+ filter_action: Optional[str]
+ kernel_args: Optional[List[str]]
+ run_isolated: Optional[str]
+ list_tests: bool
+ list_tests_attr: bool
+
+@dataclass
+class KunitRequest(KunitExecRequest, KunitBuildRequest):
+ pass
+
+
+def get_kernel_root_path() -> str:
+ path = sys.argv[0] if not __file__ else __file__
+ parts = os.path.realpath(path).split('tools/testing/kunit')
if len(parts) != 2:
sys.exit(1)
return parts[0]
def config_tests(linux: kunit_kernel.LinuxSourceTree,
request: KunitConfigRequest) -> KunitResult:
- kunit_parser.print_with_timestamp('Configuring KUnit Kernel ...')
+ stdout.print_with_timestamp('Configuring KUnit Kernel ...')
config_start = time.time()
success = linux.build_reconfig(request.build_dir, request.make_options)
config_end = time.time()
- if not success:
- return KunitResult(KunitStatus.CONFIG_FAILURE,
- 'could not configure kernel',
- config_end - config_start)
- return KunitResult(KunitStatus.SUCCESS,
- 'configured kernel successfully',
- config_end - config_start)
+ status = KunitStatus.SUCCESS if success else KunitStatus.CONFIG_FAILURE
+ return KunitResult(status, config_end - config_start)
def build_tests(linux: kunit_kernel.LinuxSourceTree,
request: KunitBuildRequest) -> KunitResult:
- kunit_parser.print_with_timestamp('Building KUnit Kernel ...')
+ stdout.print_with_timestamp('Building KUnit Kernel ...')
build_start = time.time()
- success = linux.build_um_kernel(request.alltests,
- request.jobs,
- request.build_dir,
- request.make_options)
+ success = linux.build_kernel(request.jobs,
+ request.build_dir,
+ request.make_options)
build_end = time.time()
- if not success:
- return KunitResult(KunitStatus.BUILD_FAILURE,
- 'could not build kernel',
- build_end - build_start)
- if not success:
- return KunitResult(KunitStatus.BUILD_FAILURE,
- 'could not build kernel',
- build_end - build_start)
- return KunitResult(KunitStatus.SUCCESS,
- 'built kernel successfully',
- build_end - build_start)
-
-def exec_tests(linux: kunit_kernel.LinuxSourceTree,
- request: KunitExecRequest) -> KunitResult:
- kunit_parser.print_with_timestamp('Starting KUnit Kernel ...')
- test_start = time.time()
- result = linux.run_kernel(
- timeout=None if request.alltests else request.timeout,
- build_dir=request.build_dir)
-
- test_end = time.time()
-
- return KunitResult(KunitStatus.SUCCESS,
- result,
- test_end - test_start)
-
-def parse_tests(request: KunitParseRequest) -> KunitResult:
- parse_start = time.time()
+ status = KunitStatus.SUCCESS if success else KunitStatus.BUILD_FAILURE
+ return KunitResult(status, build_end - build_start)
- test_result = kunit_parser.TestResult(kunit_parser.TestStatus.SUCCESS,
- [],
- 'Tests not Parsed.')
+def config_and_build_tests(linux: kunit_kernel.LinuxSourceTree,
+ request: KunitBuildRequest) -> KunitResult:
+ config_result = config_tests(linux, request)
+ if config_result.status != KunitStatus.SUCCESS:
+ return config_result
+
+ return build_tests(linux, request)
+
+def _list_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> List[str]:
+ args = ['kunit.action=list']
+
+ if request.kernel_args:
+ args.extend(request.kernel_args)
+
+ output = linux.run_kernel(args=args,
+ timeout=request.timeout,
+ filter_glob=request.filter_glob,
+ filter=request.filter,
+ filter_action=request.filter_action,
+ build_dir=request.build_dir)
+ lines = kunit_parser.extract_tap_lines(output)
+ # Hack! Drop the dummy TAP version header that the executor prints out.
+ lines.pop()
+
+ # Filter out any extraneous non-test output that might have gotten mixed in.
+ return [l for l in lines if re.match(r'^[^\s.]+\.[^\s.]+$', l)]
+
+def _list_tests_attr(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> Iterable[str]:
+ args = ['kunit.action=list_attr']
+
+ if request.kernel_args:
+ args.extend(request.kernel_args)
+
+ output = linux.run_kernel(args=args,
+ timeout=request.timeout,
+ filter_glob=request.filter_glob,
+ filter=request.filter,
+ filter_action=request.filter_action,
+ build_dir=request.build_dir)
+ lines = kunit_parser.extract_tap_lines(output)
+ # Hack! Drop the dummy TAP version header that the executor prints out.
+ lines.pop()
+
+ # Filter out any extraneous non-test output that might have gotten mixed in.
+ return lines
+
+def _suites_from_test_list(tests: List[str]) -> List[str]:
+ """Extracts all the suites from an ordered list of tests."""
+ suites = [] # type: List[str]
+ for t in tests:
+ parts = t.split('.', maxsplit=2)
+ if len(parts) != 2:
+ raise ValueError(f'internal KUnit error, test name should be of the form "<suite>.<test>", got "{t}"')
+ suite, _ = parts
+ if not suites or suites[-1] != suite:
+ suites.append(suite)
+ return suites
+
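For reference, a usage sketch of the helper above:

# Order is preserved and adjacent duplicates collapse into one suite:
assert _suites_from_test_list(
    ["list.slist_test", "list.dlist_test", "kfence.test"]) == ["list", "kfence"]
# A name without a '.' raises ValueError: "<suite>.<test>" form is required.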
+def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> KunitResult:
+ filter_globs = [request.filter_glob]
+ if request.list_tests:
+ output = _list_tests(linux, request)
+ for line in output:
+ print(line.rstrip())
+ return KunitResult(status=KunitStatus.SUCCESS, elapsed_time=0.0)
+ if request.list_tests_attr:
+ attr_output = _list_tests_attr(linux, request)
+ for line in attr_output:
+ print(line.rstrip())
+ return KunitResult(status=KunitStatus.SUCCESS, elapsed_time=0.0)
+ if request.run_isolated:
+ tests = _list_tests(linux, request)
+ if request.run_isolated == 'test':
+ filter_globs = tests
+ elif request.run_isolated == 'suite':
+ filter_globs = _suites_from_test_list(tests)
+ # Apply the test-part of the user's glob, if present.
+ if '.' in request.filter_glob:
+ test_glob = request.filter_glob.split('.', maxsplit=2)[1]
+ filter_globs = [g + '.' + test_glob for g in filter_globs]
+
+ metadata = kunit_json.Metadata(arch=linux.arch(), build_dir=request.build_dir, def_config='kunit_defconfig')
+
+ test_counts = kunit_parser.TestCounts()
+ exec_time = 0.0
+ for i, filter_glob in enumerate(filter_globs):
+ stdout.print_with_timestamp('Starting KUnit Kernel ({}/{})...'.format(i+1, len(filter_globs)))
+
+ test_start = time.time()
+ run_result = linux.run_kernel(
+ args=request.kernel_args,
+ timeout=request.timeout,
+ filter_glob=filter_glob,
+ filter=request.filter,
+ filter_action=request.filter_action,
+ build_dir=request.build_dir)
+
+ _, test_result = parse_tests(request, metadata, run_result)
+ # run_kernel() doesn't block on the kernel exiting.
+ # That only happens after we get the last line of output from `run_result`.
+ # So exec_time here actually contains parsing + execution time, which is fine.
+ test_end = time.time()
+ exec_time += test_end - test_start
+
+ test_counts.add_subtest_counts(test_result.counts)
+
+ if len(filter_globs) == 1 and test_counts.crashed > 0:
+ bd = request.build_dir
+ print('The kernel seems to have crashed; you can decode the stack traces with:')
+ print('$ scripts/decode_stacktrace.sh {}/vmlinux {} < {} | tee {}/decoded.log | {} parse'.format(
+ bd, bd, kunit_kernel.get_outfile_path(bd), bd, sys.argv[0]))
+
+ kunit_status = _map_to_overall_status(test_counts.get_status())
+ return KunitResult(status=kunit_status, elapsed_time=exec_time)
+
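To make the run_isolated flow above concrete: with --run_isolated=suite and a user glob of 'list*.*del_test', the listed tests collapse to their suites and the test half of the glob is re-applied, so each suite boots once. A usage sketch, given the helpers above:

tests = ["list.kunit_list_del_test", "list.kunit_list_add_test"]
filter_globs = _suites_from_test_list(tests)             # ['list']
test_glob = "list*.*del_test".split('.', maxsplit=2)[1]  # '*del_test'
filter_globs = [g + '.' + test_glob for g in filter_globs]
print(filter_globs)  # ['list.*del_test']: one kernel boot for the 'list' suite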
+def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus:
+ if test_status in (kunit_parser.TestStatus.SUCCESS, kunit_parser.TestStatus.SKIPPED):
+ return KunitStatus.SUCCESS
+ return KunitStatus.TEST_FAILURE
+
+def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input_data: Iterable[str]) -> Tuple[KunitResult, kunit_parser.Test]:
+ parse_start = time.time()
if request.raw_output:
- kunit_parser.raw_output(request.input_data)
- else:
- test_result = kunit_parser.parse_run_tests(request.input_data)
- parse_end = time.time()
+ # Treat unparsed results as one passing test.
+ fake_test = kunit_parser.Test()
+ fake_test.status = kunit_parser.TestStatus.SUCCESS
+ fake_test.counts.passed = 1
+
+ output: Iterable[str] = input_data
+ if request.raw_output in ('all', 'full'):
+ pass
+ elif request.raw_output == 'kunit':
+ output = kunit_parser.extract_tap_lines(output)
+ for line in output:
+ print(line.rstrip())
+ parse_time = time.time() - parse_start
+ return KunitResult(KunitStatus.SUCCESS, parse_time), fake_test
+
+ default_printer = stdout
+ if request.summary or request.failed:
+ default_printer = null_printer
+
+ # Actually parse the test results.
+ test = kunit_parser.parse_run_tests(input_data, default_printer)
+ parse_time = time.time() - parse_start
+
+ if request.failed:
+ kunit_parser.print_test(test, request.failed, stdout)
+ kunit_parser.print_summary_line(test, stdout)
if request.json:
- json_obj = kunit_json.get_json_result(
- test_result=test_result,
- def_config='kunit_defconfig',
- build_dir=request.build_dir,
- json_path=request.json)
+ json_str = kunit_json.get_json_result(
+ test=test,
+ metadata=metadata)
if request.json == 'stdout':
- print(json_obj)
-
- if test_result.status != kunit_parser.TestStatus.SUCCESS:
- return KunitResult(KunitStatus.TEST_FAILURE, test_result,
- parse_end - parse_start)
+ print(json_str)
+ else:
+ with open(request.json, 'w') as f:
+ f.write(json_str)
+ stdout.print_with_timestamp("Test results stored in %s" %
+ os.path.abspath(request.json))
- return KunitResult(KunitStatus.SUCCESS, test_result,
- parse_end - parse_start)
+ if test.status != kunit_parser.TestStatus.SUCCESS:
+ return KunitResult(KunitStatus.TEST_FAILURE, parse_time), test
+ return KunitResult(KunitStatus.SUCCESS, parse_time), test
def run_tests(linux: kunit_kernel.LinuxSourceTree,
request: KunitRequest) -> KunitResult:
run_start = time.time()
- config_request = KunitConfigRequest(request.build_dir,
- request.make_options)
- config_result = config_tests(linux, config_request)
+ config_result = config_tests(linux, request)
if config_result.status != KunitStatus.SUCCESS:
return config_result
- build_request = KunitBuildRequest(request.jobs, request.build_dir,
- request.alltests,
- request.make_options)
- build_result = build_tests(linux, build_request)
+ build_result = build_tests(linux, request)
if build_result.status != KunitStatus.SUCCESS:
return build_result
- exec_request = KunitExecRequest(request.timeout, request.build_dir,
- request.alltests)
- exec_result = exec_tests(linux, exec_request)
- if exec_result.status != KunitStatus.SUCCESS:
- return exec_result
-
- parse_request = KunitParseRequest(request.raw_output,
- exec_result.result,
- request.build_dir,
- request.json)
- parse_result = parse_tests(parse_request)
+ exec_result = exec_tests(linux, request)
run_end = time.time()
- kunit_parser.print_with_timestamp((
+ stdout.print_with_timestamp((
'Elapsed time: %.3fs total, %.3fs configuring, %.3fs ' +
'building, %.3fs running\n') % (
run_end - run_start,
config_result.elapsed_time,
build_result.elapsed_time,
exec_result.elapsed_time))
- return parse_result
-
-def add_common_opts(parser):
+ return exec_result
+
+# Problem:
+# $ kunit.py run --json
+# works as one would expect and prints the parsed test results as JSON.
+# $ kunit.py run --json suite_name
+# would *not* pass suite_name as the filter_glob and print as json.
+# argparse will consider it to be another way of writing
+# $ kunit.py run --json=suite_name
+# i.e. it would run all tests, and dump the json to a `suite_name` file.
+# So we hackily automatically rewrite --json => --json=stdout
+pseudo_bool_flag_defaults = {
+ '--json': 'stdout',
+ '--raw_output': 'kunit',
+}
+def massage_argv(argv: Sequence[str]) -> Sequence[str]:
+ def massage_arg(arg: str) -> str:
+ if arg not in pseudo_bool_flag_defaults:
+ return arg
+ return f'{arg}={pseudo_bool_flag_defaults[arg]}'
+ return list(map(massage_arg, argv))
+
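A quick illustration of the rewrite described above, assuming the definitions in this file:

print(massage_argv(['run', '--json', 'suite_name']))
# -> ['run', '--json=stdout', 'suite_name']; suite_name stays the filter_glob
print(massage_argv(['run', '--json=out.json', 'suite_name']))
# -> unchanged; only bare flags listed in pseudo_bool_flag_defaults are rewritten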
+def get_default_jobs() -> int:
+ if sys.version_info >= (3, 13):
+ if (ncpu := os.process_cpu_count()) is not None:
+ return ncpu
+ raise RuntimeError("os.process_cpu_count() returned None")
+ # See https://github.com/python/cpython/blob/b61fece/Lib/os.py#L1175-L1186.
+ if sys.platform != "darwin":
+ return len(os.sched_getaffinity(0))
+ if (ncpu := os.cpu_count()) is not None:
+ return ncpu
+ raise RuntimeError("os.cpu_count() returned None")
+
+def add_common_opts(parser: argparse.ArgumentParser) -> None:
parser.add_argument('--build_dir',
help='As in the make command, it specifies the build '
'directory.',
- type=str, default='.kunit', metavar='build_dir')
+ type=str, default='.kunit', metavar='DIR')
parser.add_argument('--make_options',
help='X=Y make option, can be repeated.',
- action='append')
+ action='append', metavar='X=Y')
parser.add_argument('--alltests',
- help='Run all KUnit tests through allyesconfig',
+ help='Run all KUnit tests via tools/testing/kunit/configs/all_tests.config',
action='store_true')
-
-def add_build_opts(parser):
+ parser.add_argument('--kunitconfig',
+ help='Path to Kconfig fragment that enables KUnit tests.'
+ ' If given a directory (e.g. lib/kunit), "/.kunitconfig" '
+ 'will get automatically appended. If repeated, the files are '
+ 'blindly concatenated, which might not work in all cases.',
+ action='append', metavar='PATHS')
+ parser.add_argument('--kconfig_add',
+ help='Additional Kconfig options to append to the '
+ '.kunitconfig, e.g. CONFIG_KASAN=y. Can be repeated.',
+ action='append', metavar='CONFIG_X=Y')
+
+ parser.add_argument('--arch',
+ help=('Specifies the architecture to run tests under. '
+ 'The architecture specified here must match the '
+ 'string passed to the ARCH make param, '
+ 'e.g. i386, x86_64, arm, um, etc. Non-UML '
+ 'architectures run on QEMU.'),
+ type=str, default='um', metavar='ARCH')
+
+ parser.add_argument('--cross_compile',
+ help=('Sets make\'s CROSS_COMPILE variable; it should '
+ 'be set to a toolchain path prefix (the prefix '
+ 'of gcc and other tools in your toolchain, for '
+ 'example `sparc64-linux-gnu-` if you have the '
+ 'sparc toolchain installed on your system, or '
+ '`$HOME/toolchains/microblaze/gcc-9.2.0-nolibc/microblaze-linux/bin/microblaze-linux-` '
+ 'if you have downloaded the microblaze toolchain '
+ 'from the 0-day website to a directory in your '
+ 'home directory called `toolchains`).'),
+ metavar='PREFIX')
+
+ parser.add_argument('--qemu_config',
+ help=('Takes a path to a file containing '
+ 'a QemuArchParams object.'),
+ type=str, metavar='FILE')
+
+ parser.add_argument('--qemu_args',
+ help='Additional QEMU arguments, e.g. "-smp 8"',
+ action='append', metavar='')
+
+def add_build_opts(parser: argparse.ArgumentParser) -> None:
parser.add_argument('--jobs',
help='As in the make command, "Specifies the number of '
'jobs (commands) to run simultaneously."',
- type=int, default=8, metavar='jobs')
+ type=int, default=get_default_jobs(), metavar='N')
-def add_exec_opts(parser):
+def add_exec_opts(parser: argparse.ArgumentParser) -> None:
parser.add_argument('--timeout',
help='maximum number of seconds to allow for all tests '
'to run. This does not include time taken to build the '
'tests.',
type=int,
default=300,
- metavar='timeout')
-
-def add_parse_opts(parser):
- parser.add_argument('--raw_output', help='don\'t format output from kernel',
+ metavar='SECONDS')
+ parser.add_argument('filter_glob',
+ help='Filter which KUnit test suites/tests run at '
+ 'boot-time, e.g. list* or list*.*del_test',
+ type=str,
+ nargs='?',
+ default='',
+ metavar='filter_glob')
+ parser.add_argument('--filter',
+ help='Filter KUnit tests with attributes, '
+ 'e.g. module=example or speed>slow',
+ type=str,
+ default='')
+ parser.add_argument('--filter_action',
+ help='If set to skip, filtered tests will be skipped, '
+ 'e.g. --filter_action=skip. Otherwise they will not run.',
+ type=str,
+ choices=['skip'])
+ parser.add_argument('--kernel_args',
+ help='Kernel command-line parameters. May be repeated',
+ action='append', metavar='')
+ parser.add_argument('--run_isolated', help='If set, boot the kernel for each '
+ 'individual suite/test. This can be useful for debugging '
+ 'a non-hermetic test, one that might pass/fail based on '
+ 'what ran before it.',
+ type=str,
+ choices=['suite', 'test'])
+ parser.add_argument('--list_tests', help='If set, list all tests that will be '
+ 'run.',
action='store_true')
+ parser.add_argument('--list_tests_attr', help='If set, list all tests and test '
+ 'attributes.',
+ action='store_true')
+
+def add_parse_opts(parser: argparse.ArgumentParser) -> None:
+ parser.add_argument('--raw_output', help='If set, don\'t parse output from kernel. '
+ 'By default, filters to just KUnit output. Use '
+ '--raw_output=all to show everything',
+ type=str, nargs='?', const='all', default=None, choices=['all', 'full', 'kunit'])
parser.add_argument('--json',
nargs='?',
- help='Stores test results in a JSON, and either '
- 'prints to stdout or saves to file if a '
- 'filename is specified',
- type=str, const='stdout', default=None)
+ help='Prints parsed test results as JSON to stdout or a file if '
+ 'a filename is specified. Does nothing if --raw_output is set.',
+ type=str, const='stdout', default=None, metavar='FILE')
+ parser.add_argument('--summary',
+ help='Prints only the summary line for parsed test results. '
+ 'Does nothing if --raw_output is set.',
+ action='store_true')
+ parser.add_argument('--failed',
+ help='Prints only the failed parsed test results and summary line. '
+ 'Does nothing if --raw_output is set.',
+ action='store_true')
+
-def main(argv, linux=None):
+def tree_from_args(cli_args: argparse.Namespace) -> kunit_kernel.LinuxSourceTree:
+ """Returns a LinuxSourceTree based on the user's arguments."""
+ # Allow users to specify multiple arguments in one string, e.g. '-smp 8'
+ qemu_args: List[str] = []
+ if cli_args.qemu_args:
+ for arg in cli_args.qemu_args:
+ qemu_args.extend(shlex.split(arg))
+
+ kunitconfigs = cli_args.kunitconfig if cli_args.kunitconfig else []
+ if cli_args.alltests:
+ # Prepend so user-specified options take prio if we ever allow
+ # --kunitconfig options to have differing options.
+ kunitconfigs = [kunit_kernel.ALL_TESTS_CONFIG_PATH] + kunitconfigs
+
+ return kunit_kernel.LinuxSourceTree(cli_args.build_dir,
+ kunitconfig_paths=kunitconfigs,
+ kconfig_add=cli_args.kconfig_add,
+ arch=cli_args.arch,
+ cross_compile=cli_args.cross_compile,
+ qemu_config_path=cli_args.qemu_config,
+ extra_qemu_args=qemu_args)
+
+
+def run_handler(cli_args: argparse.Namespace) -> None:
+ if not os.path.exists(cli_args.build_dir):
+ os.mkdir(cli_args.build_dir)
+
+ linux = tree_from_args(cli_args)
+ request = KunitRequest(build_dir=cli_args.build_dir,
+ make_options=cli_args.make_options,
+ jobs=cli_args.jobs,
+ raw_output=cli_args.raw_output,
+ json=cli_args.json,
+ summary=cli_args.summary,
+ failed=cli_args.failed,
+ timeout=cli_args.timeout,
+ filter_glob=cli_args.filter_glob,
+ filter=cli_args.filter,
+ filter_action=cli_args.filter_action,
+ kernel_args=cli_args.kernel_args,
+ run_isolated=cli_args.run_isolated,
+ list_tests=cli_args.list_tests,
+ list_tests_attr=cli_args.list_tests_attr)
+ result = run_tests(linux, request)
+ if result.status != KunitStatus.SUCCESS:
+ sys.exit(1)
+
+
+def config_handler(cli_args: argparse.Namespace) -> None:
+ if cli_args.build_dir and (
+ not os.path.exists(cli_args.build_dir)):
+ os.mkdir(cli_args.build_dir)
+
+ linux = tree_from_args(cli_args)
+ request = KunitConfigRequest(build_dir=cli_args.build_dir,
+ make_options=cli_args.make_options)
+ result = config_tests(linux, request)
+ stdout.print_with_timestamp((
+ 'Elapsed time: %.3fs\n') % (
+ result.elapsed_time))
+ if result.status != KunitStatus.SUCCESS:
+ sys.exit(1)
+
+
+def build_handler(cli_args: argparse.Namespace) -> None:
+ linux = tree_from_args(cli_args)
+ request = KunitBuildRequest(build_dir=cli_args.build_dir,
+ make_options=cli_args.make_options,
+ jobs=cli_args.jobs)
+ result = config_and_build_tests(linux, request)
+ stdout.print_with_timestamp((
+ 'Elapsed time: %.3fs\n') % (
+ result.elapsed_time))
+ if result.status != KunitStatus.SUCCESS:
+ sys.exit(1)
+
+
+def exec_handler(cli_args: argparse.Namespace) -> None:
+ linux = tree_from_args(cli_args)
+ exec_request = KunitExecRequest(raw_output=cli_args.raw_output,
+ build_dir=cli_args.build_dir,
+ json=cli_args.json,
+ summary=cli_args.summary,
+ failed=cli_args.failed,
+ timeout=cli_args.timeout,
+ filter_glob=cli_args.filter_glob,
+ filter=cli_args.filter,
+ filter_action=cli_args.filter_action,
+ kernel_args=cli_args.kernel_args,
+ run_isolated=cli_args.run_isolated,
+ list_tests=cli_args.list_tests,
+ list_tests_attr=cli_args.list_tests_attr)
+ result = exec_tests(linux, exec_request)
+ stdout.print_with_timestamp((
+ 'Elapsed time: %.3fs\n') % (result.elapsed_time))
+ if result.status != KunitStatus.SUCCESS:
+ sys.exit(1)
+
+
+def parse_handler(cli_args: argparse.Namespace) -> None:
+ if cli_args.file is None:
+ sys.stdin.reconfigure(errors='backslashreplace') # type: ignore
+ kunit_output = sys.stdin # type: Iterable[str]
+ else:
+ with open(cli_args.file, 'r', errors='backslashreplace') as f:
+ kunit_output = f.read().splitlines()
+ # We know nothing about how the result was created!
+ metadata = kunit_json.Metadata()
+ request = KunitParseRequest(raw_output=cli_args.raw_output,
+ json=cli_args.json, summary=cli_args.summary,
+ failed=cli_args.failed)
+ result, _ = parse_tests(request, metadata, kunit_output)
+ if result.status != KunitStatus.SUCCESS:
+ sys.exit(1)
+
+
+subcommand_handlers_map = {
+ 'run': run_handler,
+ 'config': config_handler,
+ 'build': build_handler,
+ 'exec': exec_handler,
+ 'parse': parse_handler
+}
+
+
+def main(argv: Sequence[str]) -> None:
parser = argparse.ArgumentParser(
description='Helps writing and running KUnit tests.')
subparser = parser.add_subparsers(dest='subcommand')
@@ -246,103 +604,19 @@ def main(argv, linux=None):
help='Specifies the file to read results from.',
type=str, nargs='?', metavar='input_file')
- cli_args = parser.parse_args(argv)
+ cli_args = parser.parse_args(massage_argv(argv))
if get_kernel_root_path():
os.chdir(get_kernel_root_path())
- if cli_args.subcommand == 'run':
- if not os.path.exists(cli_args.build_dir):
- os.mkdir(cli_args.build_dir)
-
- if not linux:
- linux = kunit_kernel.LinuxSourceTree()
-
- linux.create_kunitconfig(cli_args.build_dir)
- linux.read_kunitconfig(cli_args.build_dir)
-
- request = KunitRequest(cli_args.raw_output,
- cli_args.timeout,
- cli_args.jobs,
- cli_args.build_dir,
- cli_args.alltests,
- cli_args.json,
- cli_args.make_options)
- result = run_tests(linux, request)
- if result.status != KunitStatus.SUCCESS:
- sys.exit(1)
- elif cli_args.subcommand == 'config':
- if cli_args.build_dir and (
- not os.path.exists(cli_args.build_dir)):
- os.mkdir(cli_args.build_dir)
-
- if not linux:
- linux = kunit_kernel.LinuxSourceTree()
-
- linux.create_kunitconfig(cli_args.build_dir)
- linux.read_kunitconfig(cli_args.build_dir)
-
- request = KunitConfigRequest(cli_args.build_dir,
- cli_args.make_options)
- result = config_tests(linux, request)
- kunit_parser.print_with_timestamp((
- 'Elapsed time: %.3fs\n') % (
- result.elapsed_time))
- if result.status != KunitStatus.SUCCESS:
- sys.exit(1)
- elif cli_args.subcommand == 'build':
- if not linux:
- linux = kunit_kernel.LinuxSourceTree()
-
- linux.create_kunitconfig(cli_args.build_dir)
- linux.read_kunitconfig(cli_args.build_dir)
-
- request = KunitBuildRequest(cli_args.jobs,
- cli_args.build_dir,
- cli_args.alltests,
- cli_args.make_options)
- result = build_tests(linux, request)
- kunit_parser.print_with_timestamp((
- 'Elapsed time: %.3fs\n') % (
- result.elapsed_time))
- if result.status != KunitStatus.SUCCESS:
- sys.exit(1)
- elif cli_args.subcommand == 'exec':
- if not linux:
- linux = kunit_kernel.LinuxSourceTree()
-
- linux.create_kunitconfig(cli_args.build_dir)
- linux.read_kunitconfig(cli_args.build_dir)
-
- exec_request = KunitExecRequest(cli_args.timeout,
- cli_args.build_dir,
- cli_args.alltests)
- exec_result = exec_tests(linux, exec_request)
- parse_request = KunitParseRequest(cli_args.raw_output,
- exec_result.result,
- cli_args.build_dir,
- cli_args.json)
- result = parse_tests(parse_request)
- kunit_parser.print_with_timestamp((
- 'Elapsed time: %.3fs\n') % (
- exec_result.elapsed_time))
- if result.status != KunitStatus.SUCCESS:
- sys.exit(1)
- elif cli_args.subcommand == 'parse':
- if cli_args.file == None:
- kunit_output = sys.stdin
- else:
- with open(cli_args.file, 'r') as f:
- kunit_output = f.read().splitlines()
- request = KunitParseRequest(cli_args.raw_output,
- kunit_output,
- None,
- cli_args.json)
- result = parse_tests(request)
- if result.status != KunitStatus.SUCCESS:
- sys.exit(1)
- else:
+ subcommand_handler = subcommand_handlers_map.get(cli_args.subcommand, None)
+
+ if subcommand_handler is None:
parser.print_help()
+ return
+
+ subcommand_handler(cli_args)
+
if __name__ == '__main__':
main(sys.argv[1:])
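As a hypothetical usage sketch (argument names assumed from the options wired up earlier in this file), the rewritten entry point can also be driven programmatically:

    import kunit

    # Roughly equivalent to: ./tools/testing/kunit/kunit.py run --timeout=60
    kunit.main(['run', '--timeout=60'])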
diff --git a/tools/testing/kunit/kunit_config.py b/tools/testing/kunit/kunit_config.py
index 02ffc3a3e5dc..eb5dd01210b1 100644
--- a/tools/testing/kunit/kunit_config.py
+++ b/tools/testing/kunit/kunit_config.py
@@ -6,84 +6,103 @@
# Author: Felix Guo <felixguoxiuping@gmail.com>
# Author: Brendan Higgins <brendanhiggins@google.com>
-import collections
+from dataclasses import dataclass
import re
+from typing import Any, Dict, Iterable, List, Tuple
CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
-KconfigEntryBase = collections.namedtuple('KconfigEntry', ['name', 'value'])
-
-class KconfigEntry(KconfigEntryBase):
+@dataclass(frozen=True)
+class KconfigEntry:
+ name: str
+ value: str
def __str__(self) -> str:
if self.value == 'n':
- return r'# CONFIG_%s is not set' % (self.name)
- else:
- return r'CONFIG_%s=%s' % (self.name, self.value)
+ return f'# CONFIG_{self.name} is not set'
+ return f'CONFIG_{self.name}={self.value}'
class KconfigParseError(Exception):
"""Error parsing Kconfig defconfig or .config."""
-class Kconfig(object):
+class Kconfig:
"""Represents defconfig or .config specified using the Kconfig language."""
- def __init__(self):
- self._entries = []
+ def __init__(self) -> None:
+ self._entries = {} # type: Dict[str, str]
+
+ def __eq__(self, other: Any) -> bool:
+ if not isinstance(other, self.__class__):
+ return False
+ return self._entries == other._entries
+
+ def __repr__(self) -> str:
+ return ','.join(str(e) for e in self.as_entries())
- def entries(self):
- return set(self._entries)
+ def as_entries(self) -> Iterable[KconfigEntry]:
+ for name, value in self._entries.items():
+ yield KconfigEntry(name, value)
- def add_entry(self, entry: KconfigEntry) -> None:
- self._entries.append(entry)
+ def add_entry(self, name: str, value: str) -> None:
+ self._entries[name] = value
def is_subset_of(self, other: 'Kconfig') -> bool:
- for a in self.entries():
- found = False
- for b in other.entries():
- if a.name != b.name:
+ for name, value in self._entries.items():
+ b = other._entries.get(name)
+ if b is None:
+ if value == 'n':
continue
- if a.value != b.value:
- return False
- found = True
- if a.value != 'n' and found == False:
+ return False
+ if value != b:
return False
return True
+ def conflicting_options(self, other: 'Kconfig') -> List[Tuple[KconfigEntry, KconfigEntry]]:
+ diff = [] # type: List[Tuple[KconfigEntry, KconfigEntry]]
+ for name, value in self._entries.items():
+ b = other._entries.get(name)
+ if b and value != b:
+ pair = (KconfigEntry(name, value), KconfigEntry(name, b))
+ diff.append(pair)
+ return diff
+
+ def merge_in_entries(self, other: 'Kconfig') -> None:
+ for name, value in other._entries.items():
+ self._entries[name] = value
+
def write_to_file(self, path: str) -> None:
- with open(path, 'w') as f:
- for entry in self.entries():
- f.write(str(entry) + '\n')
-
- def parse_from_string(self, blob: str) -> None:
- """Parses a string containing KconfigEntrys and populates this Kconfig."""
- self._entries = []
- is_not_set_matcher = re.compile(CONFIG_IS_NOT_SET_PATTERN)
- config_matcher = re.compile(CONFIG_PATTERN)
- for line in blob.split('\n'):
- line = line.strip()
- if not line:
- continue
-
- match = config_matcher.match(line)
- if match:
- entry = KconfigEntry(match.group(1), match.group(2))
- self.add_entry(entry)
- continue
-
- empty_match = is_not_set_matcher.match(line)
- if empty_match:
- entry = KconfigEntry(empty_match.group(1), 'n')
- self.add_entry(entry)
- continue
-
- if line[0] == '#':
- continue
- else:
- raise KconfigParseError('Failed to parse: ' + line)
-
- def read_from_file(self, path: str) -> None:
- with open(path, 'r') as f:
- self.parse_from_string(f.read())
+ with open(path, 'a+') as f:
+ for e in self.as_entries():
+ f.write(str(e) + '\n')
+
+def parse_file(path: str) -> Kconfig:
+ with open(path, 'r') as f:
+ return parse_from_string(f.read())
+
+def parse_from_string(blob: str) -> Kconfig:
+ """Parses a string containing Kconfig entries."""
+ kconfig = Kconfig()
+ is_not_set_matcher = re.compile(CONFIG_IS_NOT_SET_PATTERN)
+ config_matcher = re.compile(CONFIG_PATTERN)
+ for line in blob.split('\n'):
+ line = line.strip()
+ if not line:
+ continue
+
+ match = config_matcher.match(line)
+ if match:
+ kconfig.add_entry(match.group(1), match.group(2))
+ continue
+
+ empty_match = is_not_set_matcher.match(line)
+ if empty_match:
+ kconfig.add_entry(empty_match.group(1), 'n')
+ continue
+
+ if line[0] == '#':
+ continue
+ raise KconfigParseError('Failed to parse: ' + line)
+ return kconfig
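A quick sketch of the new dict-backed API (illustrative option values):

    import kunit_config

    a = kunit_config.parse_from_string('CONFIG_KUNIT=y\n# CONFIG_MMU is not set')
    b = kunit_config.parse_from_string('CONFIG_KUNIT=m')
    # Conflicting values are reported as (ours, theirs) KconfigEntry pairs.
    assert len(a.conflicting_options(b)) == 1
    # merge_in_entries() lets the other Kconfig's values win.
    a.merge_in_entries(b)
    assert str(next(iter(a.as_entries()))) == 'CONFIG_KUNIT=m'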
diff --git a/tools/testing/kunit/kunit_json.py b/tools/testing/kunit/kunit_json.py
index 624b31b2dbd6..80fa4e354a17 100644
--- a/tools/testing/kunit/kunit_json.py
+++ b/tools/testing/kunit/kunit_json.py
@@ -6,58 +6,68 @@
# Copyright (C) 2020, Google LLC.
# Author: Heidi Fahim <heidifahim@google.com>
+from dataclasses import dataclass
import json
-import os
-
-import kunit_parser
-
-from kunit_parser import TestStatus
-
-def get_json_result(test_result, def_config, build_dir, json_path):
- sub_groups = []
-
- # Each test suite is mapped to a KernelCI sub_group
- for test_suite in test_result.suites:
- sub_group = {
- "name": test_suite.name,
- "arch": "UM",
- "defconfig": def_config,
- "build_environment": build_dir,
- "test_cases": [],
- "lab_name": None,
- "kernel": None,
- "job": None,
- "git_branch": "kselftest",
- }
- test_cases = []
- # TODO: Add attachments attribute in test_case with detailed
- # failure message, see https://api.kernelci.org/schema-test-case.html#get
- for case in test_suite.cases:
- test_case = {"name": case.name, "status": "FAIL"}
- if case.status == TestStatus.SUCCESS:
- test_case["status"] = "PASS"
- elif case.status == TestStatus.TEST_CRASHED:
- test_case["status"] = "ERROR"
- test_cases.append(test_case)
- sub_group["test_cases"] = test_cases
- sub_groups.append(sub_group)
+from typing import Any, Dict
+
+from kunit_parser import Test, TestStatus
+
+@dataclass
+class Metadata:
+ """Stores metadata about this run to include in get_json_result()."""
+ arch: str = ''
+ def_config: str = ''
+ build_dir: str = ''
+
+JsonObj = Dict[str, Any]
+
+_status_map: Dict[TestStatus, str] = {
+ TestStatus.SUCCESS: "PASS",
+ TestStatus.SKIPPED: "SKIP",
+ TestStatus.TEST_CRASHED: "ERROR",
+}
+
+def _get_group_json(test: Test, common_fields: JsonObj) -> JsonObj:
+ sub_groups = [] # List[JsonObj]
+ test_cases = [] # List[JsonObj]
+
+ for subtest in test.subtests:
+ if subtest.subtests:
+ sub_group = _get_group_json(subtest, common_fields)
+ sub_groups.append(sub_group)
+ continue
+ status = _status_map.get(subtest.status, "FAIL")
+ test_cases.append({"name": subtest.name, "status": status})
+
+ test_counts = test.counts
+ counts_json = {
+ "tests": test_counts.total(),
+ "passed": test_counts.passed,
+ "failed": test_counts.failed,
+ "crashed": test_counts.crashed,
+ "skipped": test_counts.skipped,
+ "errors": test_counts.errors,
+ }
test_group = {
- "name": "KUnit Test Group",
- "arch": "UM",
- "defconfig": def_config,
- "build_environment": build_dir,
+ "name": test.name,
"sub_groups": sub_groups,
+ "test_cases": test_cases,
+ "misc": counts_json
+ }
+ test_group.update(common_fields)
+ return test_group
+
+def get_json_result(test: Test, metadata: Metadata) -> str:
+ common_fields = {
+ "arch": metadata.arch,
+ "defconfig": metadata.def_config,
+ "build_environment": metadata.build_dir,
"lab_name": None,
"kernel": None,
"job": None,
"git_branch": "kselftest",
}
- json_obj = json.dumps(test_group, indent=4)
- if json_path != 'stdout':
- with open(json_path, 'w') as result_path:
- result_path.write(json_obj)
- root = __file__.split('tools/testing/kunit/')[0]
- kunit_parser.print_with_timestamp(
- "Test results stored in %s" %
- os.path.join(root, result_path.name))
- return json_obj
+
+ test_group = _get_group_json(test, common_fields)
+ test_group["name"] = "KUnit Test Group"
+ return json.dumps(test_group, indent=4)
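Assuming a hand-built Test tree (hypothetical names), the reworked entry point renders nested KernelCI-style JSON:

    import kunit_json
    from kunit_parser import Test, TestStatus

    root = Test()
    root.name = 'main'
    case = Test()
    case.name = 'example_test'
    case.status = TestStatus.SUCCESS
    root.subtests = [case]
    root.counts.passed = 1
    print(kunit_json.get_json_result(root, kunit_json.Metadata(arch='um')))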
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 57c1724b7e5d..260d8d9aa1db 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -6,27 +6,31 @@
# Author: Felix Guo <felixguoxiuping@gmail.com>
# Author: Brendan Higgins <brendanhiggins@google.com>
+import importlib.abc
+import importlib.util
import logging
import subprocess
import os
+import shlex
import shutil
import signal
-
-from contextlib import ExitStack
+import sys
+import threading
+from typing import Iterator, List, Optional, Tuple
+from types import FrameType
import kunit_config
-import kunit_parser
+import qemu_config
KCONFIG_PATH = '.config'
KUNITCONFIG_PATH = '.kunitconfig'
-DEFAULT_KUNITCONFIG_PATH = 'arch/um/configs/kunit_defconfig'
-BROKEN_ALLCONFIG_PATH = 'tools/testing/kunit/configs/broken_on_uml.config'
+OLD_KUNITCONFIG_PATH = 'last_used_kunitconfig'
+DEFAULT_KUNITCONFIG_PATH = 'tools/testing/kunit/configs/default.config'
+ALL_TESTS_CONFIG_PATH = 'tools/testing/kunit/configs/all_tests.config'
+UML_KCONFIG_PATH = 'tools/testing/kunit/configs/arch_uml.config'
OUTFILE_PATH = 'test.log'
-
-def get_file_path(build_dir, default):
- if build_dir:
- default = os.path.join(build_dir, default)
- return default
+ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__))
+QEMU_CONFIGS_DIR = os.path.join(ABS_TOOL_PATH, 'qemu_configs')
class ConfigError(Exception):
"""Represents an error trying to configure the Linux kernel."""
@@ -36,10 +40,14 @@ class BuildError(Exception):
"""Represents an error trying to build the Linux kernel."""
-class LinuxSourceTreeOperations(object):
+class LinuxSourceTreeOperations:
"""An abstraction over command line operations performed on a source tree."""
- def make_mrproper(self):
+ def __init__(self, linux_arch: str, cross_compile: Optional[str]):
+ self._linux_arch = linux_arch
+ self._cross_compile = cross_compile
+
+ def make_mrproper(self) -> None:
try:
subprocess.check_output(['make', 'mrproper'], stderr=subprocess.STDOUT)
except OSError as e:
@@ -47,12 +55,16 @@ class LinuxSourceTreeOperations(object):
except subprocess.CalledProcessError as e:
raise ConfigError(e.output.decode())
- def make_olddefconfig(self, build_dir, make_options):
- command = ['make', 'ARCH=um', 'olddefconfig']
+ def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig:
+ return base_kunitconfig
+
+ def make_olddefconfig(self, build_dir: str, make_options: Optional[List[str]]) -> None:
+ command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, 'olddefconfig']
+ if self._cross_compile:
+ command += ['CROSS_COMPILE=' + self._cross_compile]
if make_options:
command.extend(make_options)
- if build_dir:
- command += ['O=' + build_dir]
+ print('Populating config with:\n$', ' '.join(command))
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
except OSError as e:
@@ -60,73 +72,204 @@ class LinuxSourceTreeOperations(object):
except subprocess.CalledProcessError as e:
raise ConfigError(e.output.decode())
- def make_allyesconfig(self, build_dir, make_options):
- kunit_parser.print_with_timestamp(
- 'Enabling all CONFIGs for UML...')
- command = ['make', 'ARCH=um', 'allyesconfig']
+ def make(self, jobs: int, build_dir: str, make_options: Optional[List[str]]) -> None:
+ command = ['make', 'all', 'compile_commands.json', 'scripts_gdb',
+ 'ARCH=' + self._linux_arch, 'O=' + build_dir, '--jobs=' + str(jobs)]
if make_options:
command.extend(make_options)
- if build_dir:
- command += ['O=' + build_dir]
- process = subprocess.Popen(
- command,
- stdout=subprocess.DEVNULL,
- stderr=subprocess.STDOUT)
- process.wait()
- kunit_parser.print_with_timestamp(
- 'Disabling broken configs to run KUnit tests...')
- with ExitStack() as es:
- config = open(get_kconfig_path(build_dir), 'a')
- disable = open(BROKEN_ALLCONFIG_PATH, 'r').read()
- config.write(disable)
- kunit_parser.print_with_timestamp(
- 'Starting Kernel with all configs takes a few minutes...')
-
- def make(self, jobs, build_dir, make_options):
- command = ['make', 'ARCH=um', '--jobs=' + str(jobs)]
- if make_options:
- command.extend(make_options)
- if build_dir:
- command += ['O=' + build_dir]
+ if self._cross_compile:
+ command += ['CROSS_COMPILE=' + self._cross_compile]
+ print('Building with:\n$', ' '.join(command))
try:
proc = subprocess.Popen(command,
stderr=subprocess.PIPE,
stdout=subprocess.DEVNULL)
except OSError as e:
- raise BuildError('Could not call make command: ' + str(e))
+ raise BuildError('Could not execute make: ' + str(e))
+ except subprocess.CalledProcessError as e:
+ raise BuildError(e.output)
_, stderr = proc.communicate()
if proc.returncode != 0:
raise BuildError(stderr.decode())
if stderr: # likely only due to build warnings
print(stderr.decode())
- def linux_bin(self, params, timeout, build_dir):
+ def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
+ raise RuntimeError('not implemented!')
+
+
+class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
+
+ def __init__(self, qemu_arch_params: qemu_config.QemuArchParams, cross_compile: Optional[str]):
+ super().__init__(linux_arch=qemu_arch_params.linux_arch,
+ cross_compile=cross_compile)
+ self._kconfig = qemu_arch_params.kconfig
+ self._qemu_arch = qemu_arch_params.qemu_arch
+ self._kernel_path = qemu_arch_params.kernel_path
+ self._kernel_command_line = qemu_arch_params.kernel_command_line
+ if 'kunit_shutdown=' not in self._kernel_command_line:
+ self._kernel_command_line += ' kunit_shutdown=reboot'
+ self._extra_qemu_params = qemu_arch_params.extra_qemu_params
+ self._serial = qemu_arch_params.serial
+
+ def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig:
+ kconfig = kunit_config.parse_from_string(self._kconfig)
+ kconfig.merge_in_entries(base_kunitconfig)
+ return kconfig
+
+ def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
+ kernel_path = os.path.join(build_dir, self._kernel_path)
+ qemu_command = ['qemu-system-' + self._qemu_arch,
+ '-nodefaults',
+ '-m', '1024',
+ '-kernel', kernel_path,
+ '-append', ' '.join(params + [self._kernel_command_line]),
+ '-no-reboot',
+ '-nographic',
+ '-accel', 'kvm',
+ '-accel', 'hvf',
+ '-accel', 'tcg',
+ '-serial', self._serial] + self._extra_qemu_params
+ # Note: shlex.join() does what we want, but requires python 3.8+.
+ print('Running tests with:\n$', ' '.join(shlex.quote(arg) for arg in qemu_command))
+ return subprocess.Popen(qemu_command,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True, errors='backslashreplace')
+
+class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations):
+ """An abstraction over command line operations performed on a source tree."""
+
+ def __init__(self, cross_compile: Optional[str]=None):
+ super().__init__(linux_arch='um', cross_compile=cross_compile)
+
+ def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig:
+ kconfig = kunit_config.parse_file(UML_KCONFIG_PATH)
+ kconfig.merge_in_entries(base_kunitconfig)
+ return kconfig
+
+ def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
"""Runs the Linux UML binary. Must be named 'linux'."""
- linux_bin = get_file_path(build_dir, 'linux')
- outfile = get_outfile_path(build_dir)
- with open(outfile, 'w') as output:
- process = subprocess.Popen([linux_bin] + params,
- stdout=output,
- stderr=subprocess.STDOUT)
- process.wait(timeout)
+ linux_bin = os.path.join(build_dir, 'linux')
+ params.extend(['mem=1G', 'console=tty', 'kunit_shutdown=halt'])
+ print('Running tests with:\n$', linux_bin, ' '.join(shlex.quote(arg) for arg in params))
+ return subprocess.Popen([linux_bin] + params,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True, errors='backslashreplace')
+
+def get_kconfig_path(build_dir: str) -> str:
+ return os.path.join(build_dir, KCONFIG_PATH)
+
+def get_kunitconfig_path(build_dir: str) -> str:
+ return os.path.join(build_dir, KUNITCONFIG_PATH)
+
+def get_old_kunitconfig_path(build_dir: str) -> str:
+ return os.path.join(build_dir, OLD_KUNITCONFIG_PATH)
+
+def get_parsed_kunitconfig(build_dir: str,
+ kunitconfig_paths: Optional[List[str]]=None) -> kunit_config.Kconfig:
+ if not kunitconfig_paths:
+ path = get_kunitconfig_path(build_dir)
+ if not os.path.exists(path):
+ shutil.copyfile(DEFAULT_KUNITCONFIG_PATH, path)
+ return kunit_config.parse_file(path)
+
+ merged = kunit_config.Kconfig()
+
+ for path in kunitconfig_paths:
+ if os.path.isdir(path):
+ path = os.path.join(path, KUNITCONFIG_PATH)
+ if not os.path.exists(path):
+ raise ConfigError(f'Specified kunitconfig ({path}) does not exist')
+
+ partial = kunit_config.parse_file(path)
+ diff = merged.conflicting_options(partial)
+ if diff:
+ diff_str = '\n\n'.join(f'{a}\n vs from {path}\n{b}' for a, b in diff)
+ raise ConfigError(f'Multiple values specified for {len(diff)} options in kunitconfig:\n{diff_str}')
+ merged.merge_in_entries(partial)
+ return merged
+
+def get_outfile_path(build_dir: str) -> str:
+ return os.path.join(build_dir, OUTFILE_PATH)
+
+def _default_qemu_config_path(arch: str) -> str:
+ config_path = os.path.join(QEMU_CONFIGS_DIR, arch + '.py')
+ if os.path.isfile(config_path):
+ return config_path
-def get_kconfig_path(build_dir):
- return get_file_path(build_dir, KCONFIG_PATH)
+ options = [f[:-3] for f in os.listdir(QEMU_CONFIGS_DIR) if f.endswith('.py')]
-def get_kunitconfig_path(build_dir):
- return get_file_path(build_dir, KUNITCONFIG_PATH)
+ if arch == 'help':
+ print('um')
+ for option in options:
+ print(option)
+ sys.exit()
-def get_outfile_path(build_dir):
- return get_file_path(build_dir, OUTFILE_PATH)
+ raise ConfigError(arch + ' is not a valid arch, options are ' + str(sorted(options)))
-class LinuxSourceTree(object):
+def _get_qemu_ops(config_path: str,
+ extra_qemu_args: Optional[List[str]],
+ cross_compile: Optional[str]) -> Tuple[str, LinuxSourceTreeOperations]:
+ # The module name/path has very little to do with where the actual file
+ # exists (I learned this through experimentation and could not find it
+ # anywhere in the Python documentation).
+ #
+ # Basically, we completely ignore the actual file location of the config
+ # we are loading and just tell Python that the module lives in the
+ # QEMU_CONFIGS_DIR for import purposes regardless of where it actually
+ # exists as a file.
+ module_path = '.' + os.path.join(os.path.basename(QEMU_CONFIGS_DIR), os.path.basename(config_path))
+ spec = importlib.util.spec_from_file_location(module_path, config_path)
+ assert spec is not None
+ config = importlib.util.module_from_spec(spec)
+ # See https://github.com/python/typeshed/pull/2626 for context.
+ assert isinstance(spec.loader, importlib.abc.Loader)
+ spec.loader.exec_module(config)
+
+ if not hasattr(config, 'QEMU_ARCH'):
+ raise ValueError('qemu_config module missing "QEMU_ARCH": ' + config_path)
+ params: qemu_config.QemuArchParams = config.QEMU_ARCH
+ if extra_qemu_args:
+ params.extra_qemu_params.extend(extra_qemu_args)
+ return params.linux_arch, LinuxSourceTreeOperationsQemu(
+ params, cross_compile=cross_compile)
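Each loadable config module is expected to export a QEMU_ARCH object. A minimal hypothetical qemu_configs/example.py (field names taken from the QemuArchParams uses above) might look like:

    from ..qemu_config import QemuArchParams

    QEMU_ARCH = QemuArchParams(linux_arch='x86_64',
                               kconfig='CONFIG_SERIAL_8250=y\nCONFIG_SERIAL_8250_CONSOLE=y',
                               qemu_arch='x86_64',
                               kernel_path='arch/x86/boot/bzImage',
                               kernel_command_line='console=ttyS0',
                               extra_qemu_params=[],
                               serial='stdio')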
+
+class LinuxSourceTree:
"""Represents a Linux kernel source tree with KUnit tests."""
- def __init__(self):
- self._ops = LinuxSourceTreeOperations()
+ def __init__(
+ self,
+ build_dir: str,
+ kunitconfig_paths: Optional[List[str]]=None,
+ kconfig_add: Optional[List[str]]=None,
+ arch: Optional[str]=None,
+ cross_compile: Optional[str]=None,
+ qemu_config_path: Optional[str]=None,
+ extra_qemu_args: Optional[List[str]]=None) -> None:
signal.signal(signal.SIGINT, self.signal_handler)
+ if qemu_config_path:
+ self._arch, self._ops = _get_qemu_ops(qemu_config_path, extra_qemu_args, cross_compile)
+ else:
+ self._arch = 'um' if arch is None else arch
+ if self._arch == 'um':
+ self._ops = LinuxSourceTreeOperationsUml(cross_compile=cross_compile)
+ else:
+ qemu_config_path = _default_qemu_config_path(self._arch)
+ _, self._ops = _get_qemu_ops(qemu_config_path, extra_qemu_args, cross_compile)
- def clean(self):
+ self._kconfig = get_parsed_kunitconfig(build_dir, kunitconfig_paths)
+ if kconfig_add:
+ kconfig = kunit_config.parse_from_string('\n'.join(kconfig_add))
+ self._kconfig.merge_in_entries(kconfig)
+
+ def arch(self) -> str:
+ return self._arch
+
+ def clean(self) -> bool:
try:
self._ops.make_mrproper()
except ConfigError as e:
@@ -134,62 +277,67 @@ class LinuxSourceTree(object):
return False
return True
- def create_kunitconfig(self, build_dir, defconfig=DEFAULT_KUNITCONFIG_PATH):
- kunitconfig_path = get_kunitconfig_path(build_dir)
- if not os.path.exists(kunitconfig_path):
- shutil.copyfile(defconfig, kunitconfig_path)
-
- def read_kunitconfig(self, build_dir):
- kunitconfig_path = get_kunitconfig_path(build_dir)
- self._kconfig = kunit_config.Kconfig()
- self._kconfig.read_from_file(kunitconfig_path)
-
- def validate_config(self, build_dir):
+ def validate_config(self, build_dir: str) -> bool:
kconfig_path = get_kconfig_path(build_dir)
- validated_kconfig = kunit_config.Kconfig()
- validated_kconfig.read_from_file(kconfig_path)
- if not self._kconfig.is_subset_of(validated_kconfig):
- invalid = self._kconfig.entries() - validated_kconfig.entries()
- message = 'Provided Kconfig is not contained in validated .config. Following fields found in kunitconfig, ' \
- 'but not in .config: %s' % (
- ', '.join([str(e) for e in invalid])
- )
- logging.error(message)
- return False
- return True
+ validated_kconfig = kunit_config.parse_file(kconfig_path)
+ if self._kconfig.is_subset_of(validated_kconfig):
+ return True
+ missing = set(self._kconfig.as_entries()) - set(validated_kconfig.as_entries())
+ message = 'Not all Kconfig options selected in kunitconfig were in the generated .config.\n' \
+ 'This is probably due to unsatisfied dependencies.\n' \
+ 'Missing: ' + ', '.join(str(e) for e in missing)
+ if self._arch == 'um':
+ message += '\nNote: many Kconfig options aren\'t available on UML. You can try running ' \
+ 'on a different architecture with something like "--arch=x86_64".'
+ logging.error(message)
+ return False
- def build_config(self, build_dir, make_options):
+ def build_config(self, build_dir: str, make_options: Optional[List[str]]) -> bool:
kconfig_path = get_kconfig_path(build_dir)
if build_dir and not os.path.exists(build_dir):
os.mkdir(build_dir)
- self._kconfig.write_to_file(kconfig_path)
try:
+ self._kconfig = self._ops.make_arch_config(self._kconfig)
+ self._kconfig.write_to_file(kconfig_path)
self._ops.make_olddefconfig(build_dir, make_options)
except ConfigError as e:
logging.error(e)
return False
- return self.validate_config(build_dir)
+ if not self.validate_config(build_dir):
+ return False
+
+ old_path = get_old_kunitconfig_path(build_dir)
+ if os.path.exists(old_path):
+ os.remove(old_path) # write_to_file appends to the file
+ self._kconfig.write_to_file(old_path)
+ return True
+
+ def _kunitconfig_changed(self, build_dir: str) -> bool:
+ old_path = get_old_kunitconfig_path(build_dir)
+ if not os.path.exists(old_path):
+ return True
- def build_reconfig(self, build_dir, make_options):
+ old_kconfig = kunit_config.parse_file(old_path)
+ return old_kconfig != self._kconfig
+
+ def build_reconfig(self, build_dir: str, make_options: Optional[List[str]]) -> bool:
"""Creates a new .config if it is not a subset of the .kunitconfig."""
kconfig_path = get_kconfig_path(build_dir)
- if os.path.exists(kconfig_path):
- existing_kconfig = kunit_config.Kconfig()
- existing_kconfig.read_from_file(kconfig_path)
- if not self._kconfig.is_subset_of(existing_kconfig):
- print('Regenerating .config ...')
- os.remove(kconfig_path)
- return self.build_config(build_dir, make_options)
- else:
- return True
- else:
+ if not os.path.exists(kconfig_path):
print('Generating .config ...')
return self.build_config(build_dir, make_options)
- def build_um_kernel(self, alltests, jobs, build_dir, make_options):
+ existing_kconfig = kunit_config.parse_file(kconfig_path)
+ self._kconfig = self._ops.make_arch_config(self._kconfig)
+
+ if self._kconfig.is_subset_of(existing_kconfig) and not self._kunitconfig_changed(build_dir):
+ return True
+ print('Regenerating .config ...')
+ os.remove(kconfig_path)
+ return self.build_config(build_dir, make_options)
+
+ def build_kernel(self, jobs: int, build_dir: str, make_options: Optional[List[str]]) -> bool:
try:
- if alltests:
- self._ops.make_allyesconfig(build_dir, make_options)
self._ops.make_olddefconfig(build_dir, make_options)
self._ops.make(jobs, build_dir, make_options)
except (ConfigError, BuildError) as e:
@@ -197,15 +345,47 @@ class LinuxSourceTree(object):
return False
return self.validate_config(build_dir)
- def run_kernel(self, args=[], build_dir='', timeout=None):
- args.extend(['mem=1G'])
- self._ops.linux_bin(args, timeout, build_dir)
- outfile = get_outfile_path(build_dir)
- subprocess.call(['stty', 'sane'])
- with open(outfile, 'r') as file:
- for line in file:
+ def run_kernel(self, args: Optional[List[str]]=None, build_dir: str='', filter_glob: str='', filter: str='', filter_action: Optional[str]=None, timeout: Optional[int]=None) -> Iterator[str]:
+ if not args:
+ args = []
+ if filter_glob:
+ args.append('kunit.filter_glob=' + filter_glob)
+ if filter:
+ args.append('kunit.filter="' + filter + '"')
+ if filter_action:
+ args.append('kunit.filter_action=' + filter_action)
+ args.append('kunit.enable=1')
+
+ process = self._ops.start(args, build_dir)
+ assert process.stdout is not None # tell mypy it's set
+
+ # Enforce the timeout in a background thread.
+ def _wait_proc() -> None:
+ try:
+ process.wait(timeout=timeout)
+ except Exception as e:
+ print(e)
+ process.terminate()
+ process.wait()
+ waiter = threading.Thread(target=_wait_proc)
+ waiter.start()
+
+ output = open(get_outfile_path(build_dir), 'w')
+ try:
+ # Tee the output to the file and to our caller in real time.
+ for line in process.stdout:
+ output.write(line)
yield line
+ # This runs even if our caller doesn't consume every line.
+ finally:
+ # Flush any leftover output to the file
+ output.write(process.stdout.read())
+ output.close()
+ process.stdout.close()
+
+ waiter.join()
+ subprocess.call(['stty', 'sane'])
- def signal_handler(self, sig, frame):
+ def signal_handler(self, unused_sig: int, unused_frame: Optional[FrameType]) -> None:
logging.error('Build interruption occurred. Cleaning console.')
subprocess.call(['stty', 'sane'])
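Tying the pieces together, a hedged end-to-end sketch (hypothetical build directory; signatures as added above):

    import kunit_kernel

    linux = kunit_kernel.LinuxSourceTree('.kunit', arch='um')
    if linux.build_reconfig('.kunit', make_options=None) and \
       linux.build_kernel(jobs=8, build_dir='.kunit', make_options=None):
        for line in linux.run_kernel(filter_glob='example*', build_dir='.kunit', timeout=300):
            print(line, end='')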
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 6614ec4d0898..333cd3a4a56b 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -1,360 +1,864 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Parses test results from a kernel dmesg log.
+# Parses KTAP test results from a kernel dmesg log and incrementally prints
+# results with reader-friendly format. Stores and returns test results in a
+# Test object.
#
# Copyright (C) 2019, Google LLC.
# Author: Felix Guo <felixguoxiuping@gmail.com>
# Author: Brendan Higgins <brendanhiggins@google.com>
+# Author: Rae Moar <rmoar@google.com>
+from __future__ import annotations
+from dataclasses import dataclass
import re
+import textwrap
-from collections import namedtuple
-from datetime import datetime
from enum import Enum, auto
-from functools import reduce
-from typing import List, Optional, Tuple
-
-TestResult = namedtuple('TestResult', ['status','suites','log'])
-
-class TestSuite(object):
- def __init__(self):
- self.status = None
- self.name = None
- self.cases = []
-
- def __str__(self):
- return 'TestSuite(' + self.status + ',' + self.name + ',' + str(self.cases) + ')'
-
- def __repr__(self):
- return str(self)
-
-class TestCase(object):
- def __init__(self):
- self.status = None
+from typing import Iterable, Iterator, List, Optional, Tuple
+
+from kunit_printer import Printer, stdout
+
+class Test:
+ """
+ A class to represent a test parsed from KTAP results. All KTAP
+ results within a test log are stored in a main Test object as
+ subtests.
+
+ Attributes:
+ status : TestStatus - status of the test
+ name : str - name of the test
+ expected_count : int - expected number of subtests (0 if single
+ test case and None if unknown expected number of subtests)
+ subtests : List[Test] - list of subtests
+ log : List[str] - log of KTAP lines that correspond to the test
+ counts : TestCounts - counts of the test statuses and errors of
+ subtests or of the test itself if the test is a single
+ test case.
+ """
+ def __init__(self) -> None:
+ """Creates Test object with default attributes."""
+ self.status = TestStatus.TEST_CRASHED
self.name = ''
- self.log = []
+ self.expected_count = 0 # type: Optional[int]
+ self.subtests = [] # type: List[Test]
+ self.log = [] # type: List[str]
+ self.counts = TestCounts()
+
+ def __str__(self) -> str:
+ """Returns string representation of a Test class object."""
+ return (f'Test({self.status}, {self.name}, {self.expected_count}, '
+ f'{self.subtests}, {self.log}, {self.counts})')
+
+ def __repr__(self) -> str:
+ """Returns string representation of a Test class object."""
+ return str(self)
- def __str__(self):
- return 'TestCase(' + self.status + ',' + self.name + ',' + str(self.log) + ')'
+ def add_error(self, printer: Printer, error_message: str) -> None:
+ """Records an error that occurred while parsing this test."""
+ self.counts.errors += 1
+ printer.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}')
- def __repr__(self):
- return str(self)
+ def ok_status(self) -> bool:
+ """Returns true if the status was ok, i.e. passed or skipped."""
+ return self.status in (TestStatus.SUCCESS, TestStatus.SKIPPED)
class TestStatus(Enum):
+ """An enumeration class to represent the status of a test."""
SUCCESS = auto()
FAILURE = auto()
+ SKIPPED = auto()
TEST_CRASHED = auto()
NO_TESTS = auto()
FAILURE_TO_PARSE_TESTS = auto()
-kunit_start_re = re.compile(r'TAP version [0-9]+$')
-kunit_end_re = re.compile('(List of all partitions:|'
- 'Kernel panic - not syncing: VFS:)')
-
-def isolate_kunit_output(kernel_output):
- started = False
- for line in kernel_output:
- line = line.rstrip() # line always has a trailing \n
- if kunit_start_re.search(line):
- prefix_len = len(line.split('TAP version')[0])
- started = True
- yield line[prefix_len:] if prefix_len > 0 else line
- elif kunit_end_re.search(line):
- break
- elif started:
- yield line[prefix_len:] if prefix_len > 0 else line
-
-def raw_output(kernel_output):
- for line in kernel_output:
- print(line.rstrip())
-
-DIVIDER = '=' * 60
-
-RESET = '\033[0;0m'
-
-def red(text):
- return '\033[1;31m' + text + RESET
-
-def yellow(text):
- return '\033[1;33m' + text + RESET
-
-def green(text):
- return '\033[1;32m' + text + RESET
-
-def print_with_timestamp(message):
- print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
-
-def format_suite_divider(message):
- return '======== ' + message + ' ========'
-
-def print_suite_divider(message):
- print_with_timestamp(DIVIDER)
- print_with_timestamp(format_suite_divider(message))
-
-def print_log(log):
- for m in log:
- print_with_timestamp(m)
-
-TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*#).*$')
-
-def consume_non_diagnositic(lines: List[str]) -> None:
- while lines and not TAP_ENTRIES.match(lines[0]):
- lines.pop(0)
+@dataclass
+class TestCounts:
+ """
+ Tracks the counts of statuses of all test cases and any errors within
+ a Test.
+ """
+ passed: int = 0
+ failed: int = 0
+ crashed: int = 0
+ skipped: int = 0
+ errors: int = 0
+
+ def __str__(self) -> str:
+ """Returns the string representation of a TestCounts object."""
+ statuses = [('passed', self.passed), ('failed', self.failed),
+ ('crashed', self.crashed), ('skipped', self.skipped),
+ ('errors', self.errors)]
+ return f'Ran {self.total()} tests: ' + \
+ ', '.join(f'{s}: {n}' for s, n in statuses if n > 0)
+
+ def total(self) -> int:
+ """Returns the total number of test cases within a test
+ object, where a test case is a test with no subtests.
+ """
+ return (self.passed + self.failed + self.crashed +
+ self.skipped)
+
+ def add_subtest_counts(self, counts: TestCounts) -> None:
+ """
+ Adds the counts of another TestCounts object to the current
+ TestCounts object. Used to add the counts of a subtest to the
+ parent test.
+
+ Parameters:
+ counts - a different TestCounts object whose counts
+ will be added to the counts of the TestCounts object
+ """
+ self.passed += counts.passed
+ self.failed += counts.failed
+ self.crashed += counts.crashed
+ self.skipped += counts.skipped
+ self.errors += counts.errors
+
+ def get_status(self) -> TestStatus:
+ """Returns the aggregated status of a Test using test
+ counts.
+ """
+ if self.total() == 0:
+ return TestStatus.NO_TESTS
+ if self.crashed:
+ # Crashes should take priority.
+ return TestStatus.TEST_CRASHED
+ if self.failed:
+ return TestStatus.FAILURE
+ if self.passed:
+ # No failures or crashes, looks good!
+ return TestStatus.SUCCESS
+ # We have only skipped tests.
+ return TestStatus.SKIPPED
+
+ def add_status(self, status: TestStatus) -> None:
+ """Increments the count for `status`."""
+ if status == TestStatus.SUCCESS:
+ self.passed += 1
+ elif status == TestStatus.FAILURE:
+ self.failed += 1
+ elif status == TestStatus.SKIPPED:
+ self.skipped += 1
+ elif status != TestStatus.NO_TESTS:
+ self.crashed += 1
+
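For instance, aggregation gives crashes priority over failures (standalone sketch):

    counts = TestCounts(passed=2, failed=1, crashed=1)
    assert counts.get_status() == TestStatus.TEST_CRASHED
    assert counts.total() == 4  # errors are not test cases, so not counted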
+class LineStream:
+ """
+ A class to represent the lines of kernel output.
+ Provides a lazy peek()/pop() interface over an iterator of
+ (line#, text).
+ """
+ _lines: Iterator[Tuple[int, str]]
+ _next: Tuple[int, str]
+ _need_next: bool
+ _done: bool
+
+ def __init__(self, lines: Iterator[Tuple[int, str]]):
+ """Creates a new LineStream that wraps the given iterator."""
+ self._lines = lines
+ self._done = False
+ self._need_next = True
+ self._next = (0, '')
+
+ def _get_next(self) -> None:
+ """Advances the LineSteam to the next line, if necessary."""
+ if not self._need_next:
+ return
+ try:
+ self._next = next(self._lines)
+ except StopIteration:
+ self._done = True
+ finally:
+ self._need_next = False
+
+ def peek(self) -> str:
+ """Returns the current line, without advancing the LineStream.
+ """
+ self._get_next()
+ return self._next[1]
+
+ def pop(self) -> str:
+ """Returns the current line and advances the LineStream to
+ the next line.
+ """
+ s = self.peek()
+ if self._done:
+ raise ValueError(f'LineStream: going past EOF, last line was {s}')
+ self._need_next = True
+ return s
+
+ def __bool__(self) -> bool:
+ """Returns True if stream has more lines."""
+ self._get_next()
+ return not self._done
+
+ # Only used by kunit_tool_test.py.
+ def __iter__(self) -> Iterator[str]:
+ """Empties all lines stored in LineStream object into
+ Iterator object and returns the Iterator object.
+ """
+ while bool(self):
+ yield self.pop()
+
+ def line_number(self) -> int:
+ """Returns the line number of the current line."""
+ self._get_next()
+ return self._next[0]
+
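A contained example of the peek()/pop() contract, using enumerate() to supply (line number, text) pairs:

    stream = LineStream(iter(enumerate(['KTAP version 1', 'ok 1 - test'], start=1)))
    assert stream.peek() == 'KTAP version 1'  # peek() does not advance
    assert stream.pop() == 'KTAP version 1'
    assert stream.line_number() == 2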
+# Parsing helper methods:
+
+KTAP_START = re.compile(r'\s*KTAP version ([0-9]+)$')
+TAP_START = re.compile(r'\s*TAP version ([0-9]+)$')
+KTAP_END = re.compile(r'\s*(List of all partitions:|'
+ 'Kernel panic - not syncing: VFS:|reboot: System halted)')
+EXECUTOR_ERROR = re.compile(r'\s*kunit executor: (.*)$')
+
+def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
+ """Extracts KTAP lines from the kernel output."""
+ def isolate_ktap_output(kernel_output: Iterable[str]) \
+ -> Iterator[Tuple[int, str]]:
+ line_num = 0
+ started = False
+ for line in kernel_output:
+ line_num += 1
+ line = line.rstrip() # remove trailing \n
+ if not started and KTAP_START.search(line):
+ # start extracting KTAP lines and set prefix
+ # to number of characters before version line
+ prefix_len = len(
+ line.split('KTAP version')[0])
+ started = True
+ yield line_num, line[prefix_len:]
+ elif not started and TAP_START.search(line):
+ # start extracting KTAP lines and set prefix
+ # to number of characters before version line
+ prefix_len = len(line.split('TAP version')[0])
+ started = True
+ yield line_num, line[prefix_len:]
+ elif started and KTAP_END.search(line):
+ # stop extracting KTAP lines
+ break
+ elif started:
+ # remove the prefix, if any.
+ line = line[prefix_len:]
+ yield line_num, line
+ elif EXECUTOR_ERROR.search(line):
+ yield line_num, line
+ return LineStream(lines=isolate_ktap_output(kernel_output))
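For example, a timestamped boot log is trimmed to just the KTAP lines, with the prefix before the version line stripped (illustrative input):

    log = ['[0.100000] boot noise',
           '[0.200000] KTAP version 1',
           '[0.300000] 1..1']
    assert list(extract_tap_lines(log)) == ['KTAP version 1', '1..1']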
+
+KTAP_VERSIONS = [1]
+TAP_VERSIONS = [13, 14]
+
+def check_version(version_num: int, accepted_versions: List[int],
+ version_type: str, test: Test, printer: Printer) -> None:
+ """
+ Adds error to test object if version number is too high or too
+ low.
+
+ Parameters:
+ version_num - The inputted version number from the parsed KTAP or TAP
+ header line
+ accepted_versions - List of accepted KTAP or TAP versions
+ version_type - 'KTAP' or 'TAP' depending on the type of
+ version line.
+ test - Test object for current test being parsed
+ printer - Printer object to output error
+ """
+ if version_num < min(accepted_versions):
+ test.add_error(printer, f'{version_type} version lower than expected!')
+ elif version_num > max(accepted_versions):
+ test.add_error(printer, f'{version_type} version higher than expected!')
+
+def parse_ktap_header(lines: LineStream, test: Test, printer: Printer) -> bool:
+ """
+ Parses KTAP/TAP header line and checks version number.
+ Returns False if fails to parse KTAP/TAP header line.
+
+ Accepted formats:
+ - 'KTAP version [version number]'
+ - 'TAP version [version number]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+ printer - Printer object to output results
+
+ Return:
+ True if successfully parsed KTAP/TAP header line
+ """
+ ktap_match = KTAP_START.match(lines.peek())
+ tap_match = TAP_START.match(lines.peek())
+ if ktap_match:
+ version_num = int(ktap_match.group(1))
+ check_version(version_num, KTAP_VERSIONS, 'KTAP', test, printer)
+ elif tap_match:
+ version_num = int(tap_match.group(1))
+ check_version(version_num, TAP_VERSIONS, 'TAP', test, printer)
+ else:
+ return False
+ lines.pop()
+ return True
-def save_non_diagnositic(lines: List[str], test_case: TestCase) -> None:
- while lines and not TAP_ENTRIES.match(lines[0]):
- test_case.log.append(lines[0])
- lines.pop(0)
+TEST_HEADER = re.compile(r'^\s*# Subtest: (.*)$')
-OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])
+def parse_test_header(lines: LineStream, test: Test) -> bool:
+ """
+ Parses test header and stores test name in test object.
+ Returns False if fails to parse test header line.
-OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
+ Accepted format:
+ - '# Subtest: [test name]'
-OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
-def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool:
- save_non_diagnositic(lines, test_case)
- if not lines:
- test_case.status = TestStatus.TEST_CRASHED
- return True
- line = lines[0]
- match = OK_NOT_OK_SUBTEST.match(line)
- while not match and lines:
- line = lines.pop(0)
- match = OK_NOT_OK_SUBTEST.match(line)
- if match:
- test_case.log.append(lines.pop(0))
- test_case.name = match.group(2)
- if test_case.status == TestStatus.TEST_CRASHED:
- return True
- if match.group(1) == 'ok':
- test_case.status = TestStatus.SUCCESS
- else:
- test_case.status = TestStatus.FAILURE
- return True
- else:
+ Return:
+ True if successfully parsed test header line
+ """
+ match = TEST_HEADER.match(lines.peek())
+ if not match:
return False
-
-SUBTEST_DIAGNOSTIC = re.compile(r'^[\s]+# (.*)$')
-DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^[\s]+# .*?: kunit test case crashed!$')
-
-def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool:
- save_non_diagnositic(lines, test_case)
- if not lines:
+ test.name = match.group(1)
+ lines.pop()
+ return True
+
+TEST_PLAN = re.compile(r'^\s*1\.\.([0-9]+)')
+
+def parse_test_plan(lines: LineStream, test: Test) -> bool:
+ """
+ Parses test plan line and stores the expected number of subtests in
+ test object. Reports an error if expected count is 0.
+ Returns False and sets expected_count to None if there is no valid test
+ plan.
+
+ Accepted format:
+ - '1..[number of subtests]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ True if successfully parsed test plan line
+ """
+ match = TEST_PLAN.match(lines.peek())
+ if not match:
+ test.expected_count = None
return False
- line = lines[0]
- match = SUBTEST_DIAGNOSTIC.match(line)
- if match:
- test_case.log.append(lines.pop(0))
- crash_match = DIAGNOSTIC_CRASH_MESSAGE.match(line)
- if crash_match:
- test_case.status = TestStatus.TEST_CRASHED
- return True
- else:
+ expected_count = int(match.group(1))
+ test.expected_count = expected_count
+ lines.pop()
+ return True
+
+TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+) ?(- )?([^#]*)( # .*)?$')
+
+TEST_RESULT_SKIP = re.compile(r'^\s*(ok|not ok) ([0-9]+) ?(- )?(.*) # SKIP ?(.*)$')
+
+def peek_test_name_match(lines: LineStream, test: Test) -> bool:
+ """
+ Matches current line with the format of a test result line and checks
+ if the name matches the name of the current test.
+ Returns False if fails to match format or name.
+
+ Accepted format:
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+
+ Return:
+ True if the line matches a test result line and the name matches
+ the expected test name
+ """
+ line = lines.peek()
+ match = TEST_RESULT.match(line)
+ if not match:
+ return False
+ name = match.group(4)
+ if not name:
return False
+ return name == test.name
+
+def parse_test_result(lines: LineStream, test: Test,
+ expected_num: int, printer: Printer) -> bool:
+ """
+ Parses test result line and stores the status and name in the test
+ object. Reports an error if the test number does not match expected
+ test number.
+ Returns False if fails to parse test result line.
+
+ Note that the SKIP directive is the only directive that causes a
+ change in status.
+
+ Accepted format:
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ test - Test object for current test being parsed
+ expected_num - expected test number for current test
+ printer - Printer object to output results
+
+ Return:
+ True if successfully parsed a test result line.
+ """
+ line = lines.peek()
+ match = TEST_RESULT.match(line)
+ skip_match = TEST_RESULT_SKIP.match(line)
+
+ # Check if line matches test result line format
+ if not match:
+ return False
+ lines.pop()
-def parse_test_case(lines: List[str]) -> Optional[TestCase]:
- test_case = TestCase()
- save_non_diagnositic(lines, test_case)
- while parse_diagnostic(lines, test_case):
- pass
- if parse_ok_not_ok_test_case(lines, test_case):
- return test_case
+ # Set name of test object
+ if skip_match:
+ test.name = skip_match.group(4) or skip_match.group(5)
else:
- return None
-
-SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$')
-
-def parse_subtest_header(lines: List[str]) -> Optional[str]:
- consume_non_diagnositic(lines)
- if not lines:
- return None
- match = SUBTEST_HEADER.match(lines[0])
- if match:
- lines.pop(0)
- return match.group(1)
+ test.name = match.group(4)
+
+ # Check test num
+ num = int(match.group(2))
+ if num != expected_num:
+ test.add_error(printer, f'Expected test number {expected_num} but found {num}')
+
+ # Set status of test object
+ status = match.group(1)
+ if skip_match:
+ test.status = TestStatus.SKIPPED
+ elif status == 'ok':
+ test.status = TestStatus.SUCCESS
else:
- return None
+ test.status = TestStatus.FAILURE
+ return True
-SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)')
+def parse_diagnostic(lines: LineStream) -> List[str]:
+ """
+ Parses lines that do not match the format of a test result line or
+ test header line and returns them in a list.
-def parse_subtest_plan(lines: List[str]) -> Optional[int]:
- consume_non_diagnositic(lines)
- match = SUBTEST_PLAN.match(lines[0])
- if match:
- lines.pop(0)
- return int(match.group(1))
- else:
- return None
-
-def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
- if left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
- return TestStatus.TEST_CRASHED
- elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
- return TestStatus.FAILURE
- elif left != TestStatus.SUCCESS:
- return left
- elif right != TestStatus.SUCCESS:
- return right
- else:
- return TestStatus.SUCCESS
+ Line formats that are not parsed:
+ - '# Subtest: [test name]'
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
+ - 'KTAP version [version number]'
-def parse_ok_not_ok_test_suite(lines: List[str],
- test_suite: TestSuite,
- expected_suite_index: int) -> bool:
- consume_non_diagnositic(lines)
- if not lines:
- test_suite.status = TestStatus.TEST_CRASHED
- return False
- line = lines[0]
- match = OK_NOT_OK_MODULE.match(line)
- if match:
- lines.pop(0)
- if match.group(1) == 'ok':
- test_suite.status = TestStatus.SUCCESS
- else:
- test_suite.status = TestStatus.FAILURE
- suite_index = int(match.group(2))
- if suite_index != expected_suite_index:
- print_with_timestamp(
- red('[ERROR] ') + 'expected_suite_index ' +
- str(expected_suite_index) + ', but got ' +
- str(suite_index))
- return True
- else:
- return False
+ Parameters:
+ lines - LineStream of KTAP output to parse
-def bubble_up_errors(to_status, status_container_list) -> TestStatus:
- status_list = map(to_status, status_container_list)
- return reduce(max_status, status_list, TestStatus.SUCCESS)
+ Return:
+ Log of diagnostic lines
+ """
+ log = [] # type: List[str]
+ non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START, TAP_START, TEST_PLAN]
+ while lines and not any(re.match(lines.peek())
+ for re in non_diagnostic_lines):
+ log.append(lines.pop())
+ return log
-def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
- max_test_case_status = bubble_up_errors(lambda x: x.status, test_suite.cases)
- return max_status(max_test_case_status, test_suite.status)
-def parse_test_suite(lines: List[str], expected_suite_index: int) -> Optional[TestSuite]:
- if not lines:
- return None
- consume_non_diagnositic(lines)
- test_suite = TestSuite()
- test_suite.status = TestStatus.SUCCESS
- name = parse_subtest_header(lines)
- if not name:
- return None
- test_suite.name = name
- expected_test_case_num = parse_subtest_plan(lines)
- if expected_test_case_num is None:
- return None
- while expected_test_case_num > 0:
- test_case = parse_test_case(lines)
- if not test_case:
- break
- test_suite.cases.append(test_case)
- expected_test_case_num -= 1
- if parse_ok_not_ok_test_suite(lines, test_suite, expected_suite_index):
- test_suite.status = bubble_up_test_case_errors(test_suite)
- return test_suite
- elif not lines:
- print_with_timestamp(red('[ERROR] ') + 'ran out of lines before end token')
- return test_suite
- else:
- print('failed to parse end of suite' + lines[0])
- return None
+# Printing helper methods:
-TAP_HEADER = re.compile(r'^TAP version 14$')
+DIVIDER = '=' * 60
-def parse_tap_header(lines: List[str]) -> bool:
- consume_non_diagnositic(lines)
- if TAP_HEADER.match(lines[0]):
- lines.pop(0)
- return True
+def format_test_divider(message: str, len_message: int) -> str:
+ """
+ Returns string with message centered in fixed width divider.
+
+ Example:
+ '===================== message example ====================='
+
+ Parameters:
+ message - message to be centered in divider line
+ len_message - length of the message to be printed such that
+ any characters of the color codes are not counted
+
+ Return:
+ String containing message centered in fixed width divider
+ """
+ default_count = 3 # default number of dashes
+ len_1 = default_count
+ len_2 = default_count
+ difference = len(DIVIDER) - len_message - 2 # 2 spaces added
+ if difference > 0:
+ # calculate number of dashes for each side of the divider
+ len_1 = int(difference / 2)
+ len_2 = difference - len_1
+ return ('=' * len_1) + f' {message} ' + ('=' * len_2)
+
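Concretely, with the 60-character DIVIDER defined above:

    line = format_test_divider('example', len('example'))
    assert line == '=' * 25 + ' example ' + '=' * 26
    assert len(line) == 60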
+def print_test_header(test: Test, printer: Printer) -> None:
+ """
+ Prints test header with test name and optionally the expected number
+ of subtests.
+
+ Example:
+ '=================== example (2 subtests) ==================='
+
+ Parameters:
+ test - Test object representing current test being printed
+ printer - Printer object to output results
+ """
+ message = test.name
+ if message != "":
+ # Add a leading space before the subtest counts only if a test name
+ # is provided using a "# Subtest" header line.
+ message += " "
+ if test.expected_count:
+ if test.expected_count == 1:
+ message += '(1 subtest)'
+ else:
+ message += f'({test.expected_count} subtests)'
+ printer.print_with_timestamp(format_test_divider(message, len(message)))
+
+def print_log(log: Iterable[str], printer: Printer) -> None:
+ """Prints all strings in saved log for test in yellow."""
+ formatted = textwrap.dedent('\n'.join(log))
+ for line in formatted.splitlines():
+ printer.print_with_timestamp(printer.yellow(line))
+
+def format_test_result(test: Test, printer: Printer) -> str:
+ """
+ Returns string with formatted test result with colored status and test
+ name.
+
+ Example:
+ '[PASSED] example'
+
+ Parameters:
+ test - Test object representing current test being printed
+ printer - Printer object to output results
+
+ Return:
+ String containing formatted test result
+ """
+ if test.status == TestStatus.SUCCESS:
+ return printer.green('[PASSED] ') + test.name
+ if test.status == TestStatus.SKIPPED:
+ return printer.yellow('[SKIPPED] ') + test.name
+ if test.status == TestStatus.NO_TESTS:
+ return printer.yellow('[NO TESTS RUN] ') + test.name
+ if test.status == TestStatus.TEST_CRASHED:
+ print_log(test.log, printer)
+ return stdout.red('[CRASHED] ') + test.name
+ print_log(test.log, printer)
+ return printer.red('[FAILED] ') + test.name
+
+def print_test_result(test: Test, printer: Printer) -> None:
+ """
+ Prints result line with status of test.
+
+ Example:
+ '[PASSED] example'
+
+ Parameters:
+ test - Test object representing current test being printed
+ printer - Printer object
+ """
+ printer.print_with_timestamp(format_test_result(test, printer))
+
+def print_test_footer(test: Test, printer: Printer) -> None:
+ """
+ Prints test footer with status of test.
+
+ Example:
+ '===================== [PASSED] example ====================='
+
+ Parameters:
+ test - Test object representing current test being printed
+ printer - Printer object to output results
+ """
+ message = format_test_result(test, printer)
+ printer.print_with_timestamp(format_test_divider(message,
+ len(message) - printer.color_len()))
+
+def print_test(test: Test, failed_only: bool, printer: Printer) -> None:
+ """
+ Prints Test object to given printer. For a child test, the result line is
+ printed. For a parent test, the test header, all child test results, and
+ the test footer are all printed. If failed_only is true, only failed/crashed
+ tests will be printed.
+
+ Parameters:
+ test - Test object to print
+ failed_only - True if only failed/crashed tests should be printed.
+ printer - Printer object to output results
+ """
+ if test.name == "main":
+ printer.print_with_timestamp(DIVIDER)
+ for subtest in test.subtests:
+ print_test(subtest, failed_only, printer)
+ printer.print_with_timestamp(DIVIDER)
+ elif test.subtests != []:
+ if not failed_only or not test.ok_status():
+ print_test_header(test, printer)
+ for subtest in test.subtests:
+ print_test(subtest, failed_only, printer)
+ print_test_footer(test, printer)
else:
- return False
+ if not failed_only or not test.ok_status():
+ print_test_result(test, printer)
-TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)')
+def _summarize_failed_tests(test: Test) -> str:
+ """Tries to summarize all the failing subtests in `test`."""
-def parse_test_plan(lines: List[str]) -> Optional[int]:
- consume_non_diagnositic(lines)
- match = TEST_PLAN.match(lines[0])
- if match:
- lines.pop(0)
- return int(match.group(1))
- else:
- return None
-
-def bubble_up_suite_errors(test_suite_list: List[TestSuite]) -> TestStatus:
- return bubble_up_errors(lambda x: x.status, test_suite_list)
-
-def parse_test_result(lines: List[str]) -> TestResult:
- consume_non_diagnositic(lines)
- if not lines or not parse_tap_header(lines):
- return TestResult(TestStatus.NO_TESTS, [], lines)
- expected_test_suite_num = parse_test_plan(lines)
- if not expected_test_suite_num:
- return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
- test_suites = []
- for i in range(1, expected_test_suite_num + 1):
- test_suite = parse_test_suite(lines, i)
- if test_suite:
- test_suites.append(test_suite)
+ def failed_names(test: Test, parent_name: str) -> List[str]:
+ # Note: we use 'main' internally for the top-level test.
+ if not parent_name or parent_name == 'main':
+ full_name = test.name
else:
- print_with_timestamp(
- red('[ERROR] ') + ' expected ' +
- str(expected_test_suite_num) +
- ' test suites, but got ' + str(i - 2))
- break
- test_suite = parse_test_suite(lines, -1)
- if test_suite:
- print_with_timestamp(red('[ERROR] ') +
- 'got unexpected test suite: ' + test_suite.name)
- if test_suites:
- return TestResult(bubble_up_suite_errors(test_suites), test_suites, lines)
+ full_name = parent_name + '.' + test.name
+
+ if not test.subtests: # this is a leaf node
+ return [full_name]
+
+ # If all the children failed, just say this subtest failed.
+		# Don't summarize it down to "the top-level test failed", though.
+ failed_subtests = [sub for sub in test.subtests if not sub.ok_status()]
+ if parent_name and len(failed_subtests) == len(test.subtests):
+ return [full_name]
+
+ all_failures = [] # type: List[str]
+ for t in failed_subtests:
+ all_failures.extend(failed_names(t, full_name))
+ return all_failures
+
+ failures = failed_names(test, '')
+ # If there are too many failures, printing them out will just be noisy.
+ if len(failures) > 10: # this is an arbitrary limit
+ return ''
+
+ return 'Failures: ' + ', '.join(failures)
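+
+# A sketch of the collapsing behavior above (hypothetical test tree): if
+# every subtest of suite 'a' fails, failed_names() reports just 'a'; if only
+# one of suite 'b's two subtests fails, it reports 'b.test2'. The summary
+# then reads 'Failures: a, b.test2'.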
+
+
+def print_summary_line(test: Test, printer: Printer) -> None:
+ """
+	Prints the summary line for a test object. The line is green if the
+	test passed, yellow if it was skipped, and red if it failed or
+	crashed. The summary line contains counts of the statuses of the
+	test's subtests, or of the test itself if it has no subtests.
+
+ Example:
+ "Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0,
+ Errors: 0"
+
+ test - Test object representing current test being printed
+ printer - Printer object to output results
+ """
+ if test.status == TestStatus.SUCCESS:
+		color = printer.green
+ elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS):
+		color = printer.yellow
else:
- return TestResult(TestStatus.NO_TESTS, [], lines)
-
-def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
- total_tests = 0
- failed_tests = 0
- crashed_tests = 0
- for test_suite in test_result.suites:
- if test_suite.status == TestStatus.SUCCESS:
- print_suite_divider(green('[PASSED] ') + test_suite.name)
- elif test_suite.status == TestStatus.TEST_CRASHED:
- print_suite_divider(red('[CRASHED] ' + test_suite.name))
- else:
- print_suite_divider(red('[FAILED] ') + test_suite.name)
- for test_case in test_suite.cases:
- total_tests += 1
- if test_case.status == TestStatus.SUCCESS:
- print_with_timestamp(green('[PASSED] ') + test_case.name)
- elif test_case.status == TestStatus.TEST_CRASHED:
- crashed_tests += 1
- print_with_timestamp(red('[CRASHED] ' + test_case.name))
- print_log(map(yellow, test_case.log))
- print_with_timestamp('')
+		color = printer.red
+ printer.print_with_timestamp(color(f'Testing complete. {test.counts}'))
+
+ # Summarize failures that might have gone off-screen since we had a lot
+ # of tests (arbitrarily defined as >=100 for now).
+ if test.ok_status() or test.counts.total() < 100:
+ return
+ summarized = _summarize_failed_tests(test)
+ if not summarized:
+ return
+ printer.print_with_timestamp(color(summarized))
+
+# Other methods:
+
+def bubble_up_test_results(test: Test) -> None:
+ """
+	If the test has subtests, add the counts of the subtests to this
+	test's counts, and set the test's status to crashed if any subtest
+	crashed. Otherwise, if the test has no subtests, add the test's own
+	status to its counts.
+
+ Parameters:
+ test - Test object for current test being parsed
+ """
+ subtests = test.subtests
+ counts = test.counts
+ status = test.status
+ for t in subtests:
+ counts.add_subtest_counts(t.counts)
+ if counts.total() == 0:
+ counts.add_status(status)
+ elif test.counts.get_status() == TestStatus.TEST_CRASHED:
+ test.status = TestStatus.TEST_CRASHED
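+
+# For example (illustrative): a parent whose subtests report (passed=2) and
+# (passed=1, crashed=1) ends up with counts (passed=3, crashed=1) and has
+# its status bubbled up to TEST_CRASHED; a leaf test instead just adds its
+# own status to its otherwise-empty counts.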
+
+def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest: bool, printer: Printer) -> Test:
+ """
+ Finds next test to parse in LineStream, creates new Test object,
+ parses any subtests of the test, populates Test object with all
+ information (status, name) about the test and the Test objects for
+	any subtests, and then returns the Test object. The method accepts
+	three test formats:
+
+ - Main KTAP/TAP header
+
+ Example:
+
+ KTAP version 1
+ 1..4
+ [subtests]
+
+ - Subtest header (must include either the KTAP version line or
+ "# Subtest" header line)
+
+ Example (preferred format with both KTAP version line and
+ "# Subtest" line):
+
+ KTAP version 1
+ # Subtest: name
+ 1..3
+ [subtests]
+ ok 1 name
+
+ Example (only "# Subtest" line):
+
+ # Subtest: name
+ 1..3
+ [subtests]
+ ok 1 name
+
+ Example (only KTAP version line, compliant with KTAP v1 spec):
+
+ KTAP version 1
+ 1..3
+ [subtests]
+ ok 1 name
+
+ - Test result line
+
+ Example:
+
+ ok 1 - test
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+ expected_num - expected test number for test to be parsed
+ log - list of strings containing any preceding diagnostic lines
+ corresponding to the current test
+ is_subtest - boolean indicating whether test is a subtest
+ printer - Printer object to output results
+
+ Return:
+ Test object populated with characteristics and any subtests
+ """
+ test = Test()
+ test.log.extend(log)
+
+ # Parse any errors prior to parsing tests
+ err_log = parse_diagnostic(lines)
+ test.log.extend(err_log)
+
+ if not is_subtest:
+ # If parsing the main/top-level test, parse KTAP version line and
+ # test plan
+ test.name = "main"
+ parse_ktap_header(lines, test, printer)
+ test.log.extend(parse_diagnostic(lines))
+ parse_test_plan(lines, test)
+ parent_test = True
+ else:
+ # If not the main test, attempt to parse a test header containing
+ # the KTAP version line and/or subtest header line
+ ktap_line = parse_ktap_header(lines, test, printer)
+ subtest_line = parse_test_header(lines, test)
+ test.log.extend(parse_diagnostic(lines))
+ parse_test_plan(lines, test)
+ parent_test = (ktap_line or subtest_line)
+ if parent_test:
+ print_test_header(test, printer)
+
+ expected_count = test.expected_count
+ subtests = []
+ test_num = 1
+ while parent_test and (expected_count is None or test_num <= expected_count):
+		# Loop to parse any subtests. Break after parsing the
+		# expected number of tests. If the expected number is
+		# unknown, break when a test result line matching the
+		# subtest header name is found or the stream runs out of
+		# lines.
+ sub_log = parse_diagnostic(lines)
+ sub_test = Test()
+ if not lines or (peek_test_name_match(lines, test) and
+ is_subtest):
+ if expected_count and test_num <= expected_count:
+ # If parser reaches end of test before
+ # parsing expected number of subtests, print
+ # crashed subtest and record error
+ test.add_error(printer, 'missing expected subtest!')
+ sub_test.log.extend(sub_log)
+ test.counts.add_status(
+ TestStatus.TEST_CRASHED)
+ print_test_result(sub_test, printer)
else:
- failed_tests += 1
- print_with_timestamp(red('[FAILED] ') + test_case.name)
- print_log(map(yellow, test_case.log))
- print_with_timestamp('')
- return total_tests, failed_tests, crashed_tests
-
-def parse_run_tests(kernel_output) -> TestResult:
- total_tests = 0
- failed_tests = 0
- crashed_tests = 0
- test_result = parse_test_result(list(isolate_kunit_output(kernel_output)))
- if test_result.status == TestStatus.NO_TESTS:
- print(red('[ERROR] ') + yellow('no tests run!'))
- elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS:
- print(red('[ERROR] ') + yellow('could not parse test results!'))
+ test.log.extend(sub_log)
+ break
+ else:
+ sub_test = parse_test(lines, test_num, sub_log, True, printer)
+ subtests.append(sub_test)
+ test_num += 1
+ test.subtests = subtests
+ if is_subtest:
+ # If not main test, look for test result line
+ test.log.extend(parse_diagnostic(lines))
+ if test.name != "" and not peek_test_name_match(lines, test):
+ test.add_error(printer, 'missing subtest result line!')
+ elif not lines:
+ print_log(test.log, printer)
+ test.status = TestStatus.NO_TESTS
+ test.add_error(printer, 'No more test results!')
+ else:
+ parse_test_result(lines, test, expected_num, printer)
+
+	# Check for the case where the parent test has no subtests
+ if parent_test and len(subtests) == 0:
+ # Don't override a bad status if this test had one reported.
+ # Assumption: no subtests means CRASHED is from Test.__init__()
+ if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
+ print_log(test.log, printer)
+ test.status = TestStatus.NO_TESTS
+ test.add_error(printer, '0 tests run!')
+
+ # Add statuses to TestCounts attribute in Test object
+ bubble_up_test_results(test)
+ if parent_test and is_subtest:
+ # If test has subtests and is not the main test object, print
+ # footer.
+ print_test_footer(test, printer)
+ elif is_subtest:
+ print_test_result(test, printer)
+ return test
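+
+# Note the recursion above: each KTAP version line or '# Subtest' header
+# opens a parent test whose children are parsed by nested parse_test()
+# calls, so a full run becomes a tree rooted at the synthetic 'main' test.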
+
+def parse_run_tests(kernel_output: Iterable[str], printer: Printer) -> Test:
+ """
+ Using kernel output, extract KTAP lines, parse the lines for test
+ results and print condensed test results and summary line.
+
+ Parameters:
+	kernel_output - Iterable object containing lines of kernel output
+ printer - Printer object to output results
+
+ Return:
+ Test - the main test object with all subtests.
+ """
+ printer.print_with_timestamp(DIVIDER)
+ lines = extract_tap_lines(kernel_output)
+ test = Test()
+ if not lines:
+ test.name = '<missing>'
+ test.add_error(printer, 'Could not find any KTAP output. Did any KUnit tests run?')
+ test.status = TestStatus.FAILURE_TO_PARSE_TESTS
else:
- (total_tests,
- failed_tests,
- crashed_tests) = print_and_count_results(test_result)
- print_with_timestamp(DIVIDER)
- fmt = green if test_result.status == TestStatus.SUCCESS else red
- print_with_timestamp(
- fmt('Testing complete. %d tests run. %d failed. %d crashed.' %
- (total_tests, failed_tests, crashed_tests)))
- return test_result
+ test = parse_test(lines, 0, [], False, printer)
+ if test.status != TestStatus.NO_TESTS:
+ test.status = test.counts.get_status()
+ printer.print_with_timestamp(DIVIDER)
+ return test
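+
+# Minimal caller-side sketch (hypothetical log lines, not from a real run):
+#
+#   from kunit_printer import stdout
+#   import kunit_parser
+#   log = ['TAP version 14', '1..1', 'ok 1 - example']
+#   test = kunit_parser.parse_run_tests(log, stdout)
+#   assert test.status == kunit_parser.TestStatus.SUCCESS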
diff --git a/tools/testing/kunit/kunit_printer.py b/tools/testing/kunit/kunit_printer.py
new file mode 100644
index 000000000000..ca119f61fe79
--- /dev/null
+++ b/tools/testing/kunit/kunit_printer.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Utilities for printing and coloring output.
+#
+# Copyright (C) 2022, Google LLC.
+# Author: Daniel Latypov <dlatypov@google.com>
+
+import datetime
+import sys
+import typing
+
+_RESET = '\033[0;0m'
+
+class Printer:
+ """Wraps a file object, providing utilities for coloring output, etc."""
+
+ def __init__(self, print: bool=True, output: typing.IO[str]=sys.stdout):
+ self._output = output
+ self._print = print
+ if print:
+ self._use_color = output.isatty()
+ else:
+ self._use_color = False
+
+ def print(self, message: str) -> None:
+ if self._print:
+ print(message, file=self._output)
+
+ def print_with_timestamp(self, message: str) -> None:
+ ts = datetime.datetime.now().strftime('%H:%M:%S')
+ self.print(f'[{ts}] {message}')
+
+ def _color(self, code: str, text: str) -> str:
+ if not self._use_color:
+ return text
+ return code + text + _RESET
+
+ def red(self, text: str) -> str:
+ return self._color('\033[1;31m', text)
+
+ def yellow(self, text: str) -> str:
+ return self._color('\033[1;33m', text)
+
+ def green(self, text: str) -> str:
+ return self._color('\033[1;32m', text)
+
+ def color_len(self) -> int:
+ """Returns the length of the color escape codes."""
+ return len(self.red(''))
+
+# Provides a default instance that prints to stdout
+stdout = Printer()
+null_printer = Printer(print=False)
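+
+# Usage sketch (illustrative; the timestamp is invented): colors are applied
+# only when the underlying stream is a TTY, so redirected output stays plain.
+#
+#   stdout.print_with_timestamp(stdout.green('[PASSED] ') + 'example')
+#   # -> '[14:05:32] [PASSED] example', green on a terminal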
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index b593f4448e83..bbba921e0eac 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -11,26 +11,33 @@ from unittest import mock
import tempfile, shutil # Handling test_tmpdir
+import itertools
import json
import os
+import signal
+import subprocess
+from typing import Iterable
import kunit_config
import kunit_parser
import kunit_kernel
import kunit_json
import kunit
+from kunit_printer import stdout
test_tmpdir = ''
+abs_test_data_dir = ''
def setUpModule():
- global test_tmpdir
+ global test_tmpdir, abs_test_data_dir
test_tmpdir = tempfile.mkdtemp()
+ abs_test_data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'test_data'))
def tearDownModule():
shutil.rmtree(test_tmpdir)
-def get_absolute_path(path):
- return os.path.join(os.path.dirname(__file__), path)
+def test_data_path(path):
+ return os.path.join(abs_test_data_dir, path)
class KconfigTest(unittest.TestCase):
@@ -39,365 +46,814 @@ class KconfigTest(unittest.TestCase):
self.assertTrue(kconfig0.is_subset_of(kconfig0))
kconfig1 = kunit_config.Kconfig()
- kconfig1.add_entry(kunit_config.KconfigEntry('TEST', 'y'))
+ kconfig1.add_entry('TEST', 'y')
self.assertTrue(kconfig1.is_subset_of(kconfig1))
self.assertTrue(kconfig0.is_subset_of(kconfig1))
self.assertFalse(kconfig1.is_subset_of(kconfig0))
def test_read_from_file(self):
- kconfig = kunit_config.Kconfig()
- kconfig_path = get_absolute_path(
- 'test_data/test_read_from_file.kconfig')
+ kconfig_path = test_data_path('test_read_from_file.kconfig')
- kconfig.read_from_file(kconfig_path)
+ kconfig = kunit_config.parse_file(kconfig_path)
expected_kconfig = kunit_config.Kconfig()
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('UML', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('MMU', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('TEST', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('EXAMPLE_TEST', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('MK8', 'n'))
-
- self.assertEqual(kconfig.entries(), expected_kconfig.entries())
+ expected_kconfig.add_entry('UML', 'y')
+ expected_kconfig.add_entry('MMU', 'y')
+ expected_kconfig.add_entry('TEST', 'y')
+ expected_kconfig.add_entry('EXAMPLE_TEST', 'y')
+ expected_kconfig.add_entry('MK8', 'n')
+
+ self.assertEqual(kconfig, expected_kconfig)
def test_write_to_file(self):
kconfig_path = os.path.join(test_tmpdir, '.config')
expected_kconfig = kunit_config.Kconfig()
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('UML', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('MMU', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('TEST', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('EXAMPLE_TEST', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('MK8', 'n'))
+ expected_kconfig.add_entry('UML', 'y')
+ expected_kconfig.add_entry('MMU', 'y')
+ expected_kconfig.add_entry('TEST', 'y')
+ expected_kconfig.add_entry('EXAMPLE_TEST', 'y')
+ expected_kconfig.add_entry('MK8', 'n')
expected_kconfig.write_to_file(kconfig_path)
- actual_kconfig = kunit_config.Kconfig()
- actual_kconfig.read_from_file(kconfig_path)
-
- self.assertEqual(actual_kconfig.entries(),
- expected_kconfig.entries())
+ actual_kconfig = kunit_config.parse_file(kconfig_path)
+ self.assertEqual(actual_kconfig, expected_kconfig)
class KUnitParserTest(unittest.TestCase):
+ def setUp(self):
+ self.print_mock = mock.patch('kunit_printer.Printer.print').start()
+ self.addCleanup(mock.patch.stopall)
- def assertContains(self, needle, haystack):
- for line in haystack:
+ def noPrintCallContains(self, substr: str):
+ for call in self.print_mock.mock_calls:
+ self.assertNotIn(substr, call.args[0])
+
+ def assertContains(self, needle: str, haystack: kunit_parser.LineStream):
+ # Clone the iterator so we can print the contents on failure.
+ copy, backup = itertools.tee(haystack)
+ for line in copy:
if needle in line:
return
- raise AssertionError('"' +
- str(needle) + '" not found in "' + str(haystack) + '"!')
+ raise AssertionError(f'"{needle}" not found in {list(backup)}!')
def test_output_isolated_correctly(self):
- log_path = get_absolute_path(
- 'test_data/test_output_isolated_correctly.log')
- file = open(log_path)
- result = kunit_parser.isolate_kunit_output(file.readlines())
+ log_path = test_data_path('test_output_isolated_correctly.log')
+ with open(log_path) as file:
+ result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
- self.assertContains(' # Subtest: example', result)
- self.assertContains(' 1..2', result)
- self.assertContains(' ok 1 - example_simple_test', result)
- self.assertContains(' ok 2 - example_mock_test', result)
+ self.assertContains('# Subtest: example', result)
+ self.assertContains('1..2', result)
+ self.assertContains('ok 1 - example_simple_test', result)
+ self.assertContains('ok 2 - example_mock_test', result)
self.assertContains('ok 1 - example', result)
- file.close()
def test_output_with_prefix_isolated_correctly(self):
- log_path = get_absolute_path(
- 'test_data/test_pound_sign.log')
+ log_path = test_data_path('test_pound_sign.log')
with open(log_path) as file:
- result = kunit_parser.isolate_kunit_output(file.readlines())
+ result = kunit_parser.extract_tap_lines(file.readlines())
self.assertContains('TAP version 14', result)
- self.assertContains(' # Subtest: kunit-resource-test', result)
- self.assertContains(' 1..5', result)
- self.assertContains(' ok 1 - kunit_resource_test_init_resources', result)
- self.assertContains(' ok 2 - kunit_resource_test_alloc_resource', result)
- self.assertContains(' ok 3 - kunit_resource_test_destroy_resource', result)
- self.assertContains(' foo bar #', result)
- self.assertContains(' ok 4 - kunit_resource_test_cleanup_resources', result)
- self.assertContains(' ok 5 - kunit_resource_test_proper_free_ordering', result)
+ self.assertContains('# Subtest: kunit-resource-test', result)
+ self.assertContains('1..5', result)
+ self.assertContains('ok 1 - kunit_resource_test_init_resources', result)
+ self.assertContains('ok 2 - kunit_resource_test_alloc_resource', result)
+ self.assertContains('ok 3 - kunit_resource_test_destroy_resource', result)
+ self.assertContains('foo bar #', result)
+ self.assertContains('ok 4 - kunit_resource_test_cleanup_resources', result)
+ self.assertContains('ok 5 - kunit_resource_test_proper_free_ordering', result)
self.assertContains('ok 1 - kunit-resource-test', result)
- self.assertContains(' foo bar # non-kunit output', result)
- self.assertContains(' # Subtest: kunit-try-catch-test', result)
- self.assertContains(' 1..2', result)
- self.assertContains(' ok 1 - kunit_test_try_catch_successful_try_no_catch',
+ self.assertContains('foo bar # non-kunit output', result)
+ self.assertContains('# Subtest: kunit-try-catch-test', result)
+ self.assertContains('1..2', result)
+ self.assertContains('ok 1 - kunit_test_try_catch_successful_try_no_catch',
result)
- self.assertContains(' ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch',
+ self.assertContains('ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch',
result)
self.assertContains('ok 2 - kunit-try-catch-test', result)
- self.assertContains(' # Subtest: string-stream-test', result)
- self.assertContains(' 1..3', result)
- self.assertContains(' ok 1 - string_stream_test_empty_on_creation', result)
- self.assertContains(' ok 2 - string_stream_test_not_empty_after_add', result)
- self.assertContains(' ok 3 - string_stream_test_get_string', result)
+ self.assertContains('# Subtest: string-stream-test', result)
+ self.assertContains('1..3', result)
+ self.assertContains('ok 1 - string_stream_test_empty_on_creation', result)
+ self.assertContains('ok 2 - string_stream_test_not_empty_after_add', result)
+ self.assertContains('ok 3 - string_stream_test_get_string', result)
self.assertContains('ok 3 - string-stream-test', result)
def test_parse_successful_test_log(self):
- all_passed_log = get_absolute_path(
- 'test_data/test_is_test_passed-all_passed.log')
- file = open(all_passed_log)
- result = kunit_parser.parse_run_tests(file.readlines())
- self.assertEqual(
- kunit_parser.TestStatus.SUCCESS,
- result.status)
- file.close()
+ all_passed_log = test_data_path('test_is_test_passed-all_passed.log')
+ with open(all_passed_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines(), stdout)
+ self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+ self.assertEqual(result.counts.errors, 0)
+
+ def test_parse_successful_nested_tests_log(self):
+ all_passed_log = test_data_path('test_is_test_passed-all_passed_nested.log')
+ with open(all_passed_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines(), stdout)
+ self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+ self.assertEqual(result.counts.errors, 0)
+
+ def test_kselftest_nested(self):
+ kselftest_log = test_data_path('test_is_test_passed-kselftest.log')
+ with open(kselftest_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines(), stdout)
+ self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+ self.assertEqual(result.counts.errors, 0)
def test_parse_failed_test_log(self):
- failed_log = get_absolute_path(
- 'test_data/test_is_test_passed-failure.log')
- file = open(failed_log)
- result = kunit_parser.parse_run_tests(file.readlines())
- self.assertEqual(
- kunit_parser.TestStatus.FAILURE,
- result.status)
- file.close()
+ failed_log = test_data_path('test_is_test_passed-failure.log')
+ with open(failed_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines(), stdout)
+ self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status)
+ self.assertEqual(result.counts.errors, 0)
+
+ def test_no_header(self):
+ empty_log = test_data_path('test_is_test_passed-no_tests_run_no_header.log')
+ with open(empty_log) as file:
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.extract_tap_lines(file.readlines()), stdout)
+ self.assertEqual(0, len(result.subtests))
+ self.assertEqual(kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS, result.status)
+ self.assertEqual(result.counts.errors, 1)
+
+ def test_missing_test_plan(self):
+ missing_plan_log = test_data_path('test_is_test_passed-'
+ 'missing_plan.log')
+ with open(missing_plan_log) as file:
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.extract_tap_lines(
+ file.readlines()), stdout)
+ # A missing test plan is not an error.
+ self.assertEqual(result.counts, kunit_parser.TestCounts(passed=10, errors=0))
+ self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
def test_no_tests(self):
- empty_log = get_absolute_path(
- 'test_data/test_is_test_passed-no_tests_run.log')
- file = open(empty_log)
- result = kunit_parser.parse_run_tests(
- kunit_parser.isolate_kunit_output(file.readlines()))
- self.assertEqual(0, len(result.suites))
+ header_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log')
+ with open(header_log) as file:
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.extract_tap_lines(file.readlines()), stdout)
+ self.assertEqual(0, len(result.subtests))
+ self.assertEqual(kunit_parser.TestStatus.NO_TESTS, result.status)
+ self.assertEqual(result.counts.errors, 1)
+
+ def test_no_tests_no_plan(self):
+ no_plan_log = test_data_path('test_is_test_passed-no_tests_no_plan.log')
+ with open(no_plan_log) as file:
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.extract_tap_lines(file.readlines()), stdout)
+ self.assertEqual(0, len(result.subtests[0].subtests[0].subtests))
self.assertEqual(
kunit_parser.TestStatus.NO_TESTS,
- result.status)
- file.close()
+ result.subtests[0].subtests[0].status)
+ self.assertEqual(result.counts, kunit_parser.TestCounts(passed=1, errors=1))
+
def test_no_kunit_output(self):
- crash_log = get_absolute_path(
- 'test_data/test_insufficient_memory.log')
- file = open(crash_log)
- print_mock = mock.patch('builtins.print').start()
- result = kunit_parser.parse_run_tests(
- kunit_parser.isolate_kunit_output(file.readlines()))
- print_mock.assert_any_call(StrContains('no tests run!'))
+ crash_log = test_data_path('test_insufficient_memory.log')
+ print_mock = mock.patch('kunit_printer.Printer.print').start()
+ with open(crash_log) as file:
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.extract_tap_lines(file.readlines()), stdout)
+ print_mock.assert_any_call(StrContains('Could not find any KTAP output.'))
print_mock.stop()
- file.close()
+ self.assertEqual(0, len(result.subtests))
+ self.assertEqual(result.counts.errors, 1)
+
+ def test_skipped_test(self):
+ skipped_log = test_data_path('test_skip_tests.log')
+ with open(skipped_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines(), stdout)
+
+ # A skipped test does not fail the whole suite.
+ self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+ self.assertEqual(result.counts, kunit_parser.TestCounts(passed=4, skipped=1))
+
+ def test_skipped_all_tests(self):
+ skipped_log = test_data_path('test_skip_all_tests.log')
+ with open(skipped_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines(), stdout)
- def test_crashed_test(self):
- crashed_log = get_absolute_path(
- 'test_data/test_is_test_passed-crash.log')
- file = open(crashed_log)
- result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual(kunit_parser.TestStatus.SKIPPED, result.status)
+ self.assertEqual(result.counts, kunit_parser.TestCounts(skipped=5))
+
+ def test_ignores_hyphen(self):
+ hyphen_log = test_data_path('test_strip_hyphen.log')
+ with open(hyphen_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines(), stdout)
+
+		# The '- ' separator should be stripped from the parsed test names.
+ self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+ self.assertEqual(
+ "sysctl_test",
+ result.subtests[0].name)
self.assertEqual(
- kunit_parser.TestStatus.TEST_CRASHED,
- result.status)
- file.close()
+ "example",
+ result.subtests[1].name)
def test_ignores_prefix_printk_time(self):
- prefix_log = get_absolute_path(
- 'test_data/test_config_printk_time.log')
+ prefix_log = test_data_path('test_config_printk_time.log')
with open(prefix_log) as file:
- result = kunit_parser.parse_run_tests(file.readlines())
- self.assertEqual(
- kunit_parser.TestStatus.SUCCESS,
- result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ result = kunit_parser.parse_run_tests(file.readlines(), stdout)
+ self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+ self.assertEqual('kunit-resource-test', result.subtests[0].name)
+ self.assertEqual(result.counts.errors, 0)
def test_ignores_multiple_prefixes(self):
- prefix_log = get_absolute_path(
- 'test_data/test_multiple_prefixes.log')
+ prefix_log = test_data_path('test_multiple_prefixes.log')
with open(prefix_log) as file:
- result = kunit_parser.parse_run_tests(file.readlines())
- self.assertEqual(
- kunit_parser.TestStatus.SUCCESS,
- result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ result = kunit_parser.parse_run_tests(file.readlines(), stdout)
+ self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+ self.assertEqual('kunit-resource-test', result.subtests[0].name)
+ self.assertEqual(result.counts.errors, 0)
def test_prefix_mixed_kernel_output(self):
- mixed_prefix_log = get_absolute_path(
- 'test_data/test_interrupted_tap_output.log')
+ mixed_prefix_log = test_data_path('test_interrupted_tap_output.log')
with open(mixed_prefix_log) as file:
- result = kunit_parser.parse_run_tests(file.readlines())
- self.assertEqual(
- kunit_parser.TestStatus.SUCCESS,
- result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ result = kunit_parser.parse_run_tests(file.readlines(), stdout)
+ self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+ self.assertEqual('kunit-resource-test', result.subtests[0].name)
+ self.assertEqual(result.counts.errors, 0)
def test_prefix_poundsign(self):
- pound_log = get_absolute_path('test_data/test_pound_sign.log')
+ pound_log = test_data_path('test_pound_sign.log')
with open(pound_log) as file:
- result = kunit_parser.parse_run_tests(file.readlines())
- self.assertEqual(
- kunit_parser.TestStatus.SUCCESS,
- result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ result = kunit_parser.parse_run_tests(file.readlines(), stdout)
+ self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+ self.assertEqual('kunit-resource-test', result.subtests[0].name)
+ self.assertEqual(result.counts.errors, 0)
def test_kernel_panic_end(self):
- panic_log = get_absolute_path('test_data/test_kernel_panic_interrupt.log')
+ panic_log = test_data_path('test_kernel_panic_interrupt.log')
with open(panic_log) as file:
- result = kunit_parser.parse_run_tests(file.readlines())
- self.assertEqual(
- kunit_parser.TestStatus.TEST_CRASHED,
- result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ result = kunit_parser.parse_run_tests(file.readlines(), stdout)
+ self.assertEqual(kunit_parser.TestStatus.TEST_CRASHED, result.status)
+ self.assertEqual('kunit-resource-test', result.subtests[0].name)
+ self.assertGreaterEqual(result.counts.errors, 1)
def test_pound_no_prefix(self):
- pound_log = get_absolute_path('test_data/test_pound_no_prefix.log')
+ pound_log = test_data_path('test_pound_no_prefix.log')
with open(pound_log) as file:
- result = kunit_parser.parse_run_tests(file.readlines())
- self.assertEqual(
- kunit_parser.TestStatus.SUCCESS,
- result.status)
- self.assertEqual('kunit-resource-test', result.suites[0].name)
+ result = kunit_parser.parse_run_tests(file.readlines(), stdout)
+ self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+ self.assertEqual('kunit-resource-test', result.subtests[0].name)
+ self.assertEqual(result.counts.errors, 0)
+
+ def test_summarize_failures(self):
+ output = """
+ KTAP version 1
+ 1..2
+ # Subtest: all_failed_suite
+ 1..2
+ not ok 1 - test1
+ not ok 2 - test2
+ not ok 1 - all_failed_suite
+ # Subtest: some_failed_suite
+ 1..2
+ ok 1 - test1
+ not ok 2 - test2
+		not ok 2 - some_failed_suite
+ """
+ result = kunit_parser.parse_run_tests(output.splitlines(), stdout)
+ self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status)
+
+ self.assertEqual(kunit_parser._summarize_failed_tests(result),
+ 'Failures: all_failed_suite, some_failed_suite.test2')
+
+ def test_ktap_format(self):
+ ktap_log = test_data_path('test_parse_ktap_output.log')
+ with open(ktap_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines(), stdout)
+ self.assertEqual(result.counts, kunit_parser.TestCounts(passed=3))
+ self.assertEqual('suite', result.subtests[0].name)
+ self.assertEqual('case_1', result.subtests[0].subtests[0].name)
+ self.assertEqual('case_2', result.subtests[0].subtests[1].name)
+
+ def test_parse_subtest_header(self):
+ ktap_log = test_data_path('test_parse_subtest_header.log')
+ with open(ktap_log) as file:
+ kunit_parser.parse_run_tests(file.readlines(), stdout)
+ self.print_mock.assert_any_call(StrContains('suite (1 subtest)'))
+
+ def test_parse_attributes(self):
+ ktap_log = test_data_path('test_parse_attributes.log')
+ with open(ktap_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines(), stdout)
+
+ # Test should pass with no errors
+ self.assertEqual(result.counts, kunit_parser.TestCounts(passed=1, errors=0))
+ self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+
+ # Ensure suite header is parsed correctly
+ self.print_mock.assert_any_call(StrContains('suite (1 subtest)'))
+
+ # Ensure attributes in correct test log
+ self.assertContains('# module: example', result.subtests[0].log)
+ self.assertContains('# test.speed: slow', result.subtests[0].subtests[0].log)
+
+ def test_show_test_output_on_failure(self):
+ output = """
+ KTAP version 1
+ 1..1
+ Test output.
+ Indented more.
+ not ok 1 test1
+ """
+ result = kunit_parser.parse_run_tests(output.splitlines(), stdout)
+ self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status)
+
+ self.print_mock.assert_any_call(StrContains('Test output.'))
+ self.print_mock.assert_any_call(StrContains(' Indented more.'))
+ self.noPrintCallContains('not ok 1 test1')
+
+ def test_parse_late_test_plan(self):
+ output = """
+ TAP version 13
+ ok 4 test4
+ 1..4
+ """
+ result = kunit_parser.parse_run_tests(output.splitlines(), stdout)
+		# Test results missing from a late test plan should be flagged as a suspected crash.
+ self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+ self.assertEqual(result.counts, kunit_parser.TestCounts(passed=1, errors=2))
+
+def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream:
+ return kunit_parser.LineStream(enumerate(strs, start=1))
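+
+# The enumerate(..., start=1) above attaches 1-based line numbers, matching
+# what LineStream.line_number() reports in test_basic below.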
+
+class LineStreamTest(unittest.TestCase):
+
+ def test_basic(self):
+ stream = line_stream_from_strs(['hello', 'world'])
+
+ self.assertTrue(stream, msg='Should be more input')
+ self.assertEqual(stream.line_number(), 1)
+ self.assertEqual(stream.peek(), 'hello')
+ self.assertEqual(stream.pop(), 'hello')
+
+ self.assertTrue(stream, msg='Should be more input')
+ self.assertEqual(stream.line_number(), 2)
+ self.assertEqual(stream.peek(), 'world')
+ self.assertEqual(stream.pop(), 'world')
+
+ self.assertFalse(stream, msg='Should be no more input')
+ with self.assertRaisesRegex(ValueError, 'LineStream: going past EOF'):
+ stream.pop()
+
+ def test_is_lazy(self):
+ called_times = 0
+ def generator():
+ nonlocal called_times
+ for _ in range(1,5):
+ called_times += 1
+ yield called_times, str(called_times)
+
+ stream = kunit_parser.LineStream(generator())
+ self.assertEqual(called_times, 0)
+
+ self.assertEqual(stream.pop(), '1')
+ self.assertEqual(called_times, 1)
+
+ self.assertEqual(stream.pop(), '2')
+ self.assertEqual(called_times, 2)
+
+class LinuxSourceTreeTest(unittest.TestCase):
+
+ def setUp(self):
+ mock.patch.object(signal, 'signal').start()
+ self.addCleanup(mock.patch.stopall)
+
+ def test_invalid_kunitconfig(self):
+ with self.assertRaisesRegex(kunit_kernel.ConfigError, 'nonexistent.* does not exist'):
+ kunit_kernel.LinuxSourceTree('', kunitconfig_paths=['/nonexistent_file'])
+
+ def test_valid_kunitconfig(self):
+ with tempfile.NamedTemporaryFile('wt') as kunitconfig:
+ kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[kunitconfig.name])
+
+ def test_dir_kunitconfig(self):
+ with tempfile.TemporaryDirectory('') as dir:
+ with open(os.path.join(dir, '.kunitconfig'), 'w'):
+ pass
+ kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir])
+
+ def test_multiple_kunitconfig(self):
+ want_kconfig = kunit_config.Kconfig()
+ want_kconfig.add_entry('KUNIT', 'y')
+ want_kconfig.add_entry('KUNIT_TEST', 'm')
+
+ with tempfile.TemporaryDirectory('') as dir:
+ other = os.path.join(dir, 'otherkunitconfig')
+ with open(os.path.join(dir, '.kunitconfig'), 'w') as f:
+ f.write('CONFIG_KUNIT=y')
+ with open(other, 'w') as f:
+ f.write('CONFIG_KUNIT_TEST=m')
+
+ tree = kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir, other])
+ self.assertTrue(want_kconfig.is_subset_of(tree._kconfig), msg=tree._kconfig)
+
+
+ def test_multiple_kunitconfig_invalid(self):
+ with tempfile.TemporaryDirectory('') as dir:
+ other = os.path.join(dir, 'otherkunitconfig')
+ with open(os.path.join(dir, '.kunitconfig'), 'w') as f:
+ f.write('CONFIG_KUNIT=y')
+ with open(other, 'w') as f:
+ f.write('CONFIG_KUNIT=m')
+
+ with self.assertRaisesRegex(kunit_kernel.ConfigError, '(?s)Multiple values.*CONFIG_KUNIT'):
+ kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir, other])
+
+
+ def test_kconfig_add(self):
+ want_kconfig = kunit_config.Kconfig()
+ want_kconfig.add_entry('NOT_REAL', 'y')
+
+ tree = kunit_kernel.LinuxSourceTree('', kconfig_add=['CONFIG_NOT_REAL=y'])
+ self.assertTrue(want_kconfig.is_subset_of(tree._kconfig), msg=tree._kconfig)
+
+ def test_invalid_arch(self):
+ with self.assertRaisesRegex(kunit_kernel.ConfigError, 'not a valid arch, options are.*x86_64'):
+ kunit_kernel.LinuxSourceTree('', arch='invalid')
+
+ def test_run_kernel_hits_exception(self):
+ def fake_start(unused_args, unused_build_dir):
+ return subprocess.Popen(['echo "hi\nbye"'], shell=True, text=True, stdout=subprocess.PIPE)
+
+ with tempfile.TemporaryDirectory('') as build_dir:
+ tree = kunit_kernel.LinuxSourceTree(build_dir)
+ mock.patch.object(tree._ops, 'start', side_effect=fake_start).start()
+
+ with self.assertRaises(ValueError):
+ for line in tree.run_kernel(build_dir=build_dir):
+ self.assertEqual(line, 'hi\n')
+ raise ValueError('uh oh, did not read all output')
+
+ with open(kunit_kernel.get_outfile_path(build_dir), 'rt') as outfile:
+ self.assertEqual(outfile.read(), 'hi\nbye\n', msg='Missing some output')
+
+ def test_build_reconfig_no_config(self):
+ with tempfile.TemporaryDirectory('') as build_dir:
+ with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f:
+ f.write('CONFIG_KUNIT=y')
+
+ tree = kunit_kernel.LinuxSourceTree(build_dir)
+			# Stub out the source tree operations, so the defaults
+			# for any given architecture don't get in the way.
+ tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None)
+ mock_build_config = mock.patch.object(tree, 'build_config').start()
+
+ # Should generate the .config
+ self.assertTrue(tree.build_reconfig(build_dir, make_options=[]))
+ mock_build_config.assert_called_once_with(build_dir, [])
+
+ def test_build_reconfig_existing_config(self):
+ with tempfile.TemporaryDirectory('') as build_dir:
+ # Existing .config is a superset, should not touch it
+ with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f:
+ f.write('CONFIG_KUNIT=y')
+ with open(kunit_kernel.get_old_kunitconfig_path(build_dir), 'w') as f:
+ f.write('CONFIG_KUNIT=y')
+ with open(kunit_kernel.get_kconfig_path(build_dir), 'w') as f:
+ f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y')
+
+ tree = kunit_kernel.LinuxSourceTree(build_dir)
+			# Stub out the source tree operations, so the defaults
+			# for any given architecture don't get in the way.
+ tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None)
+ mock_build_config = mock.patch.object(tree, 'build_config').start()
+
+ self.assertTrue(tree.build_reconfig(build_dir, make_options=[]))
+ self.assertEqual(mock_build_config.call_count, 0)
+
+ def test_build_reconfig_remove_option(self):
+ with tempfile.TemporaryDirectory('') as build_dir:
+ # We removed CONFIG_KUNIT_TEST=y from our .kunitconfig...
+ with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f:
+ f.write('CONFIG_KUNIT=y')
+ with open(kunit_kernel.get_old_kunitconfig_path(build_dir), 'w') as f:
+ f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y')
+ with open(kunit_kernel.get_kconfig_path(build_dir), 'w') as f:
+ f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y')
+
+ tree = kunit_kernel.LinuxSourceTree(build_dir)
+			# Stub out the source tree operations, so the defaults
+			# for any given architecture don't get in the way.
+ tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None)
+ mock_build_config = mock.patch.object(tree, 'build_config').start()
+
+ # ... so we should trigger a call to build_config()
+ self.assertTrue(tree.build_reconfig(build_dir, make_options=[]))
+ mock_build_config.assert_called_once_with(build_dir, [])
+
+ # TODO: add more test cases.
+
class KUnitJsonTest(unittest.TestCase):
+ def setUp(self):
+ self.print_mock = mock.patch('kunit_printer.Printer.print').start()
+ self.addCleanup(mock.patch.stopall)
def _json_for(self, log_file):
- with(open(get_absolute_path(log_file))) as file:
- test_result = kunit_parser.parse_run_tests(file)
+ with open(test_data_path(log_file)) as file:
+ test_result = kunit_parser.parse_run_tests(file, stdout)
json_obj = kunit_json.get_json_result(
- test_result=test_result,
- def_config='kunit_defconfig',
- build_dir=None,
- json_path='stdout')
+ test=test_result,
+ metadata=kunit_json.Metadata())
return json.loads(json_obj)
def test_failed_test_json(self):
- result = self._json_for(
- 'test_data/test_is_test_passed-failure.log')
+ result = self._json_for('test_is_test_passed-failure.log')
self.assertEqual(
{'name': 'example_simple_test', 'status': 'FAIL'},
result["sub_groups"][1]["test_cases"][0])
def test_crashed_test_json(self):
- result = self._json_for(
- 'test_data/test_is_test_passed-crash.log')
+ result = self._json_for('test_kernel_panic_interrupt.log')
self.assertEqual(
- {'name': 'example_simple_test', 'status': 'ERROR'},
- result["sub_groups"][1]["test_cases"][0])
+ {'name': '', 'status': 'ERROR'},
+ result["sub_groups"][2]["test_cases"][1])
+
+ def test_skipped_test_json(self):
+ result = self._json_for('test_skip_tests.log')
+ self.assertEqual(
+ {'name': 'example_skip_test', 'status': 'SKIP'},
+ result["sub_groups"][1]["test_cases"][1])
def test_no_tests_json(self):
- result = self._json_for(
- 'test_data/test_is_test_passed-no_tests_run.log')
+ result = self._json_for('test_is_test_passed-no_tests_run_with_header.log')
self.assertEqual(0, len(result['sub_groups']))
+ def test_nested_json(self):
+ result = self._json_for('test_is_test_passed-all_passed_nested.log')
+ self.assertEqual(
+ {'name': 'example_simple_test', 'status': 'PASS'},
+ result["sub_groups"][0]["sub_groups"][0]["test_cases"][0])
+
class StrContains(str):
def __eq__(self, other):
return self in other
class KUnitMainTest(unittest.TestCase):
def setUp(self):
- path = get_absolute_path('test_data/test_is_test_passed-all_passed.log')
- file = open(path)
- all_passed_log = file.readlines()
- self.print_patch = mock.patch('builtins.print')
- self.print_mock = self.print_patch.start()
- self.linux_source_mock = mock.Mock()
- self.linux_source_mock.build_reconfig = mock.Mock(return_value=True)
- self.linux_source_mock.build_um_kernel = mock.Mock(return_value=True)
- self.linux_source_mock.run_kernel = mock.Mock(return_value=all_passed_log)
-
- def tearDown(self):
- self.print_patch.stop()
- pass
+ path = test_data_path('test_is_test_passed-all_passed.log')
+ with open(path) as file:
+ all_passed_log = file.readlines()
+
+ self.print_mock = mock.patch('kunit_printer.Printer.print').start()
+ self.addCleanup(mock.patch.stopall)
+
+ self.mock_linux_init = mock.patch.object(kunit_kernel, 'LinuxSourceTree').start()
+ self.linux_source_mock = self.mock_linux_init.return_value
+ self.linux_source_mock.build_reconfig.return_value = True
+ self.linux_source_mock.build_kernel.return_value = True
+ self.linux_source_mock.run_kernel.return_value = all_passed_log
def test_config_passes_args_pass(self):
- kunit.main(['config', '--build_dir=.kunit'], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 1
- assert self.linux_source_mock.run_kernel.call_count == 0
+ kunit.main(['config', '--build_dir=.kunit'])
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0)
def test_build_passes_args_pass(self):
- kunit.main(['build'], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 0
- self.linux_source_mock.build_um_kernel.assert_called_once_with(False, 8, '.kunit', None)
- assert self.linux_source_mock.run_kernel.call_count == 0
+ kunit.main(['build'])
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.linux_source_mock.build_kernel.assert_called_once_with(kunit.get_default_jobs(), '.kunit', None)
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0)
def test_exec_passes_args_pass(self):
- kunit.main(['exec'], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 0
- assert self.linux_source_mock.run_kernel.call_count == 1
- self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='.kunit', timeout=300)
+ kunit.main(['exec'])
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0)
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_passes_args_pass(self):
- kunit.main(['run'], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 1
- assert self.linux_source_mock.run_kernel.call_count == 1
+ kunit.main(['run'])
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
- build_dir='.kunit', timeout=300)
+ args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_exec_passes_args_fail(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
with self.assertRaises(SystemExit) as e:
- kunit.main(['exec'], self.linux_source_mock)
- assert type(e.exception) == SystemExit
- assert e.exception.code == 1
+ kunit.main(['exec'])
+ self.assertEqual(e.exception.code, 1)
def test_run_passes_args_fail(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
with self.assertRaises(SystemExit) as e:
- kunit.main(['run'], self.linux_source_mock)
- assert type(e.exception) == SystemExit
- assert e.exception.code == 1
- assert self.linux_source_mock.build_reconfig.call_count == 1
- assert self.linux_source_mock.run_kernel.call_count == 1
- self.print_mock.assert_any_call(StrContains(' 0 tests run'))
+ kunit.main(['run'])
+ self.assertEqual(e.exception.code, 1)
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
+ self.print_mock.assert_any_call(StrContains('Could not find any KTAP output.'))
+
+ def test_exec_no_tests(self):
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=['TAP version 14', '1..0'])
+ with self.assertRaises(SystemExit) as e:
+ kunit.main(['run'])
+ self.assertEqual(e.exception.code, 1)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
+ self.print_mock.assert_any_call(StrContains(' 0 tests run!'))
def test_exec_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
- kunit.main(['exec', '--raw_output'], self.linux_source_mock)
- assert self.linux_source_mock.run_kernel.call_count == 1
- for kall in self.print_mock.call_args_list:
- assert kall != mock.call(StrContains('Testing complete.'))
- assert kall != mock.call(StrContains(' 0 tests run'))
+ kunit.main(['exec', '--raw_output'])
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
+ for call in self.print_mock.call_args_list:
+ self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
+ self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!')))
def test_run_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
- kunit.main(['run', '--raw_output'], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 1
- assert self.linux_source_mock.run_kernel.call_count == 1
- for kall in self.print_mock.call_args_list:
- assert kall != mock.call(StrContains('Testing complete.'))
- assert kall != mock.call(StrContains(' 0 tests run'))
+ kunit.main(['run', '--raw_output'])
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
+ for call in self.print_mock.call_args_list:
+ self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
+ self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!')))
+
+ def test_run_raw_output_kunit(self):
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+ kunit.main(['run', '--raw_output=kunit'])
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
+ for call in self.print_mock.call_args_list:
+ self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
+ self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+
+ def test_run_raw_output_invalid(self):
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+ with self.assertRaises(SystemExit) as e:
+ kunit.main(['run', '--raw_output=invalid'])
+ self.assertNotEqual(e.exception.code, 0)
+
+ def test_run_raw_output_does_not_take_positional_args(self):
+ # --raw_output is a string flag, but we don't want it to consume
+ # any positional arguments, only ones after an '='
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+ kunit.main(['run', '--raw_output', 'filter_glob'])
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ args=None, build_dir='.kunit', filter_glob='filter_glob', filter='', filter_action=None, timeout=300)
def test_exec_timeout(self):
timeout = 3453
- kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock)
- self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='.kunit', timeout=timeout)
+ kunit.main(['exec', '--timeout', str(timeout)])
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=timeout)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_timeout(self):
timeout = 3453
- kunit.main(['run', '--timeout', str(timeout)], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 1
+ kunit.main(['run', '--timeout', str(timeout)])
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
- build_dir='.kunit', timeout=timeout)
+ args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=timeout)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_builddir(self):
build_dir = '.kunit'
- kunit.main(['run', '--build_dir=.kunit'], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 1
+ kunit.main(['run', '--build_dir=.kunit'])
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
- build_dir=build_dir, timeout=300)
+ args=None, build_dir=build_dir, filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_config_builddir(self):
build_dir = '.kunit'
- kunit.main(['config', '--build_dir', build_dir], self.linux_source_mock)
- assert self.linux_source_mock.build_reconfig.call_count == 1
+ kunit.main(['config', '--build_dir', build_dir])
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
def test_build_builddir(self):
build_dir = '.kunit'
- kunit.main(['build', '--build_dir', build_dir], self.linux_source_mock)
- self.linux_source_mock.build_um_kernel.assert_called_once_with(False, 8, build_dir, None)
+ jobs = kunit.get_default_jobs()
+ kunit.main(['build', '--build_dir', build_dir])
+ self.linux_source_mock.build_kernel.assert_called_once_with(jobs, build_dir, None)
def test_exec_builddir(self):
build_dir = '.kunit'
- kunit.main(['exec', '--build_dir', build_dir], self.linux_source_mock)
- self.linux_source_mock.run_kernel.assert_called_once_with(build_dir=build_dir, timeout=300)
+ kunit.main(['exec', '--build_dir', build_dir])
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ args=None, build_dir=build_dir, filter_glob='', filter='', filter_action=None, timeout=300)
+ self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
+ def test_run_kunitconfig(self):
+ kunit.main(['run', '--kunitconfig=mykunitconfig'])
+ # Just verify that we parsed and initialized it correctly here.
+ self.mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_paths=['mykunitconfig'],
+ kconfig_add=None,
+ arch='um',
+ cross_compile=None,
+ qemu_config_path=None,
+ extra_qemu_args=[])
+
+ def test_config_kunitconfig(self):
+ kunit.main(['config', '--kunitconfig=mykunitconfig'])
+ # Just verify that we parsed and initialized it correctly here.
+ self.mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_paths=['mykunitconfig'],
+ kconfig_add=None,
+ arch='um',
+ cross_compile=None,
+ qemu_config_path=None,
+ extra_qemu_args=[])
+
+ def test_config_alltests(self):
+ kunit.main(['config', '--kunitconfig=mykunitconfig', '--alltests'])
+ # Just verify that we parsed and initialized it correctly here.
+ self.mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_paths=[kunit_kernel.ALL_TESTS_CONFIG_PATH, 'mykunitconfig'],
+ kconfig_add=None,
+ arch='um',
+ cross_compile=None,
+ qemu_config_path=None,
+ extra_qemu_args=[])
+
+
+ @mock.patch.object(kunit_kernel, 'LinuxSourceTree')
+ def test_run_multiple_kunitconfig(self, mock_linux_init):
+ mock_linux_init.return_value = self.linux_source_mock
+ kunit.main(['run', '--kunitconfig=mykunitconfig', '--kunitconfig=other'])
+ # Just verify that we parsed and initialized it correctly here.
+ mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_paths=['mykunitconfig', 'other'],
+ kconfig_add=None,
+ arch='um',
+ cross_compile=None,
+ qemu_config_path=None,
+ extra_qemu_args=[])
+
+ def test_run_kconfig_add(self):
+ kunit.main(['run', '--kconfig_add=CONFIG_KASAN=y', '--kconfig_add=CONFIG_KCSAN=y'])
+ # Just verify that we parsed and initialized it correctly here.
+ self.mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_paths=[],
+ kconfig_add=['CONFIG_KASAN=y', 'CONFIG_KCSAN=y'],
+ arch='um',
+ cross_compile=None,
+ qemu_config_path=None,
+ extra_qemu_args=[])
+
+ def test_run_qemu_args(self):
+ kunit.main(['run', '--arch=x86_64', '--qemu_args', '-m 2048'])
+ # Just verify that we parsed and initialized it correctly here.
+ self.mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_paths=[],
+ kconfig_add=None,
+ arch='x86_64',
+ cross_compile=None,
+ qemu_config_path=None,
+ extra_qemu_args=['-m', '2048'])
+
+ def test_run_kernel_args(self):
+ kunit.main(['run', '--kernel_args=a=1', '--kernel_args=b=2'])
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ args=['a=1','b=2'], build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
+ def test_list_tests(self):
+ want = ['suite.test1', 'suite.test2', 'suite2.test1']
+ self.linux_source_mock.run_kernel.return_value = ['TAP version 14', 'init: random output'] + want
+
+ got = kunit._list_tests(self.linux_source_mock,
+ kunit.KunitExecRequest(None, None, False, False, '.kunit', 300, 'suite*', '', None, None, 'suite', False, False))
+ self.assertEqual(got, want)
+ # Should respect the user's filter glob when listing tests.
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ args=['kunit.action=list'], build_dir='.kunit', filter_glob='suite*', filter='', filter_action=None, timeout=300)
+
+ @mock.patch.object(kunit, '_list_tests')
+ def test_run_isolated_by_suite(self, mock_tests):
+ mock_tests.return_value = ['suite.test1', 'suite.test2', 'suite2.test1']
+ kunit.main(['exec', '--run_isolated=suite', 'suite*.test*'])
+
+ # Should respect the user's filter glob when listing tests.
+ mock_tests.assert_called_once_with(mock.ANY,
+ kunit.KunitExecRequest(None, None, False, False, '.kunit', 300, 'suite*.test*', '', None, None, 'suite', False, False))
+ self.linux_source_mock.run_kernel.assert_has_calls([
+ mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', filter='', filter_action=None, timeout=300),
+ mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', filter='', filter_action=None, timeout=300),
+ ])
+
+ @mock.patch.object(kunit, '_list_tests')
+ def test_run_isolated_by_test(self, mock_tests):
+ mock_tests.return_value = ['suite.test1', 'suite.test2', 'suite2.test1']
+ kunit.main(['exec', '--run_isolated=test', 'suite*'])
+
+ # Should respect the user's filter glob when listing tests.
+ mock_tests.assert_called_once_with(mock.ANY,
+ kunit.KunitExecRequest(None, None, False, False, '.kunit', 300, 'suite*', '', None, None, 'test', False, False))
+ self.linux_source_mock.run_kernel.assert_has_calls([
+ mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', filter='', filter_action=None, timeout=300),
+ mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', filter='', filter_action=None, timeout=300),
+ mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test1', filter='', filter_action=None, timeout=300),
+ ])
+
if __name__ == '__main__':
unittest.main()
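For readers skimming the assertions above, the glob expansion performed by
--run_isolated can be summarized in a small standalone sketch. The helper
below is hypothetical, written only to mirror the behavior the tests assert,
not the tool's actual implementation:

from typing import List

def isolate_globs(tests: List[str], by: str) -> List[str]:
	# 'test' mode boots the kernel once per individual test case;
	# 'suite' mode boots once per suite, using a per-suite 'suite.test*' glob.
	if by == 'test':
		return tests
	suites = sorted({t.split('.', 1)[0] for t in tests})
	return [s + '.test*' for s in suites]

# Matches the run_kernel() calls asserted in test_run_isolated_by_suite above:
assert isolate_globs(['suite.test1', 'suite.test2', 'suite2.test1'],
		     'suite') == ['suite.test*', 'suite2.test*']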
diff --git a/tools/testing/kunit/mypy.ini b/tools/testing/kunit/mypy.ini
new file mode 100644
index 000000000000..ddd288309efa
--- /dev/null
+++ b/tools/testing/kunit/mypy.ini
@@ -0,0 +1,6 @@
+[mypy]
+strict = True
+
+# E.g. we can't write subprocess.Popen[str] until Python 3.9+.
+# But kunit.py tries to support Python 3.7+, so let's disable it.
+disable_error_code = type-arg
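To illustrate the error code being disabled, a minimal example for this note
(not part of the patch): under strict mode, mypy flags the unparameterized
generic below with `Missing type parameters for generic type "Popen"
[type-arg]`, while the parameterized spelling would crash at runtime on the
Python 3.7/3.8 interpreters kunit.py still supports.

import subprocess
from typing import List

def spawn(argv: List[str]) -> subprocess.Popen:
	# mypy --strict reports [type-arg] here; annotating the return type as
	# subprocess.Popen[str] instead raises "TypeError: 'type' object is
	# not subscriptable" when this def is evaluated on Python < 3.9.
	return subprocess.Popen(argv)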
diff --git a/tools/testing/kunit/qemu_config.py b/tools/testing/kunit/qemu_config.py
new file mode 100644
index 000000000000..b1fba9016eed
--- /dev/null
+++ b/tools/testing/kunit/qemu_config.py
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Collection of configs for building non-UML kernels and running them on QEMU.
+#
+# Copyright (C) 2021, Google LLC.
+# Author: Brendan Higgins <brendanhiggins@google.com>
+
+from dataclasses import dataclass
+from typing import List
+
+
+@dataclass(frozen=True)
+class QemuArchParams:
+ linux_arch: str
+ kconfig: str
+ qemu_arch: str
+ kernel_path: str
+ kernel_command_line: str
+ extra_qemu_params: List[str]
+ serial: str = 'stdio'
diff --git a/tools/testing/kunit/qemu_configs/alpha.py b/tools/testing/kunit/qemu_configs/alpha.py
new file mode 100644
index 000000000000..3ac846e03a6b
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/alpha.py
@@ -0,0 +1,10 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='alpha',
+ kconfig='''
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y''',
+ qemu_arch='alpha',
+ kernel_path='arch/alpha/boot/vmlinux',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=[])
diff --git a/tools/testing/kunit/qemu_configs/arm.py b/tools/testing/kunit/qemu_configs/arm.py
new file mode 100644
index 000000000000..db2160200566
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/arm.py
@@ -0,0 +1,13 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='arm',
+ kconfig='''
+CONFIG_ARCH_VIRT=y
+CONFIG_SERIAL_AMBA_PL010=y
+CONFIG_SERIAL_AMBA_PL010_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''',
+ qemu_arch='arm',
+ kernel_path='arch/arm/boot/zImage',
+ kernel_command_line='console=ttyAMA0',
+ extra_qemu_params=['-machine', 'virt'])
diff --git a/tools/testing/kunit/qemu_configs/arm64.py b/tools/testing/kunit/qemu_configs/arm64.py
new file mode 100644
index 000000000000..5c44d3a87e6d
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/arm64.py
@@ -0,0 +1,12 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='arm64',
+ kconfig='''
+CONFIG_SERIAL_AMBA_PL010=y
+CONFIG_SERIAL_AMBA_PL010_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''',
+ qemu_arch='aarch64',
+ kernel_path='arch/arm64/boot/Image.gz',
+ kernel_command_line='console=ttyAMA0',
+ extra_qemu_params=['-machine', 'virt', '-cpu', 'max'])
diff --git a/tools/testing/kunit/qemu_configs/i386.py b/tools/testing/kunit/qemu_configs/i386.py
new file mode 100644
index 000000000000..4463ebefd567
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/i386.py
@@ -0,0 +1,10 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='i386',
+ kconfig='''
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y''',
+ qemu_arch='i386',
+ kernel_path='arch/x86/boot/bzImage',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=[])
diff --git a/tools/testing/kunit/qemu_configs/loongarch.py b/tools/testing/kunit/qemu_configs/loongarch.py
new file mode 100644
index 000000000000..a92422967d1d
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/loongarch.py
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='loongarch',
+ kconfig='''
+CONFIG_EFI_STUB=n
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PVPANIC=y
+CONFIG_PVPANIC_PCI=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+''',
+ qemu_arch='loongarch64',
+ kernel_path='arch/loongarch/boot/vmlinux.elf',
+ kernel_command_line='console=ttyS0 kunit_shutdown=poweroff',
+ extra_qemu_params=[
+ '-machine', 'virt',
+ '-device', 'pvpanic-pci',
+ '-cpu', 'max',])
diff --git a/tools/testing/kunit/qemu_configs/m68k.py b/tools/testing/kunit/qemu_configs/m68k.py
new file mode 100644
index 000000000000..287fc386f8a7
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/m68k.py
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='m68k',
+ kconfig='''
+CONFIG_VIRT=y''',
+ qemu_arch='m68k',
+ kernel_path='vmlinux',
+ kernel_command_line='console=hvc0',
+ extra_qemu_params=['-machine', 'virt'])
diff --git a/tools/testing/kunit/qemu_configs/mips.py b/tools/testing/kunit/qemu_configs/mips.py
new file mode 100644
index 000000000000..8899ac157b30
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/mips.py
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='mips',
+ kconfig='''
+CONFIG_32BIT=y
+CONFIG_CPU_BIG_ENDIAN=y
+CONFIG_MIPS_MALTA=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+''',
+ qemu_arch='mips',
+ kernel_path='vmlinuz',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=['-M', 'malta'])
diff --git a/tools/testing/kunit/qemu_configs/mips64.py b/tools/testing/kunit/qemu_configs/mips64.py
new file mode 100644
index 000000000000..1478aed05b94
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/mips64.py
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='mips',
+ kconfig='''
+CONFIG_CPU_MIPS64_R2=y
+CONFIG_64BIT=y
+CONFIG_CPU_BIG_ENDIAN=y
+CONFIG_MIPS_MALTA=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+''',
+ qemu_arch='mips64',
+ kernel_path='vmlinuz',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=['-M', 'malta', '-cpu', '5KEc'])
diff --git a/tools/testing/kunit/qemu_configs/mips64el.py b/tools/testing/kunit/qemu_configs/mips64el.py
new file mode 100644
index 000000000000..300c711d7a82
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/mips64el.py
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='mips',
+ kconfig='''
+CONFIG_CPU_MIPS64_R2=y
+CONFIG_64BIT=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_MIPS_MALTA=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+''',
+ qemu_arch='mips64el',
+ kernel_path='vmlinuz',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=['-M', 'malta', '-cpu', '5KEc'])
diff --git a/tools/testing/kunit/qemu_configs/mipsel.py b/tools/testing/kunit/qemu_configs/mipsel.py
new file mode 100644
index 000000000000..3d3543315b45
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/mipsel.py
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='mips',
+ kconfig='''
+CONFIG_32BIT=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_MIPS_MALTA=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+''',
+ qemu_arch='mipsel',
+ kernel_path='vmlinuz',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=['-M', 'malta'])
diff --git a/tools/testing/kunit/qemu_configs/powerpc.py b/tools/testing/kunit/qemu_configs/powerpc.py
new file mode 100644
index 000000000000..5b4c895d5d5a
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/powerpc.py
@@ -0,0 +1,13 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='powerpc',
+ kconfig='''
+CONFIG_PPC64=y
+CONFIG_CPU_BIG_ENDIAN=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HVC_CONSOLE=y''',
+ qemu_arch='ppc64',
+ kernel_path='vmlinux',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=['-M', 'pseries', '-cpu', 'power8'])
diff --git a/tools/testing/kunit/qemu_configs/powerpc32.py b/tools/testing/kunit/qemu_configs/powerpc32.py
new file mode 100644
index 000000000000..88bd60dbb948
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/powerpc32.py
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='powerpc',
+ kconfig='''
+CONFIG_PPC32=y
+CONFIG_CPU_BIG_ENDIAN=y
+CONFIG_ADB_CUDA=y
+CONFIG_SERIAL_PMACZILOG=y
+CONFIG_SERIAL_PMACZILOG_TTYS=y
+CONFIG_SERIAL_PMACZILOG_CONSOLE=y
+''',
+ qemu_arch='ppc',
+ kernel_path='vmlinux',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=['-M', 'g3beige', '-cpu', 'max'])
diff --git a/tools/testing/kunit/qemu_configs/powerpcle.py b/tools/testing/kunit/qemu_configs/powerpcle.py
new file mode 100644
index 000000000000..7ddee8af4bd7
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/powerpcle.py
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='powerpc',
+ kconfig='''
+CONFIG_PPC64=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_HVC_CONSOLE=y
+''',
+ qemu_arch='ppc64',
+ kernel_path='vmlinux',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=['-M', 'pseries', '-cpu', 'power8'])
diff --git a/tools/testing/kunit/qemu_configs/riscv.py b/tools/testing/kunit/qemu_configs/riscv.py
new file mode 100644
index 000000000000..c87758030ff7
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/riscv.py
@@ -0,0 +1,28 @@
+from ..qemu_config import QemuArchParams
+import os
+import os.path
+import sys
+
+OPENSBI_FILE = 'opensbi-riscv64-generic-fw_dynamic.bin'
+OPENSBI_PATH = '/usr/share/qemu/' + OPENSBI_FILE
+
+if not os.path.isfile(OPENSBI_PATH):
+ print('\n\nOpenSBI bios was not found in "' + OPENSBI_PATH + '".\n'
+ 'Please ensure that qemu-system-riscv is installed, or edit the path in "qemu_configs/riscv.py"\n')
+ sys.exit()
+
+QEMU_ARCH = QemuArchParams(linux_arch='riscv',
+ kconfig='''
+CONFIG_ARCH_VIRT=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_RISCV_SBI_V01=y
+CONFIG_SERIAL_EARLYCON_RISCV_SBI=y''',
+ qemu_arch='riscv64',
+ kernel_path='arch/riscv/boot/Image',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=[
+ '-machine', 'virt',
+ '-cpu', 'rv64',
+ '-bios', OPENSBI_PATH])
diff --git a/tools/testing/kunit/qemu_configs/riscv32.py b/tools/testing/kunit/qemu_configs/riscv32.py
new file mode 100644
index 000000000000..b79ba0ae30f8
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/riscv32.py
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='riscv',
+ kconfig='''
+CONFIG_NONPORTABLE=y
+CONFIG_ARCH_RV32I=y
+CONFIG_ARCH_VIRT=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+''',
+ qemu_arch='riscv32',
+ kernel_path='arch/riscv/boot/Image',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=['-machine', 'virt'])
diff --git a/tools/testing/kunit/qemu_configs/s390.py b/tools/testing/kunit/qemu_configs/s390.py
new file mode 100644
index 000000000000..98fa4fb60c0a
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/s390.py
@@ -0,0 +1,14 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='s390',
+ kconfig='''
+CONFIG_EXPERT=y
+CONFIG_TUNE_ZEC12=y
+CONFIG_NUMA=y
+CONFIG_MODULES=y''',
+ qemu_arch='s390x',
+ kernel_path='arch/s390/boot/bzImage',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=[
+ '-machine', 's390-ccw-virtio',
+ '-cpu', 'qemu',])
diff --git a/tools/testing/kunit/qemu_configs/sh.py b/tools/testing/kunit/qemu_configs/sh.py
new file mode 100644
index 000000000000..f00cb89fdef6
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/sh.py
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='sh',
+ kconfig='''
+CONFIG_CPU_SUBTYPE_SH7751R=y
+CONFIG_MEMORY_START=0x0c000000
+CONFIG_SH_RTS7751R2D=y
+CONFIG_RTS7751R2D_PLUS=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_CMDLINE_EXTEND=y
+''',
+ qemu_arch='sh4',
+ kernel_path='arch/sh/boot/zImage',
+ kernel_command_line='console=ttySC1',
+ serial='null',
+ extra_qemu_params=[
+ '-machine', 'r2d',
+ '-serial', 'mon:stdio'])
diff --git a/tools/testing/kunit/qemu_configs/sparc.py b/tools/testing/kunit/qemu_configs/sparc.py
new file mode 100644
index 000000000000..2019550a1b69
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/sparc.py
@@ -0,0 +1,13 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='sparc',
+ kconfig='''
+CONFIG_KUNIT_FAULT_TEST=n
+CONFIG_SPARC32=y
+CONFIG_SERIAL_SUNZILOG=y
+CONFIG_SERIAL_SUNZILOG_CONSOLE=y
+''',
+ qemu_arch='sparc',
+ kernel_path='arch/sparc/boot/zImage',
+ kernel_command_line='console=ttyS0 mem=256M',
+ extra_qemu_params=['-m', '256'])
diff --git a/tools/testing/kunit/qemu_configs/sparc64.py b/tools/testing/kunit/qemu_configs/sparc64.py
new file mode 100644
index 000000000000..53d4e5a8c972
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/sparc64.py
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='sparc',
+ kconfig='''
+CONFIG_64BIT=y
+CONFIG_SPARC64=y
+CONFIG_PCI=y
+CONFIG_SERIAL_SUNSU=y
+CONFIG_SERIAL_SUNSU_CONSOLE=y
+''',
+ qemu_arch='sparc64',
+ kernel_path='arch/sparc/boot/image',
+ kernel_command_line='console=ttyS0 kunit_shutdown=poweroff',
+ extra_qemu_params=[])
diff --git a/tools/testing/kunit/qemu_configs/x86_64.py b/tools/testing/kunit/qemu_configs/x86_64.py
new file mode 100644
index 000000000000..4a6bf4e048f5
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/x86_64.py
@@ -0,0 +1,12 @@
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='x86_64',
+ kconfig='''
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y''',
+ qemu_arch='x86_64',
+ kernel_path='arch/x86/boot/bzImage',
+ kernel_command_line='console=ttyS0',
+ # qboot is faster than SeaBIOS and doesn't mess up
+ # the terminal.
+ extra_qemu_params=['-bios', 'qboot.rom'])
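The modules above are the in-tree configs; kunit.py's --qemu_config flag also
accepts an out-of-tree file of the same shape. A minimal sketch follows; the
flag and the import line mirror the in-tree files, but the file name and the
parameter values are only an example, not part of the patch. The file is
loaded by kunit.py rather than run directly.

# my_qemu_config.py, passed via:
#   ./tools/testing/kunit/kunit.py run --qemu_config=./my_qemu_config.py
from ..qemu_config import QemuArchParams

QEMU_ARCH = QemuArchParams(linux_arch='x86_64',
			   kconfig='''
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y''',
			   qemu_arch='x86_64',
			   kernel_path='arch/x86/boot/bzImage',
			   kernel_command_line='console=ttyS0',
			   # Give the guest a bit more RAM than the default.
			   extra_qemu_params=['-m', '1024'])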
diff --git a/tools/testing/kunit/run_checks.py b/tools/testing/kunit/run_checks.py
new file mode 100755
index 000000000000..c6d494ea3373
--- /dev/null
+++ b/tools/testing/kunit/run_checks.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# This file runs some basic checks to verify kunit works.
+# It is only of interest if you're making changes to KUnit itself.
+#
+# Copyright (C) 2021, Google LLC.
+# Author: Daniel Latypov <dlatypov@google.com>
+
+from concurrent import futures
+import datetime
+import os
+import shutil
+import subprocess
+import sys
+import textwrap
+from typing import Dict, List, Sequence
+
+ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__))
+TIMEOUT = datetime.timedelta(minutes=5).total_seconds()
+
+commands: Dict[str, Sequence[str]] = {
+ 'kunit_tool_test.py': ['./kunit_tool_test.py'],
+ 'kunit smoke test': ['./kunit.py', 'run', '--kunitconfig=lib/kunit', '--build_dir=kunit_run_checks'],
+ 'pytype': ['/bin/sh', '-c', 'pytype *.py'],
+ 'mypy': ['mypy', '--config-file', 'mypy.ini', '--exclude', '_test.py$', '--exclude', 'qemu_configs/', '.'],
+}
+
+# The user might not have mypy or pytype installed; skip them if so.
+# Note: you can install both via `$ pip install mypy pytype`
+necessary_deps: Dict[str, str] = {
+ 'pytype': 'pytype',
+ 'mypy': 'mypy',
+}
+
+def main(argv: Sequence[str]) -> None:
+ if argv:
+ raise RuntimeError('This script takes no arguments')
+
+ future_to_name: Dict[futures.Future[None], str] = {}
+ executor = futures.ThreadPoolExecutor(max_workers=len(commands))
+ for name, argv in commands.items():
+ if name in necessary_deps and shutil.which(necessary_deps[name]) is None:
+ print(f'{name}: SKIPPED, {necessary_deps[name]} not in $PATH')
+ continue
+ f = executor.submit(run_cmd, argv)
+ future_to_name[f] = name
+
+ has_failures = False
+ print(f'Waiting on {len(future_to_name)} checks ({", ".join(future_to_name.values())})...')
+ for f in futures.as_completed(future_to_name.keys()):
+ name = future_to_name[f]
+ ex = f.exception()
+ if not ex:
+ print(f'{name}: PASSED')
+ continue
+
+ has_failures = True
+ if isinstance(ex, subprocess.TimeoutExpired):
+ print(f'{name}: TIMED OUT')
+ elif isinstance(ex, subprocess.CalledProcessError):
+ print(f'{name}: FAILED')
+ else:
+ print(f'{name}: unexpected exception: {ex}')
+ continue
+
+ output = ex.output
+ if output:
+ print(textwrap.indent(output.decode(), '> '))
+ executor.shutdown()
+
+ if has_failures:
+ sys.exit(1)
+
+
+def run_cmd(argv: Sequence[str]) -> None:
+ subprocess.check_output(argv, stderr=subprocess.STDOUT, cwd=ABS_TOOL_PATH, timeout=TIMEOUT)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
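As a usage note for the script above: wiring in another check only means
extending the two dictionaries near the top. ruff is a real third-party
linter, but adding it here is a hypothetical sketch, not part of the patch.

# Runs `ruff check .` alongside the other checks; like mypy and pytype it is
# skipped automatically when the binary is not in $PATH.
commands['ruff'] = ['ruff', 'check', '.']
necessary_deps['ruff'] = 'ruff'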
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
new file mode 100644
index 000000000000..9d5b04fe43a6
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
@@ -0,0 +1,34 @@
+TAP version 14
+1..2
+ # Subtest: sysctl_test
+ 1..4
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+ # Subtest: example
+ 1..2
+ init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+ kunit example: all tests passed
+ ok 2 - example
+ # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
+ ok 3 - sysctl_test_dointvec_table_len_is_zero
+ # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
+ ok 4 - sysctl_test_dointvec_table_read_but_position_set
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: all tests passed
+ok 2 - example
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-crash.log b/tools/testing/kunit/test_data/test_is_test_passed-crash.log
deleted file mode 100644
index 4d97f6708c4a..000000000000
--- a/tools/testing/kunit/test_data/test_is_test_passed-crash.log
+++ /dev/null
@@ -1,70 +0,0 @@
-printk: console [tty0] enabled
-printk: console [mc-1] enabled
-TAP version 14
-1..2
- # Subtest: sysctl_test
- 1..8
- # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
- ok 1 - sysctl_test_dointvec_null_tbl_data
- # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed
- ok 2 - sysctl_test_dointvec_table_maxlen_unset
- # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
- ok 3 - sysctl_test_dointvec_table_len_is_zero
- # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
- ok 4 - sysctl_test_dointvec_table_read_but_position_set
- # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed
- ok 5 - sysctl_test_dointvec_happy_single_positive
- # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed
- ok 6 - sysctl_test_dointvec_happy_single_negative
- # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed
- ok 7 - sysctl_test_dointvec_single_less_int_min
- # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed
- ok 8 - sysctl_test_dointvec_single_greater_int_max
-kunit sysctl_test: all tests passed
-ok 1 - sysctl_test
- # Subtest: example
- 1..2
-init_suite
- # example_simple_test: initializing
-Stack:
- 6016f7db 6f81bd30 6f81bdd0 60021450
- 6024b0e8 60021440 60018bbe 16f81bdc0
- 00000001 6f81bd30 6f81bd20 6f81bdd0
-Call Trace:
- [<6016f7db>] ? kunit_try_run_case+0xab/0xf0
- [<60021450>] ? set_signals+0x0/0x60
- [<60021440>] ? get_signals+0x0/0x10
- [<60018bbe>] ? kunit_um_run_try_catch+0x5e/0xc0
- [<60021450>] ? set_signals+0x0/0x60
- [<60021440>] ? get_signals+0x0/0x10
- [<60018bb3>] ? kunit_um_run_try_catch+0x53/0xc0
- [<6016f321>] ? kunit_run_case_catch_errors+0x121/0x1a0
- [<60018b60>] ? kunit_um_run_try_catch+0x0/0xc0
- [<600189e0>] ? kunit_um_throw+0x0/0x180
- [<6016f730>] ? kunit_try_run_case+0x0/0xf0
- [<6016f600>] ? kunit_catch_run_case+0x0/0x130
- [<6016edd0>] ? kunit_vprintk+0x0/0x30
- [<6016ece0>] ? kunit_fail+0x0/0x40
- [<6016eca0>] ? kunit_abort+0x0/0x40
- [<6016ed20>] ? kunit_printk_emit+0x0/0xb0
- [<6016f200>] ? kunit_run_case_catch_errors+0x0/0x1a0
- [<6016f46e>] ? kunit_run_tests+0xce/0x260
- [<6005b390>] ? unregister_console+0x0/0x190
- [<60175b70>] ? suite_kunit_initexample_test_suite+0x0/0x20
- [<60001cbb>] ? do_one_initcall+0x0/0x197
- [<60001d47>] ? do_one_initcall+0x8c/0x197
- [<6005cd20>] ? irq_to_desc+0x0/0x30
- [<60002005>] ? kernel_init_freeable+0x1b3/0x272
- [<6005c5ec>] ? printk+0x0/0x9b
- [<601c0086>] ? kernel_init+0x26/0x160
- [<60014442>] ? new_thread_handler+0x82/0xc0
-
- # example_simple_test: kunit test case crashed!
- # example_simple_test: example_simple_test failed
- not ok 1 - example_simple_test
- # example_mock_test: initializing
- # example_mock_test: example_mock_test passed
- ok 2 - example_mock_test
-kunit example: one or more tests failed
-not ok 2 - example
-List of all partitions:
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
new file mode 100644
index 000000000000..30d9ef18bcec
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
@@ -0,0 +1,15 @@
+TAP version 13
+1..3
+# selftests: membarrier: membarrier_test_single_thread
+# TAP version 13
+# 1..2
+# ok 1 sys_membarrier available
+# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected
+ok 1 selftests: membarrier: membarrier_test_single_thread
+# selftests: membarrier: membarrier_test_multi_thread
+# TAP version 13
+# 1..2
+# ok 1 sys_membarrier available
+# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected
+ok 2 selftests: membarrier: membarrier_test_multi_thread
+ok 3 # SKIP selftests: membarrier: membarrier_test_multi_thread
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
new file mode 100644
index 000000000000..5cd17b7f818a
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
@@ -0,0 +1,31 @@
+KTAP version 1
+ # Subtest: sysctl_test
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+ # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed
+ ok 2 - sysctl_test_dointvec_table_maxlen_unset
+ # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
+ ok 3 - sysctl_test_dointvec_table_len_is_zero
+ # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
+ ok 4 - sysctl_test_dointvec_table_read_but_position_set
+ # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed
+ ok 5 - sysctl_test_dointvec_happy_single_positive
+ # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed
+ ok 6 - sysctl_test_dointvec_happy_single_negative
+ # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed
+ ok 7 - sysctl_test_dointvec_single_less_int_min
+ # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed
+ ok 8 - sysctl_test_dointvec_single_greater_int_max
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..2
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 - example_simple_test
+ # example_mock_test: initializing
+ # example_mock_test: example_mock_test passed
+ ok 2 - example_mock_test
+kunit example: all tests passed
+ok 2 - example
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
new file mode 100644
index 000000000000..4f81876ee6f1
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
@@ -0,0 +1,7 @@
+TAP version 14
+1..1
+ # Subtest: suite
+ 1..1
+ # Subtest: case
+ ok 1 - case
+ok 1 - suite
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log
index ba69f5c94b75..ba69f5c94b75 100644
--- a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log
+++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log
new file mode 100644
index 000000000000..5f48ee659d40
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log
@@ -0,0 +1,2 @@
+TAP version 14
+1..0
diff --git a/tools/testing/kunit/test_data/test_parse_attributes.log b/tools/testing/kunit/test_data/test_parse_attributes.log
new file mode 100644
index 000000000000..1a13c371fe9d
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_parse_attributes.log
@@ -0,0 +1,9 @@
+KTAP version 1
+1..1
+ KTAP version 1
+ # Subtest: suite
+ # module: example
+ 1..1
+ # test.speed: slow
+ ok 1 test
+ok 1 suite
\ No newline at end of file
diff --git a/tools/testing/kunit/test_data/test_parse_ktap_output.log b/tools/testing/kunit/test_data/test_parse_ktap_output.log
new file mode 100644
index 000000000000..ccdf244e5303
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_parse_ktap_output.log
@@ -0,0 +1,8 @@
+KTAP version 1
+1..1
+ KTAP version 1
+ 1..3
+ ok 1 case_1
+ ok 2 case_2
+ ok 3 case_3
+ok 1 suite
diff --git a/tools/testing/kunit/test_data/test_parse_subtest_header.log b/tools/testing/kunit/test_data/test_parse_subtest_header.log
new file mode 100644
index 000000000000..216631092e7b
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_parse_subtest_header.log
@@ -0,0 +1,7 @@
+KTAP version 1
+1..1
+ KTAP version 1
+ # Subtest: suite
+ 1..1
+ ok 1 test
+ok 1 suite
\ No newline at end of file
diff --git a/tools/testing/kunit/test_data/test_skip_all_tests.log b/tools/testing/kunit/test_data/test_skip_all_tests.log
new file mode 100644
index 000000000000..2ea6e6d14fff
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_skip_all_tests.log
@@ -0,0 +1,15 @@
+TAP version 14
+1..2
+ # Subtest: string-stream-test
+ 1..3
+ ok 1 - string_stream_test_empty_on_creation # SKIP all tests skipped
+ ok 2 - string_stream_test_not_empty_after_add # SKIP all tests skipped
+ ok 3 - string_stream_test_get_string # SKIP all tests skipped
+ok 1 - string-stream-test # SKIP
+ # Subtest: example
+ 1..2
+ # example_simple_test: initializing
+ ok 1 - example_simple_test # SKIP all tests skipped
+ # example_skip_test: initializing
+ ok 2 - example_skip_test # SKIP this test should be skipped
+ok 2 - example # SKIP
diff --git a/tools/testing/kunit/test_data/test_skip_tests.log b/tools/testing/kunit/test_data/test_skip_tests.log
new file mode 100644
index 000000000000..79b326e31274
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_skip_tests.log
@@ -0,0 +1,15 @@
+TAP version 14
+1..2
+ # Subtest: string-stream-test
+ 1..3
+ ok 1 - string_stream_test_empty_on_creation
+ ok 2 - string_stream_test_not_empty_after_add
+ ok 3 - string_stream_test_get_string
+ok 1 - string-stream-test
+ # Subtest: example
+ 1..2
+ # example_simple_test: initializing
+ ok 1 - example_simple_test
+ # example_skip_test: initializing
+ ok 2 - example_skip_test # SKIP this test should be skipped
+ok 2 - example
diff --git a/tools/testing/kunit/test_data/test_strip_hyphen.log b/tools/testing/kunit/test_data/test_strip_hyphen.log
new file mode 100644
index 000000000000..92ac7c24b374
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_strip_hyphen.log
@@ -0,0 +1,16 @@
+KTAP version 1
+1..2
+ # Subtest: sysctl_test
+ 1..1
+ # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
+ ok 1 - sysctl_test_dointvec_null_tbl_data
+kunit sysctl_test: all tests passed
+ok 1 - sysctl_test
+ # Subtest: example
+ 1..1
+init_suite
+ # example_simple_test: initializing
+ # example_simple_test: example_simple_test passed
+ ok 1 example_simple_test
+kunit example: all tests passed
+ok 2 example
diff --git a/tools/testing/memblock/.gitignore b/tools/testing/memblock/.gitignore
new file mode 100644
index 000000000000..4cc7cd5aac2b
--- /dev/null
+++ b/tools/testing/memblock/.gitignore
@@ -0,0 +1,5 @@
+main
+memblock.c
+linux/memblock.h
+asm/asm.h
+asm/cmpxchg.h
diff --git a/tools/testing/memblock/Makefile b/tools/testing/memblock/Makefile
new file mode 100644
index 000000000000..d80982ccdc20
--- /dev/null
+++ b/tools/testing/memblock/Makefile
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# The memblock simulator requires the AddressSanitizer (libasan) and liburcu
+# development packages to be installed
+CFLAGS += -I. -I../../include -Wall -O2 -fsanitize=address \
+ -fsanitize=undefined -D CONFIG_PHYS_ADDR_T_64BIT
+LDFLAGS += -fsanitize=address -fsanitize=undefined
+TARGETS = main
+TEST_OFILES = tests/alloc_nid_api.o tests/alloc_helpers_api.o tests/alloc_api.o \
+ tests/basic_api.o tests/common.o tests/alloc_exact_nid_api.o
+DEP_OFILES = memblock.o lib/slab.o mmzone.o slab.o cmdline.o
+OFILES = main.o $(DEP_OFILES) $(TEST_OFILES)
+EXTR_SRC = ../../../mm/memblock.c
+
+ifeq ($(BUILD), 32)
+ CFLAGS += -m32
+ LDFLAGS += -m32
+endif
+
+# Process user parameters
+include scripts/Makefile.include
+
+main: $(OFILES)
+
+$(OFILES): include
+
+include: ../../../include/linux/memblock.h ../../include/linux/*.h \
+ ../../include/asm/*.h
+
+ @mkdir -p linux
+ test -L linux/memblock.h || ln -s ../../../../include/linux/memblock.h linux/memblock.h
+ test -L asm/asm.h || ln -s ../../../arch/x86/include/asm/asm.h asm/asm.h
+ test -L asm/cmpxchg.h || ln -s ../../../arch/x86/include/asm/cmpxchg.h asm/cmpxchg.h
+
+memblock.c: $(EXTR_SRC)
+ test -L memblock.c || ln -s $(EXTR_SRC) memblock.c
+
+clean:
+ $(RM) $(TARGETS) $(OFILES) linux/memblock.h memblock.c asm/asm.h asm/cmpxchg.h
+
+help:
+ @echo 'Memblock simulator'
+ @echo ''
+ @echo 'Available targets:'
+ @echo ' main - Build the memblock simulator'
+ @echo ' clean - Remove generated files and symlinks in the directory'
+ @echo ''
+ @echo 'Configuration:'
+ @echo ' make MEMBLOCK_DEBUG=1 - enable memblock_dbg() messages'
+ @echo ' make NUMA=1 - simulate enabled NUMA'
+ @echo ' make 32BIT_PHYS_ADDR_T=1 - Use 32 bit physical addresses'
+
+vpath %.c ../../lib
+
+.PHONY: clean include help
diff --git a/tools/testing/memblock/README b/tools/testing/memblock/README
new file mode 100644
index 000000000000..7ca437d81806
--- /dev/null
+++ b/tools/testing/memblock/README
@@ -0,0 +1,118 @@
+==================
+Memblock simulator
+==================
+
+Introduction
+============
+
+Memblock is a boot time memory allocator[1] that manages memory regions before
+the actual memory management is initialized. Its APIs allow registering
+physical memory regions, marking them as available or reserved, allocating a
+block of memory within the requested range and/or on a specific NUMA node,
+and more.
+
+Because it is used so early in the booting process, testing and debugging it
+is difficult. This test suite, usually referred to as the memblock simulator,
+is an attempt at testing the memblock mechanism. It runs one monolithic test
+that consists of a series of checks that exercise both the basic operations
+and allocation functionalities of memblock. The main data structure of the
+boot time memory allocator is initialized at build time, so the checks here
+reuse its instance throughout the duration of the test. To ensure that tests
+don't affect each other, region arrays are reset in between.
+
+As this project uses the actual memblock code and has to run in user space,
+some of the kernel definitions were stubbed by the initial commit that
+introduced the memblock simulator (commit 16802e55dea9 ("memblock tests: Add
+skeleton of the memblock simulator")) and a few preparation commits just
+before it. Most of them don't match the kernel implementation, so one should
+consult them first before making any significant changes to the project.
+
+Usage
+=====
+
+To run the tests, build the main target and run it:
+
+$ make && ./main
+
+A successful run produces no output. It is possible to control the behavior
+by passing options from the command line. For example, to include verbose
+output, append the `-v` option when you run the tests:
+
+$ ./main -v
+
+This will print information about which functions are being tested and the
+number of test cases that passed.
+
+For the full list of command line options, see `./main --help`.
+
+It is also possible to override configuration parameters to change how the
+tests are built and what they exercise. For example, to simulate enabled
+NUMA, use:
+
+$ make NUMA=1
+
+For the full list of build options, see `make help`.
+
+Project structure
+=================
+
+The project has one target, main, which calls a group of checks for basic and
+allocation functions. Tests for each group are defined in dedicated files, as
+can be seen here:
+
+memblock
+|-- asm ------------------,
+|-- lib |-- implement function and struct stubs
+|-- linux ------------------'
+|-- scripts
+| |-- Makefile.include -- handles `make` parameters
+|-- tests
+| |-- alloc_api.(c|h) -- memblock_alloc tests
+| |-- alloc_helpers_api.(c|h) -- memblock_alloc_from tests
+| |-- alloc_nid_api.(c|h) -- memblock_alloc_try_nid tests
+| |-- basic_api.(c|h) -- memblock_add/memblock_reserve/... tests
+| |-- common.(c|h) -- helper functions for resetting memblock;
+|-- main.c --------------. dummy physical memory definition
+|-- Makefile `- test runner
+|-- README
+|-- TODO
+|-- .gitignore
+
+Simulating physical memory
+==========================
+
+Some allocation functions clear the memory in the process, so memblock must
+track valid memory ranges. To achieve this, the test suite registers with
+memblock the memory stored in the test_memory struct, a small wrapper that
+points to a block of memory allocated via malloc. For each group of allocation
+tests, dummy physical memory is allocated, added to memblock, and then released
+at the end of the test run. The structure of a test runner checking allocation
+functions is as follows:
+
+int memblock_alloc_foo_checks(void)
+{
+ reset_memblock_attributes(); /* data structure reset */
+ dummy_physical_memory_init(); /* allocate and register memory */
+
+ (...allocation checks...)
+
+ dummy_physical_memory_cleanup(); /* free the memory */
+}
+
+There's no need to explicitly free the dummy memory from memblock via a
+memblock_free() call. The entry will be erased by reset_memblock_regions(),
+called at the beginning of each test.
+
+Known issues
+============
+
+1. Requesting a specific NUMA node via memblock_alloc_node() does not work as
+ intended. Once the fix is in place, tests for this function can be added.
+
+2. Tests for memblock_alloc_low() can't be easily implemented. The function
+   uses the ARCH_LOW_ADDRESS_LIMIT macro, which can't be changed to point at
+   the low memory of the memory_block.
+
+References
+==========
+
+1. Boot time memory management documentation page:
+ https://www.kernel.org/doc/html/latest/core-api/boot-time-mm.html
diff --git a/tools/testing/memblock/TODO b/tools/testing/memblock/TODO
new file mode 100644
index 000000000000..e306c90c535f
--- /dev/null
+++ b/tools/testing/memblock/TODO
@@ -0,0 +1,5 @@
+TODO
+=====
+
+1. Add tests for memblock_alloc_node() to check if the correct NUMA node is set
+ for the new region
diff --git a/tools/testing/memblock/asm/dma.h b/tools/testing/memblock/asm/dma.h
new file mode 100644
index 000000000000..13ff8e5d22ef
--- /dev/null
+++ b/tools/testing/memblock/asm/dma.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_DMA_H
+#define _TOOLS_DMA_H
+
+#endif
diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
new file mode 100644
index 000000000000..0ab4b53bb4f3
--- /dev/null
+++ b/tools/testing/memblock/internal.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _MM_INTERNAL_H
+#define _MM_INTERNAL_H
+
+/*
+ * Enable memblock_dbg() messages
+ */
+#ifdef MEMBLOCK_DEBUG
+static int memblock_debug = 1;
+#endif
+
+#define pr_warn_ratelimited(fmt, ...) printf(fmt, ##__VA_ARGS__)
+
+bool mirrored_kernelcore = false;
+
+struct page {};
+
+void memblock_free_pages(struct page *page, unsigned long pfn,
+ unsigned int order)
+{
+}
+
+static inline void accept_memory(phys_addr_t start, unsigned long size)
+{
+}
+
+static inline unsigned long free_reserved_area(void *start, void *end,
+ int poison, const char *s)
+{
+ return 0;
+}
+
+#endif
diff --git a/tools/testing/memblock/lib/slab.c b/tools/testing/memblock/lib/slab.c
new file mode 100644
index 000000000000..6be6020328fb
--- /dev/null
+++ b/tools/testing/memblock/lib/slab.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/slab.h>
+
+enum slab_state slab_state;
+
+bool slab_is_available(void)
+{
+ return slab_state >= UP;
+}
diff --git a/tools/testing/memblock/linux/kernel.h b/tools/testing/memblock/linux/kernel.h
new file mode 100644
index 000000000000..4d1012d5be6e
--- /dev/null
+++ b/tools/testing/memblock/linux/kernel.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _MEMBLOCK_LINUX_KERNEL_H
+#define _MEMBLOCK_LINUX_KERNEL_H
+
+#include <../../include/linux/kernel.h>
+#include <linux/errno.h>
+#include <string.h>
+#include <linux/printk.h>
+#include <linux/linkage.h>
+#include <linux/kconfig.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+#endif
diff --git a/tools/testing/memblock/linux/kmemleak.h b/tools/testing/memblock/linux/kmemleak.h
new file mode 100644
index 000000000000..5fed13bb9ec4
--- /dev/null
+++ b/tools/testing/memblock/linux/kmemleak.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _KMEMLEAK_H
+#define _KMEMLEAK_H
+
+static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)
+{
+}
+
+static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
+ gfp_t gfp)
+{
+}
+
+static inline void dump_stack(void)
+{
+}
+
+#endif
diff --git a/tools/testing/memblock/linux/memory_hotplug.h b/tools/testing/memblock/linux/memory_hotplug.h
new file mode 100644
index 000000000000..dabe2c556858
--- /dev/null
+++ b/tools/testing/memblock/linux/memory_hotplug.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MEMORY_HOTPLUG_H
+#define _LINUX_MEMORY_HOTPLUG_H
+
+#include <linux/numa.h>
+#include <linux/pfn.h>
+#include <linux/cache.h>
+#include <linux/types.h>
+
+extern bool movable_node_enabled;
+
+static inline bool movable_node_is_enabled(void)
+{
+ return movable_node_enabled;
+}
+
+#endif
diff --git a/tools/testing/memblock/linux/mmzone.h b/tools/testing/memblock/linux/mmzone.h
new file mode 100644
index 000000000000..bb682659a12d
--- /dev/null
+++ b/tools/testing/memblock/linux/mmzone.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_MMZONE_H
+#define _TOOLS_MMZONE_H
+
+#include <linux/atomic.h>
+#include <linux/memory_hotplug.h>
+
+struct pglist_data *first_online_pgdat(void);
+struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
+
+#define for_each_online_pgdat(pgdat) \
+ for (pgdat = first_online_pgdat(); \
+ pgdat; \
+ pgdat = next_online_pgdat(pgdat))
+
+enum zone_type {
+ __MAX_NR_ZONES
+};
+
+#define MAX_NR_ZONES __MAX_NR_ZONES
+#define MAX_PAGE_ORDER 10
+#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER)
+
+#define pageblock_order MAX_PAGE_ORDER
+#define pageblock_nr_pages BIT(pageblock_order)
+#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
+#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
+
+struct zone {
+ atomic_long_t managed_pages;
+};
+
+typedef struct pglist_data {
+ struct zone node_zones[MAX_NR_ZONES];
+
+} pg_data_t;
+
+#endif
diff --git a/tools/testing/memblock/linux/mutex.h b/tools/testing/memblock/linux/mutex.h
new file mode 100644
index 000000000000..ae3f497165d6
--- /dev/null
+++ b/tools/testing/memblock/linux/mutex.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MUTEX_H
+#define _MUTEX_H
+
+#define DEFINE_MUTEX(name) int name
+
+static inline void dummy_mutex_guard(int *name)
+{
+}
+
+#define guard(mutex) \
+ dummy_##mutex##_guard
+
+#endif /* _MUTEX_H */
\ No newline at end of file
diff --git a/tools/testing/memblock/linux/printk.h b/tools/testing/memblock/linux/printk.h
new file mode 100644
index 000000000000..61af424d8c6c
--- /dev/null
+++ b/tools/testing/memblock/linux/printk.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PRINTK_H
+#define _PRINTK_H
+
+#include <stdio.h>
+#include <asm/bug.h>
+
+/*
+ * memblock_dbg is called with u64 arguments that don't match the "%llu"
+ * specifier in printf. This results in warnings that cannot be fixed without
+ * modifying memblock.c, which we wish to avoid. As these messages are not used
+ * in testing anyway, the mismatch can be ignored.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat"
+#define printk printf
+#pragma GCC diagnostic push
+
+#define pr_info printk
+#define pr_debug printk
+#define pr_cont printk
+#define pr_err printk
+#define pr_warn printk
+
+#endif
diff --git a/tools/testing/memblock/main.c b/tools/testing/memblock/main.c
new file mode 100644
index 000000000000..278f9dec5008
--- /dev/null
+++ b/tools/testing/memblock/main.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include "tests/basic_api.h"
+#include "tests/alloc_api.h"
+#include "tests/alloc_helpers_api.h"
+#include "tests/alloc_nid_api.h"
+#include "tests/alloc_exact_nid_api.h"
+#include "tests/common.h"
+
+int main(int argc, char **argv)
+{
+ parse_args(argc, argv);
+ memblock_basic_checks();
+ memblock_alloc_checks();
+ memblock_alloc_helpers_checks();
+ memblock_alloc_nid_checks();
+ memblock_alloc_exact_nid_checks();
+
+ return 0;
+}
diff --git a/tools/testing/memblock/mmzone.c b/tools/testing/memblock/mmzone.c
new file mode 100644
index 000000000000..d3d58851864e
--- /dev/null
+++ b/tools/testing/memblock/mmzone.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/mmzone.h>
+
+struct pglist_data *first_online_pgdat(void)
+{
+ return NULL;
+}
+
+struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
+{
+ return NULL;
+}
+
+void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid)
+{
+}
+
+void atomic_long_set(atomic_long_t *v, long i)
+{
+}
diff --git a/tools/testing/memblock/scripts/Makefile.include b/tools/testing/memblock/scripts/Makefile.include
new file mode 100644
index 000000000000..998281723590
--- /dev/null
+++ b/tools/testing/memblock/scripts/Makefile.include
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+# Definitions for user-provided arguments
+
+# Simulate CONFIG_NUMA=y
+ifeq ($(NUMA), 1)
+ CFLAGS += -D CONFIG_NUMA -D CONFIG_NODES_SHIFT=4
+endif
+
+# Use 32 bit physical addresses.
+# Remember to install 32-bit version of dependencies.
+ifeq ($(32BIT_PHYS_ADDR_T), 1)
+ CFLAGS += -m32 -U CONFIG_PHYS_ADDR_T_64BIT
+ LDFLAGS += -m32
+endif
+
+# Enable memblock_dbg() messages
+ifeq ($(MEMBLOCK_DEBUG), 1)
+ CFLAGS += -D MEMBLOCK_DEBUG
+endif
diff --git a/tools/testing/memblock/tests/alloc_api.c b/tools/testing/memblock/tests/alloc_api.c
new file mode 100644
index 000000000000..c55f67dd367d
--- /dev/null
+++ b/tools/testing/memblock/tests/alloc_api.c
@@ -0,0 +1,884 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include "alloc_api.h"
+
+static int alloc_test_flags = TEST_F_NONE;
+
+static inline const char * const get_memblock_alloc_name(int flags)
+{
+ if (flags & TEST_F_RAW)
+ return "memblock_alloc_raw";
+ return "memblock_alloc";
+}
+
+static inline void *run_memblock_alloc(phys_addr_t size, phys_addr_t align)
+{
+ if (alloc_test_flags & TEST_F_RAW)
+ return memblock_alloc_raw(size, align);
+ return memblock_alloc(size, align);
+}
+
+/*
+ * A simple test that tries to allocate a small memory region.
+ * Expect to allocate an aligned region near the end of the available memory.
+ */
+static int alloc_top_down_simple_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_2;
+ phys_addr_t expected_start;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ expected_start = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
+
+ allocated_ptr = run_memblock_alloc(size, SMP_CACHE_BYTES);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_test_flags);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, expected_start);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory next to a reserved region that starts at
+ * the misaligned address. Expect to create two separate entries, with the new
+ * entry aligned to the provided alignment:
+ *
+ * +
+ * | +--------+ +--------|
+ * | | rgn2 | | rgn1 |
+ * +------------+--------+---------+--------+
+ * ^
+ * |
+ * Aligned address boundary
+ *
+ * The allocation direction is top-down and region arrays are sorted from lower
+ * to higher addresses, so the new region will be the first entry in
+ * memory.reserved array. The previously reserved region does not get modified.
+ * Region counter and total size get updated.
+ */
+static int alloc_top_down_disjoint_check(void)
+{
+ /* After allocation, this will point to the "old" region */
+ struct memblock_region *rgn1 = &memblock.reserved.regions[1];
+ struct memblock_region *rgn2 = &memblock.reserved.regions[0];
+ struct region r1;
+ void *allocated_ptr = NULL;
+ phys_addr_t r2_size = SZ_16;
+ /* Use custom alignment */
+ phys_addr_t alignment = SMP_CACHE_BYTES * 2;
+ phys_addr_t total_size;
+ phys_addr_t expected_start;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ r1.base = memblock_end_of_DRAM() - SZ_2;
+ r1.size = SZ_2;
+
+ total_size = r1.size + r2_size;
+ expected_start = memblock_end_of_DRAM() - alignment;
+
+ memblock_reserve(r1.base, r1.size);
+
+ allocated_ptr = run_memblock_alloc(r2_size, alignment);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r2_size, alloc_test_flags);
+
+ ASSERT_EQ(rgn1->size, r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
+
+ ASSERT_EQ(rgn2->size, r2_size);
+ ASSERT_EQ(rgn2->base, expected_start);
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory when there is enough space at the end
+ * of the previously reserved block (i.e. first fit):
+ *
+ * | +--------+--------------|
+ * | | r1 | r2 |
+ * +--------------+--------+--------------+
+ *
+ * Expect a merge of both regions. Only the region size gets updated.
+ */
+static int alloc_top_down_before_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ /*
+ * The first region ends at the aligned address to test region merging
+ */
+ phys_addr_t r1_size = SMP_CACHE_BYTES;
+ phys_addr_t r2_size = SZ_512;
+ phys_addr_t total_size = r1_size + r2_size;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ memblock_reserve_kern(memblock_end_of_DRAM() - total_size, r1_size);
+
+ allocated_ptr = run_memblock_alloc(r2_size, SMP_CACHE_BYTES);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r2_size, alloc_test_flags);
+
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - total_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory when there is not enough space at the
+ * end of the previously reserved block (i.e. second fit):
+ *
+ * | +-----------+------+ |
+ * | | r2 | r1 | |
+ * +------------+-----------+------+-----+
+ *
+ * Expect a merge of both regions. Both the base address and size of the region
+ * get updated.
+ */
+static int alloc_top_down_after_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ struct region r1;
+ void *allocated_ptr = NULL;
+ phys_addr_t r2_size = SZ_512;
+ phys_addr_t total_size;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ /*
+ * The first region starts at the aligned address to test region merging
+ */
+ r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
+ r1.size = SZ_8;
+
+ total_size = r1.size + r2_size;
+
+ memblock_reserve_kern(r1.base, r1.size);
+
+ allocated_ptr = run_memblock_alloc(r2_size, SMP_CACHE_BYTES);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r2_size, alloc_test_flags);
+
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, r1.base - r2_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory when there are two reserved regions with
+ * a gap too small to fit the new region:
+ *
+ * | +--------+----------+ +------|
+ * | | r3 | r2 | | r1 |
+ * +-------+--------+----------+---+------+
+ *
+ * Expect to allocate a region before the one that starts at the lower address,
+ * and merge them into one. The region counter and total size fields get
+ * updated.
+ */
+static int alloc_top_down_second_fit_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ struct region r1, r2;
+ void *allocated_ptr = NULL;
+ phys_addr_t r3_size = SZ_1K;
+ phys_addr_t total_size;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ r1.base = memblock_end_of_DRAM() - SZ_512;
+ r1.size = SZ_512;
+
+ r2.base = r1.base - SZ_512;
+ r2.size = SZ_256;
+
+ total_size = r1.size + r2.size + r3_size;
+
+ memblock_reserve_kern(r1.base, r1.size);
+ memblock_reserve_kern(r2.base, r2.size);
+
+ allocated_ptr = run_memblock_alloc(r3_size, SMP_CACHE_BYTES);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r3_size, alloc_test_flags);
+
+ ASSERT_EQ(rgn->size, r2.size + r3_size);
+ ASSERT_EQ(rgn->base, r2.base - r3_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory when there are two reserved regions with
+ * a gap big enough to accommodate the new region:
+ *
+ * | +--------+--------+--------+ |
+ * | | r2 | r3 | r1 | |
+ * +-----+--------+--------+--------+-----+
+ *
+ * Expect to merge all of them, creating one big entry in memblock.reserved
+ * array. The region counter and total size fields get updated.
+ */
+static int alloc_in_between_generic_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ struct region r1, r2;
+ void *allocated_ptr = NULL;
+ phys_addr_t gap_size = SMP_CACHE_BYTES;
+ phys_addr_t r3_size = SZ_64;
+ /*
+ * Calculate regions size so there's just enough space for the new entry
+ */
+ phys_addr_t rgn_size = (MEM_SIZE - (2 * gap_size + r3_size)) / 2;
+ phys_addr_t total_size;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ r1.size = rgn_size;
+ r1.base = memblock_end_of_DRAM() - (gap_size + rgn_size);
+
+ r2.size = rgn_size;
+ r2.base = memblock_start_of_DRAM() + gap_size;
+
+ total_size = r1.size + r2.size + r3_size;
+
+ memblock_reserve_kern(r1.base, r1.size);
+ memblock_reserve_kern(r2.base, r2.size);
+
+ allocated_ptr = run_memblock_alloc(r3_size, SMP_CACHE_BYTES);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r3_size, alloc_test_flags);
+
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, r1.base - r2.size - r3_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory when the memory is filled with reserved
+ * regions with memory gaps too small to fit the new region:
+ *
+ * +-------+
+ * | new |
+ * +--+----+
+ * | +-----+ +-----+ +-----+ |
+ * | | res | | res | | res | |
+ * +----+-----+----+-----+----+-----+----+
+ *
+ * Expect no allocation to happen.
+ */
+static int alloc_small_gaps_generic_check(void)
+{
+ void *allocated_ptr = NULL;
+ phys_addr_t region_size = SZ_1K;
+ phys_addr_t gap_size = SZ_256;
+ phys_addr_t region_end;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ region_end = memblock_start_of_DRAM();
+
+ while (region_end < memblock_end_of_DRAM()) {
+ memblock_reserve(region_end + gap_size, region_size);
+ region_end += gap_size + region_size;
+ }
+
+ allocated_ptr = run_memblock_alloc(region_size, SMP_CACHE_BYTES);
+
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory when all memory is reserved.
+ * Expect no allocation to happen.
+ */
+static int alloc_all_reserved_generic_check(void)
+{
+ void *allocated_ptr = NULL;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ /* Simulate full memory */
+ memblock_reserve(memblock_start_of_DRAM(), MEM_SIZE);
+
+ allocated_ptr = run_memblock_alloc(SZ_256, SMP_CACHE_BYTES);
+
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory when the memory is almost full,
+ * with not enough space left for the new region:
+ *
+ * +-------+
+ * | new |
+ * +-------+
+ * |-----------------------------+ |
+ * | reserved | |
+ * +-----------------------------+---+
+ *
+ * Expect no allocation to happen.
+ */
+static int alloc_no_space_generic_check(void)
+{
+ void *allocated_ptr = NULL;
+ phys_addr_t available_size = SZ_256;
+ phys_addr_t reserved_size = MEM_SIZE - available_size;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ /* Simulate almost-full memory */
+ memblock_reserve(memblock_start_of_DRAM(), reserved_size);
+
+ allocated_ptr = run_memblock_alloc(SZ_1K, SMP_CACHE_BYTES);
+
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory when the memory is almost full,
+ * but there is just enough space left:
+ *
+ * |---------------------------+---------|
+ * | reserved | new |
+ * +---------------------------+---------+
+ *
+ * Expect to allocate memory and merge all the regions. The total size field
+ * gets updated.
+ */
+static int alloc_limited_space_generic_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t available_size = SZ_256;
+ phys_addr_t reserved_size = MEM_SIZE - available_size;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ /* Simulate almost-full memory */
+ memblock_reserve_kern(memblock_start_of_DRAM(), reserved_size);
+
+ allocated_ptr = run_memblock_alloc(available_size, SMP_CACHE_BYTES);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, available_size, alloc_test_flags);
+
+ ASSERT_EQ(rgn->size, MEM_SIZE);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, MEM_SIZE);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory when there is no available memory
+ * registered (i.e. memblock.memory has only a dummy entry).
+ * Expect no allocation to happen.
+ */
+static int alloc_no_memory_generic_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+
+ allocated_ptr = run_memblock_alloc(SZ_1K, SMP_CACHE_BYTES);
+
+ ASSERT_EQ(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, 0);
+ ASSERT_EQ(rgn->base, 0);
+ ASSERT_EQ(memblock.reserved.total_size, 0);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a region that is larger than the total size of
+ * available memory (memblock.memory):
+ *
+ * +-----------------------------------+
+ * | new |
+ * +-----------------------------------+
+ * | |
+ * | |
+ * +---------------------------------+
+ *
+ * Expect no allocation to happen.
+ */
+static int alloc_too_large_generic_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ allocated_ptr = run_memblock_alloc(MEM_SIZE + SZ_2, SMP_CACHE_BYTES);
+
+ ASSERT_EQ(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, 0);
+ ASSERT_EQ(rgn->base, 0);
+ ASSERT_EQ(memblock.reserved.total_size, 0);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to allocate a small memory region.
+ * Expect to allocate an aligned region at the beginning of the available
+ * memory.
+ */
+static int alloc_bottom_up_simple_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ allocated_ptr = run_memblock_alloc(SZ_2, SMP_CACHE_BYTES);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, SZ_2, alloc_test_flags);
+
+ ASSERT_EQ(rgn->size, SZ_2);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, SZ_2);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory next to a reserved region that starts at
+ * a misaligned address. Expect to create two separate entries, with the new
+ * entry aligned to the provided alignment:
+ *
+ * +
+ * | +----------+ +----------+ |
+ * | | rgn1 | | rgn2 | |
+ * +----+----------+---+----------+-----+
+ * ^
+ * |
+ * Aligned address boundary
+ *
+ * The allocation direction is bottom-up, so the new region will be the second
+ * entry in the memblock.reserved array. The previously reserved region does not get
+ * modified. Region counter and total size get updated.
+ */
+static int alloc_bottom_up_disjoint_check(void)
+{
+ struct memblock_region *rgn1 = &memblock.reserved.regions[0];
+ struct memblock_region *rgn2 = &memblock.reserved.regions[1];
+ struct region r1;
+ void *allocated_ptr = NULL;
+ phys_addr_t r2_size = SZ_16;
+ /* Use custom alignment */
+ phys_addr_t alignment = SMP_CACHE_BYTES * 2;
+ phys_addr_t total_size;
+ phys_addr_t expected_start;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ r1.base = memblock_start_of_DRAM() + SZ_2;
+ r1.size = SZ_2;
+
+ total_size = r1.size + r2_size;
+ expected_start = memblock_start_of_DRAM() + alignment;
+
+ memblock_reserve(r1.base, r1.size);
+
+ allocated_ptr = run_memblock_alloc(r2_size, alignment);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r2_size, alloc_test_flags);
+
+ ASSERT_EQ(rgn1->size, r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
+
+ ASSERT_EQ(rgn2->size, r2_size);
+ ASSERT_EQ(rgn2->base, expected_start);
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory when there is enough space at
+ * the beginning of the previously reserved block (i.e. first fit):
+ *
+ * |------------------+--------+ |
+ * | r1 | r2 | |
+ * +------------------+--------+---------+
+ *
+ * Expect a merge of both regions. Only the region size gets updated.
+ */
+static int alloc_bottom_up_before_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t r1_size = SZ_512;
+ phys_addr_t r2_size = SZ_128;
+ phys_addr_t total_size = r1_size + r2_size;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ memblock_reserve_kern(memblock_start_of_DRAM() + r1_size, r2_size);
+
+ allocated_ptr = run_memblock_alloc(r1_size, SMP_CACHE_BYTES);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r1_size, alloc_test_flags);
+
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory when there is not enough space at
+ * the beginning of the previously reserved block (i.e. second fit):
+ *
+ * | +--------+--------------+ |
+ * | | r1 | r2 | |
+ * +----+--------+--------------+---------+
+ *
+ * Expect a merge of both regions. Only the region size gets updated.
+ */
+static int alloc_bottom_up_after_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ struct region r1;
+ void *allocated_ptr = NULL;
+ phys_addr_t r2_size = SZ_512;
+ phys_addr_t total_size;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ /*
+	 * The first region starts at an aligned address to test region merging.
+ */
+ r1.base = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
+ r1.size = SZ_64;
+
+ total_size = r1.size + r2_size;
+
+ memblock_reserve_kern(r1.base, r1.size);
+
+ allocated_ptr = run_memblock_alloc(r2_size, SMP_CACHE_BYTES);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r2_size, alloc_test_flags);
+
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, r1.base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory when there are two reserved regions, the
+ * first one starting at the beginning of the available memory, with a gap too
+ * small to fit the new region:
+ *
+ * |------------+ +--------+--------+ |
+ * | r1 | | r2 | r3 | |
+ * +------------+-----+--------+--------+--+
+ *
+ * Expect to allocate after the second region, which starts at the higher
+ * address, and merge them into one. The region counter and total size fields
+ * get updated.
+ */
+static int alloc_bottom_up_second_fit_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[1];
+ struct region r1, r2;
+ void *allocated_ptr = NULL;
+ phys_addr_t r3_size = SZ_1K;
+ phys_addr_t total_size;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ r1.base = memblock_start_of_DRAM();
+ r1.size = SZ_512;
+
+ r2.base = r1.base + r1.size + SZ_512;
+ r2.size = SZ_256;
+
+ total_size = r1.size + r2.size + r3_size;
+
+ memblock_reserve_kern(r1.base, r1.size);
+ memblock_reserve_kern(r2.base, r2.size);
+
+ allocated_ptr = run_memblock_alloc(r3_size, SMP_CACHE_BYTES);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r3_size, alloc_test_flags);
+
+ ASSERT_EQ(rgn->size, r2.size + r3_size);
+ ASSERT_EQ(rgn->base, r2.base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/* Test case wrappers */
+static int alloc_simple_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_top_down_simple_check();
+ memblock_set_bottom_up(true);
+ alloc_bottom_up_simple_check();
+
+ return 0;
+}
+
+static int alloc_disjoint_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_top_down_disjoint_check();
+ memblock_set_bottom_up(true);
+ alloc_bottom_up_disjoint_check();
+
+ return 0;
+}
+
+static int alloc_before_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_top_down_before_check();
+ memblock_set_bottom_up(true);
+ alloc_bottom_up_before_check();
+
+ return 0;
+}
+
+static int alloc_after_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_top_down_after_check();
+ memblock_set_bottom_up(true);
+ alloc_bottom_up_after_check();
+
+ return 0;
+}
+
+static int alloc_in_between_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_in_between_generic_check);
+ run_bottom_up(alloc_in_between_generic_check);
+
+ return 0;
+}
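
run_top_down() and run_bottom_up(), used by the generic wrappers here, are shared helpers defined outside this hunk; presumably they set the allocation direction, tag the test-output prefix, and invoke the check, roughly as follows (a sketch; the real helpers may well be macros):

    static inline int run_top_down(int (*func)(void))
    {
        memblock_set_bottom_up(false);
        prefix_push("top-down");
        func();
        prefix_pop();
        return 0;
    }

    static inline int run_bottom_up(int (*func)(void))
    {
        memblock_set_bottom_up(true);
        prefix_push("bottom-up");
        func();
        prefix_pop();
        return 0;
    }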
+
+static int alloc_second_fit_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_top_down_second_fit_check();
+ memblock_set_bottom_up(true);
+ alloc_bottom_up_second_fit_check();
+
+ return 0;
+}
+
+static int alloc_small_gaps_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_small_gaps_generic_check);
+ run_bottom_up(alloc_small_gaps_generic_check);
+
+ return 0;
+}
+
+static int alloc_all_reserved_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_all_reserved_generic_check);
+ run_bottom_up(alloc_all_reserved_generic_check);
+
+ return 0;
+}
+
+static int alloc_no_space_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_no_space_generic_check);
+ run_bottom_up(alloc_no_space_generic_check);
+
+ return 0;
+}
+
+static int alloc_limited_space_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_limited_space_generic_check);
+ run_bottom_up(alloc_limited_space_generic_check);
+
+ return 0;
+}
+
+static int alloc_no_memory_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_no_memory_generic_check);
+ run_bottom_up(alloc_no_memory_generic_check);
+
+ return 0;
+}
+
+static int alloc_too_large_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_too_large_generic_check);
+ run_bottom_up(alloc_too_large_generic_check);
+
+ return 0;
+}
+
+static int memblock_alloc_checks_internal(int flags)
+{
+ const char *func = get_memblock_alloc_name(flags);
+
+ alloc_test_flags = flags;
+ prefix_reset();
+ prefix_push(func);
+ test_print("Running %s tests...\n", func);
+
+ reset_memblock_attributes();
+ dummy_physical_memory_init();
+
+ alloc_simple_check();
+ alloc_disjoint_check();
+ alloc_before_check();
+ alloc_after_check();
+ alloc_second_fit_check();
+ alloc_small_gaps_check();
+ alloc_in_between_check();
+ alloc_all_reserved_check();
+ alloc_no_space_check();
+ alloc_limited_space_check();
+ alloc_no_memory_check();
+ alloc_too_large_check();
+
+ dummy_physical_memory_cleanup();
+
+ prefix_pop();
+
+ return 0;
+}
+
+int memblock_alloc_checks(void)
+{
+ memblock_alloc_checks_internal(TEST_F_NONE);
+ memblock_alloc_checks_internal(TEST_F_RAW);
+
+ return 0;
+}
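
get_memblock_alloc_name(), called by memblock_alloc_checks_internal() above, is defined earlier in this file and not shown in this hunk. Given the two flag values exercised here, a plausible reconstruction (the returned names are assumptions):

    static const char *get_memblock_alloc_name(int flags)
    {
        if (flags & TEST_F_RAW)
            return "memblock_alloc_raw";
        return "memblock_alloc";
    }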
diff --git a/tools/testing/memblock/tests/alloc_api.h b/tools/testing/memblock/tests/alloc_api.h
new file mode 100644
index 000000000000..585b085baf21
--- /dev/null
+++ b/tools/testing/memblock/tests/alloc_api.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _MEMBLOCK_ALLOCS_H
+#define _MEMBLOCK_ALLOCS_H
+
+#include "common.h"
+
+int memblock_alloc_checks(void);
+
+#endif
diff --git a/tools/testing/memblock/tests/alloc_exact_nid_api.c b/tools/testing/memblock/tests/alloc_exact_nid_api.c
new file mode 100644
index 000000000000..6e14447da6e1
--- /dev/null
+++ b/tools/testing/memblock/tests/alloc_exact_nid_api.c
@@ -0,0 +1,1113 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include "alloc_exact_nid_api.h"
+#include "alloc_nid_api.h"
+
+#define FUNC_NAME "memblock_alloc_exact_nid_raw"
+
+/*
+ * Fraction of MEM_SIZE assigned to each node, in basis-point units
+ * (one hundredth of 1%, i.e. 1/10000).
+ */
+static const unsigned int node_fractions[] = {
+ 2500, /* 1/4 */
+ 625, /* 1/16 */
+ 1250, /* 1/8 */
+ 1250, /* 1/8 */
+ 625, /* 1/16 */
+ 625, /* 1/16 */
+ 2500, /* 1/4 */
+ 625, /* 1/16 */
+};
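
Assuming setup_numa_memblock() sizes node i as node_fractions[i] * MEM_SIZE / 10000, the table above yields MEM_SIZE/4 for 2500 bp and MEM_SIZE/16 for 625 bp. As a worked sketch (the helper name is illustrative):

    /* Basis points to bytes: 2500 bp -> mem_size / 4, 625 bp -> mem_size / 16. */
    static inline phys_addr_t node_size_from_bp(phys_addr_t mem_size,
                                                unsigned int fraction_bp)
    {
        return (mem_size * fraction_bp) / 10000;
    }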
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * has enough memory to allocate a region of the requested size.
+ * Expect to allocate an aligned region at the end of the requested node.
+ */
+static int alloc_exact_nid_top_down_numa_simple_check(void)
+{
+ int nid_req = 3;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ ASSERT_LE(SZ_4, req_node->size);
+ size = req_node->size / SZ_4;
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
+ ASSERT_LE(req_node->base, new_rgn->base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
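
region_end(), used in the assertions above, is another shared helper not shown in this hunk; it presumably reduces to the exclusive end of a region:

    static inline phys_addr_t region_end(struct memblock_region *rgn)
    {
        return rgn->base + rgn->size; /* first address past the region */
    }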
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * is partially reserved but has enough memory for the allocated region:
+ *
+ * | +---------------------------------------+ |
+ * | | requested | |
+ * +-----------+---------------------------------------+----------+
+ *
+ * | +------------------+ +-----+ |
+ * | | reserved | | new | |
+ * +-----------+------------------+--------------+-----+----------+
+ *
+ * Expect to allocate an aligned region at the end of the requested node. The
+ * region count and total size get updated.
+ */
+static int alloc_exact_nid_top_down_numa_part_reserved_check(void)
+{
+ int nid_req = 4;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[1];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ struct region r1;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ ASSERT_LE(SZ_8, req_node->size);
+ r1.base = req_node->base;
+ r1.size = req_node->size / SZ_2;
+ size = r1.size / SZ_4;
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ memblock_reserve(r1.base, r1.size);
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
+ ASSERT_LE(req_node->base, new_rgn->base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that spans over the min_addr
+ * and max_addr range and overlaps with two different nodes, where the first
+ * node is the requested node:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * | +-----------------------+-----------+ |
+ * | | requested | node3 | |
+ * +-----------+-----------------------+-----------+--------------+
+ * + +
+ * | +-----------+ |
+ * | | rgn | |
+ * +-----------------------+-----------+--------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region that ends at
+ * the end of the requested node.
+ */
+static int alloc_exact_nid_top_down_numa_split_range_low_check(void)
+{
+ int nid_req = 2;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_512;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+ phys_addr_t req_node_end;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ req_node_end = region_end(req_node);
+ min_addr = req_node_end - SZ_256;
+ max_addr = min_addr + size;
+
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, req_node_end - size);
+ ASSERT_LE(req_node->base, new_rgn->base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that spans over the min_addr
+ * and max_addr range and overlaps with two different nodes, where the requested
+ * node ends before min_addr:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * | +---------------+ +-------------+---------+ |
+ * | | requested | | node1 | node2 | |
+ * +----+---------------+--------+-------------+---------+----------+
+ * + +
+ * | +---------+ |
+ * | | rgn | |
+ * +----------+---------+-------------------------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region that ends at
+ * the end of the requested node.
+ */
+static int alloc_exact_nid_top_down_numa_no_overlap_split_check(void)
+{
+ int nid_req = 2;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ struct memblock_region *node2 = &memblock.memory.regions[6];
+ void *allocated_ptr = NULL;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ size = SZ_512;
+ min_addr = node2->base - SZ_256;
+ max_addr = min_addr + size;
+
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
+ ASSERT_LE(req_node->base, new_rgn->base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range when
+ * the requested node and the range do not overlap, and the requested node ends
+ * before min_addr. The range overlaps with multiple nodes along node
+ * boundaries:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * |-----------+ +----------+----...----+----------+ |
+ * | requested | | min node | ... | max node | |
+ * +-----------+-----------+----------+----...----+----------+------+
+ * + +
+ * | +-----+ |
+ * | | rgn | |
+ * +-----+-----+----------------------------------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region that ends at
+ * the end of the requested node.
+ */
+static int alloc_exact_nid_top_down_numa_no_overlap_low_check(void)
+{
+ int nid_req = 0;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ struct memblock_region *min_node = &memblock.memory.regions[2];
+ struct memblock_region *max_node = &memblock.memory.regions[5];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_64;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ min_addr = min_node->base;
+ max_addr = region_end(max_node);
+
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * has enough memory to allocate a region of the requested size.
+ * Expect to allocate an aligned region at the beginning of the requested node.
+ */
+static int alloc_exact_nid_bottom_up_numa_simple_check(void)
+{
+ int nid_req = 3;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ ASSERT_LE(SZ_4, req_node->size);
+ size = req_node->size / SZ_4;
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, req_node->base);
+ ASSERT_LE(region_end(new_rgn), region_end(req_node));
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * is partially reserved but has enough memory for the allocated region:
+ *
+ * | +---------------------------------------+ |
+ * | | requested | |
+ * +-----------+---------------------------------------+---------+
+ *
+ * | +------------------+-----+ |
+ * | | reserved | new | |
+ * +-----------+------------------+-----+------------------------+
+ *
+ * Expect to allocate an aligned region in the requested node that merges with
+ * the existing reserved region. The total size gets updated.
+ */
+static int alloc_exact_nid_bottom_up_numa_part_reserved_check(void)
+{
+ int nid_req = 4;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ struct region r1;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+ phys_addr_t total_size;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ ASSERT_LE(SZ_8, req_node->size);
+ r1.base = req_node->base;
+ r1.size = req_node->size / SZ_2;
+ size = r1.size / SZ_4;
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+ total_size = size + r1.size;
+
+ memblock_reserve(r1.base, r1.size);
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+ ASSERT_EQ(new_rgn->size, total_size);
+ ASSERT_EQ(new_rgn->base, req_node->base);
+ ASSERT_LE(region_end(new_rgn), region_end(req_node));
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that spans over the min_addr
+ * and max_addr range and overlaps with two different nodes, where the first
+ * node is the requested node:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * | +-----------------------+-----------+ |
+ * | | requested | node3 | |
+ * +-----------+-----------------------+-----------+--------------+
+ * + +
+ * | +-----------+ |
+ * | | rgn | |
+ * +-----------+-----------+--------------------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region at the beginning
+ * of the requested node.
+ */
+static int alloc_exact_nid_bottom_up_numa_split_range_low_check(void)
+{
+ int nid_req = 2;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_512;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+ phys_addr_t req_node_end;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ req_node_end = region_end(req_node);
+ min_addr = req_node_end - SZ_256;
+ max_addr = min_addr + size;
+
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, req_node->base);
+ ASSERT_LE(region_end(new_rgn), req_node_end);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that spans over the min_addr
+ * and max_addr range and overlaps with two different nodes, where the requested
+ * node ends before min_addr:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * | +---------------+ +-------------+---------+ |
+ * | | requested | | node1 | node2 | |
+ * +----+---------------+--------+-------------+---------+---------+
+ * + +
+ * | +---------+ |
+ * | | rgn | |
+ * +----+---------+------------------------------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region that starts at
+ * the beginning of the requested node.
+ */
+static int alloc_exact_nid_bottom_up_numa_no_overlap_split_check(void)
+{
+ int nid_req = 2;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ struct memblock_region *node2 = &memblock.memory.regions[6];
+ void *allocated_ptr = NULL;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ size = SZ_512;
+ min_addr = node2->base - SZ_256;
+ max_addr = min_addr + size;
+
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, req_node->base);
+ ASSERT_LE(region_end(new_rgn), region_end(req_node));
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range when
+ * the requested node and the range do not overlap, and the requested node ends
+ * before min_addr. The range overlaps with multiple nodes along node
+ * boundaries:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * |-----------+ +----------+----...----+----------+ |
+ * | requested | | min node | ... | max node | |
+ * +-----------+-----------+----------+----...----+----------+------+
+ * + +
+ * |-----+ |
+ * | rgn | |
+ * +-----+----------------------------------------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region that starts at
+ * the beginning of the requested node.
+ */
+static int alloc_exact_nid_bottom_up_numa_no_overlap_low_check(void)
+{
+ int nid_req = 0;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ struct memblock_region *min_node = &memblock.memory.regions[2];
+ struct memblock_region *max_node = &memblock.memory.regions[5];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_64;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ min_addr = min_node->base;
+ max_addr = region_end(max_node);
+
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, req_node->base);
+ ASSERT_LE(region_end(new_rgn), region_end(req_node));
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * does not have enough memory to allocate a region of the requested size:
+ *
+ * | +-----+ |
+ * | | req | |
+ * +---+-----+----------------------------+
+ *
+ * +---------+
+ * | rgn |
+ * +---------+
+ *
+ * Expect no allocation to happen.
+ */
+static int alloc_exact_nid_numa_small_node_generic_check(void)
+{
+ int nid_req = 1;
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ size = SZ_2 * req_node->size;
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * is fully reserved:
+ *
+ * | +---------+ |
+ * | |requested| |
+ * +--------------+---------+-------------+
+ *
+ * | +---------+ |
+ * | | reserved| |
+ * +--------------+---------+-------------+
+ *
+ * Expect no allocation to happen.
+ */
+static int alloc_exact_nid_numa_node_reserved_generic_check(void)
+{
+ int nid_req = 2;
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ size = req_node->size;
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ memblock_reserve(req_node->base, req_node->size);
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * is partially reserved and does not have enough contiguous memory for the
+ * allocated region:
+ *
+ * | +-----------------------+ |
+ * | | requested | |
+ * +-----------+-----------------------+----+
+ *
+ * | +----------+ |
+ * | | reserved | |
+ * +-----------------+----------+-----------+
+ *
+ * Expect no allocation to happen.
+ */
+static int alloc_exact_nid_numa_part_reserved_fail_generic_check(void)
+{
+ int nid_req = 4;
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ struct region r1;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ ASSERT_LE(SZ_4, req_node->size);
+ size = req_node->size / SZ_2;
+ r1.base = req_node->base + (size / SZ_2);
+ r1.size = size;
+
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ memblock_reserve(r1.base, r1.size);
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that spans over the min_addr
+ * and max_addr range and overlaps with two different nodes, where the second
+ * node is the requested node:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * | +--------------------------+---------+ |
+ * | | first node |requested| |
+ * +------+--------------------------+---------+----------------+
+ *
+ * Expect no allocation to happen.
+ */
+static int alloc_exact_nid_numa_split_range_high_generic_check(void)
+{
+ int nid_req = 3;
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_512;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ min_addr = req_node->base - SZ_256;
+ max_addr = min_addr + size;
+
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range when
+ * the requested node and the range do not overlap, and the requested node starts
+ * after max_addr. The range overlaps with multiple nodes along node
+ * boundaries:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * | +----------+----...----+----------+ +-----------+ |
+ * | | min node | ... | max node | | requested | |
+ * +-----+----------+----...----+----------+--------+-----------+---+
+ *
+ * Expect no allocation to happen.
+ */
+static int alloc_exact_nid_numa_no_overlap_high_generic_check(void)
+{
+ int nid_req = 7;
+ struct memblock_region *min_node = &memblock.memory.regions[2];
+ struct memblock_region *max_node = &memblock.memory.regions[5];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_64;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ min_addr = min_node->base;
+ max_addr = region_end(max_node);
+
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * does not have enough memory to allocate a region of the requested size.
+ * Additionally, none of the nodes have enough memory to allocate the region:
+ *
+ * +-----------------------------------+
+ * | new |
+ * +-----------------------------------+
+ * |-------+-------+-------+-------+-------+-------+-------+-------|
+ * | node0 | node1 | node2 | node3 | node4 | node5 | node6 | node7 |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * Expect no allocation to happen.
+ */
+static int alloc_exact_nid_numa_large_region_generic_check(void)
+{
+ int nid_req = 3;
+ void *allocated_ptr = NULL;
+ phys_addr_t size = MEM_SIZE / SZ_2;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range when
+ * there are two reserved regions at the borders. The requested node starts at
+ * min_addr and ends at max_addr and is the same size as the region to be
+ * allocated:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * | +-----------+-----------------------+-----------------------|
+ * | | node5 | requested | node7 |
+ * +------+-----------+-----------------------+-----------------------+
+ * + +
+ * | +----+-----------------------+----+ |
+ * | | r2 | new | r1 | |
+ * +-------------+----+-----------------------+----+------------------+
+ *
+ * Expect to merge all of the regions into one. The region counter and total
+ * size fields get updated.
+ */
+static int alloc_exact_nid_numa_reserved_full_merge_generic_check(void)
+{
+ int nid_req = 6;
+ int nid_next = nid_req + 1;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ struct memblock_region *next_node = &memblock.memory.regions[nid_next];
+ void *allocated_ptr = NULL;
+ struct region r1, r2;
+ phys_addr_t size = req_node->size;
+ phys_addr_t total_size;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ r1.base = next_node->base;
+ r1.size = SZ_128;
+
+ r2.size = SZ_128;
+ r2.base = r1.base - (size + r2.size);
+
+ total_size = r1.size + r2.size + size;
+ min_addr = r2.base + r2.size;
+ max_addr = r1.base;
+
+ memblock_reserve(r1.base, r1.size);
+ memblock_reserve(r2.base, r2.size);
+
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+ ASSERT_EQ(new_rgn->size, total_size);
+ ASSERT_EQ(new_rgn->base, r2.base);
+
+ ASSERT_LE(new_rgn->base, req_node->base);
+ ASSERT_LE(region_end(req_node), region_end(new_rgn));
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range,
+ * where the total range can fit the region, but it is split between two nodes
+ * and everything else is reserved. Additionally, nid is set to NUMA_NO_NODE
+ * instead of requesting a specific node:
+ *
+ * +-----------+
+ * | new |
+ * +-----------+
+ * | +---------------------+-----------|
+ * | | prev node | next node |
+ * +------+---------------------+-----------+
+ * + +
+ * |----------------------+ +-----|
+ * | r1 | | r2 |
+ * +----------------------+-----------+-----+
+ * ^ ^
+ * | |
+ * | max_addr
+ * |
+ * min_addr
+ *
+ * Expect no allocation to happen.
+ */
+static int alloc_exact_nid_numa_split_all_reserved_generic_check(void)
+{
+ void *allocated_ptr = NULL;
+ struct memblock_region *next_node = &memblock.memory.regions[7];
+ struct region r1, r2;
+ phys_addr_t size = SZ_256;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ r2.base = next_node->base + SZ_128;
+ r2.size = memblock_end_of_DRAM() - r2.base;
+
+ r1.size = MEM_SIZE - (r2.size + size);
+ r1.base = memblock_start_of_DRAM();
+
+ min_addr = r1.base + r1.size;
+ max_addr = r2.base;
+
+ memblock_reserve(r1.base, r1.size);
+ memblock_reserve(r2.base, r2.size);
+
+ allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/* Test case wrappers for NUMA tests */
+static int alloc_exact_nid_numa_simple_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_exact_nid_top_down_numa_simple_check();
+ memblock_set_bottom_up(true);
+ alloc_exact_nid_bottom_up_numa_simple_check();
+
+ return 0;
+}
+
+static int alloc_exact_nid_numa_part_reserved_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_exact_nid_top_down_numa_part_reserved_check();
+ memblock_set_bottom_up(true);
+ alloc_exact_nid_bottom_up_numa_part_reserved_check();
+
+ return 0;
+}
+
+static int alloc_exact_nid_numa_split_range_low_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_exact_nid_top_down_numa_split_range_low_check();
+ memblock_set_bottom_up(true);
+ alloc_exact_nid_bottom_up_numa_split_range_low_check();
+
+ return 0;
+}
+
+static int alloc_exact_nid_numa_no_overlap_split_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_exact_nid_top_down_numa_no_overlap_split_check();
+ memblock_set_bottom_up(true);
+ alloc_exact_nid_bottom_up_numa_no_overlap_split_check();
+
+ return 0;
+}
+
+static int alloc_exact_nid_numa_no_overlap_low_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_exact_nid_top_down_numa_no_overlap_low_check();
+ memblock_set_bottom_up(true);
+ alloc_exact_nid_bottom_up_numa_no_overlap_low_check();
+
+ return 0;
+}
+
+static int alloc_exact_nid_numa_small_node_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_exact_nid_numa_small_node_generic_check);
+ run_bottom_up(alloc_exact_nid_numa_small_node_generic_check);
+
+ return 0;
+}
+
+static int alloc_exact_nid_numa_node_reserved_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_exact_nid_numa_node_reserved_generic_check);
+ run_bottom_up(alloc_exact_nid_numa_node_reserved_generic_check);
+
+ return 0;
+}
+
+static int alloc_exact_nid_numa_part_reserved_fail_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_exact_nid_numa_part_reserved_fail_generic_check);
+ run_bottom_up(alloc_exact_nid_numa_part_reserved_fail_generic_check);
+
+ return 0;
+}
+
+static int alloc_exact_nid_numa_split_range_high_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_exact_nid_numa_split_range_high_generic_check);
+ run_bottom_up(alloc_exact_nid_numa_split_range_high_generic_check);
+
+ return 0;
+}
+
+static int alloc_exact_nid_numa_no_overlap_high_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_exact_nid_numa_no_overlap_high_generic_check);
+ run_bottom_up(alloc_exact_nid_numa_no_overlap_high_generic_check);
+
+ return 0;
+}
+
+static int alloc_exact_nid_numa_large_region_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_exact_nid_numa_large_region_generic_check);
+ run_bottom_up(alloc_exact_nid_numa_large_region_generic_check);
+
+ return 0;
+}
+
+static int alloc_exact_nid_numa_reserved_full_merge_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_exact_nid_numa_reserved_full_merge_generic_check);
+ run_bottom_up(alloc_exact_nid_numa_reserved_full_merge_generic_check);
+
+ return 0;
+}
+
+static int alloc_exact_nid_numa_split_all_reserved_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_exact_nid_numa_split_all_reserved_generic_check);
+ run_bottom_up(alloc_exact_nid_numa_split_all_reserved_generic_check);
+
+ return 0;
+}
+
+int __memblock_alloc_exact_nid_numa_checks(void)
+{
+ test_print("Running %s NUMA tests...\n", FUNC_NAME);
+
+ alloc_exact_nid_numa_simple_check();
+ alloc_exact_nid_numa_part_reserved_check();
+ alloc_exact_nid_numa_split_range_low_check();
+ alloc_exact_nid_numa_no_overlap_split_check();
+ alloc_exact_nid_numa_no_overlap_low_check();
+
+ alloc_exact_nid_numa_small_node_check();
+ alloc_exact_nid_numa_node_reserved_check();
+ alloc_exact_nid_numa_part_reserved_fail_check();
+ alloc_exact_nid_numa_split_range_high_check();
+ alloc_exact_nid_numa_no_overlap_high_check();
+ alloc_exact_nid_numa_large_region_check();
+ alloc_exact_nid_numa_reserved_full_merge_check();
+ alloc_exact_nid_numa_split_all_reserved_check();
+
+ return 0;
+}
+
+int memblock_alloc_exact_nid_checks(void)
+{
+ prefix_reset();
+ prefix_push(FUNC_NAME);
+
+ reset_memblock_attributes();
+ dummy_physical_memory_init();
+
+ memblock_alloc_exact_nid_range_checks();
+ memblock_alloc_exact_nid_numa_checks();
+
+ dummy_physical_memory_cleanup();
+
+ prefix_pop();
+
+ return 0;
+}
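
The exported *_checks() entry points are meant to be driven from the test binary's main(); a minimal sketch of such a driver, with the call order being an assumption:

    /* Hypothetical driver; the real one lives elsewhere in tools/testing/memblock. */
    int main(int argc, char **argv)
    {
        memblock_alloc_checks();
        memblock_alloc_exact_nid_checks();

        return 0;
    }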
diff --git a/tools/testing/memblock/tests/alloc_exact_nid_api.h b/tools/testing/memblock/tests/alloc_exact_nid_api.h
new file mode 100644
index 000000000000..cef419d55d2a
--- /dev/null
+++ b/tools/testing/memblock/tests/alloc_exact_nid_api.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _MEMBLOCK_ALLOC_EXACT_NID_H
+#define _MEMBLOCK_ALLOC_EXACT_NID_H
+
+#include "common.h"
+
+int memblock_alloc_exact_nid_checks(void);
+int __memblock_alloc_exact_nid_numa_checks(void);
+
+#ifdef CONFIG_NUMA
+static inline int memblock_alloc_exact_nid_numa_checks(void)
+{
+ __memblock_alloc_exact_nid_numa_checks();
+ return 0;
+}
+
+#else
+static inline int memblock_alloc_exact_nid_numa_checks(void)
+{
+ return 0;
+}
+
+#endif /* CONFIG_NUMA */
+
+#endif
diff --git a/tools/testing/memblock/tests/alloc_helpers_api.c b/tools/testing/memblock/tests/alloc_helpers_api.c
new file mode 100644
index 000000000000..e5362cfd2ff3
--- /dev/null
+++ b/tools/testing/memblock/tests/alloc_helpers_api.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include "alloc_helpers_api.h"
+
+/*
+ * A simple test that tries to allocate a memory region above a specified,
+ * aligned address:
+ *
+ * +
+ * | +-----------+ |
+ * | | rgn | |
+ * +----------+-----------+---------+
+ * ^
+ * |
+ * Aligned min_addr
+ *
+ * Expect to allocate a cleared region at the minimal memory address.
+ */
+static int alloc_from_simple_generic_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_16;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ min_addr = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
+
+ allocated_ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_MEM_EQ(allocated_ptr, 0, size);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, min_addr);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region above a certain address.
+ * The minimal address here is not aligned:
+ *
+ * + +
+ * | + +---------+ |
+ * | | | rgn | |
+ * +------+------+---------+------------+
+ * ^ ^------.
+ * | |
+ * min_addr Aligned address
+ * boundary
+ *
+ * Expect to allocate a cleared region at the closest aligned memory address.
+ */
+static int alloc_from_misaligned_generic_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_32;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ /* A misaligned address */
+ min_addr = memblock_end_of_DRAM() - (SMP_CACHE_BYTES * 2 - 1);
+
+ allocated_ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_MEM_EQ(allocated_ptr, 0, size);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - SMP_CACHE_BYTES);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
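
The "closest aligned memory address" asserted above follows the usual power-of-two round-up of min_addr; as a sketch:

    /* Round addr up to the next align boundary; align must be a power of two. */
    static inline phys_addr_t align_up_pow2(phys_addr_t addr, phys_addr_t align)
    {
        return (addr + align - 1) & ~(align - 1);
    }

With min_addr = memblock_end_of_DRAM() - (SMP_CACHE_BYTES * 2 - 1), this rounds up to memblock_end_of_DRAM() - SMP_CACHE_BYTES, matching the base asserted above.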
+
+/*
+ * A test that tries to allocate a memory region above an address that is too
+ * close to the end of the memory:
+ *
+ * + +
+ * | +--------+---+ |
+ * | | rgn + | |
+ * +-----------+--------+---+------+
+ * ^ ^
+ * | |
+ * | min_addr
+ * |
+ * Aligned address
+ * boundary
+ *
+ * Expect to prioritize granting memory over satisfying the minimal address
+ * requirement.
+ */
+static int alloc_from_top_down_high_addr_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_32;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ /* The address is too close to the end of the memory */
+ min_addr = memblock_end_of_DRAM() - SZ_16;
+
+ allocated_ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - SMP_CACHE_BYTES);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region when there is no space
+ * available above the minimal address:
+ *
+ * +
+ * | +---------+-------------|
+ * | | rgn | |
+ * +--------+---------+-------------+
+ * ^
+ * |
+ * min_addr
+ *
+ * Expect to prioritize granting memory over satisfying the minimal address
+ * requirement and to allocate next to the previously reserved region. The
+ * regions get merged into one.
+ */
+static int alloc_from_top_down_no_space_above_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t r1_size = SZ_64;
+ phys_addr_t r2_size = SZ_2;
+ phys_addr_t total_size = r1_size + r2_size;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ min_addr = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
+
+ /* No space above this address */
+ memblock_reserve_kern(min_addr, r2_size);
+
+ allocated_ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->base, min_addr - r1_size);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region with a minimal address below
+ * the start address of the available memory. As the allocation is top-down,
+ * first reserve a region that will force allocation near the start.
+ * Expect successful allocation and merge of both regions.
+ */
+static int alloc_from_top_down_min_addr_cap_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t r1_size = SZ_64;
+ phys_addr_t min_addr;
+ phys_addr_t start_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ start_addr = (phys_addr_t)memblock_start_of_DRAM();
+ min_addr = start_addr - SMP_CACHE_BYTES * 3;
+
+ memblock_reserve_kern(start_addr + r1_size, MEM_SIZE - r1_size);
+
+ allocated_ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->base, start_addr);
+ ASSERT_EQ(rgn->size, MEM_SIZE);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, MEM_SIZE);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region above an address that is too
+ * close to the end of the memory:
+ *
+ * +
+ * |-----------+ + |
+ * | rgn | | |
+ * +-----------+--------------+-----+
+ * ^ ^
+ * | |
+ * Aligned address min_addr
+ * boundary
+ *
+ * Expect to prioritize granting memory over satisfying the minimal address
+ * requirement. Allocation happens at the beginning of the available memory.
+ */
+static int alloc_from_bottom_up_high_addr_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_32;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ /* The address is too close to the end of the memory */
+ min_addr = memblock_end_of_DRAM() - SZ_8;
+
+ allocated_ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region when there is no space
+ * available above the minimal address:
+ *
+ * +
+ * |-----------+ +-------------------|
+ * | rgn | | |
+ * +-----------+----+-------------------+
+ * ^
+ * |
+ * min_addr
+ *
+ * Expect to prioritize granting memory over satisfying the minimal address
+ * requirement and to allocate at the beginning of the available memory.
+ */
+static int alloc_from_bottom_up_no_space_above_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t r1_size = SZ_64;
+ phys_addr_t min_addr;
+ phys_addr_t r2_size;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ min_addr = memblock_start_of_DRAM() + SZ_128;
+ r2_size = memblock_end_of_DRAM() - min_addr;
+
+ /* No space above this address */
+ memblock_reserve(min_addr - SMP_CACHE_BYTES, r2_size);
+
+ allocated_ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
+ ASSERT_EQ(rgn->size, r1_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, r1_size + r2_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region with a minimal address below
+ * the start address of the available memory. Expect to allocate a region
+ * at the beginning of the available memory.
+ */
+static int alloc_from_bottom_up_min_addr_cap_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t r1_size = SZ_64;
+ phys_addr_t min_addr;
+ phys_addr_t start_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ start_addr = (phys_addr_t)memblock_start_of_DRAM();
+ min_addr = start_addr - SMP_CACHE_BYTES * 3;
+
+ allocated_ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->base, start_addr);
+ ASSERT_EQ(rgn->size, r1_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, r1_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/* Test case wrappers */
+static int alloc_from_simple_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_from_simple_generic_check);
+ run_bottom_up(alloc_from_simple_generic_check);
+
+ return 0;
+}
+
+static int alloc_from_misaligned_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_from_misaligned_generic_check);
+ run_bottom_up(alloc_from_misaligned_generic_check);
+
+ return 0;
+}
+
+static int alloc_from_high_addr_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_from_top_down_high_addr_check();
+ memblock_set_bottom_up(true);
+ alloc_from_bottom_up_high_addr_check();
+
+ return 0;
+}
+
+static int alloc_from_no_space_above_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_from_top_down_no_space_above_check();
+ memblock_set_bottom_up(true);
+ alloc_from_bottom_up_no_space_above_check();
+
+ return 0;
+}
+
+static int alloc_from_min_addr_cap_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_from_top_down_min_addr_cap_check();
+ memblock_set_bottom_up(true);
+ alloc_from_bottom_up_min_addr_cap_check();
+
+ return 0;
+}
+
+int memblock_alloc_helpers_checks(void)
+{
+ const char *func_testing = "memblock_alloc_from";
+
+ prefix_reset();
+ prefix_push(func_testing);
+ test_print("Running %s tests...\n", func_testing);
+
+ reset_memblock_attributes();
+ dummy_physical_memory_init();
+
+ alloc_from_simple_check();
+ alloc_from_misaligned_check();
+ alloc_from_high_addr_check();
+ alloc_from_no_space_above_check();
+ alloc_from_min_addr_cap_check();
+
+ dummy_physical_memory_cleanup();
+
+ prefix_pop();
+
+ return 0;
+}
diff --git a/tools/testing/memblock/tests/alloc_helpers_api.h b/tools/testing/memblock/tests/alloc_helpers_api.h
new file mode 100644
index 000000000000..c9e4827b1623
--- /dev/null
+++ b/tools/testing/memblock/tests/alloc_helpers_api.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _MEMBLOCK_ALLOC_HELPERS_H
+#define _MEMBLOCK_ALLOC_HELPERS_H
+
+#include "common.h"
+
+int memblock_alloc_helpers_checks(void);
+
+#endif
diff --git a/tools/testing/memblock/tests/alloc_nid_api.c b/tools/testing/memblock/tests/alloc_nid_api.c
new file mode 100644
index 000000000000..562e4701b0e0
--- /dev/null
+++ b/tools/testing/memblock/tests/alloc_nid_api.c
@@ -0,0 +1,2733 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include "alloc_nid_api.h"
+
+static int alloc_nid_test_flags = TEST_F_NONE;
+
+/*
+ * Fraction of MEM_SIZE assigned to each node, in basis-point units
+ * (one hundredth of 1%, i.e. 1/10000).
+ */
+static const unsigned int node_fractions[] = {
+ 2500, /* 1/4 */
+ 625, /* 1/16 */
+ 1250, /* 1/8 */
+ 1250, /* 1/8 */
+ 625, /* 1/16 */
+ 625, /* 1/16 */
+ 2500, /* 1/4 */
+ 625, /* 1/16 */
+};
+
+static inline const char *get_memblock_alloc_nid_name(int flags)
+{
+ if (flags & TEST_F_EXACT)
+ return "memblock_alloc_exact_nid_raw";
+ if (flags & TEST_F_RAW)
+ return "memblock_alloc_try_nid_raw";
+ return "memblock_alloc_try_nid";
+}
+
+static inline void *run_memblock_alloc_nid(phys_addr_t size,
+ phys_addr_t align,
+ phys_addr_t min_addr,
+ phys_addr_t max_addr, int nid)
+{
+ assert(!(alloc_nid_test_flags & TEST_F_EXACT) ||
+ (alloc_nid_test_flags & TEST_F_RAW));
+ /*
+ * TEST_F_EXACT should be checked before TEST_F_RAW since
+ * memblock_alloc_exact_nid_raw() performs raw allocations.
+ */
+ if (alloc_nid_test_flags & TEST_F_EXACT)
+ return memblock_alloc_exact_nid_raw(size, align, min_addr,
+ max_addr, nid);
+ if (alloc_nid_test_flags & TEST_F_RAW)
+ return memblock_alloc_try_nid_raw(size, align, min_addr,
+ max_addr, nid);
+ return memblock_alloc_try_nid(size, align, min_addr, max_addr, nid);
+}
+
+/*
+ * A simple test that tries to allocate a memory region within min_addr and
+ * max_addr range:
+ *
+ * + +
+ * | + +-----------+ |
+ * | | | rgn | |
+ * +----+-------+-----------+------+
+ * ^ ^
+ * | |
+ * min_addr max_addr
+ *
+ * Expect to allocate a region that ends at max_addr.
+ */
+static int alloc_nid_top_down_simple_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_128;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+ phys_addr_t rgn_end;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
+ max_addr = min_addr + SZ_512;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+ rgn_end = rgn->base + rgn->size;
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, max_addr - size);
+ ASSERT_EQ(rgn_end, max_addr);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to allocate a memory region within min_addr and
+ * max_addr range, where the end address is misaligned:
+ *
+ * + + +
+ * | + +---------+ + |
+ * | | | rgn | | |
+ * +------+-------+---------+--+----+
+ * ^ ^ ^
+ * | | |
+ * min_addr | max_addr
+ * |
+ * Aligned address
+ * boundary
+ *
+ * Expect to allocate an aligned region that ends before max_addr.
+ */
+static int alloc_nid_top_down_end_misaligned_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_128;
+ phys_addr_t misalign = SZ_2;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+ phys_addr_t rgn_end;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
+ max_addr = min_addr + SZ_512 + misalign;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+ rgn_end = rgn->base + rgn->size;
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
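+ /*
+ * Top-down allocation rounds the base down to SMP_CACHE_BYTES, leaving
+ * the misalign bytes just below max_addr unused.
+ */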
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, max_addr - size - misalign);
+ ASSERT_LT(rgn_end, max_addr);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to allocate a memory region that spans over the
+ * min_addr and max_addr range:
+ *
+ * + +
+ * | +---------------+ |
+ * | | rgn | |
+ * +------+---------------+-------+
+ * ^ ^
+ * | |
+ * min_addr max_addr
+ *
+ * Expect to allocate a region that starts at min_addr and ends at
+ * max_addr, given that min_addr is aligned.
+ */
+static int alloc_nid_exact_address_generic_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_1K;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+ phys_addr_t rgn_end;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
+ max_addr = min_addr + size;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+ rgn_end = rgn->base + rgn->size;
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, min_addr);
+ ASSERT_EQ(rgn_end, max_addr);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that can't fit into the
+ * min_addr and max_addr range:
+ *
+ * + + +
+ * | +----------+-----+ |
+ * | | rgn + | |
+ * +--------+----------+-----+----+
+ * ^ ^ ^
+ * | | |
+ * Aligned | max_addr
+ * address |
+ * boundary min_addr
+ *
+ * Expect to drop the lower limit and allocate a memory region which
+ * ends at max_addr (if the address is aligned).
+ */
+static int alloc_nid_top_down_narrow_range_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_256;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ min_addr = memblock_start_of_DRAM() + SZ_512;
+ max_addr = min_addr + SMP_CACHE_BYTES;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
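+ /*
+ * The range is only SMP_CACHE_BYTES wide, so the lower limit is dropped
+ * and the region still ends at max_addr.
+ */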
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, max_addr - size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that can't fit into the
+ * min_addr and max_addr range, with max_addr being too close to the beginning
+ * of the available memory:
+ *
+ * +-------------+
+ * | new |
+ * +-------------+
+ * + +
+ * | + |
+ * | | |
+ * +-------+--------------+
+ * ^ ^
+ * | |
+ * | max_addr
+ * |
+ * min_addr
+ *
+ * Expect no allocation to happen.
+ */
+static int alloc_nid_low_max_generic_check(void)
+{
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_1K;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ min_addr = memblock_start_of_DRAM();
+ max_addr = min_addr + SMP_CACHE_BYTES;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region within min_addr and max_addr
+ * range, with min_addr being so close that it's next to an allocated region:
+ *
+ * + +
+ * | +--------+---------------|
+ * | | r1 | rgn |
+ * +-------+--------+---------------+
+ * ^ ^
+ * | |
+ * min_addr max_addr
+ *
+ * Expect a merge of both regions. Only the region size gets updated.
+ */
+static int alloc_nid_min_reserved_generic_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t r1_size = SZ_128;
+ phys_addr_t r2_size = SZ_64;
+ phys_addr_t total_size = r1_size + r2_size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+ phys_addr_t reserved_base;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ max_addr = memblock_end_of_DRAM();
+ min_addr = max_addr - r2_size;
+ reserved_base = min_addr - r1_size;
+
+ memblock_reserve_kern(reserved_base, r1_size);
+
+ allocated_ptr = run_memblock_alloc_nid(r2_size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, reserved_base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region within min_addr and max_addr
+ * range, with max_addr being so close that it's next to an allocated region:
+ *
+ * + +
+ * | +-------------+--------|
+ * | | rgn | r1 |
+ * +----------+-------------+--------+
+ * ^ ^
+ * | |
+ * min_addr max_addr
+ *
+ * Expect a merge of both regions. The region count is unchanged; the
+ * total size gets updated.
+ */
+static int alloc_nid_max_reserved_generic_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t r1_size = SZ_64;
+ phys_addr_t r2_size = SZ_128;
+ phys_addr_t total_size = r1_size + r2_size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ max_addr = memblock_end_of_DRAM() - r1_size;
+ min_addr = max_addr - r2_size;
+
+ memblock_reserve_kern(max_addr, r1_size);
+
+ allocated_ptr = run_memblock_alloc_nid(r2_size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, min_addr);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range, when
+ * there are two reserved regions at the borders, with a gap big enough to fit
+ * a new region:
+ *
+ * + +
+ * | +--------+ +-------+------+ |
+ * | | r2 | | rgn | r1 | |
+ * +----+--------+---+-------+------+--+
+ * ^ ^
+ * | |
+ * min_addr max_addr
+ *
+ * Expect to merge the new region with r1. The other region, r2, is not
+ * modified; the total size field gets updated.
+ */
+static int alloc_nid_top_down_reserved_with_space_check(void)
+{
+ struct memblock_region *rgn1 = &memblock.reserved.regions[1];
+ struct memblock_region *rgn2 = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ struct region r1, r2;
+ phys_addr_t r3_size = SZ_64;
+ phys_addr_t gap_size = SMP_CACHE_BYTES;
+ phys_addr_t total_size;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
+ r1.size = SMP_CACHE_BYTES;
+
+ r2.size = SZ_128;
+ r2.base = r1.base - (r3_size + gap_size + r2.size);
+
+ total_size = r1.size + r2.size + r3_size;
+ min_addr = r2.base + r2.size;
+ max_addr = r1.base;
+
+ memblock_reserve_kern(r1.base, r1.size);
+ memblock_reserve_kern(r2.base, r2.size);
+
+ allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn1->size, r1.size + r3_size);
+ ASSERT_EQ(rgn1->base, max_addr - r3_size);
+
+ ASSERT_EQ(rgn2->size, r2.size);
+ ASSERT_EQ(rgn2->base, r2.base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range, when
+ * there are two reserved regions at the borders, with a gap of a size equal to
+ * the size of the new region:
+ *
+ * + +
+ * | +--------+--------+--------+ |
+ * | | r2 | r3 | r1 | |
+ * +-----+--------+--------+--------+-----+
+ * ^ ^
+ * | |
+ * min_addr max_addr
+ *
+ * Expect to merge all of the regions into one. The region counter and total
+ * size fields get updated.
+ */
+static int alloc_nid_reserved_full_merge_generic_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ struct region r1, r2;
+ phys_addr_t r3_size = SZ_64;
+ phys_addr_t total_size;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
+ r1.size = SMP_CACHE_BYTES;
+
+ r2.size = SZ_128;
+ r2.base = r1.base - (r3_size + r2.size);
+
+ total_size = r1.size + r2.size + r3_size;
+ min_addr = r2.base + r2.size;
+ max_addr = r1.base;
+
+ memblock_reserve_kern(r1.base, r1.size);
+ memblock_reserve_kern(r2.base, r2.size);
+
+ allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, r2.base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range, when
+ * there are two reserved regions at the borders, with a gap that can't fit
+ * a new region:
+ *
+ * + +
+ * | +----------+------+ +------+ |
+ * | | r3 | r2 | | r1 | |
+ * +--+----------+------+----+------+---+
+ * ^ ^
+ * | |
+ * | max_addr
+ * |
+ * min_addr
+ *
+ * Expect to merge the new region with r2. The other region, r1, is not
+ * modified; the total size counter gets updated.
+ */
+static int alloc_nid_top_down_reserved_no_space_check(void)
+{
+ struct memblock_region *rgn1 = &memblock.reserved.regions[1];
+ struct memblock_region *rgn2 = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ struct region r1, r2;
+ phys_addr_t r3_size = SZ_256;
+ phys_addr_t gap_size = SMP_CACHE_BYTES;
+ phys_addr_t total_size;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
+ r1.size = SMP_CACHE_BYTES;
+
+ r2.size = SZ_128;
+ r2.base = r1.base - (r2.size + gap_size);
+
+ total_size = r1.size + r2.size + r3_size;
+ min_addr = r2.base + r2.size;
+ max_addr = r1.base;
+
+ memblock_reserve_kern(r1.base, r1.size);
+ memblock_reserve_kern(r2.base, r2.size);
+
+ allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn1->size, r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
+
+ ASSERT_EQ(rgn2->size, r2.size + r3_size);
+ ASSERT_EQ(rgn2->base, r2.base - r3_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_add range, but
+ * it's too narrow and everything else is reserved:
+ *
+ * +-----------+
+ * | new |
+ * +-----------+
+ * + +
+ * |--------------+ +----------|
+ * | r2 | | r1 |
+ * +--------------+------+----------+
+ * ^ ^
+ * | |
+ * | max_addr
+ * |
+ * min_addr
+ *
+ * Expect no allocation to happen.
+ */
+static int alloc_nid_reserved_all_generic_check(void)
+{
+ void *allocated_ptr = NULL;
+ struct region r1, r2;
+ phys_addr_t r3_size = SZ_256;
+ phys_addr_t gap_size = SMP_CACHE_BYTES;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
+ r1.size = SMP_CACHE_BYTES;
+
+ r2.size = MEM_SIZE - (r1.size + gap_size);
+ r2.base = memblock_start_of_DRAM();
+
+ min_addr = r2.base + r2.size;
+ max_addr = r1.base;
+
+ memblock_reserve(r1.base, r1.size);
+ memblock_reserve(r2.base, r2.size);
+
+ allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region, where max_addr is
+ * bigger than the end address of the available memory. Expect to allocate
+ * a region that ends at the end of the memory.
+ */
+static int alloc_nid_top_down_cap_max_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_256;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ min_addr = memblock_end_of_DRAM() - SZ_1K;
+ max_addr = memblock_end_of_DRAM() + SZ_256;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region, where min_addr is
+ * smaller than the start address of the available memory. Expect to allocate
+ * a region that ends at the end of the memory.
+ */
+static int alloc_nid_top_down_cap_min_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_1K;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ min_addr = memblock_start_of_DRAM() - SZ_256;
+ max_addr = memblock_end_of_DRAM();
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to allocate a memory region within min_addr and
+ * max_addr range:
+ *
+ * + +
+ * | +-----------+ | |
+ * | | rgn | | |
+ * +----+-----------+-----------+------+
+ * ^ ^
+ * | |
+ * min_addr max_addr
+ *
+ * Expect to allocate a region that ends before max_addr.
+ */
+static int alloc_nid_bottom_up_simple_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_128;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+ phys_addr_t rgn_end;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
+ max_addr = min_addr + SZ_512;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+ rgn_end = rgn->base + rgn->size;
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, min_addr);
+ ASSERT_LT(rgn_end, max_addr);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to allocate a memory region within min_addr and
+ * max_addr range, where the start address is misaligned:
+ *
+ * + +
+ * | + +-----------+ + |
+ * | | | rgn | | |
+ * +-----+---+-----------+-----+-----+
+ * ^ ^----. ^
+ * | | |
+ * min_addr | max_addr
+ * |
+ * Aligned address
+ * boundary
+ *
+ * Expect to allocate an aligned region that ends before max_addr.
+ */
+static int alloc_nid_bottom_up_start_misaligned_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_128;
+ phys_addr_t misalign = SZ_2;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+ phys_addr_t rgn_end;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ min_addr = memblock_start_of_DRAM() + misalign;
+ max_addr = min_addr + SZ_512;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+ rgn_end = rgn->base + rgn->size;
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
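+ /*
+ * Bottom-up allocation rounds the base up to SMP_CACHE_BYTES, skipping
+ * the first SMP_CACHE_BYTES - misalign bytes above min_addr.
+ */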
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));
+ ASSERT_LT(rgn_end, max_addr);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that can't fit into the
+ * min_addr and max_addr range:
+ *
+ * + +
+ * |---------+ + + |
+ * | rgn | | | |
+ * +---------+---------+----+------+
+ * ^ ^
+ * | |
+ * | max_addr
+ * |
+ * min_addr
+ *
+ * Expect to drop the lower limit and allocate a memory region which
+ * starts at the beginning of the available memory.
+ */
+static int alloc_nid_bottom_up_narrow_range_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_256;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ min_addr = memblock_start_of_DRAM() + SZ_512;
+ max_addr = min_addr + SMP_CACHE_BYTES;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
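+ /*
+ * As in the top-down case, the narrow range forces the lower limit to
+ * be dropped; the bottom-up allocation then starts at the base of DRAM.
+ */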
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range, when
+ * there are two reserved regions at the borders, with a gap big enough to fit
+ * a new region:
+ *
+ * + +
+ * | +--------+-------+ +------+ |
+ * | | r2 | rgn | | r1 | |
+ * +----+--------+-------+---+------+--+
+ * ^ ^
+ * | |
+ * min_addr max_addr
+ *
+ * Expect to merge the new region with r2. The other region, r1, is not
+ * modified; the total size field gets updated.
+ */
+static int alloc_nid_bottom_up_reserved_with_space_check(void)
+{
+ struct memblock_region *rgn1 = &memblock.reserved.regions[1];
+ struct memblock_region *rgn2 = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ struct region r1, r2;
+ phys_addr_t r3_size = SZ_64;
+ phys_addr_t gap_size = SMP_CACHE_BYTES;
+ phys_addr_t total_size;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
+ r1.size = SMP_CACHE_BYTES;
+
+ r2.size = SZ_128;
+ r2.base = r1.base - (r3_size + gap_size + r2.size);
+
+ total_size = r1.size + r2.size + r3_size;
+ min_addr = r2.base + r2.size;
+ max_addr = r1.base;
+
+ memblock_reserve_kern(r1.base, r1.size);
+ memblock_reserve_kern(r2.base, r2.size);
+
+ allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn1->size, r1.size);
+ ASSERT_EQ(rgn1->base, max_addr);
+
+ ASSERT_EQ(rgn2->size, r2.size + r3_size);
+ ASSERT_EQ(rgn2->base, r2.base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range, when
+ * there are two reserved regions at the borders, with a gap of a size equal to
+ * the size of the new region:
+ *
+ * + +
+ * |----------+ +------+ +----+ |
+ * | r3 | | r2 | | r1 | |
+ * +----------+----+------+---+----+--+
+ * ^ ^
+ * | |
+ * | max_addr
+ * |
+ * min_addr
+ *
+ * Expect to drop the lower limit and allocate memory at the beginning of the
+ * available memory. The region counter and total size fields get updated.
+ * Other regions are not modified.
+ */
+static int alloc_nid_bottom_up_reserved_no_space_check(void)
+{
+ struct memblock_region *rgn1 = &memblock.reserved.regions[2];
+ struct memblock_region *rgn2 = &memblock.reserved.regions[1];
+ struct memblock_region *rgn3 = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ struct region r1, r2;
+ phys_addr_t r3_size = SZ_256;
+ phys_addr_t gap_size = SMP_CACHE_BYTES;
+ phys_addr_t total_size;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
+ r1.size = SMP_CACHE_BYTES;
+
+ r2.size = SZ_128;
+ r2.base = r1.base - (r2.size + gap_size);
+
+ total_size = r1.size + r2.size + r3_size;
+ min_addr = r2.base + r2.size;
+ max_addr = r1.base;
+
+ memblock_reserve(r1.base, r1.size);
+ memblock_reserve(r2.base, r2.size);
+
+ allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn3->size, r3_size);
+ ASSERT_EQ(rgn3->base, memblock_start_of_DRAM());
+
+ ASSERT_EQ(rgn2->size, r2.size);
+ ASSERT_EQ(rgn2->base, r2.base);
+
+ ASSERT_EQ(rgn1->size, r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 3);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region, where max_addr is
+ * bigger than the end address of the available memory. Expect to allocate
+ * a region that starts at min_addr.
+ */
+static int alloc_nid_bottom_up_cap_max_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_256;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ min_addr = memblock_start_of_DRAM() + SZ_1K;
+ max_addr = memblock_end_of_DRAM() + SZ_256;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, min_addr);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region, where min_addr is
+ * smaller than the start address of the available memory. Expect to allocate
+ * a region at the beginning of the available memory.
+ */
+static int alloc_nid_bottom_up_cap_min_check(void)
+{
+ struct memblock_region *rgn = &memblock.reserved.regions[0];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_1K;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_memblock();
+
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM() - SZ_256;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/* Test case wrappers for range tests */
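+/*
+ * Direction-specific checks flip the allocation direction explicitly with
+ * memblock_set_bottom_up(), while generic checks are run in both directions
+ * via the run_top_down() and run_bottom_up() helpers.
+ */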
+static int alloc_nid_simple_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_simple_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_simple_check();
+
+ return 0;
+}
+
+static int alloc_nid_misaligned_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_end_misaligned_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_start_misaligned_check();
+
+ return 0;
+}
+
+static int alloc_nid_narrow_range_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_narrow_range_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_narrow_range_check();
+
+ return 0;
+}
+
+static int alloc_nid_reserved_with_space_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_reserved_with_space_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_reserved_with_space_check();
+
+ return 0;
+}
+
+static int alloc_nid_reserved_no_space_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_reserved_no_space_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_reserved_no_space_check();
+
+ return 0;
+}
+
+static int alloc_nid_cap_max_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_cap_max_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_cap_max_check();
+
+ return 0;
+}
+
+static int alloc_nid_cap_min_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_cap_min_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_cap_min_check();
+
+ return 0;
+}
+
+static int alloc_nid_min_reserved_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_nid_min_reserved_generic_check);
+ run_bottom_up(alloc_nid_min_reserved_generic_check);
+
+ return 0;
+}
+
+static int alloc_nid_max_reserved_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_nid_max_reserved_generic_check);
+ run_bottom_up(alloc_nid_max_reserved_generic_check);
+
+ return 0;
+}
+
+static int alloc_nid_exact_address_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_nid_exact_address_generic_check);
+ run_bottom_up(alloc_nid_exact_address_generic_check);
+
+ return 0;
+}
+
+static int alloc_nid_reserved_full_merge_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_nid_reserved_full_merge_generic_check);
+ run_bottom_up(alloc_nid_reserved_full_merge_generic_check);
+
+ return 0;
+}
+
+static int alloc_nid_reserved_all_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_nid_reserved_all_generic_check);
+ run_bottom_up(alloc_nid_reserved_all_generic_check);
+
+ return 0;
+}
+
+static int alloc_nid_low_max_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_nid_low_max_generic_check);
+ run_bottom_up(alloc_nid_low_max_generic_check);
+
+ return 0;
+}
+
+static int memblock_alloc_nid_range_checks(void)
+{
+ test_print("Running %s range tests...\n",
+ get_memblock_alloc_nid_name(alloc_nid_test_flags));
+
+ alloc_nid_simple_check();
+ alloc_nid_misaligned_check();
+ alloc_nid_narrow_range_check();
+ alloc_nid_reserved_with_space_check();
+ alloc_nid_reserved_no_space_check();
+ alloc_nid_cap_max_check();
+ alloc_nid_cap_min_check();
+
+ alloc_nid_min_reserved_check();
+ alloc_nid_max_reserved_check();
+ alloc_nid_exact_address_check();
+ alloc_nid_reserved_full_merge_check();
+ alloc_nid_reserved_all_check();
+ alloc_nid_low_max_check();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * has enough memory to allocate a region of the requested size.
+ * Expect to allocate an aligned region at the end of the requested node.
+ */
+static int alloc_nid_top_down_numa_simple_check(void)
+{
+ int nid_req = 3;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ ASSERT_LE(SZ_4, req_node->size);
+ size = req_node->size / SZ_4;
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
+ ASSERT_LE(req_node->base, new_rgn->base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * does not have enough memory to allocate a region of the requested size:
+ *
+ * | +-----+ +------------------+ |
+ * | | req | | expected | |
+ * +---+-----+----------+------------------+-----+
+ *
+ * | +---------+ |
+ * | | rgn | |
+ * +-----------------------------+---------+-----+
+ *
+ * Expect to allocate an aligned region at the end of the last node that has
+ * enough memory (in this case, nid = 6) after falling back to NUMA_NO_NODE.
+ */
+static int alloc_nid_top_down_numa_small_node_check(void)
+{
+ int nid_req = 1;
+ int nid_exp = 6;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
+ void *allocated_ptr = NULL;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ size = SZ_2 * req_node->size;
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
+ ASSERT_LE(exp_node->base, new_rgn->base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * is fully reserved:
+ *
+ * | +---------+ +------------------+ |
+ * | |requested| | expected | |
+ * +--------------+---------+------------+------------------+-----+
+ *
+ * | +---------+ +---------+ |
+ * | | reserved| | new | |
+ * +--------------+---------+---------------------+---------+-----+
+ *
+ * Expect to allocate an aligned region at the end of the last node that is
+ * large enough and has enough unreserved memory (in this case, nid = 6) after
+ * falling back to NUMA_NO_NODE. The region count and total size get updated.
+ */
+static int alloc_nid_top_down_numa_node_reserved_check(void)
+{
+ int nid_req = 2;
+ int nid_exp = 6;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[1];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
+ void *allocated_ptr = NULL;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ size = req_node->size;
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
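+ /* Fully reserve the requested node to force the NUMA_NO_NODE fallback. */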
+ memblock_reserve(req_node->base, req_node->size);
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
+ ASSERT_LE(exp_node->base, new_rgn->base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * is partially reserved but has enough memory for the allocated region:
+ *
+ * | +---------------------------------------+ |
+ * | | requested | |
+ * +-----------+---------------------------------------+----------+
+ *
+ * | +------------------+ +-----+ |
+ * | | reserved | | new | |
+ * +-----------+------------------+--------------+-----+----------+
+ *
+ * Expect to allocate an aligned region at the end of the requested node. The
+ * region count and total size get updated.
+ */
+static int alloc_nid_top_down_numa_part_reserved_check(void)
+{
+ int nid_req = 4;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[1];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ struct region r1;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ ASSERT_LE(SZ_8, req_node->size);
+ r1.base = req_node->base;
+ r1.size = req_node->size / SZ_2;
+ size = r1.size / SZ_4;
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ memblock_reserve(r1.base, r1.size);
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
+ ASSERT_LE(req_node->base, new_rgn->base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * is partially reserved and does not have enough contiguous memory for the
+ * allocated region:
+ *
+ * | +-----------------------+ +----------------------|
+ * | | requested | | expected |
+ * +-----------+-----------------------+---------+----------------------+
+ *
+ * | +----------+ +-----------|
+ * | | reserved | | new |
+ * +-----------------+----------+---------------------------+-----------+
+ *
+ * Expect to allocate an aligned region at the end of the last node that is
+ * large enough and has enough unreserved memory (in this case,
+ * nid = NUMA_NODES - 1) after falling back to NUMA_NO_NODE. The region count
+ * and total size get updated.
+ */
+static int alloc_nid_top_down_numa_part_reserved_fallback_check(void)
+{
+ int nid_req = 4;
+ int nid_exp = NUMA_NODES - 1;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[1];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
+ void *allocated_ptr = NULL;
+ struct region r1;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ ASSERT_LE(SZ_4, req_node->size);
+ size = req_node->size / SZ_2;
+ r1.base = req_node->base + (size / SZ_2);
+ r1.size = size;
+
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ memblock_reserve(r1.base, r1.size);
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
+ ASSERT_LE(exp_node->base, new_rgn->base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that spans over the min_addr
+ * and max_addr range and overlaps with two different nodes, where the first
+ * node is the requested node:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * | +-----------------------+-----------+ |
+ * | | requested | node3 | |
+ * +-----------+-----------------------+-----------+--------------+
+ * + +
+ * | +-----------+ |
+ * | | rgn | |
+ * +-----------------------+-----------+--------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region that ends at
+ * the end of the requested node.
+ */
+static int alloc_nid_top_down_numa_split_range_low_check(void)
+{
+ int nid_req = 2;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_512;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+ phys_addr_t req_node_end;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ req_node_end = region_end(req_node);
+ min_addr = req_node_end - SZ_256;
+ max_addr = min_addr + size;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, req_node_end - size);
+ ASSERT_LE(req_node->base, new_rgn->base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that spans over the min_addr
+ * and max_addr range and overlaps with two different nodes, where the second
+ * node is the requested node:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * | +--------------------------+---------+ |
+ * | | expected |requested| |
+ * +------+--------------------------+---------+----------------+
+ * + +
+ * | +---------+ |
+ * | | rgn | |
+ * +-----------------------+---------+--------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region that
+ * ends at the end of the first node that overlaps with the range.
+ */
+static int alloc_nid_top_down_numa_split_range_high_check(void)
+{
+ int nid_req = 3;
+ int nid_exp = nid_req - 1;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_512;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+ phys_addr_t exp_node_end;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ exp_node_end = region_end(exp_node);
+ min_addr = exp_node_end - SZ_256;
+ max_addr = min_addr + size;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, exp_node_end - size);
+ ASSERT_LE(exp_node->base, new_rgn->base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that spans over the min_addr
+ * and max_addr range and overlaps with two different nodes, where the requested
+ * node ends before min_addr:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * | +---------------+ +-------------+---------+ |
+ * | | requested | | node1 | node2 | |
+ * +----+---------------+--------+-------------+---------+----------+
+ * + +
+ * | +---------+ |
+ * | | rgn | |
+ * +----------+---------+-------------------------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region that ends at
+ * the end of the requested node.
+ */
+static int alloc_nid_top_down_numa_no_overlap_split_check(void)
+{
+ int nid_req = 2;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ struct memblock_region *node2 = &memblock.memory.regions[6];
+ void *allocated_ptr = NULL;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ size = SZ_512;
+ min_addr = node2->base - SZ_256;
+ max_addr = min_addr + size;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
+ ASSERT_LE(req_node->base, new_rgn->base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range when
+ * the requested node and the range do not overlap, and the requested node ends
+ * before min_addr. The range overlaps with multiple nodes along node
+ * boundaries:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * |-----------+ +----------+----...----+----------+ |
+ * | requested | | min node | ... | max node | |
+ * +-----------+-----------+----------+----...----+----------+------+
+ * + +
+ * | +-----+ |
+ * | | rgn | |
+ * +---------------------------------------------------+-----+------+
+ *
+ * Expect to allocate a memory region at the end of the final node in
+ * the range after falling back to NUMA_NO_NODE.
+ */
+static int alloc_nid_top_down_numa_no_overlap_low_check(void)
+{
+ int nid_req = 0;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *min_node = &memblock.memory.regions[2];
+ struct memblock_region *max_node = &memblock.memory.regions[5];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_64;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ min_addr = min_node->base;
+ max_addr = region_end(max_node);
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, max_addr - size);
+ ASSERT_LE(max_node->base, new_rgn->base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range when
+ * the requested node and the range do not overlap, and the requested node starts
+ * after max_addr. The range overlaps with multiple nodes along node
+ * boundaries:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * | +----------+----...----+----------+ +-----------+ |
+ * | | min node | ... | max node | | requested | |
+ * +-----+----------+----...----+----------+--------+-----------+---+
+ * + +
+ * | +-----+ |
+ * | | rgn | |
+ * +---------------------------------+-----+------------------------+
+ *
+ * Expect to allocate a memory region at the end of the final node in
+ * the range after falling back to NUMA_NO_NODE.
+ */
+static int alloc_nid_top_down_numa_no_overlap_high_check(void)
+{
+ int nid_req = 7;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *min_node = &memblock.memory.regions[2];
+ struct memblock_region *max_node = &memblock.memory.regions[5];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_64;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ min_addr = min_node->base;
+ max_addr = region_end(max_node);
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, max_addr - size);
+ ASSERT_LE(max_node->base, new_rgn->base);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * has enough memory to allocate a region of the requested size.
+ * Expect to allocate an aligned region at the beginning of the requested node.
+ */
+static int alloc_nid_bottom_up_numa_simple_check(void)
+{
+ int nid_req = 3;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ ASSERT_LE(SZ_4, req_node->size);
+ size = req_node->size / SZ_4;
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, req_node->base);
+ ASSERT_LE(region_end(new_rgn), region_end(req_node));
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * does not have enough memory to allocate a region of the requested size:
+ *
+ * |----------------------+-----+ |
+ * | expected | req | |
+ * +----------------------+-----+----------------+
+ *
+ * |---------+ |
+ * | rgn | |
+ * +---------+-----------------------------------+
+ *
+ * Expect to allocate an aligned region at the beginning of the first node that
+ * has enough memory (in this case, nid = 0) after falling back to NUMA_NO_NODE.
+ */
+static int alloc_nid_bottom_up_numa_small_node_check(void)
+{
+ int nid_req = 1;
+ int nid_exp = 0;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
+ void *allocated_ptr = NULL;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ size = SZ_2 * req_node->size;
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, exp_node->base);
+ ASSERT_LE(region_end(new_rgn), region_end(exp_node));
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * is fully reserved:
+ *
+ * |----------------------+ +-----------+ |
+ * | expected | | requested | |
+ * +----------------------+-----+-----------+--------------------+
+ *
+ * |-----------+ +-----------+ |
+ * | new | | reserved | |
+ * +-----------+----------------+-----------+--------------------+
+ *
+ * Expect to allocate an aligned region at the beginning of the first node that
+ * is large enough and has enough unreserved memory (in this case, nid = 0)
+ * after falling back to NUMA_NO_NODE. The region count and total size get
+ * updated.
+ */
+static int alloc_nid_bottom_up_numa_node_reserved_check(void)
+{
+ int nid_req = 2;
+ int nid_exp = 0;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
+ void *allocated_ptr = NULL;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ size = req_node->size;
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ memblock_reserve(req_node->base, req_node->size);
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, exp_node->base);
+ ASSERT_LE(region_end(new_rgn), region_end(exp_node));
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * is partially reserved but has enough memory for the allocated region:
+ *
+ * | +---------------------------------------+ |
+ * | | requested | |
+ * +-----------+---------------------------------------+---------+
+ *
+ * | +------------------+-----+ |
+ * | | reserved | new | |
+ * +-----------+------------------+-----+------------------------+
+ *
+ * Expect to allocate an aligned region in the requested node that merges with
+ * the existing reserved region. The total size gets updated.
+ */
+static int alloc_nid_bottom_up_numa_part_reserved_check(void)
+{
+ int nid_req = 4;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ struct region r1;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+ phys_addr_t total_size;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ ASSERT_LE(SZ_8, req_node->size);
+ r1.base = req_node->base;
+ r1.size = req_node->size / SZ_2;
+ size = r1.size / SZ_4;
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+ total_size = size + r1.size;
+
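+ /* r1 covers the first half of the node; the new region merges with it. */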
+ memblock_reserve(r1.base, r1.size);
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, total_size);
+ ASSERT_EQ(new_rgn->base, req_node->base);
+ ASSERT_LE(region_end(new_rgn), region_end(req_node));
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * is partially reserved and does not have enough contiguous memory for the
+ * allocated region:
+ *
+ * |----------------------+ +-----------------------+ |
+ * | expected | | requested | |
+ * +----------------------+-------+-----------------------+---------+
+ *
+ * |-----------+ +----------+ |
+ * | new | | reserved | |
+ * +-----------+------------------------+----------+----------------+
+ *
+ * Expect to allocate an aligned region at the beginning of the first
+ * node that is large enough and has enough unreserved memory (in this case,
+ * nid = 0) after falling back to NUMA_NO_NODE. The region count and total size
+ * get updated.
+ */
+static int alloc_nid_bottom_up_numa_part_reserved_fallback_check(void)
+{
+ int nid_req = 4;
+ int nid_exp = 0;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
+ void *allocated_ptr = NULL;
+ struct region r1;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ ASSERT_LE(SZ_4, req_node->size);
+ size = req_node->size / SZ_2;
+ r1.base = req_node->base + (size / SZ_2);
+ r1.size = size;
+
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ memblock_reserve(r1.base, r1.size);
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, exp_node->base);
+ ASSERT_LE(region_end(new_rgn), region_end(exp_node));
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that spans over the min_addr
+ * and max_addr range and overlaps with two different nodes, where the first
+ * node is the requested node:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * | +-----------------------+-----------+ |
+ * | | requested | node3 | |
+ * +-----------+-----------------------+-----------+--------------+
+ * + +
+ * | +-----------+ |
+ * | | rgn | |
+ * +-----------+-----------+--------------------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region at the beginning
+ * of the requested node.
+ */
+static int alloc_nid_bottom_up_numa_split_range_low_check(void)
+{
+ int nid_req = 2;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_512;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+ phys_addr_t req_node_end;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ req_node_end = region_end(req_node);
+ min_addr = req_node_end - SZ_256;
+ max_addr = min_addr + size;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, req_node->base);
+ ASSERT_LE(region_end(new_rgn), req_node_end);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that spans over the min_addr
+ * and max_addr range and overlaps with two different nodes, where the second
+ * node is the requested node:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * |------------------+ +----------------------+---------+ |
+ * | expected | | previous |requested| |
+ * +------------------+--------+----------------------+---------+------+
+ * + +
+ * |---------+ |
+ * | rgn | |
+ * +---------+---------------------------------------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region at the beginning
+ * of the first node that has enough memory.
+ */
+static int alloc_nid_bottom_up_numa_split_range_high_check(void)
+{
+ int nid_req = 3;
+ int nid_exp = 0;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_512;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+ phys_addr_t exp_node_end;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+	exp_node_end = region_end(exp_node);
+ min_addr = req_node->base - SZ_256;
+ max_addr = min_addr + size;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, exp_node->base);
+ ASSERT_LE(region_end(new_rgn), exp_node_end);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that spans over the min_addr
+ * and max_addr range, where the range overlaps two different nodes and the
+ * requested node ends before min_addr:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * | +---------------+ +-------------+---------+ |
+ * | | requested | | node1 | node2 | |
+ * +----+---------------+--------+-------------+---------+---------+
+ * + +
+ * | +---------+ |
+ * | | rgn | |
+ * +----+---------+------------------------------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region that starts at
+ * the beginning of the requested node.
+ */
+static int alloc_nid_bottom_up_numa_no_overlap_split_check(void)
+{
+ int nid_req = 2;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ struct memblock_region *node2 = &memblock.memory.regions[6];
+ void *allocated_ptr = NULL;
+ phys_addr_t size;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ size = SZ_512;
+ min_addr = node2->base - SZ_256;
+ max_addr = min_addr + size;
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, req_node->base);
+ ASSERT_LE(region_end(new_rgn), region_end(req_node));
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory within the min_addr and max_addr range
+ * when the requested node and the range do not overlap, and the requested
+ * node ends before min_addr. The range overlaps with multiple nodes along node
+ * boundaries:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * |-----------+ +----------+----...----+----------+ |
+ * | requested | | min node | ... | max node | |
+ * +-----------+-----------+----------+----...----+----------+------+
+ * + +
+ * | +-----+ |
+ * | | rgn | |
+ * +-----------------------+-----+----------------------------------+
+ *
+ * Expect to allocate a memory region at the beginning of the first node
+ * in the range after falling back to NUMA_NO_NODE.
+ */
+static int alloc_nid_bottom_up_numa_no_overlap_low_check(void)
+{
+ int nid_req = 0;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *min_node = &memblock.memory.regions[2];
+ struct memblock_region *max_node = &memblock.memory.regions[5];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_64;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ min_addr = min_node->base;
+ max_addr = region_end(max_node);
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, min_addr);
+ ASSERT_LE(region_end(new_rgn), region_end(min_node));
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to allocate memory within the min_addr and max_addr range
+ * when the requested node and the range do not overlap, and the requested
+ * node starts after max_addr. The range overlaps with multiple nodes along node
+ * boundaries:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * | +----------+----...----+----------+ +---------+ |
+ * | | min node | ... | max node | |requested| |
+ * +-----+----------+----...----+----------+---------+---------+---+
+ * + +
+ * | +-----+ |
+ * | | rgn | |
+ * +-----+-----+---------------------------------------------------+
+ *
+ * Expect to allocate a memory region at the beginning of the first node
+ * in the range after falling back to NUMA_NO_NODE.
+ */
+static int alloc_nid_bottom_up_numa_no_overlap_high_check(void)
+{
+ int nid_req = 7;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *min_node = &memblock.memory.regions[2];
+ struct memblock_region *max_node = &memblock.memory.regions[5];
+ void *allocated_ptr = NULL;
+ phys_addr_t size = SZ_64;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ min_addr = min_node->base;
+ max_addr = region_end(max_node);
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, size);
+ ASSERT_EQ(new_rgn->base, min_addr);
+ ASSERT_LE(region_end(new_rgn), region_end(min_node));
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
+
+ return 0;
+}
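
The NUMA_NO_NODE fallback mentioned in the two tests above is the allocator's other retry path: if no free range exists on the requested node, the search is repeated with the node restriction dropped. Roughly, paraphrased from memblock_alloc_range_nid() in mm/memblock.c (again not part of this patch):

	found = memblock_find_in_range_node(size, align, start, end,
					    nid, flags);
	if (!found && nid != NUMA_NO_NODE && !exact_nid)
		/* retry the search across all nodes */
		found = memblock_find_in_range_node(size, align, start, end,
						    NUMA_NO_NODE, flags);
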
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * does not have enough memory to allocate a region of the requested size.
+ * Additionally, none of the nodes have enough memory to allocate the region:
+ *
+ * +-----------------------------------+
+ * | new |
+ * +-----------------------------------+
+ * |-------+-------+-------+-------+-------+-------+-------+-------|
+ * | node0 | node1 | node2 | node3 | node4 | node5 | node6 | node7 |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * Expect no allocation to happen.
+ */
+static int alloc_nid_numa_large_region_generic_check(void)
+{
+ int nid_req = 3;
+ void *allocated_ptr = NULL;
+ phys_addr_t size = MEM_SIZE / SZ_2;
+ phys_addr_t min_addr;
+ phys_addr_t max_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ min_addr = memblock_start_of_DRAM();
+ max_addr = memblock_end_of_DRAM();
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
+
+ return 0;
+}
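
Note why the NUMA_NO_NODE fallback cannot rescue this case: setting per-node IDs splits memblock.memory into one region per node, and adjacent regions with different node IDs are never merged, so even with the node constraint dropped no single free range reaches MEM_SIZE / SZ_2.
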
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range when
+ * there are two reserved regions at the borders. The requested node starts at
+ * min_addr and ends at max_addr and is the same size as the region to be
+ * allocated:
+ *
+ * min_addr
+ * | max_addr
+ * | |
+ * v v
+ * | +-----------+-----------------------+-----------------------|
+ * | | node5 | requested | node7 |
+ * +------+-----------+-----------------------+-----------------------+
+ * + +
+ * | +----+-----------------------+----+ |
+ * | | r2 | new | r1 | |
+ * +-------------+----+-----------------------+----+------------------+
+ *
+ * Expect to merge all of the regions into one. The region counter and total
+ * size fields get updated.
+ */
+static int alloc_nid_numa_reserved_full_merge_generic_check(void)
+{
+ int nid_req = 6;
+ int nid_next = nid_req + 1;
+ struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+ struct memblock_region *next_node = &memblock.memory.regions[nid_next];
+ void *allocated_ptr = NULL;
+ struct region r1, r2;
+ phys_addr_t size = req_node->size;
+ phys_addr_t total_size;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ r1.base = next_node->base;
+ r1.size = SZ_128;
+
+ r2.size = SZ_128;
+ r2.base = r1.base - (size + r2.size);
+
+ total_size = r1.size + r2.size + size;
+ min_addr = r2.base + r2.size;
+ max_addr = r1.base;
+
+ memblock_reserve(r1.base, r1.size);
+ memblock_reserve(r2.base, r2.size);
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+ assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);
+
+ ASSERT_EQ(new_rgn->size, total_size);
+ ASSERT_EQ(new_rgn->base, r2.base);
+
+ ASSERT_LE(new_rgn->base, req_node->base);
+ ASSERT_LE(region_end(req_node), region_end(new_rgn));
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
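
The geometry here deserves a note: r2.base is computed so that r2, the new allocation, and r1 sit exactly back to back, which is why a single merged region is expected. These relations follow directly from the assignments above:

	r2.base + r2.size == min_addr	/* r2 ends where the window opens  */
	min_addr + size   == max_addr	/* the allocation fills the window */
	max_addr          == r1.base	/* r1 starts right after           */
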
+
+/*
+ * A test that tries to allocate memory within the min_addr and max_addr range,
+ * where the total range can fit the region, but it is split between two nodes
+ * and everything else is reserved. Additionally, nid is set to NUMA_NO_NODE
+ * instead of requesting a specific node:
+ *
+ * +-----------+
+ * | new |
+ * +-----------+
+ * | +---------------------+-----------|
+ * | | prev node | next node |
+ * +------+---------------------+-----------+
+ * + +
+ * |----------------------+ +-----|
+ * | r1 | | r2 |
+ * +----------------------+-----------+-----+
+ * ^ ^
+ * | |
+ * | max_addr
+ * |
+ * min_addr
+ *
+ * Expect no allocation to happen.
+ */
+static int alloc_nid_numa_split_all_reserved_generic_check(void)
+{
+ void *allocated_ptr = NULL;
+ struct memblock_region *next_node = &memblock.memory.regions[7];
+ struct region r1, r2;
+ phys_addr_t size = SZ_256;
+ phys_addr_t max_addr;
+ phys_addr_t min_addr;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ r2.base = next_node->base + SZ_128;
+ r2.size = memblock_end_of_DRAM() - r2.base;
+
+ r1.size = MEM_SIZE - (r2.size + size);
+ r1.base = memblock_start_of_DRAM();
+
+ min_addr = r1.base + r1.size;
+ max_addr = r2.base;
+
+ memblock_reserve(r1.base, r1.size);
+ memblock_reserve(r2.base, r2.size);
+
+ allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
+ min_addr, max_addr,
+ NUMA_NO_NODE);
+
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to allocate a memory region through
+ * memblock_alloc_node() on the NUMA node with id nid_req. Expect the new
+ * region to have the correct NUMA node set.
+ */
+static int alloc_node_on_correct_nid(void)
+{
+ int nid_req = 2;
+ void *allocated_ptr = NULL;
+#ifdef CONFIG_NUMA
+ struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+#endif
+ phys_addr_t size = SZ_512;
+
+ PREFIX_PUSH();
+ setup_numa_memblock(node_fractions);
+
+ allocated_ptr = memblock_alloc_node(size, SMP_CACHE_BYTES, nid_req);
+
+ ASSERT_NE(allocated_ptr, NULL);
+#ifdef CONFIG_NUMA
+ ASSERT_EQ(nid_req, req_node->nid);
+#endif
+
+ test_pass_pop();
+
+ return 0;
+}
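
For reference, memblock_alloc_node() is the whole-range convenience wrapper; in include/linux/memblock.h it reduces, roughly, to memblock_alloc_try_nid() with no address bounds:

static inline void *memblock_alloc_node(phys_addr_t size,
					phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

That is why the test needs no min_addr/max_addr setup and only checks the resulting node ID.
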
+
+/* Test case wrappers for NUMA tests */
+static int alloc_nid_numa_simple_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_numa_simple_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_numa_simple_check();
+
+ return 0;
+}
+
+static int alloc_nid_numa_small_node_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_numa_small_node_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_numa_small_node_check();
+
+ return 0;
+}
+
+static int alloc_nid_numa_node_reserved_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_numa_node_reserved_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_numa_node_reserved_check();
+
+ return 0;
+}
+
+static int alloc_nid_numa_part_reserved_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_numa_part_reserved_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_numa_part_reserved_check();
+
+ return 0;
+}
+
+static int alloc_nid_numa_part_reserved_fallback_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_numa_part_reserved_fallback_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_numa_part_reserved_fallback_check();
+
+ return 0;
+}
+
+static int alloc_nid_numa_split_range_low_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_numa_split_range_low_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_numa_split_range_low_check();
+
+ return 0;
+}
+
+static int alloc_nid_numa_split_range_high_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_numa_split_range_high_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_numa_split_range_high_check();
+
+ return 0;
+}
+
+static int alloc_nid_numa_no_overlap_split_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_numa_no_overlap_split_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_numa_no_overlap_split_check();
+
+ return 0;
+}
+
+static int alloc_nid_numa_no_overlap_low_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_numa_no_overlap_low_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_numa_no_overlap_low_check();
+
+ return 0;
+}
+
+static int alloc_nid_numa_no_overlap_high_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ memblock_set_bottom_up(false);
+ alloc_nid_top_down_numa_no_overlap_high_check();
+ memblock_set_bottom_up(true);
+ alloc_nid_bottom_up_numa_no_overlap_high_check();
+
+ return 0;
+}
+
+static int alloc_nid_numa_large_region_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_nid_numa_large_region_generic_check);
+ run_bottom_up(alloc_nid_numa_large_region_generic_check);
+
+ return 0;
+}
+
+static int alloc_nid_numa_reserved_full_merge_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_nid_numa_reserved_full_merge_generic_check);
+ run_bottom_up(alloc_nid_numa_reserved_full_merge_generic_check);
+
+ return 0;
+}
+
+static int alloc_nid_numa_split_all_reserved_check(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_nid_numa_split_all_reserved_generic_check);
+ run_bottom_up(alloc_nid_numa_split_all_reserved_generic_check);
+
+ return 0;
+}
+
+static int alloc_node_numa_on_correct_nid(void)
+{
+ test_print("\tRunning %s...\n", __func__);
+ run_top_down(alloc_node_on_correct_nid);
+ run_bottom_up(alloc_node_on_correct_nid);
+
+ return 0;
+}
+
+int __memblock_alloc_nid_numa_checks(void)
+{
+ test_print("Running %s NUMA tests...\n",
+ get_memblock_alloc_nid_name(alloc_nid_test_flags));
+
+ alloc_nid_numa_simple_check();
+ alloc_nid_numa_small_node_check();
+ alloc_nid_numa_node_reserved_check();
+ alloc_nid_numa_part_reserved_check();
+ alloc_nid_numa_part_reserved_fallback_check();
+ alloc_nid_numa_split_range_low_check();
+ alloc_nid_numa_split_range_high_check();
+
+ alloc_nid_numa_no_overlap_split_check();
+ alloc_nid_numa_no_overlap_low_check();
+ alloc_nid_numa_no_overlap_high_check();
+ alloc_nid_numa_large_region_check();
+ alloc_nid_numa_reserved_full_merge_check();
+ alloc_nid_numa_split_all_reserved_check();
+
+ alloc_node_numa_on_correct_nid();
+
+ return 0;
+}
+
+static int memblock_alloc_nid_checks_internal(int flags)
+{
+ alloc_nid_test_flags = flags;
+
+ prefix_reset();
+ prefix_push(get_memblock_alloc_nid_name(flags));
+
+ reset_memblock_attributes();
+ dummy_physical_memory_init();
+
+ memblock_alloc_nid_range_checks();
+ memblock_alloc_nid_numa_checks();
+
+ dummy_physical_memory_cleanup();
+
+ prefix_pop();
+
+ return 0;
+}
+
+int memblock_alloc_nid_checks(void)
+{
+ memblock_alloc_nid_checks_internal(TEST_F_NONE);
+ memblock_alloc_nid_checks_internal(TEST_F_RAW);
+
+ return 0;
+}
+
+int memblock_alloc_exact_nid_range_checks(void)
+{
+ alloc_nid_test_flags = (TEST_F_RAW | TEST_F_EXACT);
+
+ memblock_alloc_nid_range_checks();
+
+ return 0;
+}
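
The flag handling above is easiest to read next to the dispatch helper these tests call. run_memblock_alloc_nid() is defined earlier in this file, outside this hunk; given how alloc_nid_test_flags is used here, it presumably has roughly this shape (a sketch, not the verbatim definition):

static void *run_memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
				    phys_addr_t min_addr,
				    phys_addr_t max_addr, int nid)
{
	/* TEST_F_EXACT selects the exact-nid variant (no node fallback) */
	if (alloc_nid_test_flags & TEST_F_EXACT)
		return memblock_alloc_exact_nid_raw(size, align, min_addr,
						    max_addr, nid);
	/* TEST_F_RAW skips zeroing, which assert_mem_content() accounts for */
	if (alloc_nid_test_flags & TEST_F_RAW)
		return memblock_alloc_try_nid_raw(size, align, min_addr,
						  max_addr, nid);
	return memblock_alloc_try_nid(size, align, min_addr, max_addr, nid);
}
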
diff --git a/tools/testing/memblock/tests/alloc_nid_api.h b/tools/testing/memblock/tests/alloc_nid_api.h
new file mode 100644
index 000000000000..2b8cabacacb8
--- /dev/null
+++ b/tools/testing/memblock/tests/alloc_nid_api.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _MEMBLOCK_ALLOC_NID_H
+#define _MEMBLOCK_ALLOC_NID_H
+
+#include "common.h"
+
+int memblock_alloc_nid_checks(void);
+int memblock_alloc_exact_nid_range_checks(void);
+int __memblock_alloc_nid_numa_checks(void);
+
+#ifdef CONFIG_NUMA
+static inline int memblock_alloc_nid_numa_checks(void)
+{
+ __memblock_alloc_nid_numa_checks();
+ return 0;
+}
+
+#else
+static inline int memblock_alloc_nid_numa_checks(void)
+{
+ return 0;
+}
+
+#endif /* CONFIG_NUMA */
+
+#endif
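
Thanks to the CONFIG_NUMA stub above, callers can invoke memblock_alloc_nid_numa_checks() unconditionally and the NUMA tests simply compile away on !NUMA builds. A hypothetical wiring of the two public entry points (illustrative only; the real runner registers many more suites):

#include "alloc_nid_api.h"

int main(void)
{
	memblock_alloc_nid_checks();
	memblock_alloc_exact_nid_range_checks();
	return 0;
}
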
diff --git a/tools/testing/memblock/tests/basic_api.c b/tools/testing/memblock/tests/basic_api.c
new file mode 100644
index 000000000000..01e836fba488
--- /dev/null
+++ b/tools/testing/memblock/tests/basic_api.c
@@ -0,0 +1,2551 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include "basic_api.h"
+#include <string.h>
+#include <linux/memblock.h>
+
+#define EXPECTED_MEMBLOCK_REGIONS 128
+#define FUNC_ADD "memblock_add"
+#define FUNC_RESERVE "memblock_reserve"
+#define FUNC_REMOVE "memblock_remove"
+#define FUNC_FREE "memblock_free"
+#define FUNC_TRIM "memblock_trim_memory"
+
+static int memblock_initialization_check(void)
+{
+ PREFIX_PUSH();
+
+ ASSERT_NE(memblock.memory.regions, NULL);
+ ASSERT_EQ(memblock.memory.cnt, 0);
+ ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
+ ASSERT_EQ(strcmp(memblock.memory.name, "memory"), 0);
+
+ ASSERT_NE(memblock.reserved.regions, NULL);
+ ASSERT_EQ(memblock.reserved.cnt, 0);
+	ASSERT_EQ(memblock.reserved.max, EXPECTED_MEMBLOCK_REGIONS);
+ ASSERT_EQ(strcmp(memblock.reserved.name, "reserved"), 0);
+
+ ASSERT_EQ(memblock.bottom_up, false);
+ ASSERT_EQ(memblock.current_limit, MEMBLOCK_ALLOC_ANYWHERE);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that adds a memory block of a specified base address
+ * and size to the collection of available memory regions (memblock.memory).
+ * Expect to create a new entry. The region counter and total memory get
+ * updated.
+ */
+static int memblock_add_simple_check(void)
+{
+ struct memblock_region *rgn;
+
+ rgn = &memblock.memory.regions[0];
+
+ struct region r = {
+ .base = SZ_1G,
+ .size = SZ_4M
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_add(r.base, r.size);
+
+ ASSERT_EQ(rgn->base, r.base);
+ ASSERT_EQ(rgn->size, r.size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r.size);
+
+ test_pass_pop();
+
+ return 0;
+}
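
struct region here is the tests' plain base/size pair rather than a memblock type; a minimal sketch, assuming the definition in the suite's common.h:

struct region {
	phys_addr_t base;
	phys_addr_t size;
};
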
+
+/*
+ * A simple test that adds a memory block of a specified base address, size,
+ * NUMA node and memory flags to the collection of available memory regions.
+ * Expect to create a new entry. The region counter and total memory get
+ * updated.
+ */
+static int memblock_add_node_simple_check(void)
+{
+ struct memblock_region *rgn;
+
+ rgn = &memblock.memory.regions[0];
+
+ struct region r = {
+ .base = SZ_1M,
+ .size = SZ_16M
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_add_node(r.base, r.size, 1, MEMBLOCK_HOTPLUG);
+
+ ASSERT_EQ(rgn->base, r.base);
+ ASSERT_EQ(rgn->size, r.size);
+#ifdef CONFIG_NUMA
+ ASSERT_EQ(rgn->nid, 1);
+#endif
+ ASSERT_EQ(rgn->flags, MEMBLOCK_HOTPLUG);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to add two memory blocks that don't overlap with one
+ * another:
+ *
+ * | +--------+ +--------+ |
+ * | | r1 | | r2 | |
+ * +--------+--------+--------+--------+--+
+ *
+ * Expect to add two correctly initialized entries to the collection of
+ * available memory regions (memblock.memory). The total size and
+ * region counter fields get updated.
+ */
+static int memblock_add_disjoint_check(void)
+{
+ struct memblock_region *rgn1, *rgn2;
+
+ rgn1 = &memblock.memory.regions[0];
+ rgn2 = &memblock.memory.regions[1];
+
+ struct region r1 = {
+ .base = SZ_1G,
+ .size = SZ_8K
+ };
+ struct region r2 = {
+ .base = SZ_1G + SZ_16K,
+ .size = SZ_8K
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_add(r2.base, r2.size);
+
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, r1.size);
+
+ ASSERT_EQ(rgn2->base, r2.base);
+ ASSERT_EQ(rgn2->size, r2.size);
+
+ ASSERT_EQ(memblock.memory.cnt, 2);
+ ASSERT_EQ(memblock.memory.total_size, r1.size + r2.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
+ * with the beginning of r1 (that is r1.base < r2.base + r2.size):
+ *
+ * | +----+----+------------+ |
+ * | | |r2 | r1 | |
+ * +----+----+----+------------+----------+
+ * ^ ^
+ * | |
+ * | r1.base
+ * |
+ * r2.base
+ *
+ * Expect to merge the two entries into one region that starts at r2.base
+ * and has size of two regions minus their intersection. The total size of
+ * the available memory is updated, and the region counter stays the same.
+ */
+static int memblock_add_overlap_top_check(void)
+{
+ struct memblock_region *rgn;
+ phys_addr_t total_size;
+
+ rgn = &memblock.memory.regions[0];
+
+ struct region r1 = {
+ .base = SZ_512M,
+ .size = SZ_1G
+ };
+ struct region r2 = {
+ .base = SZ_256M,
+ .size = SZ_512M
+ };
+
+ PREFIX_PUSH();
+
+ total_size = (r1.base - r2.base) + r1.size;
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_add(r2.base, r2.size);
+
+ ASSERT_EQ(rgn->base, r2.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
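
For the values above the arithmetic is concrete: r1 spans [SZ_512M, SZ_512M + SZ_1G) and r2 spans [SZ_256M, SZ_768M), so the merged region covers [SZ_256M, SZ_512M + SZ_1G) and total_size = (SZ_512M - SZ_256M) + SZ_1G = SZ_1G + SZ_256M, with the overlapping SZ_256M counted only once.
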
+
+/*
+ * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
+ * with the end of r1 (that is r2.base < r1.base + r1.size):
+ *
+ * | +--+------+----------+ |
+ * | | | r1 | r2 | |
+ * +--+--+------+----------+--------------+
+ * ^ ^
+ * | |
+ * | r2.base
+ * |
+ * r1.base
+ *
+ * Expect to merge the two entries into one region that starts at r1.base
+ * and has size of two regions minus their intersection. The total size of
+ * the available memory is updated, and the region counter stays the same.
+ */
+static int memblock_add_overlap_bottom_check(void)
+{
+ struct memblock_region *rgn;
+ phys_addr_t total_size;
+
+ rgn = &memblock.memory.regions[0];
+
+ struct region r1 = {
+ .base = SZ_128M,
+ .size = SZ_512M
+ };
+ struct region r2 = {
+ .base = SZ_256M,
+ .size = SZ_1G
+ };
+
+ PREFIX_PUSH();
+
+ total_size = (r2.base - r1.base) + r2.size;
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_add(r2.base, r2.size);
+
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to add two memory blocks r1 and r2, where r2 is
+ * within the range of r1 (that is r1.base < r2.base &&
+ * r2.base + r2.size < r1.base + r1.size):
+ *
+ * | +-------+--+-----------------------+
+ * | | |r2| r1 |
+ * +---+-------+--+-----------------------+
+ * ^
+ * |
+ * r1.base
+ *
+ * Expect r2 to be absorbed into r1, leaving the region unchanged.
+ * The counter and total size of available memory are not updated.
+ */
+static int memblock_add_within_check(void)
+{
+ struct memblock_region *rgn;
+
+ rgn = &memblock.memory.regions[0];
+
+ struct region r1 = {
+ .base = SZ_8M,
+ .size = SZ_32M
+ };
+ struct region r2 = {
+ .base = SZ_16M,
+ .size = SZ_1M
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_add(r2.base, r2.size);
+
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, r1.size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r1.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to add the same memory block twice. Expect
+ * the counter and total size of available memory to not be updated.
+ */
+static int memblock_add_twice_check(void)
+{
+ struct region r = {
+ .base = SZ_16K,
+ .size = SZ_2M
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+
+ memblock_add(r.base, r.size);
+ memblock_add(r.base, r.size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to add two memory blocks that don't overlap with one
+ * another and then add a third memory block in the space between the first two:
+ *
+ * | +--------+--------+--------+ |
+ * | | r1 | r3 | r2 | |
+ * +--------+--------+--------+--------+--+
+ *
+ * Expect to merge the three entries into one region that starts at r1.base
+ * and has size of r1.size + r2.size + r3.size. The region counter and total
+ * size of the available memory are updated.
+ */
+static int memblock_add_between_check(void)
+{
+ struct memblock_region *rgn;
+ phys_addr_t total_size;
+
+ rgn = &memblock.memory.regions[0];
+
+ struct region r1 = {
+ .base = SZ_1G,
+ .size = SZ_8K
+ };
+ struct region r2 = {
+ .base = SZ_1G + SZ_16K,
+ .size = SZ_8K
+ };
+ struct region r3 = {
+ .base = SZ_1G + SZ_8K,
+ .size = SZ_8K
+ };
+
+ PREFIX_PUSH();
+
+ total_size = r1.size + r2.size + r3.size;
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_add(r2.base, r2.size);
+ memblock_add(r3.base, r3.size);
+
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to add a memory block r when r extends past
+ * PHYS_ADDR_MAX:
+ *
+ * +--------+
+ * | r |
+ * +--------+
+ * | +----+
+ * | | rgn|
+ * +----------------------------+----+
+ *
+ * Expect to add a memory block of size PHYS_ADDR_MAX - r.base. Expect the
+ * total size of available memory and the counter to be updated.
+ */
+static int memblock_add_near_max_check(void)
+{
+ struct memblock_region *rgn;
+ phys_addr_t total_size;
+
+ rgn = &memblock.memory.regions[0];
+
+ struct region r = {
+ .base = PHYS_ADDR_MAX - SZ_1M,
+ .size = SZ_2M
+ };
+
+ PREFIX_PUSH();
+
+ total_size = PHYS_ADDR_MAX - r.base;
+
+ reset_memblock_regions();
+ memblock_add(r.base, r.size);
+
+ ASSERT_EQ(rgn->base, r.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to add the 129th memory block.
+ * Expect it to trigger memblock_double_array(), which doubles
+ * memblock.memory.max and finds a new valid memory range for
+ * memory.regions.
+ */
+static int memblock_add_many_check(void)
+{
+ int i;
+ void *orig_region;
+ struct region r = {
+ .base = SZ_16K,
+ .size = SZ_16K,
+ };
+ phys_addr_t new_memory_regions_size;
+ phys_addr_t base, size = SZ_64;
+ phys_addr_t gap_size = SZ_64;
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_allow_resize();
+
+ dummy_physical_memory_init();
+ /*
+	 * We allocated enough memory via dummy_physical_memory_init() and now
+	 * split it into small blocks. First, carve out a block large enough to
+	 * serve as the memory region that memblock_double_array() will choose.
+ */
+ base = PAGE_ALIGN(dummy_physical_memory_base());
+ new_memory_regions_size = PAGE_ALIGN(INIT_MEMBLOCK_REGIONS * 2 *
+ sizeof(struct memblock_region));
+ memblock_add(base, new_memory_regions_size);
+
+	/* This is the base of the small memory blocks. */
+ base += new_memory_regions_size + gap_size;
+
+ orig_region = memblock.memory.regions;
+
+ for (i = 0; i < INIT_MEMBLOCK_REGIONS; i++) {
+ /*
+		 * Add these small blocks to fill up memblock. Keep a gap
+		 * between neighboring blocks so they are not merged.
+ */
+ memblock_add(base, size);
+ base += size + gap_size;
+
+ ASSERT_EQ(memblock.memory.cnt, i + 2);
+ ASSERT_EQ(memblock.memory.total_size, new_memory_regions_size +
+ (i + 1) * size);
+ }
+
+ /*
+	 * At this point, memblock_double_array() has succeeded; check that it
+	 * updated memory.max.
+ */
+ ASSERT_EQ(memblock.memory.max, INIT_MEMBLOCK_REGIONS * 2);
+
+	/* memblock_double_array() reserves the memory it uses. Check it. */
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, new_memory_regions_size);
+
+ /*
+	 * memblock_double_array() has done its job. Check that memblock_add()
+	 * still works as normal after the array was doubled.
+ */
+ memblock_add(r.base, r.size);
+ ASSERT_EQ(memblock.memory.regions[0].base, r.base);
+ ASSERT_EQ(memblock.memory.regions[0].size, r.size);
+
+ ASSERT_EQ(memblock.memory.cnt, INIT_MEMBLOCK_REGIONS + 2);
+ ASSERT_EQ(memblock.memory.total_size, INIT_MEMBLOCK_REGIONS * size +
+ new_memory_regions_size +
+ r.size);
+ ASSERT_EQ(memblock.memory.max, INIT_MEMBLOCK_REGIONS * 2);
+
+ dummy_physical_memory_cleanup();
+
+ /*
+	 * The current memory.regions occupies a range of memory allocated by
+	 * dummy_physical_memory_init(). Once that memory is freed, it must not
+	 * be used again, so restore the original regions array to make sure
+	 * later tests run as normal, unaffected by the doubled array.
+ */
+ memblock.memory.regions = orig_region;
+ memblock.memory.cnt = INIT_MEMBLOCK_REGIONS;
+
+ test_pass_pop();
+
+ return 0;
+}
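
For orientation, what memblock_double_array() does when the 129th region arrives, paraphrased loosely from mm/memblock.c (not part of this patch; names abbreviated): it finds room for twice as many regions, copies the old array over, doubles type->max, and reserves the new array's memory, which is why a reserved region appears above:

	old_size = type->max * sizeof(struct memblock_region);
	new_alloc_size = PAGE_ALIGN(old_size * 2);
	addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE,
				      new_alloc_size, PAGE_SIZE);
	new_array = __va(addr);
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	type->regions = new_array;
	type->max <<= 1;
	memblock_reserve(addr, new_alloc_size);
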
+
+static int memblock_add_checks(void)
+{
+ prefix_reset();
+ prefix_push(FUNC_ADD);
+ test_print("Running %s tests...\n", FUNC_ADD);
+
+ memblock_add_simple_check();
+ memblock_add_node_simple_check();
+ memblock_add_disjoint_check();
+ memblock_add_overlap_top_check();
+ memblock_add_overlap_bottom_check();
+ memblock_add_within_check();
+ memblock_add_twice_check();
+ memblock_add_between_check();
+ memblock_add_near_max_check();
+ memblock_add_many_check();
+
+ prefix_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that marks a memory block of a specified base address and
+ * size as reserved and adds it to the collection of reserved memory regions
+ * (memblock.reserved). Expect to create a new entry. The region counter
+ * and total memory size are updated.
+ */
+static int memblock_reserve_simple_check(void)
+{
+ struct memblock_region *rgn;
+
+ rgn = &memblock.reserved.regions[0];
+
+ struct region r = {
+ .base = SZ_2G,
+ .size = SZ_128M
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_reserve(r.base, r.size);
+
+ ASSERT_EQ(rgn->base, r.base);
+ ASSERT_EQ(rgn->size, r.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to mark two memory blocks that don't overlap as reserved:
+ *
+ * | +--+ +----------------+ |
+ * | |r1| | r2 | |
+ * +--------+--+------+----------------+--+
+ *
+ * Expect to add two entries to the collection of reserved memory regions
+ * (memblock.reserved). The total size and region counter for
+ * memblock.reserved are updated.
+ */
+static int memblock_reserve_disjoint_check(void)
+{
+ struct memblock_region *rgn1, *rgn2;
+
+ rgn1 = &memblock.reserved.regions[0];
+ rgn2 = &memblock.reserved.regions[1];
+
+ struct region r1 = {
+ .base = SZ_256M,
+ .size = SZ_16M
+ };
+ struct region r2 = {
+ .base = SZ_512M,
+ .size = SZ_512M
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_reserve(r1.base, r1.size);
+ memblock_reserve(r2.base, r2.size);
+
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, r1.size);
+
+ ASSERT_EQ(rgn2->base, r2.base);
+ ASSERT_EQ(rgn2->size, r2.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, r1.size + r2.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to mark two memory blocks r1 and r2 as reserved,
+ * where r2 overlaps with the beginning of r1 (that is
+ * r1.base < r2.base + r2.size):
+ *
+ * | +--------------+--+--------------+ |
+ * | | r2 | | r1 | |
+ * +--+--------------+--+--------------+--+
+ * ^ ^
+ * | |
+ * | r1.base
+ * |
+ * r2.base
+ *
+ * Expect to merge two entries into one region that starts at r2.base and
+ * has size of two regions minus their intersection. The total size of the
+ * reserved memory is updated, and the region counter is not updated.
+ */
+static int memblock_reserve_overlap_top_check(void)
+{
+ struct memblock_region *rgn;
+ phys_addr_t total_size;
+
+ rgn = &memblock.reserved.regions[0];
+
+ struct region r1 = {
+ .base = SZ_1G,
+ .size = SZ_1G
+ };
+ struct region r2 = {
+ .base = SZ_128M,
+ .size = SZ_1G
+ };
+
+ PREFIX_PUSH();
+
+ total_size = (r1.base - r2.base) + r1.size;
+
+ reset_memblock_regions();
+ memblock_reserve(r1.base, r1.size);
+ memblock_reserve(r2.base, r2.size);
+
+ ASSERT_EQ(rgn->base, r2.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to mark two memory blocks r1 and r2 as reserved,
+ * where r2 overlaps with the end of r1 (that is
+ * r2.base < r1.base + r1.size):
+ *
+ * | +--------------+--+--------------+ |
+ * | | r1 | | r2 | |
+ * +--+--------------+--+--------------+--+
+ * ^ ^
+ * | |
+ * | r2.base
+ * |
+ * r1.base
+ *
+ * Expect to merge two entries into one region that starts at r1.base and
+ * has size of two regions minus their intersection. The total size of the
+ * reserved memory is updated, and the region counter is not updated.
+ */
+static int memblock_reserve_overlap_bottom_check(void)
+{
+ struct memblock_region *rgn;
+ phys_addr_t total_size;
+
+ rgn = &memblock.reserved.regions[0];
+
+ struct region r1 = {
+ .base = SZ_2K,
+ .size = SZ_128K
+ };
+ struct region r2 = {
+ .base = SZ_128K,
+ .size = SZ_128K
+ };
+
+ PREFIX_PUSH();
+
+ total_size = (r2.base - r1.base) + r2.size;
+
+ reset_memblock_regions();
+ memblock_reserve(r1.base, r1.size);
+ memblock_reserve(r2.base, r2.size);
+
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to mark two memory blocks r1 and r2 as reserved,
+ * where r2 is within the range of r1 (that is
+ * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
+ *
+ * | +-----+--+---------------------------|
+ * | | |r2| r1 |
+ * +-+-----+--+---------------------------+
+ * ^ ^
+ * | |
+ * | r2.base
+ * |
+ * r1.base
+ *
+ * Expect r2 to be absorbed into r1, leaving the region unchanged. The
+ * counter and total size of reserved memory are not updated.
+ */
+static int memblock_reserve_within_check(void)
+{
+ struct memblock_region *rgn;
+
+ rgn = &memblock.reserved.regions[0];
+
+ struct region r1 = {
+ .base = SZ_1M,
+ .size = SZ_8M
+ };
+ struct region r2 = {
+ .base = SZ_2M,
+ .size = SZ_64K
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_reserve(r1.base, r1.size);
+ memblock_reserve(r2.base, r2.size);
+
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, r1.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, r1.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to reserve the same memory block twice.
+ * Expect the region counter and total size of reserved memory to not
+ * be updated.
+ */
+static int memblock_reserve_twice_check(void)
+{
+ struct region r = {
+ .base = SZ_16K,
+ .size = SZ_2M
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+
+ memblock_reserve(r.base, r.size);
+ memblock_reserve(r.base, r.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, r.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to mark two memory blocks that don't overlap as reserved
+ * and then reserve a third memory block in the space between the first two:
+ *
+ * | +--------+--------+--------+ |
+ * | | r1 | r3 | r2 | |
+ * +--------+--------+--------+--------+--+
+ *
+ * Expect to merge the three entries into one reserved region that starts at
+ * r1.base and has size of r1.size + r2.size + r3.size. The region counter and
+ * total size for memblock.reserved are updated.
+ */
+static int memblock_reserve_between_check(void)
+{
+ struct memblock_region *rgn;
+ phys_addr_t total_size;
+
+ rgn = &memblock.reserved.regions[0];
+
+ struct region r1 = {
+ .base = SZ_1G,
+ .size = SZ_8K
+ };
+ struct region r2 = {
+ .base = SZ_1G + SZ_16K,
+ .size = SZ_8K
+ };
+ struct region r3 = {
+ .base = SZ_1G + SZ_8K,
+ .size = SZ_8K
+ };
+
+ PREFIX_PUSH();
+
+ total_size = r1.size + r2.size + r3.size;
+
+ reset_memblock_regions();
+ memblock_reserve(r1.base, r1.size);
+ memblock_reserve(r2.base, r2.size);
+ memblock_reserve(r3.base, r3.size);
+
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to reserve a memory block r when r extends past
+ * PHYS_ADDR_MAX:
+ *
+ * +--------+
+ * | r |
+ * +--------+
+ * | +----+
+ * | | rgn|
+ * +----------------------------+----+
+ *
+ * Expect to reserve a memory block of size PHYS_ADDR_MAX - r.base. Expect the
+ * total size of reserved memory and the counter to be updated.
+ */
+static int memblock_reserve_near_max_check(void)
+{
+ struct memblock_region *rgn;
+ phys_addr_t total_size;
+
+ rgn = &memblock.reserved.regions[0];
+
+ struct region r = {
+ .base = PHYS_ADDR_MAX - SZ_1M,
+ .size = SZ_2M
+ };
+
+ PREFIX_PUSH();
+
+ total_size = PHYS_ADDR_MAX - r.base;
+
+ reset_memblock_regions();
+ memblock_reserve(r.base, r.size);
+
+ ASSERT_EQ(rgn->base, r.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to reserve the 129th memory block.
+ * Expect it to trigger memblock_double_array(), which doubles
+ * memblock.reserved.max and finds a new valid memory range for
+ * reserved.regions.
+ */
+static int memblock_reserve_many_check(void)
+{
+ int i;
+ void *orig_region;
+ struct region r = {
+ .base = SZ_16K,
+ .size = SZ_16K,
+ };
+ phys_addr_t memory_base = SZ_128K;
+ phys_addr_t new_reserved_regions_size;
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_allow_resize();
+
+ /* Add a valid memory region used by double_array(). */
+ dummy_physical_memory_init();
+ memblock_add(dummy_physical_memory_base(), MEM_SIZE);
+
+ for (i = 0; i < INIT_MEMBLOCK_REGIONS; i++) {
+		/* Reserve some fake memory regions to fill up memblock. */
+ memblock_reserve(memory_base, MEM_SIZE);
+
+ ASSERT_EQ(memblock.reserved.cnt, i + 1);
+ ASSERT_EQ(memblock.reserved.total_size, (i + 1) * MEM_SIZE);
+
+		/* Keep a gap so these memory regions will not be merged. */
+ memory_base += MEM_SIZE * 2;
+ }
+
+ orig_region = memblock.reserved.regions;
+
+	/* Reserve the 129th memory region; this triggers the array doubling. */
+ memblock_reserve(memory_base, MEM_SIZE);
+
+ /*
+	 * This is the size of the memory occupied by the doubled
+	 * reserved.regions array, which is reserved because it is now in use.
+	 * It feeds into the total_size that memblock.reserved reports below.
+ */
+ new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
+ sizeof(struct memblock_region));
+ /*
+	 * memblock_double_array() finds a free memory region to hold the new
+	 * reserved.regions array and reserves the memory it occupies, so one
+	 * extra region now exists in memblock.reserved. That extra region's
+	 * size is new_reserved_regions_size.
+ */
+ ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
+ ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+ new_reserved_regions_size);
+ ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+ /*
+	 * memblock_double_array() has done its job. Check that
+	 * memblock_reserve() still works as normal after the array was doubled.
+ */
+ memblock_reserve(r.base, r.size);
+ ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
+ ASSERT_EQ(memblock.reserved.regions[0].size, r.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
+ ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+ new_reserved_regions_size +
+ r.size);
+ ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+ dummy_physical_memory_cleanup();
+
+ /*
+	 * The current reserved.regions occupies a range of memory allocated by
+	 * dummy_physical_memory_init(). Once that memory is freed, it must not
+	 * be used again, so restore the original regions array to make sure
+	 * later tests run as normal, unaffected by the doubled array.
+ */
+ memblock.reserved.regions = orig_region;
+ memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to reserve the 129th memory block at every possible
+ * location. Expect it to trigger memblock_double_array(), which doubles
+ * memblock.reserved.max and finds a new valid memory range for reserved.regions.
+ *
+ * 0 1 2 128
+ * +-------+ +-------+ +-------+ +-------+
+ * | 32K | | 32K | | 32K | ... | 32K |
+ * +-------+-------+-------+-------+-------+ +-------+
+ * |<-32K->| |<-32K->|
+ *
+ */
+/* Keep a gap so these memory regions will not be merged. */
+#define MEMORY_BASE(idx) (SZ_128K + (MEM_SIZE * 2) * (idx))
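
Concretely, with MEM_SIZE being SZ_32K (as the 32K blocks in the diagram indicate), MEMORY_BASE(idx) steps in SZ_64K strides starting at SZ_128K, so every reserved block is followed by an SZ_32K hole that keeps neighboring regions from merging.
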
+static int memblock_reserve_all_locations_check(void)
+{
+ int i, skip;
+ void *orig_region;
+ struct region r = {
+ .base = SZ_16K,
+ .size = SZ_16K,
+ };
+ phys_addr_t new_reserved_regions_size;
+
+ PREFIX_PUSH();
+
+	/* Reserve the 129th memory block at every possible position. */
+ for (skip = 0; skip < INIT_MEMBLOCK_REGIONS + 1; skip++) {
+ reset_memblock_regions();
+ memblock_allow_resize();
+
+ /* Add a valid memory region used by double_array(). */
+ dummy_physical_memory_init();
+ memblock_add(dummy_physical_memory_base(), MEM_SIZE);
+
+ for (i = 0; i < INIT_MEMBLOCK_REGIONS + 1; i++) {
+ if (i == skip)
+ continue;
+
+			/* Reserve some fake memory regions to fill up memblock. */
+ memblock_reserve(MEMORY_BASE(i), MEM_SIZE);
+
+ if (i < skip) {
+ ASSERT_EQ(memblock.reserved.cnt, i + 1);
+ ASSERT_EQ(memblock.reserved.total_size, (i + 1) * MEM_SIZE);
+ } else {
+ ASSERT_EQ(memblock.reserved.cnt, i);
+ ASSERT_EQ(memblock.reserved.total_size, i * MEM_SIZE);
+ }
+ }
+
+ orig_region = memblock.reserved.regions;
+
+		/* Reserve the 129th memory region; this triggers the array doubling. */
+ memblock_reserve(MEMORY_BASE(skip), MEM_SIZE);
+
+ /*
+		 * This is the size of the memory occupied by the doubled
+		 * reserved.regions array, which is reserved because it is now in
+		 * use. It feeds into the total_size that memblock.reserved reports below.
+ */
+ new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
+ sizeof(struct memblock_region));
+ /*
+		 * memblock_double_array() finds a free memory region to hold the
+		 * new reserved.regions array and reserves the memory it occupies,
+		 * so one extra region now exists in memblock.reserved. That extra
+		 * region's size is new_reserved_regions_size.
+ */
+ ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
+ ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+ new_reserved_regions_size);
+ ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+ /*
+		 * memblock_double_array() has done its job. Check that
+		 * memblock_reserve() still works as normal after the array was doubled.
+ */
+ memblock_reserve(r.base, r.size);
+ ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
+ ASSERT_EQ(memblock.reserved.regions[0].size, r.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
+ ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+ new_reserved_regions_size +
+ r.size);
+ ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+ dummy_physical_memory_cleanup();
+
+ /*
+		 * The current reserved.regions occupies a range of memory allocated
+		 * by dummy_physical_memory_init(). Once that memory is freed, it
+		 * must not be used again, so restore the original regions array to
+		 * make sure later tests run as normal, unaffected by the doubled array.
+ */
+ memblock.reserved.regions = orig_region;
+ memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
+ }
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to reserve the 129th memory block at every possible
+ * location. Expect it to trigger memblock_double_array(), which doubles
+ * memblock.reserved.max and finds a new valid memory range for
+ * reserved.regions, and make sure that range doesn't conflict with the range
+ * we want to reserve.
+ *
+ * For example, we have 128 regions in reserved and now want to reserve the
+ * skipped one. Since reserved is full, memblock_double_array() must find an
+ * available range in memory for the new array. We intentionally put two
+ * ranges in memory, one of which is exactly the skipped range. Before commit
+ * 48c3b583bbdd ("mm/memblock: fix overlapping allocation when doubling
+ * reserved array"), the new array would sit in the skipped range, which is a
+ * conflict. The new array is expected to be allocated from memory.regions[0].
+ *
+ * 0 1
+ * memory +-------+ +-------+
+ * | 32K | | 32K |
+ * +-------+ ------+-------+-------+-------+
+ * |<-32K->|<-32K->|<-32K->|
+ *
+ * 0 skipped 127
+ * reserved +-------+ ......... +-------+
+ * | 32K | . 32K . ... | 32K |
+ * +-------+-------+-------+ +-------+
+ * |<-32K->|
+ * ^
+ * |
+ * |
+ * skipped one
+ */
+/* Keep a gap so these memory regions will not be merged. */
+#define MEMORY_BASE_OFFSET(idx, offset) ((offset) + (MEM_SIZE * 2) * (idx))
+static int memblock_reserve_many_may_conflict_check(void)
+{
+ int i, skip;
+ void *orig_region;
+ struct region r = {
+ .base = SZ_16K,
+ .size = SZ_16K,
+ };
+ phys_addr_t new_reserved_regions_size;
+
+ /*
+ * 0 1 129
+ * +---+ +---+ +---+
+ * |32K| |32K| .. |32K|
+ * +---+ +---+ +---+
+ *
+	 * Pre-allocate ranges for the 129 memory blocks, plus one range at
+	 * idx 0 for the doubled memblock.reserved.regions array.
+ */
+ dummy_physical_memory_init();
+ phys_addr_t memory_base = dummy_physical_memory_base();
+ phys_addr_t offset = PAGE_ALIGN(memory_base);
+
+ PREFIX_PUSH();
+
+	/* Reserve the 129th memory block at every possible position. */
+ for (skip = 1; skip <= INIT_MEMBLOCK_REGIONS + 1; skip++) {
+ reset_memblock_regions();
+ memblock_allow_resize();
+
+ reset_memblock_attributes();
+ /* Add a valid memory region used by double_array(). */
+ memblock_add(MEMORY_BASE_OFFSET(0, offset), MEM_SIZE);
+ /*
+		 * Add the memory region that will be reserved as the 129th
+		 * memory region. double_array() is not expected to use it.
+ */
+ memblock_add(MEMORY_BASE_OFFSET(skip, offset), MEM_SIZE);
+
+ for (i = 1; i <= INIT_MEMBLOCK_REGIONS + 1; i++) {
+ if (i == skip)
+ continue;
+
+			/* Reserve some fake memory regions to fill up memblock. */
+ memblock_reserve(MEMORY_BASE_OFFSET(i, offset), MEM_SIZE);
+
+ if (i < skip) {
+ ASSERT_EQ(memblock.reserved.cnt, i);
+ ASSERT_EQ(memblock.reserved.total_size, i * MEM_SIZE);
+ } else {
+ ASSERT_EQ(memblock.reserved.cnt, i - 1);
+ ASSERT_EQ(memblock.reserved.total_size, (i - 1) * MEM_SIZE);
+ }
+ }
+
+ orig_region = memblock.reserved.regions;
+
+		/* Reserve the 129th memory region; this triggers the array doubling. */
+ memblock_reserve(MEMORY_BASE_OFFSET(skip, offset), MEM_SIZE);
+
+ /*
+		 * This is the size of the memory occupied by the doubled
+		 * reserved.regions array, which is reserved because it is now in
+		 * use. It feeds into the total_size that memblock.reserved reports below.
+ */
+ new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
+ sizeof(struct memblock_region));
+ /*
+		 * memblock_double_array() finds a free memory region to hold the
+		 * new reserved.regions array and reserves the memory it occupies,
+		 * so one extra region now exists in memblock.reserved. That extra
+		 * region's size is new_reserved_regions_size.
+ */
+ ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
+ ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+ new_reserved_regions_size);
+ ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+ /*
+		 * The first reserved region is the one allocated for the doubled
+		 * array: its size is new_reserved_regions_size and its base is
+		 * MEMORY_BASE_OFFSET(0, offset) + SZ_32K - new_reserved_regions_size.
+ */
+ ASSERT_EQ(memblock.reserved.regions[0].base + memblock.reserved.regions[0].size,
+ MEMORY_BASE_OFFSET(0, offset) + SZ_32K);
+ ASSERT_EQ(memblock.reserved.regions[0].size, new_reserved_regions_size);
+
+ /*
+		 * memblock_double_array() has done its job. Check that
+		 * memblock_reserve() still works as normal after the array was doubled.
+ */
+ memblock_reserve(r.base, r.size);
+ ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
+ ASSERT_EQ(memblock.reserved.regions[0].size, r.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
+ ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+ new_reserved_regions_size +
+ r.size);
+ ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+ /*
+		 * The current reserved.regions occupies a range of memory allocated
+		 * by dummy_physical_memory_init(). Once that memory is freed, it
+		 * must not be used again, so restore the original regions array to
+		 * make sure later tests run as normal, unaffected by the doubled array.
+ */
+ memblock.reserved.regions = orig_region;
+ memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
+ }
+
+ dummy_physical_memory_cleanup();
+
+ test_pass_pop();
+
+ return 0;
+}
+
+static int memblock_reserve_checks(void)
+{
+ prefix_reset();
+ prefix_push(FUNC_RESERVE);
+ test_print("Running %s tests...\n", FUNC_RESERVE);
+
+ memblock_reserve_simple_check();
+ memblock_reserve_disjoint_check();
+ memblock_reserve_overlap_top_check();
+ memblock_reserve_overlap_bottom_check();
+ memblock_reserve_within_check();
+ memblock_reserve_twice_check();
+ memblock_reserve_between_check();
+ memblock_reserve_near_max_check();
+ memblock_reserve_many_check();
+ memblock_reserve_all_locations_check();
+ memblock_reserve_many_may_conflict_check();
+
+ prefix_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to remove a region r1 from the array of
+ * available memory regions. By "removing" a region we mean overwriting it
+ * with the next region r2 in memblock.memory:
+ *
+ * | ...... +----------------+ |
+ * | : r1 : | r2 | |
+ * +--+----+----------+----------------+--+
+ * ^
+ * |
+ * rgn.base
+ *
+ * Expect to add two memory blocks r1 and r2 and then remove r1 so that
+ * r2 is the first available region. The region counter and total size
+ * are updated.
+ */
+static int memblock_remove_simple_check(void)
+{
+ struct memblock_region *rgn;
+
+ rgn = &memblock.memory.regions[0];
+
+ struct region r1 = {
+ .base = SZ_2K,
+ .size = SZ_4K
+ };
+ struct region r2 = {
+ .base = SZ_128K,
+ .size = SZ_4M
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_add(r2.base, r2.size);
+ memblock_remove(r1.base, r1.size);
+
+ ASSERT_EQ(rgn->base, r2.base);
+ ASSERT_EQ(rgn->size, r2.size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r2.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to remove a region r2 that was not registered as
+ * available memory (i.e. has no corresponding entry in memblock.memory):
+ *
+ * +----------------+
+ * | r2 |
+ * +----------------+
+ * | +----+ |
+ * | | r1 | |
+ * +--+----+------------------------------+
+ * ^
+ * |
+ * rgn.base
+ *
+ * Expect the array, regions counter and total size to not be modified.
+ */
+static int memblock_remove_absent_check(void)
+{
+ struct memblock_region *rgn;
+
+ rgn = &memblock.memory.regions[0];
+
+ struct region r1 = {
+ .base = SZ_512K,
+ .size = SZ_4M
+ };
+ struct region r2 = {
+ .base = SZ_64M,
+ .size = SZ_1G
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_remove(r2.base, r2.size);
+
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, r1.size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r1.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to remove a region r2 that overlaps with the
+ * beginning of the already existing entry r1
+ * (that is r1.base < r2.base + r2.size):
+ *
+ * +-----------------+
+ * | r2 |
+ * +-----------------+
+ * | .........+--------+ |
+ * | : r1 | rgn | |
+ * +-----------------+--------+--------+--+
+ * ^ ^
+ * | |
+ * | rgn.base
+ * r1.base
+ *
+ * Expect that only the intersection of both regions is removed from the
+ * available memory pool. The regions counter and total size are updated.
+ */
+static int memblock_remove_overlap_top_check(void)
+{
+ struct memblock_region *rgn;
+ phys_addr_t r1_end, r2_end, total_size;
+
+ rgn = &memblock.memory.regions[0];
+
+ struct region r1 = {
+ .base = SZ_32M,
+ .size = SZ_32M
+ };
+ struct region r2 = {
+ .base = SZ_16M,
+ .size = SZ_32M
+ };
+
+ PREFIX_PUSH();
+
+ r1_end = r1.base + r1.size;
+ r2_end = r2.base + r2.size;
+ total_size = r1_end - r2_end;
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_remove(r2.base, r2.size);
+
+	ASSERT_EQ(rgn->base, r2_end);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
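
With these values the intersection [SZ_32M, SZ_48M) is removed, leaving [SZ_48M, SZ_64M): the surviving base is r2_end = SZ_48M and total_size = r1_end - r2_end = SZ_16M, which is what the assertions above check.
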
+
+/*
+ * A test that tries to remove a region r2 that overlaps with the end of
+ * the already existing region r1 (that is r2.base < r1.base + r1.size):
+ *
+ * +--------------------------------+
+ * | r2 |
+ * +--------------------------------+
+ * | +---+..... |
+ * | |rgn| r1 : |
+ * +-+---+----+---------------------------+
+ * ^
+ * |
+ * r1.base
+ *
+ * Expect that only the intersection of both regions is removed from the
+ * available memory pool. The regions counter and total size are updated.
+ */
+static int memblock_remove_overlap_bottom_check(void)
+{
+ struct memblock_region *rgn;
+ phys_addr_t total_size;
+
+ rgn = &memblock.memory.regions[0];
+
+ struct region r1 = {
+ .base = SZ_2M,
+ .size = SZ_64M
+ };
+ struct region r2 = {
+ .base = SZ_32M,
+ .size = SZ_256M
+ };
+
+ PREFIX_PUSH();
+
+ total_size = r2.base - r1.base;
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_remove(r2.base, r2.size);
+
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to remove a region r2 that is within the range of
+ * the already existing entry r1 (that is
+ * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
+ *
+ * +----+
+ * | r2 |
+ * +----+
+ * | +-------------+....+---------------+ |
+ * | | rgn1 | r1 | rgn2 | |
+ * +-+-------------+----+---------------+-+
+ * ^
+ * |
+ * r1.base
+ *
+ * Expect that the region is split into two - one that ends at r2.base and
+ * another that starts at r2.base + r2.size, with appropriate sizes. The
+ * region counter and total size are updated.
+ */
+static int memblock_remove_within_check(void)
+{
+ struct memblock_region *rgn1, *rgn2;
+ phys_addr_t r1_size, r2_size, total_size;
+
+ rgn1 = &memblock.memory.regions[0];
+ rgn2 = &memblock.memory.regions[1];
+
+ struct region r1 = {
+ .base = SZ_1M,
+ .size = SZ_32M
+ };
+ struct region r2 = {
+ .base = SZ_16M,
+ .size = SZ_1M
+ };
+
+ PREFIX_PUSH();
+
+ r1_size = r2.base - r1.base;
+ r2_size = (r1.base + r1.size) - (r2.base + r2.size);
+ total_size = r1_size + r2_size;
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_remove(r2.base, r2.size);
+
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, r1_size);
+
+ ASSERT_EQ(rgn2->base, r2.base + r2.size);
+ ASSERT_EQ(rgn2->size, r2_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 2);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to remove a region r1 from the array of
+ * available memory regions when r1 is the only available region.
+ * Expect to add a memory block r1 and then remove it so that memblock.memory
+ * is empty. The region counter and total size are updated.
+ */
+static int memblock_remove_only_region_check(void)
+{
+ struct memblock_region *rgn;
+
+ rgn = &memblock.memory.regions[0];
+
+ struct region r1 = {
+ .base = SZ_2K,
+ .size = SZ_4K
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_remove(r1.base, r1.size);
+
+ ASSERT_EQ(rgn->base, 0);
+ ASSERT_EQ(rgn->size, 0);
+
+ ASSERT_EQ(memblock.memory.cnt, 0);
+ ASSERT_EQ(memblock.memory.total_size, 0);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to remove a region r2 from the array of
+ * available
+ * memory regions when r2 extends past PHYS_ADDR_MAX:
+ *
+ * +--------+
+ * | r2 |
+ * +--------+
+ * | +---+....+
+ * | |rgn| |
+ * +------------------------+---+----+
+ *
+ * Expect that only the portion between PHYS_ADDR_MAX and r2.base is removed.
+ * Expect the total size of available memory to be updated and the counter to
+ * not be updated.
+ */
+static int memblock_remove_near_max_check(void)
+{
+ struct memblock_region *rgn;
+ phys_addr_t total_size;
+
+ rgn = &memblock.memory.regions[0];
+
+ struct region r1 = {
+ .base = PHYS_ADDR_MAX - SZ_2M,
+ .size = SZ_2M
+ };
+
+ struct region r2 = {
+ .base = PHYS_ADDR_MAX - SZ_1M,
+ .size = SZ_2M
+ };
+
+ PREFIX_PUSH();
+
+ total_size = r1.size - (PHYS_ADDR_MAX - r2.base);
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_remove(r2.base, r2.size);
+
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to remove a region r3 that overlaps with two existing
+ * regions r1 and r2:
+ *
+ * +----------------+
+ * | r3 |
+ * +----------------+
+ * | +----+..... ........+--------+
+ * | | |r1 : : |r2 | |
+ * +----+----+----+---+-------+--------+-----+
+ *
+ * Expect that only the intersections of r1 with r3 and r2 with r3 are removed
+ * from the available memory pool. Expect the total size of available memory to
+ * be updated and the counter to not be updated.
+ */
+static int memblock_remove_overlap_two_check(void)
+{
+ struct memblock_region *rgn1, *rgn2;
+ phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size;
+
+ rgn1 = &memblock.memory.regions[0];
+ rgn2 = &memblock.memory.regions[1];
+
+ struct region r1 = {
+ .base = SZ_16M,
+ .size = SZ_32M
+ };
+ struct region r2 = {
+ .base = SZ_64M,
+ .size = SZ_64M
+ };
+ struct region r3 = {
+ .base = SZ_32M,
+ .size = SZ_64M
+ };
+
+ PREFIX_PUSH();
+
+ r2_end = r2.base + r2.size;
+ r3_end = r3.base + r3.size;
+ new_r1_size = r3.base - r1.base;
+ new_r2_size = r2_end - r3_end;
+ total_size = new_r1_size + new_r2_size;
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_add(r2.base, r2.size);
+ memblock_remove(r3.base, r3.size);
+
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, new_r1_size);
+
+ ASSERT_EQ(rgn2->base, r3_end);
+ ASSERT_EQ(rgn2->size, new_r2_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 2);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+static int memblock_remove_checks(void)
+{
+ prefix_reset();
+ prefix_push(FUNC_REMOVE);
+ test_print("Running %s tests...\n", FUNC_REMOVE);
+
+ memblock_remove_simple_check();
+ memblock_remove_absent_check();
+ memblock_remove_overlap_top_check();
+ memblock_remove_overlap_bottom_check();
+ memblock_remove_within_check();
+ memblock_remove_only_region_check();
+ memblock_remove_near_max_check();
+ memblock_remove_overlap_two_check();
+
+ prefix_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to free a memory block r1 that was marked
+ * earlier as reserved. By "freeing" a region we mean overwriting it with
+ * the next entry r2 in memblock.reserved:
+ *
+ * | ...... +----+ |
+ * | : r1 : | r2 | |
+ * +--------------+----+-----------+----+-+
+ * ^
+ * |
+ * rgn.base
+ *
+ * Expect to reserve two memory regions and then erase r1 region with the
+ * value of r2. The region counter and total size are updated.
+ */
+static int memblock_free_simple_check(void)
+{
+ struct memblock_region *rgn;
+
+ rgn = &memblock.reserved.regions[0];
+
+ struct region r1 = {
+ .base = SZ_4M,
+ .size = SZ_1M
+ };
+ struct region r2 = {
+ .base = SZ_8M,
+ .size = SZ_1M
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_reserve(r1.base, r1.size);
+ memblock_reserve(r2.base, r2.size);
+ memblock_free((void *)r1.base, r1.size);
+
+ ASSERT_EQ(rgn->base, r2.base);
+ ASSERT_EQ(rgn->size, r2.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, r2.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to free a region r2 that was not marked as reserved
+ * (i.e. has no corresponding entry in memblock.reserved):
+ *
+ * +----------------+
+ * | r2 |
+ * +----------------+
+ * | +----+ |
+ * | | r1 | |
+ * +--+----+------------------------------+
+ * ^
+ * |
+ * rgn.base
+ *
+ * The array, regions counter and total size are not modified.
+ */
+static int memblock_free_absent_check(void)
+{
+ struct memblock_region *rgn;
+
+ rgn = &memblock.reserved.regions[0];
+
+ struct region r1 = {
+ .base = SZ_2M,
+ .size = SZ_8K
+ };
+ struct region r2 = {
+ .base = SZ_16M,
+ .size = SZ_128M
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_reserve(r1.base, r1.size);
+ memblock_free((void *)r2.base, r2.size);
+
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, r1.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, r1.size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to free a region r2 that overlaps with the beginning
+ * of the already existing entry r1 (that is r1.base < r2.base + r2.size):
+ *
+ * +----+
+ * | r2 |
+ * +----+
+ * | ...+--------------+ |
+ * | : | r1 | |
+ * +----+--+--------------+---------------+
+ * ^ ^
+ * | |
+ * | rgn.base
+ * |
+ * r1.base
+ *
+ * Expect that only the intersection of both regions is freed. The total
+ * size is updated, while the region counter stays the same.
+ */
+static int memblock_free_overlap_top_check(void)
+{
+ struct memblock_region *rgn;
+ phys_addr_t total_size;
+
+ rgn = &memblock.reserved.regions[0];
+
+ struct region r1 = {
+ .base = SZ_8M,
+ .size = SZ_32M
+ };
+ struct region r2 = {
+ .base = SZ_1M,
+ .size = SZ_8M
+ };
+
+ PREFIX_PUSH();
+
+ total_size = (r1.size + r1.base) - (r2.base + r2.size);
+
+ reset_memblock_regions();
+ memblock_reserve(r1.base, r1.size);
+ memblock_free((void *)r2.base, r2.size);
+
+ ASSERT_EQ(rgn->base, r2.base + r2.size);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to free a region r2 that overlaps with the end of
+ * the already existing entry r1 (that is r2.base < r1.base + r1.size):
+ *
+ * +----------------+
+ * | r2 |
+ * +----------------+
+ * | +-----------+..... |
+ * | | r1 | : |
+ * +----+-----------+----+----------------+
+ *
+ * Expect that only the intersection of both regions is freed. The total
+ * size is updated, while the region counter stays the same.
+ */
+static int memblock_free_overlap_bottom_check(void)
+{
+ struct memblock_region *rgn;
+ phys_addr_t total_size;
+
+ rgn = &memblock.reserved.regions[0];
+
+ struct region r1 = {
+ .base = SZ_8M,
+ .size = SZ_32M
+ };
+ struct region r2 = {
+ .base = SZ_32M,
+ .size = SZ_32M
+ };
+
+ PREFIX_PUSH();
+
+ total_size = r2.base - r1.base;
+
+ reset_memblock_regions();
+ memblock_reserve(r1.base, r1.size);
+ memblock_free((void *)r2.base, r2.size);
+
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to free a region r2 that is within the range of the
+ * already existing entry r1 (that is
+ * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
+ *
+ * +----+
+ * | r2 |
+ * +----+
+ * | +------------+....+---------------+
+ * | | rgn1 | r1 | rgn2 |
+ * +----+------------+----+---------------+
+ * ^
+ * |
+ * r1.base
+ *
+ * Expect that the region is split into two - one that ends at r2.base and
+ * another that starts at r2.base + r2.size, with appropriate sizes. The
+ * region counter and total size fields are updated.
+ */
+static int memblock_free_within_check(void)
+{
+ struct memblock_region *rgn1, *rgn2;
+ phys_addr_t r1_size, r2_size, total_size;
+
+ rgn1 = &memblock.reserved.regions[0];
+ rgn2 = &memblock.reserved.regions[1];
+
+ struct region r1 = {
+ .base = SZ_1M,
+ .size = SZ_8M
+ };
+ struct region r2 = {
+ .base = SZ_4M,
+ .size = SZ_1M
+ };
+
+ PREFIX_PUSH();
+
+ r1_size = r2.base - r1.base;
+ r2_size = (r1.base + r1.size) - (r2.base + r2.size);
+ total_size = r1_size + r2_size;
+
+ reset_memblock_regions();
+ memblock_reserve(r1.base, r1.size);
+ memblock_free((void *)r2.base, r2.size);
+
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, r1_size);
+
+ ASSERT_EQ(rgn2->base, r2.base + r2.size);
+ ASSERT_EQ(rgn2->size, r2_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to free a memory block r1 that was marked
+ * earlier as reserved when r1 is the only reserved region.
+ * Expect to reserve a memory block r1 and then free it, leaving the
+ * reserved array empty. The region counter and total size are set to
+ * zero.
+ */
+static int memblock_free_only_region_check(void)
+{
+ struct memblock_region *rgn;
+
+ rgn = &memblock.reserved.regions[0];
+
+ struct region r1 = {
+ .base = SZ_2K,
+ .size = SZ_4K
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_reserve(r1.base, r1.size);
+ memblock_free((void *)r1.base, r1.size);
+
+ ASSERT_EQ(rgn->base, 0);
+ ASSERT_EQ(rgn->size, 0);
+
+ ASSERT_EQ(memblock.reserved.cnt, 0);
+ ASSERT_EQ(memblock.reserved.total_size, 0);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A simple test that tries to free a region r2 when r2 extends past
+ * PHYS_ADDR_MAX:
+ *
+ * +--------+
+ * | r2 |
+ * +--------+
+ * | +---+....+
+ * | |rgn| |
+ * +------------------------+---+----+
+ *
+ * Expect that only the portion between PHYS_ADDR_MAX and r2.base is freed.
+ * Expect the total size of reserved memory to be updated and the counter to
+ * not be updated.
+ */
+static int memblock_free_near_max_check(void)
+{
+ struct memblock_region *rgn;
+ phys_addr_t total_size;
+
+ rgn = &memblock.reserved.regions[0];
+
+ struct region r1 = {
+ .base = PHYS_ADDR_MAX - SZ_2M,
+ .size = SZ_2M
+ };
+
+ struct region r2 = {
+ .base = PHYS_ADDR_MAX - SZ_1M,
+ .size = SZ_2M
+ };
+
+ PREFIX_PUSH();
+
+ total_size = r1.size - (PHYS_ADDR_MAX - r2.base);
+
+ reset_memblock_regions();
+ memblock_reserve(r1.base, r1.size);
+ memblock_free((void *)r2.base, r2.size);
+
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to free a reserved region r3 that overlaps with two
+ * existing reserved regions r1 and r2:
+ *
+ * +----------------+
+ * | r3 |
+ * +----------------+
+ * | +----+..... ........+--------+
+ * | | |r1 : : |r2 | |
+ * +----+----+----+---+-------+--------+-----+
+ *
+ * Expect that only the intersections of r1 with r3 and r2 with r3 are freed
+ * from the collection of reserved memory. Expect the total size of reserved
+ * memory to be updated and the counter to not be updated.
+ */
+static int memblock_free_overlap_two_check(void)
+{
+ struct memblock_region *rgn1, *rgn2;
+ phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size;
+
+ rgn1 = &memblock.reserved.regions[0];
+ rgn2 = &memblock.reserved.regions[1];
+
+ struct region r1 = {
+ .base = SZ_16M,
+ .size = SZ_32M
+ };
+ struct region r2 = {
+ .base = SZ_64M,
+ .size = SZ_64M
+ };
+ struct region r3 = {
+ .base = SZ_32M,
+ .size = SZ_64M
+ };
+
+ PREFIX_PUSH();
+
+ r2_end = r2.base + r2.size;
+ r3_end = r3.base + r3.size;
+ new_r1_size = r3.base - r1.base;
+ new_r2_size = r2_end - r3_end;
+ total_size = new_r1_size + new_r2_size;
+
+ reset_memblock_regions();
+ memblock_reserve(r1.base, r1.size);
+ memblock_reserve(r2.base, r2.size);
+ memblock_free((void *)r3.base, r3.size);
+
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, new_r1_size);
+
+ ASSERT_EQ(rgn2->base, r3_end);
+ ASSERT_EQ(rgn2->size, new_r2_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+static int memblock_free_checks(void)
+{
+ prefix_reset();
+ prefix_push(FUNC_FREE);
+ test_print("Running %s tests...\n", FUNC_FREE);
+
+ memblock_free_simple_check();
+ memblock_free_absent_check();
+ memblock_free_overlap_top_check();
+ memblock_free_overlap_bottom_check();
+ memblock_free_within_check();
+ memblock_free_only_region_check();
+ memblock_free_near_max_check();
+ memblock_free_overlap_two_check();
+
+ prefix_pop();
+
+ return 0;
+}
+
+static int memblock_set_bottom_up_check(void)
+{
+ prefix_push("memblock_set_bottom_up");
+
+ memblock_set_bottom_up(false);
+ ASSERT_EQ(memblock.bottom_up, false);
+ memblock_set_bottom_up(true);
+ ASSERT_EQ(memblock.bottom_up, true);
+
+ reset_memblock_attributes();
+ test_pass_pop();
+
+ return 0;
+}
+
+static int memblock_bottom_up_check(void)
+{
+ prefix_push("memblock_bottom_up");
+
+ memblock_set_bottom_up(false);
+ ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
+ ASSERT_EQ(memblock_bottom_up(), false);
+ memblock_set_bottom_up(true);
+ ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
+ ASSERT_EQ(memblock_bottom_up(), true);
+
+ reset_memblock_attributes();
+ test_pass_pop();
+
+ return 0;
+}
+
+static int memblock_bottom_up_checks(void)
+{
+ test_print("Running memblock_*bottom_up tests...\n");
+
+ prefix_reset();
+ memblock_set_bottom_up_check();
+ prefix_reset();
+ memblock_bottom_up_check();
+
+ return 0;
+}
+
+/*
+ * A test that tries to trim memory when both ends of the memory region are
+ * aligned. Expect that the memory will not be trimmed. Expect the counter to
+ * not be updated.
+ */
+static int memblock_trim_memory_aligned_check(void)
+{
+ struct memblock_region *rgn;
+ const phys_addr_t alignment = SMP_CACHE_BYTES;
+
+ rgn = &memblock.memory.regions[0];
+
+ struct region r = {
+ .base = alignment,
+ .size = alignment * 4
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_add(r.base, r.size);
+ memblock_trim_memory(alignment);
+
+ ASSERT_EQ(rgn->base, r.base);
+ ASSERT_EQ(rgn->size, r.size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to trim memory when there are two available regions, r1 and
+ * r2. Region r1 is aligned on both ends and region r2 is unaligned on one end
+ * and smaller than the alignment:
+ *
+ * alignment
+ * |--------|
+ * | +-----------------+ +------+ |
+ * | | r1 | | r2 | |
+ * +--------+-----------------+--------+------+---+
+ * ^ ^ ^ ^ ^
+ * |________|________|________| |
+ * | Unaligned address
+ * Aligned addresses
+ *
+ * Expect that r1 will not be trimmed and r2 will be removed. Expect the
+ * counter to be updated.
+ */
+static int memblock_trim_memory_too_small_check(void)
+{
+ struct memblock_region *rgn;
+ const phys_addr_t alignment = SMP_CACHE_BYTES;
+
+ rgn = &memblock.memory.regions[0];
+
+ struct region r1 = {
+ .base = alignment,
+ .size = alignment * 2
+ };
+ struct region r2 = {
+ .base = alignment * 4,
+ .size = alignment - SZ_2
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_add(r2.base, r2.size);
+ memblock_trim_memory(alignment);
+
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, r1.size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+/*
+ * A test that tries to trim memory when there are two available regions, r1 and
+ * r2. Region r1 is aligned on both ends and region r2 is unaligned at the base
+ * and aligned at the end:
+ *
+ * Unaligned address
+ * |
+ * v
+ * | +-----------------+ +---------------+ |
+ * | | r1 | | r2 | |
+ * +--------+-----------------+----------+---------------+---+
+ * ^ ^ ^ ^ ^ ^
+ * |________|________|________|________|________|
+ * |
+ * Aligned addresses
+ *
+ * Expect that r1 will not be trimmed and r2 will be trimmed at the base.
+ * Expect the counter to not be updated.
+ */
+static int memblock_trim_memory_unaligned_base_check(void)
+{
+ struct memblock_region *rgn1, *rgn2;
+ const phys_addr_t alignment = SMP_CACHE_BYTES;
+ phys_addr_t offset = SZ_2;
+ phys_addr_t new_r2_base, new_r2_size;
+
+ rgn1 = &memblock.memory.regions[0];
+ rgn2 = &memblock.memory.regions[1];
+
+ struct region r1 = {
+ .base = alignment,
+ .size = alignment * 2
+ };
+ struct region r2 = {
+ .base = alignment * 4 + offset,
+ .size = alignment * 2 - offset
+ };
+
+ PREFIX_PUSH();
+
+ new_r2_base = r2.base + (alignment - offset);
+ new_r2_size = r2.size - (alignment - offset);
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_add(r2.base, r2.size);
+ memblock_trim_memory(alignment);
+
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, r1.size);
+
+ ASSERT_EQ(rgn2->base, new_r2_base);
+ ASSERT_EQ(rgn2->size, new_r2_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 2);
+
+ test_pass_pop();
+
+ return 0;
+}
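+
+/*
+ * Worked example for the base-trimming arithmetic above (illustrative
+ * numbers only): with alignment a and offset 2, r2 spans [4a + 2, 6a).
+ * memblock_trim_memory() rounds the base up to the next aligned
+ * address, 5a, so the size shrinks by a - 2 down to exactly a, matching
+ * the new_r2_base and new_r2_size values computed in the test.
+ */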
+
+/*
+ * A test that tries to trim memory when there are two available regions, r1 and
+ * r2. Region r1 is aligned on both ends and region r2 is aligned at the base
+ * and unaligned at the end:
+ *
+ * Unaligned address
+ * |
+ * v
+ * | +-----------------+ +---------------+ |
+ * | | r1 | | r2 | |
+ * +--------+-----------------+--------+---------------+---+
+ * ^ ^ ^ ^ ^ ^
+ * |________|________|________|________|________|
+ * |
+ * Aligned addresses
+ *
+ * Expect that r1 will not be trimmed and r2 will be trimmed at the end.
+ * Expect the counter to not be updated.
+ */
+static int memblock_trim_memory_unaligned_end_check(void)
+{
+ struct memblock_region *rgn1, *rgn2;
+ const phys_addr_t alignment = SMP_CACHE_BYTES;
+ phys_addr_t offset = SZ_2;
+ phys_addr_t new_r2_size;
+
+ rgn1 = &memblock.memory.regions[0];
+ rgn2 = &memblock.memory.regions[1];
+
+ struct region r1 = {
+ .base = alignment,
+ .size = alignment * 2
+ };
+ struct region r2 = {
+ .base = alignment * 4,
+ .size = alignment * 2 - offset
+ };
+
+ PREFIX_PUSH();
+
+ new_r2_size = r2.size - (alignment - offset);
+
+ reset_memblock_regions();
+ memblock_add(r1.base, r1.size);
+ memblock_add(r2.base, r2.size);
+ memblock_trim_memory(alignment);
+
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, r1.size);
+
+ ASSERT_EQ(rgn2->base, r2.base);
+ ASSERT_EQ(rgn2->size, new_r2_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 2);
+
+ test_pass_pop();
+
+ return 0;
+}
+
+static int memblock_trim_memory_checks(void)
+{
+ prefix_reset();
+ prefix_push(FUNC_TRIM);
+ test_print("Running %s tests...\n", FUNC_TRIM);
+
+ memblock_trim_memory_aligned_check();
+ memblock_trim_memory_too_small_check();
+ memblock_trim_memory_unaligned_base_check();
+ memblock_trim_memory_unaligned_end_check();
+
+ prefix_pop();
+
+ return 0;
+}
+
+static int memblock_overlaps_region_check(void)
+{
+ struct region r = {
+ .base = SZ_1G,
+ .size = SZ_4M
+ };
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_add(r.base, r.size);
+
+ /* Far Away */
+ ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1M, SZ_1M));
+ ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_2G, SZ_1M));
+
+ /* Neighbor */
+ ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_1M, SZ_1M));
+ ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_4M, SZ_1M));
+
+ /* Partial Overlap */
+ ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_1M, SZ_2M));
+ ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_2M, SZ_2M));
+
+	/* Total Overlap */
+ ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G, SZ_4M));
+ ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_2M, SZ_8M));
+ ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_1M, SZ_1M));
+
+ test_pass_pop();
+
+ return 0;
+}
+
+static int memblock_overlaps_region_checks(void)
+{
+ prefix_reset();
+ prefix_push("memblock_overlaps_region");
+ test_print("Running memblock_overlaps_region tests...\n");
+
+ memblock_overlaps_region_check();
+
+ prefix_pop();
+
+ return 0;
+}
+
+#ifdef CONFIG_NUMA
+static int memblock_set_node_check(void)
+{
+ unsigned long i, max_reserved;
+ struct memblock_region *rgn;
+ void *orig_region;
+
+ PREFIX_PUSH();
+
+ reset_memblock_regions();
+ memblock_allow_resize();
+
+ dummy_physical_memory_init();
+ memblock_add(dummy_physical_memory_base(), MEM_SIZE);
+ orig_region = memblock.reserved.regions;
+
+	/* Split the range equally between nodes 0 and 1 */
+ memblock_set_node(memblock_start_of_DRAM(),
+ memblock_phys_mem_size() / 2, &memblock.memory, 0);
+ memblock_set_node(memblock_start_of_DRAM() + memblock_phys_mem_size() / 2,
+ memblock_phys_mem_size() / 2, &memblock.memory, 1);
+
+ ASSERT_EQ(memblock.memory.cnt, 2);
+ rgn = &memblock.memory.regions[0];
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
+ ASSERT_EQ(rgn->size, memblock_phys_mem_size() / 2);
+ ASSERT_EQ(memblock_get_region_node(rgn), 0);
+ rgn = &memblock.memory.regions[1];
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM() + memblock_phys_mem_size() / 2);
+ ASSERT_EQ(rgn->size, memblock_phys_mem_size() / 2);
+ ASSERT_EQ(memblock_get_region_node(rgn), 1);
+
+	/* Reserve 126 regions, with the last one crossing the node boundary */
+ for (i = 0; i < 125; i++)
+ memblock_reserve(memblock_start_of_DRAM() + SZ_16 * i, SZ_8);
+
+ memblock_reserve(memblock_start_of_DRAM() + memblock_phys_mem_size() / 2 - SZ_8,
+ SZ_16);
+
+	/*
+	 * Commit 61167ad5fecd ("mm: pass nid to reserve_bootmem_region()")
+	 * runs the loop below to set the nid of each memblock.reserved
+	 * region, but it may miss some regions if memblock_set_node()
+	 * doubles the array.
+	 *
+	 * By re-checking 'max' after each pass, we make sure the nid of
+	 * every region is set properly.
+	 */
+repeat:
+ max_reserved = memblock.reserved.max;
+ for_each_mem_region(rgn) {
+ int nid = memblock_get_region_node(rgn);
+
+ memblock_set_node(rgn->base, rgn->size, &memblock.reserved, nid);
+ }
+ if (max_reserved != memblock.reserved.max)
+ goto repeat;
+
+ /* Confirm each region has valid node set */
+ for_each_reserved_mem_region(rgn) {
+ ASSERT_TRUE(numa_valid_node(memblock_get_region_node(rgn)));
+ if (rgn == (memblock.reserved.regions + memblock.reserved.cnt - 1))
+ ASSERT_EQ(1, memblock_get_region_node(rgn));
+ else
+ ASSERT_EQ(0, memblock_get_region_node(rgn));
+ }
+
+ dummy_physical_memory_cleanup();
+
+	/*
+	 * The current reserved.regions array occupies a range of memory
+	 * that was allocated by dummy_physical_memory_init(). After that
+	 * memory is freed, we must not use it. Restore the original array
+	 * so that later tests run as normal and are not affected by the
+	 * doubled array.
+	 */
+ memblock.reserved.regions = orig_region;
+ memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
+
+ test_pass_pop();
+
+ return 0;
+}
+
+static int memblock_set_node_checks(void)
+{
+ prefix_reset();
+ prefix_push("memblock_set_node");
+ test_print("Running memblock_set_node tests...\n");
+
+ memblock_set_node_check();
+
+ prefix_pop();
+
+ return 0;
+}
+#else
+static int memblock_set_node_checks(void)
+{
+ return 0;
+}
+#endif
+
+int memblock_basic_checks(void)
+{
+ memblock_initialization_check();
+ memblock_add_checks();
+ memblock_reserve_checks();
+ memblock_remove_checks();
+ memblock_free_checks();
+ memblock_bottom_up_checks();
+ memblock_trim_memory_checks();
+ memblock_overlaps_region_checks();
+ memblock_set_node_checks();
+
+ return 0;
+}
diff --git a/tools/testing/memblock/tests/basic_api.h b/tools/testing/memblock/tests/basic_api.h
new file mode 100644
index 000000000000..1873faa54754
--- /dev/null
+++ b/tools/testing/memblock/tests/basic_api.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _MEMBLOCK_BASIC_H
+#define _MEMBLOCK_BASIC_H
+
+#include "common.h"
+
+int memblock_basic_checks(void);
+
+#endif
diff --git a/tools/testing/memblock/tests/common.c b/tools/testing/memblock/tests/common.c
new file mode 100644
index 000000000000..3250c8e5124b
--- /dev/null
+++ b/tools/testing/memblock/tests/common.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include "tests/common.h"
+#include <string.h>
+#include <getopt.h>
+#include <linux/memory_hotplug.h>
+#include <linux/build_bug.h>
+
+#define PREFIXES_MAX 15
+#define DELIM ": "
+#define BASIS 10000
+
+static struct test_memory memory_block;
+static const char __maybe_unused *prefixes[PREFIXES_MAX];
+static int __maybe_unused nr_prefixes;
+
+static const char *short_opts = "hmv";
+static const struct option long_opts[] = {
+ {"help", 0, NULL, 'h'},
+ {"movable-node", 0, NULL, 'm'},
+ {"verbose", 0, NULL, 'v'},
+ {NULL, 0, NULL, 0}
+};
+
+static const char * const help_opts[] = {
+ "display this help message and exit",
+ "disallow allocations from regions marked as hotplugged\n\t\t\t"
+ "by simulating enabling the \"movable_node\" kernel\n\t\t\t"
+ "parameter",
+ "enable verbose output, which includes the name of the\n\t\t\t"
+ "memblock function being tested, the name of the test,\n\t\t\t"
+ "and whether the test passed or failed."
+};
+
+static int verbose;
+
+/* sets global variable returned by movable_node_is_enabled() stub */
+bool movable_node_enabled;
+
+void reset_memblock_regions(void)
+{
+ memset(memblock.memory.regions, 0,
+ memblock.memory.cnt * sizeof(struct memblock_region));
+ memblock.memory.cnt = 0;
+ memblock.memory.max = INIT_MEMBLOCK_REGIONS;
+ memblock.memory.total_size = 0;
+
+ memset(memblock.reserved.regions, 0,
+ memblock.reserved.cnt * sizeof(struct memblock_region));
+ memblock.reserved.cnt = 0;
+ memblock.reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS;
+ memblock.reserved.total_size = 0;
+}
+
+void reset_memblock_attributes(void)
+{
+ memblock.memory.name = "memory";
+ memblock.reserved.name = "reserved";
+ memblock.bottom_up = false;
+ memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
+}
+
+static inline void fill_memblock(void)
+{
+ memset(memory_block.base, 1, PHYS_MEM_SIZE);
+}
+
+void setup_memblock(void)
+{
+ reset_memblock_regions();
+ memblock_add((phys_addr_t)memory_block.base, MEM_SIZE);
+ fill_memblock();
+}
+
+/**
+ * setup_numa_memblock:
+ * Set up a memory layout with multiple NUMA nodes in a previously allocated
+ * dummy physical memory.
+ * @node_fracs: an array representing the fraction of MEM_SIZE contained in
+ * each node in basis point units (one hundredth of 1% or 1/10000).
+ * For example, if node 0 should contain 1/8 of MEM_SIZE,
+ * node_fracs[0] = 1250.
+ *
+ * The nids will be set to 0 through NUMA_NODES - 1.
+ */
+void setup_numa_memblock(const unsigned int node_fracs[])
+{
+ phys_addr_t base;
+ int flags;
+
+ reset_memblock_regions();
+ base = (phys_addr_t)memory_block.base;
+ flags = (movable_node_is_enabled()) ? MEMBLOCK_NONE : MEMBLOCK_HOTPLUG;
+
+ for (int i = 0; i < NUMA_NODES; i++) {
+ assert(node_fracs[i] <= BASIS);
+ phys_addr_t size = MEM_SIZE * node_fracs[i] / BASIS;
+
+ memblock_add_node(base, size, i, flags);
+ base += size;
+ }
+ fill_memblock();
+}
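+
+/*
+ * Usage sketch (hypothetical fractions, not taken from any caller in
+ * this file): an even split of MEM_SIZE across all NUMA_NODES is 1250
+ * basis points per node, matching the 1/8 example above:
+ *
+ *	static const unsigned int even_fracs[NUMA_NODES] = {
+ *		1250, 1250, 1250, 1250, 1250, 1250, 1250, 1250
+ *	};
+ *
+ *	setup_numa_memblock(even_fracs);
+ */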
+
+void dummy_physical_memory_init(void)
+{
+ memory_block.base = malloc(PHYS_MEM_SIZE);
+ assert(memory_block.base);
+ fill_memblock();
+}
+
+void dummy_physical_memory_cleanup(void)
+{
+ free(memory_block.base);
+}
+
+phys_addr_t dummy_physical_memory_base(void)
+{
+ return (phys_addr_t)memory_block.base;
+}
+
+static void usage(const char *prog)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(help_opts) != ARRAY_SIZE(long_opts) - 1);
+
+ printf("Usage: %s [-%s]\n", prog, short_opts);
+
+ for (int i = 0; long_opts[i].name; i++) {
+ printf(" -%c, --%-12s\t%s\n", long_opts[i].val,
+ long_opts[i].name, help_opts[i]);
+ }
+
+ exit(1);
+}
+
+void parse_args(int argc, char **argv)
+{
+ int c;
+
+ while ((c = getopt_long_only(argc, argv, short_opts, long_opts,
+ NULL)) != -1) {
+ switch (c) {
+ case 'm':
+ movable_node_enabled = true;
+ break;
+ case 'v':
+ verbose = 1;
+ break;
+ default:
+ usage(argv[0]);
+ }
+ }
+}
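+
+/*
+ * For example, running the built test binary with "-mv" (or the
+ * equivalent long options --movable-node --verbose) enables both the
+ * movable_node simulation and verbose output.
+ */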
+
+void print_prefixes(const char *postfix)
+{
+ for (int i = 0; i < nr_prefixes; i++)
+ test_print("%s%s", prefixes[i], DELIM);
+ test_print(postfix);
+}
+
+void test_fail(void)
+{
+ if (verbose) {
+ ksft_test_result_fail(": ");
+ print_prefixes("failed\n");
+ }
+}
+
+void test_pass(void)
+{
+ if (verbose) {
+ ksft_test_result_pass(": ");
+ print_prefixes("passed\n");
+ }
+}
+
+void test_print(const char *fmt, ...)
+{
+ if (verbose) {
+ int saved_errno = errno;
+ va_list args;
+
+ va_start(args, fmt);
+ errno = saved_errno;
+ vprintf(fmt, args);
+ va_end(args);
+ }
+}
+
+void prefix_reset(void)
+{
+ memset(prefixes, 0, PREFIXES_MAX * sizeof(char *));
+ nr_prefixes = 0;
+}
+
+void prefix_push(const char *prefix)
+{
+ assert(nr_prefixes < PREFIXES_MAX);
+ prefixes[nr_prefixes] = prefix;
+ nr_prefixes++;
+}
+
+void prefix_pop(void)
+{
+ if (nr_prefixes > 0) {
+ prefixes[nr_prefixes - 1] = 0;
+ nr_prefixes--;
+ }
+}
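+
+/*
+ * Taken together, the prefix helpers drive the verbose output format:
+ * after prefix_push("memblock_add") and prefix_push("simple"),
+ * test_pass() reports a line along the lines of
+ * "ok 1 : memblock_add: simple: passed".
+ */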
diff --git a/tools/testing/memblock/tests/common.h b/tools/testing/memblock/tests/common.h
new file mode 100644
index 000000000000..e1138e06c903
--- /dev/null
+++ b/tools/testing/memblock/tests/common.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _MEMBLOCK_TEST_H
+#define _MEMBLOCK_TEST_H
+
+#include <stdlib.h>
+#include <assert.h>
+#include <linux/types.h>
+#include <linux/seq_file.h>
+#include <linux/memblock.h>
+#include <linux/sizes.h>
+#include <linux/printk.h>
+#include <../selftests/kselftest.h>
+
+#define MEM_SIZE SZ_32K
+#define PHYS_MEM_SIZE SZ_16M
+#define NUMA_NODES 8
+
+#define INIT_MEMBLOCK_REGIONS 128
+#define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
+
+enum test_flags {
+ /* No special request. */
+ TEST_F_NONE = 0x0,
+ /* Perform raw allocations (no zeroing of memory). */
+ TEST_F_RAW = 0x1,
+ /* Perform allocations on the exact node specified. */
+ TEST_F_EXACT = 0x2
+};
+
+/**
+ * ASSERT_EQ():
+ * Check the condition
+ * @_expected == @_seen
+ * If false, print failed test message (if running with --verbose) and then
+ * assert.
+ */
+#define ASSERT_EQ(_expected, _seen) do { \
+ if ((_expected) != (_seen)) \
+ test_fail(); \
+ assert((_expected) == (_seen)); \
+} while (0)
+
+#define ASSERT_TRUE(_seen) ASSERT_EQ(true, _seen)
+#define ASSERT_FALSE(_seen) ASSERT_EQ(false, _seen)
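+
+/*
+ * Note that every ASSERT_*() macro ends in assert(), so the first
+ * failing check aborts the whole run; test_fail() only ensures the
+ * failure is reported before the abort when running with --verbose.
+ */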
+
+/**
+ * ASSERT_NE():
+ * Check the condition
+ * @_expected != @_seen
+ * If false, print failed test message (if running with --verbose) and then
+ * assert.
+ */
+#define ASSERT_NE(_expected, _seen) do { \
+ if ((_expected) == (_seen)) \
+ test_fail(); \
+ assert((_expected) != (_seen)); \
+} while (0)
+
+/**
+ * ASSERT_LT():
+ * Check the condition
+ * @_expected < @_seen
+ * If false, print failed test message (if running with --verbose) and then
+ * assert.
+ */
+#define ASSERT_LT(_expected, _seen) do { \
+ if ((_expected) >= (_seen)) \
+ test_fail(); \
+ assert((_expected) < (_seen)); \
+} while (0)
+
+/**
+ * ASSERT_LE():
+ * Check the condition
+ * @_expected <= @_seen
+ * If false, print failed test message (if running with --verbose) and then
+ * assert.
+ */
+#define ASSERT_LE(_expected, _seen) do { \
+ if ((_expected) > (_seen)) \
+ test_fail(); \
+ assert((_expected) <= (_seen)); \
+} while (0)
+
+/**
+ * ASSERT_MEM_EQ():
+ * Check that the first @_size bytes of @_seen are all equal to @_expected.
+ * If false, print failed test message (if running with --verbose) and then
+ * assert.
+ */
+#define ASSERT_MEM_EQ(_seen, _expected, _size) do { \
+ for (int _i = 0; _i < (_size); _i++) { \
+ ASSERT_EQ(((char *)_seen)[_i], (_expected)); \
+ } \
+} while (0)
+
+/**
+ * ASSERT_MEM_NE():
+ * Check that none of the first @_size bytes of @_seen are equal to @_expected.
+ * If false, print failed test message (if running with --verbose) and then
+ * assert.
+ */
+#define ASSERT_MEM_NE(_seen, _expected, _size) do { \
+ for (int _i = 0; _i < (_size); _i++) { \
+ ASSERT_NE(((char *)_seen)[_i], (_expected)); \
+ } \
+} while (0)
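+
+/*
+ * Usage sketch (hypothetical allocation, not from this file): a zeroing
+ * allocator should leave no stray bytes behind, which the memory
+ * assertions can verify directly:
+ *
+ *	char *mem = memblock_alloc(SZ_1K, SMP_CACHE_BYTES);
+ *
+ *	ASSERT_NE(mem, NULL);
+ *	ASSERT_MEM_EQ(mem, 0, SZ_1K);
+ */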
+
+#define PREFIX_PUSH() prefix_push(__func__)
+
+/*
+ * Available memory registered with memblock needs to be valid for allocs
+ * test to run. This is a convenience wrapper for memory allocated in
+ * dummy_physical_memory_init() that is later registered with memblock
+ * in setup_memblock().
+ */
+struct test_memory {
+ void *base;
+};
+
+struct region {
+ phys_addr_t base;
+ phys_addr_t size;
+};
+
+static inline phys_addr_t __maybe_unused region_end(struct memblock_region *rgn)
+{
+ return rgn->base + rgn->size;
+}
+
+void reset_memblock_regions(void);
+void reset_memblock_attributes(void);
+void setup_memblock(void);
+void setup_numa_memblock(const unsigned int node_fracs[]);
+void dummy_physical_memory_init(void);
+void dummy_physical_memory_cleanup(void);
+phys_addr_t dummy_physical_memory_base(void);
+void parse_args(int argc, char **argv);
+
+void test_fail(void);
+void test_pass(void);
+void test_print(const char *fmt, ...);
+void prefix_reset(void);
+void prefix_push(const char *prefix);
+void prefix_pop(void);
+
+static inline void test_pass_pop(void)
+{
+ test_pass();
+ prefix_pop();
+}
+
+static inline void run_top_down(int (*func)(void))
+{
+ memblock_set_bottom_up(false);
+ prefix_push("top-down");
+ func();
+ prefix_pop();
+}
+
+static inline void run_bottom_up(int (*func)(void))
+{
+ memblock_set_bottom_up(true);
+ prefix_push("bottom-up");
+ func();
+ prefix_pop();
+}
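+
+/*
+ * Usage sketch (alloc_simple_check is a hypothetical test function):
+ * a suite would typically exercise each check in both allocation
+ * directions:
+ *
+ *	run_top_down(alloc_simple_check);
+ *	run_bottom_up(alloc_simple_check);
+ */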
+
+static inline void assert_mem_content(void *mem, int size, int flags)
+{
+ if (flags & TEST_F_RAW)
+ ASSERT_MEM_NE(mem, 0, size);
+ else
+ ASSERT_MEM_EQ(mem, 0, size);
+}
+
+#endif
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index 47f9cc9dcd94..91a3627f301a 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -27,7 +27,6 @@ ccflags-y += -I$(srctree)/drivers/acpi/nfit/
obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
obj-$(CONFIG_ND_BTT) += nd_btt.o
-obj-$(CONFIG_ND_BLK) += nd_blk.o
obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
obj-$(CONFIG_ACPI_NFIT) += nfit.o
ifeq ($(CONFIG_DAX),m)
@@ -35,8 +34,6 @@ obj-$(CONFIG_DAX) += dax.o
endif
obj-$(CONFIG_DEV_DAX) += device_dax.o
obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
-obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem_core.o
-obj-$(CONFIG_DEV_DAX_PMEM_COMPAT) += dax_pmem_compat.o
nfit-y := $(ACPI_SRC)/core.o
nfit-y += $(ACPI_SRC)/intel.o
@@ -52,9 +49,6 @@ nd_pmem-y += config_check.o
nd_btt-y := $(NVDIMM_SRC)/btt.o
nd_btt-y += config_check.o
-nd_blk-y := $(NVDIMM_SRC)/blk.o
-nd_blk-y += config_check.o
-
nd_e820-y := $(NVDIMM_SRC)/e820.o
nd_e820-y += config_check.o
@@ -67,12 +61,8 @@ device_dax-y += dax-dev.o
device_dax-y += device_dax_test.o
device_dax-y += config_check.o
-dax_pmem-y := $(DAX_SRC)/pmem/pmem.o
+dax_pmem-y := $(DAX_SRC)/pmem.o
dax_pmem-y += dax_pmem_test.o
-dax_pmem_core-y := $(DAX_SRC)/pmem/core.o
-dax_pmem_core-y += dax_pmem_core_test.o
-dax_pmem_compat-y := $(DAX_SRC)/pmem/compat.o
-dax_pmem_compat-y += dax_pmem_compat_test.o
dax_pmem-y += config_check.o
libnvdimm-y := $(NVDIMM_SRC)/core.o
@@ -89,8 +79,9 @@ libnvdimm-$(CONFIG_BTT) += $(NVDIMM_SRC)/btt_devs.o
libnvdimm-$(CONFIG_NVDIMM_PFN) += $(NVDIMM_SRC)/pfn_devs.o
libnvdimm-$(CONFIG_NVDIMM_DAX) += $(NVDIMM_SRC)/dax_devs.o
libnvdimm-$(CONFIG_NVDIMM_KEYS) += $(NVDIMM_SRC)/security.o
-libnvdimm-y += dimm_devs.o
libnvdimm-y += libnvdimm_test.o
libnvdimm-y += config_check.o
+KBUILD_CFLAGS := $(filter-out -Wmissing-prototypes -Wmissing-declarations, $(KBUILD_CFLAGS))
+
obj-m += test/
diff --git a/tools/testing/nvdimm/config_check.c b/tools/testing/nvdimm/config_check.c
index cac891028cd1..baed75e2ccbc 100644
--- a/tools/testing/nvdimm/config_check.c
+++ b/tools/testing/nvdimm/config_check.c
@@ -11,8 +11,8 @@ void check(void)
BUILD_BUG_ON(!IS_MODULE(CONFIG_BLK_DEV_PMEM));
BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BTT));
BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_PFN));
- BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BLK));
- BUILD_BUG_ON(!IS_MODULE(CONFIG_ACPI_NFIT));
+ if (IS_ENABLED(CONFIG_ACPI_NFIT))
+ BUILD_BUG_ON(!IS_MODULE(CONFIG_ACPI_NFIT));
BUILD_BUG_ON(!IS_MODULE(CONFIG_DEV_DAX));
BUILD_BUG_ON(!IS_MODULE(CONFIG_DEV_DAX_PMEM));
}
diff --git a/tools/testing/nvdimm/dax_pmem_compat_test.c b/tools/testing/nvdimm/dax_pmem_compat_test.c
deleted file mode 100644
index 7cd1877f3765..000000000000
--- a/tools/testing/nvdimm/dax_pmem_compat_test.c
+++ /dev/null
@@ -1,8 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright(c) 2019 Intel Corporation. All rights reserved.
-
-#include <linux/module.h>
-#include <linux/printk.h>
-#include "watermark.h"
-
-nfit_test_watermark(dax_pmem_compat);
diff --git a/tools/testing/nvdimm/dax_pmem_core_test.c b/tools/testing/nvdimm/dax_pmem_core_test.c
deleted file mode 100644
index a4249cdbeec1..000000000000
--- a/tools/testing/nvdimm/dax_pmem_core_test.c
+++ /dev/null
@@ -1,8 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright(c) 2019 Intel Corporation. All rights reserved.
-
-#include <linux/module.h>
-#include <linux/printk.h>
-#include "watermark.h"
-
-nfit_test_watermark(dax_pmem_core);
diff --git a/tools/testing/nvdimm/dimm_devs.c b/tools/testing/nvdimm/dimm_devs.c
deleted file mode 100644
index 57bd27dedf1f..000000000000
--- a/tools/testing/nvdimm/dimm_devs.c
+++ /dev/null
@@ -1,30 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright Intel Corp. 2018 */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/nd.h>
-#include "pmem.h"
-#include "pfn.h"
-#include "nd.h"
-#include "nd-core.h"
-
-ssize_t security_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct nvdimm *nvdimm = to_nvdimm(dev);
-
- /*
- * For the test version we need to poll the "hardware" in order
- * to get the updated status for unlock testing.
- */
- nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
-
- if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
- return sprintf(buf, "disabled\n");
- if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
- return sprintf(buf, "unlocked\n");
- if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
- return sprintf(buf, "locked\n");
- return -ENOTTY;
-}
diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c
index af19c85558e7..05e763aab104 100644
--- a/tools/testing/nvdimm/pmem-dax.c
+++ b/tools/testing/nvdimm/pmem-dax.c
@@ -4,11 +4,13 @@
*/
#include "test/nfit_test.h"
#include <linux/blkdev.h>
+#include <linux/dax.h>
#include <pmem.h>
#include <nd.h>
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ unsigned long *pfn)
{
resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
@@ -27,7 +29,7 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
*kaddr = pmem->virt_addr + offset;
page = vmalloc_to_page(pmem->virt_addr + offset);
if (pfn)
- *pfn = page_to_pfn_t(page);
+ *pfn = page_to_pfn(page);
pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
__func__, pmem, pgoff, page_to_pfn(page));
@@ -37,7 +39,7 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
if (kaddr)
*kaddr = pmem->virt_addr + offset;
if (pfn)
- *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+ *pfn = PHYS_PFN(pmem->phys_addr + offset);
/*
* If badblocks are present, limit known good range to the
diff --git a/tools/testing/nvdimm/test/Kbuild b/tools/testing/nvdimm/test/Kbuild
index 75baebf8f4ba..003d48f5f24f 100644
--- a/tools/testing/nvdimm/test/Kbuild
+++ b/tools/testing/nvdimm/test/Kbuild
@@ -5,5 +5,10 @@ ccflags-y += -I$(srctree)/drivers/acpi/nfit/
obj-m += nfit_test.o
obj-m += nfit_test_iomap.o
-nfit_test-y := nfit.o
+ifeq ($(CONFIG_ACPI_NFIT),m)
+ nfit_test-y := nfit.o
+ obj-m += ndtest.o
+else
+ nfit_test-y := ndtest.o
+endif
nfit_test_iomap-y := iomap.o
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index c62d372d426f..f7e7bfe9bb85 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -8,7 +8,6 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/pfn_t.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/mm.h>
@@ -62,16 +61,14 @@ struct nfit_test_resource *get_nfit_res(resource_size_t resource)
}
EXPORT_SYMBOL(get_nfit_res);
-void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
- void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
-{
- struct nfit_test_resource *nfit_res = get_nfit_res(offset);
-
- if (nfit_res)
- return (void __iomem *) nfit_res->buf + offset
- - nfit_res->res.start;
- return fallback_fn(offset, size);
-}
+#define __nfit_test_ioremap(offset, size, fallback_fn) ({		\
+	struct nfit_test_resource *nfit_res = get_nfit_res(offset);	\
+	nfit_res ?							\
+		(void __iomem *) nfit_res->buf + (offset)		\
+			- nfit_res->res.start :				\
+		fallback_fn((offset), (size));				\
+})
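+
+/*
+ * Unlike the function it replaces, this macro evaluates @offset more
+ * than once, so callers must not pass expressions with side effects.
+ */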
void __iomem *__wrap_devm_ioremap(struct device *dev,
resource_size_t offset, unsigned long size)
@@ -100,25 +97,17 @@ static void nfit_test_kill(void *_pgmap)
{
struct dev_pagemap *pgmap = _pgmap;
- WARN_ON(!pgmap || !pgmap->ref);
+ WARN_ON(!pgmap);
- if (pgmap->ops && pgmap->ops->kill)
- pgmap->ops->kill(pgmap);
- else
- percpu_ref_kill(pgmap->ref);
+ percpu_ref_kill(&pgmap->ref);
- if (pgmap->ops && pgmap->ops->cleanup) {
- pgmap->ops->cleanup(pgmap);
- } else {
- wait_for_completion(&pgmap->done);
- percpu_ref_exit(pgmap->ref);
- }
+ wait_for_completion(&pgmap->done);
+ percpu_ref_exit(&pgmap->ref);
}
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
- struct dev_pagemap *pgmap =
- container_of(ref, struct dev_pagemap, internal_ref);
+ struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);
complete(&pgmap->done);
}
@@ -132,22 +121,11 @@ void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
if (!nfit_res)
return devm_memremap_pages(dev, pgmap);
- if (!pgmap->ref) {
- if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
- return ERR_PTR(-EINVAL);
-
- init_completion(&pgmap->done);
- error = percpu_ref_init(&pgmap->internal_ref,
- dev_pagemap_percpu_release, 0, GFP_KERNEL);
- if (error)
- return ERR_PTR(error);
- pgmap->ref = &pgmap->internal_ref;
- } else {
- if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
- WARN(1, "Missing reference count teardown definition\n");
- return ERR_PTR(-EINVAL);
- }
- }
+ init_completion(&pgmap->done);
+ error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
+ GFP_KERNEL);
+ if (error)
+ return ERR_PTR(error);
error = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
if (error)
@@ -156,16 +134,6 @@ void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
}
EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
-pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
-{
- struct nfit_test_resource *nfit_res = get_nfit_res(addr);
-
- if (nfit_res)
- flags &= ~PFN_MAP;
- return phys_to_pfn_t(addr, flags);
-}
-EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
-
void *__wrap_memremap(resource_size_t offset, size_t size,
unsigned long flags)
{
@@ -428,4 +396,5 @@ union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *g
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
+MODULE_DESCRIPTION("NVDIMM unit test");
MODULE_LICENSE("GPL v2");
diff --git a/tools/testing/nvdimm/test/ndtest.c b/tools/testing/nvdimm/test/ndtest.c
new file mode 100644
index 000000000000..8e3b6be53839
--- /dev/null
+++ b/tools/testing/nvdimm/test/ndtest.c
@@ -0,0 +1,1003 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/genalloc.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <linux/list_sort.h>
+#include <linux/libnvdimm.h>
+#include <linux/ndctl.h>
+#include <nd-core.h>
+#include <linux/printk.h>
+#include <linux/seq_buf.h>
+#include <linux/papr_scm.h>
+#include <uapi/linux/papr_pdsm.h>
+
+#include "../watermark.h"
+#include "nfit_test.h"
+#include "ndtest.h"
+
+enum {
+ DIMM_SIZE = SZ_32M,
+ LABEL_SIZE = SZ_128K,
+ NUM_INSTANCES = 2,
+ NUM_DCR = 4,
+ NDTEST_MAX_MAPPING = 6,
+};
+
+#define NDTEST_SCM_DIMM_CMD_MASK \
+ ((1ul << ND_CMD_GET_CONFIG_SIZE) | \
+ (1ul << ND_CMD_GET_CONFIG_DATA) | \
+ (1ul << ND_CMD_SET_CONFIG_DATA) | \
+ (1ul << ND_CMD_CALL))
+
+#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
+ (((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
+ | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
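+
+/*
+ * For example, NFIT_DIMM_HANDLE(0, 1, 0, 0, 0) packs to 0x1000:
+ * node 0, socket 1, imc 0, channel 0, dimm 0.
+ */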
+
+static DEFINE_SPINLOCK(ndtest_lock);
+static struct ndtest_priv *instances[NUM_INSTANCES];
+
+static const struct class ndtest_dimm_class = {
+ .name = "nfit_test_dimm",
+};
+
+static struct gen_pool *ndtest_pool;
+
+static struct ndtest_dimm dimm_group1[] = {
+ {
+ .size = DIMM_SIZE,
+ .handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
+ .uuid_str = "1e5c75d2-b618-11ea-9aa3-507b9ddc0f72",
+ .physical_id = 0,
+ .num_formats = 2,
+ },
+ {
+ .size = DIMM_SIZE,
+ .handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
+ .uuid_str = "1c4d43ac-b618-11ea-be80-507b9ddc0f72",
+ .physical_id = 1,
+ .num_formats = 2,
+ },
+ {
+ .size = DIMM_SIZE,
+ .handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
+ .uuid_str = "a9f17ffc-b618-11ea-b36d-507b9ddc0f72",
+ .physical_id = 2,
+ .num_formats = 2,
+ },
+ {
+ .size = DIMM_SIZE,
+ .handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
+ .uuid_str = "b6b83b22-b618-11ea-8aae-507b9ddc0f72",
+ .physical_id = 3,
+ .num_formats = 2,
+ },
+ {
+ .size = DIMM_SIZE,
+ .handle = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
+ .uuid_str = "bf9baaee-b618-11ea-b181-507b9ddc0f72",
+ .physical_id = 4,
+ .num_formats = 2,
+ },
+};
+
+static struct ndtest_dimm dimm_group2[] = {
+ {
+ .size = DIMM_SIZE,
+ .handle = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
+ .uuid_str = "ca0817e2-b618-11ea-9db3-507b9ddc0f72",
+ .physical_id = 0,
+ .num_formats = 1,
+ .flags = PAPR_PMEM_UNARMED | PAPR_PMEM_EMPTY |
+ PAPR_PMEM_SAVE_FAILED | PAPR_PMEM_SHUTDOWN_DIRTY |
+ PAPR_PMEM_HEALTH_FATAL,
+ },
+};
+
+static struct ndtest_mapping region0_mapping[] = {
+ {
+ .dimm = 0,
+ .position = 0,
+ .start = 0,
+ .size = SZ_16M,
+ },
+ {
+ .dimm = 1,
+ .position = 1,
+ .start = 0,
+ .size = SZ_16M,
+ }
+};
+
+static struct ndtest_mapping region1_mapping[] = {
+ {
+ .dimm = 0,
+ .position = 0,
+ .start = SZ_16M,
+ .size = SZ_16M,
+ },
+ {
+ .dimm = 1,
+ .position = 1,
+ .start = SZ_16M,
+ .size = SZ_16M,
+ },
+ {
+ .dimm = 2,
+ .position = 2,
+ .start = SZ_16M,
+ .size = SZ_16M,
+ },
+ {
+ .dimm = 3,
+ .position = 3,
+ .start = SZ_16M,
+ .size = SZ_16M,
+ },
+};
+
+static struct ndtest_region bus0_regions[] = {
+ {
+ .type = ND_DEVICE_NAMESPACE_PMEM,
+ .num_mappings = ARRAY_SIZE(region0_mapping),
+ .mapping = region0_mapping,
+ .size = DIMM_SIZE,
+ .range_index = 1,
+ },
+ {
+ .type = ND_DEVICE_NAMESPACE_PMEM,
+ .num_mappings = ARRAY_SIZE(region1_mapping),
+ .mapping = region1_mapping,
+ .size = DIMM_SIZE * 2,
+ .range_index = 2,
+ },
+};
+
+static struct ndtest_mapping region6_mapping[] = {
+ {
+ .dimm = 0,
+ .position = 0,
+ .start = 0,
+ .size = DIMM_SIZE,
+ },
+};
+
+static struct ndtest_region bus1_regions[] = {
+ {
+ .type = ND_DEVICE_NAMESPACE_IO,
+ .num_mappings = ARRAY_SIZE(region6_mapping),
+ .mapping = region6_mapping,
+ .size = DIMM_SIZE,
+ .range_index = 1,
+ },
+};
+
+static struct ndtest_config bus_configs[NUM_INSTANCES] = {
+ /* bus 1 */
+ {
+ .dimm_start = 0,
+ .dimm_count = ARRAY_SIZE(dimm_group1),
+ .dimms = dimm_group1,
+ .regions = bus0_regions,
+ .num_regions = ARRAY_SIZE(bus0_regions),
+ },
+ /* bus 2 */
+ {
+ .dimm_start = ARRAY_SIZE(dimm_group1),
+ .dimm_count = ARRAY_SIZE(dimm_group2),
+ .dimms = dimm_group2,
+ .regions = bus1_regions,
+ .num_regions = ARRAY_SIZE(bus1_regions),
+ },
+};
+
+static inline struct ndtest_priv *to_ndtest_priv(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ return container_of(pdev, struct ndtest_priv, pdev);
+}
+
+static int ndtest_config_get(struct ndtest_dimm *p, unsigned int buf_len,
+ struct nd_cmd_get_config_data_hdr *hdr)
+{
+ unsigned int len;
+
+ if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
+ return -EINVAL;
+
+ hdr->status = 0;
+ len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
+ memcpy(hdr->out_buf, p->label_area + hdr->in_offset, len);
+
+ return buf_len - len;
+}
+
+static int ndtest_config_set(struct ndtest_dimm *p, unsigned int buf_len,
+ struct nd_cmd_set_config_hdr *hdr)
+{
+ unsigned int len;
+
+ if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
+ return -EINVAL;
+
+ len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
+ memcpy(p->label_area + hdr->in_offset, hdr->in_buf, len);
+
+ return buf_len - len;
+}
+
+static int ndtest_get_config_size(struct ndtest_dimm *dimm, unsigned int buf_len,
+ struct nd_cmd_get_config_size *size)
+{
+ size->status = 0;
+ size->max_xfer = 8;
+ size->config_size = dimm->config_size;
+
+ return 0;
+}
+
+static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+ unsigned int buf_len, int *cmd_rc)
+{
+ struct ndtest_dimm *dimm;
+ int _cmd_rc;
+
+ if (!cmd_rc)
+ cmd_rc = &_cmd_rc;
+
+ *cmd_rc = 0;
+
+ if (!nvdimm)
+ return -EINVAL;
+
+ dimm = nvdimm_provider_data(nvdimm);
+ if (!dimm)
+ return -EINVAL;
+
+ switch (cmd) {
+ case ND_CMD_GET_CONFIG_SIZE:
+ *cmd_rc = ndtest_get_config_size(dimm, buf_len, buf);
+ break;
+ case ND_CMD_GET_CONFIG_DATA:
+ *cmd_rc = ndtest_config_get(dimm, buf_len, buf);
+ break;
+ case ND_CMD_SET_CONFIG_DATA:
+ *cmd_rc = ndtest_config_set(dimm, buf_len, buf);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+	/*
+	 * Failures for a DIMM can be injected using the fail_cmd and
+	 * fail_cmd_code device attributes; see below.
+	 */
+ if ((1 << cmd) & dimm->fail_cmd)
+ return dimm->fail_cmd_code ? dimm->fail_cmd_code : -EIO;
+
+ return 0;
+}
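+
+/*
+ * Illustrative sketch (hypothetical values): with the fail_cmd and
+ * fail_cmd_code attributes defined below, userspace can force every
+ * ND_CMD_GET_CONFIG_DATA call on a test DIMM to fail with -EACCES by
+ * writing the mask (1 << ND_CMD_GET_CONFIG_DATA) to fail_cmd and -13
+ * to fail_cmd_code.
+ */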
+
+static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
+{
+ int i;
+
+ for (i = 0; i < NUM_INSTANCES; i++) {
+ struct nfit_test_resource *n, *nfit_res = NULL;
+ struct ndtest_priv *t = instances[i];
+
+ if (!t)
+ continue;
+ spin_lock(&ndtest_lock);
+ list_for_each_entry(n, &t->resources, list) {
+ if (addr >= n->res.start && (addr < n->res.start
+ + resource_size(&n->res))) {
+ nfit_res = n;
+ break;
+ } else if (addr >= (unsigned long) n->buf
+ && (addr < (unsigned long) n->buf
+ + resource_size(&n->res))) {
+ nfit_res = n;
+ break;
+ }
+ }
+ spin_unlock(&ndtest_lock);
+ if (nfit_res)
+ return nfit_res;
+ }
+
+ pr_warn("Failed to get resource\n");
+
+ return NULL;
+}
+
+static void ndtest_release_resource(void *data)
+{
+ struct nfit_test_resource *res = data;
+
+ spin_lock(&ndtest_lock);
+ list_del(&res->list);
+ spin_unlock(&ndtest_lock);
+
+ if (resource_size(&res->res) >= DIMM_SIZE)
+ gen_pool_free(ndtest_pool, res->res.start,
+ resource_size(&res->res));
+ vfree(res->buf);
+ kfree(res);
+}
+
+static void *ndtest_alloc_resource(struct ndtest_priv *p, size_t size,
+ dma_addr_t *dma)
+{
+	dma_addr_t __dma = 0;
+ void *buf;
+ struct nfit_test_resource *res;
+ struct genpool_data_align data = {
+ .align = SZ_128M,
+ };
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return NULL;
+
+	buf = vmalloc(size);
+	if (!buf)
+		goto buf_err;
+
+ if (size >= DIMM_SIZE)
+ __dma = gen_pool_alloc_algo(ndtest_pool, size,
+ gen_pool_first_fit_align, &data);
+ else
+ __dma = (unsigned long) buf;
+
+ if (!__dma)
+ goto buf_err;
+
+ INIT_LIST_HEAD(&res->list);
+ res->dev = &p->pdev.dev;
+ res->buf = buf;
+ res->res.start = __dma;
+ res->res.end = __dma + size - 1;
+ res->res.name = "NFIT";
+ spin_lock_init(&res->lock);
+ INIT_LIST_HEAD(&res->requests);
+ spin_lock(&ndtest_lock);
+ list_add(&res->list, &p->resources);
+ spin_unlock(&ndtest_lock);
+
+ if (dma)
+ *dma = __dma;
+
+	if (!devm_add_action_or_reset(&p->pdev.dev, ndtest_release_resource,
+				      res))
+		return res->buf;
+
+	return NULL;
+
+buf_err:
+ if (__dma && size >= DIMM_SIZE)
+ gen_pool_free(ndtest_pool, __dma, size);
+ if (buf)
+ vfree(buf);
+ kfree(res);
+
+ return NULL;
+}
+
+static ssize_t range_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+ struct ndtest_region *region = nd_region_provider_data(nd_region);
+
+ return sprintf(buf, "%d\n", region->range_index);
+}
+static DEVICE_ATTR_RO(range_index);
+
+static struct attribute *ndtest_region_attributes[] = {
+ &dev_attr_range_index.attr,
+ NULL,
+};
+
+static const struct attribute_group ndtest_region_attribute_group = {
+ .name = "papr",
+ .attrs = ndtest_region_attributes,
+};
+
+static const struct attribute_group *ndtest_region_attribute_groups[] = {
+ &ndtest_region_attribute_group,
+ NULL,
+};
+
+static int ndtest_create_region(struct ndtest_priv *p,
+ struct ndtest_region *region)
+{
+ struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING];
+ struct nd_region_desc *ndr_desc, _ndr_desc;
+ struct nd_interleave_set *nd_set;
+ struct resource res;
+ int i, ndimm = region->mapping[0].dimm;
+ u64 uuid[2];
+
+ memset(&res, 0, sizeof(res));
+ memset(&mappings, 0, sizeof(mappings));
+ memset(&_ndr_desc, 0, sizeof(_ndr_desc));
+ ndr_desc = &_ndr_desc;
+
+ if (!ndtest_alloc_resource(p, region->size, &res.start))
+ return -ENOMEM;
+
+ res.end = res.start + region->size - 1;
+ ndr_desc->mapping = mappings;
+ ndr_desc->res = &res;
+ ndr_desc->provider_data = region;
+ ndr_desc->attr_groups = ndtest_region_attribute_groups;
+
+ if (uuid_parse(p->config->dimms[ndimm].uuid_str, (uuid_t *)uuid)) {
+ pr_err("failed to parse UUID\n");
+ return -ENXIO;
+ }
+
+ nd_set = devm_kzalloc(&p->pdev.dev, sizeof(*nd_set), GFP_KERNEL);
+ if (!nd_set)
+ return -ENOMEM;
+
+ nd_set->cookie1 = cpu_to_le64(uuid[0]);
+ nd_set->cookie2 = cpu_to_le64(uuid[1]);
+ nd_set->altcookie = nd_set->cookie1;
+ ndr_desc->nd_set = nd_set;
+
+ for (i = 0; i < region->num_mappings; i++) {
+ ndimm = region->mapping[i].dimm;
+ mappings[i].start = region->mapping[i].start;
+ mappings[i].size = region->mapping[i].size;
+ mappings[i].position = region->mapping[i].position;
+ mappings[i].nvdimm = p->config->dimms[ndimm].nvdimm;
+ }
+
+ ndr_desc->num_mappings = region->num_mappings;
+ region->region = nvdimm_pmem_region_create(p->bus, ndr_desc);
+
+ if (!region->region) {
+ dev_err(&p->pdev.dev, "Error registering region %pR\n",
+ ndr_desc->res);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int ndtest_init_regions(struct ndtest_priv *p)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < p->config->num_regions; i++) {
+ ret = ndtest_create_region(p, &p->config->regions[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void put_dimms(void *data)
+{
+ struct ndtest_priv *p = data;
+ int i;
+
+ for (i = 0; i < p->config->dimm_count; i++)
+ if (p->config->dimms[i].dev) {
+ device_unregister(p->config->dimms[i].dev);
+ p->config->dimms[i].dev = NULL;
+ }
+}
+
+static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ndtest_dimm *dimm = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%#x\n", dimm->handle);
+}
+static DEVICE_ATTR_RO(handle);
+
+static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ndtest_dimm *dimm = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%#x\n", dimm->fail_cmd);
+}
+
+static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct ndtest_dimm *dimm = dev_get_drvdata(dev);
+	unsigned long val;
+	ssize_t rc;
+
+	rc = kstrtoul(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ dimm->fail_cmd = val;
+
+ return size;
+}
+static DEVICE_ATTR_RW(fail_cmd);
+
+static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ndtest_dimm *dimm = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", dimm->fail_cmd_code);
+}
+
+static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct ndtest_dimm *dimm = dev_get_drvdata(dev);
+	long val;
+	ssize_t rc;
+
+	rc = kstrtol(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ dimm->fail_cmd_code = val;
+ return size;
+}
+static DEVICE_ATTR_RW(fail_cmd_code);
+
+static struct attribute *dimm_attributes[] = {
+ &dev_attr_handle.attr,
+ &dev_attr_fail_cmd.attr,
+ &dev_attr_fail_cmd_code.attr,
+ NULL,
+};
+
+static struct attribute_group dimm_attribute_group = {
+ .attrs = dimm_attributes,
+};
+
+static const struct attribute_group *dimm_attribute_groups[] = {
+ &dimm_attribute_group,
+ NULL,
+};
+
+static ssize_t phys_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
+
+ return sprintf(buf, "%#x\n", dimm->physical_id);
+}
+static DEVICE_ATTR_RO(phys_id);
+
+static ssize_t vendor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x1234567\n");
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
+
+ return sprintf(buf, "%04x-%02x-%04x-%08x", 0xabcd,
+ 0xa, 2016, ~(dimm->handle));
+}
+static DEVICE_ATTR_RO(id);
+
+static ssize_t nvdimm_handle_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
+
+ return sprintf(buf, "%#x\n", dimm->handle);
+}
+
+static struct device_attribute dev_attr_nvdimm_show_handle = {
+ .attr = { .name = "handle", .mode = 0444 },
+ .show = nvdimm_handle_show,
+};
+
+static ssize_t subsystem_vendor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x%04x\n", 0);
+}
+static DEVICE_ATTR_RO(subsystem_vendor);
+
+static ssize_t dirty_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", 42);
+}
+static DEVICE_ATTR_RO(dirty_shutdown);
+
+static ssize_t formats_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
+
+ return sprintf(buf, "%d\n", dimm->num_formats);
+}
+static DEVICE_ATTR_RO(formats);
+
+static ssize_t format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
+
+ if (dimm->num_formats > 1)
+ return sprintf(buf, "0x201\n");
+
+ return sprintf(buf, "0x101\n");
+}
+static DEVICE_ATTR_RO(format);
+
+static ssize_t format1_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "0x301\n");
+}
+static DEVICE_ATTR_RO(format1);
+
+static umode_t ndtest_nvdimm_attr_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
+
+ if (a == &dev_attr_format1.attr && dimm->num_formats <= 1)
+ return 0;
+
+ return a->mode;
+}
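+
+/*
+ * The is_visible hook above is consulted once per attribute when the
+ * "papr" group is registered, so a DIMM with a single format never gets a
+ * format1 file at all rather than getting one that reads as empty. A
+ * sketch of the resulting layout (paths assumed, not captured from a run):
+ *
+ *	.../nmemX/papr/format	- always present
+ *	.../nmemX/papr/format1	- present only when num_formats > 1
+ */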
+
+static ssize_t flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
+ struct seq_buf s;
+ u64 flags;
+
+ flags = dimm->flags;
+
+ seq_buf_init(&s, buf, PAGE_SIZE);
+ if (flags & PAPR_PMEM_UNARMED_MASK)
+ seq_buf_printf(&s, "not_armed ");
+
+ if (flags & PAPR_PMEM_BAD_SHUTDOWN_MASK)
+ seq_buf_printf(&s, "flush_fail ");
+
+ if (flags & PAPR_PMEM_BAD_RESTORE_MASK)
+ seq_buf_printf(&s, "restore_fail ");
+
+ if (flags & PAPR_PMEM_SAVE_MASK)
+ seq_buf_printf(&s, "save_fail ");
+
+ if (flags & PAPR_PMEM_SMART_EVENT_MASK)
+ seq_buf_printf(&s, "smart_notify ");
+
+ if (seq_buf_used(&s))
+ seq_buf_printf(&s, "\n");
+
+ return seq_buf_used(&s);
+}
+static DEVICE_ATTR_RO(flags);
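+
+/*
+ * flags_show() uses seq_buf as a bounds-checked alternative to chaining
+ * sprintf() into the PAGE_SIZE sysfs buffer. The same pattern in minimal
+ * form (hypothetical attribute, not part of this driver):
+ *
+ *	struct seq_buf s;
+ *
+ *	seq_buf_init(&s, buf, PAGE_SIZE);
+ *	seq_buf_printf(&s, "first ");
+ *	seq_buf_printf(&s, "second\n");
+ *	return seq_buf_used(&s);
+ */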
+
+static struct attribute *ndtest_nvdimm_attributes[] = {
+ &dev_attr_nvdimm_show_handle.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_id.attr,
+ &dev_attr_phys_id.attr,
+ &dev_attr_subsystem_vendor.attr,
+ &dev_attr_dirty_shutdown.attr,
+ &dev_attr_formats.attr,
+ &dev_attr_format.attr,
+ &dev_attr_format1.attr,
+ &dev_attr_flags.attr,
+ NULL,
+};
+
+static const struct attribute_group ndtest_nvdimm_attribute_group = {
+ .name = "papr",
+ .attrs = ndtest_nvdimm_attributes,
+ .is_visible = ndtest_nvdimm_attr_visible,
+};
+
+static const struct attribute_group *ndtest_nvdimm_attribute_groups[] = {
+ &ndtest_nvdimm_attribute_group,
+ NULL,
+};
+
+static int ndtest_dimm_register(struct ndtest_priv *priv,
+ struct ndtest_dimm *dimm, int id)
+{
+ struct device *dev = &priv->pdev.dev;
+ unsigned long dimm_flags = dimm->flags;
+
+ if (dimm->num_formats > 1)
+ set_bit(NDD_LABELING, &dimm_flags);
+
+ if (dimm->flags & PAPR_PMEM_UNARMED_MASK)
+ set_bit(NDD_UNARMED, &dimm_flags);
+
+ dimm->nvdimm = nvdimm_create(priv->bus, dimm,
+ ndtest_nvdimm_attribute_groups, dimm_flags,
+ NDTEST_SCM_DIMM_CMD_MASK, 0, NULL);
+ if (!dimm->nvdimm) {
+ dev_err(dev, "Error creating DIMM object for %pOF\n", priv->dn);
+ return -ENXIO;
+ }
+
+ dimm->dev = device_create_with_groups(&ndtest_dimm_class,
+ &priv->pdev.dev,
+ 0, dimm, dimm_attribute_groups,
+ "test_dimm%d", id);
+	if (IS_ERR(dimm->dev)) {
+		pr_err("Could not create dimm device attributes\n");
+		return PTR_ERR(dimm->dev);
+	}
+
+ return 0;
+}
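+
+/*
+ * NDD_LABELING advertises that namespaces on the DIMM are carved out via
+ * its label area, and NDD_UNARMED marks the DIMM as unable to persist
+ * writes. A dimm config exercising both would look roughly like this
+ * (illustrative, not one of the configs in this file):
+ *
+ *	{
+ *		.num_formats = 2,			-> sets NDD_LABELING
+ *		.flags = PAPR_PMEM_UNARMED_MASK,	-> sets NDD_UNARMED
+ *	}
+ */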
+
+static int ndtest_nvdimm_init(struct ndtest_priv *p)
+{
+ struct ndtest_dimm *d;
+ void *res;
+	int i, id, rc;
+
+ for (i = 0; i < p->config->dimm_count; i++) {
+ d = &p->config->dimms[i];
+ d->id = id = p->config->dimm_start + i;
+ res = ndtest_alloc_resource(p, LABEL_SIZE, NULL);
+ if (!res)
+ return -ENOMEM;
+
+ d->label_area = res;
+ sprintf(d->label_area, "label%d", id);
+ d->config_size = LABEL_SIZE;
+
+ if (!ndtest_alloc_resource(p, d->size,
+ &p->dimm_dma[id]))
+ return -ENOMEM;
+
+ if (!ndtest_alloc_resource(p, LABEL_SIZE,
+ &p->label_dma[id]))
+ return -ENOMEM;
+
+ if (!ndtest_alloc_resource(p, LABEL_SIZE,
+ &p->dcr_dma[id]))
+ return -ENOMEM;
+
+ d->address = p->dimm_dma[id];
+
+		rc = ndtest_dimm_register(p, d, id);
+		if (rc)
+			return rc;
+ }
+
+ return 0;
+}
+
+static ssize_t compatible_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "nvdimm_test");
+}
+static DEVICE_ATTR_RO(compatible);
+
+static struct attribute *of_node_attributes[] = {
+ &dev_attr_compatible.attr,
+ NULL
+};
+
+static const struct attribute_group of_node_attribute_group = {
+ .name = "of_node",
+ .attrs = of_node_attributes,
+};
+
+static const struct attribute_group *ndtest_attribute_groups[] = {
+ &of_node_attribute_group,
+ NULL,
+};
+
+static int ndtest_bus_register(struct ndtest_priv *p)
+{
+ p->config = &bus_configs[p->pdev.id];
+
+ p->bus_desc.ndctl = ndtest_ctl;
+ p->bus_desc.module = THIS_MODULE;
+ p->bus_desc.provider_name = NULL;
+ p->bus_desc.attr_groups = ndtest_attribute_groups;
+
+ p->bus = nvdimm_bus_register(&p->pdev.dev, &p->bus_desc);
+ if (!p->bus) {
+ dev_err(&p->pdev.dev, "Error creating nvdimm bus %pOF\n", p->dn);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ndtest_remove(struct platform_device *pdev)
+{
+ struct ndtest_priv *p = to_ndtest_priv(&pdev->dev);
+
+ nvdimm_bus_unregister(p->bus);
+}
+
+static int ndtest_probe(struct platform_device *pdev)
+{
+ struct ndtest_priv *p;
+ int rc;
+
+ p = to_ndtest_priv(&pdev->dev);
+ if (ndtest_bus_register(p))
+ return -ENOMEM;
+
+ p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!p->dcr_dma) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!p->label_dma) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!p->dimm_dma) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ rc = ndtest_nvdimm_init(p);
+ if (rc)
+ goto err;
+
+ rc = ndtest_init_regions(p);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(&pdev->dev, put_dimms, p);
+ if (rc)
+ goto err;
+
+ platform_set_drvdata(pdev, p);
+
+ return 0;
+
+err:
+	nvdimm_bus_unregister(p->bus);
+	pr_err("%s:%d Failed nvdimm init\n", __func__, __LINE__);
+ return rc;
+}
+
+static const struct platform_device_id ndtest_id[] = {
+ { KBUILD_MODNAME },
+ { },
+};
+
+static struct platform_driver ndtest_driver = {
+ .probe = ndtest_probe,
+ .remove = ndtest_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .id_table = ndtest_id,
+};
+
+static void ndtest_release(struct device *dev)
+{
+ struct ndtest_priv *p = to_ndtest_priv(dev);
+
+ kfree(p);
+}
+
+static void cleanup_devices(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_INSTANCES; i++)
+ if (instances[i])
+ platform_device_unregister(&instances[i]->pdev);
+
+ nfit_test_teardown();
+
+ if (ndtest_pool)
+ gen_pool_destroy(ndtest_pool);
+
+ class_unregister(&ndtest_dimm_class);
+}
+
+static __init int ndtest_init(void)
+{
+ int rc, i;
+
+ pmem_test();
+ libnvdimm_test();
+ device_dax_test();
+ dax_pmem_test();
+
+ nfit_test_setup(ndtest_resource_lookup, NULL);
+
+ rc = class_register(&ndtest_dimm_class);
+ if (rc)
+ goto err_register;
+
+ ndtest_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
+ if (!ndtest_pool) {
+ rc = -ENOMEM;
+ goto err_register;
+ }
+
+ if (gen_pool_add(ndtest_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
+ rc = -ENOMEM;
+ goto err_register;
+ }
+
+	/* Each instance represents a bus, and each bus can have multiple DIMMs */
+ for (i = 0; i < NUM_INSTANCES; i++) {
+ struct ndtest_priv *priv;
+ struct platform_device *pdev;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ rc = -ENOMEM;
+ goto err_register;
+ }
+
+ INIT_LIST_HEAD(&priv->resources);
+ pdev = &priv->pdev;
+ pdev->name = KBUILD_MODNAME;
+ pdev->id = i;
+ pdev->dev.release = ndtest_release;
+ rc = platform_device_register(pdev);
+ if (rc) {
+ put_device(&pdev->dev);
+ goto err_register;
+ }
+ get_device(&pdev->dev);
+
+ instances[i] = priv;
+ }
+
+ rc = platform_driver_register(&ndtest_driver);
+ if (rc)
+ goto err_register;
+
+ return 0;
+
+err_register:
+ pr_err("Error registering platform device\n");
+ cleanup_devices();
+
+ return rc;
+}
+
+static __exit void ndtest_exit(void)
+{
+ cleanup_devices();
+ platform_driver_unregister(&ndtest_driver);
+}
+
+module_init(ndtest_init);
+module_exit(ndtest_exit);
+MODULE_DESCRIPTION("Test non-NFIT devices");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
diff --git a/tools/testing/nvdimm/test/ndtest.h b/tools/testing/nvdimm/test/ndtest.h
new file mode 100644
index 000000000000..8f27ad6f7319
--- /dev/null
+++ b/tools/testing/nvdimm/test/ndtest.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef NDTEST_H
+#define NDTEST_H
+
+#include <linux/platform_device.h>
+#include <linux/libnvdimm.h>
+
+struct ndtest_config;
+
+struct ndtest_priv {
+ struct platform_device pdev;
+ struct device_node *dn;
+ struct list_head resources;
+ struct nvdimm_bus_descriptor bus_desc;
+ struct nvdimm_bus *bus;
+ struct ndtest_config *config;
+
+ dma_addr_t *dcr_dma;
+ dma_addr_t *label_dma;
+ dma_addr_t *dimm_dma;
+};
+
+struct ndtest_blk_mmio {
+ void __iomem *base;
+ u64 size;
+ u64 base_offset;
+ u32 line_size;
+ u32 num_lines;
+ u32 table_size;
+};
+
+struct ndtest_dimm {
+ struct device *dev;
+ struct nvdimm *nvdimm;
+ struct ndtest_blk_mmio *mmio;
+ struct nd_region *blk_region;
+
+ dma_addr_t address;
+ unsigned long long flags;
+ unsigned long config_size;
+ void *label_area;
+ char *uuid_str;
+
+ unsigned int size;
+ unsigned int handle;
+ unsigned int fail_cmd;
+ unsigned int physical_id;
+ unsigned int num_formats;
+ int id;
+ int fail_cmd_code;
+ u8 no_alias;
+};
+
+struct ndtest_mapping {
+ u64 start;
+ u64 size;
+ u8 position;
+ u8 dimm;
+};
+
+struct ndtest_region {
+ struct nd_region *region;
+ struct ndtest_mapping *mapping;
+ u64 size;
+ u8 type;
+ u8 num_mappings;
+ u8 range_index;
+};
+
+struct ndtest_config {
+ struct ndtest_dimm *dimms;
+ struct ndtest_region *regions;
+ unsigned int dimm_count;
+ unsigned int dimm_start;
+ u8 num_regions;
+};
+
+#endif /* NDTEST_H */
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 9b185bf82da8..f87e9f251d13 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -23,8 +23,6 @@
#include "nfit_test.h"
#include "../watermark.h"
-#include <asm/mce.h>
-
/*
* Generate an NFIT table to describe the following topology:
*
@@ -434,7 +432,7 @@ static int nd_intel_test_finish_query(struct nfit_test *t,
dev_dbg(dev, "%s: transition out verify\n", __func__);
fw->state = FW_STATE_UPDATED;
fw->missed_activate = false;
- /* fall through */
+ fallthrough;
case FW_STATE_UPDATED:
nd_cmd->status = 0;
/* bogus test version */
@@ -672,6 +670,7 @@ static int nfit_test_search_spa(struct nvdimm_bus *bus,
.addr = spa->spa,
.region = NULL,
};
+ struct nfit_mem *nfit_mem;
u64 dpa;
ret = device_for_each_child(&bus->dev, &ctx,
@@ -689,8 +688,12 @@ static int nfit_test_search_spa(struct nvdimm_bus *bus,
*/
nd_mapping = &nd_region->mapping[nd_region->ndr_mappings - 1];
nvdimm = nd_mapping->nvdimm;
+ nfit_mem = nvdimm_provider_data(nvdimm);
+ if (!nfit_mem)
+ return -EINVAL;
- spa->devices[0].nfit_device_handle = handle[nvdimm->id];
+ spa->devices[0].nfit_device_handle =
+ __to_nfit_memdev(nfit_mem)->device_handle;
spa->num_nvdimms = 1;
spa->devices[0].dpa = dpa;
@@ -1714,7 +1717,9 @@ static void put_dimms(void *data)
device_unregister(t->dimm_dev[i]);
}
-static struct class *nfit_test_dimm;
+static const struct class nfit_test_dimm = {
+ .name = "nfit_test_dimm",
+};
static int dimm_name_to_id(struct device *dev)
{
@@ -1832,7 +1837,7 @@ static int nfit_test_dimm_init(struct nfit_test *t)
if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t))
return -ENOMEM;
for (i = 0; i < t->num_dcr; i++) {
- t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm,
+ t->dimm_dev[i] = device_create_with_groups(&nfit_test_dimm,
&t->pdev.dev, 0, NULL,
nfit_test_dimm_attribute_groups,
"test_dimm%d", i + t->dcr_idx);
@@ -1842,7 +1847,7 @@ static int nfit_test_dimm_init(struct nfit_test *t)
return 0;
}
-static void security_init(struct nfit_test *t)
+static void nfit_security_init(struct nfit_test *t)
{
int i;
@@ -1871,16 +1876,23 @@ static void smart_init(struct nfit_test *t)
}
}
+static size_t sizeof_spa(struct acpi_nfit_system_address *spa)
+{
+ /* until spa location cookie support is added... */
+ return sizeof(*spa) - 8;
+}
+
static int nfit_test0_alloc(struct nfit_test *t)
{
- size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
+ struct acpi_nfit_system_address *spa = NULL;
+ struct acpi_nfit_flush_address *flush;
+ size_t nfit_size = sizeof_spa(spa) * NUM_SPA
+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
+ offsetof(struct acpi_nfit_control_region,
window_size) * NUM_DCR
+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
- + (sizeof(struct acpi_nfit_flush_address)
- + sizeof(u64) * NUM_HINTS) * NUM_DCR
+ + struct_size(flush, hint_address, NUM_HINTS) * NUM_DCR
+ sizeof(struct acpi_nfit_capabilities);
int i;
@@ -1931,13 +1943,14 @@ static int nfit_test0_alloc(struct nfit_test *t)
if (nfit_test_dimm_init(t))
return -ENOMEM;
smart_init(t);
- security_init(t);
+ nfit_security_init(t);
return ars_state_init(&t->pdev.dev, &t->ars_state);
}
static int nfit_test1_alloc(struct nfit_test *t)
{
- size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2
+ struct acpi_nfit_system_address *spa = NULL;
+ size_t nfit_size = sizeof_spa(spa) * 2
+ sizeof(struct acpi_nfit_memory_map) * 2
+ offsetof(struct acpi_nfit_control_region, window_size) * 2;
int i;
@@ -2000,7 +2013,7 @@ static void nfit_test0_setup(struct nfit_test *t)
*/
spa = nfit_buf;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
spa->range_index = 0+1;
spa->address = t->spa_set_dma[0];
@@ -2014,7 +2027,7 @@ static void nfit_test0_setup(struct nfit_test *t)
*/
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
spa->range_index = 1+1;
spa->address = t->spa_set_dma[1];
@@ -2024,7 +2037,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa2 (dcr0) dimm0 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
spa->range_index = 2+1;
spa->address = t->dcr_dma[0];
@@ -2034,7 +2047,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa3 (dcr1) dimm1 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
spa->range_index = 3+1;
spa->address = t->dcr_dma[1];
@@ -2044,7 +2057,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa4 (dcr2) dimm2 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
spa->range_index = 4+1;
spa->address = t->dcr_dma[2];
@@ -2054,7 +2067,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa5 (dcr3) dimm3 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
spa->range_index = 5+1;
spa->address = t->dcr_dma[3];
@@ -2064,7 +2077,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa6 (bdw for dcr0) dimm0 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
spa->range_index = 6+1;
spa->address = t->dimm_dma[0];
@@ -2074,7 +2087,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa7 (bdw for dcr1) dimm1 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
spa->range_index = 7+1;
spa->address = t->dimm_dma[1];
@@ -2084,7 +2097,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa8 (bdw for dcr2) dimm2 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
spa->range_index = 8+1;
spa->address = t->dimm_dma[2];
@@ -2094,7 +2107,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa9 (bdw for dcr3) dimm3 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
spa->range_index = 9+1;
spa->address = t->dimm_dma[3];
@@ -2581,7 +2594,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa10 (dcr4) dimm4 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
spa->range_index = 10+1;
spa->address = t->dcr_dma[4];
@@ -2595,7 +2608,7 @@ static void nfit_test0_setup(struct nfit_test *t)
*/
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
spa->range_index = 11+1;
spa->address = t->spa_set_dma[2];
@@ -2605,7 +2618,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa12 (bdw for dcr4) dimm4 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
spa->range_index = 12+1;
spa->address = t->dimm_dma[4];
@@ -2739,7 +2752,7 @@ static void nfit_test1_setup(struct nfit_test *t)
/* spa0 (flat range with no bdw aliasing) */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
spa->range_index = 0+1;
spa->address = t->spa_set_dma[0];
@@ -2749,7 +2762,7 @@ static void nfit_test1_setup(struct nfit_test *t)
/* virtual cd region */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
spa->range_index = 0;
spa->address = t->spa_set_dma[1];
@@ -2834,28 +2847,6 @@ static void nfit_test1_setup(struct nfit_test *t)
set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
}
-static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
- void *iobuf, u64 len, int rw)
-{
- struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
- struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
- struct nd_region *nd_region = &ndbr->nd_region;
- unsigned int lane;
-
- lane = nd_region_acquire_lane(nd_region);
- if (rw)
- memcpy(mmio->addr.base + dpa, iobuf, len);
- else {
- memcpy(iobuf, mmio->addr.base + dpa, len);
-
- /* give us some some coverage of the arch_invalidate_pmem() API */
- arch_invalidate_pmem(mmio->addr.base + dpa, len);
- }
- nd_region_release_lane(nd_region, lane);
-
- return 0;
-}
-
static unsigned long nfit_ctl_handle;
union acpi_object *result;
@@ -3211,7 +3202,6 @@ static int nfit_test_probe(struct platform_device *pdev)
nfit_test->setup(nfit_test);
acpi_desc = &nfit_test->acpi_desc;
acpi_nfit_desc_init(acpi_desc, &pdev->dev);
- acpi_desc->blk_do_io = nfit_test_blk_do_io;
nd_desc = &acpi_desc->nd_desc;
nd_desc->provider_name = NULL;
nd_desc->module = THIS_MODULE;
@@ -3257,11 +3247,6 @@ static int nfit_test_probe(struct platform_device *pdev)
return 0;
}
-static int nfit_test_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static void nfit_test_release(struct device *dev)
{
struct nfit_test *nfit_test = to_nfit_test(dev);
@@ -3276,7 +3261,6 @@ static const struct platform_device_id nfit_test_id[] = {
static struct platform_driver nfit_test_driver = {
.probe = nfit_test_probe,
- .remove = nfit_test_remove,
.driver = {
.name = KBUILD_MODNAME,
},
@@ -3292,10 +3276,6 @@ static __init int nfit_test_init(void)
acpi_nfit_test();
device_dax_test();
dax_pmem_test();
- dax_pmem_core_test();
-#ifdef CONFIG_DEV_DAX_PMEM_COMPAT
- dax_pmem_compat_test();
-#endif
nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);
@@ -3303,11 +3283,9 @@ static __init int nfit_test_init(void)
if (!nfit_wq)
return -ENOMEM;
- nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm");
- if (IS_ERR(nfit_test_dimm)) {
- rc = PTR_ERR(nfit_test_dimm);
+ rc = class_register(&nfit_test_dimm);
+ if (rc)
goto err_register;
- }
nfit_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
if (!nfit_pool) {
@@ -3394,7 +3372,6 @@ static __exit void nfit_test_exit(void)
{
int i;
- flush_workqueue(nfit_wq);
destroy_workqueue(nfit_wq);
for (i = 0; i < NUM_NFITS; i++)
platform_device_unregister(&instances[i]->pdev);
@@ -3405,10 +3382,11 @@ static __exit void nfit_test_exit(void)
for (i = 0; i < NUM_NFITS; i++)
put_device(&instances[i]->pdev.dev);
- class_destroy(nfit_test_dimm);
+ class_unregister(&nfit_test_dimm);
}
module_init(nfit_test_init);
module_exit(nfit_test_exit);
+MODULE_DESCRIPTION("Test ACPI NFIT devices");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index b5f7a996c4d0..b9047fb8ea4a 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -207,7 +207,35 @@ typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
const guid_t *guid, u64 rev, u64 func,
union acpi_object *argv4);
+void __iomem *__wrap_devm_ioremap(struct device *dev,
+ resource_size_t offset, unsigned long size);
+void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
+ size_t size, unsigned long flags);
+void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
+void *__wrap_memremap(resource_size_t offset, size_t size,
+ unsigned long flags);
+void __wrap_devm_memunmap(struct device *dev, void *addr);
+void __iomem *__wrap_ioremap(resource_size_t offset, unsigned long size);
+void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size);
void __wrap_iounmap(volatile void __iomem *addr);
+void __wrap_memunmap(void *addr);
+struct resource *__wrap___request_region(struct resource *parent,
+ resource_size_t start, resource_size_t n, const char *name,
+ int flags);
+int __wrap_insert_resource(struct resource *parent, struct resource *res);
+int __wrap_remove_resource(struct resource *res);
+struct resource *__wrap___devm_request_region(struct device *dev,
+ struct resource *parent, resource_size_t start,
+ resource_size_t n, const char *name);
+void __wrap___release_region(struct resource *parent, resource_size_t start,
+ resource_size_t n);
+void __wrap___devm_release_region(struct device *dev, struct resource *parent,
+ resource_size_t start, resource_size_t n);
+acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
+ struct acpi_object_list *p, struct acpi_buffer *buf);
+union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
+ u64 rev, u64 func, union acpi_object *argv4);
+
void nfit_test_setup(nfit_test_lookup_fn lookup,
nfit_test_evaluate_dsm_fn evaluate);
void nfit_test_teardown(void);
diff --git a/tools/testing/radix-tree/.gitignore b/tools/testing/radix-tree/.gitignore
index d971516401e6..ce167a761981 100644
--- a/tools/testing/radix-tree/.gitignore
+++ b/tools/testing/radix-tree/.gitignore
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+generated/autoconf.h
+generated/bit-length.h
generated/map-shift.h
idr.c
idr-test
@@ -6,3 +8,5 @@ main
multiorder
radix-tree.c
xarray
+maple
+ma_xa_benchmark
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index aa6abfe0749c..b2a6660bbd92 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -1,57 +1,30 @@
# SPDX-License-Identifier: GPL-2.0
-CFLAGS += -I. -I../../include -g -Og -Wall -D_LGPL_SOURCE -fsanitize=address \
- -fsanitize=undefined
-LDFLAGS += -fsanitize=address -fsanitize=undefined
-LDLIBS+= -lpthread -lurcu
-TARGETS = main idr-test multiorder xarray
-CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o
-OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
- regression4.o tag_check.o multiorder.o idr-test.o iteration_check.o \
- iteration_check_2.o benchmark.o
-
-ifndef SHIFT
- SHIFT=3
-endif
-
-ifeq ($(BUILD), 32)
- CFLAGS += -m32
- LDFLAGS += -m32
-endif
-
-targets: generated/map-shift.h $(TARGETS)
+.PHONY: clean
+
+TARGETS = main idr-test multiorder xarray maple
+CORE_OFILES = $(SHARED_OFILES) xarray.o maple.o test.o
+OFILES = main.o $(CORE_OFILES) regression1.o regression2.o \
+ regression3.o regression4.o tag_check.o multiorder.o idr-test.o \
+ iteration_check.o iteration_check_2.o benchmark.o
+
+targets: generated/map-shift.h generated/bit-length.h $(TARGETS)
+
+include ../shared/shared.mk
main: $(OFILES)
+xarray.o: ../../../lib/test_xarray.c
idr-test.o: ../../../lib/test_ida.c
idr-test: idr-test.o $(CORE_OFILES)
-xarray: $(CORE_OFILES)
+xarray: $(CORE_OFILES) xarray.o
+
+maple: $(CORE_OFILES) maple.o
multiorder: multiorder.o $(CORE_OFILES)
clean:
- $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h
-
-vpath %.c ../../lib
-
-$(OFILES): Makefile *.h */*.h generated/map-shift.h \
- ../../include/linux/*.h \
- ../../include/asm/*.h \
- ../../../include/linux/xarray.h \
- ../../../include/linux/radix-tree.h \
- ../../../include/linux/idr.h
-
-radix-tree.c: ../../../lib/radix-tree.c
- sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
-
-idr.c: ../../../lib/idr.c
- sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
-
-xarray.o: ../../../lib/xarray.c ../../../lib/test_xarray.c
+ $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/*
-generated/map-shift.h:
- @if ! grep -qws $(SHIFT) generated/map-shift.h; then \
- echo "#define XA_CHUNK_SHIFT $(SHIFT)" > \
- generated/map-shift.h; \
- fi
+$(OFILES): $(SHARED_DEPS) *.h
diff --git a/tools/testing/radix-tree/bitmap.c b/tools/testing/radix-tree/bitmap.c
deleted file mode 100644
index 66ec4a24a203..000000000000
--- a/tools/testing/radix-tree/bitmap.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/* lib/bitmap.c pulls in at least two other files. */
-
-#include <linux/bitmap.h>
-
-void bitmap_clear(unsigned long *map, unsigned int start, int len)
-{
- unsigned long *p = map + BIT_WORD(start);
- const unsigned int size = start + len;
- int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
- unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
-
- while (len - bits_to_clear >= 0) {
- *p &= ~mask_to_clear;
- len -= bits_to_clear;
- bits_to_clear = BITS_PER_LONG;
- mask_to_clear = ~0UL;
- p++;
- }
- if (len) {
- mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
- *p &= ~mask_to_clear;
- }
-}
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 3b796dd5e577..2f830ff8396c 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -296,21 +296,34 @@ static void *idr_throbber(void *arg)
return NULL;
}
+/*
+ * There are always either 1 or 2 objects in the IDR. If we find nothing,
+ * or we find something at an ID we didn't expect, that's a bug.
+ */
void idr_find_test_1(int anchor_id, int throbber_id)
{
pthread_t throbber;
time_t start = time(NULL);
- pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
-
BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id,
anchor_id + 1, GFP_KERNEL) != anchor_id);
+ pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
+
+ rcu_read_lock();
do {
int id = 0;
void *entry = idr_get_next(&find_idr, &id);
- BUG_ON(entry != xa_mk_value(id));
+ rcu_read_unlock();
+ if ((id != anchor_id && id != throbber_id) ||
+ entry != xa_mk_value(id)) {
+ printf("%s(%d, %d): %p at %d\n", __func__, anchor_id,
+ throbber_id, entry, id);
+ abort();
+ }
+ rcu_read_lock();
} while (time(NULL) < start + 11);
+ rcu_read_unlock();
pthread_join(throbber, NULL);
@@ -411,6 +424,7 @@ void idr_checks(void)
#define module_init(x)
#define module_exit(x)
#define MODULE_AUTHOR(x)
+#define MODULE_DESCRIPTION(X)
#define MODULE_LICENSE(x)
#define dump_stack() assert(0)
void ida_dump(struct ida *);
@@ -485,19 +499,17 @@ void ida_check_random(void)
goto repeat;
}
-void ida_simple_get_remove_test(void)
+void ida_alloc_free_test(void)
{
DEFINE_IDA(ida);
unsigned long i;
- for (i = 0; i < 10000; i++) {
- assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i);
- }
- assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0);
+ for (i = 0; i < 10000; i++)
+ assert(ida_alloc_max(&ida, 20000, GFP_KERNEL) == i);
+ assert(ida_alloc_range(&ida, 5, 30, GFP_KERNEL) < 0);
- for (i = 0; i < 10000; i++) {
- ida_simple_remove(&ida, i);
- }
+ for (i = 0; i < 10000; i++)
+ ida_free(&ida, i);
assert(ida_is_empty(&ida));
ida_destroy(&ida);
@@ -510,7 +522,7 @@ void user_ida_checks(void)
ida_check_nomem();
ida_check_conv_user();
ida_check_random();
- ida_simple_get_remove_test();
+ ida_alloc_free_test();
radix_tree_cpu_dead(1);
}
@@ -577,6 +589,7 @@ void ida_tests(void)
int __weak main(void)
{
+ rcu_register_thread();
radix_tree_init();
idr_checks();
ida_tests();
@@ -584,5 +597,6 @@ int __weak main(void)
rcu_barrier();
if (nr_allocated)
printf("nr_allocated = %d\n", nr_allocated);
+ rcu_unregister_thread();
return 0;
}
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
deleted file mode 100644
index 2d9c59df60de..000000000000
--- a/tools/testing/radix-tree/linux.c
+++ /dev/null
@@ -1,120 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <stdlib.h>
-#include <string.h>
-#include <malloc.h>
-#include <pthread.h>
-#include <unistd.h>
-#include <assert.h>
-
-#include <linux/gfp.h>
-#include <linux/poison.h>
-#include <linux/slab.h>
-#include <linux/radix-tree.h>
-#include <urcu/uatomic.h>
-
-int nr_allocated;
-int preempt_count;
-int kmalloc_verbose;
-int test_verbose;
-
-struct kmem_cache {
- pthread_mutex_t lock;
- unsigned int size;
- unsigned int align;
- int nr_objs;
- void *objs;
- void (*ctor)(void *);
-};
-
-void *kmem_cache_alloc(struct kmem_cache *cachep, int gfp)
-{
- void *p;
-
- if (!(gfp & __GFP_DIRECT_RECLAIM))
- return NULL;
-
- pthread_mutex_lock(&cachep->lock);
- if (cachep->nr_objs) {
- struct radix_tree_node *node = cachep->objs;
- cachep->nr_objs--;
- cachep->objs = node->parent;
- pthread_mutex_unlock(&cachep->lock);
- node->parent = NULL;
- p = node;
- } else {
- pthread_mutex_unlock(&cachep->lock);
- if (cachep->align)
- posix_memalign(&p, cachep->align, cachep->size);
- else
- p = malloc(cachep->size);
- if (cachep->ctor)
- cachep->ctor(p);
- else if (gfp & __GFP_ZERO)
- memset(p, 0, cachep->size);
- }
-
- uatomic_inc(&nr_allocated);
- if (kmalloc_verbose)
- printf("Allocating %p from slab\n", p);
- return p;
-}
-
-void kmem_cache_free(struct kmem_cache *cachep, void *objp)
-{
- assert(objp);
- uatomic_dec(&nr_allocated);
- if (kmalloc_verbose)
- printf("Freeing %p to slab\n", objp);
- pthread_mutex_lock(&cachep->lock);
- if (cachep->nr_objs > 10 || cachep->align) {
- memset(objp, POISON_FREE, cachep->size);
- free(objp);
- } else {
- struct radix_tree_node *node = objp;
- cachep->nr_objs++;
- node->parent = cachep->objs;
- cachep->objs = node;
- }
- pthread_mutex_unlock(&cachep->lock);
-}
-
-void *kmalloc(size_t size, gfp_t gfp)
-{
- void *ret;
-
- if (!(gfp & __GFP_DIRECT_RECLAIM))
- return NULL;
-
- ret = malloc(size);
- uatomic_inc(&nr_allocated);
- if (kmalloc_verbose)
- printf("Allocating %p from malloc\n", ret);
- if (gfp & __GFP_ZERO)
- memset(ret, 0, size);
- return ret;
-}
-
-void kfree(void *p)
-{
- if (!p)
- return;
- uatomic_dec(&nr_allocated);
- if (kmalloc_verbose)
- printf("Freeing %p to malloc\n", p);
- free(p);
-}
-
-struct kmem_cache *
-kmem_cache_create(const char *name, unsigned int size, unsigned int align,
- unsigned int flags, void (*ctor)(void *))
-{
- struct kmem_cache *ret = malloc(sizeof(*ret));
-
- pthread_mutex_init(&ret->lock, NULL);
- ret->size = size;
- ret->align = align;
- ret->nr_objs = 0;
- ret->objs = NULL;
- ret->ctor = ctor;
- return ret;
-}
diff --git a/tools/testing/radix-tree/linux/gfp.h b/tools/testing/radix-tree/linux/gfp.h
deleted file mode 100644
index 32159c08a52e..000000000000
--- a/tools/testing/radix-tree/linux/gfp.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _GFP_H
-#define _GFP_H
-
-#include <linux/types.h>
-
-#define __GFP_BITS_SHIFT 26
-#define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-
-#define __GFP_HIGH 0x20u
-#define __GFP_IO 0x40u
-#define __GFP_FS 0x80u
-#define __GFP_NOWARN 0x200u
-#define __GFP_ZERO 0x8000u
-#define __GFP_ATOMIC 0x80000u
-#define __GFP_ACCOUNT 0x100000u
-#define __GFP_DIRECT_RECLAIM 0x400000u
-#define __GFP_KSWAPD_RECLAIM 0x2000000u
-
-#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM)
-
-#define GFP_ZONEMASK 0x0fu
-#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
-#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
-#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
-
-
-static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
-{
- return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
-}
-
-#endif
diff --git a/tools/testing/radix-tree/linux/idr.h b/tools/testing/radix-tree/linux/idr.h
deleted file mode 100644
index 4e342f2e37cf..000000000000
--- a/tools/testing/radix-tree/linux/idr.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../../include/linux/idr.h"
diff --git a/tools/testing/radix-tree/linux/init.h b/tools/testing/radix-tree/linux/init.h
deleted file mode 100644
index 1bb0afc21309..000000000000
--- a/tools/testing/radix-tree/linux/init.h
+++ /dev/null
@@ -1 +0,0 @@
-#define __init
diff --git a/tools/testing/radix-tree/linux/slab.h b/tools/testing/radix-tree/linux/slab.h
deleted file mode 100644
index 2958830ce4d7..000000000000
--- a/tools/testing/radix-tree/linux/slab.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef SLAB_H
-#define SLAB_H
-
-#include <linux/types.h>
-#include <linux/gfp.h>
-
-#define SLAB_HWCACHE_ALIGN 1
-#define SLAB_PANIC 2
-#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
-
-void *kmalloc(size_t size, gfp_t);
-void kfree(void *);
-
-static inline void *kzalloc(size_t size, gfp_t gfp)
-{
- return kmalloc(size, gfp | __GFP_ZERO);
-}
-
-void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
-void kmem_cache_free(struct kmem_cache *cachep, void *objp);
-
-struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
- unsigned int align, unsigned int flags,
- void (*ctor)(void *));
-
-#endif /* SLAB_H */
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
new file mode 100644
index 000000000000..5c1b18e3ed21
--- /dev/null
+++ b/tools/testing/radix-tree/maple.c
@@ -0,0 +1,36227 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * maple_tree.c: Userspace testing for maple tree test-suite
+ * Copyright (c) 2018-2022 Oracle Corporation
+ * Author: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ *
+ * Any tests that require internal knowledge of the tree, use threads, or
+ * are otherwise difficult to run as in-kernel tests live here.
+ */
+
+#define module_init(x)
+#define module_exit(x)
+#define MODULE_AUTHOR(x)
+#define MODULE_DESCRIPTION(X)
+#define MODULE_LICENSE(x)
+#define dump_stack() assert(0)
+
+#include "test.h"
+
+#include "../shared/maple-shim.c"
+#include "../../../lib/test_maple_tree.c"
+
+#define RCU_RANGE_COUNT 1000
+#define RCU_MT_BUG_ON(test, y) {if (y) { test->stop = true; } MT_BUG_ON(test->mt, y); }
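+
+/*
+ * RCU_MT_BUG_ON() sets test->stop before asserting so sibling reader and
+ * writer threads can wind down cleanly instead of racing against a tree
+ * the failing thread abandoned mid-operation. Typical reader-side use
+ * (sketch; 'test' and 'entry' are assumed locals):
+ *
+ *	entry = mtree_load(test->mt, index);
+ *	RCU_MT_BUG_ON(test, entry == NULL);
+ */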
+
+struct rcu_test_struct2 {
+ struct maple_tree *mt;
+
+ bool start;
+ bool stop;
+ unsigned int thread_count;
+
+ unsigned int seen_toggle;
+ unsigned int seen_added;
+ unsigned int seen_modified;
+ unsigned int seen_deleted;
+ int pause;
+
+ unsigned long index[RCU_RANGE_COUNT];
+ unsigned long last[RCU_RANGE_COUNT];
+};
+
+struct rcu_test_struct3 {
+ struct maple_tree *mt;
+ unsigned long index;
+ unsigned long last;
+ bool stop;
+};
+
+struct rcu_reader_struct {
+ unsigned int id;
+ int mod;
+ int del;
+ int flip;
+ int add;
+ int next;
+ struct rcu_test_struct2 *test;
+};
+
+/*
+ * Check erasing including RCU.
+ */
+static noinline void __init check_erase(struct maple_tree *mt, unsigned long index,
+ void *ptr)
+{
+ MT_BUG_ON(mt, mtree_test_erase(mt, index) != ptr);
+}
+
+#define erase_check_load(mt, i) check_load(mt, set[i], entry[i%2])
+#define erase_check_insert(mt, i) check_insert(mt, set[i], entry[i%2])
+#define erase_check_erase(mt, i) check_erase(mt, set[i], entry[i%2])
+
+static noinline void __init check_erase_testset(struct maple_tree *mt)
+{
+ static const unsigned long set[] = { 5015, 5014, 5017, 25, 1000,
+ 1001, 1002, 1003, 1005, 0,
+ 6003, 6002, 6008, 6012, 6015,
+ 7003, 7002, 7008, 7012, 7015,
+ 8003, 8002, 8008, 8012, 8015,
+ 9003, 9002, 9008, 9012, 9015,
+ 10003, 10002, 10008, 10012, 10015,
+ 11003, 11002, 11008, 11012, 11015,
+ 12003, 12002, 12008, 12012, 12015,
+ 13003, 13002, 13008, 13012, 13015,
+ 14003, 14002, 14008, 14012, 14015,
+ 15003, 15002, 15008, 15012, 15015,
+ };
+
+ void *ptr = &check_erase_testset;
+ void *entry[2] = { ptr, mt };
+ void *root_node;
+
+ rcu_register_thread();
+ mt_set_in_rcu(mt);
+ for (int i = 0; i < 4; i++)
+ erase_check_insert(mt, i);
+ for (int i = 0; i < 4; i++)
+ erase_check_load(mt, i);
+
+ mt_set_non_kernel(2);
+ erase_check_erase(mt, 1);
+ erase_check_load(mt, 0);
+ check_load(mt, set[1], NULL);
+ for (int i = 2; i < 4; i++)
+ erase_check_load(mt, i);
+
+ erase_check_erase(mt, 2);
+ erase_check_load(mt, 0);
+ check_load(mt, set[1], NULL);
+ check_load(mt, set[2], NULL);
+
+ erase_check_insert(mt, 1);
+ erase_check_insert(mt, 2);
+
+ for (int i = 0; i < 4; i++)
+ erase_check_load(mt, i);
+
+ /* Check erase and load without an allocation. */
+ erase_check_load(mt, 3);
+ erase_check_erase(mt, 1);
+ erase_check_load(mt, 0);
+ check_load(mt, set[1], NULL);
+ for (int i = 2; i < 4; i++)
+ erase_check_load(mt, i);
+
+ /*
+ * Set the newly erased node. This will produce a different allocated
+ * node to avoid busy slots.
+ */
+ root_node = mt->ma_root;
+ erase_check_insert(mt, 1);
+
+ erase_check_load(mt, 0);
+ check_load(mt, 5016, NULL);
+ erase_check_load(mt, 1);
+ check_load(mt, 5013, NULL);
+ erase_check_load(mt, 2);
+ check_load(mt, 5018, NULL);
+ erase_check_load(mt, 3);
+
+ erase_check_erase(mt, 2); /* erase 5017 to check append */
+ erase_check_load(mt, 0);
+ check_load(mt, 5016, NULL);
+ erase_check_load(mt, 1);
+ check_load(mt, 5013, NULL);
+ check_load(mt, set[2], NULL);
+ check_load(mt, 5018, NULL);
+
+ erase_check_load(mt, 3);
+
+ root_node = mt->ma_root;
+ erase_check_insert(mt, 2);
+
+ erase_check_load(mt, 0);
+ check_load(mt, 5016, NULL);
+ erase_check_load(mt, 1);
+ check_load(mt, 5013, NULL);
+ erase_check_load(mt, 2);
+ check_load(mt, 5018, NULL);
+ erase_check_load(mt, 3);
+
+ mt_set_non_kernel(1);
+ erase_check_erase(mt, 2); /* erase 5017 to check append */
+ erase_check_load(mt, 0);
+ check_load(mt, 5016, NULL);
+ check_load(mt, set[2], NULL);
+ erase_check_erase(mt, 0); /* erase 5015 to check append */
+ check_load(mt, set[0], NULL);
+ check_load(mt, 5016, NULL);
+	erase_check_insert(mt, 4); /* set[4] == 1000; should not split. */
+ check_load(mt, set[0], NULL);
+ check_load(mt, 5016, NULL);
+ erase_check_load(mt, 1);
+ check_load(mt, 5013, NULL);
+ check_load(mt, set[2], NULL);
+ check_load(mt, 5018, NULL);
+ erase_check_load(mt, 4);
+ check_load(mt, 999, NULL);
+ check_load(mt, 1001, NULL);
+ erase_check_load(mt, 4);
+ if (mt_in_rcu(mt))
+ MT_BUG_ON(mt, root_node == mt->ma_root);
+ else
+ MT_BUG_ON(mt, root_node != mt->ma_root);
+
+ /* Should not have split. */
+ MT_BUG_ON(mt, !mte_is_leaf(mt->ma_root));
+
+ /* Coalesce testing */
+ erase_check_insert(mt, 0);
+ erase_check_insert(mt, 2);
+
+ for (int i = 5; i < 25; i++) {
+ erase_check_insert(mt, i);
+ for (int j = i; j >= 0; j--)
+ erase_check_load(mt, j);
+ }
+
+ erase_check_erase(mt, 14); /*6015 */
+ for (int i = 0; i < 25; i++) {
+ if (i == 14)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+ erase_check_erase(mt, 16); /*7002 */
+ for (int i = 0; i < 25; i++) {
+ if (i == 16 || i == 14)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ mt_set_non_kernel(1);
+ erase_check_erase(mt, 13); /*6012 */
+ for (int i = 0; i < 25; i++) {
+ if (i == 16 || i == 14 || i == 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ erase_check_erase(mt, 15); /*7003 */
+ for (int i = 0; i < 25; i++) {
+ if (i <= 16 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ mt_set_non_kernel(2);
+ erase_check_erase(mt, 17); /*7008 *should* cause coalesce. */
+ for (int i = 0; i < 25; i++) {
+ if (i <= 17 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ erase_check_erase(mt, 18); /*7012 */
+ for (int i = 0; i < 25; i++) {
+ if (i <= 18 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ mt_set_non_kernel(2);
+ erase_check_erase(mt, 19); /*7015 */
+ for (int i = 0; i < 25; i++) {
+ if (i <= 19 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ erase_check_erase(mt, 20); /*8003 */
+ for (int i = 0; i < 25; i++) {
+ if (i <= 20 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ erase_check_erase(mt, 21); /*8002 */
+ for (int i = 0; i < 25; i++) {
+ if (i <= 21 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ mt_set_non_kernel(2);
+ erase_check_erase(mt, 22); /*8008 */
+ for (int i = 0; i < 25; i++) {
+ if (i <= 22 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+ for (int i = 23; i < 25; i++)
+ erase_check_erase(mt, i);
+
+ for (int i = 0; i < 25; i++) {
+ if (i <= 25 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ /* Shrinking tree test. */
+
+ for (int i = 13; i < ARRAY_SIZE(set); i++)
+ erase_check_insert(mt, i);
+
+ mt_set_non_kernel(99);
+ for (int i = 18; i < ARRAY_SIZE(set); i++) {
+ erase_check_erase(mt, i);
+ for (int j = 0; j < ARRAY_SIZE(set); j++) {
+ if (j < 18 || j > i)
+ erase_check_load(mt, j);
+ else
+ check_load(mt, set[j], NULL);
+ }
+ }
+ mt_set_non_kernel(35);
+ for (int i = 0; i < 18; i++) {
+ erase_check_erase(mt, i);
+ for (int j = 0; j < ARRAY_SIZE(set); j++) {
+ if (j < 18 && j > i)
+ erase_check_load(mt, j);
+ else
+ check_load(mt, set[j], NULL);
+ }
+ }
+ erase_check_insert(mt, 8);
+ erase_check_insert(mt, 9);
+ erase_check_erase(mt, 8);
+ rcu_unregister_thread();
+}
+
+/* End of erase testing */
+
+/* VM-generated crashes - uses its own tree walk for verification */
+#define erase_check_store_range(mt, a, i, ptr) mtree_test_store_range(mt, \
+ a[(i)], a[(i + 1)], ptr)
+#define STORE 1
+#define SNULL 2
+#define ERASE 3
+#define ec_type_str(x) \
+ (((x) == STORE) ? \
+ "STORE" : \
+ (((x) == SNULL) ? \
+ "SNULL" : "ERASE") \
+ )
+#define check_erase2_debug 0
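+
+/*
+ * check_erase2_debug is a verbosity level rather than a bool: 1 traces
+ * each operation, 2 additionally dumps the tree after every step, and 3
+ * also prints each entry visited during verification. To chase a failing
+ * set, rebuild with e.g.:
+ *
+ *	#define check_erase2_debug 3
+ */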
+
+/* Calculate the overwritten entries. */
+int mas_ce2_over_count(struct ma_state *mas_start, struct ma_state *mas_end,
+ void *s_entry, unsigned long s_min,
+ void *e_entry, unsigned long e_max,
+ const unsigned long *set, int i, bool null_entry)
+{
+ int count = 0, span = 0;
+ unsigned long retry = 0;
+ void *entry;
+ struct ma_state tmp;
+
+ /* count slots */
+ memcpy(&tmp, mas_start, sizeof(tmp));
+ entry = mas_next(&tmp, mas_end->last);
+ while (entry) {
+ BUG_ON(retry > 50); /* stop infinite retry on testing. */
+ if (xa_is_zero(s_entry)) {
+ retry++;
+ continue;
+ }
+ count++;
+ span++;
+ entry = mas_next(&tmp, mas_end->last);
+ }
+
+ if (null_entry) {
+ /* Check splitting end. */
+ if (e_entry && (e_max > mas_end->last))
+ count--;
+
+ /* check overwrite of entire start */
+ if (s_entry && (s_min == mas_start->index))
+ count++;
+ } else { /* !null_entry (store) */
+ bool esplit = e_max > mas_end->last;
+ bool ssplit = s_min != mas_start->index;
+
+ if (s_entry && e_entry) {
+ if (esplit && ssplit)
+ count--;
+ else if (ssplit)
+ count--;
+ else if (esplit) {
+ if (span)
+ count--;
+ }
+ } else if (s_entry && !e_entry) {
+ if (ssplit)
+ count--;
+ } else if (!s_entry && e_entry) {
+ if (esplit)
+ count--;
+ count--;
+ } else {
+ count--;
+ }
+ }
+ return count;
+}
+
+/*
+ * mas_node_walk() - Walk a maple node to the offset of the index.
+ * @mas: The maple state
+ * @node: The maple node
+ * @type: The maple node type
+ * @range_min: Pointer to store the minimum of the offset's range
+ * @range_max: Pointer to store the maximum of the offset's range
+ *
+ * The offset will be stored in the maple state.
+ */
+static inline void mas_node_walk(struct ma_state *mas, struct maple_node *node,
+ enum maple_type type, unsigned long *range_min,
+ unsigned long *range_max)
+{
+ unsigned long *pivots;
+ unsigned char count;
+ unsigned long prev, max;
+ unsigned char offset;
+ unsigned long index;
+
+ if (unlikely(ma_is_dense(type))) {
+ (*range_max) = (*range_min) = mas->index;
+ if (unlikely(ma_dead_node(node)))
+ return;
+
+ mas->offset = mas->index = mas->min;
+ return;
+ }
+
+ pivots = ma_pivots(node, type);
+ max = pivots[0];
+ if (unlikely(ma_dead_node(node)))
+ return;
+
+ offset = 0;
+ prev = mas->min;
+ index = mas->index;
+ if (unlikely(index <= max))
+ goto offset_zero;
+
+ count = mt_pivots[type];
+ while (++offset < count) {
+ prev = max;
+ max = pivots[offset];
+ if (unlikely(ma_dead_node(node)))
+ return;
+
+ if (index <= max)
+ goto offset_found;
+ else if (unlikely(!max))
+ goto mas_max;
+ }
+
+ prev = max;
+mas_max:
+ max = mas->max;
+offset_found:
+ prev++;
+offset_zero:
+ mas->offset = offset;
+ if (ma_is_leaf(type)) {
+ *range_max = max;
+ *range_min = prev;
+ } else {
+ mas->max = max;
+ mas->min = prev;
+ }
+}
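+
+/*
+ * A worked example of the walk above (values illustrative): for a leaf
+ * covering [0, 100] with pivots {10, 20, 30, 0, ...}, index 25 stops at
+ * offset 2 with *range_min = 21 and *range_max = 30, while index 5 skips
+ * the loop entirely and lands at offset 0 with the range [0, 10].
+ */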
+
+/*
+ * mas_descend_walk(): Locates a value and sets the mas->node and slot
+ * accordingly.  range_min and range_max are set to the range over which
+ * the entry is valid.
+ * @mas: The maple state
+ * @range_min: A pointer to store the minimum of the range
+ * @range_max: A pointer to store the maximum of the range
+ *
+ * Check that mas->node is still valid, whatever the return value.
+ *
+ * Return: true if pointing to a valid node and offset, false otherwise.
+ */
+static inline bool mas_descend_walk(struct ma_state *mas,
+ unsigned long *range_min, unsigned long *range_max)
+{
+ struct maple_enode *next;
+ struct maple_node *node;
+ enum maple_type type;
+
+ next = mas->node;
+ while (true) {
+ node = mte_to_node(next);
+ type = mte_node_type(next);
+ mas_node_walk(mas, node, type, range_min, range_max);
+ next = mas_slot(mas, ma_slots(node, type), mas->offset);
+ if (unlikely(ma_dead_node(node)))
+ return false;
+
+ if (unlikely(ma_is_leaf(type)))
+ return true;
+
+ /* Descend. */
+ mas->node = next;
+ }
+ return false;
+}
+
+/*
+ * mas_tree_walk() - Walk to @mas->index and set the range values.
+ * @mas: The maple state.
+ * @range_min: Pointer to store the minimum of the range.
+ * @range_max: Pointer to store the maximum of the range.
+ *
+ * Ranges are only valid if there is a valid entry at @mas->index.
+ *
+ * Return: True if a value exists, false otherwise.
+ */
+static inline bool mas_tree_walk(struct ma_state *mas, unsigned long *range_min,
+ unsigned long *range_max)
+{
+ bool ret;
+
+retry:
+ ret = false;
+ mas_start(mas);
+ if (mas_is_none(mas))
+ goto not_found;
+
+ if (mas_is_ptr(mas)) {
+ *range_min = *range_max = 0;
+ if (!mas->index)
+ return true;
+
+ goto not_found;
+ }
+
+ ret = mas_descend_walk(mas, range_min, range_max);
+ if (unlikely(mte_dead_node(mas->node))) {
+ mas->status = ma_start;
+ goto retry;
+ }
+
+ mas->end = mas_data_end(mas);
+ return ret;
+
+not_found:
+ mas->offset = MAPLE_NODE_SLOTS;
+ return false;
+}
+
+static inline void *mas_range_load(struct ma_state *mas,
+ unsigned long *range_min, unsigned long *range_max)
+{
+ void *entry = NULL;
+ unsigned long index = mas->index;
+
+ if (mas_is_none(mas) || mas_is_paused(mas))
+ mas->status = ma_start;
+retry:
+ if (mas_tree_walk(mas, range_min, range_max))
+ if (unlikely(mas->status == ma_root))
+ return mas_root(mas);
+
+ if (likely(mas->offset != MAPLE_NODE_SLOTS))
+ entry = mas_get_slot(mas, mas->offset);
+
+ if (mas_is_active(mas) && mte_dead_node(mas->node)) {
+ mas_set(mas, index);
+ goto retry;
+ }
+
+ return entry;
+}
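+
+/*
+ * mas_range_load() restarts from the saved index whenever the walk lands
+ * on a node freed by a concurrent writer - the usual RCU retry pattern
+ * for lockless readers. Callers are expected to hold the RCU read lock,
+ * e.g. (sketch):
+ *
+ *	rcu_read_lock();
+ *	entry = mas_range_load(&mas, &min, &max);
+ *	rcu_read_unlock();
+ */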
+
+#if defined(CONFIG_64BIT)
+static noinline void __init check_erase2_testset(struct maple_tree *mt,
+ const unsigned long *set, unsigned long size)
+{
+ int entry_count = 0;
+ int check = 0;
+ void *foo;
+ unsigned long addr = 0;
+ void *s_entry = NULL, *e_entry = NULL;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ for (int i = 0; i < size; i += 3) {
+ unsigned long s_min, s_max;
+ unsigned long e_min, e_max;
+ void *value = NULL;
+
+ MA_STATE(mas_start, mt, set[i+1], set[i+1]);
+ MA_STATE(mas_end, mt, set[i+2], set[i+2]);
+ mt_set_non_kernel(127);
+#if check_erase2_debug
+ pr_err("%s: %d %s %lu - %lu\n", __func__, i,
+ ec_type_str(set[i]),
+ set[i+1], set[i+2]);
+#endif
+ s_entry = mas_range_load(&mas_start, &s_min, &s_max);
+ e_entry = mas_range_load(&mas_end, &e_min, &e_max);
+
+ switch (set[i]) {
+ case SNULL:
+ if ((s_min == set[i+1]) && (s_max == set[i+2])) {
+ if (s_entry)
+ entry_count--;
+ } else if ((s_min != set[i+1]) && (s_max != set[i+2])) {
+ entry_count++;
+ } else if ((mas_start.node != mas_end.node) ||
+ (mas_start.offset != mas_end.offset)) {
+ entry_count -=
+ mas_ce2_over_count(&mas_start, &mas_end,
+ s_entry, s_min,
+ e_entry, e_max, set, i,
+ true);
+ }
+
+ erase_check_store_range(mt, set, i + 1, value);
+ break;
+ case STORE:
+ value = xa_mk_value(set[i + 1]);
+ if (mas_start.offset > mt_slot_count(mas_start.node)) {
+ entry_count++; /* appending an entry. */
+ } else if ((s_min == e_min) && (s_max == e_max)) {
+ if (!entry_count)
+ entry_count++;
+
+ else if (s_entry) {
+ if (e_max > mas_end.last)
+ entry_count++;
+
+ if (s_min < mas_start.index)
+ entry_count++;
+
+ } else {
+ entry_count++;
+ }
+ } else {
+ entry_count -=
+ mas_ce2_over_count(&mas_start, &mas_end,
+ s_entry, s_min,
+ e_entry, e_max, set, i,
+ false);
+ }
+
+ erase_check_store_range(mt, set, i + 1, value);
+ break;
+ case ERASE:
+ if (!s_entry)
+ break;
+ check_erase(mt, set[i+1], xa_mk_value(set[i+1]));
+ entry_count--;
+ break;
+ }
+ mt_validate(mt);
+ if (entry_count)
+ MT_BUG_ON(mt, !mt_height(mt));
+#if check_erase2_debug > 1
+ mt_dump(mt, mt_dump_hex);
+#endif
+#if check_erase2_debug
+ pr_err("Done\n");
+#endif
+
+ check = 0;
+ addr = 0;
+ mt_for_each(mt, foo, addr, ULONG_MAX) {
+ check++;
+#if check_erase2_debug > 2
+ pr_err("mt: %lu -> %p (%d)\n", addr+1, foo, check);
+#endif
+ if (check > entry_count)
+ break;
+ }
+
+#if check_erase2_debug > 2
+ pr_err("mt_for_each %d and count %d\n", check, entry_count);
+#endif
+
+ MT_BUG_ON(mt, check != entry_count);
+
+ check = 0;
+ addr = 0;
+ mas_reset(&mas);
+ mas.index = 0;
+ rcu_read_lock();
+ mas_for_each(&mas, foo, ULONG_MAX) {
+ if (xa_is_zero(foo)) {
+ if (addr == mas.index) {
+ mt_dump(mas.tree, mt_dump_hex);
+ pr_err("retry failed %lu - %lu\n",
+ mas.index, mas.last);
+ MT_BUG_ON(mt, 1);
+ }
+ addr = mas.index;
+ continue;
+ }
+#if check_erase2_debug > 2
+ pr_err("mas: %lu -> %p\n", mas.index, foo);
+#endif
+ check++;
+ if (check > entry_count)
+ break;
+ }
+ rcu_read_unlock();
+#if check_erase2_debug > 2
+ pr_err("mas_for_each %d and count %d\n", check, entry_count);
+ mt_validate(mt);
+#endif
+
+ MT_BUG_ON(mt, check != entry_count);
+
+ MT_BUG_ON(mt, mtree_load(mas.tree, 0) != NULL);
+ }
+}
+
+/* These tests were pulled from KVM tree modifications which failed. */
+static noinline void __init check_erase2_sets(struct maple_tree *mt)
+{
+ void *entry;
+ unsigned long start = 0;
+ static const unsigned long set[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140721266458624, 140737488351231,
+ERASE, 140721266458624, 140737488351231,
+STORE, 140721266458624, 140721266462719,
+STORE, 94735788949504, 94735789121535,
+ERASE, 94735788949504, 94735789121535,
+STORE, 94735788949504, 94735788965887,
+STORE, 94735788965888, 94735789121535,
+ERASE, 94735788965888, 94735789121535,
+STORE, 94735788965888, 94735789068287,
+STORE, 94735789068288, 94735789109247,
+STORE, 94735789109248, 94735789121535,
+STORE, 140253902692352, 140253902864383,
+ERASE, 140253902692352, 140253902864383,
+STORE, 140253902692352, 140253902696447,
+STORE, 140253902696448, 140253902864383,
+ };
+ static const unsigned long set2[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140735933583360, 140737488351231,
+ERASE, 140735933583360, 140737488351231,
+STORE, 140735933583360, 140735933587455,
+STORE, 94811003260928, 94811003432959,
+ERASE, 94811003260928, 94811003432959,
+STORE, 94811003260928, 94811003277311,
+STORE, 94811003277312, 94811003432959,
+ERASE, 94811003277312, 94811003432959,
+STORE, 94811003277312, 94811003379711,
+STORE, 94811003379712, 94811003420671,
+STORE, 94811003420672, 94811003432959,
+STORE, 140277094653952, 140277094825983,
+ERASE, 140277094653952, 140277094825983,
+STORE, 140277094653952, 140277094658047,
+STORE, 140277094658048, 140277094825983,
+ERASE, 140277094658048, 140277094825983,
+STORE, 140277094658048, 140277094780927,
+STORE, 140277094780928, 140277094813695,
+STORE, 140277094813696, 140277094821887,
+STORE, 140277094821888, 140277094825983,
+STORE, 140735933906944, 140735933911039,
+ };
+ static const unsigned long set3[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140735790264320, 140737488351231,
+ERASE, 140735790264320, 140737488351231,
+STORE, 140735790264320, 140735790268415,
+STORE, 94016597282816, 94016597454847,
+ERASE, 94016597282816, 94016597454847,
+STORE, 94016597282816, 94016597299199,
+STORE, 94016597299200, 94016597454847,
+ERASE, 94016597299200, 94016597454847,
+STORE, 94016597299200, 94016597401599,
+STORE, 94016597401600, 94016597442559,
+STORE, 94016597442560, 94016597454847,
+STORE, 140496959283200, 140496959455231,
+ERASE, 140496959283200, 140496959455231,
+STORE, 140496959283200, 140496959287295,
+STORE, 140496959287296, 140496959455231,
+ERASE, 140496959287296, 140496959455231,
+STORE, 140496959287296, 140496959410175,
+STORE, 140496959410176, 140496959442943,
+STORE, 140496959442944, 140496959451135,
+STORE, 140496959451136, 140496959455231,
+STORE, 140735791718400, 140735791722495,
+STORE, 140735791706112, 140735791718399,
+STORE, 47135835713536, 47135835721727,
+STORE, 47135835721728, 47135835729919,
+STORE, 47135835729920, 47135835893759,
+ERASE, 47135835729920, 47135835893759,
+STORE, 47135835729920, 47135835742207,
+STORE, 47135835742208, 47135835893759,
+STORE, 47135835840512, 47135835893759,
+STORE, 47135835742208, 47135835840511,
+ERASE, 47135835742208, 47135835840511,
+STORE, 47135835742208, 47135835840511,
+STORE, 47135835885568, 47135835893759,
+STORE, 47135835840512, 47135835885567,
+ERASE, 47135835840512, 47135835885567,
+STORE, 47135835840512, 47135835893759,
+ERASE, 47135835840512, 47135835893759,
+STORE, 47135835840512, 47135835885567,
+STORE, 47135835885568, 47135835893759,
+ };
+
+ static const unsigned long set4[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140728251703296, 140737488351231,
+ERASE, 140728251703296, 140737488351231,
+STORE, 140728251703296, 140728251707391,
+STORE, 94668429205504, 94668429377535,
+ERASE, 94668429205504, 94668429377535,
+STORE, 94668429205504, 94668429221887,
+STORE, 94668429221888, 94668429377535,
+ERASE, 94668429221888, 94668429377535,
+STORE, 94668429221888, 94668429324287,
+STORE, 94668429324288, 94668429365247,
+STORE, 94668429365248, 94668429377535,
+STORE, 47646523273216, 47646523445247,
+ERASE, 47646523273216, 47646523445247,
+STORE, 47646523273216, 47646523277311,
+STORE, 47646523277312, 47646523445247,
+ERASE, 47646523277312, 47646523445247,
+STORE, 47646523277312, 47646523400191,
+ };
+
+ static const unsigned long set5[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140726874062848, 140737488351231,
+ERASE, 140726874062848, 140737488351231,
+STORE, 140726874062848, 140726874066943,
+STORE, 94248892870656, 94248893042687,
+ERASE, 94248892870656, 94248893042687,
+STORE, 94248892870656, 94248892887039,
+STORE, 94248892887040, 94248893042687,
+ERASE, 94248892887040, 94248893042687,
+STORE, 94248892887040, 94248892989439,
+STORE, 94248892989440, 94248893030399,
+STORE, 94248893030400, 94248893042687,
+STORE, 47884786266112, 47884786438143,
+ERASE, 47884786266112, 47884786438143,
+STORE, 47884786266112, 47884786270207,
+STORE, 47884786270208, 47884786438143,
+ERASE, 47884786270208, 47884786438143,
+STORE, 47884786270208, 47884786393087,
+STORE, 47884786393088, 47884786425855,
+STORE, 47884786425856, 47884786434047,
+STORE, 47884786434048, 47884786438143,
+STORE, 140726874513408, 140726874517503,
+STORE, 140726874501120, 140726874513407,
+STORE, 47884786438144, 47884786446335,
+STORE, 47884786446336, 47884786454527,
+STORE, 47884786454528, 47884786618367,
+ERASE, 47884786454528, 47884786618367,
+STORE, 47884786454528, 47884786466815,
+STORE, 47884786466816, 47884786618367,
+STORE, 47884786565120, 47884786618367,
+STORE, 47884786466816, 47884786565119,
+ERASE, 47884786466816, 47884786565119,
+STORE, 47884786466816, 47884786565119,
+STORE, 47884786610176, 47884786618367,
+STORE, 47884786565120, 47884786610175,
+ERASE, 47884786565120, 47884786610175,
+STORE, 47884786565120, 47884786618367,
+ERASE, 47884786565120, 47884786618367,
+STORE, 47884786565120, 47884786610175,
+STORE, 47884786610176, 47884786618367,
+ERASE, 47884786610176, 47884786618367,
+STORE, 47884786610176, 47884786618367,
+STORE, 47884786618368, 47884789669887,
+STORE, 47884787163136, 47884789669887,
+STORE, 47884786618368, 47884787163135,
+ERASE, 47884787163136, 47884789669887,
+STORE, 47884787163136, 47884789448703,
+STORE, 47884789448704, 47884789669887,
+STORE, 47884788858880, 47884789448703,
+STORE, 47884787163136, 47884788858879,
+ERASE, 47884787163136, 47884788858879,
+STORE, 47884787163136, 47884788858879,
+STORE, 47884789444608, 47884789448703,
+STORE, 47884788858880, 47884789444607,
+ERASE, 47884788858880, 47884789444607,
+STORE, 47884788858880, 47884789444607,
+STORE, 47884789653504, 47884789669887,
+STORE, 47884789448704, 47884789653503,
+ERASE, 47884789448704, 47884789653503,
+STORE, 47884789448704, 47884789653503,
+ERASE, 47884789653504, 47884789669887,
+STORE, 47884789653504, 47884789669887,
+STORE, 47884789669888, 47884791508991,
+STORE, 47884789809152, 47884791508991,
+STORE, 47884789669888, 47884789809151,
+ERASE, 47884789809152, 47884791508991,
+STORE, 47884789809152, 47884791468031,
+STORE, 47884791468032, 47884791508991,
+STORE, 47884791152640, 47884791468031,
+STORE, 47884789809152, 47884791152639,
+ERASE, 47884789809152, 47884791152639,
+STORE, 47884789809152, 47884791152639,
+STORE, 47884791463936, 47884791468031,
+STORE, 47884791152640, 47884791463935,
+ERASE, 47884791152640, 47884791463935,
+STORE, 47884791152640, 47884791463935,
+STORE, 47884791492608, 47884791508991,
+STORE, 47884791468032, 47884791492607,
+ERASE, 47884791468032, 47884791492607,
+STORE, 47884791468032, 47884791492607,
+ERASE, 47884791492608, 47884791508991,
+STORE, 47884791492608, 47884791508991,
+STORE, 47884791508992, 47884791644159,
+ERASE, 47884791508992, 47884791644159,
+STORE, 47884791508992, 47884791533567,
+STORE, 47884791533568, 47884791644159,
+STORE, 47884791595008, 47884791644159,
+STORE, 47884791533568, 47884791595007,
+ERASE, 47884791533568, 47884791595007,
+STORE, 47884791533568, 47884791595007,
+STORE, 47884791619584, 47884791644159,
+STORE, 47884791595008, 47884791619583,
+ERASE, 47884791595008, 47884791619583,
+STORE, 47884791595008, 47884791644159,
+ERASE, 47884791595008, 47884791644159,
+STORE, 47884791595008, 47884791619583,
+STORE, 47884791619584, 47884791644159,
+STORE, 47884791627776, 47884791644159,
+STORE, 47884791619584, 47884791627775,
+ERASE, 47884791619584, 47884791627775,
+STORE, 47884791619584, 47884791627775,
+ERASE, 47884791627776, 47884791644159,
+STORE, 47884791627776, 47884791644159,
+STORE, 47884791644160, 47884791664639,
+ERASE, 47884791644160, 47884791664639,
+STORE, 47884791644160, 47884791648255,
+STORE, 47884791648256, 47884791664639,
+STORE, 47884791652352, 47884791664639,
+STORE, 47884791648256, 47884791652351,
+ERASE, 47884791648256, 47884791652351,
+STORE, 47884791648256, 47884791652351,
+STORE, 47884791656448, 47884791664639,
+STORE, 47884791652352, 47884791656447,
+ERASE, 47884791652352, 47884791656447,
+STORE, 47884791652352, 47884791664639,
+ERASE, 47884791652352, 47884791664639,
+STORE, 47884791652352, 47884791656447,
+STORE, 47884791656448, 47884791664639,
+ERASE, 47884791656448, 47884791664639,
+STORE, 47884791656448, 47884791664639,
+STORE, 47884791664640, 47884791672831,
+ERASE, 47884791468032, 47884791492607,
+STORE, 47884791468032, 47884791484415,
+STORE, 47884791484416, 47884791492607,
+ERASE, 47884791656448, 47884791664639,
+STORE, 47884791656448, 47884791660543,
+STORE, 47884791660544, 47884791664639,
+ERASE, 47884791619584, 47884791627775,
+STORE, 47884791619584, 47884791623679,
+STORE, 47884791623680, 47884791627775,
+ };
+
+ static const unsigned long set6[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140722999021568, 140737488351231,
+ERASE, 140722999021568, 140737488351231,
+STORE, 140722999021568, 140722999025663,
+STORE, 94901500268544, 94901500440575,
+ERASE, 94901500268544, 94901500440575,
+STORE, 94901500268544, 94901500284927,
+STORE, 94901500284928, 94901500440575,
+ERASE, 94901500284928, 94901500440575,
+STORE, 94901500284928, 94901500387327,
+STORE, 94901500387328, 94901500428287,
+STORE, 94901500428288, 94901500440575,
+STORE, 47430426660864, 47430426832895,
+ERASE, 47430426660864, 47430426832895,
+STORE, 47430426660864, 47430426664959,
+STORE, 47430426664960, 47430426832895,
+ERASE, 47430426664960, 47430426832895,
+STORE, 47430426664960, 47430426787839,
+STORE, 47430426787840, 47430426820607,
+STORE, 47430426820608, 47430426828799,
+STORE, 47430426828800, 47430426832895,
+STORE, 140722999115776, 140722999119871,
+STORE, 140722999103488, 140722999115775,
+STORE, 47430426832896, 47430426841087,
+STORE, 47430426841088, 47430426849279,
+STORE, 47430426849280, 47430427013119,
+ERASE, 47430426849280, 47430427013119,
+STORE, 47430426849280, 47430426861567,
+STORE, 47430426861568, 47430427013119,
+STORE, 47430426959872, 47430427013119,
+STORE, 47430426861568, 47430426959871,
+ERASE, 47430426861568, 47430426959871,
+STORE, 47430426861568, 47430426959871,
+STORE, 47430427004928, 47430427013119,
+STORE, 47430426959872, 47430427004927,
+ERASE, 47430426959872, 47430427004927,
+STORE, 47430426959872, 47430427013119,
+ERASE, 47430426959872, 47430427013119,
+STORE, 47430426959872, 47430427004927,
+STORE, 47430427004928, 47430427013119,
+ERASE, 47430427004928, 47430427013119,
+STORE, 47430427004928, 47430427013119,
+STORE, 47430427013120, 47430430064639,
+STORE, 47430427557888, 47430430064639,
+STORE, 47430427013120, 47430427557887,
+ERASE, 47430427557888, 47430430064639,
+STORE, 47430427557888, 47430429843455,
+STORE, 47430429843456, 47430430064639,
+STORE, 47430429253632, 47430429843455,
+STORE, 47430427557888, 47430429253631,
+ERASE, 47430427557888, 47430429253631,
+STORE, 47430427557888, 47430429253631,
+STORE, 47430429839360, 47430429843455,
+STORE, 47430429253632, 47430429839359,
+ERASE, 47430429253632, 47430429839359,
+STORE, 47430429253632, 47430429839359,
+STORE, 47430430048256, 47430430064639,
+STORE, 47430429843456, 47430430048255,
+ERASE, 47430429843456, 47430430048255,
+STORE, 47430429843456, 47430430048255,
+ERASE, 47430430048256, 47430430064639,
+STORE, 47430430048256, 47430430064639,
+STORE, 47430430064640, 47430431903743,
+STORE, 47430430203904, 47430431903743,
+STORE, 47430430064640, 47430430203903,
+ERASE, 47430430203904, 47430431903743,
+STORE, 47430430203904, 47430431862783,
+STORE, 47430431862784, 47430431903743,
+STORE, 47430431547392, 47430431862783,
+STORE, 47430430203904, 47430431547391,
+ERASE, 47430430203904, 47430431547391,
+STORE, 47430430203904, 47430431547391,
+STORE, 47430431858688, 47430431862783,
+STORE, 47430431547392, 47430431858687,
+ERASE, 47430431547392, 47430431858687,
+STORE, 47430431547392, 47430431858687,
+STORE, 47430431887360, 47430431903743,
+STORE, 47430431862784, 47430431887359,
+ERASE, 47430431862784, 47430431887359,
+STORE, 47430431862784, 47430431887359,
+ERASE, 47430431887360, 47430431903743,
+STORE, 47430431887360, 47430431903743,
+STORE, 47430431903744, 47430432038911,
+ERASE, 47430431903744, 47430432038911,
+STORE, 47430431903744, 47430431928319,
+STORE, 47430431928320, 47430432038911,
+STORE, 47430431989760, 47430432038911,
+STORE, 47430431928320, 47430431989759,
+ERASE, 47430431928320, 47430431989759,
+STORE, 47430431928320, 47430431989759,
+STORE, 47430432014336, 47430432038911,
+STORE, 47430431989760, 47430432014335,
+ERASE, 47430431989760, 47430432014335,
+STORE, 47430431989760, 47430432038911,
+ERASE, 47430431989760, 47430432038911,
+STORE, 47430431989760, 47430432014335,
+STORE, 47430432014336, 47430432038911,
+STORE, 47430432022528, 47430432038911,
+STORE, 47430432014336, 47430432022527,
+ERASE, 47430432014336, 47430432022527,
+STORE, 47430432014336, 47430432022527,
+ERASE, 47430432022528, 47430432038911,
+STORE, 47430432022528, 47430432038911,
+STORE, 47430432038912, 47430432059391,
+ERASE, 47430432038912, 47430432059391,
+STORE, 47430432038912, 47430432043007,
+STORE, 47430432043008, 47430432059391,
+STORE, 47430432047104, 47430432059391,
+STORE, 47430432043008, 47430432047103,
+ERASE, 47430432043008, 47430432047103,
+STORE, 47430432043008, 47430432047103,
+STORE, 47430432051200, 47430432059391,
+STORE, 47430432047104, 47430432051199,
+ERASE, 47430432047104, 47430432051199,
+STORE, 47430432047104, 47430432059391,
+ERASE, 47430432047104, 47430432059391,
+STORE, 47430432047104, 47430432051199,
+STORE, 47430432051200, 47430432059391,
+ERASE, 47430432051200, 47430432059391,
+STORE, 47430432051200, 47430432059391,
+STORE, 47430432059392, 47430432067583,
+ERASE, 47430431862784, 47430431887359,
+STORE, 47430431862784, 47430431879167,
+STORE, 47430431879168, 47430431887359,
+ERASE, 47430432051200, 47430432059391,
+STORE, 47430432051200, 47430432055295,
+STORE, 47430432055296, 47430432059391,
+ERASE, 47430432014336, 47430432022527,
+STORE, 47430432014336, 47430432018431,
+STORE, 47430432018432, 47430432022527,
+ };
+
+ static const unsigned long set7[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140729808330752, 140737488351231,
+ERASE, 140729808330752, 140737488351231,
+STORE, 140729808330752, 140729808334847,
+STORE, 94629632020480, 94629632192511,
+ERASE, 94629632020480, 94629632192511,
+STORE, 94629632020480, 94629632036863,
+STORE, 94629632036864, 94629632192511,
+ERASE, 94629632036864, 94629632192511,
+STORE, 94629632036864, 94629632139263,
+STORE, 94629632139264, 94629632180223,
+STORE, 94629632180224, 94629632192511,
+STORE, 47439981776896, 47439981948927,
+ERASE, 47439981776896, 47439981948927,
+STORE, 47439981776896, 47439981780991,
+STORE, 47439981780992, 47439981948927,
+ERASE, 47439981780992, 47439981948927,
+STORE, 47439981780992, 47439981903871,
+STORE, 47439981903872, 47439981936639,
+STORE, 47439981936640, 47439981944831,
+STORE, 47439981944832, 47439981948927,
+STORE, 140729808474112, 140729808478207,
+STORE, 140729808461824, 140729808474111,
+STORE, 47439981948928, 47439981957119,
+STORE, 47439981957120, 47439981965311,
+STORE, 47439981965312, 47439982129151,
+ERASE, 47439981965312, 47439982129151,
+STORE, 47439981965312, 47439981977599,
+STORE, 47439981977600, 47439982129151,
+STORE, 47439982075904, 47439982129151,
+STORE, 47439981977600, 47439982075903,
+ERASE, 47439981977600, 47439982075903,
+STORE, 47439981977600, 47439982075903,
+STORE, 47439982120960, 47439982129151,
+STORE, 47439982075904, 47439982120959,
+ERASE, 47439982075904, 47439982120959,
+STORE, 47439982075904, 47439982129151,
+ERASE, 47439982075904, 47439982129151,
+STORE, 47439982075904, 47439982120959,
+STORE, 47439982120960, 47439982129151,
+ERASE, 47439982120960, 47439982129151,
+STORE, 47439982120960, 47439982129151,
+STORE, 47439982129152, 47439985180671,
+STORE, 47439982673920, 47439985180671,
+STORE, 47439982129152, 47439982673919,
+ERASE, 47439982673920, 47439985180671,
+STORE, 47439982673920, 47439984959487,
+STORE, 47439984959488, 47439985180671,
+STORE, 47439984369664, 47439984959487,
+STORE, 47439982673920, 47439984369663,
+ERASE, 47439982673920, 47439984369663,
+STORE, 47439982673920, 47439984369663,
+STORE, 47439984955392, 47439984959487,
+STORE, 47439984369664, 47439984955391,
+ERASE, 47439984369664, 47439984955391,
+STORE, 47439984369664, 47439984955391,
+STORE, 47439985164288, 47439985180671,
+STORE, 47439984959488, 47439985164287,
+ERASE, 47439984959488, 47439985164287,
+STORE, 47439984959488, 47439985164287,
+ERASE, 47439985164288, 47439985180671,
+STORE, 47439985164288, 47439985180671,
+STORE, 47439985180672, 47439987019775,
+STORE, 47439985319936, 47439987019775,
+STORE, 47439985180672, 47439985319935,
+ERASE, 47439985319936, 47439987019775,
+STORE, 47439985319936, 47439986978815,
+STORE, 47439986978816, 47439987019775,
+STORE, 47439986663424, 47439986978815,
+STORE, 47439985319936, 47439986663423,
+ERASE, 47439985319936, 47439986663423,
+STORE, 47439985319936, 47439986663423,
+STORE, 47439986974720, 47439986978815,
+STORE, 47439986663424, 47439986974719,
+ERASE, 47439986663424, 47439986974719,
+STORE, 47439986663424, 47439986974719,
+STORE, 47439987003392, 47439987019775,
+STORE, 47439986978816, 47439987003391,
+ERASE, 47439986978816, 47439987003391,
+STORE, 47439986978816, 47439987003391,
+ERASE, 47439987003392, 47439987019775,
+STORE, 47439987003392, 47439987019775,
+STORE, 47439987019776, 47439987154943,
+ERASE, 47439987019776, 47439987154943,
+STORE, 47439987019776, 47439987044351,
+STORE, 47439987044352, 47439987154943,
+STORE, 47439987105792, 47439987154943,
+STORE, 47439987044352, 47439987105791,
+ERASE, 47439987044352, 47439987105791,
+STORE, 47439987044352, 47439987105791,
+STORE, 47439987130368, 47439987154943,
+STORE, 47439987105792, 47439987130367,
+ERASE, 47439987105792, 47439987130367,
+STORE, 47439987105792, 47439987154943,
+ERASE, 47439987105792, 47439987154943,
+STORE, 47439987105792, 47439987130367,
+STORE, 47439987130368, 47439987154943,
+STORE, 47439987138560, 47439987154943,
+STORE, 47439987130368, 47439987138559,
+ERASE, 47439987130368, 47439987138559,
+STORE, 47439987130368, 47439987138559,
+ERASE, 47439987138560, 47439987154943,
+STORE, 47439987138560, 47439987154943,
+STORE, 47439987154944, 47439987175423,
+ERASE, 47439987154944, 47439987175423,
+STORE, 47439987154944, 47439987159039,
+STORE, 47439987159040, 47439987175423,
+STORE, 47439987163136, 47439987175423,
+STORE, 47439987159040, 47439987163135,
+ERASE, 47439987159040, 47439987163135,
+STORE, 47439987159040, 47439987163135,
+STORE, 47439987167232, 47439987175423,
+STORE, 47439987163136, 47439987167231,
+ERASE, 47439987163136, 47439987167231,
+STORE, 47439987163136, 47439987175423,
+ERASE, 47439987163136, 47439987175423,
+STORE, 47439987163136, 47439987167231,
+STORE, 47439987167232, 47439987175423,
+ERASE, 47439987167232, 47439987175423,
+STORE, 47439987167232, 47439987175423,
+STORE, 47439987175424, 47439987183615,
+ERASE, 47439986978816, 47439987003391,
+STORE, 47439986978816, 47439986995199,
+STORE, 47439986995200, 47439987003391,
+ERASE, 47439987167232, 47439987175423,
+STORE, 47439987167232, 47439987171327,
+STORE, 47439987171328, 47439987175423,
+ERASE, 47439987130368, 47439987138559,
+STORE, 47439987130368, 47439987134463,
+STORE, 47439987134464, 47439987138559,
+ };
+
+ static const unsigned long set8[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140722482974720, 140737488351231,
+ERASE, 140722482974720, 140737488351231,
+STORE, 140722482974720, 140722482978815,
+STORE, 94121505034240, 94121505206271,
+ERASE, 94121505034240, 94121505206271,
+STORE, 94121505034240, 94121505050623,
+STORE, 94121505050624, 94121505206271,
+ERASE, 94121505050624, 94121505206271,
+STORE, 94121505050624, 94121505153023,
+STORE, 94121505153024, 94121505193983,
+STORE, 94121505193984, 94121505206271,
+STORE, 47708483284992, 47708483457023,
+ERASE, 47708483284992, 47708483457023,
+STORE, 47708483284992, 47708483289087,
+STORE, 47708483289088, 47708483457023,
+ERASE, 47708483289088, 47708483457023,
+STORE, 47708483289088, 47708483411967,
+STORE, 47708483411968, 47708483444735,
+STORE, 47708483444736, 47708483452927,
+STORE, 47708483452928, 47708483457023,
+STORE, 140722483142656, 140722483146751,
+STORE, 140722483130368, 140722483142655,
+STORE, 47708483457024, 47708483465215,
+STORE, 47708483465216, 47708483473407,
+STORE, 47708483473408, 47708483637247,
+ERASE, 47708483473408, 47708483637247,
+STORE, 47708483473408, 47708483485695,
+STORE, 47708483485696, 47708483637247,
+STORE, 47708483584000, 47708483637247,
+STORE, 47708483485696, 47708483583999,
+ERASE, 47708483485696, 47708483583999,
+STORE, 47708483485696, 47708483583999,
+STORE, 47708483629056, 47708483637247,
+STORE, 47708483584000, 47708483629055,
+ERASE, 47708483584000, 47708483629055,
+STORE, 47708483584000, 47708483637247,
+ERASE, 47708483584000, 47708483637247,
+STORE, 47708483584000, 47708483629055,
+STORE, 47708483629056, 47708483637247,
+ERASE, 47708483629056, 47708483637247,
+STORE, 47708483629056, 47708483637247,
+STORE, 47708483637248, 47708486688767,
+STORE, 47708484182016, 47708486688767,
+STORE, 47708483637248, 47708484182015,
+ERASE, 47708484182016, 47708486688767,
+STORE, 47708484182016, 47708486467583,
+STORE, 47708486467584, 47708486688767,
+STORE, 47708485877760, 47708486467583,
+STORE, 47708484182016, 47708485877759,
+ERASE, 47708484182016, 47708485877759,
+STORE, 47708484182016, 47708485877759,
+STORE, 47708486463488, 47708486467583,
+STORE, 47708485877760, 47708486463487,
+ERASE, 47708485877760, 47708486463487,
+STORE, 47708485877760, 47708486463487,
+STORE, 47708486672384, 47708486688767,
+STORE, 47708486467584, 47708486672383,
+ERASE, 47708486467584, 47708486672383,
+STORE, 47708486467584, 47708486672383,
+ERASE, 47708486672384, 47708486688767,
+STORE, 47708486672384, 47708486688767,
+STORE, 47708486688768, 47708488527871,
+STORE, 47708486828032, 47708488527871,
+STORE, 47708486688768, 47708486828031,
+ERASE, 47708486828032, 47708488527871,
+STORE, 47708486828032, 47708488486911,
+STORE, 47708488486912, 47708488527871,
+STORE, 47708488171520, 47708488486911,
+STORE, 47708486828032, 47708488171519,
+ERASE, 47708486828032, 47708488171519,
+STORE, 47708486828032, 47708488171519,
+STORE, 47708488482816, 47708488486911,
+STORE, 47708488171520, 47708488482815,
+ERASE, 47708488171520, 47708488482815,
+STORE, 47708488171520, 47708488482815,
+STORE, 47708488511488, 47708488527871,
+STORE, 47708488486912, 47708488511487,
+ERASE, 47708488486912, 47708488511487,
+STORE, 47708488486912, 47708488511487,
+ERASE, 47708488511488, 47708488527871,
+STORE, 47708488511488, 47708488527871,
+STORE, 47708488527872, 47708488663039,
+ERASE, 47708488527872, 47708488663039,
+STORE, 47708488527872, 47708488552447,
+STORE, 47708488552448, 47708488663039,
+STORE, 47708488613888, 47708488663039,
+STORE, 47708488552448, 47708488613887,
+ERASE, 47708488552448, 47708488613887,
+STORE, 47708488552448, 47708488613887,
+STORE, 47708488638464, 47708488663039,
+STORE, 47708488613888, 47708488638463,
+ERASE, 47708488613888, 47708488638463,
+STORE, 47708488613888, 47708488663039,
+ERASE, 47708488613888, 47708488663039,
+STORE, 47708488613888, 47708488638463,
+STORE, 47708488638464, 47708488663039,
+STORE, 47708488646656, 47708488663039,
+STORE, 47708488638464, 47708488646655,
+ERASE, 47708488638464, 47708488646655,
+STORE, 47708488638464, 47708488646655,
+ERASE, 47708488646656, 47708488663039,
+STORE, 47708488646656, 47708488663039,
+STORE, 47708488663040, 47708488683519,
+ERASE, 47708488663040, 47708488683519,
+STORE, 47708488663040, 47708488667135,
+STORE, 47708488667136, 47708488683519,
+STORE, 47708488671232, 47708488683519,
+STORE, 47708488667136, 47708488671231,
+ERASE, 47708488667136, 47708488671231,
+STORE, 47708488667136, 47708488671231,
+STORE, 47708488675328, 47708488683519,
+STORE, 47708488671232, 47708488675327,
+ERASE, 47708488671232, 47708488675327,
+STORE, 47708488671232, 47708488683519,
+ERASE, 47708488671232, 47708488683519,
+STORE, 47708488671232, 47708488675327,
+STORE, 47708488675328, 47708488683519,
+ERASE, 47708488675328, 47708488683519,
+STORE, 47708488675328, 47708488683519,
+STORE, 47708488683520, 47708488691711,
+ERASE, 47708488486912, 47708488511487,
+STORE, 47708488486912, 47708488503295,
+STORE, 47708488503296, 47708488511487,
+ERASE, 47708488675328, 47708488683519,
+STORE, 47708488675328, 47708488679423,
+STORE, 47708488679424, 47708488683519,
+ERASE, 47708488638464, 47708488646655,
+STORE, 47708488638464, 47708488642559,
+STORE, 47708488642560, 47708488646655,
+ };
+
+ static const unsigned long set9[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140736427839488, 140737488351231,
+ERASE, 140736427839488, 140736427839488,
+STORE, 140736427839488, 140736427843583,
+STORE, 94071213395968, 94071213567999,
+ERASE, 94071213395968, 94071213395968,
+STORE, 94071213395968, 94071213412351,
+STORE, 94071213412352, 94071213567999,
+ERASE, 94071213412352, 94071213412352,
+STORE, 94071213412352, 94071213514751,
+STORE, 94071213514752, 94071213555711,
+STORE, 94071213555712, 94071213567999,
+STORE, 139968410644480, 139968410816511,
+ERASE, 139968410644480, 139968410644480,
+STORE, 139968410644480, 139968410648575,
+STORE, 139968410648576, 139968410816511,
+ERASE, 139968410648576, 139968410648576,
+STORE, 139968410648576, 139968410771455,
+STORE, 139968410771456, 139968410804223,
+STORE, 139968410804224, 139968410812415,
+STORE, 139968410812416, 139968410816511,
+STORE, 140736429277184, 140736429281279,
+STORE, 140736429264896, 140736429277183,
+STORE, 47664384352256, 47664384360447,
+STORE, 47664384360448, 47664384368639,
+STORE, 47664384368640, 47664384532479,
+ERASE, 47664384368640, 47664384368640,
+STORE, 47664384368640, 47664384380927,
+STORE, 47664384380928, 47664384532479,
+STORE, 47664384479232, 47664384532479,
+STORE, 47664384380928, 47664384479231,
+ERASE, 47664384380928, 47664384380928,
+STORE, 47664384380928, 47664384479231,
+STORE, 47664384524288, 47664384532479,
+STORE, 47664384479232, 47664384524287,
+ERASE, 47664384479232, 47664384479232,
+STORE, 47664384479232, 47664384532479,
+ERASE, 47664384479232, 47664384479232,
+STORE, 47664384479232, 47664384524287,
+STORE, 47664384524288, 47664384532479,
+ERASE, 47664384524288, 47664384524288,
+STORE, 47664384524288, 47664384532479,
+STORE, 47664384532480, 47664387583999,
+STORE, 47664385077248, 47664387583999,
+STORE, 47664384532480, 47664385077247,
+ERASE, 47664385077248, 47664385077248,
+STORE, 47664385077248, 47664387362815,
+STORE, 47664387362816, 47664387583999,
+STORE, 47664386772992, 47664387362815,
+STORE, 47664385077248, 47664386772991,
+ERASE, 47664385077248, 47664385077248,
+STORE, 47664385077248, 47664386772991,
+STORE, 47664387358720, 47664387362815,
+STORE, 47664386772992, 47664387358719,
+ERASE, 47664386772992, 47664386772992,
+STORE, 47664386772992, 47664387358719,
+STORE, 47664387567616, 47664387583999,
+STORE, 47664387362816, 47664387567615,
+ERASE, 47664387362816, 47664387362816,
+STORE, 47664387362816, 47664387567615,
+ERASE, 47664387567616, 47664387567616,
+STORE, 47664387567616, 47664387583999,
+STORE, 47664387584000, 47664389423103,
+STORE, 47664387723264, 47664389423103,
+STORE, 47664387584000, 47664387723263,
+ERASE, 47664387723264, 47664387723264,
+STORE, 47664387723264, 47664389382143,
+STORE, 47664389382144, 47664389423103,
+STORE, 47664389066752, 47664389382143,
+STORE, 47664387723264, 47664389066751,
+ERASE, 47664387723264, 47664387723264,
+STORE, 47664387723264, 47664389066751,
+STORE, 47664389378048, 47664389382143,
+STORE, 47664389066752, 47664389378047,
+ERASE, 47664389066752, 47664389066752,
+STORE, 47664389066752, 47664389378047,
+STORE, 47664389406720, 47664389423103,
+STORE, 47664389382144, 47664389406719,
+ERASE, 47664389382144, 47664389382144,
+STORE, 47664389382144, 47664389406719,
+ERASE, 47664389406720, 47664389406720,
+STORE, 47664389406720, 47664389423103,
+STORE, 47664389423104, 47664389558271,
+ERASE, 47664389423104, 47664389423104,
+STORE, 47664389423104, 47664389447679,
+STORE, 47664389447680, 47664389558271,
+STORE, 47664389509120, 47664389558271,
+STORE, 47664389447680, 47664389509119,
+ERASE, 47664389447680, 47664389447680,
+STORE, 47664389447680, 47664389509119,
+STORE, 47664389533696, 47664389558271,
+STORE, 47664389509120, 47664389533695,
+ERASE, 47664389509120, 47664389509120,
+STORE, 47664389509120, 47664389558271,
+ERASE, 47664389509120, 47664389509120,
+STORE, 47664389509120, 47664389533695,
+STORE, 47664389533696, 47664389558271,
+STORE, 47664389541888, 47664389558271,
+STORE, 47664389533696, 47664389541887,
+ERASE, 47664389533696, 47664389533696,
+STORE, 47664389533696, 47664389541887,
+ERASE, 47664389541888, 47664389541888,
+STORE, 47664389541888, 47664389558271,
+STORE, 47664389558272, 47664389578751,
+ERASE, 47664389558272, 47664389558272,
+STORE, 47664389558272, 47664389562367,
+STORE, 47664389562368, 47664389578751,
+STORE, 47664389566464, 47664389578751,
+STORE, 47664389562368, 47664389566463,
+ERASE, 47664389562368, 47664389562368,
+STORE, 47664389562368, 47664389566463,
+STORE, 47664389570560, 47664389578751,
+STORE, 47664389566464, 47664389570559,
+ERASE, 47664389566464, 47664389566464,
+STORE, 47664389566464, 47664389578751,
+ERASE, 47664389566464, 47664389566464,
+STORE, 47664389566464, 47664389570559,
+STORE, 47664389570560, 47664389578751,
+ERASE, 47664389570560, 47664389570560,
+STORE, 47664389570560, 47664389578751,
+STORE, 47664389578752, 47664389586943,
+ERASE, 47664389382144, 47664389382144,
+STORE, 47664389382144, 47664389398527,
+STORE, 47664389398528, 47664389406719,
+ERASE, 47664389570560, 47664389570560,
+STORE, 47664389570560, 47664389574655,
+STORE, 47664389574656, 47664389578751,
+ERASE, 47664389533696, 47664389533696,
+STORE, 47664389533696, 47664389537791,
+STORE, 47664389537792, 47664389541887,
+ERASE, 47664387362816, 47664387362816,
+STORE, 47664387362816, 47664387559423,
+STORE, 47664387559424, 47664387567615,
+ERASE, 47664384524288, 47664384524288,
+STORE, 47664384524288, 47664384528383,
+STORE, 47664384528384, 47664384532479,
+ERASE, 94071213555712, 94071213555712,
+STORE, 94071213555712, 94071213563903,
+STORE, 94071213563904, 94071213567999,
+ERASE, 139968410804224, 139968410804224,
+STORE, 139968410804224, 139968410808319,
+STORE, 139968410808320, 139968410812415,
+ERASE, 47664384352256, 47664384352256,
+STORE, 94071244402688, 94071244537855,
+STORE, 140737488347136, 140737488351231,
+STORE, 140728271503360, 140737488351231,
+ERASE, 140728271503360, 140728271503360,
+STORE, 140728271503360, 140728271507455,
+STORE, 94410361982976, 94410362155007,
+ERASE, 94410361982976, 94410361982976,
+STORE, 94410361982976, 94410361999359,
+STORE, 94410361999360, 94410362155007,
+ERASE, 94410361999360, 94410361999360,
+STORE, 94410361999360, 94410362101759,
+STORE, 94410362101760, 94410362142719,
+STORE, 94410362142720, 94410362155007,
+STORE, 140351953997824, 140351954169855,
+ERASE, 140351953997824, 140351953997824,
+STORE, 140351953997824, 140351954001919,
+STORE, 140351954001920, 140351954169855,
+ERASE, 140351954001920, 140351954001920,
+STORE, 140351954001920, 140351954124799,
+STORE, 140351954124800, 140351954157567,
+STORE, 140351954157568, 140351954165759,
+STORE, 140351954165760, 140351954169855,
+STORE, 140728272429056, 140728272433151,
+STORE, 140728272416768, 140728272429055,
+STORE, 47280840998912, 47280841007103,
+STORE, 47280841007104, 47280841015295,
+STORE, 47280841015296, 47280841179135,
+ERASE, 47280841015296, 47280841015296,
+STORE, 47280841015296, 47280841027583,
+STORE, 47280841027584, 47280841179135,
+STORE, 47280841125888, 47280841179135,
+STORE, 47280841027584, 47280841125887,
+ERASE, 47280841027584, 47280841027584,
+STORE, 47280841027584, 47280841125887,
+STORE, 47280841170944, 47280841179135,
+STORE, 47280841125888, 47280841170943,
+ERASE, 47280841125888, 47280841125888,
+STORE, 47280841125888, 47280841179135,
+ERASE, 47280841125888, 47280841125888,
+STORE, 47280841125888, 47280841170943,
+STORE, 47280841170944, 47280841179135,
+ERASE, 47280841170944, 47280841170944,
+STORE, 47280841170944, 47280841179135,
+STORE, 47280841179136, 47280844230655,
+STORE, 47280841723904, 47280844230655,
+STORE, 47280841179136, 47280841723903,
+ERASE, 47280841723904, 47280841723904,
+STORE, 47280841723904, 47280844009471,
+STORE, 47280844009472, 47280844230655,
+STORE, 47280843419648, 47280844009471,
+STORE, 47280841723904, 47280843419647,
+ERASE, 47280841723904, 47280841723904,
+STORE, 47280841723904, 47280843419647,
+STORE, 47280844005376, 47280844009471,
+STORE, 47280843419648, 47280844005375,
+ERASE, 47280843419648, 47280843419648,
+STORE, 47280843419648, 47280844005375,
+STORE, 47280844214272, 47280844230655,
+STORE, 47280844009472, 47280844214271,
+ERASE, 47280844009472, 47280844009472,
+STORE, 47280844009472, 47280844214271,
+ERASE, 47280844214272, 47280844214272,
+STORE, 47280844214272, 47280844230655,
+STORE, 47280844230656, 47280846069759,
+STORE, 47280844369920, 47280846069759,
+STORE, 47280844230656, 47280844369919,
+ERASE, 47280844369920, 47280844369920,
+STORE, 47280844369920, 47280846028799,
+STORE, 47280846028800, 47280846069759,
+STORE, 47280845713408, 47280846028799,
+STORE, 47280844369920, 47280845713407,
+ERASE, 47280844369920, 47280844369920,
+STORE, 47280844369920, 47280845713407,
+STORE, 47280846024704, 47280846028799,
+STORE, 47280845713408, 47280846024703,
+ERASE, 47280845713408, 47280845713408,
+STORE, 47280845713408, 47280846024703,
+STORE, 47280846053376, 47280846069759,
+STORE, 47280846028800, 47280846053375,
+ERASE, 47280846028800, 47280846028800,
+STORE, 47280846028800, 47280846053375,
+ERASE, 47280846053376, 47280846053376,
+STORE, 47280846053376, 47280846069759,
+STORE, 47280846069760, 47280846204927,
+ERASE, 47280846069760, 47280846069760,
+STORE, 47280846069760, 47280846094335,
+STORE, 47280846094336, 47280846204927,
+STORE, 47280846155776, 47280846204927,
+STORE, 47280846094336, 47280846155775,
+ERASE, 47280846094336, 47280846094336,
+STORE, 47280846094336, 47280846155775,
+STORE, 47280846180352, 47280846204927,
+STORE, 47280846155776, 47280846180351,
+ERASE, 47280846155776, 47280846155776,
+STORE, 47280846155776, 47280846204927,
+ERASE, 47280846155776, 47280846155776,
+STORE, 47280846155776, 47280846180351,
+STORE, 47280846180352, 47280846204927,
+STORE, 47280846188544, 47280846204927,
+STORE, 47280846180352, 47280846188543,
+ERASE, 47280846180352, 47280846180352,
+STORE, 47280846180352, 47280846188543,
+ERASE, 47280846188544, 47280846188544,
+STORE, 47280846188544, 47280846204927,
+STORE, 47280846204928, 47280846225407,
+ERASE, 47280846204928, 47280846204928,
+STORE, 47280846204928, 47280846209023,
+STORE, 47280846209024, 47280846225407,
+STORE, 47280846213120, 47280846225407,
+STORE, 47280846209024, 47280846213119,
+ERASE, 47280846209024, 47280846209024,
+STORE, 47280846209024, 47280846213119,
+STORE, 47280846217216, 47280846225407,
+STORE, 47280846213120, 47280846217215,
+ERASE, 47280846213120, 47280846213120,
+STORE, 47280846213120, 47280846225407,
+ERASE, 47280846213120, 47280846213120,
+STORE, 47280846213120, 47280846217215,
+STORE, 47280846217216, 47280846225407,
+ERASE, 47280846217216, 47280846217216,
+STORE, 47280846217216, 47280846225407,
+STORE, 47280846225408, 47280846233599,
+ERASE, 47280846028800, 47280846028800,
+STORE, 47280846028800, 47280846045183,
+STORE, 47280846045184, 47280846053375,
+ERASE, 47280846217216, 47280846217216,
+STORE, 47280846217216, 47280846221311,
+STORE, 47280846221312, 47280846225407,
+ERASE, 47280846180352, 47280846180352,
+STORE, 47280846180352, 47280846184447,
+STORE, 47280846184448, 47280846188543,
+ERASE, 47280844009472, 47280844009472,
+STORE, 47280844009472, 47280844206079,
+STORE, 47280844206080, 47280844214271,
+ERASE, 47280841170944, 47280841170944,
+STORE, 47280841170944, 47280841175039,
+STORE, 47280841175040, 47280841179135,
+ERASE, 94410362142720, 94410362142720,
+STORE, 94410362142720, 94410362150911,
+STORE, 94410362150912, 94410362155007,
+ERASE, 140351954157568, 140351954157568,
+STORE, 140351954157568, 140351954161663,
+STORE, 140351954161664, 140351954165759,
+ERASE, 47280840998912, 47280840998912,
+STORE, 94410379456512, 94410379591679,
+STORE, 140737488347136, 140737488351231,
+STORE, 140732946362368, 140737488351231,
+ERASE, 140732946362368, 140732946362368,
+STORE, 140732946362368, 140732946366463,
+STORE, 94352937934848, 94352938106879,
+ERASE, 94352937934848, 94352937934848,
+STORE, 94352937934848, 94352937951231,
+STORE, 94352937951232, 94352938106879,
+ERASE, 94352937951232, 94352937951232,
+STORE, 94352937951232, 94352938053631,
+STORE, 94352938053632, 94352938094591,
+STORE, 94352938094592, 94352938106879,
+STORE, 140595518742528, 140595518914559,
+ERASE, 140595518742528, 140595518742528,
+STORE, 140595518742528, 140595518746623,
+STORE, 140595518746624, 140595518914559,
+ERASE, 140595518746624, 140595518746624,
+STORE, 140595518746624, 140595518869503,
+STORE, 140595518869504, 140595518902271,
+STORE, 140595518902272, 140595518910463,
+STORE, 140595518910464, 140595518914559,
+STORE, 140732947468288, 140732947472383,
+STORE, 140732947456000, 140732947468287,
+STORE, 47037276254208, 47037276262399,
+STORE, 47037276262400, 47037276270591,
+STORE, 47037276270592, 47037276434431,
+ERASE, 47037276270592, 47037276270592,
+STORE, 47037276270592, 47037276282879,
+STORE, 47037276282880, 47037276434431,
+STORE, 47037276381184, 47037276434431,
+STORE, 47037276282880, 47037276381183,
+ERASE, 47037276282880, 47037276282880,
+STORE, 47037276282880, 47037276381183,
+STORE, 47037276426240, 47037276434431,
+STORE, 47037276381184, 47037276426239,
+ERASE, 47037276381184, 47037276381184,
+STORE, 47037276381184, 47037276434431,
+ERASE, 47037276381184, 47037276381184,
+STORE, 47037276381184, 47037276426239,
+STORE, 47037276426240, 47037276434431,
+ERASE, 47037276426240, 47037276426240,
+STORE, 47037276426240, 47037276434431,
+STORE, 47037276434432, 47037279485951,
+STORE, 47037276979200, 47037279485951,
+STORE, 47037276434432, 47037276979199,
+ERASE, 47037276979200, 47037276979200,
+STORE, 47037276979200, 47037279264767,
+STORE, 47037279264768, 47037279485951,
+STORE, 47037278674944, 47037279264767,
+STORE, 47037276979200, 47037278674943,
+ERASE, 47037276979200, 47037276979200,
+STORE, 47037276979200, 47037278674943,
+STORE, 47037279260672, 47037279264767,
+STORE, 47037278674944, 47037279260671,
+ERASE, 47037278674944, 47037278674944,
+STORE, 47037278674944, 47037279260671,
+STORE, 47037279469568, 47037279485951,
+STORE, 47037279264768, 47037279469567,
+ERASE, 47037279264768, 47037279264768,
+STORE, 47037279264768, 47037279469567,
+ERASE, 47037279469568, 47037279469568,
+STORE, 47037279469568, 47037279485951,
+STORE, 47037279485952, 47037281325055,
+STORE, 47037279625216, 47037281325055,
+STORE, 47037279485952, 47037279625215,
+ERASE, 47037279625216, 47037279625216,
+STORE, 47037279625216, 47037281284095,
+STORE, 47037281284096, 47037281325055,
+STORE, 47037280968704, 47037281284095,
+STORE, 47037279625216, 47037280968703,
+ERASE, 47037279625216, 47037279625216,
+STORE, 47037279625216, 47037280968703,
+STORE, 47037281280000, 47037281284095,
+STORE, 47037280968704, 47037281279999,
+ERASE, 47037280968704, 47037280968704,
+STORE, 47037280968704, 47037281279999,
+STORE, 47037281308672, 47037281325055,
+STORE, 47037281284096, 47037281308671,
+ERASE, 47037281284096, 47037281284096,
+STORE, 47037281284096, 47037281308671,
+ERASE, 47037281308672, 47037281308672,
+STORE, 47037281308672, 47037281325055,
+STORE, 47037281325056, 47037281460223,
+ERASE, 47037281325056, 47037281325056,
+STORE, 47037281325056, 47037281349631,
+STORE, 47037281349632, 47037281460223,
+STORE, 47037281411072, 47037281460223,
+STORE, 47037281349632, 47037281411071,
+ERASE, 47037281349632, 47037281349632,
+STORE, 47037281349632, 47037281411071,
+STORE, 47037281435648, 47037281460223,
+STORE, 47037281411072, 47037281435647,
+ERASE, 47037281411072, 47037281411072,
+STORE, 47037281411072, 47037281460223,
+ERASE, 47037281411072, 47037281411072,
+STORE, 47037281411072, 47037281435647,
+STORE, 47037281435648, 47037281460223,
+STORE, 47037281443840, 47037281460223,
+STORE, 47037281435648, 47037281443839,
+ERASE, 47037281435648, 47037281435648,
+STORE, 47037281435648, 47037281443839,
+ERASE, 47037281443840, 47037281443840,
+STORE, 47037281443840, 47037281460223,
+STORE, 47037281460224, 47037281480703,
+ERASE, 47037281460224, 47037281460224,
+STORE, 47037281460224, 47037281464319,
+STORE, 47037281464320, 47037281480703,
+STORE, 47037281468416, 47037281480703,
+STORE, 47037281464320, 47037281468415,
+ERASE, 47037281464320, 47037281464320,
+STORE, 47037281464320, 47037281468415,
+STORE, 47037281472512, 47037281480703,
+STORE, 47037281468416, 47037281472511,
+ERASE, 47037281468416, 47037281468416,
+STORE, 47037281468416, 47037281480703,
+ERASE, 47037281468416, 47037281468416,
+STORE, 47037281468416, 47037281472511,
+STORE, 47037281472512, 47037281480703,
+ERASE, 47037281472512, 47037281472512,
+STORE, 47037281472512, 47037281480703,
+STORE, 47037281480704, 47037281488895,
+ERASE, 47037281284096, 47037281284096,
+STORE, 47037281284096, 47037281300479,
+STORE, 47037281300480, 47037281308671,
+ERASE, 47037281472512, 47037281472512,
+STORE, 47037281472512, 47037281476607,
+STORE, 47037281476608, 47037281480703,
+ERASE, 47037281435648, 47037281435648,
+STORE, 47037281435648, 47037281439743,
+STORE, 47037281439744, 47037281443839,
+ERASE, 47037279264768, 47037279264768,
+STORE, 47037279264768, 47037279461375,
+STORE, 47037279461376, 47037279469567,
+ERASE, 47037276426240, 47037276426240,
+STORE, 47037276426240, 47037276430335,
+STORE, 47037276430336, 47037276434431,
+ERASE, 94352938094592, 94352938094592,
+STORE, 94352938094592, 94352938102783,
+STORE, 94352938102784, 94352938106879,
+ERASE, 140595518902272, 140595518902272,
+STORE, 140595518902272, 140595518906367,
+STORE, 140595518906368, 140595518910463,
+ERASE, 47037276254208, 47037276254208,
+STORE, 94352938438656, 94352938573823,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733506027520, 140737488351231,
+ERASE, 140733506027520, 140733506027520,
+STORE, 140733506027520, 140733506031615,
+STORE, 94150123073536, 94150123245567,
+ERASE, 94150123073536, 94150123073536,
+STORE, 94150123073536, 94150123089919,
+STORE, 94150123089920, 94150123245567,
+ERASE, 94150123089920, 94150123089920,
+STORE, 94150123089920, 94150123192319,
+STORE, 94150123192320, 94150123233279,
+STORE, 94150123233280, 94150123245567,
+STORE, 140081290375168, 140081290547199,
+ERASE, 140081290375168, 140081290375168,
+STORE, 140081290375168, 140081290379263,
+STORE, 140081290379264, 140081290547199,
+ERASE, 140081290379264, 140081290379264,
+STORE, 140081290379264, 140081290502143,
+STORE, 140081290502144, 140081290534911,
+STORE, 140081290534912, 140081290543103,
+STORE, 140081290543104, 140081290547199,
+STORE, 140733506707456, 140733506711551,
+STORE, 140733506695168, 140733506707455,
+STORE, 47551504621568, 47551504629759,
+STORE, 47551504629760, 47551504637951,
+STORE, 47551504637952, 47551504801791,
+ERASE, 47551504637952, 47551504637952,
+STORE, 47551504637952, 47551504650239,
+STORE, 47551504650240, 47551504801791,
+STORE, 47551504748544, 47551504801791,
+STORE, 47551504650240, 47551504748543,
+ERASE, 47551504650240, 47551504650240,
+STORE, 47551504650240, 47551504748543,
+STORE, 47551504793600, 47551504801791,
+STORE, 47551504748544, 47551504793599,
+ERASE, 47551504748544, 47551504748544,
+STORE, 47551504748544, 47551504801791,
+ERASE, 47551504748544, 47551504748544,
+STORE, 47551504748544, 47551504793599,
+STORE, 47551504793600, 47551504801791,
+ERASE, 47551504793600, 47551504793600,
+STORE, 47551504793600, 47551504801791,
+STORE, 47551504801792, 47551507853311,
+STORE, 47551505346560, 47551507853311,
+STORE, 47551504801792, 47551505346559,
+ERASE, 47551505346560, 47551505346560,
+STORE, 47551505346560, 47551507632127,
+STORE, 47551507632128, 47551507853311,
+STORE, 47551507042304, 47551507632127,
+STORE, 47551505346560, 47551507042303,
+ERASE, 47551505346560, 47551505346560,
+STORE, 47551505346560, 47551507042303,
+STORE, 47551507628032, 47551507632127,
+STORE, 47551507042304, 47551507628031,
+ERASE, 47551507042304, 47551507042304,
+STORE, 47551507042304, 47551507628031,
+STORE, 47551507836928, 47551507853311,
+STORE, 47551507632128, 47551507836927,
+ERASE, 47551507632128, 47551507632128,
+STORE, 47551507632128, 47551507836927,
+ERASE, 47551507836928, 47551507836928,
+STORE, 47551507836928, 47551507853311,
+STORE, 47551507853312, 47551509692415,
+STORE, 47551507992576, 47551509692415,
+STORE, 47551507853312, 47551507992575,
+ERASE, 47551507992576, 47551507992576,
+STORE, 47551507992576, 47551509651455,
+STORE, 47551509651456, 47551509692415,
+STORE, 47551509336064, 47551509651455,
+STORE, 47551507992576, 47551509336063,
+ERASE, 47551507992576, 47551507992576,
+STORE, 47551507992576, 47551509336063,
+STORE, 47551509647360, 47551509651455,
+STORE, 47551509336064, 47551509647359,
+ERASE, 47551509336064, 47551509336064,
+STORE, 47551509336064, 47551509647359,
+STORE, 47551509676032, 47551509692415,
+STORE, 47551509651456, 47551509676031,
+ERASE, 47551509651456, 47551509651456,
+STORE, 47551509651456, 47551509676031,
+ERASE, 47551509676032, 47551509676032,
+STORE, 47551509676032, 47551509692415,
+STORE, 47551509692416, 47551509827583,
+ERASE, 47551509692416, 47551509692416,
+STORE, 47551509692416, 47551509716991,
+STORE, 47551509716992, 47551509827583,
+STORE, 47551509778432, 47551509827583,
+STORE, 47551509716992, 47551509778431,
+ERASE, 47551509716992, 47551509716992,
+STORE, 47551509716992, 47551509778431,
+STORE, 47551509803008, 47551509827583,
+STORE, 47551509778432, 47551509803007,
+ERASE, 47551509778432, 47551509778432,
+STORE, 47551509778432, 47551509827583,
+ERASE, 47551509778432, 47551509778432,
+STORE, 47551509778432, 47551509803007,
+STORE, 47551509803008, 47551509827583,
+STORE, 47551509811200, 47551509827583,
+STORE, 47551509803008, 47551509811199,
+ERASE, 47551509803008, 47551509803008,
+STORE, 47551509803008, 47551509811199,
+ERASE, 47551509811200, 47551509811200,
+STORE, 47551509811200, 47551509827583,
+STORE, 47551509827584, 47551509848063,
+ERASE, 47551509827584, 47551509827584,
+STORE, 47551509827584, 47551509831679,
+STORE, 47551509831680, 47551509848063,
+STORE, 47551509835776, 47551509848063,
+STORE, 47551509831680, 47551509835775,
+ERASE, 47551509831680, 47551509831680,
+STORE, 47551509831680, 47551509835775,
+STORE, 47551509839872, 47551509848063,
+STORE, 47551509835776, 47551509839871,
+ERASE, 47551509835776, 47551509835776,
+STORE, 47551509835776, 47551509848063,
+ERASE, 47551509835776, 47551509835776,
+STORE, 47551509835776, 47551509839871,
+STORE, 47551509839872, 47551509848063,
+ERASE, 47551509839872, 47551509839872,
+STORE, 47551509839872, 47551509848063,
+STORE, 47551509848064, 47551509856255,
+ERASE, 47551509651456, 47551509651456,
+STORE, 47551509651456, 47551509667839,
+STORE, 47551509667840, 47551509676031,
+ERASE, 47551509839872, 47551509839872,
+STORE, 47551509839872, 47551509843967,
+STORE, 47551509843968, 47551509848063,
+ERASE, 47551509803008, 47551509803008,
+STORE, 47551509803008, 47551509807103,
+STORE, 47551509807104, 47551509811199,
+ERASE, 47551507632128, 47551507632128,
+STORE, 47551507632128, 47551507828735,
+STORE, 47551507828736, 47551507836927,
+ERASE, 47551504793600, 47551504793600,
+STORE, 47551504793600, 47551504797695,
+STORE, 47551504797696, 47551504801791,
+ERASE, 94150123233280, 94150123233280,
+STORE, 94150123233280, 94150123241471,
+STORE, 94150123241472, 94150123245567,
+ERASE, 140081290534912, 140081290534912,
+STORE, 140081290534912, 140081290539007,
+STORE, 140081290539008, 140081290543103,
+ERASE, 47551504621568, 47551504621568,
+STORE, 94150148112384, 94150148247551,
+STORE, 140737488347136, 140737488351231,
+STORE, 140734389334016, 140737488351231,
+ERASE, 140734389334016, 140734389334016,
+STORE, 140734389334016, 140734389338111,
+STORE, 94844636606464, 94844636778495,
+ERASE, 94844636606464, 94844636606464,
+STORE, 94844636606464, 94844636622847,
+STORE, 94844636622848, 94844636778495,
+ERASE, 94844636622848, 94844636622848,
+STORE, 94844636622848, 94844636725247,
+STORE, 94844636725248, 94844636766207,
+STORE, 94844636766208, 94844636778495,
+STORE, 139922765217792, 139922765389823,
+ERASE, 139922765217792, 139922765217792,
+STORE, 139922765217792, 139922765221887,
+STORE, 139922765221888, 139922765389823,
+ERASE, 139922765221888, 139922765221888,
+STORE, 139922765221888, 139922765344767,
+STORE, 139922765344768, 139922765377535,
+STORE, 139922765377536, 139922765385727,
+STORE, 139922765385728, 139922765389823,
+STORE, 140734389678080, 140734389682175,
+STORE, 140734389665792, 140734389678079,
+STORE, 47710029778944, 47710029787135,
+STORE, 47710029787136, 47710029795327,
+STORE, 47710029795328, 47710029959167,
+ERASE, 47710029795328, 47710029795328,
+STORE, 47710029795328, 47710029807615,
+STORE, 47710029807616, 47710029959167,
+STORE, 47710029905920, 47710029959167,
+STORE, 47710029807616, 47710029905919,
+ERASE, 47710029807616, 47710029807616,
+STORE, 47710029807616, 47710029905919,
+STORE, 47710029950976, 47710029959167,
+STORE, 47710029905920, 47710029950975,
+ERASE, 47710029905920, 47710029905920,
+STORE, 47710029905920, 47710029959167,
+ERASE, 47710029905920, 47710029905920,
+STORE, 47710029905920, 47710029950975,
+STORE, 47710029950976, 47710029959167,
+ERASE, 47710029950976, 47710029950976,
+STORE, 47710029950976, 47710029959167,
+STORE, 47710029959168, 47710033010687,
+STORE, 47710030503936, 47710033010687,
+STORE, 47710029959168, 47710030503935,
+ERASE, 47710030503936, 47710030503936,
+STORE, 47710030503936, 47710032789503,
+STORE, 47710032789504, 47710033010687,
+STORE, 47710032199680, 47710032789503,
+STORE, 47710030503936, 47710032199679,
+ERASE, 47710030503936, 47710030503936,
+STORE, 47710030503936, 47710032199679,
+STORE, 47710032785408, 47710032789503,
+STORE, 47710032199680, 47710032785407,
+ERASE, 47710032199680, 47710032199680,
+STORE, 47710032199680, 47710032785407,
+STORE, 47710032994304, 47710033010687,
+STORE, 47710032789504, 47710032994303,
+ERASE, 47710032789504, 47710032789504,
+STORE, 47710032789504, 47710032994303,
+ERASE, 47710032994304, 47710032994304,
+STORE, 47710032994304, 47710033010687,
+STORE, 47710033010688, 47710034849791,
+STORE, 47710033149952, 47710034849791,
+STORE, 47710033010688, 47710033149951,
+ERASE, 47710033149952, 47710033149952,
+STORE, 47710033149952, 47710034808831,
+STORE, 47710034808832, 47710034849791,
+STORE, 47710034493440, 47710034808831,
+STORE, 47710033149952, 47710034493439,
+ERASE, 47710033149952, 47710033149952,
+STORE, 47710033149952, 47710034493439,
+STORE, 47710034804736, 47710034808831,
+STORE, 47710034493440, 47710034804735,
+ERASE, 47710034493440, 47710034493440,
+STORE, 47710034493440, 47710034804735,
+STORE, 47710034833408, 47710034849791,
+STORE, 47710034808832, 47710034833407,
+ERASE, 47710034808832, 47710034808832,
+STORE, 47710034808832, 47710034833407,
+ERASE, 47710034833408, 47710034833408,
+STORE, 47710034833408, 47710034849791,
+STORE, 47710034849792, 47710034984959,
+ERASE, 47710034849792, 47710034849792,
+STORE, 47710034849792, 47710034874367,
+STORE, 47710034874368, 47710034984959,
+STORE, 47710034935808, 47710034984959,
+STORE, 47710034874368, 47710034935807,
+ERASE, 47710034874368, 47710034874368,
+STORE, 47710034874368, 47710034935807,
+STORE, 47710034960384, 47710034984959,
+STORE, 47710034935808, 47710034960383,
+ERASE, 47710034935808, 47710034935808,
+STORE, 47710034935808, 47710034984959,
+ERASE, 47710034935808, 47710034935808,
+STORE, 47710034935808, 47710034960383,
+STORE, 47710034960384, 47710034984959,
+STORE, 47710034968576, 47710034984959,
+STORE, 47710034960384, 47710034968575,
+ERASE, 47710034960384, 47710034960384,
+STORE, 47710034960384, 47710034968575,
+ERASE, 47710034968576, 47710034968576,
+STORE, 47710034968576, 47710034984959,
+STORE, 47710034984960, 47710035005439,
+ERASE, 47710034984960, 47710034984960,
+STORE, 47710034984960, 47710034989055,
+STORE, 47710034989056, 47710035005439,
+STORE, 47710034993152, 47710035005439,
+STORE, 47710034989056, 47710034993151,
+ERASE, 47710034989056, 47710034989056,
+STORE, 47710034989056, 47710034993151,
+STORE, 47710034997248, 47710035005439,
+STORE, 47710034993152, 47710034997247,
+ERASE, 47710034993152, 47710034993152,
+STORE, 47710034993152, 47710035005439,
+ERASE, 47710034993152, 47710034993152,
+STORE, 47710034993152, 47710034997247,
+STORE, 47710034997248, 47710035005439,
+ERASE, 47710034997248, 47710034997248,
+STORE, 47710034997248, 47710035005439,
+STORE, 47710035005440, 47710035013631,
+ERASE, 47710034808832, 47710034808832,
+STORE, 47710034808832, 47710034825215,
+STORE, 47710034825216, 47710034833407,
+ERASE, 47710034997248, 47710034997248,
+STORE, 47710034997248, 47710035001343,
+STORE, 47710035001344, 47710035005439,
+ERASE, 47710034960384, 47710034960384,
+STORE, 47710034960384, 47710034964479,
+STORE, 47710034964480, 47710034968575,
+ERASE, 47710032789504, 47710032789504,
+STORE, 47710032789504, 47710032986111,
+STORE, 47710032986112, 47710032994303,
+ERASE, 47710029950976, 47710029950976,
+STORE, 47710029950976, 47710029955071,
+STORE, 47710029955072, 47710029959167,
+ERASE, 94844636766208, 94844636766208,
+STORE, 94844636766208, 94844636774399,
+STORE, 94844636774400, 94844636778495,
+ERASE, 139922765377536, 139922765377536,
+STORE, 139922765377536, 139922765381631,
+STORE, 139922765381632, 139922765385727,
+ERASE, 47710029778944, 47710029778944,
+STORE, 94844641775616, 94844641910783,
+STORE, 140737488347136, 140737488351231,
+STORE, 140732213886976, 140737488351231,
+ERASE, 140732213886976, 140732213886976,
+STORE, 140732213886976, 140732213891071,
+STORE, 94240508887040, 94240509059071,
+ERASE, 94240508887040, 94240508887040,
+STORE, 94240508887040, 94240508903423,
+STORE, 94240508903424, 94240509059071,
+ERASE, 94240508903424, 94240508903424,
+STORE, 94240508903424, 94240509005823,
+STORE, 94240509005824, 94240509046783,
+STORE, 94240509046784, 94240509059071,
+STORE, 140275106516992, 140275106689023,
+ERASE, 140275106516992, 140275106516992,
+STORE, 140275106516992, 140275106521087,
+STORE, 140275106521088, 140275106689023,
+ERASE, 140275106521088, 140275106521088,
+STORE, 140275106521088, 140275106643967,
+STORE, 140275106643968, 140275106676735,
+STORE, 140275106676736, 140275106684927,
+STORE, 140275106684928, 140275106689023,
+STORE, 140732213977088, 140732213981183,
+STORE, 140732213964800, 140732213977087,
+STORE, 47357688479744, 47357688487935,
+STORE, 47357688487936, 47357688496127,
+STORE, 47357688496128, 47357688659967,
+ERASE, 47357688496128, 47357688496128,
+STORE, 47357688496128, 47357688508415,
+STORE, 47357688508416, 47357688659967,
+STORE, 47357688606720, 47357688659967,
+STORE, 47357688508416, 47357688606719,
+ERASE, 47357688508416, 47357688508416,
+STORE, 47357688508416, 47357688606719,
+STORE, 47357688651776, 47357688659967,
+STORE, 47357688606720, 47357688651775,
+ERASE, 47357688606720, 47357688606720,
+STORE, 47357688606720, 47357688659967,
+ERASE, 47357688606720, 47357688606720,
+STORE, 47357688606720, 47357688651775,
+STORE, 47357688651776, 47357688659967,
+ERASE, 47357688651776, 47357688651776,
+STORE, 47357688651776, 47357688659967,
+STORE, 47357688659968, 47357691711487,
+STORE, 47357689204736, 47357691711487,
+STORE, 47357688659968, 47357689204735,
+ERASE, 47357689204736, 47357689204736,
+STORE, 47357689204736, 47357691490303,
+STORE, 47357691490304, 47357691711487,
+STORE, 47357690900480, 47357691490303,
+STORE, 47357689204736, 47357690900479,
+ERASE, 47357689204736, 47357689204736,
+STORE, 47357689204736, 47357690900479,
+STORE, 47357691486208, 47357691490303,
+STORE, 47357690900480, 47357691486207,
+ERASE, 47357690900480, 47357690900480,
+STORE, 47357690900480, 47357691486207,
+STORE, 47357691695104, 47357691711487,
+STORE, 47357691490304, 47357691695103,
+ERASE, 47357691490304, 47357691490304,
+STORE, 47357691490304, 47357691695103,
+ERASE, 47357691695104, 47357691695104,
+STORE, 47357691695104, 47357691711487,
+STORE, 47357691711488, 47357693550591,
+STORE, 47357691850752, 47357693550591,
+STORE, 47357691711488, 47357691850751,
+ERASE, 47357691850752, 47357691850752,
+STORE, 47357691850752, 47357693509631,
+STORE, 47357693509632, 47357693550591,
+STORE, 47357693194240, 47357693509631,
+STORE, 47357691850752, 47357693194239,
+ERASE, 47357691850752, 47357691850752,
+STORE, 47357691850752, 47357693194239,
+STORE, 47357693505536, 47357693509631,
+STORE, 47357693194240, 47357693505535,
+ERASE, 47357693194240, 47357693194240,
+STORE, 47357693194240, 47357693505535,
+STORE, 47357693534208, 47357693550591,
+STORE, 47357693509632, 47357693534207,
+ERASE, 47357693509632, 47357693509632,
+STORE, 47357693509632, 47357693534207,
+ERASE, 47357693534208, 47357693534208,
+STORE, 47357693534208, 47357693550591,
+STORE, 47357693550592, 47357693685759,
+ERASE, 47357693550592, 47357693550592,
+STORE, 47357693550592, 47357693575167,
+STORE, 47357693575168, 47357693685759,
+STORE, 47357693636608, 47357693685759,
+STORE, 47357693575168, 47357693636607,
+ERASE, 47357693575168, 47357693575168,
+STORE, 47357693575168, 47357693636607,
+STORE, 47357693661184, 47357693685759,
+STORE, 47357693636608, 47357693661183,
+ERASE, 47357693636608, 47357693636608,
+STORE, 47357693636608, 47357693685759,
+ERASE, 47357693636608, 47357693636608,
+STORE, 47357693636608, 47357693661183,
+STORE, 47357693661184, 47357693685759,
+STORE, 47357693669376, 47357693685759,
+STORE, 47357693661184, 47357693669375,
+ERASE, 47357693661184, 47357693661184,
+STORE, 47357693661184, 47357693669375,
+ERASE, 47357693669376, 47357693669376,
+STORE, 47357693669376, 47357693685759,
+STORE, 47357693685760, 47357693706239,
+ERASE, 47357693685760, 47357693685760,
+STORE, 47357693685760, 47357693689855,
+STORE, 47357693689856, 47357693706239,
+STORE, 47357693693952, 47357693706239,
+STORE, 47357693689856, 47357693693951,
+ERASE, 47357693689856, 47357693689856,
+STORE, 47357693689856, 47357693693951,
+STORE, 47357693698048, 47357693706239,
+STORE, 47357693693952, 47357693698047,
+ERASE, 47357693693952, 47357693693952,
+STORE, 47357693693952, 47357693706239,
+ERASE, 47357693693952, 47357693693952,
+STORE, 47357693693952, 47357693698047,
+STORE, 47357693698048, 47357693706239,
+ERASE, 47357693698048, 47357693698048,
+STORE, 47357693698048, 47357693706239,
+STORE, 47357693706240, 47357693714431,
+ERASE, 47357693509632, 47357693509632,
+STORE, 47357693509632, 47357693526015,
+STORE, 47357693526016, 47357693534207,
+ERASE, 47357693698048, 47357693698048,
+STORE, 47357693698048, 47357693702143,
+STORE, 47357693702144, 47357693706239,
+ERASE, 47357693661184, 47357693661184,
+STORE, 47357693661184, 47357693665279,
+STORE, 47357693665280, 47357693669375,
+ERASE, 47357691490304, 47357691490304,
+STORE, 47357691490304, 47357691686911,
+STORE, 47357691686912, 47357691695103,
+ERASE, 47357688651776, 47357688651776,
+STORE, 47357688651776, 47357688655871,
+STORE, 47357688655872, 47357688659967,
+ERASE, 94240509046784, 94240509046784,
+STORE, 94240509046784, 94240509054975,
+STORE, 94240509054976, 94240509059071,
+ERASE, 140275106676736, 140275106676736,
+STORE, 140275106676736, 140275106680831,
+STORE, 140275106680832, 140275106684927,
+ERASE, 47357688479744, 47357688479744,
+STORE, 94240518361088, 94240518496255,
+STORE, 140737488347136, 140737488351231,
+STORE, 140732688277504, 140737488351231,
+ERASE, 140732688277504, 140732688277504,
+STORE, 140732688277504, 140732688281599,
+STORE, 94629171351552, 94629172064255,
+ERASE, 94629171351552, 94629171351552,
+STORE, 94629171351552, 94629171400703,
+STORE, 94629171400704, 94629172064255,
+ERASE, 94629171400704, 94629171400704,
+STORE, 94629171400704, 94629171945471,
+STORE, 94629171945472, 94629172043775,
+STORE, 94629172043776, 94629172064255,
+STORE, 139770707644416, 139770707816447,
+ERASE, 139770707644416, 139770707644416,
+STORE, 139770707644416, 139770707648511,
+STORE, 139770707648512, 139770707816447,
+ERASE, 139770707648512, 139770707648512,
+STORE, 139770707648512, 139770707771391,
+STORE, 139770707771392, 139770707804159,
+STORE, 139770707804160, 139770707812351,
+STORE, 139770707812352, 139770707816447,
+STORE, 140732689121280, 140732689125375,
+STORE, 140732689108992, 140732689121279,
+STORE, 47862087352320, 47862087360511,
+STORE, 47862087360512, 47862087368703,
+STORE, 47862087368704, 47862087475199,
+STORE, 47862087385088, 47862087475199,
+STORE, 47862087368704, 47862087385087,
+ERASE, 47862087385088, 47862087385088,
+STORE, 47862087385088, 47862087458815,
+STORE, 47862087458816, 47862087475199,
+STORE, 47862087438336, 47862087458815,
+STORE, 47862087385088, 47862087438335,
+ERASE, 47862087385088, 47862087385088,
+STORE, 47862087385088, 47862087438335,
+STORE, 47862087454720, 47862087458815,
+STORE, 47862087438336, 47862087454719,
+ERASE, 47862087438336, 47862087438336,
+STORE, 47862087438336, 47862087454719,
+STORE, 47862087467008, 47862087475199,
+STORE, 47862087458816, 47862087467007,
+ERASE, 47862087458816, 47862087458816,
+STORE, 47862087458816, 47862087467007,
+ERASE, 47862087467008, 47862087467008,
+STORE, 47862087467008, 47862087475199,
+STORE, 47862087475200, 47862089314303,
+STORE, 47862087614464, 47862089314303,
+STORE, 47862087475200, 47862087614463,
+ERASE, 47862087614464, 47862087614464,
+STORE, 47862087614464, 47862089273343,
+STORE, 47862089273344, 47862089314303,
+STORE, 47862088957952, 47862089273343,
+STORE, 47862087614464, 47862088957951,
+ERASE, 47862087614464, 47862087614464,
+STORE, 47862087614464, 47862088957951,
+STORE, 47862089269248, 47862089273343,
+STORE, 47862088957952, 47862089269247,
+ERASE, 47862088957952, 47862088957952,
+STORE, 47862088957952, 47862089269247,
+STORE, 47862089297920, 47862089314303,
+STORE, 47862089273344, 47862089297919,
+ERASE, 47862089273344, 47862089273344,
+STORE, 47862089273344, 47862089297919,
+ERASE, 47862089297920, 47862089297920,
+STORE, 47862089297920, 47862089314303,
+STORE, 47862089297920, 47862089326591,
+ERASE, 47862089273344, 47862089273344,
+STORE, 47862089273344, 47862089289727,
+STORE, 47862089289728, 47862089297919,
+ERASE, 47862087458816, 47862087458816,
+STORE, 47862087458816, 47862087462911,
+STORE, 47862087462912, 47862087467007,
+ERASE, 94629172043776, 94629172043776,
+STORE, 94629172043776, 94629172060159,
+STORE, 94629172060160, 94629172064255,
+ERASE, 139770707804160, 139770707804160,
+STORE, 139770707804160, 139770707808255,
+STORE, 139770707808256, 139770707812351,
+ERASE, 47862087352320, 47862087352320,
+STORE, 94629197533184, 94629197668351,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727540711424, 140737488351231,
+ERASE, 140727540711424, 140727540711424,
+STORE, 140727540711424, 140727540715519,
+STORE, 94299865313280, 94299866025983,
+ERASE, 94299865313280, 94299865313280,
+STORE, 94299865313280, 94299865362431,
+STORE, 94299865362432, 94299866025983,
+ERASE, 94299865362432, 94299865362432,
+STORE, 94299865362432, 94299865907199,
+STORE, 94299865907200, 94299866005503,
+STORE, 94299866005504, 94299866025983,
+STORE, 140680268763136, 140680268935167,
+ERASE, 140680268763136, 140680268763136,
+STORE, 140680268763136, 140680268767231,
+STORE, 140680268767232, 140680268935167,
+ERASE, 140680268767232, 140680268767232,
+STORE, 140680268767232, 140680268890111,
+STORE, 140680268890112, 140680268922879,
+STORE, 140680268922880, 140680268931071,
+STORE, 140680268931072, 140680268935167,
+STORE, 140727541424128, 140727541428223,
+STORE, 140727541411840, 140727541424127,
+STORE, 46952526233600, 46952526241791,
+STORE, 46952526241792, 46952526249983,
+STORE, 46952526249984, 46952526356479,
+STORE, 46952526266368, 46952526356479,
+STORE, 46952526249984, 46952526266367,
+ERASE, 46952526266368, 46952526266368,
+STORE, 46952526266368, 46952526340095,
+STORE, 46952526340096, 46952526356479,
+STORE, 46952526319616, 46952526340095,
+STORE, 46952526266368, 46952526319615,
+ERASE, 46952526266368, 46952526266368,
+STORE, 46952526266368, 46952526319615,
+STORE, 46952526336000, 46952526340095,
+STORE, 46952526319616, 46952526335999,
+ERASE, 46952526319616, 46952526319616,
+STORE, 46952526319616, 46952526335999,
+STORE, 46952526348288, 46952526356479,
+STORE, 46952526340096, 46952526348287,
+ERASE, 46952526340096, 46952526340096,
+STORE, 46952526340096, 46952526348287,
+ERASE, 46952526348288, 46952526348288,
+STORE, 46952526348288, 46952526356479,
+STORE, 46952526356480, 46952528195583,
+STORE, 46952526495744, 46952528195583,
+STORE, 46952526356480, 46952526495743,
+ERASE, 46952526495744, 46952526495744,
+STORE, 46952526495744, 46952528154623,
+STORE, 46952528154624, 46952528195583,
+STORE, 46952527839232, 46952528154623,
+STORE, 46952526495744, 46952527839231,
+ERASE, 46952526495744, 46952526495744,
+STORE, 46952526495744, 46952527839231,
+STORE, 46952528150528, 46952528154623,
+STORE, 46952527839232, 46952528150527,
+ERASE, 46952527839232, 46952527839232,
+STORE, 46952527839232, 46952528150527,
+STORE, 46952528179200, 46952528195583,
+STORE, 46952528154624, 46952528179199,
+ERASE, 46952528154624, 46952528154624,
+STORE, 46952528154624, 46952528179199,
+ERASE, 46952528179200, 46952528179200,
+STORE, 46952528179200, 46952528195583,
+STORE, 46952528179200, 46952528207871,
+ERASE, 46952528154624, 46952528154624,
+STORE, 46952528154624, 46952528171007,
+STORE, 46952528171008, 46952528179199,
+ERASE, 46952526340096, 46952526340096,
+STORE, 46952526340096, 46952526344191,
+STORE, 46952526344192, 46952526348287,
+ERASE, 94299866005504, 94299866005504,
+STORE, 94299866005504, 94299866021887,
+STORE, 94299866021888, 94299866025983,
+ERASE, 140680268922880, 140680268922880,
+STORE, 140680268922880, 140680268926975,
+STORE, 140680268926976, 140680268931071,
+ERASE, 46952526233600, 46952526233600,
+STORE, 140737488347136, 140737488351231,
+STORE, 140722874793984, 140737488351231,
+ERASE, 140722874793984, 140722874793984,
+STORE, 140722874793984, 140722874798079,
+STORE, 94448916213760, 94448916926463,
+ERASE, 94448916213760, 94448916213760,
+STORE, 94448916213760, 94448916262911,
+STORE, 94448916262912, 94448916926463,
+ERASE, 94448916262912, 94448916262912,
+STORE, 94448916262912, 94448916807679,
+STORE, 94448916807680, 94448916905983,
+STORE, 94448916905984, 94448916926463,
+STORE, 140389117046784, 140389117218815,
+ERASE, 140389117046784, 140389117046784,
+STORE, 140389117046784, 140389117050879,
+STORE, 140389117050880, 140389117218815,
+ERASE, 140389117050880, 140389117050880,
+STORE, 140389117050880, 140389117173759,
+STORE, 140389117173760, 140389117206527,
+STORE, 140389117206528, 140389117214719,
+STORE, 140389117214720, 140389117218815,
+STORE, 140722875297792, 140722875301887,
+STORE, 140722875285504, 140722875297791,
+STORE, 47243677949952, 47243677958143,
+STORE, 47243677958144, 47243677966335,
+STORE, 47243677966336, 47243678072831,
+STORE, 47243677982720, 47243678072831,
+STORE, 47243677966336, 47243677982719,
+ERASE, 47243677982720, 47243677982720,
+STORE, 47243677982720, 47243678056447,
+STORE, 47243678056448, 47243678072831,
+STORE, 47243678035968, 47243678056447,
+STORE, 47243677982720, 47243678035967,
+ERASE, 47243677982720, 47243677982720,
+STORE, 47243677982720, 47243678035967,
+STORE, 47243678052352, 47243678056447,
+STORE, 47243678035968, 47243678052351,
+ERASE, 47243678035968, 47243678035968,
+STORE, 47243678035968, 47243678052351,
+STORE, 47243678064640, 47243678072831,
+STORE, 47243678056448, 47243678064639,
+ERASE, 47243678056448, 47243678056448,
+STORE, 47243678056448, 47243678064639,
+ERASE, 47243678064640, 47243678064640,
+STORE, 47243678064640, 47243678072831,
+STORE, 47243678072832, 47243679911935,
+STORE, 47243678212096, 47243679911935,
+STORE, 47243678072832, 47243678212095,
+ERASE, 47243678212096, 47243678212096,
+STORE, 47243678212096, 47243679870975,
+STORE, 47243679870976, 47243679911935,
+STORE, 47243679555584, 47243679870975,
+STORE, 47243678212096, 47243679555583,
+ERASE, 47243678212096, 47243678212096,
+STORE, 47243678212096, 47243679555583,
+STORE, 47243679866880, 47243679870975,
+STORE, 47243679555584, 47243679866879,
+ERASE, 47243679555584, 47243679555584,
+STORE, 47243679555584, 47243679866879,
+STORE, 47243679895552, 47243679911935,
+STORE, 47243679870976, 47243679895551,
+ERASE, 47243679870976, 47243679870976,
+STORE, 47243679870976, 47243679895551,
+ERASE, 47243679895552, 47243679895552,
+STORE, 47243679895552, 47243679911935,
+STORE, 47243679895552, 47243679924223,
+ERASE, 47243679870976, 47243679870976,
+STORE, 47243679870976, 47243679887359,
+STORE, 47243679887360, 47243679895551,
+ERASE, 47243678056448, 47243678056448,
+STORE, 47243678056448, 47243678060543,
+STORE, 47243678060544, 47243678064639,
+ERASE, 94448916905984, 94448916905984,
+STORE, 94448916905984, 94448916922367,
+STORE, 94448916922368, 94448916926463,
+ERASE, 140389117206528, 140389117206528,
+STORE, 140389117206528, 140389117210623,
+STORE, 140389117210624, 140389117214719,
+ERASE, 47243677949952, 47243677949952,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733068505088, 140737488351231,
+ERASE, 140733068505088, 140733068505088,
+STORE, 140733068505088, 140733068509183,
+STORE, 94207145750528, 94207146463231,
+ERASE, 94207145750528, 94207145750528,
+STORE, 94207145750528, 94207145799679,
+STORE, 94207145799680, 94207146463231,
+ERASE, 94207145799680, 94207145799680,
+STORE, 94207145799680, 94207146344447,
+STORE, 94207146344448, 94207146442751,
+STORE, 94207146442752, 94207146463231,
+STORE, 140684504911872, 140684505083903,
+ERASE, 140684504911872, 140684504911872,
+STORE, 140684504911872, 140684504915967,
+STORE, 140684504915968, 140684505083903,
+ERASE, 140684504915968, 140684504915968,
+STORE, 140684504915968, 140684505038847,
+STORE, 140684505038848, 140684505071615,
+STORE, 140684505071616, 140684505079807,
+STORE, 140684505079808, 140684505083903,
+STORE, 140733068607488, 140733068611583,
+STORE, 140733068595200, 140733068607487,
+STORE, 46948290084864, 46948290093055,
+STORE, 46948290093056, 46948290101247,
+STORE, 46948290101248, 46948290207743,
+STORE, 46948290117632, 46948290207743,
+STORE, 46948290101248, 46948290117631,
+ERASE, 46948290117632, 46948290117632,
+STORE, 46948290117632, 46948290191359,
+STORE, 46948290191360, 46948290207743,
+STORE, 46948290170880, 46948290191359,
+STORE, 46948290117632, 46948290170879,
+ERASE, 46948290117632, 46948290117632,
+STORE, 46948290117632, 46948290170879,
+STORE, 46948290187264, 46948290191359,
+STORE, 46948290170880, 46948290187263,
+ERASE, 46948290170880, 46948290170880,
+STORE, 46948290170880, 46948290187263,
+STORE, 46948290199552, 46948290207743,
+STORE, 46948290191360, 46948290199551,
+ERASE, 46948290191360, 46948290191360,
+STORE, 46948290191360, 46948290199551,
+ERASE, 46948290199552, 46948290199552,
+STORE, 46948290199552, 46948290207743,
+STORE, 46948290207744, 46948292046847,
+STORE, 46948290347008, 46948292046847,
+STORE, 46948290207744, 46948290347007,
+ERASE, 46948290347008, 46948290347008,
+STORE, 46948290347008, 46948292005887,
+STORE, 46948292005888, 46948292046847,
+STORE, 46948291690496, 46948292005887,
+STORE, 46948290347008, 46948291690495,
+ERASE, 46948290347008, 46948290347008,
+STORE, 46948290347008, 46948291690495,
+STORE, 46948292001792, 46948292005887,
+STORE, 46948291690496, 46948292001791,
+ERASE, 46948291690496, 46948291690496,
+STORE, 46948291690496, 46948292001791,
+STORE, 46948292030464, 46948292046847,
+STORE, 46948292005888, 46948292030463,
+ERASE, 46948292005888, 46948292005888,
+STORE, 46948292005888, 46948292030463,
+ERASE, 46948292030464, 46948292030464,
+STORE, 46948292030464, 46948292046847,
+STORE, 46948292030464, 46948292059135,
+ERASE, 46948292005888, 46948292005888,
+STORE, 46948292005888, 46948292022271,
+STORE, 46948292022272, 46948292030463,
+ERASE, 46948290191360, 46948290191360,
+STORE, 46948290191360, 46948290195455,
+STORE, 46948290195456, 46948290199551,
+ERASE, 94207146442752, 94207146442752,
+STORE, 94207146442752, 94207146459135,
+STORE, 94207146459136, 94207146463231,
+ERASE, 140684505071616, 140684505071616,
+STORE, 140684505071616, 140684505075711,
+STORE, 140684505075712, 140684505079807,
+ERASE, 46948290084864, 46948290084864,
+STORE, 140737488347136, 140737488351231,
+STORE, 140726367158272, 140737488351231,
+ERASE, 140726367158272, 140726367158272,
+STORE, 140726367158272, 140726367162367,
+STORE, 94436124106752, 94436124819455,
+ERASE, 94436124106752, 94436124106752,
+STORE, 94436124106752, 94436124155903,
+STORE, 94436124155904, 94436124819455,
+ERASE, 94436124155904, 94436124155904,
+STORE, 94436124155904, 94436124700671,
+STORE, 94436124700672, 94436124798975,
+STORE, 94436124798976, 94436124819455,
+STORE, 140049025044480, 140049025216511,
+ERASE, 140049025044480, 140049025044480,
+STORE, 140049025044480, 140049025048575,
+STORE, 140049025048576, 140049025216511,
+ERASE, 140049025048576, 140049025048576,
+STORE, 140049025048576, 140049025171455,
+STORE, 140049025171456, 140049025204223,
+STORE, 140049025204224, 140049025212415,
+STORE, 140049025212416, 140049025216511,
+STORE, 140726367256576, 140726367260671,
+STORE, 140726367244288, 140726367256575,
+STORE, 47583769952256, 47583769960447,
+STORE, 47583769960448, 47583769968639,
+STORE, 47583769968640, 47583770075135,
+STORE, 47583769985024, 47583770075135,
+STORE, 47583769968640, 47583769985023,
+ERASE, 47583769985024, 47583769985024,
+STORE, 47583769985024, 47583770058751,
+STORE, 47583770058752, 47583770075135,
+STORE, 47583770038272, 47583770058751,
+STORE, 47583769985024, 47583770038271,
+ERASE, 47583769985024, 47583769985024,
+STORE, 47583769985024, 47583770038271,
+STORE, 47583770054656, 47583770058751,
+STORE, 47583770038272, 47583770054655,
+ERASE, 47583770038272, 47583770038272,
+STORE, 47583770038272, 47583770054655,
+STORE, 47583770066944, 47583770075135,
+STORE, 47583770058752, 47583770066943,
+ERASE, 47583770058752, 47583770058752,
+STORE, 47583770058752, 47583770066943,
+ERASE, 47583770066944, 47583770066944,
+STORE, 47583770066944, 47583770075135,
+STORE, 47583770075136, 47583771914239,
+STORE, 47583770214400, 47583771914239,
+STORE, 47583770075136, 47583770214399,
+ERASE, 47583770214400, 47583770214400,
+STORE, 47583770214400, 47583771873279,
+STORE, 47583771873280, 47583771914239,
+STORE, 47583771557888, 47583771873279,
+STORE, 47583770214400, 47583771557887,
+ERASE, 47583770214400, 47583770214400,
+STORE, 47583770214400, 47583771557887,
+STORE, 47583771869184, 47583771873279,
+STORE, 47583771557888, 47583771869183,
+ERASE, 47583771557888, 47583771557888,
+STORE, 47583771557888, 47583771869183,
+STORE, 47583771897856, 47583771914239,
+STORE, 47583771873280, 47583771897855,
+ERASE, 47583771873280, 47583771873280,
+STORE, 47583771873280, 47583771897855,
+ERASE, 47583771897856, 47583771897856,
+STORE, 47583771897856, 47583771914239,
+STORE, 47583771897856, 47583771926527,
+ERASE, 47583771873280, 47583771873280,
+STORE, 47583771873280, 47583771889663,
+STORE, 47583771889664, 47583771897855,
+ERASE, 47583770058752, 47583770058752,
+STORE, 47583770058752, 47583770062847,
+STORE, 47583770062848, 47583770066943,
+ERASE, 94436124798976, 94436124798976,
+STORE, 94436124798976, 94436124815359,
+STORE, 94436124815360, 94436124819455,
+ERASE, 140049025204224, 140049025204224,
+STORE, 140049025204224, 140049025208319,
+STORE, 140049025208320, 140049025212415,
+ERASE, 47583769952256, 47583769952256,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727116099584, 140737488351231,
+ERASE, 140727116099584, 140727116099584,
+STORE, 140727116099584, 140727116103679,
+STORE, 94166319734784, 94166320447487,
+ERASE, 94166319734784, 94166319734784,
+STORE, 94166319734784, 94166319783935,
+STORE, 94166319783936, 94166320447487,
+ERASE, 94166319783936, 94166319783936,
+STORE, 94166319783936, 94166320328703,
+STORE, 94166320328704, 94166320427007,
+STORE, 94166320427008, 94166320447487,
+STORE, 139976559542272, 139976559714303,
+ERASE, 139976559542272, 139976559542272,
+STORE, 139976559542272, 139976559546367,
+STORE, 139976559546368, 139976559714303,
+ERASE, 139976559546368, 139976559546368,
+STORE, 139976559546368, 139976559669247,
+STORE, 139976559669248, 139976559702015,
+STORE, 139976559702016, 139976559710207,
+STORE, 139976559710208, 139976559714303,
+STORE, 140727116222464, 140727116226559,
+STORE, 140727116210176, 140727116222463,
+STORE, 47656235454464, 47656235462655,
+STORE, 47656235462656, 47656235470847,
+STORE, 47656235470848, 47656235577343,
+STORE, 47656235487232, 47656235577343,
+STORE, 47656235470848, 47656235487231,
+ERASE, 47656235487232, 47656235487232,
+STORE, 47656235487232, 47656235560959,
+STORE, 47656235560960, 47656235577343,
+STORE, 47656235540480, 47656235560959,
+STORE, 47656235487232, 47656235540479,
+ERASE, 47656235487232, 47656235487232,
+STORE, 47656235487232, 47656235540479,
+STORE, 47656235556864, 47656235560959,
+STORE, 47656235540480, 47656235556863,
+ERASE, 47656235540480, 47656235540480,
+STORE, 47656235540480, 47656235556863,
+STORE, 47656235569152, 47656235577343,
+STORE, 47656235560960, 47656235569151,
+ERASE, 47656235560960, 47656235560960,
+STORE, 47656235560960, 47656235569151,
+ERASE, 47656235569152, 47656235569152,
+STORE, 47656235569152, 47656235577343,
+STORE, 47656235577344, 47656237416447,
+STORE, 47656235716608, 47656237416447,
+STORE, 47656235577344, 47656235716607,
+ERASE, 47656235716608, 47656235716608,
+STORE, 47656235716608, 47656237375487,
+STORE, 47656237375488, 47656237416447,
+STORE, 47656237060096, 47656237375487,
+STORE, 47656235716608, 47656237060095,
+ERASE, 47656235716608, 47656235716608,
+STORE, 47656235716608, 47656237060095,
+STORE, 47656237371392, 47656237375487,
+STORE, 47656237060096, 47656237371391,
+ERASE, 47656237060096, 47656237060096,
+STORE, 47656237060096, 47656237371391,
+STORE, 47656237400064, 47656237416447,
+STORE, 47656237375488, 47656237400063,
+ERASE, 47656237375488, 47656237375488,
+STORE, 47656237375488, 47656237400063,
+ERASE, 47656237400064, 47656237400064,
+STORE, 47656237400064, 47656237416447,
+STORE, 47656237400064, 47656237428735,
+ERASE, 47656237375488, 47656237375488,
+STORE, 47656237375488, 47656237391871,
+STORE, 47656237391872, 47656237400063,
+ERASE, 47656235560960, 47656235560960,
+STORE, 47656235560960, 47656235565055,
+STORE, 47656235565056, 47656235569151,
+ERASE, 94166320427008, 94166320427008,
+STORE, 94166320427008, 94166320443391,
+STORE, 94166320443392, 94166320447487,
+ERASE, 139976559702016, 139976559702016,
+STORE, 139976559702016, 139976559706111,
+STORE, 139976559706112, 139976559710207,
+ERASE, 47656235454464, 47656235454464,
+STORE, 94166332153856, 94166332289023,
+STORE, 140737488347136, 140737488351231,
+STORE, 140726412816384, 140737488351231,
+ERASE, 140726412816384, 140726412816384,
+STORE, 140726412816384, 140726412820479,
+STORE, 94094884507648, 94094885220351,
+ERASE, 94094884507648, 94094884507648,
+STORE, 94094884507648, 94094884556799,
+STORE, 94094884556800, 94094885220351,
+ERASE, 94094884556800, 94094884556800,
+STORE, 94094884556800, 94094885101567,
+STORE, 94094885101568, 94094885199871,
+STORE, 94094885199872, 94094885220351,
+STORE, 139773773938688, 139773774110719,
+ERASE, 139773773938688, 139773773938688,
+STORE, 139773773938688, 139773773942783,
+STORE, 139773773942784, 139773774110719,
+ERASE, 139773773942784, 139773773942784,
+STORE, 139773773942784, 139773774065663,
+STORE, 139773774065664, 139773774098431,
+STORE, 139773774098432, 139773774106623,
+STORE, 139773774106624, 139773774110719,
+STORE, 140726412963840, 140726412967935,
+STORE, 140726412951552, 140726412963839,
+STORE, 47859021058048, 47859021066239,
+STORE, 47859021066240, 47859021074431,
+STORE, 47859021074432, 47859021180927,
+STORE, 47859021090816, 47859021180927,
+STORE, 47859021074432, 47859021090815,
+ERASE, 47859021090816, 47859021090816,
+STORE, 47859021090816, 47859021164543,
+STORE, 47859021164544, 47859021180927,
+STORE, 47859021144064, 47859021164543,
+STORE, 47859021090816, 47859021144063,
+ERASE, 47859021090816, 47859021090816,
+STORE, 47859021090816, 47859021144063,
+STORE, 47859021160448, 47859021164543,
+STORE, 47859021144064, 47859021160447,
+ERASE, 47859021144064, 47859021144064,
+STORE, 47859021144064, 47859021160447,
+STORE, 47859021172736, 47859021180927,
+STORE, 47859021164544, 47859021172735,
+ERASE, 47859021164544, 47859021164544,
+STORE, 47859021164544, 47859021172735,
+ERASE, 47859021172736, 47859021172736,
+STORE, 47859021172736, 47859021180927,
+STORE, 47859021180928, 47859023020031,
+STORE, 47859021320192, 47859023020031,
+STORE, 47859021180928, 47859021320191,
+ERASE, 47859021320192, 47859021320192,
+STORE, 47859021320192, 47859022979071,
+STORE, 47859022979072, 47859023020031,
+STORE, 47859022663680, 47859022979071,
+STORE, 47859021320192, 47859022663679,
+ERASE, 47859021320192, 47859021320192,
+STORE, 47859021320192, 47859022663679,
+STORE, 47859022974976, 47859022979071,
+STORE, 47859022663680, 47859022974975,
+ERASE, 47859022663680, 47859022663680,
+STORE, 47859022663680, 47859022974975,
+STORE, 47859023003648, 47859023020031,
+STORE, 47859022979072, 47859023003647,
+ERASE, 47859022979072, 47859022979072,
+STORE, 47859022979072, 47859023003647,
+ERASE, 47859023003648, 47859023003648,
+STORE, 47859023003648, 47859023020031,
+STORE, 47859023003648, 47859023032319,
+ERASE, 47859022979072, 47859022979072,
+STORE, 47859022979072, 47859022995455,
+STORE, 47859022995456, 47859023003647,
+ERASE, 47859021164544, 47859021164544,
+STORE, 47859021164544, 47859021168639,
+STORE, 47859021168640, 47859021172735,
+ERASE, 94094885199872, 94094885199872,
+STORE, 94094885199872, 94094885216255,
+STORE, 94094885216256, 94094885220351,
+ERASE, 139773774098432, 139773774098432,
+STORE, 139773774098432, 139773774102527,
+STORE, 139773774102528, 139773774106623,
+ERASE, 47859021058048, 47859021058048,
+STORE, 94094901108736, 94094901243903,
+STORE, 140737488347136, 140737488351231,
+STORE, 140736567963648, 140737488351231,
+ERASE, 140736567963648, 140736567963648,
+STORE, 140736567963648, 140736567967743,
+STORE, 94924425748480, 94924426461183,
+ERASE, 94924425748480, 94924425748480,
+STORE, 94924425748480, 94924425797631,
+STORE, 94924425797632, 94924426461183,
+ERASE, 94924425797632, 94924425797632,
+STORE, 94924425797632, 94924426342399,
+STORE, 94924426342400, 94924426440703,
+STORE, 94924426440704, 94924426461183,
+STORE, 140042126319616, 140042126491647,
+ERASE, 140042126319616, 140042126319616,
+STORE, 140042126319616, 140042126323711,
+STORE, 140042126323712, 140042126491647,
+ERASE, 140042126323712, 140042126323712,
+STORE, 140042126323712, 140042126446591,
+STORE, 140042126446592, 140042126479359,
+STORE, 140042126479360, 140042126487551,
+STORE, 140042126487552, 140042126491647,
+STORE, 140736568672256, 140736568676351,
+STORE, 140736568659968, 140736568672255,
+STORE, 47590668677120, 47590668685311,
+STORE, 47590668685312, 47590668693503,
+STORE, 47590668693504, 47590668799999,
+STORE, 47590668709888, 47590668799999,
+STORE, 47590668693504, 47590668709887,
+ERASE, 47590668709888, 47590668709888,
+STORE, 47590668709888, 47590668783615,
+STORE, 47590668783616, 47590668799999,
+STORE, 47590668763136, 47590668783615,
+STORE, 47590668709888, 47590668763135,
+ERASE, 47590668709888, 47590668709888,
+STORE, 47590668709888, 47590668763135,
+STORE, 47590668779520, 47590668783615,
+STORE, 47590668763136, 47590668779519,
+ERASE, 47590668763136, 47590668763136,
+STORE, 47590668763136, 47590668779519,
+STORE, 47590668791808, 47590668799999,
+STORE, 47590668783616, 47590668791807,
+ERASE, 47590668783616, 47590668783616,
+STORE, 47590668783616, 47590668791807,
+ERASE, 47590668791808, 47590668791808,
+STORE, 47590668791808, 47590668799999,
+STORE, 47590668800000, 47590670639103,
+STORE, 47590668939264, 47590670639103,
+STORE, 47590668800000, 47590668939263,
+ERASE, 47590668939264, 47590668939264,
+STORE, 47590668939264, 47590670598143,
+STORE, 47590670598144, 47590670639103,
+STORE, 47590670282752, 47590670598143,
+STORE, 47590668939264, 47590670282751,
+ERASE, 47590668939264, 47590668939264,
+STORE, 47590668939264, 47590670282751,
+STORE, 47590670594048, 47590670598143,
+STORE, 47590670282752, 47590670594047,
+ERASE, 47590670282752, 47590670282752,
+STORE, 47590670282752, 47590670594047,
+STORE, 47590670622720, 47590670639103,
+STORE, 47590670598144, 47590670622719,
+ERASE, 47590670598144, 47590670598144,
+STORE, 47590670598144, 47590670622719,
+ERASE, 47590670622720, 47590670622720,
+STORE, 47590670622720, 47590670639103,
+STORE, 47590670622720, 47590670651391,
+ERASE, 47590670598144, 47590670598144,
+STORE, 47590670598144, 47590670614527,
+STORE, 47590670614528, 47590670622719,
+ERASE, 47590668783616, 47590668783616,
+STORE, 47590668783616, 47590668787711,
+STORE, 47590668787712, 47590668791807,
+ERASE, 94924426440704, 94924426440704,
+STORE, 94924426440704, 94924426457087,
+STORE, 94924426457088, 94924426461183,
+ERASE, 140042126479360, 140042126479360,
+STORE, 140042126479360, 140042126483455,
+STORE, 140042126483456, 140042126487551,
+ERASE, 47590668677120, 47590668677120,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733281439744, 140737488351231,
+ERASE, 140733281439744, 140733281439744,
+STORE, 140733281439744, 140733281443839,
+STORE, 94490667069440, 94490667782143,
+ERASE, 94490667069440, 94490667069440,
+STORE, 94490667069440, 94490667118591,
+STORE, 94490667118592, 94490667782143,
+ERASE, 94490667118592, 94490667118592,
+STORE, 94490667118592, 94490667663359,
+STORE, 94490667663360, 94490667761663,
+STORE, 94490667761664, 94490667782143,
+STORE, 139878215118848, 139878215290879,
+ERASE, 139878215118848, 139878215118848,
+STORE, 139878215118848, 139878215122943,
+STORE, 139878215122944, 139878215290879,
+ERASE, 139878215122944, 139878215122944,
+STORE, 139878215122944, 139878215245823,
+STORE, 139878215245824, 139878215278591,
+STORE, 139878215278592, 139878215286783,
+STORE, 139878215286784, 139878215290879,
+STORE, 140733281464320, 140733281468415,
+STORE, 140733281452032, 140733281464319,
+STORE, 47754579877888, 47754579886079,
+STORE, 47754579886080, 47754579894271,
+STORE, 47754579894272, 47754580000767,
+STORE, 47754579910656, 47754580000767,
+STORE, 47754579894272, 47754579910655,
+ERASE, 47754579910656, 47754579910656,
+STORE, 47754579910656, 47754579984383,
+STORE, 47754579984384, 47754580000767,
+STORE, 47754579963904, 47754579984383,
+STORE, 47754579910656, 47754579963903,
+ERASE, 47754579910656, 47754579910656,
+STORE, 47754579910656, 47754579963903,
+STORE, 47754579980288, 47754579984383,
+STORE, 47754579963904, 47754579980287,
+ERASE, 47754579963904, 47754579963904,
+STORE, 47754579963904, 47754579980287,
+STORE, 47754579992576, 47754580000767,
+STORE, 47754579984384, 47754579992575,
+ERASE, 47754579984384, 47754579984384,
+STORE, 47754579984384, 47754579992575,
+ERASE, 47754579992576, 47754579992576,
+STORE, 47754579992576, 47754580000767,
+STORE, 47754580000768, 47754581839871,
+STORE, 47754580140032, 47754581839871,
+STORE, 47754580000768, 47754580140031,
+ERASE, 47754580140032, 47754580140032,
+STORE, 47754580140032, 47754581798911,
+STORE, 47754581798912, 47754581839871,
+STORE, 47754581483520, 47754581798911,
+STORE, 47754580140032, 47754581483519,
+ERASE, 47754580140032, 47754580140032,
+STORE, 47754580140032, 47754581483519,
+STORE, 47754581794816, 47754581798911,
+STORE, 47754581483520, 47754581794815,
+ERASE, 47754581483520, 47754581483520,
+STORE, 47754581483520, 47754581794815,
+STORE, 47754581823488, 47754581839871,
+STORE, 47754581798912, 47754581823487,
+ERASE, 47754581798912, 47754581798912,
+STORE, 47754581798912, 47754581823487,
+ERASE, 47754581823488, 47754581823488,
+STORE, 47754581823488, 47754581839871,
+STORE, 47754581823488, 47754581852159,
+ERASE, 47754581798912, 47754581798912,
+STORE, 47754581798912, 47754581815295,
+STORE, 47754581815296, 47754581823487,
+ERASE, 47754579984384, 47754579984384,
+STORE, 47754579984384, 47754579988479,
+STORE, 47754579988480, 47754579992575,
+ERASE, 94490667761664, 94490667761664,
+STORE, 94490667761664, 94490667778047,
+STORE, 94490667778048, 94490667782143,
+ERASE, 139878215278592, 139878215278592,
+STORE, 139878215278592, 139878215282687,
+STORE, 139878215282688, 139878215286783,
+ERASE, 47754579877888, 47754579877888,
+STORE, 94490669649920, 94490669785087,
+STORE, 140737488347136, 140737488351231,
+STORE, 140735382188032, 140737488351231,
+ERASE, 140735382188032, 140735382188032,
+STORE, 140735382188032, 140735382192127,
+STORE, 94150181302272, 94150182014975,
+ERASE, 94150181302272, 94150181302272,
+STORE, 94150181302272, 94150181351423,
+STORE, 94150181351424, 94150182014975,
+ERASE, 94150181351424, 94150181351424,
+STORE, 94150181351424, 94150181896191,
+STORE, 94150181896192, 94150181994495,
+STORE, 94150181994496, 94150182014975,
+STORE, 139679752458240, 139679752630271,
+ERASE, 139679752458240, 139679752458240,
+STORE, 139679752458240, 139679752462335,
+STORE, 139679752462336, 139679752630271,
+ERASE, 139679752462336, 139679752462336,
+STORE, 139679752462336, 139679752585215,
+STORE, 139679752585216, 139679752617983,
+STORE, 139679752617984, 139679752626175,
+STORE, 139679752626176, 139679752630271,
+STORE, 140735382536192, 140735382540287,
+STORE, 140735382523904, 140735382536191,
+STORE, 47953042538496, 47953042546687,
+STORE, 47953042546688, 47953042554879,
+STORE, 47953042554880, 47953042661375,
+STORE, 47953042571264, 47953042661375,
+STORE, 47953042554880, 47953042571263,
+ERASE, 47953042571264, 47953042571264,
+STORE, 47953042571264, 47953042644991,
+STORE, 47953042644992, 47953042661375,
+STORE, 47953042624512, 47953042644991,
+STORE, 47953042571264, 47953042624511,
+ERASE, 47953042571264, 47953042571264,
+STORE, 47953042571264, 47953042624511,
+STORE, 47953042640896, 47953042644991,
+STORE, 47953042624512, 47953042640895,
+ERASE, 47953042624512, 47953042624512,
+STORE, 47953042624512, 47953042640895,
+STORE, 47953042653184, 47953042661375,
+STORE, 47953042644992, 47953042653183,
+ERASE, 47953042644992, 47953042644992,
+STORE, 47953042644992, 47953042653183,
+ERASE, 47953042653184, 47953042653184,
+STORE, 47953042653184, 47953042661375,
+STORE, 47953042661376, 47953044500479,
+STORE, 47953042800640, 47953044500479,
+STORE, 47953042661376, 47953042800639,
+ERASE, 47953042800640, 47953042800640,
+STORE, 47953042800640, 47953044459519,
+STORE, 47953044459520, 47953044500479,
+STORE, 47953044144128, 47953044459519,
+STORE, 47953042800640, 47953044144127,
+ERASE, 47953042800640, 47953042800640,
+STORE, 47953042800640, 47953044144127,
+STORE, 47953044455424, 47953044459519,
+STORE, 47953044144128, 47953044455423,
+ERASE, 47953044144128, 47953044144128,
+STORE, 47953044144128, 47953044455423,
+STORE, 47953044484096, 47953044500479,
+STORE, 47953044459520, 47953044484095,
+ERASE, 47953044459520, 47953044459520,
+STORE, 47953044459520, 47953044484095,
+ERASE, 47953044484096, 47953044484096,
+STORE, 47953044484096, 47953044500479,
+STORE, 47953044484096, 47953044512767,
+ERASE, 47953044459520, 47953044459520,
+STORE, 47953044459520, 47953044475903,
+STORE, 47953044475904, 47953044484095,
+ERASE, 47953042644992, 47953042644992,
+STORE, 47953042644992, 47953042649087,
+STORE, 47953042649088, 47953042653183,
+ERASE, 94150181994496, 94150181994496,
+STORE, 94150181994496, 94150182010879,
+STORE, 94150182010880, 94150182014975,
+ERASE, 139679752617984, 139679752617984,
+STORE, 139679752617984, 139679752622079,
+STORE, 139679752622080, 139679752626175,
+ERASE, 47953042538496, 47953042538496,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737044123648, 140737488351231,
+ERASE, 140737044123648, 140737044123648,
+STORE, 140737044123648, 140737044127743,
+STORE, 94425324294144, 94425325006847,
+ERASE, 94425324294144, 94425324294144,
+STORE, 94425324294144, 94425324343295,
+STORE, 94425324343296, 94425325006847,
+ERASE, 94425324343296, 94425324343296,
+STORE, 94425324343296, 94425324888063,
+STORE, 94425324888064, 94425324986367,
+STORE, 94425324986368, 94425325006847,
+STORE, 140382015016960, 140382015188991,
+ERASE, 140382015016960, 140382015016960,
+STORE, 140382015016960, 140382015021055,
+STORE, 140382015021056, 140382015188991,
+ERASE, 140382015021056, 140382015021056,
+STORE, 140382015021056, 140382015143935,
+STORE, 140382015143936, 140382015176703,
+STORE, 140382015176704, 140382015184895,
+STORE, 140382015184896, 140382015188991,
+STORE, 140737045585920, 140737045590015,
+STORE, 140737045573632, 140737045585919,
+STORE, 47250779979776, 47250779987967,
+STORE, 47250779987968, 47250779996159,
+STORE, 47250779996160, 47250780102655,
+STORE, 47250780012544, 47250780102655,
+STORE, 47250779996160, 47250780012543,
+ERASE, 47250780012544, 47250780012544,
+STORE, 47250780012544, 47250780086271,
+STORE, 47250780086272, 47250780102655,
+STORE, 47250780065792, 47250780086271,
+STORE, 47250780012544, 47250780065791,
+ERASE, 47250780012544, 47250780012544,
+STORE, 47250780012544, 47250780065791,
+STORE, 47250780082176, 47250780086271,
+STORE, 47250780065792, 47250780082175,
+ERASE, 47250780065792, 47250780065792,
+STORE, 47250780065792, 47250780082175,
+STORE, 47250780094464, 47250780102655,
+STORE, 47250780086272, 47250780094463,
+ERASE, 47250780086272, 47250780086272,
+STORE, 47250780086272, 47250780094463,
+ERASE, 47250780094464, 47250780094464,
+STORE, 47250780094464, 47250780102655,
+STORE, 47250780102656, 47250781941759,
+STORE, 47250780241920, 47250781941759,
+STORE, 47250780102656, 47250780241919,
+ERASE, 47250780241920, 47250780241920,
+STORE, 47250780241920, 47250781900799,
+STORE, 47250781900800, 47250781941759,
+STORE, 47250781585408, 47250781900799,
+STORE, 47250780241920, 47250781585407,
+ERASE, 47250780241920, 47250780241920,
+STORE, 47250780241920, 47250781585407,
+STORE, 47250781896704, 47250781900799,
+STORE, 47250781585408, 47250781896703,
+ERASE, 47250781585408, 47250781585408,
+STORE, 47250781585408, 47250781896703,
+STORE, 47250781925376, 47250781941759,
+STORE, 47250781900800, 47250781925375,
+ERASE, 47250781900800, 47250781900800,
+STORE, 47250781900800, 47250781925375,
+ERASE, 47250781925376, 47250781925376,
+STORE, 47250781925376, 47250781941759,
+STORE, 47250781925376, 47250781954047,
+ERASE, 47250781900800, 47250781900800,
+STORE, 47250781900800, 47250781917183,
+STORE, 47250781917184, 47250781925375,
+ERASE, 47250780086272, 47250780086272,
+STORE, 47250780086272, 47250780090367,
+STORE, 47250780090368, 47250780094463,
+ERASE, 94425324986368, 94425324986368,
+STORE, 94425324986368, 94425325002751,
+STORE, 94425325002752, 94425325006847,
+ERASE, 140382015176704, 140382015176704,
+STORE, 140382015176704, 140382015180799,
+STORE, 140382015180800, 140382015184895,
+ERASE, 47250779979776, 47250779979776,
+STORE, 94425351438336, 94425351573503,
+STORE, 140737488347136, 140737488351231,
+STORE, 140736801144832, 140737488351231,
+ERASE, 140736801144832, 140736801144832,
+STORE, 140736801144832, 140736801148927,
+STORE, 94629429358592, 94629430071295,
+ERASE, 94629429358592, 94629429358592,
+STORE, 94629429358592, 94629429407743,
+STORE, 94629429407744, 94629430071295,
+ERASE, 94629429407744, 94629429407744,
+STORE, 94629429407744, 94629429952511,
+STORE, 94629429952512, 94629430050815,
+STORE, 94629430050816, 94629430071295,
+STORE, 139801685483520, 139801685655551,
+ERASE, 139801685483520, 139801685483520,
+STORE, 139801685483520, 139801685487615,
+STORE, 139801685487616, 139801685655551,
+ERASE, 139801685487616, 139801685487616,
+STORE, 139801685487616, 139801685610495,
+STORE, 139801685610496, 139801685643263,
+STORE, 139801685643264, 139801685651455,
+STORE, 139801685651456, 139801685655551,
+STORE, 140736801198080, 140736801202175,
+STORE, 140736801185792, 140736801198079,
+STORE, 47831109513216, 47831109521407,
+STORE, 47831109521408, 47831109529599,
+STORE, 47831109529600, 47831109636095,
+STORE, 47831109545984, 47831109636095,
+STORE, 47831109529600, 47831109545983,
+ERASE, 47831109545984, 47831109545984,
+STORE, 47831109545984, 47831109619711,
+STORE, 47831109619712, 47831109636095,
+STORE, 47831109599232, 47831109619711,
+STORE, 47831109545984, 47831109599231,
+ERASE, 47831109545984, 47831109545984,
+STORE, 47831109545984, 47831109599231,
+STORE, 47831109615616, 47831109619711,
+STORE, 47831109599232, 47831109615615,
+ERASE, 47831109599232, 47831109599232,
+STORE, 47831109599232, 47831109615615,
+STORE, 47831109627904, 47831109636095,
+STORE, 47831109619712, 47831109627903,
+ERASE, 47831109619712, 47831109619712,
+STORE, 47831109619712, 47831109627903,
+ERASE, 47831109627904, 47831109627904,
+STORE, 47831109627904, 47831109636095,
+STORE, 47831109636096, 47831111475199,
+STORE, 47831109775360, 47831111475199,
+STORE, 47831109636096, 47831109775359,
+ERASE, 47831109775360, 47831109775360,
+STORE, 47831109775360, 47831111434239,
+STORE, 47831111434240, 47831111475199,
+STORE, 47831111118848, 47831111434239,
+STORE, 47831109775360, 47831111118847,
+ERASE, 47831109775360, 47831109775360,
+STORE, 47831109775360, 47831111118847,
+STORE, 47831111430144, 47831111434239,
+STORE, 47831111118848, 47831111430143,
+ERASE, 47831111118848, 47831111118848,
+STORE, 47831111118848, 47831111430143,
+STORE, 47831111458816, 47831111475199,
+STORE, 47831111434240, 47831111458815,
+ERASE, 47831111434240, 47831111434240,
+STORE, 47831111434240, 47831111458815,
+ERASE, 47831111458816, 47831111458816,
+STORE, 47831111458816, 47831111475199,
+STORE, 47831111458816, 47831111487487,
+ERASE, 47831111434240, 47831111434240,
+STORE, 47831111434240, 47831111450623,
+STORE, 47831111450624, 47831111458815,
+ERASE, 47831109619712, 47831109619712,
+STORE, 47831109619712, 47831109623807,
+STORE, 47831109623808, 47831109627903,
+ERASE, 94629430050816, 94629430050816,
+STORE, 94629430050816, 94629430067199,
+STORE, 94629430067200, 94629430071295,
+ERASE, 139801685643264, 139801685643264,
+STORE, 139801685643264, 139801685647359,
+STORE, 139801685647360, 139801685651455,
+ERASE, 47831109513216, 47831109513216,
+STORE, 140737488347136, 140737488351231,
+STORE, 140729419612160, 140737488351231,
+ERASE, 140729419612160, 140729419612160,
+STORE, 140729419612160, 140729419616255,
+STORE, 94443354148864, 94443354861567,
+ERASE, 94443354148864, 94443354148864,
+STORE, 94443354148864, 94443354198015,
+STORE, 94443354198016, 94443354861567,
+ERASE, 94443354198016, 94443354198016,
+STORE, 94443354198016, 94443354742783,
+STORE, 94443354742784, 94443354841087,
+STORE, 94443354841088, 94443354861567,
+STORE, 139741700038656, 139741700210687,
+ERASE, 139741700038656, 139741700038656,
+STORE, 139741700038656, 139741700042751,
+STORE, 139741700042752, 139741700210687,
+ERASE, 139741700042752, 139741700042752,
+STORE, 139741700042752, 139741700165631,
+STORE, 139741700165632, 139741700198399,
+STORE, 139741700198400, 139741700206591,
+STORE, 139741700206592, 139741700210687,
+STORE, 140729420574720, 140729420578815,
+STORE, 140729420562432, 140729420574719,
+STORE, 47891094958080, 47891094966271,
+STORE, 47891094966272, 47891094974463,
+STORE, 47891094974464, 47891095080959,
+STORE, 47891094990848, 47891095080959,
+STORE, 47891094974464, 47891094990847,
+ERASE, 47891094990848, 47891094990848,
+STORE, 47891094990848, 47891095064575,
+STORE, 47891095064576, 47891095080959,
+STORE, 47891095044096, 47891095064575,
+STORE, 47891094990848, 47891095044095,
+ERASE, 47891094990848, 47891094990848,
+STORE, 47891094990848, 47891095044095,
+STORE, 47891095060480, 47891095064575,
+STORE, 47891095044096, 47891095060479,
+ERASE, 47891095044096, 47891095044096,
+STORE, 47891095044096, 47891095060479,
+STORE, 47891095072768, 47891095080959,
+STORE, 47891095064576, 47891095072767,
+ERASE, 47891095064576, 47891095064576,
+STORE, 47891095064576, 47891095072767,
+ERASE, 47891095072768, 47891095072768,
+STORE, 47891095072768, 47891095080959,
+STORE, 47891095080960, 47891096920063,
+STORE, 47891095220224, 47891096920063,
+STORE, 47891095080960, 47891095220223,
+ERASE, 47891095220224, 47891095220224,
+STORE, 47891095220224, 47891096879103,
+STORE, 47891096879104, 47891096920063,
+STORE, 47891096563712, 47891096879103,
+STORE, 47891095220224, 47891096563711,
+ERASE, 47891095220224, 47891095220224,
+STORE, 47891095220224, 47891096563711,
+STORE, 47891096875008, 47891096879103,
+STORE, 47891096563712, 47891096875007,
+ERASE, 47891096563712, 47891096563712,
+STORE, 47891096563712, 47891096875007,
+STORE, 47891096903680, 47891096920063,
+STORE, 47891096879104, 47891096903679,
+ERASE, 47891096879104, 47891096879104,
+STORE, 47891096879104, 47891096903679,
+ERASE, 47891096903680, 47891096903680,
+STORE, 47891096903680, 47891096920063,
+STORE, 47891096903680, 47891096932351,
+ERASE, 47891096879104, 47891096879104,
+STORE, 47891096879104, 47891096895487,
+STORE, 47891096895488, 47891096903679,
+ERASE, 47891095064576, 47891095064576,
+STORE, 47891095064576, 47891095068671,
+STORE, 47891095068672, 47891095072767,
+ERASE, 94443354841088, 94443354841088,
+STORE, 94443354841088, 94443354857471,
+STORE, 94443354857472, 94443354861567,
+ERASE, 139741700198400, 139741700198400,
+STORE, 139741700198400, 139741700202495,
+STORE, 139741700202496, 139741700206591,
+ERASE, 47891094958080, 47891094958080,
+STORE, 94443360825344, 94443360960511,
+STORE, 140737488347136, 140737488351231,
+STORE, 140722961661952, 140737488351231,
+ERASE, 140722961661952, 140722961661952,
+STORE, 140722961661952, 140722961666047,
+STORE, 94878388944896, 94878389657599,
+ERASE, 94878388944896, 94878388944896,
+STORE, 94878388944896, 94878388994047,
+STORE, 94878388994048, 94878389657599,
+ERASE, 94878388994048, 94878388994048,
+STORE, 94878388994048, 94878389538815,
+STORE, 94878389538816, 94878389637119,
+STORE, 94878389637120, 94878389657599,
+STORE, 140210690056192, 140210690228223,
+ERASE, 140210690056192, 140210690056192,
+STORE, 140210690056192, 140210690060287,
+STORE, 140210690060288, 140210690228223,
+ERASE, 140210690060288, 140210690060288,
+STORE, 140210690060288, 140210690183167,
+STORE, 140210690183168, 140210690215935,
+STORE, 140210690215936, 140210690224127,
+STORE, 140210690224128, 140210690228223,
+STORE, 140722963148800, 140722963152895,
+STORE, 140722963136512, 140722963148799,
+STORE, 47422104940544, 47422104948735,
+STORE, 47422104948736, 47422104956927,
+STORE, 47422104956928, 47422105063423,
+STORE, 47422104973312, 47422105063423,
+STORE, 47422104956928, 47422104973311,
+ERASE, 47422104973312, 47422104973312,
+STORE, 47422104973312, 47422105047039,
+STORE, 47422105047040, 47422105063423,
+STORE, 47422105026560, 47422105047039,
+STORE, 47422104973312, 47422105026559,
+ERASE, 47422104973312, 47422104973312,
+STORE, 47422104973312, 47422105026559,
+STORE, 47422105042944, 47422105047039,
+STORE, 47422105026560, 47422105042943,
+ERASE, 47422105026560, 47422105026560,
+STORE, 47422105026560, 47422105042943,
+STORE, 47422105055232, 47422105063423,
+STORE, 47422105047040, 47422105055231,
+ERASE, 47422105047040, 47422105047040,
+STORE, 47422105047040, 47422105055231,
+ERASE, 47422105055232, 47422105055232,
+STORE, 47422105055232, 47422105063423,
+STORE, 47422105063424, 47422106902527,
+STORE, 47422105202688, 47422106902527,
+STORE, 47422105063424, 47422105202687,
+ERASE, 47422105202688, 47422105202688,
+STORE, 47422105202688, 47422106861567,
+STORE, 47422106861568, 47422106902527,
+STORE, 47422106546176, 47422106861567,
+STORE, 47422105202688, 47422106546175,
+ERASE, 47422105202688, 47422105202688,
+STORE, 47422105202688, 47422106546175,
+STORE, 47422106857472, 47422106861567,
+STORE, 47422106546176, 47422106857471,
+ERASE, 47422106546176, 47422106546176,
+STORE, 47422106546176, 47422106857471,
+STORE, 47422106886144, 47422106902527,
+STORE, 47422106861568, 47422106886143,
+ERASE, 47422106861568, 47422106861568,
+STORE, 47422106861568, 47422106886143,
+ERASE, 47422106886144, 47422106886144,
+STORE, 47422106886144, 47422106902527,
+STORE, 47422106886144, 47422106914815,
+ERASE, 47422106861568, 47422106861568,
+STORE, 47422106861568, 47422106877951,
+STORE, 47422106877952, 47422106886143,
+ERASE, 47422105047040, 47422105047040,
+STORE, 47422105047040, 47422105051135,
+STORE, 47422105051136, 47422105055231,
+ERASE, 94878389637120, 94878389637120,
+STORE, 94878389637120, 94878389653503,
+STORE, 94878389653504, 94878389657599,
+ERASE, 140210690215936, 140210690215936,
+STORE, 140210690215936, 140210690220031,
+STORE, 140210690220032, 140210690224127,
+ERASE, 47422104940544, 47422104940544,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727690309632, 140737488351231,
+ERASE, 140727690309632, 140727690309632,
+STORE, 140727690309632, 140727690313727,
+STORE, 94121892208640, 94121892921343,
+ERASE, 94121892208640, 94121892208640,
+STORE, 94121892208640, 94121892257791,
+STORE, 94121892257792, 94121892921343,
+ERASE, 94121892257792, 94121892257792,
+STORE, 94121892257792, 94121892802559,
+STORE, 94121892802560, 94121892900863,
+STORE, 94121892900864, 94121892921343,
+STORE, 140662438326272, 140662438498303,
+ERASE, 140662438326272, 140662438326272,
+STORE, 140662438326272, 140662438330367,
+STORE, 140662438330368, 140662438498303,
+ERASE, 140662438330368, 140662438330368,
+STORE, 140662438330368, 140662438453247,
+STORE, 140662438453248, 140662438486015,
+STORE, 140662438486016, 140662438494207,
+STORE, 140662438494208, 140662438498303,
+STORE, 140727690379264, 140727690383359,
+STORE, 140727690366976, 140727690379263,
+STORE, 46970356670464, 46970356678655,
+STORE, 46970356678656, 46970356686847,
+STORE, 46970356686848, 46970356793343,
+STORE, 46970356703232, 46970356793343,
+STORE, 46970356686848, 46970356703231,
+ERASE, 46970356703232, 46970356703232,
+STORE, 46970356703232, 46970356776959,
+STORE, 46970356776960, 46970356793343,
+STORE, 46970356756480, 46970356776959,
+STORE, 46970356703232, 46970356756479,
+ERASE, 46970356703232, 46970356703232,
+STORE, 46970356703232, 46970356756479,
+STORE, 46970356772864, 46970356776959,
+STORE, 46970356756480, 46970356772863,
+ERASE, 46970356756480, 46970356756480,
+STORE, 46970356756480, 46970356772863,
+STORE, 46970356785152, 46970356793343,
+STORE, 46970356776960, 46970356785151,
+ERASE, 46970356776960, 46970356776960,
+STORE, 46970356776960, 46970356785151,
+ERASE, 46970356785152, 46970356785152,
+STORE, 46970356785152, 46970356793343,
+STORE, 46970356793344, 46970358632447,
+STORE, 46970356932608, 46970358632447,
+STORE, 46970356793344, 46970356932607,
+ERASE, 46970356932608, 46970356932608,
+STORE, 46970356932608, 46970358591487,
+STORE, 46970358591488, 46970358632447,
+STORE, 46970358276096, 46970358591487,
+STORE, 46970356932608, 46970358276095,
+ERASE, 46970356932608, 46970356932608,
+STORE, 46970356932608, 46970358276095,
+STORE, 46970358587392, 46970358591487,
+STORE, 46970358276096, 46970358587391,
+ERASE, 46970358276096, 46970358276096,
+STORE, 46970358276096, 46970358587391,
+STORE, 46970358616064, 46970358632447,
+STORE, 46970358591488, 46970358616063,
+ERASE, 46970358591488, 46970358591488,
+STORE, 46970358591488, 46970358616063,
+ERASE, 46970358616064, 46970358616064,
+STORE, 46970358616064, 46970358632447,
+STORE, 46970358616064, 46970358644735,
+ERASE, 46970358591488, 46970358591488,
+STORE, 46970358591488, 46970358607871,
+STORE, 46970358607872, 46970358616063,
+ERASE, 46970356776960, 46970356776960,
+STORE, 46970356776960, 46970356781055,
+STORE, 46970356781056, 46970356785151,
+ERASE, 94121892900864, 94121892900864,
+STORE, 94121892900864, 94121892917247,
+STORE, 94121892917248, 94121892921343,
+ERASE, 140662438486016, 140662438486016,
+STORE, 140662438486016, 140662438490111,
+STORE, 140662438490112, 140662438494207,
+ERASE, 46970356670464, 46970356670464,
+STORE, 94121898610688, 94121898745855,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737189351424, 140737488351231,
+ERASE, 140737189351424, 140737189351424,
+STORE, 140737189351424, 140737189355519,
+STORE, 93847948832768, 93847949545471,
+ERASE, 93847948832768, 93847948832768,
+STORE, 93847948832768, 93847948881919,
+STORE, 93847948881920, 93847949545471,
+ERASE, 93847948881920, 93847948881920,
+STORE, 93847948881920, 93847949426687,
+STORE, 93847949426688, 93847949524991,
+STORE, 93847949524992, 93847949545471,
+STORE, 139698989985792, 139698990157823,
+ERASE, 139698989985792, 139698989985792,
+STORE, 139698989985792, 139698989989887,
+STORE, 139698989989888, 139698990157823,
+ERASE, 139698989989888, 139698989989888,
+STORE, 139698989989888, 139698990112767,
+STORE, 139698990112768, 139698990145535,
+STORE, 139698990145536, 139698990153727,
+STORE, 139698990153728, 139698990157823,
+STORE, 140737189744640, 140737189748735,
+STORE, 140737189732352, 140737189744639,
+STORE, 47933805010944, 47933805019135,
+STORE, 47933805019136, 47933805027327,
+STORE, 47933805027328, 47933805133823,
+STORE, 47933805043712, 47933805133823,
+STORE, 47933805027328, 47933805043711,
+ERASE, 47933805043712, 47933805043712,
+STORE, 47933805043712, 47933805117439,
+STORE, 47933805117440, 47933805133823,
+STORE, 47933805096960, 47933805117439,
+STORE, 47933805043712, 47933805096959,
+ERASE, 47933805043712, 47933805043712,
+STORE, 47933805043712, 47933805096959,
+STORE, 47933805113344, 47933805117439,
+STORE, 47933805096960, 47933805113343,
+ERASE, 47933805096960, 47933805096960,
+STORE, 47933805096960, 47933805113343,
+STORE, 47933805125632, 47933805133823,
+STORE, 47933805117440, 47933805125631,
+ERASE, 47933805117440, 47933805117440,
+STORE, 47933805117440, 47933805125631,
+ERASE, 47933805125632, 47933805125632,
+STORE, 47933805125632, 47933805133823,
+STORE, 47933805133824, 47933806972927,
+STORE, 47933805273088, 47933806972927,
+STORE, 47933805133824, 47933805273087,
+ERASE, 47933805273088, 47933805273088,
+STORE, 47933805273088, 47933806931967,
+STORE, 47933806931968, 47933806972927,
+STORE, 47933806616576, 47933806931967,
+STORE, 47933805273088, 47933806616575,
+ERASE, 47933805273088, 47933805273088,
+STORE, 47933805273088, 47933806616575,
+STORE, 47933806927872, 47933806931967,
+STORE, 47933806616576, 47933806927871,
+ERASE, 47933806616576, 47933806616576,
+STORE, 47933806616576, 47933806927871,
+STORE, 47933806956544, 47933806972927,
+STORE, 47933806931968, 47933806956543,
+ERASE, 47933806931968, 47933806931968,
+STORE, 47933806931968, 47933806956543,
+ERASE, 47933806956544, 47933806956544,
+STORE, 47933806956544, 47933806972927,
+STORE, 47933806956544, 47933806985215,
+ERASE, 47933806931968, 47933806931968,
+STORE, 47933806931968, 47933806948351,
+STORE, 47933806948352, 47933806956543,
+ERASE, 47933805117440, 47933805117440,
+STORE, 47933805117440, 47933805121535,
+STORE, 47933805121536, 47933805125631,
+ERASE, 93847949524992, 93847949524992,
+STORE, 93847949524992, 93847949541375,
+STORE, 93847949541376, 93847949545471,
+ERASE, 139698990145536, 139698990145536,
+STORE, 139698990145536, 139698990149631,
+STORE, 139698990149632, 139698990153727,
+ERASE, 47933805010944, 47933805010944,
+STORE, 140737488347136, 140737488351231,
+STORE, 140725553991680, 140737488351231,
+ERASE, 140725553991680, 140725553991680,
+STORE, 140725553991680, 140725553995775,
+STORE, 93980056248320, 93980056961023,
+ERASE, 93980056248320, 93980056248320,
+STORE, 93980056248320, 93980056297471,
+STORE, 93980056297472, 93980056961023,
+ERASE, 93980056297472, 93980056297472,
+STORE, 93980056297472, 93980056842239,
+STORE, 93980056842240, 93980056940543,
+STORE, 93980056940544, 93980056961023,
+STORE, 140146588971008, 140146589143039,
+ERASE, 140146588971008, 140146588971008,
+STORE, 140146588971008, 140146588975103,
+STORE, 140146588975104, 140146589143039,
+ERASE, 140146588975104, 140146588975104,
+STORE, 140146588975104, 140146589097983,
+STORE, 140146589097984, 140146589130751,
+STORE, 140146589130752, 140146589138943,
+STORE, 140146589138944, 140146589143039,
+STORE, 140725554860032, 140725554864127,
+STORE, 140725554847744, 140725554860031,
+STORE, 47486206025728, 47486206033919,
+STORE, 47486206033920, 47486206042111,
+STORE, 47486206042112, 47486206148607,
+STORE, 47486206058496, 47486206148607,
+STORE, 47486206042112, 47486206058495,
+ERASE, 47486206058496, 47486206058496,
+STORE, 47486206058496, 47486206132223,
+STORE, 47486206132224, 47486206148607,
+STORE, 47486206111744, 47486206132223,
+STORE, 47486206058496, 47486206111743,
+ERASE, 47486206058496, 47486206058496,
+STORE, 47486206058496, 47486206111743,
+STORE, 47486206128128, 47486206132223,
+STORE, 47486206111744, 47486206128127,
+ERASE, 47486206111744, 47486206111744,
+STORE, 47486206111744, 47486206128127,
+STORE, 47486206140416, 47486206148607,
+STORE, 47486206132224, 47486206140415,
+ERASE, 47486206132224, 47486206132224,
+STORE, 47486206132224, 47486206140415,
+ERASE, 47486206140416, 47486206140416,
+STORE, 47486206140416, 47486206148607,
+STORE, 47486206148608, 47486207987711,
+STORE, 47486206287872, 47486207987711,
+STORE, 47486206148608, 47486206287871,
+ERASE, 47486206287872, 47486206287872,
+STORE, 47486206287872, 47486207946751,
+STORE, 47486207946752, 47486207987711,
+STORE, 47486207631360, 47486207946751,
+STORE, 47486206287872, 47486207631359,
+ERASE, 47486206287872, 47486206287872,
+STORE, 47486206287872, 47486207631359,
+STORE, 47486207942656, 47486207946751,
+STORE, 47486207631360, 47486207942655,
+ERASE, 47486207631360, 47486207631360,
+STORE, 47486207631360, 47486207942655,
+STORE, 47486207971328, 47486207987711,
+STORE, 47486207946752, 47486207971327,
+ERASE, 47486207946752, 47486207946752,
+STORE, 47486207946752, 47486207971327,
+ERASE, 47486207971328, 47486207971328,
+STORE, 47486207971328, 47486207987711,
+STORE, 47486207971328, 47486207999999,
+ERASE, 47486207946752, 47486207946752,
+STORE, 47486207946752, 47486207963135,
+STORE, 47486207963136, 47486207971327,
+ERASE, 47486206132224, 47486206132224,
+STORE, 47486206132224, 47486206136319,
+STORE, 47486206136320, 47486206140415,
+ERASE, 93980056940544, 93980056940544,
+STORE, 93980056940544, 93980056956927,
+STORE, 93980056956928, 93980056961023,
+ERASE, 140146589130752, 140146589130752,
+STORE, 140146589130752, 140146589134847,
+STORE, 140146589134848, 140146589138943,
+ERASE, 47486206025728, 47486206025728,
+STORE, 93980070006784, 93980070141951,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727334776832, 140737488351231,
+ERASE, 140727334776832, 140727334776832,
+STORE, 140727334776832, 140727334780927,
+STORE, 94049747247104, 94049747959807,
+ERASE, 94049747247104, 94049747247104,
+STORE, 94049747247104, 94049747296255,
+STORE, 94049747296256, 94049747959807,
+ERASE, 94049747296256, 94049747296256,
+STORE, 94049747296256, 94049747841023,
+STORE, 94049747841024, 94049747939327,
+STORE, 94049747939328, 94049747959807,
+STORE, 140227307216896, 140227307388927,
+ERASE, 140227307216896, 140227307216896,
+STORE, 140227307216896, 140227307220991,
+STORE, 140227307220992, 140227307388927,
+ERASE, 140227307220992, 140227307220992,
+STORE, 140227307220992, 140227307343871,
+STORE, 140227307343872, 140227307376639,
+STORE, 140227307376640, 140227307384831,
+STORE, 140227307384832, 140227307388927,
+STORE, 140727335337984, 140727335342079,
+STORE, 140727335325696, 140727335337983,
+STORE, 47405487779840, 47405487788031,
+STORE, 47405487788032, 47405487796223,
+STORE, 47405487796224, 47405487902719,
+STORE, 47405487812608, 47405487902719,
+STORE, 47405487796224, 47405487812607,
+ERASE, 47405487812608, 47405487812608,
+STORE, 47405487812608, 47405487886335,
+STORE, 47405487886336, 47405487902719,
+STORE, 47405487865856, 47405487886335,
+STORE, 47405487812608, 47405487865855,
+ERASE, 47405487812608, 47405487812608,
+STORE, 47405487812608, 47405487865855,
+STORE, 47405487882240, 47405487886335,
+STORE, 47405487865856, 47405487882239,
+ERASE, 47405487865856, 47405487865856,
+STORE, 47405487865856, 47405487882239,
+STORE, 47405487894528, 47405487902719,
+STORE, 47405487886336, 47405487894527,
+ERASE, 47405487886336, 47405487886336,
+STORE, 47405487886336, 47405487894527,
+ERASE, 47405487894528, 47405487894528,
+STORE, 47405487894528, 47405487902719,
+STORE, 47405487902720, 47405489741823,
+STORE, 47405488041984, 47405489741823,
+STORE, 47405487902720, 47405488041983,
+ERASE, 47405488041984, 47405488041984,
+STORE, 47405488041984, 47405489700863,
+STORE, 47405489700864, 47405489741823,
+STORE, 47405489385472, 47405489700863,
+STORE, 47405488041984, 47405489385471,
+ERASE, 47405488041984, 47405488041984,
+STORE, 47405488041984, 47405489385471,
+STORE, 47405489696768, 47405489700863,
+STORE, 47405489385472, 47405489696767,
+ERASE, 47405489385472, 47405489385472,
+STORE, 47405489385472, 47405489696767,
+STORE, 47405489725440, 47405489741823,
+STORE, 47405489700864, 47405489725439,
+ERASE, 47405489700864, 47405489700864,
+STORE, 47405489700864, 47405489725439,
+ERASE, 47405489725440, 47405489725440,
+STORE, 47405489725440, 47405489741823,
+STORE, 47405489725440, 47405489754111,
+ERASE, 47405489700864, 47405489700864,
+STORE, 47405489700864, 47405489717247,
+STORE, 47405489717248, 47405489725439,
+ERASE, 47405487886336, 47405487886336,
+STORE, 47405487886336, 47405487890431,
+STORE, 47405487890432, 47405487894527,
+ERASE, 94049747939328, 94049747939328,
+STORE, 94049747939328, 94049747955711,
+STORE, 94049747955712, 94049747959807,
+ERASE, 140227307376640, 140227307376640,
+STORE, 140227307376640, 140227307380735,
+STORE, 140227307380736, 140227307384831,
+ERASE, 47405487779840, 47405487779840,
+STORE, 94049758810112, 94049758945279,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727079718912, 140737488351231,
+ERASE, 140727079718912, 140727079718912,
+STORE, 140727079718912, 140727079723007,
+STORE, 94250996527104, 94250997239807,
+ERASE, 94250996527104, 94250996527104,
+STORE, 94250996527104, 94250996576255,
+STORE, 94250996576256, 94250997239807,
+ERASE, 94250996576256, 94250996576256,
+STORE, 94250996576256, 94250997121023,
+STORE, 94250997121024, 94250997219327,
+STORE, 94250997219328, 94250997239807,
+STORE, 140060022587392, 140060022759423,
+ERASE, 140060022587392, 140060022587392,
+STORE, 140060022587392, 140060022591487,
+STORE, 140060022591488, 140060022759423,
+ERASE, 140060022591488, 140060022591488,
+STORE, 140060022591488, 140060022714367,
+STORE, 140060022714368, 140060022747135,
+STORE, 140060022747136, 140060022755327,
+STORE, 140060022755328, 140060022759423,
+STORE, 140727079788544, 140727079792639,
+STORE, 140727079776256, 140727079788543,
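+/*
+ * The triples below follow the pattern <op>, <start>, <end>: STORE appears
+ * to record an entry over the inclusive range [start, end], while ERASE
+ * (start == end throughout this set) removes the entry containing that
+ * index. The values look like recorded user VMA activity replayed against
+ * the tree; note the recurring 140737488351231 (0x7fffffffffff) userspace
+ * ceiling. A minimal, illustrative sketch of replaying one triple, assuming
+ * the mtree_store_range()/mtree_erase() maple tree API (not part of this
+ * data set):
+ *
+ *	if (op == STORE)
+ *		mtree_store_range(mt, start, end, xa_mk_value(start), GFP_KERNEL);
+ *	else
+ *		mtree_erase(mt, start);
+ */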
+/* the next sequence caused issues when the efficiency was lowered */
+STORE, 47572772409344, 47572772417535,
+STORE, 47572772417536, 47572772425727,
+STORE, 47572772425728, 47572772532223,
+STORE, 47572772442112, 47572772532223,
+STORE, 47572772425728, 47572772442111,
+ERASE, 47572772442112, 47572772442112,
+STORE, 47572772442112, 47572772515839,
+STORE, 47572772515840, 47572772532223,
+STORE, 47572772495360, 47572772515839,
+STORE, 47572772442112, 47572772495359,
+ERASE, 47572772442112, 47572772442112,
+STORE, 47572772442112, 47572772495359,
+STORE, 47572772511744, 47572772515839,
+STORE, 47572772495360, 47572772511743,
+ERASE, 47572772495360, 47572772495360,
+STORE, 47572772495360, 47572772511743,
+STORE, 47572772524032, 47572772532223,
+STORE, 47572772515840, 47572772524031,
+ERASE, 47572772515840, 47572772515840,
+STORE, 47572772515840, 47572772524031,
+ERASE, 47572772524032, 47572772524032,
+STORE, 47572772524032, 47572772532223,
+STORE, 47572772532224, 47572774371327,
+STORE, 47572772671488, 47572774371327,
+STORE, 47572772532224, 47572772671487,
+ERASE, 47572772671488, 47572772671488,
+STORE, 47572772671488, 47572774330367,
+STORE, 47572774330368, 47572774371327,
+STORE, 47572774014976, 47572774330367,
+STORE, 47572772671488, 47572774014975,
+ERASE, 47572772671488, 47572772671488,
+STORE, 47572772671488, 47572774014975,
+STORE, 47572774326272, 47572774330367,
+STORE, 47572774014976, 47572774326271,
+ERASE, 47572774014976, 47572774014976,
+STORE, 47572774014976, 47572774326271,
+STORE, 47572774354944, 47572774371327,
+STORE, 47572774330368, 47572774354943,
+ERASE, 47572774330368, 47572774330368,
+STORE, 47572774330368, 47572774354943,
+ERASE, 47572774354944, 47572774354944,
+STORE, 47572774354944, 47572774371327,
+STORE, 47572774354944, 47572774383615,
+ERASE, 47572774330368, 47572774330368,
+STORE, 47572774330368, 47572774346751,
+STORE, 47572774346752, 47572774354943,
+ERASE, 47572772515840, 47572772515840,
+STORE, 47572772515840, 47572772519935,
+STORE, 47572772519936, 47572772524031,
+ERASE, 94250997219328, 94250997219328,
+STORE, 94250997219328, 94250997235711,
+STORE, 94250997235712, 94250997239807,
+ERASE, 140060022747136, 140060022747136,
+STORE, 140060022747136, 140060022751231,
+STORE, 140060022751232, 140060022755327,
+ERASE, 47572772409344, 47572772409344,
+STORE, 94251018305536, 94251018440703,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730012389376, 140737488351231,
+ERASE, 140730012389376, 140730012389376,
+STORE, 140730012389376, 140730012393471,
+STORE, 94382607675392, 94382607695871,
+ERASE, 94382607675392, 94382607675392,
+STORE, 94382607675392, 94382607679487,
+STORE, 94382607679488, 94382607695871,
+ERASE, 94382607679488, 94382607679488,
+STORE, 94382607679488, 94382607683583,
+STORE, 94382607683584, 94382607687679,
+STORE, 94382607687680, 94382607695871,
+STORE, 140252451454976, 140252451627007,
+ERASE, 140252451454976, 140252451454976,
+STORE, 140252451454976, 140252451459071,
+STORE, 140252451459072, 140252451627007,
+ERASE, 140252451459072, 140252451459072,
+STORE, 140252451459072, 140252451581951,
+STORE, 140252451581952, 140252451614719,
+STORE, 140252451614720, 140252451622911,
+STORE, 140252451622912, 140252451627007,
+STORE, 140730013548544, 140730013552639,
+STORE, 140730013536256, 140730013548543,
+STORE, 47380343541760, 47380343549951,
+STORE, 47380343549952, 47380343558143,
+STORE, 47380343558144, 47380345397247,
+STORE, 47380343697408, 47380345397247,
+STORE, 47380343558144, 47380343697407,
+ERASE, 47380343697408, 47380343697408,
+STORE, 47380343697408, 47380345356287,
+STORE, 47380345356288, 47380345397247,
+STORE, 47380345040896, 47380345356287,
+STORE, 47380343697408, 47380345040895,
+ERASE, 47380343697408, 47380343697408,
+STORE, 47380343697408, 47380345040895,
+STORE, 47380345352192, 47380345356287,
+STORE, 47380345040896, 47380345352191,
+ERASE, 47380345040896, 47380345040896,
+STORE, 47380345040896, 47380345352191,
+STORE, 47380345380864, 47380345397247,
+STORE, 47380345356288, 47380345380863,
+ERASE, 47380345356288, 47380345356288,
+STORE, 47380345356288, 47380345380863,
+ERASE, 47380345380864, 47380345380864,
+STORE, 47380345380864, 47380345397247,
+ERASE, 47380345356288, 47380345356288,
+STORE, 47380345356288, 47380345372671,
+STORE, 47380345372672, 47380345380863,
+ERASE, 94382607687680, 94382607687680,
+STORE, 94382607687680, 94382607691775,
+STORE, 94382607691776, 94382607695871,
+ERASE, 140252451614720, 140252451614720,
+STORE, 140252451614720, 140252451618815,
+STORE, 140252451618816, 140252451622911,
+ERASE, 47380343541760, 47380343541760,
+STORE, 94382626803712, 94382626938879,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730900271104, 140737488351231,
+ERASE, 140730900271104, 140730900271104,
+STORE, 140730900271104, 140730900275199,
+STORE, 93855478120448, 93855478337535,
+ERASE, 93855478120448, 93855478120448,
+STORE, 93855478120448, 93855478198271,
+STORE, 93855478198272, 93855478337535,
+ERASE, 93855478198272, 93855478198272,
+STORE, 93855478198272, 93855478243327,
+STORE, 93855478243328, 93855478288383,
+STORE, 93855478288384, 93855478337535,
+STORE, 140092686573568, 140092686745599,
+ERASE, 140092686573568, 140092686573568,
+STORE, 140092686573568, 140092686577663,
+STORE, 140092686577664, 140092686745599,
+ERASE, 140092686577664, 140092686577664,
+STORE, 140092686577664, 140092686700543,
+STORE, 140092686700544, 140092686733311,
+STORE, 140092686733312, 140092686741503,
+STORE, 140092686741504, 140092686745599,
+STORE, 140730900537344, 140730900541439,
+STORE, 140730900525056, 140730900537343,
+STORE, 47540108423168, 47540108431359,
+STORE, 47540108431360, 47540108439551,
+STORE, 47540108439552, 47540110278655,
+STORE, 47540108578816, 47540110278655,
+STORE, 47540108439552, 47540108578815,
+ERASE, 47540108578816, 47540108578816,
+STORE, 47540108578816, 47540110237695,
+STORE, 47540110237696, 47540110278655,
+STORE, 47540109922304, 47540110237695,
+STORE, 47540108578816, 47540109922303,
+ERASE, 47540108578816, 47540108578816,
+STORE, 47540108578816, 47540109922303,
+STORE, 47540110233600, 47540110237695,
+STORE, 47540109922304, 47540110233599,
+ERASE, 47540109922304, 47540109922304,
+STORE, 47540109922304, 47540110233599,
+STORE, 47540110262272, 47540110278655,
+STORE, 47540110237696, 47540110262271,
+ERASE, 47540110237696, 47540110237696,
+STORE, 47540110237696, 47540110262271,
+ERASE, 47540110262272, 47540110262272,
+STORE, 47540110262272, 47540110278655,
+ERASE, 47540110237696, 47540110237696,
+STORE, 47540110237696, 47540110254079,
+STORE, 47540110254080, 47540110262271,
+ERASE, 93855478288384, 93855478288384,
+STORE, 93855478288384, 93855478333439,
+STORE, 93855478333440, 93855478337535,
+ERASE, 140092686733312, 140092686733312,
+STORE, 140092686733312, 140092686737407,
+STORE, 140092686737408, 140092686741503,
+ERASE, 47540108423168, 47540108423168,
+STORE, 93855492222976, 93855492358143,
+STORE, 93855492222976, 93855492493311,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733498146816, 140737488351231,
+ERASE, 140733498146816, 140733498146816,
+STORE, 140733498146816, 140733498150911,
+STORE, 94170739654656, 94170740367359,
+ERASE, 94170739654656, 94170739654656,
+STORE, 94170739654656, 94170739703807,
+STORE, 94170739703808, 94170740367359,
+ERASE, 94170739703808, 94170739703808,
+STORE, 94170739703808, 94170740248575,
+STORE, 94170740248576, 94170740346879,
+STORE, 94170740346880, 94170740367359,
+STORE, 140024788877312, 140024789049343,
+ERASE, 140024788877312, 140024788877312,
+STORE, 140024788877312, 140024788881407,
+STORE, 140024788881408, 140024789049343,
+ERASE, 140024788881408, 140024788881408,
+STORE, 140024788881408, 140024789004287,
+STORE, 140024789004288, 140024789037055,
+STORE, 140024789037056, 140024789045247,
+STORE, 140024789045248, 140024789049343,
+STORE, 140733499023360, 140733499027455,
+STORE, 140733499011072, 140733499023359,
+STORE, 47608006119424, 47608006127615,
+STORE, 47608006127616, 47608006135807,
+STORE, 47608006135808, 47608006242303,
+STORE, 47608006152192, 47608006242303,
+STORE, 47608006135808, 47608006152191,
+ERASE, 47608006152192, 47608006152192,
+STORE, 47608006152192, 47608006225919,
+STORE, 47608006225920, 47608006242303,
+STORE, 47608006205440, 47608006225919,
+STORE, 47608006152192, 47608006205439,
+ERASE, 47608006152192, 47608006152192,
+STORE, 47608006152192, 47608006205439,
+STORE, 47608006221824, 47608006225919,
+STORE, 47608006205440, 47608006221823,
+ERASE, 47608006205440, 47608006205440,
+STORE, 47608006205440, 47608006221823,
+STORE, 47608006234112, 47608006242303,
+STORE, 47608006225920, 47608006234111,
+ERASE, 47608006225920, 47608006225920,
+STORE, 47608006225920, 47608006234111,
+ERASE, 47608006234112, 47608006234112,
+STORE, 47608006234112, 47608006242303,
+STORE, 47608006242304, 47608008081407,
+STORE, 47608006381568, 47608008081407,
+STORE, 47608006242304, 47608006381567,
+ERASE, 47608006381568, 47608006381568,
+STORE, 47608006381568, 47608008040447,
+STORE, 47608008040448, 47608008081407,
+STORE, 47608007725056, 47608008040447,
+STORE, 47608006381568, 47608007725055,
+ERASE, 47608006381568, 47608006381568,
+STORE, 47608006381568, 47608007725055,
+STORE, 47608008036352, 47608008040447,
+STORE, 47608007725056, 47608008036351,
+ERASE, 47608007725056, 47608007725056,
+STORE, 47608007725056, 47608008036351,
+STORE, 47608008065024, 47608008081407,
+STORE, 47608008040448, 47608008065023,
+ERASE, 47608008040448, 47608008040448,
+STORE, 47608008040448, 47608008065023,
+ERASE, 47608008065024, 47608008065024,
+STORE, 47608008065024, 47608008081407,
+STORE, 47608008065024, 47608008093695,
+ERASE, 47608008040448, 47608008040448,
+STORE, 47608008040448, 47608008056831,
+STORE, 47608008056832, 47608008065023,
+ERASE, 47608006225920, 47608006225920,
+STORE, 47608006225920, 47608006230015,
+STORE, 47608006230016, 47608006234111,
+ERASE, 94170740346880, 94170740346880,
+STORE, 94170740346880, 94170740363263,
+STORE, 94170740363264, 94170740367359,
+ERASE, 140024789037056, 140024789037056,
+STORE, 140024789037056, 140024789041151,
+STORE, 140024789041152, 140024789045247,
+ERASE, 47608006119424, 47608006119424,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730264326144, 140737488351231,
+ERASE, 140730264326144, 140730264326144,
+STORE, 140730264326144, 140730264330239,
+STORE, 94653216407552, 94653217120255,
+ERASE, 94653216407552, 94653216407552,
+STORE, 94653216407552, 94653216456703,
+STORE, 94653216456704, 94653217120255,
+ERASE, 94653216456704, 94653216456704,
+STORE, 94653216456704, 94653217001471,
+STORE, 94653217001472, 94653217099775,
+STORE, 94653217099776, 94653217120255,
+STORE, 140103617011712, 140103617183743,
+ERASE, 140103617011712, 140103617011712,
+STORE, 140103617011712, 140103617015807,
+STORE, 140103617015808, 140103617183743,
+ERASE, 140103617015808, 140103617015808,
+STORE, 140103617015808, 140103617138687,
+STORE, 140103617138688, 140103617171455,
+STORE, 140103617171456, 140103617179647,
+STORE, 140103617179648, 140103617183743,
+STORE, 140730265427968, 140730265432063,
+STORE, 140730265415680, 140730265427967,
+STORE, 47529177985024, 47529177993215,
+STORE, 47529177993216, 47529178001407,
+STORE, 47529178001408, 47529178107903,
+STORE, 47529178017792, 47529178107903,
+STORE, 47529178001408, 47529178017791,
+ERASE, 47529178017792, 47529178017792,
+STORE, 47529178017792, 47529178091519,
+STORE, 47529178091520, 47529178107903,
+STORE, 47529178071040, 47529178091519,
+STORE, 47529178017792, 47529178071039,
+ERASE, 47529178017792, 47529178017792,
+STORE, 47529178017792, 47529178071039,
+STORE, 47529178087424, 47529178091519,
+STORE, 47529178071040, 47529178087423,
+ERASE, 47529178071040, 47529178071040,
+STORE, 47529178071040, 47529178087423,
+STORE, 47529178099712, 47529178107903,
+STORE, 47529178091520, 47529178099711,
+ERASE, 47529178091520, 47529178091520,
+STORE, 47529178091520, 47529178099711,
+ERASE, 47529178099712, 47529178099712,
+STORE, 47529178099712, 47529178107903,
+STORE, 47529178107904, 47529179947007,
+STORE, 47529178247168, 47529179947007,
+STORE, 47529178107904, 47529178247167,
+ERASE, 47529178247168, 47529178247168,
+STORE, 47529178247168, 47529179906047,
+STORE, 47529179906048, 47529179947007,
+STORE, 47529179590656, 47529179906047,
+STORE, 47529178247168, 47529179590655,
+ERASE, 47529178247168, 47529178247168,
+STORE, 47529178247168, 47529179590655,
+STORE, 47529179901952, 47529179906047,
+STORE, 47529179590656, 47529179901951,
+ERASE, 47529179590656, 47529179590656,
+STORE, 47529179590656, 47529179901951,
+STORE, 47529179930624, 47529179947007,
+STORE, 47529179906048, 47529179930623,
+ERASE, 47529179906048, 47529179906048,
+STORE, 47529179906048, 47529179930623,
+ERASE, 47529179930624, 47529179930624,
+STORE, 47529179930624, 47529179947007,
+STORE, 47529179930624, 47529179959295,
+ERASE, 47529179906048, 47529179906048,
+STORE, 47529179906048, 47529179922431,
+STORE, 47529179922432, 47529179930623,
+ERASE, 47529178091520, 47529178091520,
+STORE, 47529178091520, 47529178095615,
+STORE, 47529178095616, 47529178099711,
+ERASE, 94653217099776, 94653217099776,
+STORE, 94653217099776, 94653217116159,
+STORE, 94653217116160, 94653217120255,
+ERASE, 140103617171456, 140103617171456,
+STORE, 140103617171456, 140103617175551,
+STORE, 140103617175552, 140103617179647,
+ERASE, 47529177985024, 47529177985024,
+STORE, 94653241135104, 94653241270271,
+STORE, 140737488347136, 140737488351231,
+STORE, 140736284549120, 140737488351231,
+ERASE, 140736284549120, 140736284549120,
+STORE, 140736284549120, 140736284553215,
+STORE, 93963663822848, 93963664506879,
+ERASE, 93963663822848, 93963663822848,
+STORE, 93963663822848, 93963663884287,
+STORE, 93963663884288, 93963664506879,
+ERASE, 93963663884288, 93963663884288,
+STORE, 93963663884288, 93963664240639,
+STORE, 93963664240640, 93963664379903,
+STORE, 93963664379904, 93963664506879,
+STORE, 140450188439552, 140450188611583,
+ERASE, 140450188439552, 140450188439552,
+STORE, 140450188439552, 140450188443647,
+STORE, 140450188443648, 140450188611583,
+ERASE, 140450188443648, 140450188443648,
+STORE, 140450188443648, 140450188566527,
+STORE, 140450188566528, 140450188599295,
+STORE, 140450188599296, 140450188607487,
+STORE, 140450188607488, 140450188611583,
+STORE, 140736284577792, 140736284581887,
+STORE, 140736284565504, 140736284577791,
+STORE, 47182606557184, 47182606565375,
+STORE, 47182606565376, 47182606573567,
+STORE, 47182606573568, 47182608412671,
+STORE, 47182606712832, 47182608412671,
+STORE, 47182606573568, 47182606712831,
+ERASE, 47182606712832, 47182606712832,
+STORE, 47182606712832, 47182608371711,
+STORE, 47182608371712, 47182608412671,
+STORE, 47182608056320, 47182608371711,
+STORE, 47182606712832, 47182608056319,
+ERASE, 47182606712832, 47182606712832,
+STORE, 47182606712832, 47182608056319,
+STORE, 47182608367616, 47182608371711,
+STORE, 47182608056320, 47182608367615,
+ERASE, 47182608056320, 47182608056320,
+STORE, 47182608056320, 47182608367615,
+STORE, 47182608396288, 47182608412671,
+STORE, 47182608371712, 47182608396287,
+ERASE, 47182608371712, 47182608371712,
+STORE, 47182608371712, 47182608396287,
+ERASE, 47182608396288, 47182608396288,
+STORE, 47182608396288, 47182608412671,
+STORE, 47182608412672, 47182608523263,
+STORE, 47182608429056, 47182608523263,
+STORE, 47182608412672, 47182608429055,
+ERASE, 47182608429056, 47182608429056,
+STORE, 47182608429056, 47182608515071,
+STORE, 47182608515072, 47182608523263,
+STORE, 47182608490496, 47182608515071,
+STORE, 47182608429056, 47182608490495,
+ERASE, 47182608429056, 47182608429056,
+STORE, 47182608429056, 47182608490495,
+STORE, 47182608510976, 47182608515071,
+STORE, 47182608490496, 47182608510975,
+ERASE, 47182608490496, 47182608490496,
+STORE, 47182608490496, 47182608510975,
+ERASE, 47182608515072, 47182608515072,
+STORE, 47182608515072, 47182608523263,
+STORE, 47182608523264, 47182608568319,
+ERASE, 47182608523264, 47182608523264,
+STORE, 47182608523264, 47182608531455,
+STORE, 47182608531456, 47182608568319,
+STORE, 47182608551936, 47182608568319,
+STORE, 47182608531456, 47182608551935,
+ERASE, 47182608531456, 47182608531456,
+STORE, 47182608531456, 47182608551935,
+STORE, 47182608560128, 47182608568319,
+STORE, 47182608551936, 47182608560127,
+ERASE, 47182608551936, 47182608551936,
+STORE, 47182608551936, 47182608568319,
+ERASE, 47182608551936, 47182608551936,
+STORE, 47182608551936, 47182608560127,
+STORE, 47182608560128, 47182608568319,
+ERASE, 47182608560128, 47182608560128,
+STORE, 47182608560128, 47182608568319,
+STORE, 47182608568320, 47182608916479,
+STORE, 47182608609280, 47182608916479,
+STORE, 47182608568320, 47182608609279,
+ERASE, 47182608609280, 47182608609280,
+STORE, 47182608609280, 47182608891903,
+STORE, 47182608891904, 47182608916479,
+STORE, 47182608822272, 47182608891903,
+STORE, 47182608609280, 47182608822271,
+ERASE, 47182608609280, 47182608609280,
+STORE, 47182608609280, 47182608822271,
+STORE, 47182608887808, 47182608891903,
+STORE, 47182608822272, 47182608887807,
+ERASE, 47182608822272, 47182608822272,
+STORE, 47182608822272, 47182608887807,
+ERASE, 47182608891904, 47182608891904,
+STORE, 47182608891904, 47182608916479,
+STORE, 47182608916480, 47182611177471,
+STORE, 47182609068032, 47182611177471,
+STORE, 47182608916480, 47182609068031,
+ERASE, 47182609068032, 47182609068032,
+STORE, 47182609068032, 47182611161087,
+STORE, 47182611161088, 47182611177471,
+STORE, 47182611169280, 47182611177471,
+STORE, 47182611161088, 47182611169279,
+ERASE, 47182611161088, 47182611161088,
+STORE, 47182611161088, 47182611169279,
+ERASE, 47182611169280, 47182611169280,
+STORE, 47182611169280, 47182611177471,
+STORE, 47182611177472, 47182611312639,
+ERASE, 47182611177472, 47182611177472,
+STORE, 47182611177472, 47182611202047,
+STORE, 47182611202048, 47182611312639,
+STORE, 47182611263488, 47182611312639,
+STORE, 47182611202048, 47182611263487,
+ERASE, 47182611202048, 47182611202048,
+STORE, 47182611202048, 47182611263487,
+STORE, 47182611288064, 47182611312639,
+STORE, 47182611263488, 47182611288063,
+ERASE, 47182611263488, 47182611263488,
+STORE, 47182611263488, 47182611312639,
+ERASE, 47182611263488, 47182611263488,
+STORE, 47182611263488, 47182611288063,
+STORE, 47182611288064, 47182611312639,
+STORE, 47182611296256, 47182611312639,
+STORE, 47182611288064, 47182611296255,
+ERASE, 47182611288064, 47182611288064,
+STORE, 47182611288064, 47182611296255,
+ERASE, 47182611296256, 47182611296256,
+STORE, 47182611296256, 47182611312639,
+STORE, 47182611296256, 47182611320831,
+STORE, 47182611320832, 47182611484671,
+ERASE, 47182611320832, 47182611320832,
+STORE, 47182611320832, 47182611333119,
+STORE, 47182611333120, 47182611484671,
+STORE, 47182611431424, 47182611484671,
+STORE, 47182611333120, 47182611431423,
+ERASE, 47182611333120, 47182611333120,
+STORE, 47182611333120, 47182611431423,
+STORE, 47182611476480, 47182611484671,
+STORE, 47182611431424, 47182611476479,
+ERASE, 47182611431424, 47182611431424,
+STORE, 47182611431424, 47182611484671,
+ERASE, 47182611431424, 47182611431424,
+STORE, 47182611431424, 47182611476479,
+STORE, 47182611476480, 47182611484671,
+ERASE, 47182611476480, 47182611476480,
+STORE, 47182611476480, 47182611484671,
+STORE, 47182611484672, 47182612082687,
+STORE, 47182611603456, 47182612082687,
+STORE, 47182611484672, 47182611603455,
+ERASE, 47182611603456, 47182611603456,
+STORE, 47182611603456, 47182612029439,
+STORE, 47182612029440, 47182612082687,
+STORE, 47182611918848, 47182612029439,
+STORE, 47182611603456, 47182611918847,
+ERASE, 47182611603456, 47182611603456,
+STORE, 47182611603456, 47182611918847,
+STORE, 47182612025344, 47182612029439,
+STORE, 47182611918848, 47182612025343,
+ERASE, 47182611918848, 47182611918848,
+STORE, 47182611918848, 47182612025343,
+ERASE, 47182612029440, 47182612029440,
+STORE, 47182612029440, 47182612082687,
+STORE, 47182612082688, 47182615134207,
+STORE, 47182612627456, 47182615134207,
+STORE, 47182612082688, 47182612627455,
+ERASE, 47182612627456, 47182612627456,
+STORE, 47182612627456, 47182614913023,
+STORE, 47182614913024, 47182615134207,
+STORE, 47182614323200, 47182614913023,
+STORE, 47182612627456, 47182614323199,
+ERASE, 47182612627456, 47182612627456,
+STORE, 47182612627456, 47182614323199,
+STORE, 47182614908928, 47182614913023,
+STORE, 47182614323200, 47182614908927,
+ERASE, 47182614323200, 47182614323200,
+STORE, 47182614323200, 47182614908927,
+STORE, 47182615117824, 47182615134207,
+STORE, 47182614913024, 47182615117823,
+ERASE, 47182614913024, 47182614913024,
+STORE, 47182614913024, 47182615117823,
+ERASE, 47182615117824, 47182615117824,
+STORE, 47182615117824, 47182615134207,
+STORE, 47182615134208, 47182615166975,
+ERASE, 47182615134208, 47182615134208,
+STORE, 47182615134208, 47182615142399,
+STORE, 47182615142400, 47182615166975,
+STORE, 47182615154688, 47182615166975,
+STORE, 47182615142400, 47182615154687,
+ERASE, 47182615142400, 47182615142400,
+STORE, 47182615142400, 47182615154687,
+STORE, 47182615158784, 47182615166975,
+STORE, 47182615154688, 47182615158783,
+ERASE, 47182615154688, 47182615154688,
+STORE, 47182615154688, 47182615166975,
+ERASE, 47182615154688, 47182615154688,
+STORE, 47182615154688, 47182615158783,
+STORE, 47182615158784, 47182615166975,
+ERASE, 47182615158784, 47182615158784,
+STORE, 47182615158784, 47182615166975,
+STORE, 47182615166976, 47182615203839,
+ERASE, 47182615166976, 47182615166976,
+STORE, 47182615166976, 47182615175167,
+STORE, 47182615175168, 47182615203839,
+STORE, 47182615191552, 47182615203839,
+STORE, 47182615175168, 47182615191551,
+ERASE, 47182615175168, 47182615175168,
+STORE, 47182615175168, 47182615191551,
+STORE, 47182615195648, 47182615203839,
+STORE, 47182615191552, 47182615195647,
+ERASE, 47182615191552, 47182615191552,
+STORE, 47182615191552, 47182615203839,
+ERASE, 47182615191552, 47182615191552,
+STORE, 47182615191552, 47182615195647,
+STORE, 47182615195648, 47182615203839,
+ERASE, 47182615195648, 47182615195648,
+STORE, 47182615195648, 47182615203839,
+STORE, 47182615203840, 47182615678975,
+ERASE, 47182615203840, 47182615203840,
+STORE, 47182615203840, 47182615212031,
+STORE, 47182615212032, 47182615678975,
+STORE, 47182615547904, 47182615678975,
+STORE, 47182615212032, 47182615547903,
+ERASE, 47182615212032, 47182615212032,
+STORE, 47182615212032, 47182615547903,
+STORE, 47182615670784, 47182615678975,
+STORE, 47182615547904, 47182615670783,
+ERASE, 47182615547904, 47182615547904,
+STORE, 47182615547904, 47182615678975,
+ERASE, 47182615547904, 47182615547904,
+STORE, 47182615547904, 47182615670783,
+STORE, 47182615670784, 47182615678975,
+ERASE, 47182615670784, 47182615670784,
+STORE, 47182615670784, 47182615678975,
+STORE, 47182615678976, 47182615687167,
+STORE, 47182615687168, 47182615707647,
+ERASE, 47182615687168, 47182615687168,
+STORE, 47182615687168, 47182615691263,
+STORE, 47182615691264, 47182615707647,
+STORE, 47182615695360, 47182615707647,
+STORE, 47182615691264, 47182615695359,
+ERASE, 47182615691264, 47182615691264,
+STORE, 47182615691264, 47182615695359,
+STORE, 47182615699456, 47182615707647,
+STORE, 47182615695360, 47182615699455,
+ERASE, 47182615695360, 47182615695360,
+STORE, 47182615695360, 47182615707647,
+ERASE, 47182615695360, 47182615695360,
+STORE, 47182615695360, 47182615699455,
+STORE, 47182615699456, 47182615707647,
+ERASE, 47182615699456, 47182615699456,
+STORE, 47182615699456, 47182615707647,
+STORE, 47182615707648, 47182615715839,
+ERASE, 47182608371712, 47182608371712,
+STORE, 47182608371712, 47182608388095,
+STORE, 47182608388096, 47182608396287,
+ERASE, 47182615699456, 47182615699456,
+STORE, 47182615699456, 47182615703551,
+STORE, 47182615703552, 47182615707647,
+ERASE, 47182611288064, 47182611288064,
+STORE, 47182611288064, 47182611292159,
+STORE, 47182611292160, 47182611296255,
+ERASE, 47182615670784, 47182615670784,
+STORE, 47182615670784, 47182615674879,
+STORE, 47182615674880, 47182615678975,
+ERASE, 47182615195648, 47182615195648,
+STORE, 47182615195648, 47182615199743,
+STORE, 47182615199744, 47182615203839,
+ERASE, 47182615158784, 47182615158784,
+STORE, 47182615158784, 47182615162879,
+STORE, 47182615162880, 47182615166975,
+ERASE, 47182614913024, 47182614913024,
+STORE, 47182614913024, 47182615109631,
+STORE, 47182615109632, 47182615117823,
+ERASE, 47182612029440, 47182612029440,
+STORE, 47182612029440, 47182612066303,
+STORE, 47182612066304, 47182612082687,
+ERASE, 47182611476480, 47182611476480,
+STORE, 47182611476480, 47182611480575,
+STORE, 47182611480576, 47182611484671,
+ERASE, 47182611161088, 47182611161088,
+STORE, 47182611161088, 47182611165183,
+STORE, 47182611165184, 47182611169279,
+ERASE, 47182608891904, 47182608891904,
+STORE, 47182608891904, 47182608912383,
+STORE, 47182608912384, 47182608916479,
+ERASE, 47182608560128, 47182608560128,
+STORE, 47182608560128, 47182608564223,
+STORE, 47182608564224, 47182608568319,
+ERASE, 47182608515072, 47182608515072,
+STORE, 47182608515072, 47182608519167,
+STORE, 47182608519168, 47182608523263,
+ERASE, 93963664379904, 93963664379904,
+STORE, 93963664379904, 93963664502783,
+STORE, 93963664502784, 93963664506879,
+ERASE, 140450188599296, 140450188599296,
+STORE, 140450188599296, 140450188603391,
+STORE, 140450188603392, 140450188607487,
+ERASE, 47182606557184, 47182606557184,
+STORE, 93963694723072, 93963694858239,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730313261056, 140737488351231,
+ERASE, 140730313261056, 140730313261056,
+STORE, 140730313261056, 140730313265151,
+STORE, 94386579017728, 94386579697663,
+ERASE, 94386579017728, 94386579017728,
+STORE, 94386579017728, 94386579083263,
+STORE, 94386579083264, 94386579697663,
+ERASE, 94386579083264, 94386579083264,
+STORE, 94386579083264, 94386579431423,
+STORE, 94386579431424, 94386579570687,
+STORE, 94386579570688, 94386579697663,
+STORE, 140124810838016, 140124811010047,
+ERASE, 140124810838016, 140124810838016,
+STORE, 140124810838016, 140124810842111,
+STORE, 140124810842112, 140124811010047,
+ERASE, 140124810842112, 140124810842112,
+STORE, 140124810842112, 140124810964991,
+STORE, 140124810964992, 140124810997759,
+STORE, 140124810997760, 140124811005951,
+STORE, 140124811005952, 140124811010047,
+STORE, 140730313601024, 140730313605119,
+STORE, 140730313588736, 140730313601023,
+STORE, 47507984158720, 47507984166911,
+STORE, 47507984166912, 47507984175103,
+STORE, 47507984175104, 47507986014207,
+STORE, 47507984314368, 47507986014207,
+STORE, 47507984175104, 47507984314367,
+ERASE, 47507984314368, 47507984314368,
+STORE, 47507984314368, 47507985973247,
+STORE, 47507985973248, 47507986014207,
+STORE, 47507985657856, 47507985973247,
+STORE, 47507984314368, 47507985657855,
+ERASE, 47507984314368, 47507984314368,
+STORE, 47507984314368, 47507985657855,
+STORE, 47507985969152, 47507985973247,
+STORE, 47507985657856, 47507985969151,
+ERASE, 47507985657856, 47507985657856,
+STORE, 47507985657856, 47507985969151,
+STORE, 47507985997824, 47507986014207,
+STORE, 47507985973248, 47507985997823,
+ERASE, 47507985973248, 47507985973248,
+STORE, 47507985973248, 47507985997823,
+ERASE, 47507985997824, 47507985997824,
+STORE, 47507985997824, 47507986014207,
+STORE, 47507986014208, 47507986124799,
+STORE, 47507986030592, 47507986124799,
+STORE, 47507986014208, 47507986030591,
+ERASE, 47507986030592, 47507986030592,
+STORE, 47507986030592, 47507986116607,
+STORE, 47507986116608, 47507986124799,
+STORE, 47507986092032, 47507986116607,
+STORE, 47507986030592, 47507986092031,
+ERASE, 47507986030592, 47507986030592,
+STORE, 47507986030592, 47507986092031,
+STORE, 47507986112512, 47507986116607,
+STORE, 47507986092032, 47507986112511,
+ERASE, 47507986092032, 47507986092032,
+STORE, 47507986092032, 47507986112511,
+ERASE, 47507986116608, 47507986116608,
+STORE, 47507986116608, 47507986124799,
+STORE, 47507986124800, 47507986169855,
+ERASE, 47507986124800, 47507986124800,
+STORE, 47507986124800, 47507986132991,
+STORE, 47507986132992, 47507986169855,
+STORE, 47507986153472, 47507986169855,
+STORE, 47507986132992, 47507986153471,
+ERASE, 47507986132992, 47507986132992,
+STORE, 47507986132992, 47507986153471,
+STORE, 47507986161664, 47507986169855,
+STORE, 47507986153472, 47507986161663,
+ERASE, 47507986153472, 47507986153472,
+STORE, 47507986153472, 47507986169855,
+ERASE, 47507986153472, 47507986153472,
+STORE, 47507986153472, 47507986161663,
+STORE, 47507986161664, 47507986169855,
+ERASE, 47507986161664, 47507986161664,
+STORE, 47507986161664, 47507986169855,
+STORE, 47507986169856, 47507986518015,
+STORE, 47507986210816, 47507986518015,
+STORE, 47507986169856, 47507986210815,
+ERASE, 47507986210816, 47507986210816,
+STORE, 47507986210816, 47507986493439,
+STORE, 47507986493440, 47507986518015,
+STORE, 47507986423808, 47507986493439,
+STORE, 47507986210816, 47507986423807,
+ERASE, 47507986210816, 47507986210816,
+STORE, 47507986210816, 47507986423807,
+STORE, 47507986489344, 47507986493439,
+STORE, 47507986423808, 47507986489343,
+ERASE, 47507986423808, 47507986423808,
+STORE, 47507986423808, 47507986489343,
+ERASE, 47507986493440, 47507986493440,
+STORE, 47507986493440, 47507986518015,
+STORE, 47507986518016, 47507988779007,
+STORE, 47507986669568, 47507988779007,
+STORE, 47507986518016, 47507986669567,
+ERASE, 47507986669568, 47507986669568,
+STORE, 47507986669568, 47507988762623,
+STORE, 47507988762624, 47507988779007,
+STORE, 47507988770816, 47507988779007,
+STORE, 47507988762624, 47507988770815,
+ERASE, 47507988762624, 47507988762624,
+STORE, 47507988762624, 47507988770815,
+ERASE, 47507988770816, 47507988770816,
+STORE, 47507988770816, 47507988779007,
+STORE, 47507988779008, 47507988914175,
+ERASE, 47507988779008, 47507988779008,
+STORE, 47507988779008, 47507988803583,
+STORE, 47507988803584, 47507988914175,
+STORE, 47507988865024, 47507988914175,
+STORE, 47507988803584, 47507988865023,
+ERASE, 47507988803584, 47507988803584,
+STORE, 47507988803584, 47507988865023,
+STORE, 47507988889600, 47507988914175,
+STORE, 47507988865024, 47507988889599,
+ERASE, 47507988865024, 47507988865024,
+STORE, 47507988865024, 47507988914175,
+ERASE, 47507988865024, 47507988865024,
+STORE, 47507988865024, 47507988889599,
+STORE, 47507988889600, 47507988914175,
+STORE, 47507988897792, 47507988914175,
+STORE, 47507988889600, 47507988897791,
+ERASE, 47507988889600, 47507988889600,
+STORE, 47507988889600, 47507988897791,
+ERASE, 47507988897792, 47507988897792,
+STORE, 47507988897792, 47507988914175,
+STORE, 47507988897792, 47507988922367,
+STORE, 47507988922368, 47507989086207,
+ERASE, 47507988922368, 47507988922368,
+STORE, 47507988922368, 47507988934655,
+STORE, 47507988934656, 47507989086207,
+STORE, 47507989032960, 47507989086207,
+STORE, 47507988934656, 47507989032959,
+ERASE, 47507988934656, 47507988934656,
+STORE, 47507988934656, 47507989032959,
+STORE, 47507989078016, 47507989086207,
+STORE, 47507989032960, 47507989078015,
+ERASE, 47507989032960, 47507989032960,
+STORE, 47507989032960, 47507989086207,
+ERASE, 47507989032960, 47507989032960,
+STORE, 47507989032960, 47507989078015,
+STORE, 47507989078016, 47507989086207,
+ERASE, 47507989078016, 47507989078016,
+STORE, 47507989078016, 47507989086207,
+STORE, 47507989086208, 47507989684223,
+STORE, 47507989204992, 47507989684223,
+STORE, 47507989086208, 47507989204991,
+ERASE, 47507989204992, 47507989204992,
+STORE, 47507989204992, 47507989630975,
+STORE, 47507989630976, 47507989684223,
+STORE, 47507989520384, 47507989630975,
+STORE, 47507989204992, 47507989520383,
+ERASE, 47507989204992, 47507989204992,
+STORE, 47507989204992, 47507989520383,
+STORE, 47507989626880, 47507989630975,
+STORE, 47507989520384, 47507989626879,
+ERASE, 47507989520384, 47507989520384,
+STORE, 47507989520384, 47507989626879,
+ERASE, 47507989630976, 47507989630976,
+STORE, 47507989630976, 47507989684223,
+STORE, 47507989684224, 47507992735743,
+STORE, 47507990228992, 47507992735743,
+STORE, 47507989684224, 47507990228991,
+ERASE, 47507990228992, 47507990228992,
+STORE, 47507990228992, 47507992514559,
+STORE, 47507992514560, 47507992735743,
+STORE, 47507991924736, 47507992514559,
+STORE, 47507990228992, 47507991924735,
+ERASE, 47507990228992, 47507990228992,
+STORE, 47507990228992, 47507991924735,
+STORE, 47507992510464, 47507992514559,
+STORE, 47507991924736, 47507992510463,
+ERASE, 47507991924736, 47507991924736,
+STORE, 47507991924736, 47507992510463,
+STORE, 47507992719360, 47507992735743,
+STORE, 47507992514560, 47507992719359,
+ERASE, 47507992514560, 47507992514560,
+STORE, 47507992514560, 47507992719359,
+ERASE, 47507992719360, 47507992719360,
+STORE, 47507992719360, 47507992735743,
+STORE, 47507992735744, 47507992768511,
+ERASE, 47507992735744, 47507992735744,
+STORE, 47507992735744, 47507992743935,
+STORE, 47507992743936, 47507992768511,
+STORE, 47507992756224, 47507992768511,
+STORE, 47507992743936, 47507992756223,
+ERASE, 47507992743936, 47507992743936,
+STORE, 47507992743936, 47507992756223,
+STORE, 47507992760320, 47507992768511,
+STORE, 47507992756224, 47507992760319,
+ERASE, 47507992756224, 47507992756224,
+STORE, 47507992756224, 47507992768511,
+ERASE, 47507992756224, 47507992756224,
+STORE, 47507992756224, 47507992760319,
+STORE, 47507992760320, 47507992768511,
+ERASE, 47507992760320, 47507992760320,
+STORE, 47507992760320, 47507992768511,
+STORE, 47507992768512, 47507992805375,
+ERASE, 47507992768512, 47507992768512,
+STORE, 47507992768512, 47507992776703,
+STORE, 47507992776704, 47507992805375,
+STORE, 47507992793088, 47507992805375,
+STORE, 47507992776704, 47507992793087,
+ERASE, 47507992776704, 47507992776704,
+STORE, 47507992776704, 47507992793087,
+STORE, 47507992797184, 47507992805375,
+STORE, 47507992793088, 47507992797183,
+ERASE, 47507992793088, 47507992793088,
+STORE, 47507992793088, 47507992805375,
+ERASE, 47507992793088, 47507992793088,
+STORE, 47507992793088, 47507992797183,
+STORE, 47507992797184, 47507992805375,
+ERASE, 47507992797184, 47507992797184,
+STORE, 47507992797184, 47507992805375,
+STORE, 47507992805376, 47507993280511,
+ERASE, 47507992805376, 47507992805376,
+STORE, 47507992805376, 47507992813567,
+STORE, 47507992813568, 47507993280511,
+STORE, 47507993149440, 47507993280511,
+STORE, 47507992813568, 47507993149439,
+ERASE, 47507992813568, 47507992813568,
+STORE, 47507992813568, 47507993149439,
+STORE, 47507993272320, 47507993280511,
+STORE, 47507993149440, 47507993272319,
+ERASE, 47507993149440, 47507993149440,
+STORE, 47507993149440, 47507993280511,
+ERASE, 47507993149440, 47507993149440,
+STORE, 47507993149440, 47507993272319,
+STORE, 47507993272320, 47507993280511,
+ERASE, 47507993272320, 47507993272320,
+STORE, 47507993272320, 47507993280511,
+STORE, 47507993280512, 47507993288703,
+STORE, 47507993288704, 47507993309183,
+ERASE, 47507993288704, 47507993288704,
+STORE, 47507993288704, 47507993292799,
+STORE, 47507993292800, 47507993309183,
+STORE, 47507993296896, 47507993309183,
+STORE, 47507993292800, 47507993296895,
+ERASE, 47507993292800, 47507993292800,
+STORE, 47507993292800, 47507993296895,
+STORE, 47507993300992, 47507993309183,
+STORE, 47507993296896, 47507993300991,
+ERASE, 47507993296896, 47507993296896,
+STORE, 47507993296896, 47507993309183,
+ERASE, 47507993296896, 47507993296896,
+STORE, 47507993296896, 47507993300991,
+STORE, 47507993300992, 47507993309183,
+ERASE, 47507993300992, 47507993300992,
+STORE, 47507993300992, 47507993309183,
+STORE, 47507993309184, 47507993317375,
+ERASE, 47507985973248, 47507985973248,
+STORE, 47507985973248, 47507985989631,
+STORE, 47507985989632, 47507985997823,
+ERASE, 47507993300992, 47507993300992,
+STORE, 47507993300992, 47507993305087,
+STORE, 47507993305088, 47507993309183,
+ERASE, 47507988889600, 47507988889600,
+STORE, 47507988889600, 47507988893695,
+STORE, 47507988893696, 47507988897791,
+ERASE, 47507993272320, 47507993272320,
+STORE, 47507993272320, 47507993276415,
+STORE, 47507993276416, 47507993280511,
+ERASE, 47507992797184, 47507992797184,
+STORE, 47507992797184, 47507992801279,
+STORE, 47507992801280, 47507992805375,
+ERASE, 47507992760320, 47507992760320,
+STORE, 47507992760320, 47507992764415,
+STORE, 47507992764416, 47507992768511,
+ERASE, 47507992514560, 47507992514560,
+STORE, 47507992514560, 47507992711167,
+STORE, 47507992711168, 47507992719359,
+ERASE, 47507989630976, 47507989630976,
+STORE, 47507989630976, 47507989667839,
+STORE, 47507989667840, 47507989684223,
+ERASE, 47507989078016, 47507989078016,
+STORE, 47507989078016, 47507989082111,
+STORE, 47507989082112, 47507989086207,
+ERASE, 47507988762624, 47507988762624,
+STORE, 47507988762624, 47507988766719,
+STORE, 47507988766720, 47507988770815,
+ERASE, 47507986493440, 47507986493440,
+STORE, 47507986493440, 47507986513919,
+STORE, 47507986513920, 47507986518015,
+ERASE, 47507986161664, 47507986161664,
+STORE, 47507986161664, 47507986165759,
+STORE, 47507986165760, 47507986169855,
+ERASE, 47507986116608, 47507986116608,
+STORE, 47507986116608, 47507986120703,
+STORE, 47507986120704, 47507986124799,
+ERASE, 94386579570688, 94386579570688,
+STORE, 94386579570688, 94386579693567,
+STORE, 94386579693568, 94386579697663,
+ERASE, 140124810997760, 140124810997760,
+STORE, 140124810997760, 140124811001855,
+STORE, 140124811001856, 140124811005951,
+ERASE, 47507984158720, 47507984158720,
+STORE, 94386583982080, 94386584117247,
+STORE, 94386583982080, 94386584256511,
+ERASE, 94386583982080, 94386583982080,
+STORE, 94386583982080, 94386584223743,
+STORE, 94386584223744, 94386584256511,
+ERASE, 94386584223744, 94386584223744,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733763395584, 140737488351231,
+ERASE, 140733763395584, 140733763395584,
+STORE, 140733763395584, 140733763399679,
+STORE, 94011546472448, 94011547152383,
+ERASE, 94011546472448, 94011546472448,
+STORE, 94011546472448, 94011546537983,
+STORE, 94011546537984, 94011547152383,
+ERASE, 94011546537984, 94011546537984,
+STORE, 94011546537984, 94011546886143,
+STORE, 94011546886144, 94011547025407,
+STORE, 94011547025408, 94011547152383,
+STORE, 139757597949952, 139757598121983,
+ERASE, 139757597949952, 139757597949952,
+STORE, 139757597949952, 139757597954047,
+STORE, 139757597954048, 139757598121983,
+ERASE, 139757597954048, 139757597954048,
+STORE, 139757597954048, 139757598076927,
+STORE, 139757598076928, 139757598109695,
+STORE, 139757598109696, 139757598117887,
+STORE, 139757598117888, 139757598121983,
+STORE, 140733763596288, 140733763600383,
+STORE, 140733763584000, 140733763596287,
+STORE, 47875197046784, 47875197054975,
+STORE, 47875197054976, 47875197063167,
+STORE, 47875197063168, 47875198902271,
+STORE, 47875197202432, 47875198902271,
+STORE, 47875197063168, 47875197202431,
+ERASE, 47875197202432, 47875197202432,
+STORE, 47875197202432, 47875198861311,
+STORE, 47875198861312, 47875198902271,
+STORE, 47875198545920, 47875198861311,
+STORE, 47875197202432, 47875198545919,
+ERASE, 47875197202432, 47875197202432,
+STORE, 47875197202432, 47875198545919,
+STORE, 47875198857216, 47875198861311,
+STORE, 47875198545920, 47875198857215,
+ERASE, 47875198545920, 47875198545920,
+STORE, 47875198545920, 47875198857215,
+STORE, 47875198885888, 47875198902271,
+STORE, 47875198861312, 47875198885887,
+ERASE, 47875198861312, 47875198861312,
+STORE, 47875198861312, 47875198885887,
+ERASE, 47875198885888, 47875198885888,
+STORE, 47875198885888, 47875198902271,
+STORE, 47875198902272, 47875199012863,
+STORE, 47875198918656, 47875199012863,
+STORE, 47875198902272, 47875198918655,
+ERASE, 47875198918656, 47875198918656,
+STORE, 47875198918656, 47875199004671,
+STORE, 47875199004672, 47875199012863,
+STORE, 47875198980096, 47875199004671,
+STORE, 47875198918656, 47875198980095,
+ERASE, 47875198918656, 47875198918656,
+STORE, 47875198918656, 47875198980095,
+STORE, 47875199000576, 47875199004671,
+STORE, 47875198980096, 47875199000575,
+ERASE, 47875198980096, 47875198980096,
+STORE, 47875198980096, 47875199000575,
+ERASE, 47875199004672, 47875199004672,
+STORE, 47875199004672, 47875199012863,
+STORE, 47875199012864, 47875199057919,
+ERASE, 47875199012864, 47875199012864,
+STORE, 47875199012864, 47875199021055,
+STORE, 47875199021056, 47875199057919,
+STORE, 47875199041536, 47875199057919,
+STORE, 47875199021056, 47875199041535,
+ERASE, 47875199021056, 47875199021056,
+STORE, 47875199021056, 47875199041535,
+STORE, 47875199049728, 47875199057919,
+STORE, 47875199041536, 47875199049727,
+ERASE, 47875199041536, 47875199041536,
+STORE, 47875199041536, 47875199057919,
+ERASE, 47875199041536, 47875199041536,
+STORE, 47875199041536, 47875199049727,
+STORE, 47875199049728, 47875199057919,
+ERASE, 47875199049728, 47875199049728,
+STORE, 47875199049728, 47875199057919,
+STORE, 47875199057920, 47875199406079,
+STORE, 47875199098880, 47875199406079,
+STORE, 47875199057920, 47875199098879,
+ERASE, 47875199098880, 47875199098880,
+STORE, 47875199098880, 47875199381503,
+STORE, 47875199381504, 47875199406079,
+STORE, 47875199311872, 47875199381503,
+STORE, 47875199098880, 47875199311871,
+ERASE, 47875199098880, 47875199098880,
+STORE, 47875199098880, 47875199311871,
+STORE, 47875199377408, 47875199381503,
+STORE, 47875199311872, 47875199377407,
+ERASE, 47875199311872, 47875199311872,
+STORE, 47875199311872, 47875199377407,
+ERASE, 47875199381504, 47875199381504,
+STORE, 47875199381504, 47875199406079,
+STORE, 47875199406080, 47875201667071,
+STORE, 47875199557632, 47875201667071,
+STORE, 47875199406080, 47875199557631,
+ERASE, 47875199557632, 47875199557632,
+STORE, 47875199557632, 47875201650687,
+STORE, 47875201650688, 47875201667071,
+STORE, 47875201658880, 47875201667071,
+STORE, 47875201650688, 47875201658879,
+ERASE, 47875201650688, 47875201650688,
+STORE, 47875201650688, 47875201658879,
+ERASE, 47875201658880, 47875201658880,
+STORE, 47875201658880, 47875201667071,
+STORE, 47875201667072, 47875201802239,
+ERASE, 47875201667072, 47875201667072,
+STORE, 47875201667072, 47875201691647,
+STORE, 47875201691648, 47875201802239,
+STORE, 47875201753088, 47875201802239,
+STORE, 47875201691648, 47875201753087,
+ERASE, 47875201691648, 47875201691648,
+STORE, 47875201691648, 47875201753087,
+STORE, 47875201777664, 47875201802239,
+STORE, 47875201753088, 47875201777663,
+ERASE, 47875201753088, 47875201753088,
+STORE, 47875201753088, 47875201802239,
+ERASE, 47875201753088, 47875201753088,
+STORE, 47875201753088, 47875201777663,
+STORE, 47875201777664, 47875201802239,
+STORE, 47875201785856, 47875201802239,
+STORE, 47875201777664, 47875201785855,
+ERASE, 47875201777664, 47875201777664,
+STORE, 47875201777664, 47875201785855,
+ERASE, 47875201785856, 47875201785856,
+STORE, 47875201785856, 47875201802239,
+STORE, 47875201785856, 47875201810431,
+STORE, 47875201810432, 47875201974271,
+ERASE, 47875201810432, 47875201810432,
+STORE, 47875201810432, 47875201822719,
+STORE, 47875201822720, 47875201974271,
+STORE, 47875201921024, 47875201974271,
+STORE, 47875201822720, 47875201921023,
+ERASE, 47875201822720, 47875201822720,
+STORE, 47875201822720, 47875201921023,
+STORE, 47875201966080, 47875201974271,
+STORE, 47875201921024, 47875201966079,
+ERASE, 47875201921024, 47875201921024,
+STORE, 47875201921024, 47875201974271,
+ERASE, 47875201921024, 47875201921024,
+STORE, 47875201921024, 47875201966079,
+STORE, 47875201966080, 47875201974271,
+ERASE, 47875201966080, 47875201966080,
+STORE, 47875201966080, 47875201974271,
+STORE, 47875201974272, 47875202572287,
+STORE, 47875202093056, 47875202572287,
+STORE, 47875201974272, 47875202093055,
+ERASE, 47875202093056, 47875202093056,
+STORE, 47875202093056, 47875202519039,
+STORE, 47875202519040, 47875202572287,
+STORE, 47875202408448, 47875202519039,
+STORE, 47875202093056, 47875202408447,
+ERASE, 47875202093056, 47875202093056,
+STORE, 47875202093056, 47875202408447,
+STORE, 47875202514944, 47875202519039,
+STORE, 47875202408448, 47875202514943,
+ERASE, 47875202408448, 47875202408448,
+STORE, 47875202408448, 47875202514943,
+ERASE, 47875202519040, 47875202519040,
+STORE, 47875202519040, 47875202572287,
+STORE, 47875202572288, 47875205623807,
+STORE, 47875203117056, 47875205623807,
+STORE, 47875202572288, 47875203117055,
+ERASE, 47875203117056, 47875203117056,
+STORE, 47875203117056, 47875205402623,
+STORE, 47875205402624, 47875205623807,
+STORE, 47875204812800, 47875205402623,
+STORE, 47875203117056, 47875204812799,
+ERASE, 47875203117056, 47875203117056,
+STORE, 47875203117056, 47875204812799,
+STORE, 47875205398528, 47875205402623,
+STORE, 47875204812800, 47875205398527,
+ERASE, 47875204812800, 47875204812800,
+STORE, 47875204812800, 47875205398527,
+STORE, 47875205607424, 47875205623807,
+STORE, 47875205402624, 47875205607423,
+ERASE, 47875205402624, 47875205402624,
+STORE, 47875205402624, 47875205607423,
+ERASE, 47875205607424, 47875205607424,
+STORE, 47875205607424, 47875205623807,
+STORE, 47875205623808, 47875205656575,
+ERASE, 47875205623808, 47875205623808,
+STORE, 47875205623808, 47875205631999,
+STORE, 47875205632000, 47875205656575,
+STORE, 47875205644288, 47875205656575,
+STORE, 47875205632000, 47875205644287,
+ERASE, 47875205632000, 47875205632000,
+STORE, 47875205632000, 47875205644287,
+STORE, 47875205648384, 47875205656575,
+STORE, 47875205644288, 47875205648383,
+ERASE, 47875205644288, 47875205644288,
+STORE, 47875205644288, 47875205656575,
+ERASE, 47875205644288, 47875205644288,
+STORE, 47875205644288, 47875205648383,
+STORE, 47875205648384, 47875205656575,
+ERASE, 47875205648384, 47875205648384,
+STORE, 47875205648384, 47875205656575,
+STORE, 47875205656576, 47875205693439,
+ERASE, 47875205656576, 47875205656576,
+STORE, 47875205656576, 47875205664767,
+STORE, 47875205664768, 47875205693439,
+STORE, 47875205681152, 47875205693439,
+STORE, 47875205664768, 47875205681151,
+ERASE, 47875205664768, 47875205664768,
+STORE, 47875205664768, 47875205681151,
+STORE, 47875205685248, 47875205693439,
+STORE, 47875205681152, 47875205685247,
+ERASE, 47875205681152, 47875205681152,
+STORE, 47875205681152, 47875205693439,
+ERASE, 47875205681152, 47875205681152,
+STORE, 47875205681152, 47875205685247,
+STORE, 47875205685248, 47875205693439,
+ERASE, 47875205685248, 47875205685248,
+STORE, 47875205685248, 47875205693439,
+STORE, 47875205693440, 47875206168575,
+ERASE, 47875205693440, 47875205693440,
+STORE, 47875205693440, 47875205701631,
+STORE, 47875205701632, 47875206168575,
+STORE, 47875206037504, 47875206168575,
+STORE, 47875205701632, 47875206037503,
+ERASE, 47875205701632, 47875205701632,
+STORE, 47875205701632, 47875206037503,
+STORE, 47875206160384, 47875206168575,
+STORE, 47875206037504, 47875206160383,
+ERASE, 47875206037504, 47875206037504,
+STORE, 47875206037504, 47875206168575,
+ERASE, 47875206037504, 47875206037504,
+STORE, 47875206037504, 47875206160383,
+STORE, 47875206160384, 47875206168575,
+ERASE, 47875206160384, 47875206160384,
+STORE, 47875206160384, 47875206168575,
+STORE, 47875206168576, 47875206176767,
+STORE, 47875206176768, 47875206197247,
+ERASE, 47875206176768, 47875206176768,
+STORE, 47875206176768, 47875206180863,
+STORE, 47875206180864, 47875206197247,
+STORE, 47875206184960, 47875206197247,
+STORE, 47875206180864, 47875206184959,
+ERASE, 47875206180864, 47875206180864,
+STORE, 47875206180864, 47875206184959,
+STORE, 47875206189056, 47875206197247,
+STORE, 47875206184960, 47875206189055,
+ERASE, 47875206184960, 47875206184960,
+STORE, 47875206184960, 47875206197247,
+ERASE, 47875206184960, 47875206184960,
+STORE, 47875206184960, 47875206189055,
+STORE, 47875206189056, 47875206197247,
+ERASE, 47875206189056, 47875206189056,
+STORE, 47875206189056, 47875206197247,
+STORE, 47875206197248, 47875206205439,
+ERASE, 47875198861312, 47875198861312,
+STORE, 47875198861312, 47875198877695,
+STORE, 47875198877696, 47875198885887,
+ERASE, 47875206189056, 47875206189056,
+STORE, 47875206189056, 47875206193151,
+STORE, 47875206193152, 47875206197247,
+ERASE, 47875201777664, 47875201777664,
+STORE, 47875201777664, 47875201781759,
+STORE, 47875201781760, 47875201785855,
+ERASE, 47875206160384, 47875206160384,
+STORE, 47875206160384, 47875206164479,
+STORE, 47875206164480, 47875206168575,
+ERASE, 47875205685248, 47875205685248,
+STORE, 47875205685248, 47875205689343,
+STORE, 47875205689344, 47875205693439,
+ERASE, 47875205648384, 47875205648384,
+STORE, 47875205648384, 47875205652479,
+STORE, 47875205652480, 47875205656575,
+ERASE, 47875205402624, 47875205402624,
+STORE, 47875205402624, 47875205599231,
+STORE, 47875205599232, 47875205607423,
+ERASE, 47875202519040, 47875202519040,
+STORE, 47875202519040, 47875202555903,
+STORE, 47875202555904, 47875202572287,
+ERASE, 47875201966080, 47875201966080,
+STORE, 47875201966080, 47875201970175,
+STORE, 47875201970176, 47875201974271,
+ERASE, 47875201650688, 47875201650688,
+STORE, 47875201650688, 47875201654783,
+STORE, 47875201654784, 47875201658879,
+ERASE, 47875199381504, 47875199381504,
+STORE, 47875199381504, 47875199401983,
+STORE, 47875199401984, 47875199406079,
+ERASE, 47875199049728, 47875199049728,
+STORE, 47875199049728, 47875199053823,
+STORE, 47875199053824, 47875199057919,
+ERASE, 47875199004672, 47875199004672,
+STORE, 47875199004672, 47875199008767,
+STORE, 47875199008768, 47875199012863,
+ERASE, 94011547025408, 94011547025408,
+STORE, 94011547025408, 94011547148287,
+STORE, 94011547148288, 94011547152383,
+ERASE, 139757598109696, 139757598109696,
+STORE, 139757598109696, 139757598113791,
+STORE, 139757598113792, 139757598117887,
+ERASE, 47875197046784, 47875197046784,
+STORE, 94011557584896, 94011557720063,
+STORE, 94011557584896, 94011557855231,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557851135,
+STORE, 94011557851136, 94011557855231,
+ERASE, 94011557851136, 94011557851136,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557847039,
+STORE, 94011557847040, 94011557851135,
+ERASE, 94011557847040, 94011557847040,
+STORE, 94011557584896, 94011557982207,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557978111,
+STORE, 94011557978112, 94011557982207,
+ERASE, 94011557978112, 94011557978112,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557974015,
+STORE, 94011557974016, 94011557978111,
+ERASE, 94011557974016, 94011557974016,
+STORE, 140737488347136, 140737488351231,
+STORE, 140734130360320, 140737488351231,
+ERASE, 140734130360320, 140734130360320,
+STORE, 140734130360320, 140734130364415,
+STORE, 94641232105472, 94641232785407,
+ERASE, 94641232105472, 94641232105472,
+STORE, 94641232105472, 94641232171007,
+STORE, 94641232171008, 94641232785407,
+ERASE, 94641232171008, 94641232171008,
+STORE, 94641232171008, 94641232519167,
+STORE, 94641232519168, 94641232658431,
+STORE, 94641232658432, 94641232785407,
+STORE, 139726599516160, 139726599688191,
+ERASE, 139726599516160, 139726599516160,
+STORE, 139726599516160, 139726599520255,
+STORE, 139726599520256, 139726599688191,
+ERASE, 139726599520256, 139726599520256,
+STORE, 139726599520256, 139726599643135,
+STORE, 139726599643136, 139726599675903,
+STORE, 139726599675904, 139726599684095,
+STORE, 139726599684096, 139726599688191,
+STORE, 140734130446336, 140734130450431,
+STORE, 140734130434048, 140734130446335,
+STORE, 47906195480576, 47906195488767,
+STORE, 47906195488768, 47906195496959,
+STORE, 47906195496960, 47906197336063,
+STORE, 47906195636224, 47906197336063,
+STORE, 47906195496960, 47906195636223,
+ERASE, 47906195636224, 47906195636224,
+STORE, 47906195636224, 47906197295103,
+STORE, 47906197295104, 47906197336063,
+STORE, 47906196979712, 47906197295103,
+STORE, 47906195636224, 47906196979711,
+ERASE, 47906195636224, 47906195636224,
+STORE, 47906195636224, 47906196979711,
+STORE, 47906197291008, 47906197295103,
+STORE, 47906196979712, 47906197291007,
+ERASE, 47906196979712, 47906196979712,
+STORE, 47906196979712, 47906197291007,
+STORE, 47906197319680, 47906197336063,
+STORE, 47906197295104, 47906197319679,
+ERASE, 47906197295104, 47906197295104,
+STORE, 47906197295104, 47906197319679,
+ERASE, 47906197319680, 47906197319680,
+STORE, 47906197319680, 47906197336063,
+STORE, 47906197336064, 47906197446655,
+STORE, 47906197352448, 47906197446655,
+STORE, 47906197336064, 47906197352447,
+ERASE, 47906197352448, 47906197352448,
+STORE, 47906197352448, 47906197438463,
+STORE, 47906197438464, 47906197446655,
+STORE, 47906197413888, 47906197438463,
+STORE, 47906197352448, 47906197413887,
+ERASE, 47906197352448, 47906197352448,
+STORE, 47906197352448, 47906197413887,
+STORE, 47906197434368, 47906197438463,
+STORE, 47906197413888, 47906197434367,
+ERASE, 47906197413888, 47906197413888,
+STORE, 47906197413888, 47906197434367,
+ERASE, 47906197438464, 47906197438464,
+STORE, 47906197438464, 47906197446655,
+STORE, 47906197446656, 47906197491711,
+ERASE, 47906197446656, 47906197446656,
+STORE, 47906197446656, 47906197454847,
+STORE, 47906197454848, 47906197491711,
+STORE, 47906197475328, 47906197491711,
+STORE, 47906197454848, 47906197475327,
+ERASE, 47906197454848, 47906197454848,
+STORE, 47906197454848, 47906197475327,
+STORE, 47906197483520, 47906197491711,
+STORE, 47906197475328, 47906197483519,
+ERASE, 47906197475328, 47906197475328,
+STORE, 47906197475328, 47906197491711,
+ERASE, 47906197475328, 47906197475328,
+STORE, 47906197475328, 47906197483519,
+STORE, 47906197483520, 47906197491711,
+ERASE, 47906197483520, 47906197483520,
+STORE, 47906197483520, 47906197491711,
+STORE, 47906197491712, 47906197839871,
+STORE, 47906197532672, 47906197839871,
+STORE, 47906197491712, 47906197532671,
+ERASE, 47906197532672, 47906197532672,
+STORE, 47906197532672, 47906197815295,
+STORE, 47906197815296, 47906197839871,
+STORE, 47906197745664, 47906197815295,
+STORE, 47906197532672, 47906197745663,
+ERASE, 47906197532672, 47906197532672,
+STORE, 47906197532672, 47906197745663,
+STORE, 47906197811200, 47906197815295,
+STORE, 47906197745664, 47906197811199,
+ERASE, 47906197745664, 47906197745664,
+STORE, 47906197745664, 47906197811199,
+ERASE, 47906197815296, 47906197815296,
+STORE, 47906197815296, 47906197839871,
+STORE, 47906197839872, 47906200100863,
+STORE, 47906197991424, 47906200100863,
+STORE, 47906197839872, 47906197991423,
+ERASE, 47906197991424, 47906197991424,
+STORE, 47906197991424, 47906200084479,
+STORE, 47906200084480, 47906200100863,
+STORE, 47906200092672, 47906200100863,
+STORE, 47906200084480, 47906200092671,
+ERASE, 47906200084480, 47906200084480,
+STORE, 47906200084480, 47906200092671,
+ERASE, 47906200092672, 47906200092672,
+STORE, 47906200092672, 47906200100863,
+STORE, 47906200100864, 47906200236031,
+ERASE, 47906200100864, 47906200100864,
+STORE, 47906200100864, 47906200125439,
+STORE, 47906200125440, 47906200236031,
+STORE, 47906200186880, 47906200236031,
+STORE, 47906200125440, 47906200186879,
+ERASE, 47906200125440, 47906200125440,
+STORE, 47906200125440, 47906200186879,
+STORE, 47906200211456, 47906200236031,
+STORE, 47906200186880, 47906200211455,
+ERASE, 47906200186880, 47906200186880,
+STORE, 47906200186880, 47906200236031,
+ERASE, 47906200186880, 47906200186880,
+STORE, 47906200186880, 47906200211455,
+STORE, 47906200211456, 47906200236031,
+STORE, 47906200219648, 47906200236031,
+STORE, 47906200211456, 47906200219647,
+ERASE, 47906200211456, 47906200211456,
+STORE, 47906200211456, 47906200219647,
+ERASE, 47906200219648, 47906200219648,
+STORE, 47906200219648, 47906200236031,
+STORE, 47906200219648, 47906200244223,
+STORE, 47906200244224, 47906200408063,
+ERASE, 47906200244224, 47906200244224,
+STORE, 47906200244224, 47906200256511,
+STORE, 47906200256512, 47906200408063,
+STORE, 47906200354816, 47906200408063,
+STORE, 47906200256512, 47906200354815,
+ERASE, 47906200256512, 47906200256512,
+STORE, 47906200256512, 47906200354815,
+STORE, 47906200399872, 47906200408063,
+STORE, 47906200354816, 47906200399871,
+ERASE, 47906200354816, 47906200354816,
+STORE, 47906200354816, 47906200408063,
+ERASE, 47906200354816, 47906200354816,
+STORE, 47906200354816, 47906200399871,
+STORE, 47906200399872, 47906200408063,
+ERASE, 47906200399872, 47906200399872,
+STORE, 47906200399872, 47906200408063,
+STORE, 47906200408064, 47906201006079,
+STORE, 47906200526848, 47906201006079,
+STORE, 47906200408064, 47906200526847,
+ERASE, 47906200526848, 47906200526848,
+STORE, 47906200526848, 47906200952831,
+STORE, 47906200952832, 47906201006079,
+STORE, 47906200842240, 47906200952831,
+STORE, 47906200526848, 47906200842239,
+ERASE, 47906200526848, 47906200526848,
+STORE, 47906200526848, 47906200842239,
+STORE, 47906200948736, 47906200952831,
+STORE, 47906200842240, 47906200948735,
+ERASE, 47906200842240, 47906200842240,
+STORE, 47906200842240, 47906200948735,
+ERASE, 47906200952832, 47906200952832,
+STORE, 47906200952832, 47906201006079,
+STORE, 47906201006080, 47906204057599,
+STORE, 47906201550848, 47906204057599,
+STORE, 47906201006080, 47906201550847,
+ERASE, 47906201550848, 47906201550848,
+STORE, 47906201550848, 47906203836415,
+STORE, 47906203836416, 47906204057599,
+STORE, 47906203246592, 47906203836415,
+STORE, 47906201550848, 47906203246591,
+ERASE, 47906201550848, 47906201550848,
+STORE, 47906201550848, 47906203246591,
+STORE, 47906203832320, 47906203836415,
+STORE, 47906203246592, 47906203832319,
+ERASE, 47906203246592, 47906203246592,
+STORE, 47906203246592, 47906203832319,
+STORE, 47906204041216, 47906204057599,
+STORE, 47906203836416, 47906204041215,
+ERASE, 47906203836416, 47906203836416,
+STORE, 47906203836416, 47906204041215,
+ERASE, 47906204041216, 47906204041216,
+STORE, 47906204041216, 47906204057599,
+STORE, 47906204057600, 47906204090367,
+ERASE, 47906204057600, 47906204057600,
+STORE, 47906204057600, 47906204065791,
+STORE, 47906204065792, 47906204090367,
+STORE, 47906204078080, 47906204090367,
+STORE, 47906204065792, 47906204078079,
+ERASE, 47906204065792, 47906204065792,
+STORE, 47906204065792, 47906204078079,
+STORE, 47906204082176, 47906204090367,
+STORE, 47906204078080, 47906204082175,
+ERASE, 47906204078080, 47906204078080,
+STORE, 47906204078080, 47906204090367,
+ERASE, 47906204078080, 47906204078080,
+STORE, 47906204078080, 47906204082175,
+STORE, 47906204082176, 47906204090367,
+ERASE, 47906204082176, 47906204082176,
+STORE, 47906204082176, 47906204090367,
+STORE, 47906204090368, 47906204127231,
+ERASE, 47906204090368, 47906204090368,
+STORE, 47906204090368, 47906204098559,
+STORE, 47906204098560, 47906204127231,
+STORE, 47906204114944, 47906204127231,
+STORE, 47906204098560, 47906204114943,
+ERASE, 47906204098560, 47906204098560,
+STORE, 47906204098560, 47906204114943,
+STORE, 47906204119040, 47906204127231,
+STORE, 47906204114944, 47906204119039,
+ERASE, 47906204114944, 47906204114944,
+STORE, 47906204114944, 47906204127231,
+ERASE, 47906204114944, 47906204114944,
+STORE, 47906204114944, 47906204119039,
+STORE, 47906204119040, 47906204127231,
+ERASE, 47906204119040, 47906204119040,
+STORE, 47906204119040, 47906204127231,
+STORE, 47906204127232, 47906204602367,
+ERASE, 47906204127232, 47906204127232,
+STORE, 47906204127232, 47906204135423,
+STORE, 47906204135424, 47906204602367,
+STORE, 47906204471296, 47906204602367,
+STORE, 47906204135424, 47906204471295,
+ERASE, 47906204135424, 47906204135424,
+STORE, 47906204135424, 47906204471295,
+STORE, 47906204594176, 47906204602367,
+STORE, 47906204471296, 47906204594175,
+ERASE, 47906204471296, 47906204471296,
+STORE, 47906204471296, 47906204602367,
+ERASE, 47906204471296, 47906204471296,
+STORE, 47906204471296, 47906204594175,
+STORE, 47906204594176, 47906204602367,
+ERASE, 47906204594176, 47906204594176,
+STORE, 47906204594176, 47906204602367,
+STORE, 47906204602368, 47906204610559,
+STORE, 47906204610560, 47906204631039,
+ERASE, 47906204610560, 47906204610560,
+STORE, 47906204610560, 47906204614655,
+STORE, 47906204614656, 47906204631039,
+STORE, 47906204618752, 47906204631039,
+STORE, 47906204614656, 47906204618751,
+ERASE, 47906204614656, 47906204614656,
+STORE, 47906204614656, 47906204618751,
+STORE, 47906204622848, 47906204631039,
+STORE, 47906204618752, 47906204622847,
+ERASE, 47906204618752, 47906204618752,
+STORE, 47906204618752, 47906204631039,
+ERASE, 47906204618752, 47906204618752,
+STORE, 47906204618752, 47906204622847,
+STORE, 47906204622848, 47906204631039,
+ERASE, 47906204622848, 47906204622848,
+STORE, 47906204622848, 47906204631039,
+STORE, 47906204631040, 47906204639231,
+ERASE, 47906197295104, 47906197295104,
+STORE, 47906197295104, 47906197311487,
+STORE, 47906197311488, 47906197319679,
+ERASE, 47906204622848, 47906204622848,
+STORE, 47906204622848, 47906204626943,
+STORE, 47906204626944, 47906204631039,
+ERASE, 47906200211456, 47906200211456,
+STORE, 47906200211456, 47906200215551,
+STORE, 47906200215552, 47906200219647,
+ERASE, 47906204594176, 47906204594176,
+STORE, 47906204594176, 47906204598271,
+STORE, 47906204598272, 47906204602367,
+ERASE, 47906204119040, 47906204119040,
+STORE, 47906204119040, 47906204123135,
+STORE, 47906204123136, 47906204127231,
+ERASE, 47906204082176, 47906204082176,
+STORE, 47906204082176, 47906204086271,
+STORE, 47906204086272, 47906204090367,
+ERASE, 47906203836416, 47906203836416,
+STORE, 47906203836416, 47906204033023,
+STORE, 47906204033024, 47906204041215,
+ERASE, 47906200952832, 47906200952832,
+STORE, 47906200952832, 47906200989695,
+STORE, 47906200989696, 47906201006079,
+ERASE, 47906200399872, 47906200399872,
+STORE, 47906200399872, 47906200403967,
+STORE, 47906200403968, 47906200408063,
+ERASE, 47906200084480, 47906200084480,
+STORE, 47906200084480, 47906200088575,
+STORE, 47906200088576, 47906200092671,
+ERASE, 47906197815296, 47906197815296,
+STORE, 47906197815296, 47906197835775,
+STORE, 47906197835776, 47906197839871,
+ERASE, 47906197483520, 47906197483520,
+STORE, 47906197483520, 47906197487615,
+STORE, 47906197487616, 47906197491711,
+ERASE, 47906197438464, 47906197438464,
+STORE, 47906197438464, 47906197442559,
+STORE, 47906197442560, 47906197446655,
+ERASE, 94641232658432, 94641232658432,
+STORE, 94641232658432, 94641232781311,
+STORE, 94641232781312, 94641232785407,
+ERASE, 139726599675904, 139726599675904,
+STORE, 139726599675904, 139726599679999,
+STORE, 139726599680000, 139726599684095,
+ERASE, 47906195480576, 47906195480576,
+STORE, 94641242615808, 94641242750975,
+ };
+
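+	/* set10: replayed user-space VMA trace; entries are (op, start, end) address triples. */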
+ static const unsigned long set10[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140736427839488, 140737488351231,
+ERASE, 140736427839488, 140736427839488,
+STORE, 140736427839488, 140736427843583,
+STORE, 94071213395968, 94071213567999,
+ERASE, 94071213395968, 94071213395968,
+STORE, 94071213395968, 94071213412351,
+STORE, 94071213412352, 94071213567999,
+ERASE, 94071213412352, 94071213412352,
+STORE, 94071213412352, 94071213514751,
+STORE, 94071213514752, 94071213555711,
+STORE, 94071213555712, 94071213567999,
+STORE, 139968410644480, 139968410816511,
+ERASE, 139968410644480, 139968410644480,
+STORE, 139968410644480, 139968410648575,
+STORE, 139968410648576, 139968410816511,
+ERASE, 139968410648576, 139968410648576,
+STORE, 139968410648576, 139968410771455,
+STORE, 139968410771456, 139968410804223,
+STORE, 139968410804224, 139968410812415,
+STORE, 139968410812416, 139968410816511,
+STORE, 140736429277184, 140736429281279,
+STORE, 140736429264896, 140736429277183,
+STORE, 47664384352256, 47664384360447,
+STORE, 47664384360448, 47664384368639,
+STORE, 47664384368640, 47664384532479,
+ERASE, 47664384368640, 47664384368640,
+STORE, 47664384368640, 47664384380927,
+STORE, 47664384380928, 47664384532479,
+STORE, 47664384479232, 47664384532479,
+STORE, 47664384380928, 47664384479231,
+ERASE, 47664384380928, 47664384380928,
+STORE, 47664384380928, 47664384479231,
+STORE, 47664384524288, 47664384532479,
+STORE, 47664384479232, 47664384524287,
+ERASE, 47664384479232, 47664384479232,
+STORE, 47664384479232, 47664384532479,
+ERASE, 47664384479232, 47664384479232,
+STORE, 47664384479232, 47664384524287,
+STORE, 47664384524288, 47664384532479,
+ERASE, 47664384524288, 47664384524288,
+STORE, 47664384524288, 47664384532479,
+STORE, 47664384532480, 47664387583999,
+STORE, 47664385077248, 47664387583999,
+STORE, 47664384532480, 47664385077247,
+ERASE, 47664385077248, 47664385077248,
+STORE, 47664385077248, 47664387362815,
+STORE, 47664387362816, 47664387583999,
+STORE, 47664386772992, 47664387362815,
+STORE, 47664385077248, 47664386772991,
+ERASE, 47664385077248, 47664385077248,
+STORE, 47664385077248, 47664386772991,
+STORE, 47664387358720, 47664387362815,
+STORE, 47664386772992, 47664387358719,
+ERASE, 47664386772992, 47664386772992,
+STORE, 47664386772992, 47664387358719,
+STORE, 47664387567616, 47664387583999,
+STORE, 47664387362816, 47664387567615,
+ERASE, 47664387362816, 47664387362816,
+STORE, 47664387362816, 47664387567615,
+ERASE, 47664387567616, 47664387567616,
+STORE, 47664387567616, 47664387583999,
+STORE, 47664387584000, 47664389423103,
+STORE, 47664387723264, 47664389423103,
+STORE, 47664387584000, 47664387723263,
+ERASE, 47664387723264, 47664387723264,
+STORE, 47664387723264, 47664389382143,
+STORE, 47664389382144, 47664389423103,
+STORE, 47664389066752, 47664389382143,
+STORE, 47664387723264, 47664389066751,
+ERASE, 47664387723264, 47664387723264,
+STORE, 47664387723264, 47664389066751,
+STORE, 47664389378048, 47664389382143,
+STORE, 47664389066752, 47664389378047,
+ERASE, 47664389066752, 47664389066752,
+STORE, 47664389066752, 47664389378047,
+STORE, 47664389406720, 47664389423103,
+STORE, 47664389382144, 47664389406719,
+ERASE, 47664389382144, 47664389382144,
+STORE, 47664389382144, 47664389406719,
+ERASE, 47664389406720, 47664389406720,
+STORE, 47664389406720, 47664389423103,
+STORE, 47664389423104, 47664389558271,
+ERASE, 47664389423104, 47664389423104,
+STORE, 47664389423104, 47664389447679,
+STORE, 47664389447680, 47664389558271,
+STORE, 47664389509120, 47664389558271,
+STORE, 47664389447680, 47664389509119,
+ERASE, 47664389447680, 47664389447680,
+STORE, 47664389447680, 47664389509119,
+STORE, 47664389533696, 47664389558271,
+STORE, 47664389509120, 47664389533695,
+ERASE, 47664389509120, 47664389509120,
+STORE, 47664389509120, 47664389558271,
+ERASE, 47664389509120, 47664389509120,
+STORE, 47664389509120, 47664389533695,
+STORE, 47664389533696, 47664389558271,
+STORE, 47664389541888, 47664389558271,
+STORE, 47664389533696, 47664389541887,
+ERASE, 47664389533696, 47664389533696,
+STORE, 47664389533696, 47664389541887,
+ERASE, 47664389541888, 47664389541888,
+STORE, 47664389541888, 47664389558271,
+STORE, 47664389558272, 47664389578751,
+ERASE, 47664389558272, 47664389558272,
+STORE, 47664389558272, 47664389562367,
+STORE, 47664389562368, 47664389578751,
+STORE, 47664389566464, 47664389578751,
+STORE, 47664389562368, 47664389566463,
+ERASE, 47664389562368, 47664389562368,
+STORE, 47664389562368, 47664389566463,
+STORE, 47664389570560, 47664389578751,
+STORE, 47664389566464, 47664389570559,
+ERASE, 47664389566464, 47664389566464,
+STORE, 47664389566464, 47664389578751,
+ERASE, 47664389566464, 47664389566464,
+STORE, 47664389566464, 47664389570559,
+STORE, 47664389570560, 47664389578751,
+ERASE, 47664389570560, 47664389570560,
+STORE, 47664389570560, 47664389578751,
+STORE, 47664389578752, 47664389586943,
+ERASE, 47664389382144, 47664389382144,
+STORE, 47664389382144, 47664389398527,
+STORE, 47664389398528, 47664389406719,
+ERASE, 47664389570560, 47664389570560,
+STORE, 47664389570560, 47664389574655,
+STORE, 47664389574656, 47664389578751,
+ERASE, 47664389533696, 47664389533696,
+STORE, 47664389533696, 47664389537791,
+STORE, 47664389537792, 47664389541887,
+ERASE, 47664387362816, 47664387362816,
+STORE, 47664387362816, 47664387559423,
+STORE, 47664387559424, 47664387567615,
+ERASE, 47664384524288, 47664384524288,
+STORE, 47664384524288, 47664384528383,
+STORE, 47664384528384, 47664384532479,
+ERASE, 94071213555712, 94071213555712,
+STORE, 94071213555712, 94071213563903,
+STORE, 94071213563904, 94071213567999,
+ERASE, 139968410804224, 139968410804224,
+STORE, 139968410804224, 139968410808319,
+STORE, 139968410808320, 139968410812415,
+ERASE, 47664384352256, 47664384352256,
+STORE, 94071244402688, 94071244537855,
+STORE, 140737488347136, 140737488351231,
+STORE, 140728271503360, 140737488351231,
+ERASE, 140728271503360, 140728271503360,
+STORE, 140728271503360, 140728271507455,
+STORE, 94410361982976, 94410362155007,
+ERASE, 94410361982976, 94410361982976,
+STORE, 94410361982976, 94410361999359,
+STORE, 94410361999360, 94410362155007,
+ERASE, 94410361999360, 94410361999360,
+STORE, 94410361999360, 94410362101759,
+STORE, 94410362101760, 94410362142719,
+STORE, 94410362142720, 94410362155007,
+STORE, 140351953997824, 140351954169855,
+ERASE, 140351953997824, 140351953997824,
+STORE, 140351953997824, 140351954001919,
+STORE, 140351954001920, 140351954169855,
+ERASE, 140351954001920, 140351954001920,
+STORE, 140351954001920, 140351954124799,
+STORE, 140351954124800, 140351954157567,
+STORE, 140351954157568, 140351954165759,
+STORE, 140351954165760, 140351954169855,
+STORE, 140728272429056, 140728272433151,
+STORE, 140728272416768, 140728272429055,
+STORE, 47280840998912, 47280841007103,
+STORE, 47280841007104, 47280841015295,
+STORE, 47280841015296, 47280841179135,
+ERASE, 47280841015296, 47280841015296,
+STORE, 47280841015296, 47280841027583,
+STORE, 47280841027584, 47280841179135,
+STORE, 47280841125888, 47280841179135,
+STORE, 47280841027584, 47280841125887,
+ERASE, 47280841027584, 47280841027584,
+STORE, 47280841027584, 47280841125887,
+STORE, 47280841170944, 47280841179135,
+STORE, 47280841125888, 47280841170943,
+ERASE, 47280841125888, 47280841125888,
+STORE, 47280841125888, 47280841179135,
+ERASE, 47280841125888, 47280841125888,
+STORE, 47280841125888, 47280841170943,
+STORE, 47280841170944, 47280841179135,
+ERASE, 47280841170944, 47280841170944,
+STORE, 47280841170944, 47280841179135,
+STORE, 47280841179136, 47280844230655,
+STORE, 47280841723904, 47280844230655,
+STORE, 47280841179136, 47280841723903,
+ERASE, 47280841723904, 47280841723904,
+STORE, 47280841723904, 47280844009471,
+STORE, 47280844009472, 47280844230655,
+STORE, 47280843419648, 47280844009471,
+STORE, 47280841723904, 47280843419647,
+ERASE, 47280841723904, 47280841723904,
+STORE, 47280841723904, 47280843419647,
+STORE, 47280844005376, 47280844009471,
+STORE, 47280843419648, 47280844005375,
+ERASE, 47280843419648, 47280843419648,
+STORE, 47280843419648, 47280844005375,
+STORE, 47280844214272, 47280844230655,
+STORE, 47280844009472, 47280844214271,
+ERASE, 47280844009472, 47280844009472,
+STORE, 47280844009472, 47280844214271,
+ERASE, 47280844214272, 47280844214272,
+STORE, 47280844214272, 47280844230655,
+STORE, 47280844230656, 47280846069759,
+STORE, 47280844369920, 47280846069759,
+STORE, 47280844230656, 47280844369919,
+ERASE, 47280844369920, 47280844369920,
+STORE, 47280844369920, 47280846028799,
+STORE, 47280846028800, 47280846069759,
+STORE, 47280845713408, 47280846028799,
+STORE, 47280844369920, 47280845713407,
+ERASE, 47280844369920, 47280844369920,
+STORE, 47280844369920, 47280845713407,
+STORE, 47280846024704, 47280846028799,
+STORE, 47280845713408, 47280846024703,
+ERASE, 47280845713408, 47280845713408,
+STORE, 47280845713408, 47280846024703,
+STORE, 47280846053376, 47280846069759,
+STORE, 47280846028800, 47280846053375,
+ERASE, 47280846028800, 47280846028800,
+STORE, 47280846028800, 47280846053375,
+ERASE, 47280846053376, 47280846053376,
+STORE, 47280846053376, 47280846069759,
+STORE, 47280846069760, 47280846204927,
+ERASE, 47280846069760, 47280846069760,
+STORE, 47280846069760, 47280846094335,
+STORE, 47280846094336, 47280846204927,
+STORE, 47280846155776, 47280846204927,
+STORE, 47280846094336, 47280846155775,
+ERASE, 47280846094336, 47280846094336,
+STORE, 47280846094336, 47280846155775,
+STORE, 47280846180352, 47280846204927,
+STORE, 47280846155776, 47280846180351,
+ERASE, 47280846155776, 47280846155776,
+STORE, 47280846155776, 47280846204927,
+ERASE, 47280846155776, 47280846155776,
+STORE, 47280846155776, 47280846180351,
+STORE, 47280846180352, 47280846204927,
+STORE, 47280846188544, 47280846204927,
+STORE, 47280846180352, 47280846188543,
+ERASE, 47280846180352, 47280846180352,
+STORE, 47280846180352, 47280846188543,
+ERASE, 47280846188544, 47280846188544,
+STORE, 47280846188544, 47280846204927,
+STORE, 47280846204928, 47280846225407,
+ERASE, 47280846204928, 47280846204928,
+STORE, 47280846204928, 47280846209023,
+STORE, 47280846209024, 47280846225407,
+STORE, 47280846213120, 47280846225407,
+STORE, 47280846209024, 47280846213119,
+ERASE, 47280846209024, 47280846209024,
+STORE, 47280846209024, 47280846213119,
+STORE, 47280846217216, 47280846225407,
+STORE, 47280846213120, 47280846217215,
+ERASE, 47280846213120, 47280846213120,
+STORE, 47280846213120, 47280846225407,
+ERASE, 47280846213120, 47280846213120,
+STORE, 47280846213120, 47280846217215,
+STORE, 47280846217216, 47280846225407,
+ERASE, 47280846217216, 47280846217216,
+STORE, 47280846217216, 47280846225407,
+STORE, 47280846225408, 47280846233599,
+ERASE, 47280846028800, 47280846028800,
+STORE, 47280846028800, 47280846045183,
+STORE, 47280846045184, 47280846053375,
+ERASE, 47280846217216, 47280846217216,
+STORE, 47280846217216, 47280846221311,
+STORE, 47280846221312, 47280846225407,
+ERASE, 47280846180352, 47280846180352,
+STORE, 47280846180352, 47280846184447,
+STORE, 47280846184448, 47280846188543,
+ERASE, 47280844009472, 47280844009472,
+STORE, 47280844009472, 47280844206079,
+STORE, 47280844206080, 47280844214271,
+ERASE, 47280841170944, 47280841170944,
+STORE, 47280841170944, 47280841175039,
+STORE, 47280841175040, 47280841179135,
+ERASE, 94410362142720, 94410362142720,
+STORE, 94410362142720, 94410362150911,
+STORE, 94410362150912, 94410362155007,
+ERASE, 140351954157568, 140351954157568,
+STORE, 140351954157568, 140351954161663,
+STORE, 140351954161664, 140351954165759,
+ERASE, 47280840998912, 47280840998912,
+STORE, 94410379456512, 94410379591679,
+STORE, 140737488347136, 140737488351231,
+STORE, 140732946362368, 140737488351231,
+ERASE, 140732946362368, 140732946362368,
+STORE, 140732946362368, 140732946366463,
+STORE, 94352937934848, 94352938106879,
+ERASE, 94352937934848, 94352937934848,
+STORE, 94352937934848, 94352937951231,
+STORE, 94352937951232, 94352938106879,
+ERASE, 94352937951232, 94352937951232,
+STORE, 94352937951232, 94352938053631,
+STORE, 94352938053632, 94352938094591,
+STORE, 94352938094592, 94352938106879,
+STORE, 140595518742528, 140595518914559,
+ERASE, 140595518742528, 140595518742528,
+STORE, 140595518742528, 140595518746623,
+STORE, 140595518746624, 140595518914559,
+ERASE, 140595518746624, 140595518746624,
+STORE, 140595518746624, 140595518869503,
+STORE, 140595518869504, 140595518902271,
+STORE, 140595518902272, 140595518910463,
+STORE, 140595518910464, 140595518914559,
+STORE, 140732947468288, 140732947472383,
+STORE, 140732947456000, 140732947468287,
+STORE, 47037276254208, 47037276262399,
+STORE, 47037276262400, 47037276270591,
+STORE, 47037276270592, 47037276434431,
+ERASE, 47037276270592, 47037276270592,
+STORE, 47037276270592, 47037276282879,
+STORE, 47037276282880, 47037276434431,
+STORE, 47037276381184, 47037276434431,
+STORE, 47037276282880, 47037276381183,
+ERASE, 47037276282880, 47037276282880,
+STORE, 47037276282880, 47037276381183,
+STORE, 47037276426240, 47037276434431,
+STORE, 47037276381184, 47037276426239,
+ERASE, 47037276381184, 47037276381184,
+STORE, 47037276381184, 47037276434431,
+ERASE, 47037276381184, 47037276381184,
+STORE, 47037276381184, 47037276426239,
+STORE, 47037276426240, 47037276434431,
+ERASE, 47037276426240, 47037276426240,
+STORE, 47037276426240, 47037276434431,
+STORE, 47037276434432, 47037279485951,
+STORE, 47037276979200, 47037279485951,
+STORE, 47037276434432, 47037276979199,
+ERASE, 47037276979200, 47037276979200,
+STORE, 47037276979200, 47037279264767,
+STORE, 47037279264768, 47037279485951,
+STORE, 47037278674944, 47037279264767,
+STORE, 47037276979200, 47037278674943,
+ERASE, 47037276979200, 47037276979200,
+STORE, 47037276979200, 47037278674943,
+STORE, 47037279260672, 47037279264767,
+STORE, 47037278674944, 47037279260671,
+ERASE, 47037278674944, 47037278674944,
+STORE, 47037278674944, 47037279260671,
+STORE, 47037279469568, 47037279485951,
+STORE, 47037279264768, 47037279469567,
+ERASE, 47037279264768, 47037279264768,
+STORE, 47037279264768, 47037279469567,
+ERASE, 47037279469568, 47037279469568,
+STORE, 47037279469568, 47037279485951,
+STORE, 47037279485952, 47037281325055,
+STORE, 47037279625216, 47037281325055,
+STORE, 47037279485952, 47037279625215,
+ERASE, 47037279625216, 47037279625216,
+STORE, 47037279625216, 47037281284095,
+STORE, 47037281284096, 47037281325055,
+STORE, 47037280968704, 47037281284095,
+STORE, 47037279625216, 47037280968703,
+ERASE, 47037279625216, 47037279625216,
+STORE, 47037279625216, 47037280968703,
+STORE, 47037281280000, 47037281284095,
+STORE, 47037280968704, 47037281279999,
+ERASE, 47037280968704, 47037280968704,
+STORE, 47037280968704, 47037281279999,
+STORE, 47037281308672, 47037281325055,
+STORE, 47037281284096, 47037281308671,
+ERASE, 47037281284096, 47037281284096,
+STORE, 47037281284096, 47037281308671,
+ERASE, 47037281308672, 47037281308672,
+STORE, 47037281308672, 47037281325055,
+STORE, 47037281325056, 47037281460223,
+ERASE, 47037281325056, 47037281325056,
+STORE, 47037281325056, 47037281349631,
+STORE, 47037281349632, 47037281460223,
+STORE, 47037281411072, 47037281460223,
+STORE, 47037281349632, 47037281411071,
+ERASE, 47037281349632, 47037281349632,
+STORE, 47037281349632, 47037281411071,
+STORE, 47037281435648, 47037281460223,
+STORE, 47037281411072, 47037281435647,
+ERASE, 47037281411072, 47037281411072,
+STORE, 47037281411072, 47037281460223,
+ERASE, 47037281411072, 47037281411072,
+STORE, 47037281411072, 47037281435647,
+STORE, 47037281435648, 47037281460223,
+STORE, 47037281443840, 47037281460223,
+STORE, 47037281435648, 47037281443839,
+ERASE, 47037281435648, 47037281435648,
+STORE, 47037281435648, 47037281443839,
+ERASE, 47037281443840, 47037281443840,
+STORE, 47037281443840, 47037281460223,
+STORE, 47037281460224, 47037281480703,
+ERASE, 47037281460224, 47037281460224,
+STORE, 47037281460224, 47037281464319,
+STORE, 47037281464320, 47037281480703,
+STORE, 47037281468416, 47037281480703,
+STORE, 47037281464320, 47037281468415,
+ERASE, 47037281464320, 47037281464320,
+STORE, 47037281464320, 47037281468415,
+STORE, 47037281472512, 47037281480703,
+STORE, 47037281468416, 47037281472511,
+ERASE, 47037281468416, 47037281468416,
+STORE, 47037281468416, 47037281480703,
+ERASE, 47037281468416, 47037281468416,
+STORE, 47037281468416, 47037281472511,
+STORE, 47037281472512, 47037281480703,
+ERASE, 47037281472512, 47037281472512,
+STORE, 47037281472512, 47037281480703,
+STORE, 47037281480704, 47037281488895,
+ERASE, 47037281284096, 47037281284096,
+STORE, 47037281284096, 47037281300479,
+STORE, 47037281300480, 47037281308671,
+ERASE, 47037281472512, 47037281472512,
+STORE, 47037281472512, 47037281476607,
+STORE, 47037281476608, 47037281480703,
+ERASE, 47037281435648, 47037281435648,
+STORE, 47037281435648, 47037281439743,
+STORE, 47037281439744, 47037281443839,
+ERASE, 47037279264768, 47037279264768,
+STORE, 47037279264768, 47037279461375,
+STORE, 47037279461376, 47037279469567,
+ERASE, 47037276426240, 47037276426240,
+STORE, 47037276426240, 47037276430335,
+STORE, 47037276430336, 47037276434431,
+ERASE, 94352938094592, 94352938094592,
+STORE, 94352938094592, 94352938102783,
+STORE, 94352938102784, 94352938106879,
+ERASE, 140595518902272, 140595518902272,
+STORE, 140595518902272, 140595518906367,
+STORE, 140595518906368, 140595518910463,
+ERASE, 47037276254208, 47037276254208,
+STORE, 94352938438656, 94352938573823,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733506027520, 140737488351231,
+ERASE, 140733506027520, 140733506027520,
+STORE, 140733506027520, 140733506031615,
+STORE, 94150123073536, 94150123245567,
+ERASE, 94150123073536, 94150123073536,
+STORE, 94150123073536, 94150123089919,
+STORE, 94150123089920, 94150123245567,
+ERASE, 94150123089920, 94150123089920,
+STORE, 94150123089920, 94150123192319,
+STORE, 94150123192320, 94150123233279,
+STORE, 94150123233280, 94150123245567,
+STORE, 140081290375168, 140081290547199,
+ERASE, 140081290375168, 140081290375168,
+STORE, 140081290375168, 140081290379263,
+STORE, 140081290379264, 140081290547199,
+ERASE, 140081290379264, 140081290379264,
+STORE, 140081290379264, 140081290502143,
+STORE, 140081290502144, 140081290534911,
+STORE, 140081290534912, 140081290543103,
+STORE, 140081290543104, 140081290547199,
+STORE, 140733506707456, 140733506711551,
+STORE, 140733506695168, 140733506707455,
+STORE, 47551504621568, 47551504629759,
+STORE, 47551504629760, 47551504637951,
+STORE, 47551504637952, 47551504801791,
+ERASE, 47551504637952, 47551504637952,
+STORE, 47551504637952, 47551504650239,
+STORE, 47551504650240, 47551504801791,
+STORE, 47551504748544, 47551504801791,
+STORE, 47551504650240, 47551504748543,
+ERASE, 47551504650240, 47551504650240,
+STORE, 47551504650240, 47551504748543,
+STORE, 47551504793600, 47551504801791,
+STORE, 47551504748544, 47551504793599,
+ERASE, 47551504748544, 47551504748544,
+STORE, 47551504748544, 47551504801791,
+ERASE, 47551504748544, 47551504748544,
+STORE, 47551504748544, 47551504793599,
+STORE, 47551504793600, 47551504801791,
+ERASE, 47551504793600, 47551504793600,
+STORE, 47551504793600, 47551504801791,
+STORE, 47551504801792, 47551507853311,
+STORE, 47551505346560, 47551507853311,
+STORE, 47551504801792, 47551505346559,
+ERASE, 47551505346560, 47551505346560,
+STORE, 47551505346560, 47551507632127,
+STORE, 47551507632128, 47551507853311,
+STORE, 47551507042304, 47551507632127,
+STORE, 47551505346560, 47551507042303,
+ERASE, 47551505346560, 47551505346560,
+STORE, 47551505346560, 47551507042303,
+STORE, 47551507628032, 47551507632127,
+STORE, 47551507042304, 47551507628031,
+ERASE, 47551507042304, 47551507042304,
+STORE, 47551507042304, 47551507628031,
+STORE, 47551507836928, 47551507853311,
+STORE, 47551507632128, 47551507836927,
+ERASE, 47551507632128, 47551507632128,
+STORE, 47551507632128, 47551507836927,
+ERASE, 47551507836928, 47551507836928,
+STORE, 47551507836928, 47551507853311,
+STORE, 47551507853312, 47551509692415,
+STORE, 47551507992576, 47551509692415,
+STORE, 47551507853312, 47551507992575,
+ERASE, 47551507992576, 47551507992576,
+STORE, 47551507992576, 47551509651455,
+STORE, 47551509651456, 47551509692415,
+STORE, 47551509336064, 47551509651455,
+STORE, 47551507992576, 47551509336063,
+ERASE, 47551507992576, 47551507992576,
+STORE, 47551507992576, 47551509336063,
+STORE, 47551509647360, 47551509651455,
+STORE, 47551509336064, 47551509647359,
+ERASE, 47551509336064, 47551509336064,
+STORE, 47551509336064, 47551509647359,
+STORE, 47551509676032, 47551509692415,
+STORE, 47551509651456, 47551509676031,
+ERASE, 47551509651456, 47551509651456,
+STORE, 47551509651456, 47551509676031,
+ERASE, 47551509676032, 47551509676032,
+STORE, 47551509676032, 47551509692415,
+STORE, 47551509692416, 47551509827583,
+ERASE, 47551509692416, 47551509692416,
+STORE, 47551509692416, 47551509716991,
+STORE, 47551509716992, 47551509827583,
+STORE, 47551509778432, 47551509827583,
+STORE, 47551509716992, 47551509778431,
+ERASE, 47551509716992, 47551509716992,
+STORE, 47551509716992, 47551509778431,
+STORE, 47551509803008, 47551509827583,
+STORE, 47551509778432, 47551509803007,
+ERASE, 47551509778432, 47551509778432,
+STORE, 47551509778432, 47551509827583,
+ERASE, 47551509778432, 47551509778432,
+STORE, 47551509778432, 47551509803007,
+STORE, 47551509803008, 47551509827583,
+STORE, 47551509811200, 47551509827583,
+STORE, 47551509803008, 47551509811199,
+ERASE, 47551509803008, 47551509803008,
+STORE, 47551509803008, 47551509811199,
+ERASE, 47551509811200, 47551509811200,
+STORE, 47551509811200, 47551509827583,
+STORE, 47551509827584, 47551509848063,
+ERASE, 47551509827584, 47551509827584,
+STORE, 47551509827584, 47551509831679,
+STORE, 47551509831680, 47551509848063,
+STORE, 47551509835776, 47551509848063,
+STORE, 47551509831680, 47551509835775,
+ERASE, 47551509831680, 47551509831680,
+STORE, 47551509831680, 47551509835775,
+STORE, 47551509839872, 47551509848063,
+STORE, 47551509835776, 47551509839871,
+ERASE, 47551509835776, 47551509835776,
+STORE, 47551509835776, 47551509848063,
+ERASE, 47551509835776, 47551509835776,
+STORE, 47551509835776, 47551509839871,
+STORE, 47551509839872, 47551509848063,
+ERASE, 47551509839872, 47551509839872,
+STORE, 47551509839872, 47551509848063,
+STORE, 47551509848064, 47551509856255,
+ERASE, 47551509651456, 47551509651456,
+STORE, 47551509651456, 47551509667839,
+STORE, 47551509667840, 47551509676031,
+ERASE, 47551509839872, 47551509839872,
+STORE, 47551509839872, 47551509843967,
+STORE, 47551509843968, 47551509848063,
+ERASE, 47551509803008, 47551509803008,
+STORE, 47551509803008, 47551509807103,
+STORE, 47551509807104, 47551509811199,
+ERASE, 47551507632128, 47551507632128,
+STORE, 47551507632128, 47551507828735,
+STORE, 47551507828736, 47551507836927,
+ERASE, 47551504793600, 47551504793600,
+STORE, 47551504793600, 47551504797695,
+STORE, 47551504797696, 47551504801791,
+ERASE, 94150123233280, 94150123233280,
+STORE, 94150123233280, 94150123241471,
+STORE, 94150123241472, 94150123245567,
+ERASE, 140081290534912, 140081290534912,
+STORE, 140081290534912, 140081290539007,
+STORE, 140081290539008, 140081290543103,
+ERASE, 47551504621568, 47551504621568,
+STORE, 94150148112384, 94150148247551,
+STORE, 140737488347136, 140737488351231,
+STORE, 140734389334016, 140737488351231,
+ERASE, 140734389334016, 140734389334016,
+STORE, 140734389334016, 140734389338111,
+STORE, 94844636606464, 94844636778495,
+ERASE, 94844636606464, 94844636606464,
+STORE, 94844636606464, 94844636622847,
+STORE, 94844636622848, 94844636778495,
+ERASE, 94844636622848, 94844636622848,
+STORE, 94844636622848, 94844636725247,
+STORE, 94844636725248, 94844636766207,
+STORE, 94844636766208, 94844636778495,
+STORE, 139922765217792, 139922765389823,
+ERASE, 139922765217792, 139922765217792,
+STORE, 139922765217792, 139922765221887,
+STORE, 139922765221888, 139922765389823,
+ERASE, 139922765221888, 139922765221888,
+STORE, 139922765221888, 139922765344767,
+STORE, 139922765344768, 139922765377535,
+STORE, 139922765377536, 139922765385727,
+STORE, 139922765385728, 139922765389823,
+STORE, 140734389678080, 140734389682175,
+STORE, 140734389665792, 140734389678079,
+STORE, 47710029778944, 47710029787135,
+STORE, 47710029787136, 47710029795327,
+STORE, 47710029795328, 47710029959167,
+ERASE, 47710029795328, 47710029795328,
+STORE, 47710029795328, 47710029807615,
+STORE, 47710029807616, 47710029959167,
+STORE, 47710029905920, 47710029959167,
+STORE, 47710029807616, 47710029905919,
+ERASE, 47710029807616, 47710029807616,
+STORE, 47710029807616, 47710029905919,
+STORE, 47710029950976, 47710029959167,
+STORE, 47710029905920, 47710029950975,
+ERASE, 47710029905920, 47710029905920,
+STORE, 47710029905920, 47710029959167,
+ERASE, 47710029905920, 47710029905920,
+STORE, 47710029905920, 47710029950975,
+STORE, 47710029950976, 47710029959167,
+ERASE, 47710029950976, 47710029950976,
+STORE, 47710029950976, 47710029959167,
+STORE, 47710029959168, 47710033010687,
+STORE, 47710030503936, 47710033010687,
+STORE, 47710029959168, 47710030503935,
+ERASE, 47710030503936, 47710030503936,
+STORE, 47710030503936, 47710032789503,
+STORE, 47710032789504, 47710033010687,
+STORE, 47710032199680, 47710032789503,
+STORE, 47710030503936, 47710032199679,
+ERASE, 47710030503936, 47710030503936,
+STORE, 47710030503936, 47710032199679,
+STORE, 47710032785408, 47710032789503,
+STORE, 47710032199680, 47710032785407,
+ERASE, 47710032199680, 47710032199680,
+STORE, 47710032199680, 47710032785407,
+STORE, 47710032994304, 47710033010687,
+STORE, 47710032789504, 47710032994303,
+ERASE, 47710032789504, 47710032789504,
+STORE, 47710032789504, 47710032994303,
+ERASE, 47710032994304, 47710032994304,
+STORE, 47710032994304, 47710033010687,
+STORE, 47710033010688, 47710034849791,
+STORE, 47710033149952, 47710034849791,
+STORE, 47710033010688, 47710033149951,
+ERASE, 47710033149952, 47710033149952,
+STORE, 47710033149952, 47710034808831,
+STORE, 47710034808832, 47710034849791,
+STORE, 47710034493440, 47710034808831,
+STORE, 47710033149952, 47710034493439,
+ERASE, 47710033149952, 47710033149952,
+STORE, 47710033149952, 47710034493439,
+STORE, 47710034804736, 47710034808831,
+STORE, 47710034493440, 47710034804735,
+ERASE, 47710034493440, 47710034493440,
+STORE, 47710034493440, 47710034804735,
+STORE, 47710034833408, 47710034849791,
+STORE, 47710034808832, 47710034833407,
+ERASE, 47710034808832, 47710034808832,
+STORE, 47710034808832, 47710034833407,
+ERASE, 47710034833408, 47710034833408,
+STORE, 47710034833408, 47710034849791,
+STORE, 47710034849792, 47710034984959,
+ERASE, 47710034849792, 47710034849792,
+STORE, 47710034849792, 47710034874367,
+STORE, 47710034874368, 47710034984959,
+STORE, 47710034935808, 47710034984959,
+STORE, 47710034874368, 47710034935807,
+ERASE, 47710034874368, 47710034874368,
+STORE, 47710034874368, 47710034935807,
+STORE, 47710034960384, 47710034984959,
+STORE, 47710034935808, 47710034960383,
+ERASE, 47710034935808, 47710034935808,
+STORE, 47710034935808, 47710034984959,
+ERASE, 47710034935808, 47710034935808,
+STORE, 47710034935808, 47710034960383,
+STORE, 47710034960384, 47710034984959,
+STORE, 47710034968576, 47710034984959,
+STORE, 47710034960384, 47710034968575,
+ERASE, 47710034960384, 47710034960384,
+STORE, 47710034960384, 47710034968575,
+ERASE, 47710034968576, 47710034968576,
+STORE, 47710034968576, 47710034984959,
+STORE, 47710034984960, 47710035005439,
+ERASE, 47710034984960, 47710034984960,
+STORE, 47710034984960, 47710034989055,
+STORE, 47710034989056, 47710035005439,
+STORE, 47710034993152, 47710035005439,
+STORE, 47710034989056, 47710034993151,
+ERASE, 47710034989056, 47710034989056,
+STORE, 47710034989056, 47710034993151,
+STORE, 47710034997248, 47710035005439,
+STORE, 47710034993152, 47710034997247,
+ERASE, 47710034993152, 47710034993152,
+STORE, 47710034993152, 47710035005439,
+ERASE, 47710034993152, 47710034993152,
+STORE, 47710034993152, 47710034997247,
+STORE, 47710034997248, 47710035005439,
+ERASE, 47710034997248, 47710034997248,
+STORE, 47710034997248, 47710035005439,
+STORE, 47710035005440, 47710035013631,
+ERASE, 47710034808832, 47710034808832,
+STORE, 47710034808832, 47710034825215,
+STORE, 47710034825216, 47710034833407,
+ERASE, 47710034997248, 47710034997248,
+STORE, 47710034997248, 47710035001343,
+STORE, 47710035001344, 47710035005439,
+ERASE, 47710034960384, 47710034960384,
+STORE, 47710034960384, 47710034964479,
+STORE, 47710034964480, 47710034968575,
+ERASE, 47710032789504, 47710032789504,
+STORE, 47710032789504, 47710032986111,
+STORE, 47710032986112, 47710032994303,
+ERASE, 47710029950976, 47710029950976,
+STORE, 47710029950976, 47710029955071,
+STORE, 47710029955072, 47710029959167,
+ERASE, 94844636766208, 94844636766208,
+STORE, 94844636766208, 94844636774399,
+STORE, 94844636774400, 94844636778495,
+ERASE, 139922765377536, 139922765377536,
+STORE, 139922765377536, 139922765381631,
+STORE, 139922765381632, 139922765385727,
+ERASE, 47710029778944, 47710029778944,
+STORE, 94844641775616, 94844641910783,
+STORE, 140737488347136, 140737488351231,
+STORE, 140732213886976, 140737488351231,
+ERASE, 140732213886976, 140732213886976,
+STORE, 140732213886976, 140732213891071,
+STORE, 94240508887040, 94240509059071,
+ERASE, 94240508887040, 94240508887040,
+STORE, 94240508887040, 94240508903423,
+STORE, 94240508903424, 94240509059071,
+ERASE, 94240508903424, 94240508903424,
+STORE, 94240508903424, 94240509005823,
+STORE, 94240509005824, 94240509046783,
+STORE, 94240509046784, 94240509059071,
+STORE, 140275106516992, 140275106689023,
+ERASE, 140275106516992, 140275106516992,
+STORE, 140275106516992, 140275106521087,
+STORE, 140275106521088, 140275106689023,
+ERASE, 140275106521088, 140275106521088,
+STORE, 140275106521088, 140275106643967,
+STORE, 140275106643968, 140275106676735,
+STORE, 140275106676736, 140275106684927,
+STORE, 140275106684928, 140275106689023,
+STORE, 140732213977088, 140732213981183,
+STORE, 140732213964800, 140732213977087,
+STORE, 47357688479744, 47357688487935,
+STORE, 47357688487936, 47357688496127,
+STORE, 47357688496128, 47357688659967,
+ERASE, 47357688496128, 47357688496128,
+STORE, 47357688496128, 47357688508415,
+STORE, 47357688508416, 47357688659967,
+STORE, 47357688606720, 47357688659967,
+STORE, 47357688508416, 47357688606719,
+ERASE, 47357688508416, 47357688508416,
+STORE, 47357688508416, 47357688606719,
+STORE, 47357688651776, 47357688659967,
+STORE, 47357688606720, 47357688651775,
+ERASE, 47357688606720, 47357688606720,
+STORE, 47357688606720, 47357688659967,
+ERASE, 47357688606720, 47357688606720,
+STORE, 47357688606720, 47357688651775,
+STORE, 47357688651776, 47357688659967,
+ERASE, 47357688651776, 47357688651776,
+STORE, 47357688651776, 47357688659967,
+STORE, 47357688659968, 47357691711487,
+STORE, 47357689204736, 47357691711487,
+STORE, 47357688659968, 47357689204735,
+ERASE, 47357689204736, 47357689204736,
+STORE, 47357689204736, 47357691490303,
+STORE, 47357691490304, 47357691711487,
+STORE, 47357690900480, 47357691490303,
+STORE, 47357689204736, 47357690900479,
+ERASE, 47357689204736, 47357689204736,
+STORE, 47357689204736, 47357690900479,
+STORE, 47357691486208, 47357691490303,
+STORE, 47357690900480, 47357691486207,
+ERASE, 47357690900480, 47357690900480,
+STORE, 47357690900480, 47357691486207,
+STORE, 47357691695104, 47357691711487,
+STORE, 47357691490304, 47357691695103,
+ERASE, 47357691490304, 47357691490304,
+STORE, 47357691490304, 47357691695103,
+ERASE, 47357691695104, 47357691695104,
+STORE, 47357691695104, 47357691711487,
+STORE, 47357691711488, 47357693550591,
+STORE, 47357691850752, 47357693550591,
+STORE, 47357691711488, 47357691850751,
+ERASE, 47357691850752, 47357691850752,
+STORE, 47357691850752, 47357693509631,
+STORE, 47357693509632, 47357693550591,
+STORE, 47357693194240, 47357693509631,
+STORE, 47357691850752, 47357693194239,
+ERASE, 47357691850752, 47357691850752,
+STORE, 47357691850752, 47357693194239,
+STORE, 47357693505536, 47357693509631,
+STORE, 47357693194240, 47357693505535,
+ERASE, 47357693194240, 47357693194240,
+STORE, 47357693194240, 47357693505535,
+STORE, 47357693534208, 47357693550591,
+STORE, 47357693509632, 47357693534207,
+ERASE, 47357693509632, 47357693509632,
+STORE, 47357693509632, 47357693534207,
+ERASE, 47357693534208, 47357693534208,
+STORE, 47357693534208, 47357693550591,
+STORE, 47357693550592, 47357693685759,
+ERASE, 47357693550592, 47357693550592,
+STORE, 47357693550592, 47357693575167,
+STORE, 47357693575168, 47357693685759,
+STORE, 47357693636608, 47357693685759,
+STORE, 47357693575168, 47357693636607,
+ERASE, 47357693575168, 47357693575168,
+STORE, 47357693575168, 47357693636607,
+STORE, 47357693661184, 47357693685759,
+STORE, 47357693636608, 47357693661183,
+ERASE, 47357693636608, 47357693636608,
+STORE, 47357693636608, 47357693685759,
+ERASE, 47357693636608, 47357693636608,
+STORE, 47357693636608, 47357693661183,
+STORE, 47357693661184, 47357693685759,
+STORE, 47357693669376, 47357693685759,
+STORE, 47357693661184, 47357693669375,
+ERASE, 47357693661184, 47357693661184,
+STORE, 47357693661184, 47357693669375,
+ERASE, 47357693669376, 47357693669376,
+STORE, 47357693669376, 47357693685759,
+STORE, 47357693685760, 47357693706239,
+ERASE, 47357693685760, 47357693685760,
+STORE, 47357693685760, 47357693689855,
+STORE, 47357693689856, 47357693706239,
+STORE, 47357693693952, 47357693706239,
+STORE, 47357693689856, 47357693693951,
+ERASE, 47357693689856, 47357693689856,
+STORE, 47357693689856, 47357693693951,
+STORE, 47357693698048, 47357693706239,
+STORE, 47357693693952, 47357693698047,
+ERASE, 47357693693952, 47357693693952,
+STORE, 47357693693952, 47357693706239,
+ERASE, 47357693693952, 47357693693952,
+STORE, 47357693693952, 47357693698047,
+STORE, 47357693698048, 47357693706239,
+ERASE, 47357693698048, 47357693698048,
+STORE, 47357693698048, 47357693706239,
+STORE, 47357693706240, 47357693714431,
+ERASE, 47357693509632, 47357693509632,
+STORE, 47357693509632, 47357693526015,
+STORE, 47357693526016, 47357693534207,
+ERASE, 47357693698048, 47357693698048,
+STORE, 47357693698048, 47357693702143,
+STORE, 47357693702144, 47357693706239,
+ERASE, 47357693661184, 47357693661184,
+STORE, 47357693661184, 47357693665279,
+STORE, 47357693665280, 47357693669375,
+ERASE, 47357691490304, 47357691490304,
+STORE, 47357691490304, 47357691686911,
+STORE, 47357691686912, 47357691695103,
+ERASE, 47357688651776, 47357688651776,
+STORE, 47357688651776, 47357688655871,
+STORE, 47357688655872, 47357688659967,
+ERASE, 94240509046784, 94240509046784,
+STORE, 94240509046784, 94240509054975,
+STORE, 94240509054976, 94240509059071,
+ERASE, 140275106676736, 140275106676736,
+STORE, 140275106676736, 140275106680831,
+STORE, 140275106680832, 140275106684927,
+ERASE, 47357688479744, 47357688479744,
+STORE, 94240518361088, 94240518496255,
+STORE, 140737488347136, 140737488351231,
+STORE, 140732688277504, 140737488351231,
+ERASE, 140732688277504, 140732688277504,
+STORE, 140732688277504, 140732688281599,
+STORE, 94629171351552, 94629172064255,
+ERASE, 94629171351552, 94629171351552,
+STORE, 94629171351552, 94629171400703,
+STORE, 94629171400704, 94629172064255,
+ERASE, 94629171400704, 94629171400704,
+STORE, 94629171400704, 94629171945471,
+STORE, 94629171945472, 94629172043775,
+STORE, 94629172043776, 94629172064255,
+STORE, 139770707644416, 139770707816447,
+ERASE, 139770707644416, 139770707644416,
+STORE, 139770707644416, 139770707648511,
+STORE, 139770707648512, 139770707816447,
+ERASE, 139770707648512, 139770707648512,
+STORE, 139770707648512, 139770707771391,
+STORE, 139770707771392, 139770707804159,
+STORE, 139770707804160, 139770707812351,
+STORE, 139770707812352, 139770707816447,
+STORE, 140732689121280, 140732689125375,
+STORE, 140732689108992, 140732689121279,
+STORE, 47862087352320, 47862087360511,
+STORE, 47862087360512, 47862087368703,
+STORE, 47862087368704, 47862087475199,
+STORE, 47862087385088, 47862087475199,
+STORE, 47862087368704, 47862087385087,
+ERASE, 47862087385088, 47862087385088,
+STORE, 47862087385088, 47862087458815,
+STORE, 47862087458816, 47862087475199,
+STORE, 47862087438336, 47862087458815,
+STORE, 47862087385088, 47862087438335,
+ERASE, 47862087385088, 47862087385088,
+STORE, 47862087385088, 47862087438335,
+STORE, 47862087454720, 47862087458815,
+STORE, 47862087438336, 47862087454719,
+ERASE, 47862087438336, 47862087438336,
+STORE, 47862087438336, 47862087454719,
+STORE, 47862087467008, 47862087475199,
+STORE, 47862087458816, 47862087467007,
+ERASE, 47862087458816, 47862087458816,
+STORE, 47862087458816, 47862087467007,
+ERASE, 47862087467008, 47862087467008,
+STORE, 47862087467008, 47862087475199,
+STORE, 47862087475200, 47862089314303,
+STORE, 47862087614464, 47862089314303,
+STORE, 47862087475200, 47862087614463,
+ERASE, 47862087614464, 47862087614464,
+STORE, 47862087614464, 47862089273343,
+STORE, 47862089273344, 47862089314303,
+STORE, 47862088957952, 47862089273343,
+STORE, 47862087614464, 47862088957951,
+ERASE, 47862087614464, 47862087614464,
+STORE, 47862087614464, 47862088957951,
+STORE, 47862089269248, 47862089273343,
+STORE, 47862088957952, 47862089269247,
+ERASE, 47862088957952, 47862088957952,
+STORE, 47862088957952, 47862089269247,
+STORE, 47862089297920, 47862089314303,
+STORE, 47862089273344, 47862089297919,
+ERASE, 47862089273344, 47862089273344,
+STORE, 47862089273344, 47862089297919,
+ERASE, 47862089297920, 47862089297920,
+STORE, 47862089297920, 47862089314303,
+STORE, 47862089297920, 47862089326591,
+ERASE, 47862089273344, 47862089273344,
+STORE, 47862089273344, 47862089289727,
+STORE, 47862089289728, 47862089297919,
+ERASE, 47862087458816, 47862087458816,
+STORE, 47862087458816, 47862087462911,
+STORE, 47862087462912, 47862087467007,
+ERASE, 94629172043776, 94629172043776,
+STORE, 94629172043776, 94629172060159,
+STORE, 94629172060160, 94629172064255,
+ERASE, 139770707804160, 139770707804160,
+STORE, 139770707804160, 139770707808255,
+STORE, 139770707808256, 139770707812351,
+ERASE, 47862087352320, 47862087352320,
+STORE, 94629197533184, 94629197668351,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727540711424, 140737488351231,
+ERASE, 140727540711424, 140727540711424,
+STORE, 140727540711424, 140727540715519,
+STORE, 94299865313280, 94299866025983,
+ERASE, 94299865313280, 94299865313280,
+STORE, 94299865313280, 94299865362431,
+STORE, 94299865362432, 94299866025983,
+ERASE, 94299865362432, 94299865362432,
+STORE, 94299865362432, 94299865907199,
+STORE, 94299865907200, 94299866005503,
+STORE, 94299866005504, 94299866025983,
+STORE, 140680268763136, 140680268935167,
+ERASE, 140680268763136, 140680268763136,
+STORE, 140680268763136, 140680268767231,
+STORE, 140680268767232, 140680268935167,
+ERASE, 140680268767232, 140680268767232,
+STORE, 140680268767232, 140680268890111,
+STORE, 140680268890112, 140680268922879,
+STORE, 140680268922880, 140680268931071,
+STORE, 140680268931072, 140680268935167,
+STORE, 140727541424128, 140727541428223,
+STORE, 140727541411840, 140727541424127,
+STORE, 46952526233600, 46952526241791,
+STORE, 46952526241792, 46952526249983,
+STORE, 46952526249984, 46952526356479,
+STORE, 46952526266368, 46952526356479,
+STORE, 46952526249984, 46952526266367,
+ERASE, 46952526266368, 46952526266368,
+STORE, 46952526266368, 46952526340095,
+STORE, 46952526340096, 46952526356479,
+STORE, 46952526319616, 46952526340095,
+STORE, 46952526266368, 46952526319615,
+ERASE, 46952526266368, 46952526266368,
+STORE, 46952526266368, 46952526319615,
+STORE, 46952526336000, 46952526340095,
+STORE, 46952526319616, 46952526335999,
+ERASE, 46952526319616, 46952526319616,
+STORE, 46952526319616, 46952526335999,
+STORE, 46952526348288, 46952526356479,
+STORE, 46952526340096, 46952526348287,
+ERASE, 46952526340096, 46952526340096,
+STORE, 46952526340096, 46952526348287,
+ERASE, 46952526348288, 46952526348288,
+STORE, 46952526348288, 46952526356479,
+STORE, 46952526356480, 46952528195583,
+STORE, 46952526495744, 46952528195583,
+STORE, 46952526356480, 46952526495743,
+ERASE, 46952526495744, 46952526495744,
+STORE, 46952526495744, 46952528154623,
+STORE, 46952528154624, 46952528195583,
+STORE, 46952527839232, 46952528154623,
+STORE, 46952526495744, 46952527839231,
+ERASE, 46952526495744, 46952526495744,
+STORE, 46952526495744, 46952527839231,
+STORE, 46952528150528, 46952528154623,
+STORE, 46952527839232, 46952528150527,
+ERASE, 46952527839232, 46952527839232,
+STORE, 46952527839232, 46952528150527,
+STORE, 46952528179200, 46952528195583,
+STORE, 46952528154624, 46952528179199,
+ERASE, 46952528154624, 46952528154624,
+STORE, 46952528154624, 46952528179199,
+ERASE, 46952528179200, 46952528179200,
+STORE, 46952528179200, 46952528195583,
+STORE, 46952528179200, 46952528207871,
+ERASE, 46952528154624, 46952528154624,
+STORE, 46952528154624, 46952528171007,
+STORE, 46952528171008, 46952528179199,
+ERASE, 46952526340096, 46952526340096,
+STORE, 46952526340096, 46952526344191,
+STORE, 46952526344192, 46952526348287,
+ERASE, 94299866005504, 94299866005504,
+STORE, 94299866005504, 94299866021887,
+STORE, 94299866021888, 94299866025983,
+ERASE, 140680268922880, 140680268922880,
+STORE, 140680268922880, 140680268926975,
+STORE, 140680268926976, 140680268931071,
+ERASE, 46952526233600, 46952526233600,
+STORE, 140737488347136, 140737488351231,
+STORE, 140722874793984, 140737488351231,
+ERASE, 140722874793984, 140722874793984,
+STORE, 140722874793984, 140722874798079,
+STORE, 94448916213760, 94448916926463,
+ERASE, 94448916213760, 94448916213760,
+STORE, 94448916213760, 94448916262911,
+STORE, 94448916262912, 94448916926463,
+ERASE, 94448916262912, 94448916262912,
+STORE, 94448916262912, 94448916807679,
+STORE, 94448916807680, 94448916905983,
+STORE, 94448916905984, 94448916926463,
+STORE, 140389117046784, 140389117218815,
+ERASE, 140389117046784, 140389117046784,
+STORE, 140389117046784, 140389117050879,
+STORE, 140389117050880, 140389117218815,
+ERASE, 140389117050880, 140389117050880,
+STORE, 140389117050880, 140389117173759,
+STORE, 140389117173760, 140389117206527,
+STORE, 140389117206528, 140389117214719,
+STORE, 140389117214720, 140389117218815,
+STORE, 140722875297792, 140722875301887,
+STORE, 140722875285504, 140722875297791,
+STORE, 47243677949952, 47243677958143,
+STORE, 47243677958144, 47243677966335,
+STORE, 47243677966336, 47243678072831,
+STORE, 47243677982720, 47243678072831,
+STORE, 47243677966336, 47243677982719,
+ERASE, 47243677982720, 47243677982720,
+STORE, 47243677982720, 47243678056447,
+STORE, 47243678056448, 47243678072831,
+STORE, 47243678035968, 47243678056447,
+STORE, 47243677982720, 47243678035967,
+ERASE, 47243677982720, 47243677982720,
+STORE, 47243677982720, 47243678035967,
+STORE, 47243678052352, 47243678056447,
+STORE, 47243678035968, 47243678052351,
+ERASE, 47243678035968, 47243678035968,
+STORE, 47243678035968, 47243678052351,
+STORE, 47243678064640, 47243678072831,
+STORE, 47243678056448, 47243678064639,
+ERASE, 47243678056448, 47243678056448,
+STORE, 47243678056448, 47243678064639,
+ERASE, 47243678064640, 47243678064640,
+STORE, 47243678064640, 47243678072831,
+STORE, 47243678072832, 47243679911935,
+STORE, 47243678212096, 47243679911935,
+STORE, 47243678072832, 47243678212095,
+ERASE, 47243678212096, 47243678212096,
+STORE, 47243678212096, 47243679870975,
+STORE, 47243679870976, 47243679911935,
+STORE, 47243679555584, 47243679870975,
+STORE, 47243678212096, 47243679555583,
+ERASE, 47243678212096, 47243678212096,
+STORE, 47243678212096, 47243679555583,
+STORE, 47243679866880, 47243679870975,
+STORE, 47243679555584, 47243679866879,
+ERASE, 47243679555584, 47243679555584,
+STORE, 47243679555584, 47243679866879,
+STORE, 47243679895552, 47243679911935,
+STORE, 47243679870976, 47243679895551,
+ERASE, 47243679870976, 47243679870976,
+STORE, 47243679870976, 47243679895551,
+ERASE, 47243679895552, 47243679895552,
+STORE, 47243679895552, 47243679911935,
+STORE, 47243679895552, 47243679924223,
+ERASE, 47243679870976, 47243679870976,
+STORE, 47243679870976, 47243679887359,
+STORE, 47243679887360, 47243679895551,
+ERASE, 47243678056448, 47243678056448,
+STORE, 47243678056448, 47243678060543,
+STORE, 47243678060544, 47243678064639,
+ERASE, 94448916905984, 94448916905984,
+STORE, 94448916905984, 94448916922367,
+STORE, 94448916922368, 94448916926463,
+ERASE, 140389117206528, 140389117206528,
+STORE, 140389117206528, 140389117210623,
+STORE, 140389117210624, 140389117214719,
+ERASE, 47243677949952, 47243677949952,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733068505088, 140737488351231,
+ERASE, 140733068505088, 140733068505088,
+STORE, 140733068505088, 140733068509183,
+STORE, 94207145750528, 94207146463231,
+ERASE, 94207145750528, 94207145750528,
+STORE, 94207145750528, 94207145799679,
+STORE, 94207145799680, 94207146463231,
+ERASE, 94207145799680, 94207145799680,
+STORE, 94207145799680, 94207146344447,
+STORE, 94207146344448, 94207146442751,
+STORE, 94207146442752, 94207146463231,
+STORE, 140684504911872, 140684505083903,
+ERASE, 140684504911872, 140684504911872,
+STORE, 140684504911872, 140684504915967,
+STORE, 140684504915968, 140684505083903,
+ERASE, 140684504915968, 140684504915968,
+STORE, 140684504915968, 140684505038847,
+STORE, 140684505038848, 140684505071615,
+STORE, 140684505071616, 140684505079807,
+STORE, 140684505079808, 140684505083903,
+STORE, 140733068607488, 140733068611583,
+STORE, 140733068595200, 140733068607487,
+STORE, 46948290084864, 46948290093055,
+STORE, 46948290093056, 46948290101247,
+STORE, 46948290101248, 46948290207743,
+STORE, 46948290117632, 46948290207743,
+STORE, 46948290101248, 46948290117631,
+ERASE, 46948290117632, 46948290117632,
+STORE, 46948290117632, 46948290191359,
+STORE, 46948290191360, 46948290207743,
+STORE, 46948290170880, 46948290191359,
+STORE, 46948290117632, 46948290170879,
+ERASE, 46948290117632, 46948290117632,
+STORE, 46948290117632, 46948290170879,
+STORE, 46948290187264, 46948290191359,
+STORE, 46948290170880, 46948290187263,
+ERASE, 46948290170880, 46948290170880,
+STORE, 46948290170880, 46948290187263,
+STORE, 46948290199552, 46948290207743,
+STORE, 46948290191360, 46948290199551,
+ERASE, 46948290191360, 46948290191360,
+STORE, 46948290191360, 46948290199551,
+ERASE, 46948290199552, 46948290199552,
+STORE, 46948290199552, 46948290207743,
+STORE, 46948290207744, 46948292046847,
+STORE, 46948290347008, 46948292046847,
+STORE, 46948290207744, 46948290347007,
+ERASE, 46948290347008, 46948290347008,
+STORE, 46948290347008, 46948292005887,
+STORE, 46948292005888, 46948292046847,
+STORE, 46948291690496, 46948292005887,
+STORE, 46948290347008, 46948291690495,
+ERASE, 46948290347008, 46948290347008,
+STORE, 46948290347008, 46948291690495,
+STORE, 46948292001792, 46948292005887,
+STORE, 46948291690496, 46948292001791,
+ERASE, 46948291690496, 46948291690496,
+STORE, 46948291690496, 46948292001791,
+STORE, 46948292030464, 46948292046847,
+STORE, 46948292005888, 46948292030463,
+ERASE, 46948292005888, 46948292005888,
+STORE, 46948292005888, 46948292030463,
+ERASE, 46948292030464, 46948292030464,
+STORE, 46948292030464, 46948292046847,
+STORE, 46948292030464, 46948292059135,
+ERASE, 46948292005888, 46948292005888,
+STORE, 46948292005888, 46948292022271,
+STORE, 46948292022272, 46948292030463,
+ERASE, 46948290191360, 46948290191360,
+STORE, 46948290191360, 46948290195455,
+STORE, 46948290195456, 46948290199551,
+ERASE, 94207146442752, 94207146442752,
+STORE, 94207146442752, 94207146459135,
+STORE, 94207146459136, 94207146463231,
+ERASE, 140684505071616, 140684505071616,
+STORE, 140684505071616, 140684505075711,
+STORE, 140684505075712, 140684505079807,
+ERASE, 46948290084864, 46948290084864,
+STORE, 140737488347136, 140737488351231,
+STORE, 140726367158272, 140737488351231,
+ERASE, 140726367158272, 140726367158272,
+STORE, 140726367158272, 140726367162367,
+STORE, 94436124106752, 94436124819455,
+ERASE, 94436124106752, 94436124106752,
+STORE, 94436124106752, 94436124155903,
+STORE, 94436124155904, 94436124819455,
+ERASE, 94436124155904, 94436124155904,
+STORE, 94436124155904, 94436124700671,
+STORE, 94436124700672, 94436124798975,
+STORE, 94436124798976, 94436124819455,
+STORE, 140049025044480, 140049025216511,
+ERASE, 140049025044480, 140049025044480,
+STORE, 140049025044480, 140049025048575,
+STORE, 140049025048576, 140049025216511,
+ERASE, 140049025048576, 140049025048576,
+STORE, 140049025048576, 140049025171455,
+STORE, 140049025171456, 140049025204223,
+STORE, 140049025204224, 140049025212415,
+STORE, 140049025212416, 140049025216511,
+STORE, 140726367256576, 140726367260671,
+STORE, 140726367244288, 140726367256575,
+STORE, 47583769952256, 47583769960447,
+STORE, 47583769960448, 47583769968639,
+STORE, 47583769968640, 47583770075135,
+STORE, 47583769985024, 47583770075135,
+STORE, 47583769968640, 47583769985023,
+ERASE, 47583769985024, 47583769985024,
+STORE, 47583769985024, 47583770058751,
+STORE, 47583770058752, 47583770075135,
+STORE, 47583770038272, 47583770058751,
+STORE, 47583769985024, 47583770038271,
+ERASE, 47583769985024, 47583769985024,
+STORE, 47583769985024, 47583770038271,
+STORE, 47583770054656, 47583770058751,
+STORE, 47583770038272, 47583770054655,
+ERASE, 47583770038272, 47583770038272,
+STORE, 47583770038272, 47583770054655,
+STORE, 47583770066944, 47583770075135,
+STORE, 47583770058752, 47583770066943,
+ERASE, 47583770058752, 47583770058752,
+STORE, 47583770058752, 47583770066943,
+ERASE, 47583770066944, 47583770066944,
+STORE, 47583770066944, 47583770075135,
+STORE, 47583770075136, 47583771914239,
+STORE, 47583770214400, 47583771914239,
+STORE, 47583770075136, 47583770214399,
+ERASE, 47583770214400, 47583770214400,
+STORE, 47583770214400, 47583771873279,
+STORE, 47583771873280, 47583771914239,
+STORE, 47583771557888, 47583771873279,
+STORE, 47583770214400, 47583771557887,
+ERASE, 47583770214400, 47583770214400,
+STORE, 47583770214400, 47583771557887,
+STORE, 47583771869184, 47583771873279,
+STORE, 47583771557888, 47583771869183,
+ERASE, 47583771557888, 47583771557888,
+STORE, 47583771557888, 47583771869183,
+STORE, 47583771897856, 47583771914239,
+STORE, 47583771873280, 47583771897855,
+ERASE, 47583771873280, 47583771873280,
+STORE, 47583771873280, 47583771897855,
+ERASE, 47583771897856, 47583771897856,
+STORE, 47583771897856, 47583771914239,
+STORE, 47583771897856, 47583771926527,
+ERASE, 47583771873280, 47583771873280,
+STORE, 47583771873280, 47583771889663,
+STORE, 47583771889664, 47583771897855,
+ERASE, 47583770058752, 47583770058752,
+STORE, 47583770058752, 47583770062847,
+STORE, 47583770062848, 47583770066943,
+ERASE, 94436124798976, 94436124798976,
+STORE, 94436124798976, 94436124815359,
+STORE, 94436124815360, 94436124819455,
+ERASE, 140049025204224, 140049025204224,
+STORE, 140049025204224, 140049025208319,
+STORE, 140049025208320, 140049025212415,
+ERASE, 47583769952256, 47583769952256,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727116099584, 140737488351231,
+ERASE, 140727116099584, 140727116099584,
+STORE, 140727116099584, 140727116103679,
+STORE, 94166319734784, 94166320447487,
+ERASE, 94166319734784, 94166319734784,
+STORE, 94166319734784, 94166319783935,
+STORE, 94166319783936, 94166320447487,
+ERASE, 94166319783936, 94166319783936,
+STORE, 94166319783936, 94166320328703,
+STORE, 94166320328704, 94166320427007,
+STORE, 94166320427008, 94166320447487,
+STORE, 139976559542272, 139976559714303,
+ERASE, 139976559542272, 139976559542272,
+STORE, 139976559542272, 139976559546367,
+STORE, 139976559546368, 139976559714303,
+ERASE, 139976559546368, 139976559546368,
+STORE, 139976559546368, 139976559669247,
+STORE, 139976559669248, 139976559702015,
+STORE, 139976559702016, 139976559710207,
+STORE, 139976559710208, 139976559714303,
+STORE, 140727116222464, 140727116226559,
+STORE, 140727116210176, 140727116222463,
+STORE, 47656235454464, 47656235462655,
+STORE, 47656235462656, 47656235470847,
+STORE, 47656235470848, 47656235577343,
+STORE, 47656235487232, 47656235577343,
+STORE, 47656235470848, 47656235487231,
+ERASE, 47656235487232, 47656235487232,
+STORE, 47656235487232, 47656235560959,
+STORE, 47656235560960, 47656235577343,
+STORE, 47656235540480, 47656235560959,
+STORE, 47656235487232, 47656235540479,
+ERASE, 47656235487232, 47656235487232,
+STORE, 47656235487232, 47656235540479,
+STORE, 47656235556864, 47656235560959,
+STORE, 47656235540480, 47656235556863,
+ERASE, 47656235540480, 47656235540480,
+STORE, 47656235540480, 47656235556863,
+STORE, 47656235569152, 47656235577343,
+STORE, 47656235560960, 47656235569151,
+ERASE, 47656235560960, 47656235560960,
+STORE, 47656235560960, 47656235569151,
+ERASE, 47656235569152, 47656235569152,
+STORE, 47656235569152, 47656235577343,
+STORE, 47656235577344, 47656237416447,
+STORE, 47656235716608, 47656237416447,
+STORE, 47656235577344, 47656235716607,
+ERASE, 47656235716608, 47656235716608,
+STORE, 47656235716608, 47656237375487,
+STORE, 47656237375488, 47656237416447,
+STORE, 47656237060096, 47656237375487,
+STORE, 47656235716608, 47656237060095,
+ERASE, 47656235716608, 47656235716608,
+STORE, 47656235716608, 47656237060095,
+STORE, 47656237371392, 47656237375487,
+STORE, 47656237060096, 47656237371391,
+ERASE, 47656237060096, 47656237060096,
+STORE, 47656237060096, 47656237371391,
+STORE, 47656237400064, 47656237416447,
+STORE, 47656237375488, 47656237400063,
+ERASE, 47656237375488, 47656237375488,
+STORE, 47656237375488, 47656237400063,
+ERASE, 47656237400064, 47656237400064,
+STORE, 47656237400064, 47656237416447,
+STORE, 47656237400064, 47656237428735,
+ERASE, 47656237375488, 47656237375488,
+STORE, 47656237375488, 47656237391871,
+STORE, 47656237391872, 47656237400063,
+ERASE, 47656235560960, 47656235560960,
+STORE, 47656235560960, 47656235565055,
+STORE, 47656235565056, 47656235569151,
+ERASE, 94166320427008, 94166320427008,
+STORE, 94166320427008, 94166320443391,
+STORE, 94166320443392, 94166320447487,
+ERASE, 139976559702016, 139976559702016,
+STORE, 139976559702016, 139976559706111,
+STORE, 139976559706112, 139976559710207,
+ERASE, 47656235454464, 47656235454464,
+STORE, 94166332153856, 94166332289023,
+STORE, 140737488347136, 140737488351231,
+STORE, 140726412816384, 140737488351231,
+ERASE, 140726412816384, 140726412816384,
+STORE, 140726412816384, 140726412820479,
+STORE, 94094884507648, 94094885220351,
+ERASE, 94094884507648, 94094884507648,
+STORE, 94094884507648, 94094884556799,
+STORE, 94094884556800, 94094885220351,
+ERASE, 94094884556800, 94094884556800,
+STORE, 94094884556800, 94094885101567,
+STORE, 94094885101568, 94094885199871,
+STORE, 94094885199872, 94094885220351,
+STORE, 139773773938688, 139773774110719,
+ERASE, 139773773938688, 139773773938688,
+STORE, 139773773938688, 139773773942783,
+STORE, 139773773942784, 139773774110719,
+ERASE, 139773773942784, 139773773942784,
+STORE, 139773773942784, 139773774065663,
+STORE, 139773774065664, 139773774098431,
+STORE, 139773774098432, 139773774106623,
+STORE, 139773774106624, 139773774110719,
+STORE, 140726412963840, 140726412967935,
+STORE, 140726412951552, 140726412963839,
+STORE, 47859021058048, 47859021066239,
+STORE, 47859021066240, 47859021074431,
+STORE, 47859021074432, 47859021180927,
+STORE, 47859021090816, 47859021180927,
+STORE, 47859021074432, 47859021090815,
+ERASE, 47859021090816, 47859021090816,
+STORE, 47859021090816, 47859021164543,
+STORE, 47859021164544, 47859021180927,
+STORE, 47859021144064, 47859021164543,
+STORE, 47859021090816, 47859021144063,
+ERASE, 47859021090816, 47859021090816,
+STORE, 47859021090816, 47859021144063,
+STORE, 47859021160448, 47859021164543,
+STORE, 47859021144064, 47859021160447,
+ERASE, 47859021144064, 47859021144064,
+STORE, 47859021144064, 47859021160447,
+STORE, 47859021172736, 47859021180927,
+STORE, 47859021164544, 47859021172735,
+ERASE, 47859021164544, 47859021164544,
+STORE, 47859021164544, 47859021172735,
+ERASE, 47859021172736, 47859021172736,
+STORE, 47859021172736, 47859021180927,
+STORE, 47859021180928, 47859023020031,
+STORE, 47859021320192, 47859023020031,
+STORE, 47859021180928, 47859021320191,
+ERASE, 47859021320192, 47859021320192,
+STORE, 47859021320192, 47859022979071,
+STORE, 47859022979072, 47859023020031,
+STORE, 47859022663680, 47859022979071,
+STORE, 47859021320192, 47859022663679,
+ERASE, 47859021320192, 47859021320192,
+STORE, 47859021320192, 47859022663679,
+STORE, 47859022974976, 47859022979071,
+STORE, 47859022663680, 47859022974975,
+ERASE, 47859022663680, 47859022663680,
+STORE, 47859022663680, 47859022974975,
+STORE, 47859023003648, 47859023020031,
+STORE, 47859022979072, 47859023003647,
+ERASE, 47859022979072, 47859022979072,
+STORE, 47859022979072, 47859023003647,
+ERASE, 47859023003648, 47859023003648,
+STORE, 47859023003648, 47859023020031,
+STORE, 47859023003648, 47859023032319,
+ERASE, 47859022979072, 47859022979072,
+STORE, 47859022979072, 47859022995455,
+STORE, 47859022995456, 47859023003647,
+ERASE, 47859021164544, 47859021164544,
+STORE, 47859021164544, 47859021168639,
+STORE, 47859021168640, 47859021172735,
+ERASE, 94094885199872, 94094885199872,
+STORE, 94094885199872, 94094885216255,
+STORE, 94094885216256, 94094885220351,
+ERASE, 139773774098432, 139773774098432,
+STORE, 139773774098432, 139773774102527,
+STORE, 139773774102528, 139773774106623,
+ERASE, 47859021058048, 47859021058048,
+STORE, 94094901108736, 94094901243903,
+STORE, 140737488347136, 140737488351231,
+STORE, 140736567963648, 140737488351231,
+ERASE, 140736567963648, 140736567963648,
+STORE, 140736567963648, 140736567967743,
+STORE, 94924425748480, 94924426461183,
+ERASE, 94924425748480, 94924425748480,
+STORE, 94924425748480, 94924425797631,
+STORE, 94924425797632, 94924426461183,
+ERASE, 94924425797632, 94924425797632,
+STORE, 94924425797632, 94924426342399,
+STORE, 94924426342400, 94924426440703,
+STORE, 94924426440704, 94924426461183,
+STORE, 140042126319616, 140042126491647,
+ERASE, 140042126319616, 140042126319616,
+STORE, 140042126319616, 140042126323711,
+STORE, 140042126323712, 140042126491647,
+ERASE, 140042126323712, 140042126323712,
+STORE, 140042126323712, 140042126446591,
+STORE, 140042126446592, 140042126479359,
+STORE, 140042126479360, 140042126487551,
+STORE, 140042126487552, 140042126491647,
+STORE, 140736568672256, 140736568676351,
+STORE, 140736568659968, 140736568672255,
+STORE, 47590668677120, 47590668685311,
+STORE, 47590668685312, 47590668693503,
+STORE, 47590668693504, 47590668799999,
+STORE, 47590668709888, 47590668799999,
+STORE, 47590668693504, 47590668709887,
+ERASE, 47590668709888, 47590668709888,
+STORE, 47590668709888, 47590668783615,
+STORE, 47590668783616, 47590668799999,
+STORE, 47590668763136, 47590668783615,
+STORE, 47590668709888, 47590668763135,
+ERASE, 47590668709888, 47590668709888,
+STORE, 47590668709888, 47590668763135,
+STORE, 47590668779520, 47590668783615,
+STORE, 47590668763136, 47590668779519,
+ERASE, 47590668763136, 47590668763136,
+STORE, 47590668763136, 47590668779519,
+STORE, 47590668791808, 47590668799999,
+STORE, 47590668783616, 47590668791807,
+ERASE, 47590668783616, 47590668783616,
+STORE, 47590668783616, 47590668791807,
+ERASE, 47590668791808, 47590668791808,
+STORE, 47590668791808, 47590668799999,
+STORE, 47590668800000, 47590670639103,
+STORE, 47590668939264, 47590670639103,
+STORE, 47590668800000, 47590668939263,
+ERASE, 47590668939264, 47590668939264,
+STORE, 47590668939264, 47590670598143,
+STORE, 47590670598144, 47590670639103,
+STORE, 47590670282752, 47590670598143,
+STORE, 47590668939264, 47590670282751,
+ERASE, 47590668939264, 47590668939264,
+STORE, 47590668939264, 47590670282751,
+STORE, 47590670594048, 47590670598143,
+STORE, 47590670282752, 47590670594047,
+ERASE, 47590670282752, 47590670282752,
+STORE, 47590670282752, 47590670594047,
+STORE, 47590670622720, 47590670639103,
+STORE, 47590670598144, 47590670622719,
+ERASE, 47590670598144, 47590670598144,
+STORE, 47590670598144, 47590670622719,
+ERASE, 47590670622720, 47590670622720,
+STORE, 47590670622720, 47590670639103,
+STORE, 47590670622720, 47590670651391,
+ERASE, 47590670598144, 47590670598144,
+STORE, 47590670598144, 47590670614527,
+STORE, 47590670614528, 47590670622719,
+ERASE, 47590668783616, 47590668783616,
+STORE, 47590668783616, 47590668787711,
+STORE, 47590668787712, 47590668791807,
+ERASE, 94924426440704, 94924426440704,
+STORE, 94924426440704, 94924426457087,
+STORE, 94924426457088, 94924426461183,
+ERASE, 140042126479360, 140042126479360,
+STORE, 140042126479360, 140042126483455,
+STORE, 140042126483456, 140042126487551,
+ERASE, 47590668677120, 47590668677120,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733281439744, 140737488351231,
+ERASE, 140733281439744, 140733281439744,
+STORE, 140733281439744, 140733281443839,
+STORE, 94490667069440, 94490667782143,
+ERASE, 94490667069440, 94490667069440,
+STORE, 94490667069440, 94490667118591,
+STORE, 94490667118592, 94490667782143,
+ERASE, 94490667118592, 94490667118592,
+STORE, 94490667118592, 94490667663359,
+STORE, 94490667663360, 94490667761663,
+STORE, 94490667761664, 94490667782143,
+STORE, 139878215118848, 139878215290879,
+ERASE, 139878215118848, 139878215118848,
+STORE, 139878215118848, 139878215122943,
+STORE, 139878215122944, 139878215290879,
+ERASE, 139878215122944, 139878215122944,
+STORE, 139878215122944, 139878215245823,
+STORE, 139878215245824, 139878215278591,
+STORE, 139878215278592, 139878215286783,
+STORE, 139878215286784, 139878215290879,
+STORE, 140733281464320, 140733281468415,
+STORE, 140733281452032, 140733281464319,
+STORE, 47754579877888, 47754579886079,
+STORE, 47754579886080, 47754579894271,
+STORE, 47754579894272, 47754580000767,
+STORE, 47754579910656, 47754580000767,
+STORE, 47754579894272, 47754579910655,
+ERASE, 47754579910656, 47754579910656,
+STORE, 47754579910656, 47754579984383,
+STORE, 47754579984384, 47754580000767,
+STORE, 47754579963904, 47754579984383,
+STORE, 47754579910656, 47754579963903,
+ERASE, 47754579910656, 47754579910656,
+STORE, 47754579910656, 47754579963903,
+STORE, 47754579980288, 47754579984383,
+STORE, 47754579963904, 47754579980287,
+ERASE, 47754579963904, 47754579963904,
+STORE, 47754579963904, 47754579980287,
+STORE, 47754579992576, 47754580000767,
+STORE, 47754579984384, 47754579992575,
+ERASE, 47754579984384, 47754579984384,
+STORE, 47754579984384, 47754579992575,
+ERASE, 47754579992576, 47754579992576,
+STORE, 47754579992576, 47754580000767,
+STORE, 47754580000768, 47754581839871,
+STORE, 47754580140032, 47754581839871,
+STORE, 47754580000768, 47754580140031,
+ERASE, 47754580140032, 47754580140032,
+STORE, 47754580140032, 47754581798911,
+STORE, 47754581798912, 47754581839871,
+STORE, 47754581483520, 47754581798911,
+STORE, 47754580140032, 47754581483519,
+ERASE, 47754580140032, 47754580140032,
+STORE, 47754580140032, 47754581483519,
+STORE, 47754581794816, 47754581798911,
+STORE, 47754581483520, 47754581794815,
+ERASE, 47754581483520, 47754581483520,
+STORE, 47754581483520, 47754581794815,
+STORE, 47754581823488, 47754581839871,
+STORE, 47754581798912, 47754581823487,
+ERASE, 47754581798912, 47754581798912,
+STORE, 47754581798912, 47754581823487,
+ERASE, 47754581823488, 47754581823488,
+STORE, 47754581823488, 47754581839871,
+STORE, 47754581823488, 47754581852159,
+ERASE, 47754581798912, 47754581798912,
+STORE, 47754581798912, 47754581815295,
+STORE, 47754581815296, 47754581823487,
+ERASE, 47754579984384, 47754579984384,
+STORE, 47754579984384, 47754579988479,
+STORE, 47754579988480, 47754579992575,
+ERASE, 94490667761664, 94490667761664,
+STORE, 94490667761664, 94490667778047,
+STORE, 94490667778048, 94490667782143,
+ERASE, 139878215278592, 139878215278592,
+STORE, 139878215278592, 139878215282687,
+STORE, 139878215282688, 139878215286783,
+ERASE, 47754579877888, 47754579877888,
+STORE, 94490669649920, 94490669785087,
+STORE, 140737488347136, 140737488351231,
+STORE, 140735382188032, 140737488351231,
+ERASE, 140735382188032, 140735382188032,
+STORE, 140735382188032, 140735382192127,
+STORE, 94150181302272, 94150182014975,
+ERASE, 94150181302272, 94150181302272,
+STORE, 94150181302272, 94150181351423,
+STORE, 94150181351424, 94150182014975,
+ERASE, 94150181351424, 94150181351424,
+STORE, 94150181351424, 94150181896191,
+STORE, 94150181896192, 94150181994495,
+STORE, 94150181994496, 94150182014975,
+STORE, 139679752458240, 139679752630271,
+ERASE, 139679752458240, 139679752458240,
+STORE, 139679752458240, 139679752462335,
+STORE, 139679752462336, 139679752630271,
+ERASE, 139679752462336, 139679752462336,
+STORE, 139679752462336, 139679752585215,
+STORE, 139679752585216, 139679752617983,
+STORE, 139679752617984, 139679752626175,
+STORE, 139679752626176, 139679752630271,
+STORE, 140735382536192, 140735382540287,
+STORE, 140735382523904, 140735382536191,
+STORE, 47953042538496, 47953042546687,
+STORE, 47953042546688, 47953042554879,
+STORE, 47953042554880, 47953042661375,
+STORE, 47953042571264, 47953042661375,
+STORE, 47953042554880, 47953042571263,
+ERASE, 47953042571264, 47953042571264,
+STORE, 47953042571264, 47953042644991,
+STORE, 47953042644992, 47953042661375,
+STORE, 47953042624512, 47953042644991,
+STORE, 47953042571264, 47953042624511,
+ERASE, 47953042571264, 47953042571264,
+STORE, 47953042571264, 47953042624511,
+STORE, 47953042640896, 47953042644991,
+STORE, 47953042624512, 47953042640895,
+ERASE, 47953042624512, 47953042624512,
+STORE, 47953042624512, 47953042640895,
+STORE, 47953042653184, 47953042661375,
+STORE, 47953042644992, 47953042653183,
+ERASE, 47953042644992, 47953042644992,
+STORE, 47953042644992, 47953042653183,
+ERASE, 47953042653184, 47953042653184,
+STORE, 47953042653184, 47953042661375,
+STORE, 47953042661376, 47953044500479,
+STORE, 47953042800640, 47953044500479,
+STORE, 47953042661376, 47953042800639,
+ERASE, 47953042800640, 47953042800640,
+STORE, 47953042800640, 47953044459519,
+STORE, 47953044459520, 47953044500479,
+STORE, 47953044144128, 47953044459519,
+STORE, 47953042800640, 47953044144127,
+ERASE, 47953042800640, 47953042800640,
+STORE, 47953042800640, 47953044144127,
+STORE, 47953044455424, 47953044459519,
+STORE, 47953044144128, 47953044455423,
+ERASE, 47953044144128, 47953044144128,
+STORE, 47953044144128, 47953044455423,
+STORE, 47953044484096, 47953044500479,
+STORE, 47953044459520, 47953044484095,
+ERASE, 47953044459520, 47953044459520,
+STORE, 47953044459520, 47953044484095,
+ERASE, 47953044484096, 47953044484096,
+STORE, 47953044484096, 47953044500479,
+STORE, 47953044484096, 47953044512767,
+ERASE, 47953044459520, 47953044459520,
+STORE, 47953044459520, 47953044475903,
+STORE, 47953044475904, 47953044484095,
+ERASE, 47953042644992, 47953042644992,
+STORE, 47953042644992, 47953042649087,
+STORE, 47953042649088, 47953042653183,
+ERASE, 94150181994496, 94150181994496,
+STORE, 94150181994496, 94150182010879,
+STORE, 94150182010880, 94150182014975,
+ERASE, 139679752617984, 139679752617984,
+STORE, 139679752617984, 139679752622079,
+STORE, 139679752622080, 139679752626175,
+ERASE, 47953042538496, 47953042538496,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737044123648, 140737488351231,
+ERASE, 140737044123648, 140737044123648,
+STORE, 140737044123648, 140737044127743,
+STORE, 94425324294144, 94425325006847,
+ERASE, 94425324294144, 94425324294144,
+STORE, 94425324294144, 94425324343295,
+STORE, 94425324343296, 94425325006847,
+ERASE, 94425324343296, 94425324343296,
+STORE, 94425324343296, 94425324888063,
+STORE, 94425324888064, 94425324986367,
+STORE, 94425324986368, 94425325006847,
+STORE, 140382015016960, 140382015188991,
+ERASE, 140382015016960, 140382015016960,
+STORE, 140382015016960, 140382015021055,
+STORE, 140382015021056, 140382015188991,
+ERASE, 140382015021056, 140382015021056,
+STORE, 140382015021056, 140382015143935,
+STORE, 140382015143936, 140382015176703,
+STORE, 140382015176704, 140382015184895,
+STORE, 140382015184896, 140382015188991,
+STORE, 140737045585920, 140737045590015,
+STORE, 140737045573632, 140737045585919,
+STORE, 47250779979776, 47250779987967,
+STORE, 47250779987968, 47250779996159,
+STORE, 47250779996160, 47250780102655,
+STORE, 47250780012544, 47250780102655,
+STORE, 47250779996160, 47250780012543,
+ERASE, 47250780012544, 47250780012544,
+STORE, 47250780012544, 47250780086271,
+STORE, 47250780086272, 47250780102655,
+STORE, 47250780065792, 47250780086271,
+STORE, 47250780012544, 47250780065791,
+ERASE, 47250780012544, 47250780012544,
+STORE, 47250780012544, 47250780065791,
+STORE, 47250780082176, 47250780086271,
+STORE, 47250780065792, 47250780082175,
+ERASE, 47250780065792, 47250780065792,
+STORE, 47250780065792, 47250780082175,
+STORE, 47250780094464, 47250780102655,
+STORE, 47250780086272, 47250780094463,
+ERASE, 47250780086272, 47250780086272,
+STORE, 47250780086272, 47250780094463,
+ERASE, 47250780094464, 47250780094464,
+STORE, 47250780094464, 47250780102655,
+STORE, 47250780102656, 47250781941759,
+STORE, 47250780241920, 47250781941759,
+STORE, 47250780102656, 47250780241919,
+ERASE, 47250780241920, 47250780241920,
+STORE, 47250780241920, 47250781900799,
+STORE, 47250781900800, 47250781941759,
+STORE, 47250781585408, 47250781900799,
+STORE, 47250780241920, 47250781585407,
+ERASE, 47250780241920, 47250780241920,
+STORE, 47250780241920, 47250781585407,
+STORE, 47250781896704, 47250781900799,
+STORE, 47250781585408, 47250781896703,
+ERASE, 47250781585408, 47250781585408,
+STORE, 47250781585408, 47250781896703,
+STORE, 47250781925376, 47250781941759,
+STORE, 47250781900800, 47250781925375,
+ERASE, 47250781900800, 47250781900800,
+STORE, 47250781900800, 47250781925375,
+ERASE, 47250781925376, 47250781925376,
+STORE, 47250781925376, 47250781941759,
+STORE, 47250781925376, 47250781954047,
+ERASE, 47250781900800, 47250781900800,
+STORE, 47250781900800, 47250781917183,
+STORE, 47250781917184, 47250781925375,
+ERASE, 47250780086272, 47250780086272,
+STORE, 47250780086272, 47250780090367,
+STORE, 47250780090368, 47250780094463,
+ERASE, 94425324986368, 94425324986368,
+STORE, 94425324986368, 94425325002751,
+STORE, 94425325002752, 94425325006847,
+ERASE, 140382015176704, 140382015176704,
+STORE, 140382015176704, 140382015180799,
+STORE, 140382015180800, 140382015184895,
+ERASE, 47250779979776, 47250779979776,
+STORE, 94425351438336, 94425351573503,
+STORE, 140737488347136, 140737488351231,
+STORE, 140736801144832, 140737488351231,
+ERASE, 140736801144832, 140736801144832,
+STORE, 140736801144832, 140736801148927,
+STORE, 94629429358592, 94629430071295,
+ERASE, 94629429358592, 94629429358592,
+STORE, 94629429358592, 94629429407743,
+STORE, 94629429407744, 94629430071295,
+ERASE, 94629429407744, 94629429407744,
+STORE, 94629429407744, 94629429952511,
+STORE, 94629429952512, 94629430050815,
+STORE, 94629430050816, 94629430071295,
+STORE, 139801685483520, 139801685655551,
+ERASE, 139801685483520, 139801685483520,
+STORE, 139801685483520, 139801685487615,
+STORE, 139801685487616, 139801685655551,
+ERASE, 139801685487616, 139801685487616,
+STORE, 139801685487616, 139801685610495,
+STORE, 139801685610496, 139801685643263,
+STORE, 139801685643264, 139801685651455,
+STORE, 139801685651456, 139801685655551,
+STORE, 140736801198080, 140736801202175,
+STORE, 140736801185792, 140736801198079,
+STORE, 47831109513216, 47831109521407,
+STORE, 47831109521408, 47831109529599,
+STORE, 47831109529600, 47831109636095,
+STORE, 47831109545984, 47831109636095,
+STORE, 47831109529600, 47831109545983,
+ERASE, 47831109545984, 47831109545984,
+STORE, 47831109545984, 47831109619711,
+STORE, 47831109619712, 47831109636095,
+STORE, 47831109599232, 47831109619711,
+STORE, 47831109545984, 47831109599231,
+ERASE, 47831109545984, 47831109545984,
+STORE, 47831109545984, 47831109599231,
+STORE, 47831109615616, 47831109619711,
+STORE, 47831109599232, 47831109615615,
+ERASE, 47831109599232, 47831109599232,
+STORE, 47831109599232, 47831109615615,
+STORE, 47831109627904, 47831109636095,
+STORE, 47831109619712, 47831109627903,
+ERASE, 47831109619712, 47831109619712,
+STORE, 47831109619712, 47831109627903,
+ERASE, 47831109627904, 47831109627904,
+STORE, 47831109627904, 47831109636095,
+STORE, 47831109636096, 47831111475199,
+STORE, 47831109775360, 47831111475199,
+STORE, 47831109636096, 47831109775359,
+ERASE, 47831109775360, 47831109775360,
+STORE, 47831109775360, 47831111434239,
+STORE, 47831111434240, 47831111475199,
+STORE, 47831111118848, 47831111434239,
+STORE, 47831109775360, 47831111118847,
+ERASE, 47831109775360, 47831109775360,
+STORE, 47831109775360, 47831111118847,
+STORE, 47831111430144, 47831111434239,
+STORE, 47831111118848, 47831111430143,
+ERASE, 47831111118848, 47831111118848,
+STORE, 47831111118848, 47831111430143,
+STORE, 47831111458816, 47831111475199,
+STORE, 47831111434240, 47831111458815,
+ERASE, 47831111434240, 47831111434240,
+STORE, 47831111434240, 47831111458815,
+ERASE, 47831111458816, 47831111458816,
+STORE, 47831111458816, 47831111475199,
+STORE, 47831111458816, 47831111487487,
+ERASE, 47831111434240, 47831111434240,
+STORE, 47831111434240, 47831111450623,
+STORE, 47831111450624, 47831111458815,
+ERASE, 47831109619712, 47831109619712,
+STORE, 47831109619712, 47831109623807,
+STORE, 47831109623808, 47831109627903,
+ERASE, 94629430050816, 94629430050816,
+STORE, 94629430050816, 94629430067199,
+STORE, 94629430067200, 94629430071295,
+ERASE, 139801685643264, 139801685643264,
+STORE, 139801685643264, 139801685647359,
+STORE, 139801685647360, 139801685651455,
+ERASE, 47831109513216, 47831109513216,
+STORE, 140737488347136, 140737488351231,
+STORE, 140729419612160, 140737488351231,
+ERASE, 140729419612160, 140729419612160,
+STORE, 140729419612160, 140729419616255,
+STORE, 94443354148864, 94443354861567,
+ERASE, 94443354148864, 94443354148864,
+STORE, 94443354148864, 94443354198015,
+STORE, 94443354198016, 94443354861567,
+ERASE, 94443354198016, 94443354198016,
+STORE, 94443354198016, 94443354742783,
+STORE, 94443354742784, 94443354841087,
+STORE, 94443354841088, 94443354861567,
+STORE, 139741700038656, 139741700210687,
+ERASE, 139741700038656, 139741700038656,
+STORE, 139741700038656, 139741700042751,
+STORE, 139741700042752, 139741700210687,
+ERASE, 139741700042752, 139741700042752,
+STORE, 139741700042752, 139741700165631,
+STORE, 139741700165632, 139741700198399,
+STORE, 139741700198400, 139741700206591,
+STORE, 139741700206592, 139741700210687,
+STORE, 140729420574720, 140729420578815,
+STORE, 140729420562432, 140729420574719,
+STORE, 47891094958080, 47891094966271,
+STORE, 47891094966272, 47891094974463,
+STORE, 47891094974464, 47891095080959,
+STORE, 47891094990848, 47891095080959,
+STORE, 47891094974464, 47891094990847,
+ERASE, 47891094990848, 47891094990848,
+STORE, 47891094990848, 47891095064575,
+STORE, 47891095064576, 47891095080959,
+STORE, 47891095044096, 47891095064575,
+STORE, 47891094990848, 47891095044095,
+ERASE, 47891094990848, 47891094990848,
+STORE, 47891094990848, 47891095044095,
+STORE, 47891095060480, 47891095064575,
+STORE, 47891095044096, 47891095060479,
+ERASE, 47891095044096, 47891095044096,
+STORE, 47891095044096, 47891095060479,
+STORE, 47891095072768, 47891095080959,
+STORE, 47891095064576, 47891095072767,
+ERASE, 47891095064576, 47891095064576,
+STORE, 47891095064576, 47891095072767,
+ERASE, 47891095072768, 47891095072768,
+STORE, 47891095072768, 47891095080959,
+STORE, 47891095080960, 47891096920063,
+STORE, 47891095220224, 47891096920063,
+STORE, 47891095080960, 47891095220223,
+ERASE, 47891095220224, 47891095220224,
+STORE, 47891095220224, 47891096879103,
+STORE, 47891096879104, 47891096920063,
+STORE, 47891096563712, 47891096879103,
+STORE, 47891095220224, 47891096563711,
+ERASE, 47891095220224, 47891095220224,
+STORE, 47891095220224, 47891096563711,
+STORE, 47891096875008, 47891096879103,
+STORE, 47891096563712, 47891096875007,
+ERASE, 47891096563712, 47891096563712,
+STORE, 47891096563712, 47891096875007,
+STORE, 47891096903680, 47891096920063,
+STORE, 47891096879104, 47891096903679,
+ERASE, 47891096879104, 47891096879104,
+STORE, 47891096879104, 47891096903679,
+ERASE, 47891096903680, 47891096903680,
+STORE, 47891096903680, 47891096920063,
+STORE, 47891096903680, 47891096932351,
+ERASE, 47891096879104, 47891096879104,
+STORE, 47891096879104, 47891096895487,
+STORE, 47891096895488, 47891096903679,
+ERASE, 47891095064576, 47891095064576,
+STORE, 47891095064576, 47891095068671,
+STORE, 47891095068672, 47891095072767,
+ERASE, 94443354841088, 94443354841088,
+STORE, 94443354841088, 94443354857471,
+STORE, 94443354857472, 94443354861567,
+ERASE, 139741700198400, 139741700198400,
+STORE, 139741700198400, 139741700202495,
+STORE, 139741700202496, 139741700206591,
+ERASE, 47891094958080, 47891094958080,
+STORE, 94443360825344, 94443360960511,
+STORE, 140737488347136, 140737488351231,
+STORE, 140722961661952, 140737488351231,
+ERASE, 140722961661952, 140722961661952,
+STORE, 140722961661952, 140722961666047,
+STORE, 94878388944896, 94878389657599,
+ERASE, 94878388944896, 94878388944896,
+STORE, 94878388944896, 94878388994047,
+STORE, 94878388994048, 94878389657599,
+ERASE, 94878388994048, 94878388994048,
+STORE, 94878388994048, 94878389538815,
+STORE, 94878389538816, 94878389637119,
+STORE, 94878389637120, 94878389657599,
+STORE, 140210690056192, 140210690228223,
+ERASE, 140210690056192, 140210690056192,
+STORE, 140210690056192, 140210690060287,
+STORE, 140210690060288, 140210690228223,
+ERASE, 140210690060288, 140210690060288,
+STORE, 140210690060288, 140210690183167,
+STORE, 140210690183168, 140210690215935,
+STORE, 140210690215936, 140210690224127,
+STORE, 140210690224128, 140210690228223,
+STORE, 140722963148800, 140722963152895,
+STORE, 140722963136512, 140722963148799,
+STORE, 47422104940544, 47422104948735,
+STORE, 47422104948736, 47422104956927,
+STORE, 47422104956928, 47422105063423,
+STORE, 47422104973312, 47422105063423,
+STORE, 47422104956928, 47422104973311,
+ERASE, 47422104973312, 47422104973312,
+STORE, 47422104973312, 47422105047039,
+STORE, 47422105047040, 47422105063423,
+STORE, 47422105026560, 47422105047039,
+STORE, 47422104973312, 47422105026559,
+ERASE, 47422104973312, 47422104973312,
+STORE, 47422104973312, 47422105026559,
+STORE, 47422105042944, 47422105047039,
+STORE, 47422105026560, 47422105042943,
+ERASE, 47422105026560, 47422105026560,
+STORE, 47422105026560, 47422105042943,
+STORE, 47422105055232, 47422105063423,
+STORE, 47422105047040, 47422105055231,
+ERASE, 47422105047040, 47422105047040,
+STORE, 47422105047040, 47422105055231,
+ERASE, 47422105055232, 47422105055232,
+STORE, 47422105055232, 47422105063423,
+STORE, 47422105063424, 47422106902527,
+STORE, 47422105202688, 47422106902527,
+STORE, 47422105063424, 47422105202687,
+ERASE, 47422105202688, 47422105202688,
+STORE, 47422105202688, 47422106861567,
+STORE, 47422106861568, 47422106902527,
+STORE, 47422106546176, 47422106861567,
+STORE, 47422105202688, 47422106546175,
+ERASE, 47422105202688, 47422105202688,
+STORE, 47422105202688, 47422106546175,
+STORE, 47422106857472, 47422106861567,
+STORE, 47422106546176, 47422106857471,
+ERASE, 47422106546176, 47422106546176,
+STORE, 47422106546176, 47422106857471,
+STORE, 47422106886144, 47422106902527,
+STORE, 47422106861568, 47422106886143,
+ERASE, 47422106861568, 47422106861568,
+STORE, 47422106861568, 47422106886143,
+ERASE, 47422106886144, 47422106886144,
+STORE, 47422106886144, 47422106902527,
+STORE, 47422106886144, 47422106914815,
+ERASE, 47422106861568, 47422106861568,
+STORE, 47422106861568, 47422106877951,
+STORE, 47422106877952, 47422106886143,
+ERASE, 47422105047040, 47422105047040,
+STORE, 47422105047040, 47422105051135,
+STORE, 47422105051136, 47422105055231,
+ERASE, 94878389637120, 94878389637120,
+STORE, 94878389637120, 94878389653503,
+STORE, 94878389653504, 94878389657599,
+ERASE, 140210690215936, 140210690215936,
+STORE, 140210690215936, 140210690220031,
+STORE, 140210690220032, 140210690224127,
+ERASE, 47422104940544, 47422104940544,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727690309632, 140737488351231,
+ERASE, 140727690309632, 140727690309632,
+STORE, 140727690309632, 140727690313727,
+STORE, 94121892208640, 94121892921343,
+ERASE, 94121892208640, 94121892208640,
+STORE, 94121892208640, 94121892257791,
+STORE, 94121892257792, 94121892921343,
+ERASE, 94121892257792, 94121892257792,
+STORE, 94121892257792, 94121892802559,
+STORE, 94121892802560, 94121892900863,
+STORE, 94121892900864, 94121892921343,
+STORE, 140662438326272, 140662438498303,
+ERASE, 140662438326272, 140662438326272,
+STORE, 140662438326272, 140662438330367,
+STORE, 140662438330368, 140662438498303,
+ERASE, 140662438330368, 140662438330368,
+STORE, 140662438330368, 140662438453247,
+STORE, 140662438453248, 140662438486015,
+STORE, 140662438486016, 140662438494207,
+STORE, 140662438494208, 140662438498303,
+STORE, 140727690379264, 140727690383359,
+STORE, 140727690366976, 140727690379263,
+STORE, 46970356670464, 46970356678655,
+STORE, 46970356678656, 46970356686847,
+STORE, 46970356686848, 46970356793343,
+STORE, 46970356703232, 46970356793343,
+STORE, 46970356686848, 46970356703231,
+ERASE, 46970356703232, 46970356703232,
+STORE, 46970356703232, 46970356776959,
+STORE, 46970356776960, 46970356793343,
+STORE, 46970356756480, 46970356776959,
+STORE, 46970356703232, 46970356756479,
+ERASE, 46970356703232, 46970356703232,
+STORE, 46970356703232, 46970356756479,
+STORE, 46970356772864, 46970356776959,
+STORE, 46970356756480, 46970356772863,
+ERASE, 46970356756480, 46970356756480,
+STORE, 46970356756480, 46970356772863,
+STORE, 46970356785152, 46970356793343,
+STORE, 46970356776960, 46970356785151,
+ERASE, 46970356776960, 46970356776960,
+STORE, 46970356776960, 46970356785151,
+ERASE, 46970356785152, 46970356785152,
+STORE, 46970356785152, 46970356793343,
+STORE, 46970356793344, 46970358632447,
+STORE, 46970356932608, 46970358632447,
+STORE, 46970356793344, 46970356932607,
+ERASE, 46970356932608, 46970356932608,
+STORE, 46970356932608, 46970358591487,
+STORE, 46970358591488, 46970358632447,
+STORE, 46970358276096, 46970358591487,
+STORE, 46970356932608, 46970358276095,
+ERASE, 46970356932608, 46970356932608,
+STORE, 46970356932608, 46970358276095,
+STORE, 46970358587392, 46970358591487,
+STORE, 46970358276096, 46970358587391,
+ERASE, 46970358276096, 46970358276096,
+STORE, 46970358276096, 46970358587391,
+STORE, 46970358616064, 46970358632447,
+STORE, 46970358591488, 46970358616063,
+ERASE, 46970358591488, 46970358591488,
+STORE, 46970358591488, 46970358616063,
+ERASE, 46970358616064, 46970358616064,
+STORE, 46970358616064, 46970358632447,
+STORE, 46970358616064, 46970358644735,
+ERASE, 46970358591488, 46970358591488,
+STORE, 46970358591488, 46970358607871,
+STORE, 46970358607872, 46970358616063,
+ERASE, 46970356776960, 46970356776960,
+STORE, 46970356776960, 46970356781055,
+STORE, 46970356781056, 46970356785151,
+ERASE, 94121892900864, 94121892900864,
+STORE, 94121892900864, 94121892917247,
+STORE, 94121892917248, 94121892921343,
+ERASE, 140662438486016, 140662438486016,
+STORE, 140662438486016, 140662438490111,
+STORE, 140662438490112, 140662438494207,
+ERASE, 46970356670464, 46970356670464,
+STORE, 94121898610688, 94121898745855,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737189351424, 140737488351231,
+ERASE, 140737189351424, 140737189351424,
+STORE, 140737189351424, 140737189355519,
+STORE, 93847948832768, 93847949545471,
+ERASE, 93847948832768, 93847948832768,
+STORE, 93847948832768, 93847948881919,
+STORE, 93847948881920, 93847949545471,
+ERASE, 93847948881920, 93847948881920,
+STORE, 93847948881920, 93847949426687,
+STORE, 93847949426688, 93847949524991,
+STORE, 93847949524992, 93847949545471,
+STORE, 139698989985792, 139698990157823,
+ERASE, 139698989985792, 139698989985792,
+STORE, 139698989985792, 139698989989887,
+STORE, 139698989989888, 139698990157823,
+ERASE, 139698989989888, 139698989989888,
+STORE, 139698989989888, 139698990112767,
+STORE, 139698990112768, 139698990145535,
+STORE, 139698990145536, 139698990153727,
+STORE, 139698990153728, 139698990157823,
+STORE, 140737189744640, 140737189748735,
+STORE, 140737189732352, 140737189744639,
+STORE, 47933805010944, 47933805019135,
+STORE, 47933805019136, 47933805027327,
+STORE, 47933805027328, 47933805133823,
+STORE, 47933805043712, 47933805133823,
+STORE, 47933805027328, 47933805043711,
+ERASE, 47933805043712, 47933805043712,
+STORE, 47933805043712, 47933805117439,
+STORE, 47933805117440, 47933805133823,
+STORE, 47933805096960, 47933805117439,
+STORE, 47933805043712, 47933805096959,
+ERASE, 47933805043712, 47933805043712,
+STORE, 47933805043712, 47933805096959,
+STORE, 47933805113344, 47933805117439,
+STORE, 47933805096960, 47933805113343,
+ERASE, 47933805096960, 47933805096960,
+STORE, 47933805096960, 47933805113343,
+STORE, 47933805125632, 47933805133823,
+STORE, 47933805117440, 47933805125631,
+ERASE, 47933805117440, 47933805117440,
+STORE, 47933805117440, 47933805125631,
+ERASE, 47933805125632, 47933805125632,
+STORE, 47933805125632, 47933805133823,
+STORE, 47933805133824, 47933806972927,
+STORE, 47933805273088, 47933806972927,
+STORE, 47933805133824, 47933805273087,
+ERASE, 47933805273088, 47933805273088,
+STORE, 47933805273088, 47933806931967,
+STORE, 47933806931968, 47933806972927,
+STORE, 47933806616576, 47933806931967,
+STORE, 47933805273088, 47933806616575,
+ERASE, 47933805273088, 47933805273088,
+STORE, 47933805273088, 47933806616575,
+STORE, 47933806927872, 47933806931967,
+STORE, 47933806616576, 47933806927871,
+ERASE, 47933806616576, 47933806616576,
+STORE, 47933806616576, 47933806927871,
+STORE, 47933806956544, 47933806972927,
+STORE, 47933806931968, 47933806956543,
+ERASE, 47933806931968, 47933806931968,
+STORE, 47933806931968, 47933806956543,
+ERASE, 47933806956544, 47933806956544,
+STORE, 47933806956544, 47933806972927,
+STORE, 47933806956544, 47933806985215,
+ERASE, 47933806931968, 47933806931968,
+STORE, 47933806931968, 47933806948351,
+STORE, 47933806948352, 47933806956543,
+ERASE, 47933805117440, 47933805117440,
+STORE, 47933805117440, 47933805121535,
+STORE, 47933805121536, 47933805125631,
+ERASE, 93847949524992, 93847949524992,
+STORE, 93847949524992, 93847949541375,
+STORE, 93847949541376, 93847949545471,
+ERASE, 139698990145536, 139698990145536,
+STORE, 139698990145536, 139698990149631,
+STORE, 139698990149632, 139698990153727,
+ERASE, 47933805010944, 47933805010944,
+STORE, 140737488347136, 140737488351231,
+STORE, 140725553991680, 140737488351231,
+ERASE, 140725553991680, 140725553991680,
+STORE, 140725553991680, 140725553995775,
+STORE, 93980056248320, 93980056961023,
+ERASE, 93980056248320, 93980056248320,
+STORE, 93980056248320, 93980056297471,
+STORE, 93980056297472, 93980056961023,
+ERASE, 93980056297472, 93980056297472,
+STORE, 93980056297472, 93980056842239,
+STORE, 93980056842240, 93980056940543,
+STORE, 93980056940544, 93980056961023,
+STORE, 140146588971008, 140146589143039,
+ERASE, 140146588971008, 140146588971008,
+STORE, 140146588971008, 140146588975103,
+STORE, 140146588975104, 140146589143039,
+ERASE, 140146588975104, 140146588975104,
+STORE, 140146588975104, 140146589097983,
+STORE, 140146589097984, 140146589130751,
+STORE, 140146589130752, 140146589138943,
+STORE, 140146589138944, 140146589143039,
+STORE, 140725554860032, 140725554864127,
+STORE, 140725554847744, 140725554860031,
+STORE, 47486206025728, 47486206033919,
+STORE, 47486206033920, 47486206042111,
+STORE, 47486206042112, 47486206148607,
+STORE, 47486206058496, 47486206148607,
+STORE, 47486206042112, 47486206058495,
+ERASE, 47486206058496, 47486206058496,
+STORE, 47486206058496, 47486206132223,
+STORE, 47486206132224, 47486206148607,
+STORE, 47486206111744, 47486206132223,
+STORE, 47486206058496, 47486206111743,
+ERASE, 47486206058496, 47486206058496,
+STORE, 47486206058496, 47486206111743,
+STORE, 47486206128128, 47486206132223,
+STORE, 47486206111744, 47486206128127,
+ERASE, 47486206111744, 47486206111744,
+STORE, 47486206111744, 47486206128127,
+STORE, 47486206140416, 47486206148607,
+STORE, 47486206132224, 47486206140415,
+ERASE, 47486206132224, 47486206132224,
+STORE, 47486206132224, 47486206140415,
+ERASE, 47486206140416, 47486206140416,
+STORE, 47486206140416, 47486206148607,
+STORE, 47486206148608, 47486207987711,
+STORE, 47486206287872, 47486207987711,
+STORE, 47486206148608, 47486206287871,
+ERASE, 47486206287872, 47486206287872,
+STORE, 47486206287872, 47486207946751,
+STORE, 47486207946752, 47486207987711,
+STORE, 47486207631360, 47486207946751,
+STORE, 47486206287872, 47486207631359,
+ERASE, 47486206287872, 47486206287872,
+STORE, 47486206287872, 47486207631359,
+STORE, 47486207942656, 47486207946751,
+STORE, 47486207631360, 47486207942655,
+ERASE, 47486207631360, 47486207631360,
+STORE, 47486207631360, 47486207942655,
+STORE, 47486207971328, 47486207987711,
+STORE, 47486207946752, 47486207971327,
+ERASE, 47486207946752, 47486207946752,
+STORE, 47486207946752, 47486207971327,
+ERASE, 47486207971328, 47486207971328,
+STORE, 47486207971328, 47486207987711,
+STORE, 47486207971328, 47486207999999,
+ERASE, 47486207946752, 47486207946752,
+STORE, 47486207946752, 47486207963135,
+STORE, 47486207963136, 47486207971327,
+ERASE, 47486206132224, 47486206132224,
+STORE, 47486206132224, 47486206136319,
+STORE, 47486206136320, 47486206140415,
+ERASE, 93980056940544, 93980056940544,
+STORE, 93980056940544, 93980056956927,
+STORE, 93980056956928, 93980056961023,
+ERASE, 140146589130752, 140146589130752,
+STORE, 140146589130752, 140146589134847,
+STORE, 140146589134848, 140146589138943,
+ERASE, 47486206025728, 47486206025728,
+STORE, 93980070006784, 93980070141951,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727334776832, 140737488351231,
+ERASE, 140727334776832, 140727334776832,
+STORE, 140727334776832, 140727334780927,
+STORE, 94049747247104, 94049747959807,
+ERASE, 94049747247104, 94049747247104,
+STORE, 94049747247104, 94049747296255,
+STORE, 94049747296256, 94049747959807,
+ERASE, 94049747296256, 94049747296256,
+STORE, 94049747296256, 94049747841023,
+STORE, 94049747841024, 94049747939327,
+STORE, 94049747939328, 94049747959807,
+STORE, 140227307216896, 140227307388927,
+ERASE, 140227307216896, 140227307216896,
+STORE, 140227307216896, 140227307220991,
+STORE, 140227307220992, 140227307388927,
+ERASE, 140227307220992, 140227307220992,
+STORE, 140227307220992, 140227307343871,
+STORE, 140227307343872, 140227307376639,
+STORE, 140227307376640, 140227307384831,
+STORE, 140227307384832, 140227307388927,
+STORE, 140727335337984, 140727335342079,
+STORE, 140727335325696, 140727335337983,
+STORE, 47405487779840, 47405487788031,
+STORE, 47405487788032, 47405487796223,
+STORE, 47405487796224, 47405487902719,
+STORE, 47405487812608, 47405487902719,
+STORE, 47405487796224, 47405487812607,
+ERASE, 47405487812608, 47405487812608,
+STORE, 47405487812608, 47405487886335,
+STORE, 47405487886336, 47405487902719,
+STORE, 47405487865856, 47405487886335,
+STORE, 47405487812608, 47405487865855,
+ERASE, 47405487812608, 47405487812608,
+STORE, 47405487812608, 47405487865855,
+STORE, 47405487882240, 47405487886335,
+STORE, 47405487865856, 47405487882239,
+ERASE, 47405487865856, 47405487865856,
+STORE, 47405487865856, 47405487882239,
+STORE, 47405487894528, 47405487902719,
+STORE, 47405487886336, 47405487894527,
+ERASE, 47405487886336, 47405487886336,
+STORE, 47405487886336, 47405487894527,
+ERASE, 47405487894528, 47405487894528,
+STORE, 47405487894528, 47405487902719,
+STORE, 47405487902720, 47405489741823,
+STORE, 47405488041984, 47405489741823,
+STORE, 47405487902720, 47405488041983,
+ERASE, 47405488041984, 47405488041984,
+STORE, 47405488041984, 47405489700863,
+STORE, 47405489700864, 47405489741823,
+STORE, 47405489385472, 47405489700863,
+STORE, 47405488041984, 47405489385471,
+ERASE, 47405488041984, 47405488041984,
+STORE, 47405488041984, 47405489385471,
+STORE, 47405489696768, 47405489700863,
+STORE, 47405489385472, 47405489696767,
+ERASE, 47405489385472, 47405489385472,
+STORE, 47405489385472, 47405489696767,
+STORE, 47405489725440, 47405489741823,
+STORE, 47405489700864, 47405489725439,
+ERASE, 47405489700864, 47405489700864,
+STORE, 47405489700864, 47405489725439,
+ERASE, 47405489725440, 47405489725440,
+STORE, 47405489725440, 47405489741823,
+STORE, 47405489725440, 47405489754111,
+ERASE, 47405489700864, 47405489700864,
+STORE, 47405489700864, 47405489717247,
+STORE, 47405489717248, 47405489725439,
+ERASE, 47405487886336, 47405487886336,
+STORE, 47405487886336, 47405487890431,
+STORE, 47405487890432, 47405487894527,
+ERASE, 94049747939328, 94049747939328,
+STORE, 94049747939328, 94049747955711,
+STORE, 94049747955712, 94049747959807,
+ERASE, 140227307376640, 140227307376640,
+STORE, 140227307376640, 140227307380735,
+STORE, 140227307380736, 140227307384831,
+ERASE, 47405487779840, 47405487779840,
+STORE, 94049758810112, 94049758945279,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727079718912, 140737488351231,
+ERASE, 140727079718912, 140727079718912,
+STORE, 140727079718912, 140727079723007,
+STORE, 94250996527104, 94250997239807,
+ERASE, 94250996527104, 94250996527104,
+STORE, 94250996527104, 94250996576255,
+STORE, 94250996576256, 94250997239807,
+ERASE, 94250996576256, 94250996576256,
+STORE, 94250996576256, 94250997121023,
+STORE, 94250997121024, 94250997219327,
+STORE, 94250997219328, 94250997239807,
+STORE, 140060022587392, 140060022759423,
+ERASE, 140060022587392, 140060022587392,
+STORE, 140060022587392, 140060022591487,
+STORE, 140060022591488, 140060022759423,
+ERASE, 140060022591488, 140060022591488,
+STORE, 140060022591488, 140060022714367,
+STORE, 140060022714368, 140060022747135,
+STORE, 140060022747136, 140060022755327,
+STORE, 140060022755328, 140060022759423,
+STORE, 140727079788544, 140727079792639,
+STORE, 140727079776256, 140727079788543,
+STORE, 47572772409344, 47572772417535,
+STORE, 47572772417536, 47572772425727,
+STORE, 47572772425728, 47572772532223,
+STORE, 47572772442112, 47572772532223,
+STORE, 47572772425728, 47572772442111,
+ERASE, 47572772442112, 47572772442112,
+STORE, 47572772442112, 47572772515839,
+STORE, 47572772515840, 47572772532223,
+STORE, 47572772495360, 47572772515839,
+STORE, 47572772442112, 47572772495359,
+ERASE, 47572772442112, 47572772442112,
+STORE, 47572772442112, 47572772495359,
+STORE, 47572772511744, 47572772515839,
+STORE, 47572772495360, 47572772511743,
+ERASE, 47572772495360, 47572772495360,
+STORE, 47572772495360, 47572772511743,
+STORE, 47572772524032, 47572772532223,
+STORE, 47572772515840, 47572772524031,
+ERASE, 47572772515840, 47572772515840,
+STORE, 47572772515840, 47572772524031,
+ERASE, 47572772524032, 47572772524032,
+STORE, 47572772524032, 47572772532223,
+STORE, 47572772532224, 47572774371327,
+STORE, 47572772671488, 47572774371327,
+STORE, 47572772532224, 47572772671487,
+ERASE, 47572772671488, 47572772671488,
+STORE, 47572772671488, 47572774330367,
+STORE, 47572774330368, 47572774371327,
+STORE, 47572774014976, 47572774330367,
+STORE, 47572772671488, 47572774014975,
+ERASE, 47572772671488, 47572772671488,
+STORE, 47572772671488, 47572774014975,
+STORE, 47572774326272, 47572774330367,
+STORE, 47572774014976, 47572774326271,
+ERASE, 47572774014976, 47572774014976,
+STORE, 47572774014976, 47572774326271,
+STORE, 47572774354944, 47572774371327,
+STORE, 47572774330368, 47572774354943,
+ERASE, 47572774330368, 47572774330368,
+STORE, 47572774330368, 47572774354943,
+ERASE, 47572774354944, 47572774354944,
+STORE, 47572774354944, 47572774371327,
+STORE, 47572774354944, 47572774383615,
+ERASE, 47572774330368, 47572774330368,
+STORE, 47572774330368, 47572774346751,
+STORE, 47572774346752, 47572774354943,
+ERASE, 47572772515840, 47572772515840,
+STORE, 47572772515840, 47572772519935,
+STORE, 47572772519936, 47572772524031,
+ERASE, 94250997219328, 94250997219328,
+STORE, 94250997219328, 94250997235711,
+STORE, 94250997235712, 94250997239807,
+ERASE, 140060022747136, 140060022747136,
+STORE, 140060022747136, 140060022751231,
+STORE, 140060022751232, 140060022755327,
+ERASE, 47572772409344, 47572772409344,
+STORE, 94251018305536, 94251018440703,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730012389376, 140737488351231,
+ERASE, 140730012389376, 140730012389376,
+STORE, 140730012389376, 140730012393471,
+STORE, 94382607675392, 94382607695871,
+ERASE, 94382607675392, 94382607675392,
+STORE, 94382607675392, 94382607679487,
+STORE, 94382607679488, 94382607695871,
+ERASE, 94382607679488, 94382607679488,
+STORE, 94382607679488, 94382607683583,
+STORE, 94382607683584, 94382607687679,
+STORE, 94382607687680, 94382607695871,
+STORE, 140252451454976, 140252451627007,
+ERASE, 140252451454976, 140252451454976,
+STORE, 140252451454976, 140252451459071,
+STORE, 140252451459072, 140252451627007,
+ERASE, 140252451459072, 140252451459072,
+STORE, 140252451459072, 140252451581951,
+STORE, 140252451581952, 140252451614719,
+STORE, 140252451614720, 140252451622911,
+STORE, 140252451622912, 140252451627007,
+STORE, 140730013548544, 140730013552639,
+STORE, 140730013536256, 140730013548543,
+STORE, 47380343541760, 47380343549951,
+STORE, 47380343549952, 47380343558143,
+STORE, 47380343558144, 47380345397247,
+STORE, 47380343697408, 47380345397247,
+STORE, 47380343558144, 47380343697407,
+ERASE, 47380343697408, 47380343697408,
+STORE, 47380343697408, 47380345356287,
+STORE, 47380345356288, 47380345397247,
+STORE, 47380345040896, 47380345356287,
+STORE, 47380343697408, 47380345040895,
+ERASE, 47380343697408, 47380343697408,
+STORE, 47380343697408, 47380345040895,
+STORE, 47380345352192, 47380345356287,
+STORE, 47380345040896, 47380345352191,
+ERASE, 47380345040896, 47380345040896,
+STORE, 47380345040896, 47380345352191,
+STORE, 47380345380864, 47380345397247,
+STORE, 47380345356288, 47380345380863,
+ERASE, 47380345356288, 47380345356288,
+STORE, 47380345356288, 47380345380863,
+ERASE, 47380345380864, 47380345380864,
+STORE, 47380345380864, 47380345397247,
+ERASE, 47380345356288, 47380345356288,
+STORE, 47380345356288, 47380345372671,
+STORE, 47380345372672, 47380345380863,
+ERASE, 94382607687680, 94382607687680,
+STORE, 94382607687680, 94382607691775,
+STORE, 94382607691776, 94382607695871,
+ERASE, 140252451614720, 140252451614720,
+STORE, 140252451614720, 140252451618815,
+STORE, 140252451618816, 140252451622911,
+ERASE, 47380343541760, 47380343541760,
+STORE, 94382626803712, 94382626938879,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730900271104, 140737488351231,
+ERASE, 140730900271104, 140730900271104,
+STORE, 140730900271104, 140730900275199,
+STORE, 93855478120448, 93855478337535,
+ERASE, 93855478120448, 93855478120448,
+STORE, 93855478120448, 93855478198271,
+STORE, 93855478198272, 93855478337535,
+ERASE, 93855478198272, 93855478198272,
+STORE, 93855478198272, 93855478243327,
+STORE, 93855478243328, 93855478288383,
+STORE, 93855478288384, 93855478337535,
+STORE, 140092686573568, 140092686745599,
+ERASE, 140092686573568, 140092686573568,
+STORE, 140092686573568, 140092686577663,
+STORE, 140092686577664, 140092686745599,
+ERASE, 140092686577664, 140092686577664,
+STORE, 140092686577664, 140092686700543,
+STORE, 140092686700544, 140092686733311,
+STORE, 140092686733312, 140092686741503,
+STORE, 140092686741504, 140092686745599,
+STORE, 140730900537344, 140730900541439,
+STORE, 140730900525056, 140730900537343,
+STORE, 47540108423168, 47540108431359,
+STORE, 47540108431360, 47540108439551,
+STORE, 47540108439552, 47540110278655,
+STORE, 47540108578816, 47540110278655,
+STORE, 47540108439552, 47540108578815,
+ERASE, 47540108578816, 47540108578816,
+STORE, 47540108578816, 47540110237695,
+STORE, 47540110237696, 47540110278655,
+STORE, 47540109922304, 47540110237695,
+STORE, 47540108578816, 47540109922303,
+ERASE, 47540108578816, 47540108578816,
+STORE, 47540108578816, 47540109922303,
+STORE, 47540110233600, 47540110237695,
+STORE, 47540109922304, 47540110233599,
+ERASE, 47540109922304, 47540109922304,
+STORE, 47540109922304, 47540110233599,
+STORE, 47540110262272, 47540110278655,
+STORE, 47540110237696, 47540110262271,
+ERASE, 47540110237696, 47540110237696,
+STORE, 47540110237696, 47540110262271,
+ERASE, 47540110262272, 47540110262272,
+STORE, 47540110262272, 47540110278655,
+ERASE, 47540110237696, 47540110237696,
+STORE, 47540110237696, 47540110254079,
+STORE, 47540110254080, 47540110262271,
+ERASE, 93855478288384, 93855478288384,
+STORE, 93855478288384, 93855478333439,
+STORE, 93855478333440, 93855478337535,
+ERASE, 140092686733312, 140092686733312,
+STORE, 140092686733312, 140092686737407,
+STORE, 140092686737408, 140092686741503,
+ERASE, 47540108423168, 47540108423168,
+STORE, 93855492222976, 93855492358143,
+STORE, 93855492222976, 93855492493311,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733498146816, 140737488351231,
+ERASE, 140733498146816, 140733498146816,
+STORE, 140733498146816, 140733498150911,
+STORE, 94170739654656, 94170740367359,
+ERASE, 94170739654656, 94170739654656,
+STORE, 94170739654656, 94170739703807,
+STORE, 94170739703808, 94170740367359,
+ERASE, 94170739703808, 94170739703808,
+STORE, 94170739703808, 94170740248575,
+STORE, 94170740248576, 94170740346879,
+STORE, 94170740346880, 94170740367359,
+STORE, 140024788877312, 140024789049343,
+ERASE, 140024788877312, 140024788877312,
+STORE, 140024788877312, 140024788881407,
+STORE, 140024788881408, 140024789049343,
+ERASE, 140024788881408, 140024788881408,
+STORE, 140024788881408, 140024789004287,
+STORE, 140024789004288, 140024789037055,
+STORE, 140024789037056, 140024789045247,
+STORE, 140024789045248, 140024789049343,
+STORE, 140733499023360, 140733499027455,
+STORE, 140733499011072, 140733499023359,
+STORE, 47608006119424, 47608006127615,
+STORE, 47608006127616, 47608006135807,
+STORE, 47608006135808, 47608006242303,
+STORE, 47608006152192, 47608006242303,
+STORE, 47608006135808, 47608006152191,
+ERASE, 47608006152192, 47608006152192,
+STORE, 47608006152192, 47608006225919,
+STORE, 47608006225920, 47608006242303,
+STORE, 47608006205440, 47608006225919,
+STORE, 47608006152192, 47608006205439,
+ERASE, 47608006152192, 47608006152192,
+STORE, 47608006152192, 47608006205439,
+STORE, 47608006221824, 47608006225919,
+STORE, 47608006205440, 47608006221823,
+ERASE, 47608006205440, 47608006205440,
+STORE, 47608006205440, 47608006221823,
+STORE, 47608006234112, 47608006242303,
+STORE, 47608006225920, 47608006234111,
+ERASE, 47608006225920, 47608006225920,
+STORE, 47608006225920, 47608006234111,
+ERASE, 47608006234112, 47608006234112,
+STORE, 47608006234112, 47608006242303,
+STORE, 47608006242304, 47608008081407,
+STORE, 47608006381568, 47608008081407,
+STORE, 47608006242304, 47608006381567,
+ERASE, 47608006381568, 47608006381568,
+STORE, 47608006381568, 47608008040447,
+STORE, 47608008040448, 47608008081407,
+STORE, 47608007725056, 47608008040447,
+STORE, 47608006381568, 47608007725055,
+ERASE, 47608006381568, 47608006381568,
+STORE, 47608006381568, 47608007725055,
+STORE, 47608008036352, 47608008040447,
+STORE, 47608007725056, 47608008036351,
+ERASE, 47608007725056, 47608007725056,
+STORE, 47608007725056, 47608008036351,
+STORE, 47608008065024, 47608008081407,
+STORE, 47608008040448, 47608008065023,
+ERASE, 47608008040448, 47608008040448,
+STORE, 47608008040448, 47608008065023,
+ERASE, 47608008065024, 47608008065024,
+STORE, 47608008065024, 47608008081407,
+STORE, 47608008065024, 47608008093695,
+ERASE, 47608008040448, 47608008040448,
+STORE, 47608008040448, 47608008056831,
+STORE, 47608008056832, 47608008065023,
+ERASE, 47608006225920, 47608006225920,
+STORE, 47608006225920, 47608006230015,
+STORE, 47608006230016, 47608006234111,
+ERASE, 94170740346880, 94170740346880,
+STORE, 94170740346880, 94170740363263,
+STORE, 94170740363264, 94170740367359,
+ERASE, 140024789037056, 140024789037056,
+STORE, 140024789037056, 140024789041151,
+STORE, 140024789041152, 140024789045247,
+ERASE, 47608006119424, 47608006119424,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730264326144, 140737488351231,
+ERASE, 140730264326144, 140730264326144,
+STORE, 140730264326144, 140730264330239,
+STORE, 94653216407552, 94653217120255,
+ERASE, 94653216407552, 94653216407552,
+STORE, 94653216407552, 94653216456703,
+STORE, 94653216456704, 94653217120255,
+ERASE, 94653216456704, 94653216456704,
+STORE, 94653216456704, 94653217001471,
+STORE, 94653217001472, 94653217099775,
+STORE, 94653217099776, 94653217120255,
+STORE, 140103617011712, 140103617183743,
+ERASE, 140103617011712, 140103617011712,
+STORE, 140103617011712, 140103617015807,
+STORE, 140103617015808, 140103617183743,
+ERASE, 140103617015808, 140103617015808,
+STORE, 140103617015808, 140103617138687,
+STORE, 140103617138688, 140103617171455,
+STORE, 140103617171456, 140103617179647,
+STORE, 140103617179648, 140103617183743,
+STORE, 140730265427968, 140730265432063,
+STORE, 140730265415680, 140730265427967,
+STORE, 47529177985024, 47529177993215,
+STORE, 47529177993216, 47529178001407,
+STORE, 47529178001408, 47529178107903,
+STORE, 47529178017792, 47529178107903,
+STORE, 47529178001408, 47529178017791,
+ERASE, 47529178017792, 47529178017792,
+STORE, 47529178017792, 47529178091519,
+STORE, 47529178091520, 47529178107903,
+STORE, 47529178071040, 47529178091519,
+STORE, 47529178017792, 47529178071039,
+ERASE, 47529178017792, 47529178017792,
+STORE, 47529178017792, 47529178071039,
+STORE, 47529178087424, 47529178091519,
+STORE, 47529178071040, 47529178087423,
+ERASE, 47529178071040, 47529178071040,
+STORE, 47529178071040, 47529178087423,
+STORE, 47529178099712, 47529178107903,
+STORE, 47529178091520, 47529178099711,
+ERASE, 47529178091520, 47529178091520,
+STORE, 47529178091520, 47529178099711,
+ERASE, 47529178099712, 47529178099712,
+STORE, 47529178099712, 47529178107903,
+STORE, 47529178107904, 47529179947007,
+STORE, 47529178247168, 47529179947007,
+STORE, 47529178107904, 47529178247167,
+ERASE, 47529178247168, 47529178247168,
+STORE, 47529178247168, 47529179906047,
+STORE, 47529179906048, 47529179947007,
+STORE, 47529179590656, 47529179906047,
+STORE, 47529178247168, 47529179590655,
+ERASE, 47529178247168, 47529178247168,
+STORE, 47529178247168, 47529179590655,
+STORE, 47529179901952, 47529179906047,
+STORE, 47529179590656, 47529179901951,
+ERASE, 47529179590656, 47529179590656,
+STORE, 47529179590656, 47529179901951,
+STORE, 47529179930624, 47529179947007,
+STORE, 47529179906048, 47529179930623,
+ERASE, 47529179906048, 47529179906048,
+STORE, 47529179906048, 47529179930623,
+ERASE, 47529179930624, 47529179930624,
+STORE, 47529179930624, 47529179947007,
+STORE, 47529179930624, 47529179959295,
+ERASE, 47529179906048, 47529179906048,
+STORE, 47529179906048, 47529179922431,
+STORE, 47529179922432, 47529179930623,
+ERASE, 47529178091520, 47529178091520,
+STORE, 47529178091520, 47529178095615,
+STORE, 47529178095616, 47529178099711,
+ERASE, 94653217099776, 94653217099776,
+STORE, 94653217099776, 94653217116159,
+STORE, 94653217116160, 94653217120255,
+ERASE, 140103617171456, 140103617171456,
+STORE, 140103617171456, 140103617175551,
+STORE, 140103617175552, 140103617179647,
+ERASE, 47529177985024, 47529177985024,
+STORE, 94653241135104, 94653241270271,
+STORE, 140737488347136, 140737488351231,
+STORE, 140736284549120, 140737488351231,
+ERASE, 140736284549120, 140736284549120,
+STORE, 140736284549120, 140736284553215,
+STORE, 93963663822848, 93963664506879,
+ERASE, 93963663822848, 93963663822848,
+STORE, 93963663822848, 93963663884287,
+STORE, 93963663884288, 93963664506879,
+ERASE, 93963663884288, 93963663884288,
+STORE, 93963663884288, 93963664240639,
+STORE, 93963664240640, 93963664379903,
+STORE, 93963664379904, 93963664506879,
+STORE, 140450188439552, 140450188611583,
+ERASE, 140450188439552, 140450188439552,
+STORE, 140450188439552, 140450188443647,
+STORE, 140450188443648, 140450188611583,
+ERASE, 140450188443648, 140450188443648,
+STORE, 140450188443648, 140450188566527,
+STORE, 140450188566528, 140450188599295,
+STORE, 140450188599296, 140450188607487,
+STORE, 140450188607488, 140450188611583,
+STORE, 140736284577792, 140736284581887,
+STORE, 140736284565504, 140736284577791,
+STORE, 47182606557184, 47182606565375,
+STORE, 47182606565376, 47182606573567,
+STORE, 47182606573568, 47182608412671,
+STORE, 47182606712832, 47182608412671,
+STORE, 47182606573568, 47182606712831,
+ERASE, 47182606712832, 47182606712832,
+STORE, 47182606712832, 47182608371711,
+STORE, 47182608371712, 47182608412671,
+STORE, 47182608056320, 47182608371711,
+STORE, 47182606712832, 47182608056319,
+ERASE, 47182606712832, 47182606712832,
+STORE, 47182606712832, 47182608056319,
+STORE, 47182608367616, 47182608371711,
+STORE, 47182608056320, 47182608367615,
+ERASE, 47182608056320, 47182608056320,
+STORE, 47182608056320, 47182608367615,
+STORE, 47182608396288, 47182608412671,
+STORE, 47182608371712, 47182608396287,
+ERASE, 47182608371712, 47182608371712,
+STORE, 47182608371712, 47182608396287,
+ERASE, 47182608396288, 47182608396288,
+STORE, 47182608396288, 47182608412671,
+STORE, 47182608412672, 47182608523263,
+STORE, 47182608429056, 47182608523263,
+STORE, 47182608412672, 47182608429055,
+ERASE, 47182608429056, 47182608429056,
+STORE, 47182608429056, 47182608515071,
+STORE, 47182608515072, 47182608523263,
+STORE, 47182608490496, 47182608515071,
+STORE, 47182608429056, 47182608490495,
+ERASE, 47182608429056, 47182608429056,
+STORE, 47182608429056, 47182608490495,
+STORE, 47182608510976, 47182608515071,
+STORE, 47182608490496, 47182608510975,
+ERASE, 47182608490496, 47182608490496,
+STORE, 47182608490496, 47182608510975,
+ERASE, 47182608515072, 47182608515072,
+STORE, 47182608515072, 47182608523263,
+STORE, 47182608523264, 47182608568319,
+ERASE, 47182608523264, 47182608523264,
+STORE, 47182608523264, 47182608531455,
+STORE, 47182608531456, 47182608568319,
+STORE, 47182608551936, 47182608568319,
+STORE, 47182608531456, 47182608551935,
+ERASE, 47182608531456, 47182608531456,
+STORE, 47182608531456, 47182608551935,
+STORE, 47182608560128, 47182608568319,
+STORE, 47182608551936, 47182608560127,
+ERASE, 47182608551936, 47182608551936,
+STORE, 47182608551936, 47182608568319,
+ERASE, 47182608551936, 47182608551936,
+STORE, 47182608551936, 47182608560127,
+STORE, 47182608560128, 47182608568319,
+ERASE, 47182608560128, 47182608560128,
+STORE, 47182608560128, 47182608568319,
+STORE, 47182608568320, 47182608916479,
+STORE, 47182608609280, 47182608916479,
+STORE, 47182608568320, 47182608609279,
+ERASE, 47182608609280, 47182608609280,
+STORE, 47182608609280, 47182608891903,
+STORE, 47182608891904, 47182608916479,
+STORE, 47182608822272, 47182608891903,
+STORE, 47182608609280, 47182608822271,
+ERASE, 47182608609280, 47182608609280,
+STORE, 47182608609280, 47182608822271,
+STORE, 47182608887808, 47182608891903,
+STORE, 47182608822272, 47182608887807,
+ERASE, 47182608822272, 47182608822272,
+STORE, 47182608822272, 47182608887807,
+ERASE, 47182608891904, 47182608891904,
+STORE, 47182608891904, 47182608916479,
+STORE, 47182608916480, 47182611177471,
+STORE, 47182609068032, 47182611177471,
+STORE, 47182608916480, 47182609068031,
+ERASE, 47182609068032, 47182609068032,
+STORE, 47182609068032, 47182611161087,
+STORE, 47182611161088, 47182611177471,
+STORE, 47182611169280, 47182611177471,
+STORE, 47182611161088, 47182611169279,
+ERASE, 47182611161088, 47182611161088,
+STORE, 47182611161088, 47182611169279,
+ERASE, 47182611169280, 47182611169280,
+STORE, 47182611169280, 47182611177471,
+STORE, 47182611177472, 47182611312639,
+ERASE, 47182611177472, 47182611177472,
+STORE, 47182611177472, 47182611202047,
+STORE, 47182611202048, 47182611312639,
+STORE, 47182611263488, 47182611312639,
+STORE, 47182611202048, 47182611263487,
+ERASE, 47182611202048, 47182611202048,
+STORE, 47182611202048, 47182611263487,
+STORE, 47182611288064, 47182611312639,
+STORE, 47182611263488, 47182611288063,
+ERASE, 47182611263488, 47182611263488,
+STORE, 47182611263488, 47182611312639,
+ERASE, 47182611263488, 47182611263488,
+STORE, 47182611263488, 47182611288063,
+STORE, 47182611288064, 47182611312639,
+STORE, 47182611296256, 47182611312639,
+STORE, 47182611288064, 47182611296255,
+ERASE, 47182611288064, 47182611288064,
+STORE, 47182611288064, 47182611296255,
+ERASE, 47182611296256, 47182611296256,
+STORE, 47182611296256, 47182611312639,
+STORE, 47182611296256, 47182611320831,
+STORE, 47182611320832, 47182611484671,
+ERASE, 47182611320832, 47182611320832,
+STORE, 47182611320832, 47182611333119,
+STORE, 47182611333120, 47182611484671,
+STORE, 47182611431424, 47182611484671,
+STORE, 47182611333120, 47182611431423,
+ERASE, 47182611333120, 47182611333120,
+STORE, 47182611333120, 47182611431423,
+STORE, 47182611476480, 47182611484671,
+STORE, 47182611431424, 47182611476479,
+ERASE, 47182611431424, 47182611431424,
+STORE, 47182611431424, 47182611484671,
+ERASE, 47182611431424, 47182611431424,
+STORE, 47182611431424, 47182611476479,
+STORE, 47182611476480, 47182611484671,
+ERASE, 47182611476480, 47182611476480,
+STORE, 47182611476480, 47182611484671,
+STORE, 47182611484672, 47182612082687,
+STORE, 47182611603456, 47182612082687,
+STORE, 47182611484672, 47182611603455,
+ERASE, 47182611603456, 47182611603456,
+STORE, 47182611603456, 47182612029439,
+STORE, 47182612029440, 47182612082687,
+STORE, 47182611918848, 47182612029439,
+STORE, 47182611603456, 47182611918847,
+ERASE, 47182611603456, 47182611603456,
+STORE, 47182611603456, 47182611918847,
+STORE, 47182612025344, 47182612029439,
+STORE, 47182611918848, 47182612025343,
+ERASE, 47182611918848, 47182611918848,
+STORE, 47182611918848, 47182612025343,
+ERASE, 47182612029440, 47182612029440,
+STORE, 47182612029440, 47182612082687,
+STORE, 47182612082688, 47182615134207,
+STORE, 47182612627456, 47182615134207,
+STORE, 47182612082688, 47182612627455,
+ERASE, 47182612627456, 47182612627456,
+STORE, 47182612627456, 47182614913023,
+STORE, 47182614913024, 47182615134207,
+STORE, 47182614323200, 47182614913023,
+STORE, 47182612627456, 47182614323199,
+ERASE, 47182612627456, 47182612627456,
+STORE, 47182612627456, 47182614323199,
+STORE, 47182614908928, 47182614913023,
+STORE, 47182614323200, 47182614908927,
+ERASE, 47182614323200, 47182614323200,
+STORE, 47182614323200, 47182614908927,
+STORE, 47182615117824, 47182615134207,
+STORE, 47182614913024, 47182615117823,
+ERASE, 47182614913024, 47182614913024,
+STORE, 47182614913024, 47182615117823,
+ERASE, 47182615117824, 47182615117824,
+STORE, 47182615117824, 47182615134207,
+STORE, 47182615134208, 47182615166975,
+ERASE, 47182615134208, 47182615134208,
+STORE, 47182615134208, 47182615142399,
+STORE, 47182615142400, 47182615166975,
+STORE, 47182615154688, 47182615166975,
+STORE, 47182615142400, 47182615154687,
+ERASE, 47182615142400, 47182615142400,
+STORE, 47182615142400, 47182615154687,
+STORE, 47182615158784, 47182615166975,
+STORE, 47182615154688, 47182615158783,
+ERASE, 47182615154688, 47182615154688,
+STORE, 47182615154688, 47182615166975,
+ERASE, 47182615154688, 47182615154688,
+STORE, 47182615154688, 47182615158783,
+STORE, 47182615158784, 47182615166975,
+ERASE, 47182615158784, 47182615158784,
+STORE, 47182615158784, 47182615166975,
+STORE, 47182615166976, 47182615203839,
+ERASE, 47182615166976, 47182615166976,
+STORE, 47182615166976, 47182615175167,
+STORE, 47182615175168, 47182615203839,
+STORE, 47182615191552, 47182615203839,
+STORE, 47182615175168, 47182615191551,
+ERASE, 47182615175168, 47182615175168,
+STORE, 47182615175168, 47182615191551,
+STORE, 47182615195648, 47182615203839,
+STORE, 47182615191552, 47182615195647,
+ERASE, 47182615191552, 47182615191552,
+STORE, 47182615191552, 47182615203839,
+ERASE, 47182615191552, 47182615191552,
+STORE, 47182615191552, 47182615195647,
+STORE, 47182615195648, 47182615203839,
+ERASE, 47182615195648, 47182615195648,
+STORE, 47182615195648, 47182615203839,
+STORE, 47182615203840, 47182615678975,
+ERASE, 47182615203840, 47182615203840,
+STORE, 47182615203840, 47182615212031,
+STORE, 47182615212032, 47182615678975,
+STORE, 47182615547904, 47182615678975,
+STORE, 47182615212032, 47182615547903,
+ERASE, 47182615212032, 47182615212032,
+STORE, 47182615212032, 47182615547903,
+STORE, 47182615670784, 47182615678975,
+STORE, 47182615547904, 47182615670783,
+ERASE, 47182615547904, 47182615547904,
+STORE, 47182615547904, 47182615678975,
+ERASE, 47182615547904, 47182615547904,
+STORE, 47182615547904, 47182615670783,
+STORE, 47182615670784, 47182615678975,
+ERASE, 47182615670784, 47182615670784,
+STORE, 47182615670784, 47182615678975,
+STORE, 47182615678976, 47182615687167,
+STORE, 47182615687168, 47182615707647,
+ERASE, 47182615687168, 47182615687168,
+STORE, 47182615687168, 47182615691263,
+STORE, 47182615691264, 47182615707647,
+STORE, 47182615695360, 47182615707647,
+STORE, 47182615691264, 47182615695359,
+ERASE, 47182615691264, 47182615691264,
+STORE, 47182615691264, 47182615695359,
+STORE, 47182615699456, 47182615707647,
+STORE, 47182615695360, 47182615699455,
+ERASE, 47182615695360, 47182615695360,
+STORE, 47182615695360, 47182615707647,
+ERASE, 47182615695360, 47182615695360,
+STORE, 47182615695360, 47182615699455,
+STORE, 47182615699456, 47182615707647,
+ERASE, 47182615699456, 47182615699456,
+STORE, 47182615699456, 47182615707647,
+STORE, 47182615707648, 47182615715839,
+ERASE, 47182608371712, 47182608371712,
+STORE, 47182608371712, 47182608388095,
+STORE, 47182608388096, 47182608396287,
+ERASE, 47182615699456, 47182615699456,
+STORE, 47182615699456, 47182615703551,
+STORE, 47182615703552, 47182615707647,
+ERASE, 47182611288064, 47182611288064,
+STORE, 47182611288064, 47182611292159,
+STORE, 47182611292160, 47182611296255,
+ERASE, 47182615670784, 47182615670784,
+STORE, 47182615670784, 47182615674879,
+STORE, 47182615674880, 47182615678975,
+ERASE, 47182615195648, 47182615195648,
+STORE, 47182615195648, 47182615199743,
+STORE, 47182615199744, 47182615203839,
+ERASE, 47182615158784, 47182615158784,
+STORE, 47182615158784, 47182615162879,
+STORE, 47182615162880, 47182615166975,
+ERASE, 47182614913024, 47182614913024,
+STORE, 47182614913024, 47182615109631,
+STORE, 47182615109632, 47182615117823,
+ERASE, 47182612029440, 47182612029440,
+STORE, 47182612029440, 47182612066303,
+STORE, 47182612066304, 47182612082687,
+ERASE, 47182611476480, 47182611476480,
+STORE, 47182611476480, 47182611480575,
+STORE, 47182611480576, 47182611484671,
+ERASE, 47182611161088, 47182611161088,
+STORE, 47182611161088, 47182611165183,
+STORE, 47182611165184, 47182611169279,
+ERASE, 47182608891904, 47182608891904,
+STORE, 47182608891904, 47182608912383,
+STORE, 47182608912384, 47182608916479,
+ERASE, 47182608560128, 47182608560128,
+STORE, 47182608560128, 47182608564223,
+STORE, 47182608564224, 47182608568319,
+ERASE, 47182608515072, 47182608515072,
+STORE, 47182608515072, 47182608519167,
+STORE, 47182608519168, 47182608523263,
+ERASE, 93963664379904, 93963664379904,
+STORE, 93963664379904, 93963664502783,
+STORE, 93963664502784, 93963664506879,
+ERASE, 140450188599296, 140450188599296,
+STORE, 140450188599296, 140450188603391,
+STORE, 140450188603392, 140450188607487,
+ERASE, 47182606557184, 47182606557184,
+STORE, 93963694723072, 93963694858239,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730313261056, 140737488351231,
+ERASE, 140730313261056, 140730313261056,
+STORE, 140730313261056, 140730313265151,
+STORE, 94386579017728, 94386579697663,
+ERASE, 94386579017728, 94386579017728,
+STORE, 94386579017728, 94386579083263,
+STORE, 94386579083264, 94386579697663,
+ERASE, 94386579083264, 94386579083264,
+STORE, 94386579083264, 94386579431423,
+STORE, 94386579431424, 94386579570687,
+STORE, 94386579570688, 94386579697663,
+STORE, 140124810838016, 140124811010047,
+ERASE, 140124810838016, 140124810838016,
+STORE, 140124810838016, 140124810842111,
+STORE, 140124810842112, 140124811010047,
+ERASE, 140124810842112, 140124810842112,
+STORE, 140124810842112, 140124810964991,
+STORE, 140124810964992, 140124810997759,
+STORE, 140124810997760, 140124811005951,
+STORE, 140124811005952, 140124811010047,
+STORE, 140730313601024, 140730313605119,
+STORE, 140730313588736, 140730313601023,
+STORE, 47507984158720, 47507984166911,
+STORE, 47507984166912, 47507984175103,
+STORE, 47507984175104, 47507986014207,
+STORE, 47507984314368, 47507986014207,
+STORE, 47507984175104, 47507984314367,
+ERASE, 47507984314368, 47507984314368,
+STORE, 47507984314368, 47507985973247,
+STORE, 47507985973248, 47507986014207,
+STORE, 47507985657856, 47507985973247,
+STORE, 47507984314368, 47507985657855,
+ERASE, 47507984314368, 47507984314368,
+STORE, 47507984314368, 47507985657855,
+STORE, 47507985969152, 47507985973247,
+STORE, 47507985657856, 47507985969151,
+ERASE, 47507985657856, 47507985657856,
+STORE, 47507985657856, 47507985969151,
+STORE, 47507985997824, 47507986014207,
+STORE, 47507985973248, 47507985997823,
+ERASE, 47507985973248, 47507985973248,
+STORE, 47507985973248, 47507985997823,
+ERASE, 47507985997824, 47507985997824,
+STORE, 47507985997824, 47507986014207,
+STORE, 47507986014208, 47507986124799,
+STORE, 47507986030592, 47507986124799,
+STORE, 47507986014208, 47507986030591,
+ERASE, 47507986030592, 47507986030592,
+STORE, 47507986030592, 47507986116607,
+STORE, 47507986116608, 47507986124799,
+STORE, 47507986092032, 47507986116607,
+STORE, 47507986030592, 47507986092031,
+ERASE, 47507986030592, 47507986030592,
+STORE, 47507986030592, 47507986092031,
+STORE, 47507986112512, 47507986116607,
+STORE, 47507986092032, 47507986112511,
+ERASE, 47507986092032, 47507986092032,
+STORE, 47507986092032, 47507986112511,
+ERASE, 47507986116608, 47507986116608,
+STORE, 47507986116608, 47507986124799,
+STORE, 47507986124800, 47507986169855,
+ERASE, 47507986124800, 47507986124800,
+STORE, 47507986124800, 47507986132991,
+STORE, 47507986132992, 47507986169855,
+STORE, 47507986153472, 47507986169855,
+STORE, 47507986132992, 47507986153471,
+ERASE, 47507986132992, 47507986132992,
+STORE, 47507986132992, 47507986153471,
+STORE, 47507986161664, 47507986169855,
+STORE, 47507986153472, 47507986161663,
+ERASE, 47507986153472, 47507986153472,
+STORE, 47507986153472, 47507986169855,
+ERASE, 47507986153472, 47507986153472,
+STORE, 47507986153472, 47507986161663,
+STORE, 47507986161664, 47507986169855,
+ERASE, 47507986161664, 47507986161664,
+STORE, 47507986161664, 47507986169855,
+STORE, 47507986169856, 47507986518015,
+STORE, 47507986210816, 47507986518015,
+STORE, 47507986169856, 47507986210815,
+ERASE, 47507986210816, 47507986210816,
+STORE, 47507986210816, 47507986493439,
+STORE, 47507986493440, 47507986518015,
+STORE, 47507986423808, 47507986493439,
+STORE, 47507986210816, 47507986423807,
+ERASE, 47507986210816, 47507986210816,
+STORE, 47507986210816, 47507986423807,
+STORE, 47507986489344, 47507986493439,
+STORE, 47507986423808, 47507986489343,
+ERASE, 47507986423808, 47507986423808,
+STORE, 47507986423808, 47507986489343,
+ERASE, 47507986493440, 47507986493440,
+STORE, 47507986493440, 47507986518015,
+STORE, 47507986518016, 47507988779007,
+STORE, 47507986669568, 47507988779007,
+STORE, 47507986518016, 47507986669567,
+ERASE, 47507986669568, 47507986669568,
+STORE, 47507986669568, 47507988762623,
+STORE, 47507988762624, 47507988779007,
+STORE, 47507988770816, 47507988779007,
+STORE, 47507988762624, 47507988770815,
+ERASE, 47507988762624, 47507988762624,
+STORE, 47507988762624, 47507988770815,
+ERASE, 47507988770816, 47507988770816,
+STORE, 47507988770816, 47507988779007,
+STORE, 47507988779008, 47507988914175,
+ERASE, 47507988779008, 47507988779008,
+STORE, 47507988779008, 47507988803583,
+STORE, 47507988803584, 47507988914175,
+STORE, 47507988865024, 47507988914175,
+STORE, 47507988803584, 47507988865023,
+ERASE, 47507988803584, 47507988803584,
+STORE, 47507988803584, 47507988865023,
+STORE, 47507988889600, 47507988914175,
+STORE, 47507988865024, 47507988889599,
+ERASE, 47507988865024, 47507988865024,
+STORE, 47507988865024, 47507988914175,
+ERASE, 47507988865024, 47507988865024,
+STORE, 47507988865024, 47507988889599,
+STORE, 47507988889600, 47507988914175,
+STORE, 47507988897792, 47507988914175,
+STORE, 47507988889600, 47507988897791,
+ERASE, 47507988889600, 47507988889600,
+STORE, 47507988889600, 47507988897791,
+ERASE, 47507988897792, 47507988897792,
+STORE, 47507988897792, 47507988914175,
+STORE, 47507988897792, 47507988922367,
+STORE, 47507988922368, 47507989086207,
+ERASE, 47507988922368, 47507988922368,
+STORE, 47507988922368, 47507988934655,
+STORE, 47507988934656, 47507989086207,
+STORE, 47507989032960, 47507989086207,
+STORE, 47507988934656, 47507989032959,
+ERASE, 47507988934656, 47507988934656,
+STORE, 47507988934656, 47507989032959,
+STORE, 47507989078016, 47507989086207,
+STORE, 47507989032960, 47507989078015,
+ERASE, 47507989032960, 47507989032960,
+STORE, 47507989032960, 47507989086207,
+ERASE, 47507989032960, 47507989032960,
+STORE, 47507989032960, 47507989078015,
+STORE, 47507989078016, 47507989086207,
+ERASE, 47507989078016, 47507989078016,
+STORE, 47507989078016, 47507989086207,
+STORE, 47507989086208, 47507989684223,
+STORE, 47507989204992, 47507989684223,
+STORE, 47507989086208, 47507989204991,
+ERASE, 47507989204992, 47507989204992,
+STORE, 47507989204992, 47507989630975,
+STORE, 47507989630976, 47507989684223,
+STORE, 47507989520384, 47507989630975,
+STORE, 47507989204992, 47507989520383,
+ERASE, 47507989204992, 47507989204992,
+STORE, 47507989204992, 47507989520383,
+STORE, 47507989626880, 47507989630975,
+STORE, 47507989520384, 47507989626879,
+ERASE, 47507989520384, 47507989520384,
+STORE, 47507989520384, 47507989626879,
+ERASE, 47507989630976, 47507989630976,
+STORE, 47507989630976, 47507989684223,
+STORE, 47507989684224, 47507992735743,
+STORE, 47507990228992, 47507992735743,
+STORE, 47507989684224, 47507990228991,
+ERASE, 47507990228992, 47507990228992,
+STORE, 47507990228992, 47507992514559,
+STORE, 47507992514560, 47507992735743,
+STORE, 47507991924736, 47507992514559,
+STORE, 47507990228992, 47507991924735,
+ERASE, 47507990228992, 47507990228992,
+STORE, 47507990228992, 47507991924735,
+STORE, 47507992510464, 47507992514559,
+STORE, 47507991924736, 47507992510463,
+ERASE, 47507991924736, 47507991924736,
+STORE, 47507991924736, 47507992510463,
+STORE, 47507992719360, 47507992735743,
+STORE, 47507992514560, 47507992719359,
+ERASE, 47507992514560, 47507992514560,
+STORE, 47507992514560, 47507992719359,
+ERASE, 47507992719360, 47507992719360,
+STORE, 47507992719360, 47507992735743,
+STORE, 47507992735744, 47507992768511,
+ERASE, 47507992735744, 47507992735744,
+STORE, 47507992735744, 47507992743935,
+STORE, 47507992743936, 47507992768511,
+STORE, 47507992756224, 47507992768511,
+STORE, 47507992743936, 47507992756223,
+ERASE, 47507992743936, 47507992743936,
+STORE, 47507992743936, 47507992756223,
+STORE, 47507992760320, 47507992768511,
+STORE, 47507992756224, 47507992760319,
+ERASE, 47507992756224, 47507992756224,
+STORE, 47507992756224, 47507992768511,
+ERASE, 47507992756224, 47507992756224,
+STORE, 47507992756224, 47507992760319,
+STORE, 47507992760320, 47507992768511,
+ERASE, 47507992760320, 47507992760320,
+STORE, 47507992760320, 47507992768511,
+STORE, 47507992768512, 47507992805375,
+ERASE, 47507992768512, 47507992768512,
+STORE, 47507992768512, 47507992776703,
+STORE, 47507992776704, 47507992805375,
+STORE, 47507992793088, 47507992805375,
+STORE, 47507992776704, 47507992793087,
+ERASE, 47507992776704, 47507992776704,
+STORE, 47507992776704, 47507992793087,
+STORE, 47507992797184, 47507992805375,
+STORE, 47507992793088, 47507992797183,
+ERASE, 47507992793088, 47507992793088,
+STORE, 47507992793088, 47507992805375,
+ERASE, 47507992793088, 47507992793088,
+STORE, 47507992793088, 47507992797183,
+STORE, 47507992797184, 47507992805375,
+ERASE, 47507992797184, 47507992797184,
+STORE, 47507992797184, 47507992805375,
+STORE, 47507992805376, 47507993280511,
+ERASE, 47507992805376, 47507992805376,
+STORE, 47507992805376, 47507992813567,
+STORE, 47507992813568, 47507993280511,
+STORE, 47507993149440, 47507993280511,
+STORE, 47507992813568, 47507993149439,
+ERASE, 47507992813568, 47507992813568,
+STORE, 47507992813568, 47507993149439,
+STORE, 47507993272320, 47507993280511,
+STORE, 47507993149440, 47507993272319,
+ERASE, 47507993149440, 47507993149440,
+STORE, 47507993149440, 47507993280511,
+ERASE, 47507993149440, 47507993149440,
+STORE, 47507993149440, 47507993272319,
+STORE, 47507993272320, 47507993280511,
+ERASE, 47507993272320, 47507993272320,
+STORE, 47507993272320, 47507993280511,
+STORE, 47507993280512, 47507993288703,
+STORE, 47507993288704, 47507993309183,
+ERASE, 47507993288704, 47507993288704,
+STORE, 47507993288704, 47507993292799,
+STORE, 47507993292800, 47507993309183,
+STORE, 47507993296896, 47507993309183,
+STORE, 47507993292800, 47507993296895,
+ERASE, 47507993292800, 47507993292800,
+STORE, 47507993292800, 47507993296895,
+STORE, 47507993300992, 47507993309183,
+STORE, 47507993296896, 47507993300991,
+ERASE, 47507993296896, 47507993296896,
+STORE, 47507993296896, 47507993309183,
+ERASE, 47507993296896, 47507993296896,
+STORE, 47507993296896, 47507993300991,
+STORE, 47507993300992, 47507993309183,
+ERASE, 47507993300992, 47507993300992,
+STORE, 47507993300992, 47507993309183,
+STORE, 47507993309184, 47507993317375,
+ERASE, 47507985973248, 47507985973248,
+STORE, 47507985973248, 47507985989631,
+STORE, 47507985989632, 47507985997823,
+ERASE, 47507993300992, 47507993300992,
+STORE, 47507993300992, 47507993305087,
+STORE, 47507993305088, 47507993309183,
+ERASE, 47507988889600, 47507988889600,
+STORE, 47507988889600, 47507988893695,
+STORE, 47507988893696, 47507988897791,
+ERASE, 47507993272320, 47507993272320,
+STORE, 47507993272320, 47507993276415,
+STORE, 47507993276416, 47507993280511,
+ERASE, 47507992797184, 47507992797184,
+STORE, 47507992797184, 47507992801279,
+STORE, 47507992801280, 47507992805375,
+ERASE, 47507992760320, 47507992760320,
+STORE, 47507992760320, 47507992764415,
+STORE, 47507992764416, 47507992768511,
+ERASE, 47507992514560, 47507992514560,
+STORE, 47507992514560, 47507992711167,
+STORE, 47507992711168, 47507992719359,
+ERASE, 47507989630976, 47507989630976,
+STORE, 47507989630976, 47507989667839,
+STORE, 47507989667840, 47507989684223,
+ERASE, 47507989078016, 47507989078016,
+STORE, 47507989078016, 47507989082111,
+STORE, 47507989082112, 47507989086207,
+ERASE, 47507988762624, 47507988762624,
+STORE, 47507988762624, 47507988766719,
+STORE, 47507988766720, 47507988770815,
+ERASE, 47507986493440, 47507986493440,
+STORE, 47507986493440, 47507986513919,
+STORE, 47507986513920, 47507986518015,
+ERASE, 47507986161664, 47507986161664,
+STORE, 47507986161664, 47507986165759,
+STORE, 47507986165760, 47507986169855,
+ERASE, 47507986116608, 47507986116608,
+STORE, 47507986116608, 47507986120703,
+STORE, 47507986120704, 47507986124799,
+ERASE, 94386579570688, 94386579570688,
+STORE, 94386579570688, 94386579693567,
+STORE, 94386579693568, 94386579697663,
+ERASE, 140124810997760, 140124810997760,
+STORE, 140124810997760, 140124811001855,
+STORE, 140124811001856, 140124811005951,
+ERASE, 47507984158720, 47507984158720,
+STORE, 94386583982080, 94386584117247,
+STORE, 94386583982080, 94386584256511,
+ERASE, 94386583982080, 94386583982080,
+STORE, 94386583982080, 94386584223743,
+STORE, 94386584223744, 94386584256511,
+ERASE, 94386584223744, 94386584223744,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733763395584, 140737488351231,
+ERASE, 140733763395584, 140733763395584,
+STORE, 140733763395584, 140733763399679,
+STORE, 94011546472448, 94011547152383,
+ERASE, 94011546472448, 94011546472448,
+STORE, 94011546472448, 94011546537983,
+STORE, 94011546537984, 94011547152383,
+ERASE, 94011546537984, 94011546537984,
+STORE, 94011546537984, 94011546886143,
+STORE, 94011546886144, 94011547025407,
+STORE, 94011547025408, 94011547152383,
+STORE, 139757597949952, 139757598121983,
+ERASE, 139757597949952, 139757597949952,
+STORE, 139757597949952, 139757597954047,
+STORE, 139757597954048, 139757598121983,
+ERASE, 139757597954048, 139757597954048,
+STORE, 139757597954048, 139757598076927,
+STORE, 139757598076928, 139757598109695,
+STORE, 139757598109696, 139757598117887,
+STORE, 139757598117888, 139757598121983,
+STORE, 140733763596288, 140733763600383,
+STORE, 140733763584000, 140733763596287,
+STORE, 47875197046784, 47875197054975,
+STORE, 47875197054976, 47875197063167,
+STORE, 47875197063168, 47875198902271,
+STORE, 47875197202432, 47875198902271,
+STORE, 47875197063168, 47875197202431,
+ERASE, 47875197202432, 47875197202432,
+STORE, 47875197202432, 47875198861311,
+STORE, 47875198861312, 47875198902271,
+STORE, 47875198545920, 47875198861311,
+STORE, 47875197202432, 47875198545919,
+ERASE, 47875197202432, 47875197202432,
+STORE, 47875197202432, 47875198545919,
+STORE, 47875198857216, 47875198861311,
+STORE, 47875198545920, 47875198857215,
+ERASE, 47875198545920, 47875198545920,
+STORE, 47875198545920, 47875198857215,
+STORE, 47875198885888, 47875198902271,
+STORE, 47875198861312, 47875198885887,
+ERASE, 47875198861312, 47875198861312,
+STORE, 47875198861312, 47875198885887,
+ERASE, 47875198885888, 47875198885888,
+STORE, 47875198885888, 47875198902271,
+STORE, 47875198902272, 47875199012863,
+STORE, 47875198918656, 47875199012863,
+STORE, 47875198902272, 47875198918655,
+ERASE, 47875198918656, 47875198918656,
+STORE, 47875198918656, 47875199004671,
+STORE, 47875199004672, 47875199012863,
+STORE, 47875198980096, 47875199004671,
+STORE, 47875198918656, 47875198980095,
+ERASE, 47875198918656, 47875198918656,
+STORE, 47875198918656, 47875198980095,
+STORE, 47875199000576, 47875199004671,
+STORE, 47875198980096, 47875199000575,
+ERASE, 47875198980096, 47875198980096,
+STORE, 47875198980096, 47875199000575,
+ERASE, 47875199004672, 47875199004672,
+STORE, 47875199004672, 47875199012863,
+STORE, 47875199012864, 47875199057919,
+ERASE, 47875199012864, 47875199012864,
+STORE, 47875199012864, 47875199021055,
+STORE, 47875199021056, 47875199057919,
+STORE, 47875199041536, 47875199057919,
+STORE, 47875199021056, 47875199041535,
+ERASE, 47875199021056, 47875199021056,
+STORE, 47875199021056, 47875199041535,
+STORE, 47875199049728, 47875199057919,
+STORE, 47875199041536, 47875199049727,
+ERASE, 47875199041536, 47875199041536,
+STORE, 47875199041536, 47875199057919,
+ERASE, 47875199041536, 47875199041536,
+STORE, 47875199041536, 47875199049727,
+STORE, 47875199049728, 47875199057919,
+ERASE, 47875199049728, 47875199049728,
+STORE, 47875199049728, 47875199057919,
+STORE, 47875199057920, 47875199406079,
+STORE, 47875199098880, 47875199406079,
+STORE, 47875199057920, 47875199098879,
+ERASE, 47875199098880, 47875199098880,
+STORE, 47875199098880, 47875199381503,
+STORE, 47875199381504, 47875199406079,
+STORE, 47875199311872, 47875199381503,
+STORE, 47875199098880, 47875199311871,
+ERASE, 47875199098880, 47875199098880,
+STORE, 47875199098880, 47875199311871,
+STORE, 47875199377408, 47875199381503,
+STORE, 47875199311872, 47875199377407,
+ERASE, 47875199311872, 47875199311872,
+STORE, 47875199311872, 47875199377407,
+ERASE, 47875199381504, 47875199381504,
+STORE, 47875199381504, 47875199406079,
+STORE, 47875199406080, 47875201667071,
+STORE, 47875199557632, 47875201667071,
+STORE, 47875199406080, 47875199557631,
+ERASE, 47875199557632, 47875199557632,
+STORE, 47875199557632, 47875201650687,
+STORE, 47875201650688, 47875201667071,
+STORE, 47875201658880, 47875201667071,
+STORE, 47875201650688, 47875201658879,
+ERASE, 47875201650688, 47875201650688,
+STORE, 47875201650688, 47875201658879,
+ERASE, 47875201658880, 47875201658880,
+STORE, 47875201658880, 47875201667071,
+STORE, 47875201667072, 47875201802239,
+ERASE, 47875201667072, 47875201667072,
+STORE, 47875201667072, 47875201691647,
+STORE, 47875201691648, 47875201802239,
+STORE, 47875201753088, 47875201802239,
+STORE, 47875201691648, 47875201753087,
+ERASE, 47875201691648, 47875201691648,
+STORE, 47875201691648, 47875201753087,
+STORE, 47875201777664, 47875201802239,
+STORE, 47875201753088, 47875201777663,
+ERASE, 47875201753088, 47875201753088,
+STORE, 47875201753088, 47875201802239,
+ERASE, 47875201753088, 47875201753088,
+STORE, 47875201753088, 47875201777663,
+STORE, 47875201777664, 47875201802239,
+STORE, 47875201785856, 47875201802239,
+STORE, 47875201777664, 47875201785855,
+ERASE, 47875201777664, 47875201777664,
+STORE, 47875201777664, 47875201785855,
+ERASE, 47875201785856, 47875201785856,
+STORE, 47875201785856, 47875201802239,
+STORE, 47875201785856, 47875201810431,
+STORE, 47875201810432, 47875201974271,
+ERASE, 47875201810432, 47875201810432,
+STORE, 47875201810432, 47875201822719,
+STORE, 47875201822720, 47875201974271,
+STORE, 47875201921024, 47875201974271,
+STORE, 47875201822720, 47875201921023,
+ERASE, 47875201822720, 47875201822720,
+STORE, 47875201822720, 47875201921023,
+STORE, 47875201966080, 47875201974271,
+STORE, 47875201921024, 47875201966079,
+ERASE, 47875201921024, 47875201921024,
+STORE, 47875201921024, 47875201974271,
+ERASE, 47875201921024, 47875201921024,
+STORE, 47875201921024, 47875201966079,
+STORE, 47875201966080, 47875201974271,
+ERASE, 47875201966080, 47875201966080,
+STORE, 47875201966080, 47875201974271,
+STORE, 47875201974272, 47875202572287,
+STORE, 47875202093056, 47875202572287,
+STORE, 47875201974272, 47875202093055,
+ERASE, 47875202093056, 47875202093056,
+STORE, 47875202093056, 47875202519039,
+STORE, 47875202519040, 47875202572287,
+STORE, 47875202408448, 47875202519039,
+STORE, 47875202093056, 47875202408447,
+ERASE, 47875202093056, 47875202093056,
+STORE, 47875202093056, 47875202408447,
+STORE, 47875202514944, 47875202519039,
+STORE, 47875202408448, 47875202514943,
+ERASE, 47875202408448, 47875202408448,
+STORE, 47875202408448, 47875202514943,
+ERASE, 47875202519040, 47875202519040,
+STORE, 47875202519040, 47875202572287,
+STORE, 47875202572288, 47875205623807,
+STORE, 47875203117056, 47875205623807,
+STORE, 47875202572288, 47875203117055,
+ERASE, 47875203117056, 47875203117056,
+STORE, 47875203117056, 47875205402623,
+STORE, 47875205402624, 47875205623807,
+STORE, 47875204812800, 47875205402623,
+STORE, 47875203117056, 47875204812799,
+ERASE, 47875203117056, 47875203117056,
+STORE, 47875203117056, 47875204812799,
+STORE, 47875205398528, 47875205402623,
+STORE, 47875204812800, 47875205398527,
+ERASE, 47875204812800, 47875204812800,
+STORE, 47875204812800, 47875205398527,
+STORE, 47875205607424, 47875205623807,
+STORE, 47875205402624, 47875205607423,
+ERASE, 47875205402624, 47875205402624,
+STORE, 47875205402624, 47875205607423,
+ERASE, 47875205607424, 47875205607424,
+STORE, 47875205607424, 47875205623807,
+STORE, 47875205623808, 47875205656575,
+ERASE, 47875205623808, 47875205623808,
+STORE, 47875205623808, 47875205631999,
+STORE, 47875205632000, 47875205656575,
+STORE, 47875205644288, 47875205656575,
+STORE, 47875205632000, 47875205644287,
+ERASE, 47875205632000, 47875205632000,
+STORE, 47875205632000, 47875205644287,
+STORE, 47875205648384, 47875205656575,
+STORE, 47875205644288, 47875205648383,
+ERASE, 47875205644288, 47875205644288,
+STORE, 47875205644288, 47875205656575,
+ERASE, 47875205644288, 47875205644288,
+STORE, 47875205644288, 47875205648383,
+STORE, 47875205648384, 47875205656575,
+ERASE, 47875205648384, 47875205648384,
+STORE, 47875205648384, 47875205656575,
+STORE, 47875205656576, 47875205693439,
+ERASE, 47875205656576, 47875205656576,
+STORE, 47875205656576, 47875205664767,
+STORE, 47875205664768, 47875205693439,
+STORE, 47875205681152, 47875205693439,
+STORE, 47875205664768, 47875205681151,
+ERASE, 47875205664768, 47875205664768,
+STORE, 47875205664768, 47875205681151,
+STORE, 47875205685248, 47875205693439,
+STORE, 47875205681152, 47875205685247,
+ERASE, 47875205681152, 47875205681152,
+STORE, 47875205681152, 47875205693439,
+ERASE, 47875205681152, 47875205681152,
+STORE, 47875205681152, 47875205685247,
+STORE, 47875205685248, 47875205693439,
+ERASE, 47875205685248, 47875205685248,
+STORE, 47875205685248, 47875205693439,
+STORE, 47875205693440, 47875206168575,
+ERASE, 47875205693440, 47875205693440,
+STORE, 47875205693440, 47875205701631,
+STORE, 47875205701632, 47875206168575,
+STORE, 47875206037504, 47875206168575,
+STORE, 47875205701632, 47875206037503,
+ERASE, 47875205701632, 47875205701632,
+STORE, 47875205701632, 47875206037503,
+STORE, 47875206160384, 47875206168575,
+STORE, 47875206037504, 47875206160383,
+ERASE, 47875206037504, 47875206037504,
+STORE, 47875206037504, 47875206168575,
+ERASE, 47875206037504, 47875206037504,
+STORE, 47875206037504, 47875206160383,
+STORE, 47875206160384, 47875206168575,
+ERASE, 47875206160384, 47875206160384,
+STORE, 47875206160384, 47875206168575,
+STORE, 47875206168576, 47875206176767,
+STORE, 47875206176768, 47875206197247,
+ERASE, 47875206176768, 47875206176768,
+STORE, 47875206176768, 47875206180863,
+STORE, 47875206180864, 47875206197247,
+STORE, 47875206184960, 47875206197247,
+STORE, 47875206180864, 47875206184959,
+ERASE, 47875206180864, 47875206180864,
+STORE, 47875206180864, 47875206184959,
+STORE, 47875206189056, 47875206197247,
+STORE, 47875206184960, 47875206189055,
+ERASE, 47875206184960, 47875206184960,
+STORE, 47875206184960, 47875206197247,
+ERASE, 47875206184960, 47875206184960,
+STORE, 47875206184960, 47875206189055,
+STORE, 47875206189056, 47875206197247,
+ERASE, 47875206189056, 47875206189056,
+STORE, 47875206189056, 47875206197247,
+STORE, 47875206197248, 47875206205439,
+ERASE, 47875198861312, 47875198861312,
+STORE, 47875198861312, 47875198877695,
+STORE, 47875198877696, 47875198885887,
+ERASE, 47875206189056, 47875206189056,
+STORE, 47875206189056, 47875206193151,
+STORE, 47875206193152, 47875206197247,
+ERASE, 47875201777664, 47875201777664,
+STORE, 47875201777664, 47875201781759,
+STORE, 47875201781760, 47875201785855,
+ERASE, 47875206160384, 47875206160384,
+STORE, 47875206160384, 47875206164479,
+STORE, 47875206164480, 47875206168575,
+ERASE, 47875205685248, 47875205685248,
+STORE, 47875205685248, 47875205689343,
+STORE, 47875205689344, 47875205693439,
+ERASE, 47875205648384, 47875205648384,
+STORE, 47875205648384, 47875205652479,
+STORE, 47875205652480, 47875205656575,
+ERASE, 47875205402624, 47875205402624,
+STORE, 47875205402624, 47875205599231,
+STORE, 47875205599232, 47875205607423,
+ERASE, 47875202519040, 47875202519040,
+STORE, 47875202519040, 47875202555903,
+STORE, 47875202555904, 47875202572287,
+ERASE, 47875201966080, 47875201966080,
+STORE, 47875201966080, 47875201970175,
+STORE, 47875201970176, 47875201974271,
+ERASE, 47875201650688, 47875201650688,
+STORE, 47875201650688, 47875201654783,
+STORE, 47875201654784, 47875201658879,
+ERASE, 47875199381504, 47875199381504,
+STORE, 47875199381504, 47875199401983,
+STORE, 47875199401984, 47875199406079,
+ERASE, 47875199049728, 47875199049728,
+STORE, 47875199049728, 47875199053823,
+STORE, 47875199053824, 47875199057919,
+ERASE, 47875199004672, 47875199004672,
+STORE, 47875199004672, 47875199008767,
+STORE, 47875199008768, 47875199012863,
+ERASE, 94011547025408, 94011547025408,
+STORE, 94011547025408, 94011547148287,
+STORE, 94011547148288, 94011547152383,
+ERASE, 139757598109696, 139757598109696,
+STORE, 139757598109696, 139757598113791,
+STORE, 139757598113792, 139757598117887,
+ERASE, 47875197046784, 47875197046784,
+STORE, 94011557584896, 94011557720063,
+STORE, 94011557584896, 94011557855231,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557851135,
+STORE, 94011557851136, 94011557855231,
+ERASE, 94011557851136, 94011557851136,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557847039,
+STORE, 94011557847040, 94011557851135,
+ERASE, 94011557847040, 94011557847040,
+STORE, 94011557584896, 94011557982207,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557978111,
+STORE, 94011557978112, 94011557982207,
+ERASE, 94011557978112, 94011557978112,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557974015,
+STORE, 94011557974016, 94011557978111,
+ERASE, 94011557974016, 94011557974016,
+STORE, 140737488347136, 140737488351231,
+STORE, 140734130360320, 140737488351231,
+ERASE, 140734130360320, 140734130360320,
+STORE, 140734130360320, 140734130364415,
+STORE, 94641232105472, 94641232785407,
+ERASE, 94641232105472, 94641232105472,
+STORE, 94641232105472, 94641232171007,
+STORE, 94641232171008, 94641232785407,
+ERASE, 94641232171008, 94641232171008,
+STORE, 94641232171008, 94641232519167,
+STORE, 94641232519168, 94641232658431,
+STORE, 94641232658432, 94641232785407,
+STORE, 139726599516160, 139726599688191,
+ERASE, 139726599516160, 139726599516160,
+STORE, 139726599516160, 139726599520255,
+STORE, 139726599520256, 139726599688191,
+ERASE, 139726599520256, 139726599520256,
+STORE, 139726599520256, 139726599643135,
+STORE, 139726599643136, 139726599675903,
+STORE, 139726599675904, 139726599684095,
+STORE, 139726599684096, 139726599688191,
+STORE, 140734130446336, 140734130450431,
+STORE, 140734130434048, 140734130446335,
+STORE, 47906195480576, 47906195488767,
+STORE, 47906195488768, 47906195496959,
+STORE, 47906195496960, 47906197336063,
+STORE, 47906195636224, 47906197336063,
+STORE, 47906195496960, 47906195636223,
+ERASE, 47906195636224, 47906195636224,
+STORE, 47906195636224, 47906197295103,
+STORE, 47906197295104, 47906197336063,
+STORE, 47906196979712, 47906197295103,
+STORE, 47906195636224, 47906196979711,
+ERASE, 47906195636224, 47906195636224,
+STORE, 47906195636224, 47906196979711,
+STORE, 47906197291008, 47906197295103,
+STORE, 47906196979712, 47906197291007,
+ERASE, 47906196979712, 47906196979712,
+STORE, 47906196979712, 47906197291007,
+STORE, 47906197319680, 47906197336063,
+STORE, 47906197295104, 47906197319679,
+ERASE, 47906197295104, 47906197295104,
+STORE, 47906197295104, 47906197319679,
+ERASE, 47906197319680, 47906197319680,
+STORE, 47906197319680, 47906197336063,
+STORE, 47906197336064, 47906197446655,
+STORE, 47906197352448, 47906197446655,
+STORE, 47906197336064, 47906197352447,
+ERASE, 47906197352448, 47906197352448,
+STORE, 47906197352448, 47906197438463,
+STORE, 47906197438464, 47906197446655,
+STORE, 47906197413888, 47906197438463,
+STORE, 47906197352448, 47906197413887,
+ERASE, 47906197352448, 47906197352448,
+STORE, 47906197352448, 47906197413887,
+STORE, 47906197434368, 47906197438463,
+STORE, 47906197413888, 47906197434367,
+ERASE, 47906197413888, 47906197413888,
+STORE, 47906197413888, 47906197434367,
+ERASE, 47906197438464, 47906197438464,
+STORE, 47906197438464, 47906197446655,
+STORE, 47906197446656, 47906197491711,
+ERASE, 47906197446656, 47906197446656,
+STORE, 47906197446656, 47906197454847,
+STORE, 47906197454848, 47906197491711,
+STORE, 47906197475328, 47906197491711,
+STORE, 47906197454848, 47906197475327,
+ERASE, 47906197454848, 47906197454848,
+STORE, 47906197454848, 47906197475327,
+STORE, 47906197483520, 47906197491711,
+STORE, 47906197475328, 47906197483519,
+ERASE, 47906197475328, 47906197475328,
+STORE, 47906197475328, 47906197491711,
+ERASE, 47906197475328, 47906197475328,
+STORE, 47906197475328, 47906197483519,
+STORE, 47906197483520, 47906197491711,
+ERASE, 47906197483520, 47906197483520,
+STORE, 47906197483520, 47906197491711,
+STORE, 47906197491712, 47906197839871,
+STORE, 47906197532672, 47906197839871,
+STORE, 47906197491712, 47906197532671,
+ERASE, 47906197532672, 47906197532672,
+STORE, 47906197532672, 47906197815295,
+STORE, 47906197815296, 47906197839871,
+STORE, 47906197745664, 47906197815295,
+STORE, 47906197532672, 47906197745663,
+ERASE, 47906197532672, 47906197532672,
+STORE, 47906197532672, 47906197745663,
+STORE, 47906197811200, 47906197815295,
+STORE, 47906197745664, 47906197811199,
+ERASE, 47906197745664, 47906197745664,
+STORE, 47906197745664, 47906197811199,
+ERASE, 47906197815296, 47906197815296,
+STORE, 47906197815296, 47906197839871,
+STORE, 47906197839872, 47906200100863,
+STORE, 47906197991424, 47906200100863,
+STORE, 47906197839872, 47906197991423,
+ERASE, 47906197991424, 47906197991424,
+STORE, 47906197991424, 47906200084479,
+STORE, 47906200084480, 47906200100863,
+STORE, 47906200092672, 47906200100863,
+STORE, 47906200084480, 47906200092671,
+ERASE, 47906200084480, 47906200084480,
+STORE, 47906200084480, 47906200092671,
+ERASE, 47906200092672, 47906200092672,
+STORE, 47906200092672, 47906200100863,
+STORE, 47906200100864, 47906200236031,
+ERASE, 47906200100864, 47906200100864,
+STORE, 47906200100864, 47906200125439,
+STORE, 47906200125440, 47906200236031,
+STORE, 47906200186880, 47906200236031,
+STORE, 47906200125440, 47906200186879,
+ERASE, 47906200125440, 47906200125440,
+STORE, 47906200125440, 47906200186879,
+STORE, 47906200211456, 47906200236031,
+STORE, 47906200186880, 47906200211455,
+ERASE, 47906200186880, 47906200186880,
+STORE, 47906200186880, 47906200236031,
+ERASE, 47906200186880, 47906200186880,
+STORE, 47906200186880, 47906200211455,
+STORE, 47906200211456, 47906200236031,
+STORE, 47906200219648, 47906200236031,
+STORE, 47906200211456, 47906200219647,
+ERASE, 47906200211456, 47906200211456,
+STORE, 47906200211456, 47906200219647,
+ERASE, 47906200219648, 47906200219648,
+STORE, 47906200219648, 47906200236031,
+STORE, 47906200219648, 47906200244223,
+STORE, 47906200244224, 47906200408063,
+ERASE, 47906200244224, 47906200244224,
+STORE, 47906200244224, 47906200256511,
+STORE, 47906200256512, 47906200408063,
+STORE, 47906200354816, 47906200408063,
+STORE, 47906200256512, 47906200354815,
+ERASE, 47906200256512, 47906200256512,
+STORE, 47906200256512, 47906200354815,
+STORE, 47906200399872, 47906200408063,
+STORE, 47906200354816, 47906200399871,
+ERASE, 47906200354816, 47906200354816,
+STORE, 47906200354816, 47906200408063,
+ERASE, 47906200354816, 47906200354816,
+STORE, 47906200354816, 47906200399871,
+STORE, 47906200399872, 47906200408063,
+ERASE, 47906200399872, 47906200399872,
+STORE, 47906200399872, 47906200408063,
+STORE, 47906200408064, 47906201006079,
+STORE, 47906200526848, 47906201006079,
+STORE, 47906200408064, 47906200526847,
+ERASE, 47906200526848, 47906200526848,
+STORE, 47906200526848, 47906200952831,
+STORE, 47906200952832, 47906201006079,
+STORE, 47906200842240, 47906200952831,
+STORE, 47906200526848, 47906200842239,
+ERASE, 47906200526848, 47906200526848,
+STORE, 47906200526848, 47906200842239,
+STORE, 47906200948736, 47906200952831,
+STORE, 47906200842240, 47906200948735,
+ERASE, 47906200842240, 47906200842240,
+STORE, 47906200842240, 47906200948735,
+ERASE, 47906200952832, 47906200952832,
+STORE, 47906200952832, 47906201006079,
+STORE, 47906201006080, 47906204057599,
+STORE, 47906201550848, 47906204057599,
+STORE, 47906201006080, 47906201550847,
+ERASE, 47906201550848, 47906201550848,
+STORE, 47906201550848, 47906203836415,
+STORE, 47906203836416, 47906204057599,
+STORE, 47906203246592, 47906203836415,
+STORE, 47906201550848, 47906203246591,
+ERASE, 47906201550848, 47906201550848,
+STORE, 47906201550848, 47906203246591,
+STORE, 47906203832320, 47906203836415,
+STORE, 47906203246592, 47906203832319,
+ERASE, 47906203246592, 47906203246592,
+STORE, 47906203246592, 47906203832319,
+STORE, 47906204041216, 47906204057599,
+STORE, 47906203836416, 47906204041215,
+ERASE, 47906203836416, 47906203836416,
+STORE, 47906203836416, 47906204041215,
+ERASE, 47906204041216, 47906204041216,
+STORE, 47906204041216, 47906204057599,
+STORE, 47906204057600, 47906204090367,
+ERASE, 47906204057600, 47906204057600,
+STORE, 47906204057600, 47906204065791,
+STORE, 47906204065792, 47906204090367,
+STORE, 47906204078080, 47906204090367,
+STORE, 47906204065792, 47906204078079,
+ERASE, 47906204065792, 47906204065792,
+STORE, 47906204065792, 47906204078079,
+STORE, 47906204082176, 47906204090367,
+STORE, 47906204078080, 47906204082175,
+ERASE, 47906204078080, 47906204078080,
+STORE, 47906204078080, 47906204090367,
+ERASE, 47906204078080, 47906204078080,
+STORE, 47906204078080, 47906204082175,
+STORE, 47906204082176, 47906204090367,
+ERASE, 47906204082176, 47906204082176,
+STORE, 47906204082176, 47906204090367,
+STORE, 47906204090368, 47906204127231,
+ERASE, 47906204090368, 47906204090368,
+STORE, 47906204090368, 47906204098559,
+STORE, 47906204098560, 47906204127231,
+STORE, 47906204114944, 47906204127231,
+STORE, 47906204098560, 47906204114943,
+ERASE, 47906204098560, 47906204098560,
+STORE, 47906204098560, 47906204114943,
+STORE, 47906204119040, 47906204127231,
+STORE, 47906204114944, 47906204119039,
+ERASE, 47906204114944, 47906204114944,
+STORE, 47906204114944, 47906204127231,
+ERASE, 47906204114944, 47906204114944,
+STORE, 47906204114944, 47906204119039,
+STORE, 47906204119040, 47906204127231,
+ERASE, 47906204119040, 47906204119040,
+STORE, 47906204119040, 47906204127231,
+STORE, 47906204127232, 47906204602367,
+ERASE, 47906204127232, 47906204127232,
+STORE, 47906204127232, 47906204135423,
+STORE, 47906204135424, 47906204602367,
+STORE, 47906204471296, 47906204602367,
+STORE, 47906204135424, 47906204471295,
+ERASE, 47906204135424, 47906204135424,
+STORE, 47906204135424, 47906204471295,
+STORE, 47906204594176, 47906204602367,
+STORE, 47906204471296, 47906204594175,
+ERASE, 47906204471296, 47906204471296,
+STORE, 47906204471296, 47906204602367,
+ERASE, 47906204471296, 47906204471296,
+STORE, 47906204471296, 47906204594175,
+STORE, 47906204594176, 47906204602367,
+ERASE, 47906204594176, 47906204594176,
+STORE, 47906204594176, 47906204602367,
+STORE, 47906204602368, 47906204610559,
+STORE, 47906204610560, 47906204631039,
+ERASE, 47906204610560, 47906204610560,
+STORE, 47906204610560, 47906204614655,
+STORE, 47906204614656, 47906204631039,
+STORE, 47906204618752, 47906204631039,
+STORE, 47906204614656, 47906204618751,
+ERASE, 47906204614656, 47906204614656,
+STORE, 47906204614656, 47906204618751,
+STORE, 47906204622848, 47906204631039,
+STORE, 47906204618752, 47906204622847,
+ERASE, 47906204618752, 47906204618752,
+STORE, 47906204618752, 47906204631039,
+ERASE, 47906204618752, 47906204618752,
+STORE, 47906204618752, 47906204622847,
+STORE, 47906204622848, 47906204631039,
+ERASE, 47906204622848, 47906204622848,
+STORE, 47906204622848, 47906204631039,
+STORE, 47906204631040, 47906204639231,
+ERASE, 47906197295104, 47906197295104,
+STORE, 47906197295104, 47906197311487,
+STORE, 47906197311488, 47906197319679,
+ERASE, 47906204622848, 47906204622848,
+STORE, 47906204622848, 47906204626943,
+STORE, 47906204626944, 47906204631039,
+ERASE, 47906200211456, 47906200211456,
+STORE, 47906200211456, 47906200215551,
+STORE, 47906200215552, 47906200219647,
+ERASE, 47906204594176, 47906204594176,
+STORE, 47906204594176, 47906204598271,
+STORE, 47906204598272, 47906204602367,
+ERASE, 47906204119040, 47906204119040,
+STORE, 47906204119040, 47906204123135,
+STORE, 47906204123136, 47906204127231,
+ERASE, 47906204082176, 47906204082176,
+STORE, 47906204082176, 47906204086271,
+STORE, 47906204086272, 47906204090367,
+ERASE, 47906203836416, 47906203836416,
+STORE, 47906203836416, 47906204033023,
+STORE, 47906204033024, 47906204041215,
+ERASE, 47906200952832, 47906200952832,
+STORE, 47906200952832, 47906200989695,
+STORE, 47906200989696, 47906201006079,
+ERASE, 47906200399872, 47906200399872,
+STORE, 47906200399872, 47906200403967,
+STORE, 47906200403968, 47906200408063,
+ERASE, 47906200084480, 47906200084480,
+STORE, 47906200084480, 47906200088575,
+STORE, 47906200088576, 47906200092671,
+ERASE, 47906197815296, 47906197815296,
+STORE, 47906197815296, 47906197835775,
+STORE, 47906197835776, 47906197839871,
+ERASE, 47906197483520, 47906197483520,
+STORE, 47906197483520, 47906197487615,
+STORE, 47906197487616, 47906197491711,
+ERASE, 47906197438464, 47906197438464,
+STORE, 47906197438464, 47906197442559,
+STORE, 47906197442560, 47906197446655,
+ERASE, 94641232658432, 94641232658432,
+STORE, 94641232658432, 94641232781311,
+STORE, 94641232781312, 94641232785407,
+ERASE, 139726599675904, 139726599675904,
+STORE, 139726599675904, 139726599679999,
+STORE, 139726599680000, 139726599684095,
+ERASE, 47906195480576, 47906195480576,
+STORE, 94641242615808, 94641242750975,
+ };
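+/*
+ * A minimal reading of these tables, assuming the convention of the
+ * surrounding test data: each record is a triple of (operation, start,
+ * end), where STORE writes an entry over the inclusive byte range
+ * [start, end] and ERASE removes the entry at that range.  The addresses
+ * appear to be user VMA boundaries captured from a running process.  A
+ * sketch of a replay loop, assuming the stock maple tree API
+ * (mtree_store_range()/mtree_erase()) and using xa_mk_value() as a
+ * stand-in entry:
+ *
+ *	static void replay_set(struct maple_tree *mt,
+ *			       const unsigned long *set, unsigned long count)
+ *	{
+ *		unsigned long i;
+ *
+ *		for (i = 0; i + 2 < count; i += 3) {
+ *			switch (set[i]) {
+ *			case STORE:
+ *				mtree_store_range(mt, set[i + 1], set[i + 2],
+ *						  xa_mk_value(i), GFP_KERNEL);
+ *				break;
+ *			case ERASE:
+ *				mtree_erase(mt, set[i + 1]);
+ *				break;
+ *			}
+ *		}
+ *	}
+ */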
+ static const unsigned long set11[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140732658499584, 140737488351231,
+ERASE, 140732658499584, 140732658499584,
+STORE, 140732658499584, 140732658503679,
+STORE, 94029856579584, 94029856751615,
+ERASE, 94029856579584, 94029856579584,
+STORE, 94029856579584, 94029856595967,
+STORE, 94029856595968, 94029856751615,
+ERASE, 94029856595968, 94029856595968,
+STORE, 94029856595968, 94029856698367,
+STORE, 94029856698368, 94029856739327,
+STORE, 94029856739328, 94029856751615,
+STORE, 140014592573440, 140014592745471,
+ERASE, 140014592573440, 140014592573440,
+STORE, 140014592573440, 140014592577535,
+STORE, 140014592577536, 140014592745471,
+ERASE, 140014592577536, 140014592577536,
+STORE, 140014592577536, 140014592700415,
+STORE, 140014592700416, 140014592733183,
+STORE, 140014592733184, 140014592741375,
+STORE, 140014592741376, 140014592745471,
+STORE, 140732658565120, 140732658569215,
+STORE, 140732658552832, 140732658565119,
+ };
+
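+/*
+ * Only the two records appended at the end of set12 differ from set11: a
+ * one-byte store at ...741375 marked as contrived, and an overlapping
+ * store that, per its inline comment, creates a first entry retry.
+ */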
+ static const unsigned long set12[] = { /* set11 plus two contrived records. */
+STORE, 140737488347136, 140737488351231,
+STORE, 140732658499584, 140737488351231,
+ERASE, 140732658499584, 140732658499584,
+STORE, 140732658499584, 140732658503679,
+STORE, 94029856579584, 94029856751615,
+ERASE, 94029856579584, 94029856579584,
+STORE, 94029856579584, 94029856595967,
+STORE, 94029856595968, 94029856751615,
+ERASE, 94029856595968, 94029856595968,
+STORE, 94029856595968, 94029856698367,
+STORE, 94029856698368, 94029856739327,
+STORE, 94029856739328, 94029856751615,
+STORE, 140014592573440, 140014592745471,
+ERASE, 140014592573440, 140014592573440,
+STORE, 140014592573440, 140014592577535,
+STORE, 140014592577536, 140014592745471,
+ERASE, 140014592577536, 140014592577536,
+STORE, 140014592577536, 140014592700415,
+STORE, 140014592700416, 140014592733183,
+STORE, 140014592733184, 140014592741375,
+STORE, 140014592741376, 140014592745471,
+STORE, 140732658565120, 140732658569215,
+STORE, 140732658552832, 140732658565119,
+STORE, 140014592741375, 140014592741375, /* contrived */
+STORE, 140014592733184, 140014592741376, /* creates first entry retry. */
+ };
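+/*
+ * The hex annotations on set13 look like the kernel addresses of the
+ * entries observed when this sequence was captured.  Note the record
+ * ending at ...688254: it leaves a one-byte hole before the next range,
+ * which starts at ...688256.
+ */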
+ static const unsigned long set13[] = {
+STORE, 140373516247040, 140373516251135,/*: ffffa2e7b0e10d80 */
+STORE, 140373516251136, 140373516255231,/*: ffffa2e7b1195d80 */
+STORE, 140373516255232, 140373516443647,/*: ffffa2e7b0e109c0 */
+STORE, 140373516443648, 140373516587007,/*: ffffa2e7b05fecc0 */
+STORE, 140373516963840, 140373518647295,/*: ffffa2e7bfbdcc00 */
+STORE, 140373518647296, 140373518663679,/*: ffffa2e7bf5d59c0 */
+STORE, 140373518663680, 140373518684159,/*: deleted (257) */
+STORE, 140373518680064, 140373518684159,/*: ffffa2e7b0e1cb40 */
+STORE, 140373518684160, 140373518688254,/*: ffffa2e7b05fec00 */
+STORE, 140373518688256, 140373518692351,/*: ffffa2e7bfbdcd80 */
+STORE, 140373518692352, 140373518696447,/*: ffffa2e7b0749e40 */
+ };
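+/*
+ * set14 mixes SNULL records in with STORE and ERASE.  Going by the name,
+ * SNULL appears to store NULL over [start, end] rather than erase a
+ * single entry, which is how one mapping is split into the narrower
+ * ranges stored right after it.
+ */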
+ static const unsigned long set14[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140731667996672, 140737488351231,
+SNULL, 140731668000767, 140737488351231,
+STORE, 140731667996672, 140731668000767,
+STORE, 140731667865600, 140731668000767,
+STORE, 94077521272832, 94077521313791,
+SNULL, 94077521301503, 94077521313791,
+STORE, 94077521272832, 94077521301503,
+STORE, 94077521301504, 94077521313791,
+ERASE, 94077521301504, 94077521313791,
+STORE, 94077521305600, 94077521313791,
+STORE, 139826134630400, 139826136883199,
+SNULL, 139826134773759, 139826136883199,
+STORE, 139826134630400, 139826134773759,
+STORE, 139826134773760, 139826136883199,
+ERASE, 139826134773760, 139826136883199,
+STORE, 139826136870912, 139826136879103,
+STORE, 139826136879104, 139826136883199,
+STORE, 140731668013056, 140731668017151,
+STORE, 140731668000768, 140731668013055,
+STORE, 139826136862720, 139826136870911,
+STORE, 139826132406272, 139826134630399,
+SNULL, 139826134056959, 139826134630399,
+STORE, 139826132406272, 139826134056959,
+STORE, 139826134056960, 139826134630399,
+SNULL, 139826134056960, 139826134626303,
+STORE, 139826134626304, 139826134630399,
+STORE, 139826134056960, 139826134626303,
+ERASE, 139826134056960, 139826134626303,
+STORE, 139826134056960, 139826134626303,
+ERASE, 139826134626304, 139826134630399,
+STORE, 139826134626304, 139826134630399,
+STORE, 139826136842240, 139826136862719,
+STORE, 139826130022400, 139826132406271,
+SNULL, 139826130022400, 139826130288639,
+STORE, 139826130288640, 139826132406271,
+STORE, 139826130022400, 139826130288639,
+SNULL, 139826132381695, 139826132406271,
+STORE, 139826130288640, 139826132381695,
+STORE, 139826132381696, 139826132406271,
+SNULL, 139826132381696, 139826132402175,
+STORE, 139826132402176, 139826132406271,
+STORE, 139826132381696, 139826132402175,
+ERASE, 139826132381696, 139826132402175,
+STORE, 139826132381696, 139826132402175,
+ERASE, 139826132402176, 139826132406271,
+STORE, 139826132402176, 139826132406271,
+STORE, 139826127806464, 139826130022399,
+SNULL, 139826127806464, 139826127904767,
+STORE, 139826127904768, 139826130022399,
+STORE, 139826127806464, 139826127904767,
+SNULL, 139826129997823, 139826130022399,
+STORE, 139826127904768, 139826129997823,
+STORE, 139826129997824, 139826130022399,
+SNULL, 139826129997824, 139826130006015,
+STORE, 139826130006016, 139826130022399,
+STORE, 139826129997824, 139826130006015,
+ERASE, 139826129997824, 139826130006015,
+STORE, 139826129997824, 139826130006015,
+ERASE, 139826130006016, 139826130022399,
+STORE, 139826130006016, 139826130022399,
+STORE, 139826124009472, 139826127806463,
+SNULL, 139826124009472, 139826125668351,
+STORE, 139826125668352, 139826127806463,
+STORE, 139826124009472, 139826125668351,
+SNULL, 139826127765503, 139826127806463,
+STORE, 139826125668352, 139826127765503,
+STORE, 139826127765504, 139826127806463,
+SNULL, 139826127765504, 139826127790079,
+STORE, 139826127790080, 139826127806463,
+STORE, 139826127765504, 139826127790079,
+ERASE, 139826127765504, 139826127790079,
+STORE, 139826127765504, 139826127790079,
+ERASE, 139826127790080, 139826127806463,
+STORE, 139826127790080, 139826127806463,
+STORE, 139826121748480, 139826124009471,
+SNULL, 139826121748480, 139826121900031,
+STORE, 139826121900032, 139826124009471,
+STORE, 139826121748480, 139826121900031,
+SNULL, 139826123993087, 139826124009471,
+STORE, 139826121900032, 139826123993087,
+STORE, 139826123993088, 139826124009471,
+SNULL, 139826123993088, 139826124001279,
+STORE, 139826124001280, 139826124009471,
+STORE, 139826123993088, 139826124001279,
+ERASE, 139826123993088, 139826124001279,
+STORE, 139826123993088, 139826124001279,
+ERASE, 139826124001280, 139826124009471,
+STORE, 139826124001280, 139826124009471,
+STORE, 139826119626752, 139826121748479,
+SNULL, 139826119626752, 139826119643135,
+STORE, 139826119643136, 139826121748479,
+STORE, 139826119626752, 139826119643135,
+SNULL, 139826121740287, 139826121748479,
+STORE, 139826119643136, 139826121740287,
+STORE, 139826121740288, 139826121748479,
+ERASE, 139826121740288, 139826121748479,
+STORE, 139826121740288, 139826121748479,
+STORE, 139826136834048, 139826136842239,
+STORE, 139826117496832, 139826119626751,
+SNULL, 139826117496832, 139826117525503,
+STORE, 139826117525504, 139826119626751,
+STORE, 139826117496832, 139826117525503,
+SNULL, 139826119618559, 139826119626751,
+STORE, 139826117525504, 139826119618559,
+STORE, 139826119618560, 139826119626751,
+ERASE, 139826119618560, 139826119626751,
+STORE, 139826119618560, 139826119626751,
+STORE, 139826115244032, 139826117496831,
+SNULL, 139826115244032, 139826115395583,
+STORE, 139826115395584, 139826117496831,
+STORE, 139826115244032, 139826115395583,
+SNULL, 139826117488639, 139826117496831,
+STORE, 139826115395584, 139826117488639,
+STORE, 139826117488640, 139826117496831,
+ERASE, 139826117488640, 139826117496831,
+STORE, 139826117488640, 139826117496831,
+STORE, 139826113073152, 139826115244031,
+SNULL, 139826113073152, 139826113142783,
+STORE, 139826113142784, 139826115244031,
+STORE, 139826113073152, 139826113142783,
+SNULL, 139826115235839, 139826115244031,
+STORE, 139826113142784, 139826115235839,
+STORE, 139826115235840, 139826115244031,
+ERASE, 139826115235840, 139826115244031,
+STORE, 139826115235840, 139826115244031,
+STORE, 139826109861888, 139826113073151,
+SNULL, 139826109861888, 139826110939135,
+STORE, 139826110939136, 139826113073151,
+STORE, 139826109861888, 139826110939135,
+SNULL, 139826113036287, 139826113073151,
+STORE, 139826110939136, 139826113036287,
+STORE, 139826113036288, 139826113073151,
+ERASE, 139826113036288, 139826113073151,
+STORE, 139826113036288, 139826113073151,
+STORE, 139826107727872, 139826109861887,
+SNULL, 139826107727872, 139826107756543,
+STORE, 139826107756544, 139826109861887,
+STORE, 139826107727872, 139826107756543,
+SNULL, 139826109853695, 139826109861887,
+STORE, 139826107756544, 139826109853695,
+STORE, 139826109853696, 139826109861887,
+ERASE, 139826109853696, 139826109861887,
+STORE, 139826109853696, 139826109861887,
+STORE, 139826105417728, 139826107727871,
+SNULL, 139826105417728, 139826105622527,
+STORE, 139826105622528, 139826107727871,
+STORE, 139826105417728, 139826105622527,
+SNULL, 139826107719679, 139826107727871,
+STORE, 139826105622528, 139826107719679,
+STORE, 139826107719680, 139826107727871,
+ERASE, 139826107719680, 139826107727871,
+STORE, 139826107719680, 139826107727871,
+STORE, 139826136825856, 139826136842239,
+STORE, 139826103033856, 139826105417727,
+SNULL, 139826103033856, 139826103226367,
+STORE, 139826103226368, 139826105417727,
+STORE, 139826103033856, 139826103226367,
+SNULL, 139826105319423, 139826105417727,
+STORE, 139826103226368, 139826105319423,
+STORE, 139826105319424, 139826105417727,
+ERASE, 139826105319424, 139826105417727,
+STORE, 139826105319424, 139826105417727,
+STORE, 139826100916224, 139826103033855,
+SNULL, 139826100916224, 139826100932607,
+STORE, 139826100932608, 139826103033855,
+STORE, 139826100916224, 139826100932607,
+SNULL, 139826103025663, 139826103033855,
+STORE, 139826100932608, 139826103025663,
+STORE, 139826103025664, 139826103033855,
+ERASE, 139826103025664, 139826103033855,
+STORE, 139826103025664, 139826103033855,
+STORE, 139826098348032, 139826100916223,
+SNULL, 139826098348032, 139826098814975,
+STORE, 139826098814976, 139826100916223,
+STORE, 139826098348032, 139826098814975,
+SNULL, 139826100908031, 139826100916223,
+STORE, 139826098814976, 139826100908031,
+STORE, 139826100908032, 139826100916223,
+ERASE, 139826100908032, 139826100916223,
+STORE, 139826100908032, 139826100916223,
+STORE, 139826096234496, 139826098348031,
+SNULL, 139826096234496, 139826096246783,
+STORE, 139826096246784, 139826098348031,
+STORE, 139826096234496, 139826096246783,
+SNULL, 139826098339839, 139826098348031,
+STORE, 139826096246784, 139826098339839,
+STORE, 139826098339840, 139826098348031,
+ERASE, 139826098339840, 139826098348031,
+STORE, 139826098339840, 139826098348031,
+STORE, 139826094055424, 139826096234495,
+SNULL, 139826094055424, 139826094133247,
+STORE, 139826094133248, 139826096234495,
+STORE, 139826094055424, 139826094133247,
+SNULL, 139826096226303, 139826096234495,
+STORE, 139826094133248, 139826096226303,
+STORE, 139826096226304, 139826096234495,
+ERASE, 139826096226304, 139826096234495,
+STORE, 139826096226304, 139826096234495,
+STORE, 139826136817664, 139826136842239,
+STORE, 139826091937792, 139826094055423,
+SNULL, 139826091937792, 139826091954175,
+STORE, 139826091954176, 139826094055423,
+STORE, 139826091937792, 139826091954175,
+SNULL, 139826094047231, 139826094055423,
+STORE, 139826091954176, 139826094047231,
+STORE, 139826094047232, 139826094055423,
+ERASE, 139826094047232, 139826094055423,
+STORE, 139826094047232, 139826094055423,
+STORE, 139826136809472, 139826136842239,
+SNULL, 139826127781887, 139826127790079,
+STORE, 139826127765504, 139826127781887,
+STORE, 139826127781888, 139826127790079,
+SNULL, 139826094051327, 139826094055423,
+STORE, 139826094047232, 139826094051327,
+STORE, 139826094051328, 139826094055423,
+SNULL, 139826096230399, 139826096234495,
+STORE, 139826096226304, 139826096230399,
+STORE, 139826096230400, 139826096234495,
+SNULL, 139826098343935, 139826098348031,
+STORE, 139826098339840, 139826098343935,
+STORE, 139826098343936, 139826098348031,
+SNULL, 139826130001919, 139826130006015,
+STORE, 139826129997824, 139826130001919,
+STORE, 139826130001920, 139826130006015,
+SNULL, 139826100912127, 139826100916223,
+STORE, 139826100908032, 139826100912127,
+STORE, 139826100912128, 139826100916223,
+SNULL, 139826103029759, 139826103033855,
+STORE, 139826103025664, 139826103029759,
+STORE, 139826103029760, 139826103033855,
+SNULL, 139826105413631, 139826105417727,
+STORE, 139826105319424, 139826105413631,
+STORE, 139826105413632, 139826105417727,
+SNULL, 139826107723775, 139826107727871,
+STORE, 139826107719680, 139826107723775,
+STORE, 139826107723776, 139826107727871,
+SNULL, 139826109857791, 139826109861887,
+STORE, 139826109853696, 139826109857791,
+STORE, 139826109857792, 139826109861887,
+SNULL, 139826113044479, 139826113073151,
+STORE, 139826113036288, 139826113044479,
+STORE, 139826113044480, 139826113073151,
+SNULL, 139826115239935, 139826115244031,
+STORE, 139826115235840, 139826115239935,
+STORE, 139826115239936, 139826115244031,
+SNULL, 139826117492735, 139826117496831,
+STORE, 139826117488640, 139826117492735,
+STORE, 139826117492736, 139826117496831,
+SNULL, 139826119622655, 139826119626751,
+STORE, 139826119618560, 139826119622655,
+STORE, 139826119622656, 139826119626751,
+SNULL, 139826121744383, 139826121748479,
+STORE, 139826121740288, 139826121744383,
+STORE, 139826121744384, 139826121748479,
+SNULL, 139826123997183, 139826124001279,
+STORE, 139826123993088, 139826123997183,
+STORE, 139826123997184, 139826124001279,
+SNULL, 139826132398079, 139826132402175,
+STORE, 139826132381696, 139826132398079,
+STORE, 139826132398080, 139826132402175,
+SNULL, 139826134622207, 139826134626303,
+STORE, 139826134056960, 139826134622207,
+STORE, 139826134622208, 139826134626303,
+SNULL, 94077521309695, 94077521313791,
+STORE, 94077521305600, 94077521309695,
+STORE, 94077521309696, 94077521313791,
+SNULL, 139826136875007, 139826136879103,
+STORE, 139826136870912, 139826136875007,
+STORE, 139826136875008, 139826136879103,
+ERASE, 139826136842240, 139826136862719,
+STORE, 94077554049024, 94077554184191,
+STORE, 139826136543232, 139826136842239,
+STORE, 139826136276992, 139826136842239,
+STORE, 139826136010752, 139826136842239,
+STORE, 139826135744512, 139826136842239,
+SNULL, 139826136543231, 139826136842239,
+STORE, 139826135744512, 139826136543231,
+STORE, 139826136543232, 139826136842239,
+SNULL, 139826136543232, 139826136809471,
+STORE, 139826136809472, 139826136842239,
+STORE, 139826136543232, 139826136809471,
+ };
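+ /*
+  * Each set above and below encodes one recorded mmap trace as flat
+  * three-word records: an opcode (STORE, SNULL or ERASE) followed by the
+  * inclusive [start, last] byte range it covers; the ranges are
+  * page-aligned, so each "last" value sits one byte short of the next
+  * page boundary. A minimal decode loop over one set, assuming only that
+  * layout (apply() is a stand-in for whatever the harness does with one
+  * record):
+  *
+  *	for (i = 0; i < size; i += 3)
+  *		apply(set[i], set[i + 1], set[i + 2]);
+  */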
+
+ static const unsigned long set15[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140722061451264, 140737488351231,
+SNULL, 140722061455359, 140737488351231,
+STORE, 140722061451264, 140722061455359,
+STORE, 140722061320192, 140722061455359,
+STORE, 94728600248320, 94728600289279,
+SNULL, 94728600276991, 94728600289279,
+STORE, 94728600248320, 94728600276991,
+STORE, 94728600276992, 94728600289279,
+ERASE, 94728600276992, 94728600289279,
+STORE, 94728600281088, 94728600289279,
+STORE, 139906806779904, 139906809032703,
+SNULL, 139906806923263, 139906809032703,
+STORE, 139906806779904, 139906806923263,
+STORE, 139906806923264, 139906809032703,
+ERASE, 139906806923264, 139906809032703,
+STORE, 139906809020416, 139906809028607,
+STORE, 139906809028608, 139906809032703,
+STORE, 140722061692928, 140722061697023,
+STORE, 140722061680640, 140722061692927,
+STORE, 139906809012224, 139906809020415,
+STORE, 139906804555776, 139906806779903,
+SNULL, 139906806206463, 139906806779903,
+STORE, 139906804555776, 139906806206463,
+STORE, 139906806206464, 139906806779903,
+SNULL, 139906806206464, 139906806775807,
+STORE, 139906806775808, 139906806779903,
+STORE, 139906806206464, 139906806775807,
+ERASE, 139906806206464, 139906806775807,
+STORE, 139906806206464, 139906806775807,
+ERASE, 139906806775808, 139906806779903,
+STORE, 139906806775808, 139906806779903,
+STORE, 139906808991744, 139906809012223,
+STORE, 139906802171904, 139906804555775,
+SNULL, 139906802171904, 139906802438143,
+STORE, 139906802438144, 139906804555775,
+STORE, 139906802171904, 139906802438143,
+SNULL, 139906804531199, 139906804555775,
+STORE, 139906802438144, 139906804531199,
+STORE, 139906804531200, 139906804555775,
+SNULL, 139906804531200, 139906804551679,
+STORE, 139906804551680, 139906804555775,
+STORE, 139906804531200, 139906804551679,
+ERASE, 139906804531200, 139906804551679,
+STORE, 139906804531200, 139906804551679,
+ERASE, 139906804551680, 139906804555775,
+STORE, 139906804551680, 139906804555775,
+STORE, 139906799955968, 139906802171903,
+SNULL, 139906799955968, 139906800054271,
+STORE, 139906800054272, 139906802171903,
+STORE, 139906799955968, 139906800054271,
+SNULL, 139906802147327, 139906802171903,
+STORE, 139906800054272, 139906802147327,
+STORE, 139906802147328, 139906802171903,
+SNULL, 139906802147328, 139906802155519,
+STORE, 139906802155520, 139906802171903,
+STORE, 139906802147328, 139906802155519,
+ERASE, 139906802147328, 139906802155519,
+STORE, 139906802147328, 139906802155519,
+ERASE, 139906802155520, 139906802171903,
+STORE, 139906802155520, 139906802171903,
+STORE, 139906796158976, 139906799955967,
+SNULL, 139906796158976, 139906797817855,
+STORE, 139906797817856, 139906799955967,
+STORE, 139906796158976, 139906797817855,
+SNULL, 139906799915007, 139906799955967,
+STORE, 139906797817856, 139906799915007,
+STORE, 139906799915008, 139906799955967,
+SNULL, 139906799915008, 139906799939583,
+STORE, 139906799939584, 139906799955967,
+STORE, 139906799915008, 139906799939583,
+ERASE, 139906799915008, 139906799939583,
+STORE, 139906799915008, 139906799939583,
+ERASE, 139906799939584, 139906799955967,
+STORE, 139906799939584, 139906799955967,
+STORE, 139906793897984, 139906796158975,
+SNULL, 139906793897984, 139906794049535,
+STORE, 139906794049536, 139906796158975,
+STORE, 139906793897984, 139906794049535,
+SNULL, 139906796142591, 139906796158975,
+STORE, 139906794049536, 139906796142591,
+STORE, 139906796142592, 139906796158975,
+SNULL, 139906796142592, 139906796150783,
+STORE, 139906796150784, 139906796158975,
+STORE, 139906796142592, 139906796150783,
+ERASE, 139906796142592, 139906796150783,
+STORE, 139906796142592, 139906796150783,
+ERASE, 139906796150784, 139906796158975,
+STORE, 139906796150784, 139906796158975,
+STORE, 139906791776256, 139906793897983,
+SNULL, 139906791776256, 139906791792639,
+STORE, 139906791792640, 139906793897983,
+STORE, 139906791776256, 139906791792639,
+SNULL, 139906793889791, 139906793897983,
+STORE, 139906791792640, 139906793889791,
+STORE, 139906793889792, 139906793897983,
+ERASE, 139906793889792, 139906793897983,
+STORE, 139906793889792, 139906793897983,
+STORE, 139906808983552, 139906808991743,
+STORE, 139906789646336, 139906791776255,
+SNULL, 139906789646336, 139906789675007,
+STORE, 139906789675008, 139906791776255,
+STORE, 139906789646336, 139906789675007,
+SNULL, 139906791768063, 139906791776255,
+STORE, 139906789675008, 139906791768063,
+STORE, 139906791768064, 139906791776255,
+ERASE, 139906791768064, 139906791776255,
+STORE, 139906791768064, 139906791776255,
+STORE, 139906787393536, 139906789646335,
+SNULL, 139906787393536, 139906787545087,
+STORE, 139906787545088, 139906789646335,
+STORE, 139906787393536, 139906787545087,
+SNULL, 139906789638143, 139906789646335,
+STORE, 139906787545088, 139906789638143,
+STORE, 139906789638144, 139906789646335,
+ERASE, 139906789638144, 139906789646335,
+STORE, 139906789638144, 139906789646335,
+STORE, 139906785222656, 139906787393535,
+SNULL, 139906785222656, 139906785292287,
+STORE, 139906785292288, 139906787393535,
+STORE, 139906785222656, 139906785292287,
+SNULL, 139906787385343, 139906787393535,
+STORE, 139906785292288, 139906787385343,
+STORE, 139906787385344, 139906787393535,
+ERASE, 139906787385344, 139906787393535,
+STORE, 139906787385344, 139906787393535,
+STORE, 139906782011392, 139906785222655,
+SNULL, 139906782011392, 139906783088639,
+STORE, 139906783088640, 139906785222655,
+STORE, 139906782011392, 139906783088639,
+SNULL, 139906785185791, 139906785222655,
+STORE, 139906783088640, 139906785185791,
+STORE, 139906785185792, 139906785222655,
+ERASE, 139906785185792, 139906785222655,
+STORE, 139906785185792, 139906785222655,
+STORE, 139906779877376, 139906782011391,
+SNULL, 139906779877376, 139906779906047,
+STORE, 139906779906048, 139906782011391,
+STORE, 139906779877376, 139906779906047,
+SNULL, 139906782003199, 139906782011391,
+STORE, 139906779906048, 139906782003199,
+STORE, 139906782003200, 139906782011391,
+ERASE, 139906782003200, 139906782011391,
+STORE, 139906782003200, 139906782011391,
+STORE, 139906777567232, 139906779877375,
+SNULL, 139906777567232, 139906777772031,
+STORE, 139906777772032, 139906779877375,
+STORE, 139906777567232, 139906777772031,
+SNULL, 139906779869183, 139906779877375,
+STORE, 139906777772032, 139906779869183,
+STORE, 139906779869184, 139906779877375,
+ERASE, 139906779869184, 139906779877375,
+STORE, 139906779869184, 139906779877375,
+STORE, 139906808975360, 139906808991743,
+STORE, 139906775183360, 139906777567231,
+SNULL, 139906775183360, 139906775375871,
+STORE, 139906775375872, 139906777567231,
+STORE, 139906775183360, 139906775375871,
+SNULL, 139906777468927, 139906777567231,
+STORE, 139906775375872, 139906777468927,
+STORE, 139906777468928, 139906777567231,
+ERASE, 139906777468928, 139906777567231,
+STORE, 139906777468928, 139906777567231,
+STORE, 139906773065728, 139906775183359,
+SNULL, 139906773065728, 139906773082111,
+STORE, 139906773082112, 139906775183359,
+STORE, 139906773065728, 139906773082111,
+SNULL, 139906775175167, 139906775183359,
+STORE, 139906773082112, 139906775175167,
+STORE, 139906775175168, 139906775183359,
+ERASE, 139906775175168, 139906775183359,
+STORE, 139906775175168, 139906775183359,
+STORE, 139906770497536, 139906773065727,
+SNULL, 139906770497536, 139906770964479,
+STORE, 139906770964480, 139906773065727,
+STORE, 139906770497536, 139906770964479,
+SNULL, 139906773057535, 139906773065727,
+STORE, 139906770964480, 139906773057535,
+STORE, 139906773057536, 139906773065727,
+ERASE, 139906773057536, 139906773065727,
+STORE, 139906773057536, 139906773065727,
+STORE, 139906768384000, 139906770497535,
+SNULL, 139906768384000, 139906768396287,
+STORE, 139906768396288, 139906770497535,
+STORE, 139906768384000, 139906768396287,
+SNULL, 139906770489343, 139906770497535,
+STORE, 139906768396288, 139906770489343,
+STORE, 139906770489344, 139906770497535,
+ERASE, 139906770489344, 139906770497535,
+STORE, 139906770489344, 139906770497535,
+STORE, 139906766204928, 139906768383999,
+SNULL, 139906766204928, 139906766282751,
+STORE, 139906766282752, 139906768383999,
+STORE, 139906766204928, 139906766282751,
+SNULL, 139906768375807, 139906768383999,
+STORE, 139906766282752, 139906768375807,
+STORE, 139906768375808, 139906768383999,
+ERASE, 139906768375808, 139906768383999,
+STORE, 139906768375808, 139906768383999,
+STORE, 139906808967168, 139906808991743,
+STORE, 139906764087296, 139906766204927,
+SNULL, 139906764087296, 139906764103679,
+STORE, 139906764103680, 139906766204927,
+STORE, 139906764087296, 139906764103679,
+SNULL, 139906766196735, 139906766204927,
+STORE, 139906764103680, 139906766196735,
+STORE, 139906766196736, 139906766204927,
+ERASE, 139906766196736, 139906766204927,
+STORE, 139906766196736, 139906766204927,
+STORE, 139906808958976, 139906808991743,
+SNULL, 139906799931391, 139906799939583,
+STORE, 139906799915008, 139906799931391,
+STORE, 139906799931392, 139906799939583,
+SNULL, 139906766200831, 139906766204927,
+STORE, 139906766196736, 139906766200831,
+STORE, 139906766200832, 139906766204927,
+SNULL, 139906768379903, 139906768383999,
+STORE, 139906768375808, 139906768379903,
+STORE, 139906768379904, 139906768383999,
+SNULL, 139906770493439, 139906770497535,
+STORE, 139906770489344, 139906770493439,
+STORE, 139906770493440, 139906770497535,
+SNULL, 139906802151423, 139906802155519,
+STORE, 139906802147328, 139906802151423,
+STORE, 139906802151424, 139906802155519,
+SNULL, 139906773061631, 139906773065727,
+STORE, 139906773057536, 139906773061631,
+STORE, 139906773061632, 139906773065727,
+SNULL, 139906775179263, 139906775183359,
+STORE, 139906775175168, 139906775179263,
+STORE, 139906775179264, 139906775183359,
+SNULL, 139906777563135, 139906777567231,
+STORE, 139906777468928, 139906777563135,
+STORE, 139906777563136, 139906777567231,
+SNULL, 139906779873279, 139906779877375,
+STORE, 139906779869184, 139906779873279,
+STORE, 139906779873280, 139906779877375,
+SNULL, 139906782007295, 139906782011391,
+STORE, 139906782003200, 139906782007295,
+STORE, 139906782007296, 139906782011391,
+SNULL, 139906785193983, 139906785222655,
+STORE, 139906785185792, 139906785193983,
+STORE, 139906785193984, 139906785222655,
+SNULL, 139906787389439, 139906787393535,
+STORE, 139906787385344, 139906787389439,
+STORE, 139906787389440, 139906787393535,
+SNULL, 139906789642239, 139906789646335,
+STORE, 139906789638144, 139906789642239,
+STORE, 139906789642240, 139906789646335,
+SNULL, 139906791772159, 139906791776255,
+STORE, 139906791768064, 139906791772159,
+STORE, 139906791772160, 139906791776255,
+SNULL, 139906793893887, 139906793897983,
+STORE, 139906793889792, 139906793893887,
+STORE, 139906793893888, 139906793897983,
+SNULL, 139906796146687, 139906796150783,
+STORE, 139906796142592, 139906796146687,
+STORE, 139906796146688, 139906796150783,
+SNULL, 139906804547583, 139906804551679,
+STORE, 139906804531200, 139906804547583,
+STORE, 139906804547584, 139906804551679,
+SNULL, 139906806771711, 139906806775807,
+STORE, 139906806206464, 139906806771711,
+STORE, 139906806771712, 139906806775807,
+SNULL, 94728600285183, 94728600289279,
+STORE, 94728600281088, 94728600285183,
+STORE, 94728600285184, 94728600289279,
+SNULL, 139906809024511, 139906809028607,
+STORE, 139906809020416, 139906809024511,
+STORE, 139906809024512, 139906809028607,
+ERASE, 139906808991744, 139906809012223,
+STORE, 94728620138496, 94728620273663,
+STORE, 139906808692736, 139906808991743,
+STORE, 139906808426496, 139906808991743,
+STORE, 139906808160256, 139906808991743,
+STORE, 139906807894016, 139906808991743,
+SNULL, 139906808692735, 139906808991743,
+STORE, 139906807894016, 139906808692735,
+STORE, 139906808692736, 139906808991743,
+SNULL, 139906808692736, 139906808958975,
+STORE, 139906808958976, 139906808991743,
+STORE, 139906808692736, 139906808958975,
+ };
+
+ static const unsigned long set16[] = {
+STORE, 94174808662016, 94174809321471,
+STORE, 94174811414528, 94174811426815,
+STORE, 94174811426816, 94174811430911,
+STORE, 94174811430912, 94174811443199,
+STORE, 94174841700352, 94174841835519,
+STORE, 140173257838592, 140173259497471,
+STORE, 140173259497472, 140173261594623,
+STORE, 140173261594624, 140173261611007,
+STORE, 140173261611008, 140173261619199,
+STORE, 140173261619200, 140173261635583,
+STORE, 140173261635584, 140173261778943,
+STORE, 140173263863808, 140173263871999,
+STORE, 140173263876096, 140173263880191,
+STORE, 140173263880192, 140173263884287,
+STORE, 140173263884288, 140173263888383,
+STORE, 140729801007104, 140729801142271,
+STORE, 140729801617408, 140729801629695,
+STORE, 140729801629696, 140729801633791,
+STORE, 140737488347136, 140737488351231,
+STORE, 140728166858752, 140737488351231,
+SNULL, 140728166862847, 140737488351231,
+STORE, 140728166858752, 140728166862847,
+STORE, 140728166727680, 140728166862847,
+STORE, 93912949866496, 93912950337535,
+SNULL, 93912950288383, 93912950337535,
+STORE, 93912949866496, 93912950288383,
+STORE, 93912950288384, 93912950337535,
+ERASE, 93912950288384, 93912950337535,
+STORE, 93912950292480, 93912950337535,
+STORE, 139921863385088, 139921865637887,
+SNULL, 139921863528447, 139921865637887,
+STORE, 139921863385088, 139921863528447,
+STORE, 139921863528448, 139921865637887,
+ERASE, 139921863528448, 139921865637887,
+STORE, 139921865625600, 139921865633791,
+STORE, 139921865633792, 139921865637887,
+STORE, 140728167899136, 140728167903231,
+STORE, 140728167886848, 140728167899135,
+STORE, 139921865601024, 139921865625599,
+STORE, 139921865592832, 139921865601023,
+STORE, 139921861251072, 139921863385087,
+SNULL, 139921861251072, 139921861279743,
+STORE, 139921861279744, 139921863385087,
+STORE, 139921861251072, 139921861279743,
+SNULL, 139921863376895, 139921863385087,
+STORE, 139921861279744, 139921863376895,
+STORE, 139921863376896, 139921863385087,
+ERASE, 139921863376896, 139921863385087,
+STORE, 139921863376896, 139921863385087,
+STORE, 139921858867200, 139921861251071,
+SNULL, 139921858867200, 139921859133439,
+STORE, 139921859133440, 139921861251071,
+STORE, 139921858867200, 139921859133439,
+SNULL, 139921861226495, 139921861251071,
+STORE, 139921859133440, 139921861226495,
+STORE, 139921861226496, 139921861251071,
+SNULL, 139921861226496, 139921861246975,
+STORE, 139921861246976, 139921861251071,
+STORE, 139921861226496, 139921861246975,
+ERASE, 139921861226496, 139921861246975,
+STORE, 139921861226496, 139921861246975,
+ERASE, 139921861246976, 139921861251071,
+STORE, 139921861246976, 139921861251071,
+STORE, 139921856675840, 139921858867199,
+SNULL, 139921856675840, 139921856765951,
+STORE, 139921856765952, 139921858867199,
+STORE, 139921856675840, 139921856765951,
+SNULL, 139921858859007, 139921858867199,
+STORE, 139921856765952, 139921858859007,
+STORE, 139921858859008, 139921858867199,
+ERASE, 139921858859008, 139921858867199,
+STORE, 139921858859008, 139921858867199,
+STORE, 139921854414848, 139921856675839,
+SNULL, 139921854414848, 139921854566399,
+STORE, 139921854566400, 139921856675839,
+STORE, 139921854414848, 139921854566399,
+SNULL, 139921856659455, 139921856675839,
+STORE, 139921854566400, 139921856659455,
+STORE, 139921856659456, 139921856675839,
+SNULL, 139921856659456, 139921856667647,
+STORE, 139921856667648, 139921856675839,
+STORE, 139921856659456, 139921856667647,
+ERASE, 139921856659456, 139921856667647,
+STORE, 139921856659456, 139921856667647,
+ERASE, 139921856667648, 139921856675839,
+STORE, 139921856667648, 139921856675839,
+STORE, 139921852284928, 139921854414847,
+SNULL, 139921852284928, 139921852313599,
+STORE, 139921852313600, 139921854414847,
+STORE, 139921852284928, 139921852313599,
+SNULL, 139921854406655, 139921854414847,
+STORE, 139921852313600, 139921854406655,
+STORE, 139921854406656, 139921854414847,
+ERASE, 139921854406656, 139921854414847,
+STORE, 139921854406656, 139921854414847,
+STORE, 139921850068992, 139921852284927,
+SNULL, 139921850068992, 139921850167295,
+STORE, 139921850167296, 139921852284927,
+STORE, 139921850068992, 139921850167295,
+SNULL, 139921852260351, 139921852284927,
+STORE, 139921850167296, 139921852260351,
+STORE, 139921852260352, 139921852284927,
+SNULL, 139921852260352, 139921852268543,
+STORE, 139921852268544, 139921852284927,
+STORE, 139921852260352, 139921852268543,
+ERASE, 139921852260352, 139921852268543,
+STORE, 139921852260352, 139921852268543,
+ERASE, 139921852268544, 139921852284927,
+STORE, 139921852268544, 139921852284927,
+STORE, 139921865584640, 139921865601023,
+STORE, 139921846272000, 139921850068991,
+SNULL, 139921846272000, 139921847930879,
+STORE, 139921847930880, 139921850068991,
+STORE, 139921846272000, 139921847930879,
+SNULL, 139921850028031, 139921850068991,
+STORE, 139921847930880, 139921850028031,
+STORE, 139921850028032, 139921850068991,
+SNULL, 139921850028032, 139921850052607,
+STORE, 139921850052608, 139921850068991,
+STORE, 139921850028032, 139921850052607,
+ERASE, 139921850028032, 139921850052607,
+STORE, 139921850028032, 139921850052607,
+ERASE, 139921850052608, 139921850068991,
+STORE, 139921850052608, 139921850068991,
+STORE, 139921844154368, 139921846271999,
+SNULL, 139921844154368, 139921844170751,
+STORE, 139921844170752, 139921846271999,
+STORE, 139921844154368, 139921844170751,
+SNULL, 139921846263807, 139921846271999,
+STORE, 139921844170752, 139921846263807,
+STORE, 139921846263808, 139921846271999,
+ERASE, 139921846263808, 139921846271999,
+STORE, 139921846263808, 139921846271999,
+STORE, 139921842036736, 139921844154367,
+SNULL, 139921842036736, 139921842053119,
+STORE, 139921842053120, 139921844154367,
+STORE, 139921842036736, 139921842053119,
+SNULL, 139921844146175, 139921844154367,
+STORE, 139921842053120, 139921844146175,
+STORE, 139921844146176, 139921844154367,
+ERASE, 139921844146176, 139921844154367,
+STORE, 139921844146176, 139921844154367,
+STORE, 139921839468544, 139921842036735,
+SNULL, 139921839468544, 139921839935487,
+STORE, 139921839935488, 139921842036735,
+STORE, 139921839468544, 139921839935487,
+SNULL, 139921842028543, 139921842036735,
+STORE, 139921839935488, 139921842028543,
+STORE, 139921842028544, 139921842036735,
+ERASE, 139921842028544, 139921842036735,
+STORE, 139921842028544, 139921842036735,
+STORE, 139921837355008, 139921839468543,
+SNULL, 139921837355008, 139921837367295,
+STORE, 139921837367296, 139921839468543,
+STORE, 139921837355008, 139921837367295,
+SNULL, 139921839460351, 139921839468543,
+STORE, 139921837367296, 139921839460351,
+STORE, 139921839460352, 139921839468543,
+ERASE, 139921839460352, 139921839468543,
+STORE, 139921839460352, 139921839468543,
+STORE, 139921865576448, 139921865601023,
+STORE, 139921865564160, 139921865601023,
+SNULL, 139921850044415, 139921850052607,
+STORE, 139921850028032, 139921850044415,
+STORE, 139921850044416, 139921850052607,
+SNULL, 139921839464447, 139921839468543,
+STORE, 139921839460352, 139921839464447,
+STORE, 139921839464448, 139921839468543,
+SNULL, 139921852264447, 139921852268543,
+STORE, 139921852260352, 139921852264447,
+STORE, 139921852264448, 139921852268543,
+SNULL, 139921842032639, 139921842036735,
+STORE, 139921842028544, 139921842032639,
+STORE, 139921842032640, 139921842036735,
+SNULL, 139921844150271, 139921844154367,
+STORE, 139921844146176, 139921844150271,
+STORE, 139921844150272, 139921844154367,
+SNULL, 139921846267903, 139921846271999,
+STORE, 139921846263808, 139921846267903,
+STORE, 139921846267904, 139921846271999,
+SNULL, 139921854410751, 139921854414847,
+STORE, 139921854406656, 139921854410751,
+STORE, 139921854410752, 139921854414847,
+SNULL, 139921856663551, 139921856667647,
+STORE, 139921856659456, 139921856663551,
+STORE, 139921856663552, 139921856667647,
+SNULL, 139921858863103, 139921858867199,
+STORE, 139921858859008, 139921858863103,
+STORE, 139921858863104, 139921858867199,
+SNULL, 139921861242879, 139921861246975,
+STORE, 139921861226496, 139921861242879,
+STORE, 139921861242880, 139921861246975,
+SNULL, 139921863380991, 139921863385087,
+STORE, 139921863376896, 139921863380991,
+STORE, 139921863380992, 139921863385087,
+SNULL, 93912950333439, 93912950337535,
+STORE, 93912950292480, 93912950333439,
+STORE, 93912950333440, 93912950337535,
+SNULL, 139921865629695, 139921865633791,
+STORE, 139921865625600, 139921865629695,
+STORE, 139921865629696, 139921865633791,
+ERASE, 139921865601024, 139921865625599,
+STORE, 93912968110080, 93912968245247,
+STORE, 139921828913152, 139921837355007,
+STORE, 139921865621504, 139921865625599,
+STORE, 139921865617408, 139921865621503,
+STORE, 139921865613312, 139921865617407,
+STORE, 139921865547776, 139921865564159,
+ };
+
+ static const unsigned long set17[] = {
+STORE, 94397057224704, 94397057646591,
+STORE, 94397057650688, 94397057691647,
+STORE, 94397057691648, 94397057695743,
+STORE, 94397075271680, 94397075406847,
+STORE, 139953169051648, 139953169063935,
+STORE, 139953169063936, 139953171156991,
+STORE, 139953171156992, 139953171161087,
+STORE, 139953171161088, 139953171165183,
+STORE, 139953171165184, 139953171632127,
+STORE, 139953171632128, 139953173725183,
+STORE, 139953173725184, 139953173729279,
+STORE, 139953173729280, 139953173733375,
+STORE, 139953173733376, 139953173749759,
+STORE, 139953173749760, 139953175842815,
+STORE, 139953175842816, 139953175846911,
+STORE, 139953175846912, 139953175851007,
+STORE, 139953175851008, 139953175867391,
+STORE, 139953175867392, 139953177960447,
+STORE, 139953177960448, 139953177964543,
+STORE, 139953177964544, 139953177968639,
+STORE, 139953177968640, 139953179627519,
+STORE, 139953179627520, 139953181724671,
+STORE, 139953181724672, 139953181741055,
+STORE, 139953181741056, 139953181749247,
+STORE, 139953181749248, 139953181765631,
+STORE, 139953181765632, 139953181863935,
+STORE, 139953181863936, 139953183956991,
+STORE, 139953183956992, 139953183961087,
+STORE, 139953183961088, 139953183965183,
+STORE, 139953183965184, 139953183981567,
+STORE, 139953183981568, 139953184010239,
+STORE, 139953184010240, 139953186103295,
+STORE, 139953186103296, 139953186107391,
+STORE, 139953186107392, 139953186111487,
+STORE, 139953186111488, 139953186263039,
+STORE, 139953186263040, 139953188356095,
+STORE, 139953188356096, 139953188360191,
+STORE, 139953188360192, 139953188364287,
+STORE, 139953188364288, 139953188372479,
+STORE, 139953188372480, 139953188462591,
+STORE, 139953188462592, 139953190555647,
+STORE, 139953190555648, 139953190559743,
+STORE, 139953190559744, 139953190563839,
+STORE, 139953190563840, 139953190830079,
+STORE, 139953190830080, 139953192923135,
+STORE, 139953192923136, 139953192939519,
+STORE, 139953192939520, 139953192943615,
+STORE, 139953192943616, 139953192947711,
+STORE, 139953192947712, 139953192976383,
+STORE, 139953192976384, 139953195073535,
+STORE, 139953195073536, 139953195077631,
+STORE, 139953195077632, 139953195081727,
+STORE, 139953195081728, 139953195225087,
+STORE, 139953197281280, 139953197318143,
+STORE, 139953197322240, 139953197326335,
+STORE, 139953197326336, 139953197330431,
+STORE, 139953197330432, 139953197334527,
+STORE, 140720477511680, 140720477646847,
+STORE, 140720478302208, 140720478314495,
+STORE, 140720478314496, 140720478318591,
+ };
+
+ static const unsigned long set18[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140724953673728, 140737488351231,
+SNULL, 140724953677823, 140737488351231,
+STORE, 140724953673728, 140724953677823,
+STORE, 140724953542656, 140724953677823,
+STORE, 94675199266816, 94675199311871,
+SNULL, 94675199303679, 94675199311871,
+STORE, 94675199266816, 94675199303679,
+STORE, 94675199303680, 94675199311871,
+ERASE, 94675199303680, 94675199311871,
+STORE, 94675199303680, 94675199311871,
+STORE, 140222970605568, 140222972858367,
+SNULL, 140222970748927, 140222972858367,
+STORE, 140222970605568, 140222970748927,
+STORE, 140222970748928, 140222972858367,
+ERASE, 140222970748928, 140222972858367,
+STORE, 140222972846080, 140222972854271,
+STORE, 140222972854272, 140222972858367,
+STORE, 140724954365952, 140724954370047,
+STORE, 140724954353664, 140724954365951,
+STORE, 140222972841984, 140222972846079,
+STORE, 140222972833792, 140222972841983,
+STORE, 140222968475648, 140222970605567,
+SNULL, 140222968475648, 140222968504319,
+STORE, 140222968504320, 140222970605567,
+STORE, 140222968475648, 140222968504319,
+SNULL, 140222970597375, 140222970605567,
+STORE, 140222968504320, 140222970597375,
+STORE, 140222970597376, 140222970605567,
+ERASE, 140222970597376, 140222970605567,
+STORE, 140222970597376, 140222970605567,
+ };
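+ /*
+  * Replay sketch, assuming the sets are walked against a maple tree with
+  * the stock mtree_store_range()/mtree_erase() APIs; this is one
+  * plausible shape of the harness, not necessarily the one this patch
+  * uses. SNULL is read as storing NULL over the range, STORE as storing
+  * a non-NULL entry, ERASE as removing whatever covers the start index:
+  *
+  *	for (i = 0; i < size; i += 3) {
+  *		switch (set[i]) {
+  *		case SNULL:
+  *			mtree_store_range(mt, set[i + 1], set[i + 2],
+  *					  NULL, GFP_KERNEL);
+  *			break;
+  *		case STORE:
+  *			mtree_store_range(mt, set[i + 1], set[i + 2],
+  *					  xa_mk_value(set[i + 1] >> 12),
+  *					  GFP_KERNEL);
+  *			break;
+  *		case ERASE:
+  *			mtree_erase(mt, set[i + 1]);
+  *			break;
+  *		}
+  *	}
+  */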
+
+ static const unsigned long set19[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140725182459904, 140737488351231,
+SNULL, 140725182463999, 140737488351231,
+STORE, 140725182459904, 140725182463999,
+STORE, 140725182328832, 140725182463999,
+STORE, 94730166636544, 94730166763519,
+SNULL, 94730166747135, 94730166763519,
+STORE, 94730166636544, 94730166747135,
+STORE, 94730166747136, 94730166763519,
+ERASE, 94730166747136, 94730166763519,
+STORE, 94730166751232, 94730166763519,
+STORE, 140656834555904, 140656836808703,
+SNULL, 140656834699263, 140656836808703,
+STORE, 140656834555904, 140656834699263,
+STORE, 140656834699264, 140656836808703,
+ERASE, 140656834699264, 140656836808703,
+STORE, 140656836796416, 140656836804607,
+STORE, 140656836804608, 140656836808703,
+STORE, 140725183389696, 140725183393791,
+STORE, 140725183377408, 140725183389695,
+STORE, 140656836788224, 140656836796415,
+STORE, 140656832331776, 140656834555903,
+SNULL, 140656833982463, 140656834555903,
+STORE, 140656832331776, 140656833982463,
+STORE, 140656833982464, 140656834555903,
+SNULL, 140656833982464, 140656834551807,
+STORE, 140656834551808, 140656834555903,
+STORE, 140656833982464, 140656834551807,
+ERASE, 140656833982464, 140656834551807,
+STORE, 140656833982464, 140656834551807,
+ERASE, 140656834551808, 140656834555903,
+STORE, 140656834551808, 140656834555903,
+STORE, 140656836763648, 140656836788223,
+STORE, 140656830070784, 140656832331775,
+SNULL, 140656830070784, 140656830222335,
+STORE, 140656830222336, 140656832331775,
+STORE, 140656830070784, 140656830222335,
+SNULL, 140656832315391, 140656832331775,
+STORE, 140656830222336, 140656832315391,
+STORE, 140656832315392, 140656832331775,
+SNULL, 140656832315392, 140656832323583,
+STORE, 140656832323584, 140656832331775,
+STORE, 140656832315392, 140656832323583,
+ERASE, 140656832315392, 140656832323583,
+STORE, 140656832315392, 140656832323583,
+ERASE, 140656832323584, 140656832331775,
+STORE, 140656832323584, 140656832331775,
+STORE, 140656827940864, 140656830070783,
+SNULL, 140656827940864, 140656827969535,
+STORE, 140656827969536, 140656830070783,
+STORE, 140656827940864, 140656827969535,
+SNULL, 140656830062591, 140656830070783,
+STORE, 140656827969536, 140656830062591,
+STORE, 140656830062592, 140656830070783,
+ERASE, 140656830062592, 140656830070783,
+STORE, 140656830062592, 140656830070783,
+STORE, 140656825724928, 140656827940863,
+SNULL, 140656825724928, 140656825823231,
+STORE, 140656825823232, 140656827940863,
+STORE, 140656825724928, 140656825823231,
+SNULL, 140656827916287, 140656827940863,
+STORE, 140656825823232, 140656827916287,
+STORE, 140656827916288, 140656827940863,
+SNULL, 140656827916288, 140656827924479,
+STORE, 140656827924480, 140656827940863,
+STORE, 140656827916288, 140656827924479,
+ERASE, 140656827916288, 140656827924479,
+STORE, 140656827916288, 140656827924479,
+ERASE, 140656827924480, 140656827940863,
+STORE, 140656827924480, 140656827940863,
+STORE, 140656821927936, 140656825724927,
+SNULL, 140656821927936, 140656823586815,
+STORE, 140656823586816, 140656825724927,
+STORE, 140656821927936, 140656823586815,
+SNULL, 140656825683967, 140656825724927,
+STORE, 140656823586816, 140656825683967,
+STORE, 140656825683968, 140656825724927,
+SNULL, 140656825683968, 140656825708543,
+STORE, 140656825708544, 140656825724927,
+STORE, 140656825683968, 140656825708543,
+ERASE, 140656825683968, 140656825708543,
+STORE, 140656825683968, 140656825708543,
+ERASE, 140656825708544, 140656825724927,
+STORE, 140656825708544, 140656825724927,
+STORE, 140656819806208, 140656821927935,
+SNULL, 140656819806208, 140656819822591,
+STORE, 140656819822592, 140656821927935,
+STORE, 140656819806208, 140656819822591,
+SNULL, 140656821919743, 140656821927935,
+STORE, 140656819822592, 140656821919743,
+STORE, 140656821919744, 140656821927935,
+ERASE, 140656821919744, 140656821927935,
+STORE, 140656821919744, 140656821927935,
+STORE, 140656836755456, 140656836763647,
+STORE, 140656817553408, 140656819806207,
+SNULL, 140656817553408, 140656817704959,
+STORE, 140656817704960, 140656819806207,
+STORE, 140656817553408, 140656817704959,
+SNULL, 140656819798015, 140656819806207,
+STORE, 140656817704960, 140656819798015,
+STORE, 140656819798016, 140656819806207,
+ERASE, 140656819798016, 140656819806207,
+STORE, 140656819798016, 140656819806207,
+STORE, 140656815382528, 140656817553407,
+SNULL, 140656815382528, 140656815452159,
+STORE, 140656815452160, 140656817553407,
+STORE, 140656815382528, 140656815452159,
+SNULL, 140656817545215, 140656817553407,
+STORE, 140656815452160, 140656817545215,
+STORE, 140656817545216, 140656817553407,
+ERASE, 140656817545216, 140656817553407,
+STORE, 140656817545216, 140656817553407,
+STORE, 140656812171264, 140656815382527,
+SNULL, 140656812171264, 140656813248511,
+STORE, 140656813248512, 140656815382527,
+STORE, 140656812171264, 140656813248511,
+SNULL, 140656815345663, 140656815382527,
+STORE, 140656813248512, 140656815345663,
+STORE, 140656815345664, 140656815382527,
+ERASE, 140656815345664, 140656815382527,
+STORE, 140656815345664, 140656815382527,
+STORE, 140656810037248, 140656812171263,
+SNULL, 140656810037248, 140656810065919,
+STORE, 140656810065920, 140656812171263,
+STORE, 140656810037248, 140656810065919,
+SNULL, 140656812163071, 140656812171263,
+STORE, 140656810065920, 140656812163071,
+STORE, 140656812163072, 140656812171263,
+ERASE, 140656812163072, 140656812171263,
+STORE, 140656812163072, 140656812171263,
+STORE, 140656807727104, 140656810037247,
+SNULL, 140656807727104, 140656807931903,
+STORE, 140656807931904, 140656810037247,
+STORE, 140656807727104, 140656807931903,
+SNULL, 140656810029055, 140656810037247,
+STORE, 140656807931904, 140656810029055,
+STORE, 140656810029056, 140656810037247,
+ERASE, 140656810029056, 140656810037247,
+STORE, 140656810029056, 140656810037247,
+STORE, 140656805343232, 140656807727103,
+SNULL, 140656805343232, 140656805535743,
+STORE, 140656805535744, 140656807727103,
+STORE, 140656805343232, 140656805535743,
+SNULL, 140656807628799, 140656807727103,
+STORE, 140656805535744, 140656807628799,
+STORE, 140656807628800, 140656807727103,
+ERASE, 140656807628800, 140656807727103,
+STORE, 140656807628800, 140656807727103,
+STORE, 140656836747264, 140656836763647,
+STORE, 140656802775040, 140656805343231,
+SNULL, 140656802775040, 140656803241983,
+STORE, 140656803241984, 140656805343231,
+STORE, 140656802775040, 140656803241983,
+SNULL, 140656805335039, 140656805343231,
+STORE, 140656803241984, 140656805335039,
+STORE, 140656805335040, 140656805343231,
+ERASE, 140656805335040, 140656805343231,
+STORE, 140656805335040, 140656805343231,
+STORE, 140656800661504, 140656802775039,
+SNULL, 140656800661504, 140656800673791,
+STORE, 140656800673792, 140656802775039,
+STORE, 140656800661504, 140656800673791,
+SNULL, 140656802766847, 140656802775039,
+STORE, 140656800673792, 140656802766847,
+STORE, 140656802766848, 140656802775039,
+ERASE, 140656802766848, 140656802775039,
+STORE, 140656802766848, 140656802775039,
+STORE, 140656798482432, 140656800661503,
+SNULL, 140656798482432, 140656798560255,
+STORE, 140656798560256, 140656800661503,
+STORE, 140656798482432, 140656798560255,
+SNULL, 140656800653311, 140656800661503,
+STORE, 140656798560256, 140656800653311,
+STORE, 140656800653312, 140656800661503,
+ERASE, 140656800653312, 140656800661503,
+STORE, 140656800653312, 140656800661503,
+STORE, 140656796364800, 140656798482431,
+SNULL, 140656796364800, 140656796381183,
+STORE, 140656796381184, 140656798482431,
+STORE, 140656796364800, 140656796381183,
+SNULL, 140656798474239, 140656798482431,
+STORE, 140656796381184, 140656798474239,
+STORE, 140656798474240, 140656798482431,
+ERASE, 140656798474240, 140656798482431,
+STORE, 140656798474240, 140656798482431,
+STORE, 140656836739072, 140656836763647,
+STORE, 140656836726784, 140656836763647,
+SNULL, 140656825700351, 140656825708543,
+STORE, 140656825683968, 140656825700351,
+STORE, 140656825700352, 140656825708543,
+SNULL, 140656798478335, 140656798482431,
+STORE, 140656798474240, 140656798478335,
+STORE, 140656798478336, 140656798482431,
+SNULL, 140656800657407, 140656800661503,
+STORE, 140656800653312, 140656800657407,
+STORE, 140656800657408, 140656800661503,
+SNULL, 140656802770943, 140656802775039,
+STORE, 140656802766848, 140656802770943,
+STORE, 140656802770944, 140656802775039,
+SNULL, 140656827920383, 140656827924479,
+STORE, 140656827916288, 140656827920383,
+STORE, 140656827920384, 140656827924479,
+SNULL, 140656805339135, 140656805343231,
+STORE, 140656805335040, 140656805339135,
+STORE, 140656805339136, 140656805343231,
+SNULL, 140656807723007, 140656807727103,
+STORE, 140656807628800, 140656807723007,
+STORE, 140656807723008, 140656807727103,
+SNULL, 140656810033151, 140656810037247,
+STORE, 140656810029056, 140656810033151,
+STORE, 140656810033152, 140656810037247,
+SNULL, 140656812167167, 140656812171263,
+STORE, 140656812163072, 140656812167167,
+STORE, 140656812167168, 140656812171263,
+SNULL, 140656815353855, 140656815382527,
+STORE, 140656815345664, 140656815353855,
+STORE, 140656815353856, 140656815382527,
+SNULL, 140656817549311, 140656817553407,
+STORE, 140656817545216, 140656817549311,
+STORE, 140656817549312, 140656817553407,
+SNULL, 140656819802111, 140656819806207,
+STORE, 140656819798016, 140656819802111,
+STORE, 140656819802112, 140656819806207,
+SNULL, 140656821923839, 140656821927935,
+STORE, 140656821919744, 140656821923839,
+STORE, 140656821923840, 140656821927935,
+SNULL, 140656830066687, 140656830070783,
+STORE, 140656830062592, 140656830066687,
+STORE, 140656830066688, 140656830070783,
+SNULL, 140656832319487, 140656832323583,
+STORE, 140656832315392, 140656832319487,
+STORE, 140656832319488, 140656832323583,
+SNULL, 140656834547711, 140656834551807,
+STORE, 140656833982464, 140656834547711,
+STORE, 140656834547712, 140656834551807,
+SNULL, 94730166759423, 94730166763519,
+STORE, 94730166751232, 94730166759423,
+STORE, 94730166759424, 94730166763519,
+SNULL, 140656836800511, 140656836804607,
+STORE, 140656836796416, 140656836800511,
+STORE, 140656836800512, 140656836804607,
+ERASE, 140656836763648, 140656836788223,
+STORE, 94730171318272, 94730171453439,
+STORE, 140656836784128, 140656836788223,
+STORE, 140656836780032, 140656836784127,
+STORE, 140656791920640, 140656796364799,
+STORE, 140656836775936, 140656836780031,
+STORE, 140656787476480, 140656791920639,
+STORE, 140656779083776, 140656787476479,
+SNULL, 140656779087871, 140656787476479,
+STORE, 140656779083776, 140656779087871,
+STORE, 140656779087872, 140656787476479,
+STORE, 140656836771840, 140656836775935,
+STORE, 140656774639616, 140656779083775,
+STORE, 140656766246912, 140656774639615,
+SNULL, 140656766251007, 140656774639615,
+STORE, 140656766246912, 140656766251007,
+STORE, 140656766251008, 140656774639615,
+ERASE, 140656791920640, 140656796364799,
+ERASE, 140656836780032, 140656836784127,
+ERASE, 140656787476480, 140656791920639,
+ERASE, 140656836775936, 140656836780031,
+STORE, 140656836780032, 140656836784127,
+STORE, 140656791920640, 140656796364799,
+STORE, 140656836775936, 140656836780031,
+STORE, 140656787476480, 140656791920639,
+ERASE, 140656774639616, 140656779083775,
+ };
+
+ static const unsigned long set20[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140735952392192, 140737488351231,
+SNULL, 140735952396287, 140737488351231,
+STORE, 140735952392192, 140735952396287,
+STORE, 140735952261120, 140735952396287,
+STORE, 94849008947200, 94849009414143,
+SNULL, 94849009364991, 94849009414143,
+STORE, 94849008947200, 94849009364991,
+STORE, 94849009364992, 94849009414143,
+ERASE, 94849009364992, 94849009414143,
+STORE, 94849009364992, 94849009414143,
+STORE, 140590397943808, 140590400196607,
+SNULL, 140590398087167, 140590400196607,
+STORE, 140590397943808, 140590398087167,
+STORE, 140590398087168, 140590400196607,
+ERASE, 140590398087168, 140590400196607,
+STORE, 140590400184320, 140590400192511,
+STORE, 140590400192512, 140590400196607,
+STORE, 140735952850944, 140735952855039,
+STORE, 140735952838656, 140735952850943,
+STORE, 140590400180224, 140590400184319,
+STORE, 140590400172032, 140590400180223,
+STORE, 140590395809792, 140590397943807,
+SNULL, 140590395809792, 140590395838463,
+STORE, 140590395838464, 140590397943807,
+STORE, 140590395809792, 140590395838463,
+SNULL, 140590397935615, 140590397943807,
+STORE, 140590395838464, 140590397935615,
+STORE, 140590397935616, 140590397943807,
+ERASE, 140590397935616, 140590397943807,
+STORE, 140590397935616, 140590397943807,
+STORE, 140590393425920, 140590395809791,
+SNULL, 140590393425920, 140590393692159,
+STORE, 140590393692160, 140590395809791,
+STORE, 140590393425920, 140590393692159,
+SNULL, 140590395785215, 140590395809791,
+STORE, 140590393692160, 140590395785215,
+STORE, 140590395785216, 140590395809791,
+SNULL, 140590395785216, 140590395805695,
+STORE, 140590395805696, 140590395809791,
+STORE, 140590395785216, 140590395805695,
+ERASE, 140590395785216, 140590395805695,
+STORE, 140590395785216, 140590395805695,
+ERASE, 140590395805696, 140590395809791,
+STORE, 140590395805696, 140590395809791,
+STORE, 140590391234560, 140590393425919,
+SNULL, 140590391234560, 140590391324671,
+STORE, 140590391324672, 140590393425919,
+STORE, 140590391234560, 140590391324671,
+SNULL, 140590393417727, 140590393425919,
+STORE, 140590391324672, 140590393417727,
+STORE, 140590393417728, 140590393425919,
+ERASE, 140590393417728, 140590393425919,
+STORE, 140590393417728, 140590393425919,
+STORE, 140590388973568, 140590391234559,
+SNULL, 140590388973568, 140590389125119,
+STORE, 140590389125120, 140590391234559,
+STORE, 140590388973568, 140590389125119,
+SNULL, 140590391218175, 140590391234559,
+STORE, 140590389125120, 140590391218175,
+STORE, 140590391218176, 140590391234559,
+SNULL, 140590391218176, 140590391226367,
+STORE, 140590391226368, 140590391234559,
+STORE, 140590391218176, 140590391226367,
+ERASE, 140590391218176, 140590391226367,
+STORE, 140590391218176, 140590391226367,
+ERASE, 140590391226368, 140590391234559,
+STORE, 140590391226368, 140590391234559,
+STORE, 140590386843648, 140590388973567,
+SNULL, 140590386843648, 140590386872319,
+STORE, 140590386872320, 140590388973567,
+STORE, 140590386843648, 140590386872319,
+SNULL, 140590388965375, 140590388973567,
+STORE, 140590386872320, 140590388965375,
+STORE, 140590388965376, 140590388973567,
+ERASE, 140590388965376, 140590388973567,
+STORE, 140590388965376, 140590388973567,
+STORE, 140590384627712, 140590386843647,
+SNULL, 140590384627712, 140590384726015,
+STORE, 140590384726016, 140590386843647,
+STORE, 140590384627712, 140590384726015,
+SNULL, 140590386819071, 140590386843647,
+STORE, 140590384726016, 140590386819071,
+STORE, 140590386819072, 140590386843647,
+SNULL, 140590386819072, 140590386827263,
+STORE, 140590386827264, 140590386843647,
+STORE, 140590386819072, 140590386827263,
+ERASE, 140590386819072, 140590386827263,
+STORE, 140590386819072, 140590386827263,
+ERASE, 140590386827264, 140590386843647,
+STORE, 140590386827264, 140590386843647,
+STORE, 140590400163840, 140590400180223,
+STORE, 140590380830720, 140590384627711,
+SNULL, 140590380830720, 140590382489599,
+STORE, 140590382489600, 140590384627711,
+STORE, 140590380830720, 140590382489599,
+SNULL, 140590384586751, 140590384627711,
+STORE, 140590382489600, 140590384586751,
+STORE, 140590384586752, 140590384627711,
+SNULL, 140590384586752, 140590384611327,
+STORE, 140590384611328, 140590384627711,
+STORE, 140590384586752, 140590384611327,
+ERASE, 140590384586752, 140590384611327,
+STORE, 140590384586752, 140590384611327,
+ERASE, 140590384611328, 140590384627711,
+STORE, 140590384611328, 140590384627711,
+STORE, 140590378713088, 140590380830719,
+SNULL, 140590378713088, 140590378729471,
+STORE, 140590378729472, 140590380830719,
+STORE, 140590378713088, 140590378729471,
+SNULL, 140590380822527, 140590380830719,
+STORE, 140590378729472, 140590380822527,
+STORE, 140590380822528, 140590380830719,
+ERASE, 140590380822528, 140590380830719,
+STORE, 140590380822528, 140590380830719,
+STORE, 140590376595456, 140590378713087,
+SNULL, 140590376595456, 140590376611839,
+STORE, 140590376611840, 140590378713087,
+STORE, 140590376595456, 140590376611839,
+SNULL, 140590378704895, 140590378713087,
+STORE, 140590376611840, 140590378704895,
+STORE, 140590378704896, 140590378713087,
+ERASE, 140590378704896, 140590378713087,
+STORE, 140590378704896, 140590378713087,
+STORE, 140590374027264, 140590376595455,
+SNULL, 140590374027264, 140590374494207,
+STORE, 140590374494208, 140590376595455,
+STORE, 140590374027264, 140590374494207,
+SNULL, 140590376587263, 140590376595455,
+STORE, 140590374494208, 140590376587263,
+STORE, 140590376587264, 140590376595455,
+ERASE, 140590376587264, 140590376595455,
+STORE, 140590376587264, 140590376595455,
+STORE, 140590371913728, 140590374027263,
+SNULL, 140590371913728, 140590371926015,
+STORE, 140590371926016, 140590374027263,
+STORE, 140590371913728, 140590371926015,
+SNULL, 140590374019071, 140590374027263,
+STORE, 140590371926016, 140590374019071,
+STORE, 140590374019072, 140590374027263,
+ERASE, 140590374019072, 140590374027263,
+STORE, 140590374019072, 140590374027263,
+STORE, 140590400155648, 140590400180223,
+STORE, 140590400143360, 140590400180223,
+SNULL, 140590384603135, 140590384611327,
+STORE, 140590384586752, 140590384603135,
+STORE, 140590384603136, 140590384611327,
+SNULL, 140590374023167, 140590374027263,
+STORE, 140590374019072, 140590374023167,
+STORE, 140590374023168, 140590374027263,
+SNULL, 140590386823167, 140590386827263,
+STORE, 140590386819072, 140590386823167,
+STORE, 140590386823168, 140590386827263,
+SNULL, 140590376591359, 140590376595455,
+ };
+
+ static const unsigned long set21[] = {
+STORE, 93874710941696, 93874711363583,
+STORE, 93874711367680, 93874711408639,
+STORE, 93874711408640, 93874711412735,
+STORE, 93874720989184, 93874721124351,
+STORE, 140708365086720, 140708365099007,
+STORE, 140708365099008, 140708367192063,
+STORE, 140708367192064, 140708367196159,
+STORE, 140708367196160, 140708367200255,
+STORE, 140708367200256, 140708367667199,
+STORE, 140708367667200, 140708369760255,
+STORE, 140708369760256, 140708369764351,
+STORE, 140708369764352, 140708369768447,
+STORE, 140708369768448, 140708369784831,
+STORE, 140708369784832, 140708371877887,
+STORE, 140708371877888, 140708371881983,
+STORE, 140708371881984, 140708371886079,
+STORE, 140708371886080, 140708371902463,
+STORE, 140708371902464, 140708373995519,
+STORE, 140708373995520, 140708373999615,
+STORE, 140708373999616, 140708374003711,
+STORE, 140708374003712, 140708375662591,
+STORE, 140708375662592, 140708377759743,
+STORE, 140708377759744, 140708377776127,
+STORE, 140708377776128, 140708377784319,
+STORE, 140708377784320, 140708377800703,
+STORE, 140708377800704, 140708377899007,
+STORE, 140708377899008, 140708379992063,
+STORE, 140708379992064, 140708379996159,
+STORE, 140708379996160, 140708380000255,
+STORE, 140708380000256, 140708380016639,
+STORE, 140708380016640, 140708380045311,
+STORE, 140708380045312, 140708382138367,
+STORE, 140708382138368, 140708382142463,
+STORE, 140708382142464, 140708382146559,
+STORE, 140708382146560, 140708382298111,
+STORE, 140708382298112, 140708384391167,
+STORE, 140708384391168, 140708384395263,
+STORE, 140708384395264, 140708384399359,
+STORE, 140708384399360, 140708384407551,
+STORE, 140708384407552, 140708384497663,
+STORE, 140708384497664, 140708386590719,
+STORE, 140708386590720, 140708386594815,
+STORE, 140708386594816, 140708386598911,
+STORE, 140708386598912, 140708386865151,
+STORE, 140708386865152, 140708388958207,
+STORE, 140708388958208, 140708388974591,
+STORE, 140708388974592, 140708388978687,
+STORE, 140708388978688, 140708388982783,
+STORE, 140708388982784, 140708389011455,
+STORE, 140708389011456, 140708391108607,
+STORE, 140708391108608, 140708391112703,
+STORE, 140708391112704, 140708391116799,
+STORE, 140708391116800, 140708391260159,
+STORE, 140708393291776, 140708393308159,
+STORE, 140708393308160, 140708393312255,
+STORE, 140708393312256, 140708393316351,
+STORE, 140708393316352, 140708393353215,
+STORE, 140708393353216, 140708393357311,
+STORE, 140708393357312, 140708393361407,
+STORE, 140708393361408, 140708393365503,
+STORE, 140708393365504, 140708393369599,
+STORE, 140730557042688, 140730557177855,
+STORE, 140730557235200, 140730557247487,
+STORE, 140730557247488, 140730557251583,
+ERASE, 140708393353216, 140708393357311,
+ERASE, 140708393312256, 140708393316351,
+ERASE, 140708393308160, 140708393312255,
+ERASE, 140708393291776, 140708393308159,
+ };
+
+ static const unsigned long set22[] = {
+STORE, 93951397134336, 93951397183487,
+STORE, 93951397183488, 93951397728255,
+STORE, 93951397728256, 93951397826559,
+STORE, 93951397826560, 93951397842943,
+STORE, 93951397842944, 93951397847039,
+STORE, 93951425974272, 93951426109439,
+STORE, 140685152665600, 140685152677887,
+STORE, 140685152677888, 140685152829439,
+STORE, 140685152829440, 140685154181119,
+STORE, 140685154181120, 140685154484223,
+STORE, 140685154484224, 140685154496511,
+STORE, 140685154496512, 140685154508799,
+STORE, 140685154508800, 140685154525183,
+STORE, 140685154525184, 140685154541567,
+STORE, 140685154541568, 140685154590719,
+STORE, 140685154590720, 140685154603007,
+STORE, 140685154603008, 140685154607103,
+STORE, 140685154607104, 140685154611199,
+STORE, 140685154611200, 140685154615295,
+STORE, 140685154615296, 140685154631679,
+STORE, 140685154639872, 140685154643967,
+STORE, 140685154643968, 140685154766847,
+STORE, 140685154766848, 140685154799615,
+STORE, 140685154803712, 140685154807807,
+STORE, 140685154807808, 140685154811903,
+STORE, 140685154811904, 140685154815999,
+STORE, 140722188902400, 140722189037567,
+STORE, 140722189512704, 140722189524991,
+STORE, 140722189524992, 140722189529087,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733429354496, 140737488351231,
+SNULL, 140733429358591, 140737488351231,
+STORE, 140733429354496, 140733429358591,
+STORE, 140733429223424, 140733429358591,
+STORE, 94526683537408, 94526683660287,
+SNULL, 94526683553791, 94526683660287,
+STORE, 94526683537408, 94526683553791,
+STORE, 94526683553792, 94526683660287,
+ERASE, 94526683553792, 94526683660287,
+STORE, 94526683553792, 94526683623423,
+STORE, 94526683623424, 94526683647999,
+STORE, 94526683652096, 94526683660287,
+STORE, 140551363747840, 140551363923967,
+SNULL, 140551363751935, 140551363923967,
+STORE, 140551363747840, 140551363751935,
+STORE, 140551363751936, 140551363923967,
+ERASE, 140551363751936, 140551363923967,
+STORE, 140551363751936, 140551363874815,
+STORE, 140551363874816, 140551363907583,
+STORE, 140551363911680, 140551363919871,
+STORE, 140551363919872, 140551363923967,
+STORE, 140733429690368, 140733429694463,
+STORE, 140733429678080, 140733429690367,
+STORE, 140551363739648, 140551363747839,
+STORE, 140551363731456, 140551363739647,
+STORE, 140551363379200, 140551363731455,
+SNULL, 140551363379200, 140551363420159,
+STORE, 140551363420160, 140551363731455,
+STORE, 140551363379200, 140551363420159,
+SNULL, 140551363706879, 140551363731455,
+STORE, 140551363420160, 140551363706879,
+STORE, 140551363706880, 140551363731455,
+SNULL, 140551363420160, 140551363637247,
+STORE, 140551363637248, 140551363706879,
+STORE, 140551363420160, 140551363637247,
+ERASE, 140551363420160, 140551363637247,
+STORE, 140551363420160, 140551363637247,
+SNULL, 140551363637248, 140551363702783,
+STORE, 140551363702784, 140551363706879,
+STORE, 140551363637248, 140551363702783,
+ERASE, 140551363637248, 140551363702783,
+STORE, 140551363637248, 140551363702783,
+ERASE, 140551363706880, 140551363731455,
+STORE, 140551363706880, 140551363731455,
+STORE, 140551361531904, 140551363379199,
+SNULL, 140551361683455, 140551363379199,
+STORE, 140551361531904, 140551361683455,
+STORE, 140551361683456, 140551363379199,
+SNULL, 140551361683456, 140551363035135,
+STORE, 140551363035136, 140551363379199,
+STORE, 140551361683456, 140551363035135,
+ERASE, 140551361683456, 140551363035135,
+STORE, 140551361683456, 140551363035135,
+SNULL, 140551363035136, 140551363338239,
+STORE, 140551363338240, 140551363379199,
+STORE, 140551363035136, 140551363338239,
+ERASE, 140551363035136, 140551363338239,
+STORE, 140551363035136, 140551363379199,
+SNULL, 140551363338239, 140551363379199,
+STORE, 140551363035136, 140551363338239,
+STORE, 140551363338240, 140551363379199,
+SNULL, 140551363338240, 140551363362815,
+STORE, 140551363362816, 140551363379199,
+STORE, 140551363338240, 140551363362815,
+ERASE, 140551363338240, 140551363362815,
+STORE, 140551363338240, 140551363362815,
+ERASE, 140551363362816, 140551363379199,
+STORE, 140551363362816, 140551363379199,
+STORE, 140551361519616, 140551361531903,
+SNULL, 140551363350527, 140551363362815,
+STORE, 140551363338240, 140551363350527,
+STORE, 140551363350528, 140551363362815,
+SNULL, 140551363727359, 140551363731455,
+STORE, 140551363706880, 140551363727359,
+STORE, 140551363727360, 140551363731455,
+SNULL, 94526683656191, 94526683660287,
+STORE, 94526683652096, 94526683656191,
+STORE, 94526683656192, 94526683660287,
+SNULL, 140551363915775, 140551363919871,
+STORE, 140551363911680, 140551363915775,
+STORE, 140551363915776, 140551363919871,
+ERASE, 140551363739648, 140551363747839,
+STORE, 94526715490304, 94526715625471,
+STORE, 140551361253376, 140551361531903,
+STORE, 140551360987136, 140551361531903,
+STORE, 140551360720896, 140551361531903,
+STORE, 140551360454656, 140551361531903,
+SNULL, 140551361253375, 140551361531903,
+STORE, 140551360454656, 140551361253375,
+STORE, 140551361253376, 140551361531903,
+SNULL, 140551361253376, 140551361519615,
+STORE, 140551361519616, 140551361531903,
+STORE, 140551361253376, 140551361519615,
+ERASE, 140551361253376, 140551361519615,
+ };
+
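+	/*
+	 * Assumed encoding (a sketch inferred from the surrounding data,
+	 * not verified against the test harness): each set is a flat
+	 * array of (op, start, end) triples replayed in order against
+	 * the tree under test. STORE writes an entry spanning
+	 * [start, end], SNULL stores a NULL entry over that range, and
+	 * ERASE removes it. The addresses appear to be userspace VMA
+	 * boundaries captured from live mmap traces.
+	 */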
+ static const unsigned long set23[] = {
+STORE, 94014447943680, 94014448156671,
+STORE, 94014450253824, 94014450257919,
+STORE, 94014450257920, 94014450266111,
+STORE, 94014450266112, 94014450278399,
+STORE, 94014464225280, 94014464630783,
+STORE, 139761764306944, 139761765965823,
+STORE, 139761765965824, 139761768062975,
+STORE, 139761768062976, 139761768079359,
+STORE, 139761768079360, 139761768087551,
+STORE, 139761768087552, 139761768103935,
+STORE, 139761768103936, 139761768116223,
+STORE, 139761768116224, 139761770209279,
+STORE, 139761770209280, 139761770213375,
+STORE, 139761770213376, 139761770217471,
+STORE, 139761770217472, 139761770360831,
+STORE, 139761770729472, 139761772412927,
+STORE, 139761772412928, 139761772429311,
+STORE, 139761772457984, 139761772462079,
+STORE, 139761772462080, 139761772466175,
+STORE, 139761772466176, 139761772470271,
+STORE, 140724336517120, 140724336652287,
+STORE, 140724336955392, 140724336967679,
+STORE, 140724336967680, 140724336971775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140721840295936, 140737488351231,
+SNULL, 140721840300031, 140737488351231,
+STORE, 140721840295936, 140721840300031,
+STORE, 140721840164864, 140721840300031,
+STORE, 93937913667584, 93937915830271,
+SNULL, 93937913729023, 93937915830271,
+STORE, 93937913667584, 93937913729023,
+STORE, 93937913729024, 93937915830271,
+ERASE, 93937913729024, 93937915830271,
+STORE, 93937915822080, 93937915830271,
+STORE, 140598835335168, 140598837587967,
+SNULL, 140598835478527, 140598837587967,
+STORE, 140598835335168, 140598835478527,
+STORE, 140598835478528, 140598837587967,
+ERASE, 140598835478528, 140598837587967,
+STORE, 140598837575680, 140598837583871,
+STORE, 140598837583872, 140598837587967,
+STORE, 140721841086464, 140721841090559,
+STORE, 140721841074176, 140721841086463,
+STORE, 140598837547008, 140598837575679,
+STORE, 140598837538816, 140598837547007,
+STORE, 140598831538176, 140598835335167,
+SNULL, 140598831538176, 140598833197055,
+STORE, 140598833197056, 140598835335167,
+STORE, 140598831538176, 140598833197055,
+SNULL, 140598835294207, 140598835335167,
+STORE, 140598833197056, 140598835294207,
+STORE, 140598835294208, 140598835335167,
+SNULL, 140598835294208, 140598835318783,
+STORE, 140598835318784, 140598835335167,
+STORE, 140598835294208, 140598835318783,
+ERASE, 140598835294208, 140598835318783,
+STORE, 140598835294208, 140598835318783,
+ERASE, 140598835318784, 140598835335167,
+STORE, 140598835318784, 140598835335167,
+SNULL, 140598835310591, 140598835318783,
+STORE, 140598835294208, 140598835310591,
+STORE, 140598835310592, 140598835318783,
+SNULL, 93937915826175, 93937915830271,
+STORE, 93937915822080, 93937915826175,
+STORE, 93937915826176, 93937915830271,
+SNULL, 140598837579775, 140598837583871,
+STORE, 140598837575680, 140598837579775,
+STORE, 140598837579776, 140598837583871,
+ERASE, 140598837547008, 140598837575679,
+STORE, 93937929179136, 93937929314303,
+STORE, 140598835855360, 140598837538815,
+STORE, 140737488347136, 140737488351231,
+STORE, 140728187723776, 140737488351231,
+SNULL, 140728187727871, 140737488351231,
+STORE, 140728187723776, 140728187727871,
+STORE, 140728187592704, 140728187727871,
+STORE, 4194304, 5128191,
+STORE, 7221248, 7241727,
+STORE, 7241728, 7249919,
+STORE, 140583951437824, 140583953690623,
+SNULL, 140583951581183, 140583953690623,
+STORE, 140583951437824, 140583951581183,
+STORE, 140583951581184, 140583953690623,
+ERASE, 140583951581184, 140583953690623,
+STORE, 140583953678336, 140583953686527,
+STORE, 140583953686528, 140583953690623,
+STORE, 140728189116416, 140728189120511,
+STORE, 140728189104128, 140728189116415,
+STORE, 140583953649664, 140583953678335,
+STORE, 140583953641472, 140583953649663,
+STORE, 140583948275712, 140583951437823,
+SNULL, 140583948275712, 140583949336575,
+STORE, 140583949336576, 140583951437823,
+STORE, 140583948275712, 140583949336575,
+SNULL, 140583951429631, 140583951437823,
+STORE, 140583949336576, 140583951429631,
+STORE, 140583951429632, 140583951437823,
+ERASE, 140583951429632, 140583951437823,
+STORE, 140583951429632, 140583951437823,
+STORE, 140583944478720, 140583948275711,
+SNULL, 140583944478720, 140583946137599,
+STORE, 140583946137600, 140583948275711,
+STORE, 140583944478720, 140583946137599,
+SNULL, 140583948234751, 140583948275711,
+STORE, 140583946137600, 140583948234751,
+STORE, 140583948234752, 140583948275711,
+SNULL, 140583948234752, 140583948259327,
+STORE, 140583948259328, 140583948275711,
+STORE, 140583948234752, 140583948259327,
+ERASE, 140583948234752, 140583948259327,
+STORE, 140583948234752, 140583948259327,
+ERASE, 140583948259328, 140583948275711,
+STORE, 140583948259328, 140583948275711,
+STORE, 140583953629184, 140583953649663,
+SNULL, 140583948251135, 140583948259327,
+STORE, 140583948234752, 140583948251135,
+STORE, 140583948251136, 140583948259327,
+SNULL, 140583951433727, 140583951437823,
+STORE, 140583951429632, 140583951433727,
+STORE, 140583951433728, 140583951437823,
+SNULL, 7233535, 7241727,
+STORE, 7221248, 7233535,
+STORE, 7233536, 7241727,
+SNULL, 140583953682431, 140583953686527,
+STORE, 140583953678336, 140583953682431,
+STORE, 140583953682432, 140583953686527,
+ERASE, 140583953649664, 140583953678335,
+STORE, 17821696, 17956863,
+STORE, 17821696, 18104319,
+STORE, 140583951945728, 140583953629183,
+STORE, 94014447943680, 94014448156671,
+STORE, 94014450253824, 94014450257919,
+STORE, 94014450257920, 94014450266111,
+STORE, 94014450266112, 94014450278399,
+STORE, 94014464225280, 94014465196031,
+STORE, 139761764306944, 139761765965823,
+STORE, 139761765965824, 139761768062975,
+STORE, 139761768062976, 139761768079359,
+STORE, 139761768079360, 139761768087551,
+STORE, 139761768087552, 139761768103935,
+STORE, 139761768103936, 139761768116223,
+STORE, 139761768116224, 139761770209279,
+STORE, 139761770209280, 139761770213375,
+STORE, 139761770213376, 139761770217471,
+STORE, 139761770217472, 139761770360831,
+STORE, 139761770729472, 139761772412927,
+STORE, 139761772412928, 139761772429311,
+STORE, 139761772457984, 139761772462079,
+STORE, 139761772462080, 139761772466175,
+STORE, 139761772466176, 139761772470271,
+STORE, 140724336517120, 140724336652287,
+STORE, 140724336955392, 140724336967679,
+STORE, 140724336967680, 140724336971775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140726063296512, 140737488351231,
+SNULL, 140726063300607, 140737488351231,
+STORE, 140726063296512, 140726063300607,
+STORE, 140726063165440, 140726063300607,
+STORE, 94016795934720, 94016798158847,
+SNULL, 94016796045311, 94016798158847,
+STORE, 94016795934720, 94016796045311,
+STORE, 94016796045312, 94016798158847,
+ERASE, 94016796045312, 94016798158847,
+STORE, 94016798138368, 94016798150655,
+STORE, 94016798150656, 94016798158847,
+STORE, 139975915966464, 139975918219263,
+SNULL, 139975916109823, 139975918219263,
+STORE, 139975915966464, 139975916109823,
+STORE, 139975916109824, 139975918219263,
+ERASE, 139975916109824, 139975918219263,
+STORE, 139975918206976, 139975918215167,
+STORE, 139975918215168, 139975918219263,
+STORE, 140726064541696, 140726064545791,
+STORE, 140726064529408, 140726064541695,
+STORE, 139975918178304, 139975918206975,
+STORE, 139975918170112, 139975918178303,
+STORE, 139975912169472, 139975915966463,
+SNULL, 139975912169472, 139975913828351,
+STORE, 139975913828352, 139975915966463,
+STORE, 139975912169472, 139975913828351,
+SNULL, 139975915925503, 139975915966463,
+STORE, 139975913828352, 139975915925503,
+STORE, 139975915925504, 139975915966463,
+SNULL, 139975915925504, 139975915950079,
+STORE, 139975915950080, 139975915966463,
+STORE, 139975915925504, 139975915950079,
+ERASE, 139975915925504, 139975915950079,
+STORE, 139975915925504, 139975915950079,
+ERASE, 139975915950080, 139975915966463,
+STORE, 139975915950080, 139975915966463,
+SNULL, 139975915941887, 139975915950079,
+STORE, 139975915925504, 139975915941887,
+STORE, 139975915941888, 139975915950079,
+SNULL, 94016798146559, 94016798150655,
+STORE, 94016798138368, 94016798146559,
+STORE, 94016798146560, 94016798150655,
+SNULL, 139975918211071, 139975918215167,
+STORE, 139975918206976, 139975918211071,
+STORE, 139975918211072, 139975918215167,
+ERASE, 139975918178304, 139975918206975,
+STORE, 94016804925440, 94016805060607,
+STORE, 94596177661952, 94596177772543,
+STORE, 94596179865600, 94596179873791,
+STORE, 94596179873792, 94596179877887,
+STORE, 94596179877888, 94596179886079,
+STORE, 94596211597312, 94596211863551,
+STORE, 140127351840768, 140127353499647,
+STORE, 140127353499648, 140127355596799,
+STORE, 140127355596800, 140127355613183,
+STORE, 140127355613184, 140127355621375,
+STORE, 140127355621376, 140127355637759,
+STORE, 140127355637760, 140127355781119,
+STORE, 140127357841408, 140127357849599,
+STORE, 140127357878272, 140127357882367,
+STORE, 140127357882368, 140127357886463,
+STORE, 140127357886464, 140127357890559,
+STORE, 140726167252992, 140726167392255,
+STORE, 140726167838720, 140726167851007,
+STORE, 140726167851008, 140726167855103,
+STORE, 140737488347136, 140737488351231,
+STORE, 140731874017280, 140737488351231,
+SNULL, 140731874021375, 140737488351231,
+STORE, 140731874017280, 140731874021375,
+STORE, 140731873886208, 140731874021375,
+STORE, 94178682265600, 94178684489727,
+SNULL, 94178682376191, 94178684489727,
+STORE, 94178682265600, 94178682376191,
+STORE, 94178682376192, 94178684489727,
+ERASE, 94178682376192, 94178684489727,
+STORE, 94178684469248, 94178684481535,
+STORE, 94178684481536, 94178684489727,
+STORE, 140460853403648, 140460855656447,
+SNULL, 140460853547007, 140460855656447,
+STORE, 140460853403648, 140460853547007,
+STORE, 140460853547008, 140460855656447,
+ERASE, 140460853547008, 140460855656447,
+STORE, 140460855644160, 140460855652351,
+STORE, 140460855652352, 140460855656447,
+STORE, 140731874103296, 140731874107391,
+STORE, 140731874091008, 140731874103295,
+STORE, 140460855615488, 140460855644159,
+STORE, 140460855607296, 140460855615487,
+STORE, 140460849606656, 140460853403647,
+SNULL, 140460849606656, 140460851265535,
+STORE, 140460851265536, 140460853403647,
+STORE, 140460849606656, 140460851265535,
+SNULL, 140460853362687, 140460853403647,
+STORE, 140460851265536, 140460853362687,
+STORE, 140460853362688, 140460853403647,
+SNULL, 140460853362688, 140460853387263,
+STORE, 140460853387264, 140460853403647,
+STORE, 140460853362688, 140460853387263,
+ERASE, 140460853362688, 140460853387263,
+STORE, 140460853362688, 140460853387263,
+ERASE, 140460853387264, 140460853403647,
+STORE, 140460853387264, 140460853403647,
+SNULL, 140460853379071, 140460853387263,
+STORE, 140460853362688, 140460853379071,
+STORE, 140460853379072, 140460853387263,
+SNULL, 94178684477439, 94178684481535,
+STORE, 94178684469248, 94178684477439,
+STORE, 94178684477440, 94178684481535,
+SNULL, 140460855648255, 140460855652351,
+STORE, 140460855644160, 140460855648255,
+STORE, 140460855648256, 140460855652351,
+ERASE, 140460855615488, 140460855644159,
+STORE, 94178692063232, 94178692198399,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733096603648, 140737488351231,
+SNULL, 140733096611839, 140737488351231,
+STORE, 140733096603648, 140733096611839,
+STORE, 140733096472576, 140733096611839,
+STORE, 94796716122112, 94796718325759,
+SNULL, 94796716224511, 94796718325759,
+STORE, 94796716122112, 94796716224511,
+STORE, 94796716224512, 94796718325759,
+ERASE, 94796716224512, 94796718325759,
+STORE, 94796718317568, 94796718325759,
+STORE, 139667892793344, 139667895046143,
+SNULL, 139667892936703, 139667895046143,
+STORE, 139667892793344, 139667892936703,
+STORE, 139667892936704, 139667895046143,
+ERASE, 139667892936704, 139667895046143,
+STORE, 139667895033856, 139667895042047,
+STORE, 139667895042048, 139667895046143,
+STORE, 140733096857600, 140733096861695,
+STORE, 140733096845312, 140733096857599,
+STORE, 139667895005184, 139667895033855,
+STORE, 139667894996992, 139667895005183,
+STORE, 139667890532352, 139667892793343,
+SNULL, 139667890532352, 139667890683903,
+STORE, 139667890683904, 139667892793343,
+STORE, 139667890532352, 139667890683903,
+SNULL, 139667892776959, 139667892793343,
+STORE, 139667890683904, 139667892776959,
+STORE, 139667892776960, 139667892793343,
+SNULL, 139667892776960, 139667892785151,
+STORE, 139667892785152, 139667892793343,
+STORE, 139667892776960, 139667892785151,
+ERASE, 139667892776960, 139667892785151,
+STORE, 139667892776960, 139667892785151,
+ERASE, 139667892785152, 139667892793343,
+STORE, 139667892785152, 139667892793343,
+STORE, 139667886735360, 139667890532351,
+SNULL, 139667886735360, 139667888394239,
+STORE, 139667888394240, 139667890532351,
+STORE, 139667886735360, 139667888394239,
+SNULL, 139667890491391, 139667890532351,
+STORE, 139667888394240, 139667890491391,
+STORE, 139667890491392, 139667890532351,
+SNULL, 139667890491392, 139667890515967,
+STORE, 139667890515968, 139667890532351,
+STORE, 139667890491392, 139667890515967,
+ERASE, 139667890491392, 139667890515967,
+STORE, 139667890491392, 139667890515967,
+ERASE, 139667890515968, 139667890532351,
+STORE, 139667890515968, 139667890532351,
+STORE, 139667884167168, 139667886735359,
+SNULL, 139667884167168, 139667884634111,
+STORE, 139667884634112, 139667886735359,
+STORE, 139667884167168, 139667884634111,
+SNULL, 139667886727167, 139667886735359,
+STORE, 139667884634112, 139667886727167,
+STORE, 139667886727168, 139667886735359,
+ERASE, 139667886727168, 139667886735359,
+STORE, 139667886727168, 139667886735359,
+STORE, 139667882053632, 139667884167167,
+SNULL, 139667882053632, 139667882065919,
+STORE, 139667882065920, 139667884167167,
+STORE, 139667882053632, 139667882065919,
+SNULL, 139667884158975, 139667884167167,
+STORE, 139667882065920, 139667884158975,
+STORE, 139667884158976, 139667884167167,
+ERASE, 139667884158976, 139667884167167,
+STORE, 139667884158976, 139667884167167,
+STORE, 139667879837696, 139667882053631,
+SNULL, 139667879837696, 139667879935999,
+STORE, 139667879936000, 139667882053631,
+STORE, 139667879837696, 139667879935999,
+SNULL, 139667882029055, 139667882053631,
+STORE, 139667879936000, 139667882029055,
+STORE, 139667882029056, 139667882053631,
+SNULL, 139667882029056, 139667882037247,
+STORE, 139667882037248, 139667882053631,
+STORE, 139667882029056, 139667882037247,
+ERASE, 139667882029056, 139667882037247,
+STORE, 139667882029056, 139667882037247,
+ERASE, 139667882037248, 139667882053631,
+STORE, 139667882037248, 139667882053631,
+STORE, 139667894988800, 139667895005183,
+SNULL, 139667890507775, 139667890515967,
+STORE, 139667890491392, 139667890507775,
+STORE, 139667890507776, 139667890515967,
+SNULL, 139667882033151, 139667882037247,
+STORE, 139667882029056, 139667882033151,
+STORE, 139667882033152, 139667882037247,
+SNULL, 139667884163071, 139667884167167,
+STORE, 139667884158976, 139667884163071,
+STORE, 139667884163072, 139667884167167,
+SNULL, 139667886731263, 139667886735359,
+STORE, 139667886727168, 139667886731263,
+STORE, 139667886731264, 139667886735359,
+SNULL, 139667892781055, 139667892785151,
+STORE, 139667892776960, 139667892781055,
+STORE, 139667892781056, 139667892785151,
+SNULL, 94796718321663, 94796718325759,
+STORE, 94796718317568, 94796718321663,
+STORE, 94796718321664, 94796718325759,
+SNULL, 139667895037951, 139667895042047,
+STORE, 139667895033856, 139667895037951,
+STORE, 139667895037952, 139667895042047,
+ERASE, 139667895005184, 139667895033855,
+STORE, 94796726063104, 94796726198271,
+STORE, 139667893305344, 139667894988799,
+STORE, 139667895005184, 139667895033855,
+STORE, 94796726063104, 94796726333439,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722489507840, 140737488351231,
+SNULL, 140722489516031, 140737488351231,
+STORE, 140722489507840, 140722489516031,
+STORE, 140722489376768, 140722489516031,
+STORE, 93980993265664, 93980995489791,
+SNULL, 93980993376255, 93980995489791,
+STORE, 93980993265664, 93980993376255,
+STORE, 93980993376256, 93980995489791,
+ERASE, 93980993376256, 93980995489791,
+STORE, 93980995469312, 93980995481599,
+STORE, 93980995481600, 93980995489791,
+STORE, 140261313593344, 140261315846143,
+SNULL, 140261313736703, 140261315846143,
+STORE, 140261313593344, 140261313736703,
+STORE, 140261313736704, 140261315846143,
+ERASE, 140261313736704, 140261315846143,
+STORE, 140261315833856, 140261315842047,
+STORE, 140261315842048, 140261315846143,
+STORE, 140722489675776, 140722489679871,
+STORE, 140722489663488, 140722489675775,
+STORE, 140261315805184, 140261315833855,
+STORE, 140261315796992, 140261315805183,
+STORE, 140261309796352, 140261313593343,
+SNULL, 140261309796352, 140261311455231,
+STORE, 140261311455232, 140261313593343,
+STORE, 140261309796352, 140261311455231,
+SNULL, 140261313552383, 140261313593343,
+STORE, 140261311455232, 140261313552383,
+STORE, 140261313552384, 140261313593343,
+SNULL, 140261313552384, 140261313576959,
+STORE, 140261313576960, 140261313593343,
+STORE, 140261313552384, 140261313576959,
+ERASE, 140261313552384, 140261313576959,
+STORE, 140261313552384, 140261313576959,
+ERASE, 140261313576960, 140261313593343,
+STORE, 140261313576960, 140261313593343,
+SNULL, 140261313568767, 140261313576959,
+STORE, 140261313552384, 140261313568767,
+STORE, 140261313568768, 140261313576959,
+SNULL, 93980995477503, 93980995481599,
+STORE, 93980995469312, 93980995477503,
+STORE, 93980995477504, 93980995481599,
+SNULL, 140261315837951, 140261315842047,
+STORE, 140261315833856, 140261315837951,
+STORE, 140261315837952, 140261315842047,
+ERASE, 140261315805184, 140261315833855,
+STORE, 93980997443584, 93980997578751,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140737488338944, 140737488351231,
+STORE, 140734059450368, 140737488351231,
+SNULL, 140734059462655, 140737488351231,
+STORE, 140734059450368, 140734059462655,
+STORE, 140734059319296, 140734059462655,
+STORE, 4194304, 5128191,
+STORE, 7221248, 7241727,
+STORE, 7241728, 7249919,
+STORE, 140307554983936, 140307557236735,
+SNULL, 140307555127295, 140307557236735,
+STORE, 140307554983936, 140307555127295,
+STORE, 140307555127296, 140307557236735,
+ERASE, 140307555127296, 140307557236735,
+STORE, 140307557224448, 140307557232639,
+STORE, 140307557232640, 140307557236735,
+STORE, 140734059483136, 140734059487231,
+STORE, 140734059470848, 140734059483135,
+STORE, 140307557195776, 140307557224447,
+STORE, 140307557187584, 140307557195775,
+STORE, 140307551821824, 140307554983935,
+SNULL, 140307551821824, 140307552882687,
+STORE, 140307552882688, 140307554983935,
+STORE, 140307551821824, 140307552882687,
+SNULL, 140307554975743, 140307554983935,
+STORE, 140307552882688, 140307554975743,
+STORE, 140307554975744, 140307554983935,
+ERASE, 140307554975744, 140307554983935,
+STORE, 140307554975744, 140307554983935,
+STORE, 140307548024832, 140307551821823,
+SNULL, 140307548024832, 140307549683711,
+STORE, 140307549683712, 140307551821823,
+STORE, 140307548024832, 140307549683711,
+SNULL, 140307551780863, 140307551821823,
+STORE, 140307549683712, 140307551780863,
+STORE, 140307551780864, 140307551821823,
+SNULL, 140307551780864, 140307551805439,
+STORE, 140307551805440, 140307551821823,
+STORE, 140307551780864, 140307551805439,
+ERASE, 140307551780864, 140307551805439,
+STORE, 140307551780864, 140307551805439,
+ERASE, 140307551805440, 140307551821823,
+STORE, 140307551805440, 140307551821823,
+STORE, 140307557175296, 140307557195775,
+SNULL, 140307551797247, 140307551805439,
+STORE, 140307551780864, 140307551797247,
+STORE, 140307551797248, 140307551805439,
+SNULL, 140307554979839, 140307554983935,
+STORE, 140307554975744, 140307554979839,
+STORE, 140307554979840, 140307554983935,
+SNULL, 7233535, 7241727,
+STORE, 7221248, 7233535,
+STORE, 7233536, 7241727,
+SNULL, 140307557228543, 140307557232639,
+STORE, 140307557224448, 140307557228543,
+STORE, 140307557228544, 140307557232639,
+ERASE, 140307557195776, 140307557224447,
+STORE, 39698432, 39833599,
+STORE, 39698432, 39981055,
+STORE, 94306485321728, 94306485432319,
+STORE, 94306487525376, 94306487533567,
+STORE, 94306487533568, 94306487537663,
+STORE, 94306487537664, 94306487545855,
+STORE, 94306488868864, 94306489004031,
+STORE, 140497673998336, 140497675657215,
+STORE, 140497675657216, 140497677754367,
+STORE, 140497677754368, 140497677770751,
+STORE, 140497677770752, 140497677778943,
+STORE, 140497677778944, 140497677795327,
+STORE, 140497677795328, 140497677938687,
+STORE, 140497679998976, 140497680007167,
+STORE, 140497680035840, 140497680039935,
+STORE, 140497680039936, 140497680044031,
+STORE, 140497680044032, 140497680048127,
+STORE, 140732780462080, 140732780601343,
+STORE, 140732782239744, 140732782252031,
+STORE, 140732782252032, 140732782256127,
+STORE, 94236915900416, 94236916011007,
+STORE, 94236918104064, 94236918112255,
+STORE, 94236918112256, 94236918116351,
+STORE, 94236918116352, 94236918124543,
+STORE, 94236939489280, 94236939624447,
+STORE, 140046091743232, 140046093402111,
+STORE, 140046093402112, 140046095499263,
+STORE, 140046095499264, 140046095515647,
+STORE, 140046095515648, 140046095523839,
+STORE, 140046095523840, 140046095540223,
+STORE, 140046095540224, 140046095683583,
+STORE, 140046097743872, 140046097752063,
+STORE, 140046097780736, 140046097784831,
+STORE, 140046097784832, 140046097788927,
+STORE, 140046097788928, 140046097793023,
+STORE, 140726694449152, 140726694588415,
+STORE, 140726695313408, 140726695325695,
+STORE, 140726695325696, 140726695329791,
+STORE, 94894582779904, 94894582992895,
+STORE, 94894585090048, 94894585094143,
+STORE, 94894585094144, 94894585102335,
+STORE, 94894585102336, 94894585114623,
+STORE, 94894592868352, 94894594293759,
+STORE, 139733563842560, 139733565501439,
+STORE, 139733565501440, 139733567598591,
+STORE, 139733567598592, 139733567614975,
+STORE, 139733567614976, 139733567623167,
+STORE, 139733567623168, 139733567639551,
+STORE, 139733567639552, 139733567651839,
+STORE, 139733567651840, 139733569744895,
+STORE, 139733569744896, 139733569748991,
+STORE, 139733569748992, 139733569753087,
+STORE, 139733569753088, 139733569896447,
+STORE, 139733570265088, 139733571948543,
+STORE, 139733571948544, 139733571964927,
+STORE, 139733571993600, 139733571997695,
+STORE, 139733571997696, 139733572001791,
+STORE, 139733572001792, 139733572005887,
+STORE, 140726369255424, 140726369394687,
+STORE, 140726370402304, 140726370414591,
+STORE, 140726370414592, 140726370418687,
+STORE, 94899236483072, 94899236696063,
+STORE, 94899238793216, 94899238797311,
+STORE, 94899238797312, 94899238805503,
+STORE, 94899238805504, 94899238817791,
+STORE, 94899263045632, 94899263979519,
+STORE, 140040959893504, 140040961552383,
+STORE, 140040961552384, 140040963649535,
+STORE, 140040963649536, 140040963665919,
+STORE, 140040963665920, 140040963674111,
+STORE, 140040963674112, 140040963690495,
+STORE, 140040963690496, 140040963702783,
+STORE, 140040963702784, 140040965795839,
+STORE, 140040965795840, 140040965799935,
+STORE, 140040965799936, 140040965804031,
+STORE, 140040965804032, 140040965947391,
+STORE, 140040966316032, 140040967999487,
+STORE, 140040967999488, 140040968015871,
+STORE, 140040968044544, 140040968048639,
+STORE, 140040968048640, 140040968052735,
+STORE, 140040968052736, 140040968056831,
+STORE, 140729921359872, 140729921499135,
+STORE, 140729921613824, 140729921626111,
+STORE, 140729921626112, 140729921630207,
+STORE, 94818265190400, 94818265403391,
+STORE, 94818267500544, 94818267504639,
+STORE, 94818267504640, 94818267512831,
+STORE, 94818267512832, 94818267525119,
+STORE, 94818283372544, 94818285858815,
+STORE, 139818425675776, 139818427334655,
+STORE, 139818427334656, 139818429431807,
+STORE, 139818429431808, 139818429448191,
+STORE, 139818429448192, 139818429456383,
+STORE, 139818429456384, 139818429472767,
+STORE, 139818429472768, 139818429485055,
+STORE, 139818429485056, 139818431578111,
+STORE, 139818431578112, 139818431582207,
+STORE, 139818431582208, 139818431586303,
+STORE, 139818431586304, 139818431729663,
+STORE, 139818432098304, 139818433781759,
+STORE, 139818433781760, 139818433798143,
+STORE, 139818433826816, 139818433830911,
+STORE, 139818433830912, 139818433835007,
+STORE, 139818433835008, 139818433839103,
+STORE, 140726170509312, 140726170648575,
+STORE, 140726171824128, 140726171836415,
+STORE, 140726171836416, 140726171840511,
+STORE, 94611513188352, 94611513401343,
+STORE, 94611515498496, 94611515502591,
+STORE, 94611515502592, 94611515510783,
+STORE, 94611515510784, 94611515523071,
+STORE, 94611516502016, 94611516907519,
+STORE, 140596246388736, 140596248047615,
+STORE, 140596248047616, 140596250144767,
+STORE, 140596250144768, 140596250161151,
+STORE, 140596250161152, 140596250169343,
+STORE, 140596250169344, 140596250185727,
+STORE, 140596250185728, 140596250198015,
+STORE, 140596250198016, 140596252291071,
+STORE, 140596252291072, 140596252295167,
+STORE, 140596252295168, 140596252299263,
+STORE, 140596252299264, 140596252442623,
+STORE, 140596252811264, 140596254494719,
+STORE, 140596254494720, 140596254511103,
+STORE, 140596254539776, 140596254543871,
+STORE, 140596254543872, 140596254547967,
+STORE, 140596254547968, 140596254552063,
+STORE, 140731551338496, 140731551477759,
+STORE, 140731551780864, 140731551793151,
+STORE, 140731551793152, 140731551797247,
+STORE, 94313835851776, 94313836064767,
+STORE, 94313838161920, 94313838166015,
+STORE, 94313838166016, 94313838174207,
+STORE, 94313838174208, 94313838186495,
+STORE, 94313858416640, 94313861906431,
+STORE, 140693503918080, 140693505576959,
+STORE, 140693505576960, 140693507674111,
+STORE, 140693507674112, 140693507690495,
+STORE, 140693507690496, 140693507698687,
+STORE, 140693507698688, 140693507715071,
+STORE, 140693507715072, 140693507727359,
+STORE, 140693507727360, 140693509820415,
+STORE, 140693509820416, 140693509824511,
+STORE, 140693509824512, 140693509828607,
+STORE, 140693509828608, 140693509971967,
+STORE, 140693510340608, 140693512024063,
+STORE, 140693512024064, 140693512040447,
+STORE, 140693512069120, 140693512073215,
+STORE, 140693512073216, 140693512077311,
+STORE, 140693512077312, 140693512081407,
+STORE, 140721116065792, 140721116205055,
+STORE, 140721117831168, 140721117843455,
+STORE, 140721117843456, 140721117847551,
+STORE, 94843650150400, 94843650363391,
+STORE, 94843652460544, 94843652464639,
+STORE, 94843652464640, 94843652472831,
+STORE, 94843652472832, 94843652485119,
+STORE, 94843685388288, 94843686281215,
+STORE, 140484193681408, 140484195340287,
+STORE, 140484195340288, 140484197437439,
+STORE, 140484197437440, 140484197453823,
+STORE, 140484197453824, 140484197462015,
+STORE, 140484197462016, 140484197478399,
+STORE, 140484197478400, 140484197490687,
+STORE, 140484197490688, 140484199583743,
+STORE, 140484199583744, 140484199587839,
+STORE, 140484199587840, 140484199591935,
+STORE, 140484199591936, 140484199735295,
+STORE, 140484200103936, 140484201787391,
+STORE, 140484201787392, 140484201803775,
+STORE, 140484201832448, 140484201836543,
+STORE, 140484201836544, 140484201840639,
+STORE, 140484201840640, 140484201844735,
+STORE, 140726294315008, 140726294454271,
+STORE, 140726295646208, 140726295658495,
+STORE, 140726295658496, 140726295662591,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140720422371328, 140737488351231,
+SNULL, 140720422379519, 140737488351231,
+STORE, 140720422371328, 140720422379519,
+STORE, 140720422240256, 140720422379519,
+STORE, 94417967845376, 94417970180095,
+SNULL, 94417968058367, 94417970180095,
+STORE, 94417967845376, 94417968058367,
+STORE, 94417968058368, 94417970180095,
+ERASE, 94417968058368, 94417970180095,
+STORE, 94417970155520, 94417970167807,
+STORE, 94417970167808, 94417970180095,
+STORE, 140252450045952, 140252452298751,
+SNULL, 140252450189311, 140252452298751,
+STORE, 140252450045952, 140252450189311,
+STORE, 140252450189312, 140252452298751,
+ERASE, 140252450189312, 140252452298751,
+STORE, 140252452286464, 140252452294655,
+STORE, 140252452294656, 140252452298751,
+STORE, 140720422416384, 140720422420479,
+STORE, 140720422404096, 140720422416383,
+STORE, 140252452257792, 140252452286463,
+STORE, 140252452249600, 140252452257791,
+STORE, 140252447932416, 140252450045951,
+SNULL, 140252447932416, 140252447944703,
+STORE, 140252447944704, 140252450045951,
+STORE, 140252447932416, 140252447944703,
+SNULL, 140252450037759, 140252450045951,
+STORE, 140252447944704, 140252450037759,
+STORE, 140252450037760, 140252450045951,
+ERASE, 140252450037760, 140252450045951,
+STORE, 140252450037760, 140252450045951,
+STORE, 140252444135424, 140252447932415,
+SNULL, 140252444135424, 140252445794303,
+STORE, 140252445794304, 140252447932415,
+STORE, 140252444135424, 140252445794303,
+SNULL, 140252447891455, 140252447932415,
+STORE, 140252445794304, 140252447891455,
+STORE, 140252447891456, 140252447932415,
+SNULL, 140252447891456, 140252447916031,
+STORE, 140252447916032, 140252447932415,
+STORE, 140252447891456, 140252447916031,
+ERASE, 140252447891456, 140252447916031,
+STORE, 140252447891456, 140252447916031,
+ERASE, 140252447916032, 140252447932415,
+STORE, 140252447916032, 140252447932415,
+STORE, 140252452241408, 140252452257791,
+SNULL, 140252447907839, 140252447916031,
+STORE, 140252447891456, 140252447907839,
+STORE, 140252447907840, 140252447916031,
+SNULL, 140252450041855, 140252450045951,
+STORE, 140252450037760, 140252450041855,
+STORE, 140252450041856, 140252450045951,
+SNULL, 94417970159615, 94417970167807,
+STORE, 94417970155520, 94417970159615,
+STORE, 94417970159616, 94417970167807,
+SNULL, 140252452290559, 140252452294655,
+STORE, 140252452286464, 140252452290559,
+STORE, 140252452290560, 140252452294655,
+ERASE, 140252452257792, 140252452286463,
+STORE, 94417996333056, 94417996468223,
+STORE, 140252450557952, 140252452241407,
+STORE, 94417996333056, 94417996603391,
+STORE, 94417996333056, 94417996738559,
+STORE, 94417996333056, 94417996910591,
+SNULL, 94417996881919, 94417996910591,
+STORE, 94417996333056, 94417996881919,
+STORE, 94417996881920, 94417996910591,
+ERASE, 94417996881920, 94417996910591,
+STORE, 94417996333056, 94417997017087,
+STORE, 94417996333056, 94417997152255,
+SNULL, 94417997135871, 94417997152255,
+STORE, 94417996333056, 94417997135871,
+STORE, 94417997135872, 94417997152255,
+ERASE, 94417997135872, 94417997152255,
+STORE, 94417996333056, 94417997291519,
+SNULL, 94417997271039, 94417997291519,
+STORE, 94417996333056, 94417997271039,
+STORE, 94417997271040, 94417997291519,
+ERASE, 94417997271040, 94417997291519,
+STORE, 94417996333056, 94417997406207,
+SNULL, 94417997381631, 94417997406207,
+STORE, 94417996333056, 94417997381631,
+STORE, 94417997381632, 94417997406207,
+ERASE, 94417997381632, 94417997406207,
+STORE, 94417996333056, 94417997516799,
+SNULL, 94417997488127, 94417997516799,
+STORE, 94417996333056, 94417997488127,
+STORE, 94417997488128, 94417997516799,
+ERASE, 94417997488128, 94417997516799,
+STORE, 94417996333056, 94417997643775,
+SNULL, 94417997631487, 94417997643775,
+STORE, 94417996333056, 94417997631487,
+STORE, 94417997631488, 94417997643775,
+ERASE, 94417997631488, 94417997643775,
+SNULL, 94417997590527, 94417997631487,
+STORE, 94417996333056, 94417997590527,
+STORE, 94417997590528, 94417997631487,
+ERASE, 94417997590528, 94417997631487,
+STORE, 94417996333056, 94417997733887,
+STORE, 94417996333056, 94417997869055,
+STORE, 94417996333056, 94417998004223,
+SNULL, 94417998000127, 94417998004223,
+STORE, 94417996333056, 94417998000127,
+STORE, 94417998000128, 94417998004223,
+ERASE, 94417998000128, 94417998004223,
+STORE, 94049170993152, 94049171206143,
+STORE, 94049173303296, 94049173307391,
+STORE, 94049173307392, 94049173315583,
+STORE, 94049173315584, 94049173327871,
+STORE, 94049176236032, 94049183645695,
+STORE, 139807795544064, 139807797202943,
+STORE, 139807797202944, 139807799300095,
+STORE, 139807799300096, 139807799316479,
+STORE, 139807799316480, 139807799324671,
+STORE, 139807799324672, 139807799341055,
+STORE, 139807799341056, 139807799353343,
+STORE, 139807799353344, 139807801446399,
+STORE, 139807801446400, 139807801450495,
+STORE, 139807801450496, 139807801454591,
+STORE, 139807801454592, 139807801597951,
+STORE, 139807801966592, 139807803650047,
+STORE, 139807803650048, 139807803666431,
+STORE, 139807803695104, 139807803699199,
+STORE, 139807803699200, 139807803703295,
+STORE, 139807803703296, 139807803707391,
+STORE, 140727555538944, 140727555678207,
+STORE, 140727555940352, 140727555952639,
+STORE, 140727555952640, 140727555956735,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722483441664, 140737488351231,
+SNULL, 140722483449855, 140737488351231,
+STORE, 140722483441664, 140722483449855,
+STORE, 140722483310592, 140722483449855,
+STORE, 94416704921600, 94416707145727,
+SNULL, 94416705032191, 94416707145727,
+STORE, 94416704921600, 94416705032191,
+STORE, 94416705032192, 94416707145727,
+ERASE, 94416705032192, 94416707145727,
+STORE, 94416707125248, 94416707137535,
+STORE, 94416707137536, 94416707145727,
+STORE, 140555439296512, 140555441549311,
+SNULL, 140555439439871, 140555441549311,
+STORE, 140555439296512, 140555439439871,
+STORE, 140555439439872, 140555441549311,
+ERASE, 140555439439872, 140555441549311,
+STORE, 140555441537024, 140555441545215,
+STORE, 140555441545216, 140555441549311,
+STORE, 140722484781056, 140722484785151,
+STORE, 140722484768768, 140722484781055,
+STORE, 140555441508352, 140555441537023,
+STORE, 140555441500160, 140555441508351,
+STORE, 140555435499520, 140555439296511,
+SNULL, 140555435499520, 140555437158399,
+STORE, 140555437158400, 140555439296511,
+STORE, 140555435499520, 140555437158399,
+SNULL, 140555439255551, 140555439296511,
+STORE, 140555437158400, 140555439255551,
+STORE, 140555439255552, 140555439296511,
+SNULL, 140555439255552, 140555439280127,
+STORE, 140555439280128, 140555439296511,
+STORE, 140555439255552, 140555439280127,
+ERASE, 140555439255552, 140555439280127,
+STORE, 140555439255552, 140555439280127,
+ERASE, 140555439280128, 140555439296511,
+STORE, 140555439280128, 140555439296511,
+SNULL, 140555439271935, 140555439280127,
+STORE, 140555439255552, 140555439271935,
+STORE, 140555439271936, 140555439280127,
+SNULL, 94416707133439, 94416707137535,
+STORE, 94416707125248, 94416707133439,
+STORE, 94416707133440, 94416707137535,
+SNULL, 140555441541119, 140555441545215,
+STORE, 140555441537024, 140555441541119,
+STORE, 140555441541120, 140555441545215,
+ERASE, 140555441508352, 140555441537023,
+STORE, 94416724672512, 94416724807679,
+STORE, 94686636953600, 94686637166591,
+STORE, 94686639263744, 94686639267839,
+STORE, 94686639267840, 94686639276031,
+STORE, 94686639276032, 94686639288319,
+STORE, 94686662193152, 94686663163903,
+STORE, 140312944431104, 140312946089983,
+STORE, 140312946089984, 140312948187135,
+STORE, 140312948187136, 140312948203519,
+STORE, 140312948203520, 140312948211711,
+STORE, 140312948211712, 140312948228095,
+STORE, 140312948228096, 140312948240383,
+STORE, 140312948240384, 140312950333439,
+STORE, 140312950333440, 140312950337535,
+STORE, 140312950337536, 140312950341631,
+STORE, 140312950341632, 140312950484991,
+STORE, 140312950853632, 140312952537087,
+STORE, 140312952537088, 140312952553471,
+STORE, 140312952582144, 140312952586239,
+STORE, 140312952586240, 140312952590335,
+STORE, 140312952590336, 140312952594431,
+STORE, 140730598920192, 140730599059455,
+STORE, 140730599108608, 140730599120895,
+STORE, 140730599120896, 140730599124991,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140726234079232, 140737488351231,
+SNULL, 140726234087423, 140737488351231,
+STORE, 140726234079232, 140726234087423,
+STORE, 140726233948160, 140726234087423,
+STORE, 94589467578368, 94589469802495,
+SNULL, 94589467688959, 94589469802495,
+STORE, 94589467578368, 94589467688959,
+STORE, 94589467688960, 94589469802495,
+ERASE, 94589467688960, 94589469802495,
+STORE, 94589469782016, 94589469794303,
+STORE, 94589469794304, 94589469802495,
+STORE, 140587082842112, 140587085094911,
+SNULL, 140587082985471, 140587085094911,
+STORE, 140587082842112, 140587082985471,
+STORE, 140587082985472, 140587085094911,
+ERASE, 140587082985472, 140587085094911,
+STORE, 140587085082624, 140587085090815,
+STORE, 140587085090816, 140587085094911,
+STORE, 140726234103808, 140726234107903,
+STORE, 140726234091520, 140726234103807,
+STORE, 140587085053952, 140587085082623,
+STORE, 140587085045760, 140587085053951,
+STORE, 140587079045120, 140587082842111,
+SNULL, 140587079045120, 140587080703999,
+STORE, 140587080704000, 140587082842111,
+STORE, 140587079045120, 140587080703999,
+SNULL, 140587082801151, 140587082842111,
+STORE, 140587080704000, 140587082801151,
+STORE, 140587082801152, 140587082842111,
+SNULL, 140587082801152, 140587082825727,
+STORE, 140587082825728, 140587082842111,
+STORE, 140587082801152, 140587082825727,
+ERASE, 140587082801152, 140587082825727,
+STORE, 140587082801152, 140587082825727,
+ERASE, 140587082825728, 140587082842111,
+STORE, 140587082825728, 140587082842111,
+SNULL, 140587082817535, 140587082825727,
+STORE, 140587082801152, 140587082817535,
+STORE, 140587082817536, 140587082825727,
+SNULL, 94589469790207, 94589469794303,
+STORE, 94589469782016, 94589469790207,
+STORE, 94589469790208, 94589469794303,
+SNULL, 140587085086719, 140587085090815,
+STORE, 140587085082624, 140587085086719,
+STORE, 140587085086720, 140587085090815,
+ERASE, 140587085053952, 140587085082623,
+STORE, 94589477507072, 94589477642239,
+STORE, 94225448325120, 94225448538111,
+STORE, 94225450635264, 94225450639359,
+STORE, 94225450639360, 94225450647551,
+STORE, 94225450647552, 94225450659839,
+STORE, 94225470246912, 94225473548287,
+STORE, 140199245496320, 140199247155199,
+STORE, 140199247155200, 140199249252351,
+STORE, 140199249252352, 140199249268735,
+STORE, 140199249268736, 140199249276927,
+STORE, 140199249276928, 140199249293311,
+STORE, 140199249293312, 140199249305599,
+STORE, 140199249305600, 140199251398655,
+STORE, 140199251398656, 140199251402751,
+STORE, 140199251402752, 140199251406847,
+STORE, 140199251406848, 140199251550207,
+STORE, 140199251918848, 140199253602303,
+STORE, 140199253602304, 140199253618687,
+STORE, 140199253647360, 140199253651455,
+STORE, 140199253651456, 140199253655551,
+STORE, 140199253655552, 140199253659647,
+STORE, 140726264414208, 140726264553471,
+STORE, 140726265843712, 140726265855999,
+STORE, 140726265856000, 140726265860095,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733508358144, 140737488351231,
+SNULL, 140733508366335, 140737488351231,
+STORE, 140733508358144, 140733508366335,
+STORE, 140733508227072, 140733508366335,
+STORE, 94766263947264, 94766266171391,
+SNULL, 94766264057855, 94766266171391,
+STORE, 94766263947264, 94766264057855,
+STORE, 94766264057856, 94766266171391,
+ERASE, 94766264057856, 94766266171391,
+STORE, 94766266150912, 94766266163199,
+STORE, 94766266163200, 94766266171391,
+STORE, 140693985132544, 140693987385343,
+SNULL, 140693985275903, 140693987385343,
+STORE, 140693985132544, 140693985275903,
+STORE, 140693985275904, 140693987385343,
+ERASE, 140693985275904, 140693987385343,
+STORE, 140693987373056, 140693987381247,
+STORE, 140693987381248, 140693987385343,
+STORE, 140733509939200, 140733509943295,
+STORE, 140733509926912, 140733509939199,
+STORE, 140693987344384, 140693987373055,
+STORE, 140693987336192, 140693987344383,
+STORE, 140693981335552, 140693985132543,
+SNULL, 140693981335552, 140693982994431,
+STORE, 140693982994432, 140693985132543,
+STORE, 140693981335552, 140693982994431,
+SNULL, 140693985091583, 140693985132543,
+STORE, 140693982994432, 140693985091583,
+STORE, 140693985091584, 140693985132543,
+SNULL, 140693985091584, 140693985116159,
+STORE, 140693985116160, 140693985132543,
+STORE, 140693985091584, 140693985116159,
+ERASE, 140693985091584, 140693985116159,
+STORE, 140693985091584, 140693985116159,
+ERASE, 140693985116160, 140693985132543,
+STORE, 140693985116160, 140693985132543,
+SNULL, 140693985107967, 140693985116159,
+STORE, 140693985091584, 140693985107967,
+STORE, 140693985107968, 140693985116159,
+SNULL, 94766266159103, 94766266163199,
+STORE, 94766266150912, 94766266159103,
+STORE, 94766266159104, 94766266163199,
+SNULL, 140693987377151, 140693987381247,
+STORE, 140693987373056, 140693987377151,
+STORE, 140693987377152, 140693987381247,
+ERASE, 140693987344384, 140693987373055,
+STORE, 94766282035200, 94766282170367,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140724769353728, 140737488351231,
+SNULL, 140724769361919, 140737488351231,
+STORE, 140724769353728, 140724769361919,
+STORE, 140724769222656, 140724769361919,
+STORE, 94710460526592, 94710462750719,
+SNULL, 94710460637183, 94710462750719,
+STORE, 94710460526592, 94710460637183,
+STORE, 94710460637184, 94710462750719,
+ERASE, 94710460637184, 94710462750719,
+STORE, 94710462730240, 94710462742527,
+STORE, 94710462742528, 94710462750719,
+STORE, 140469764395008, 140469766647807,
+SNULL, 140469764538367, 140469766647807,
+STORE, 140469764395008, 140469764538367,
+STORE, 140469764538368, 140469766647807,
+ERASE, 140469764538368, 140469766647807,
+STORE, 140469766635520, 140469766643711,
+STORE, 140469766643712, 140469766647807,
+STORE, 140724770877440, 140724770881535,
+STORE, 140724770865152, 140724770877439,
+STORE, 140469766606848, 140469766635519,
+STORE, 140469766598656, 140469766606847,
+STORE, 140469760598016, 140469764395007,
+SNULL, 140469760598016, 140469762256895,
+STORE, 140469762256896, 140469764395007,
+STORE, 140469760598016, 140469762256895,
+SNULL, 140469764354047, 140469764395007,
+STORE, 140469762256896, 140469764354047,
+STORE, 140469764354048, 140469764395007,
+SNULL, 140469764354048, 140469764378623,
+STORE, 140469764378624, 140469764395007,
+STORE, 140469764354048, 140469764378623,
+ERASE, 140469764354048, 140469764378623,
+STORE, 140469764354048, 140469764378623,
+ERASE, 140469764378624, 140469764395007,
+STORE, 140469764378624, 140469764395007,
+SNULL, 140469764370431, 140469764378623,
+STORE, 140469764354048, 140469764370431,
+STORE, 140469764370432, 140469764378623,
+SNULL, 94710462738431, 94710462742527,
+STORE, 94710462730240, 94710462738431,
+STORE, 94710462738432, 94710462742527,
+SNULL, 140469766639615, 140469766643711,
+STORE, 140469766635520, 140469766639615,
+STORE, 140469766639616, 140469766643711,
+ERASE, 140469766606848, 140469766635519,
+STORE, 94710485581824, 94710485716991,
+STORE, 94105755795456, 94105756008447,
+STORE, 94105758105600, 94105758109695,
+STORE, 94105758109696, 94105758117887,
+STORE, 94105758117888, 94105758130175,
+STORE, 94105788981248, 94105794871295,
+STORE, 140641190031360, 140641191690239,
+STORE, 140641191690240, 140641193787391,
+STORE, 140641193787392, 140641193803775,
+STORE, 140641193803776, 140641193811967,
+STORE, 140641193811968, 140641193828351,
+STORE, 140641193828352, 140641193840639,
+STORE, 140641193840640, 140641195933695,
+STORE, 140641195933696, 140641195937791,
+STORE, 140641195937792, 140641195941887,
+STORE, 140641195941888, 140641196085247,
+STORE, 140641196453888, 140641198137343,
+STORE, 140641198137344, 140641198153727,
+STORE, 140641198182400, 140641198186495,
+STORE, 140641198186496, 140641198190591,
+STORE, 140641198190592, 140641198194687,
+STORE, 140731980034048, 140731980173311,
+STORE, 140731981078528, 140731981090815,
+STORE, 140731981090816, 140731981094911,
+STORE, 93828086431744, 93828086644735,
+STORE, 93828088741888, 93828088745983,
+STORE, 93828088745984, 93828088754175,
+STORE, 93828088754176, 93828088766463,
+STORE, 93828094193664, 93828096831487,
+STORE, 139844717334528, 139844718993407,
+STORE, 139844718993408, 139844721090559,
+STORE, 139844721090560, 139844721106943,
+STORE, 139844721106944, 139844721115135,
+STORE, 139844721115136, 139844721131519,
+STORE, 139844721131520, 139844721143807,
+STORE, 139844721143808, 139844723236863,
+STORE, 139844723236864, 139844723240959,
+STORE, 139844723240960, 139844723245055,
+STORE, 139844723245056, 139844723388415,
+STORE, 139844723757056, 139844725440511,
+STORE, 139844725440512, 139844725456895,
+STORE, 139844725485568, 139844725489663,
+STORE, 139844725489664, 139844725493759,
+STORE, 139844725493760, 139844725497855,
+STORE, 140729996185600, 140729996324863,
+STORE, 140729996828672, 140729996840959,
+STORE, 140729996840960, 140729996845055,
+STORE, 140737488347136, 140737488351231,
+STORE, 140722494771200, 140737488351231,
+SNULL, 140722494775295, 140737488351231,
+STORE, 140722494771200, 140722494775295,
+STORE, 140722494640128, 140722494775295,
+STORE, 94324011311104, 94324013535231,
+SNULL, 94324011421695, 94324013535231,
+STORE, 94324011311104, 94324011421695,
+STORE, 94324011421696, 94324013535231,
+ERASE, 94324011421696, 94324013535231,
+STORE, 94324013514752, 94324013527039,
+STORE, 94324013527040, 94324013535231,
+STORE, 140151462309888, 140151464562687,
+SNULL, 140151462453247, 140151464562687,
+STORE, 140151462309888, 140151462453247,
+STORE, 140151462453248, 140151464562687,
+ERASE, 140151462453248, 140151464562687,
+STORE, 140151464550400, 140151464558591,
+STORE, 140151464558592, 140151464562687,
+STORE, 140722495467520, 140722495471615,
+STORE, 140722495455232, 140722495467519,
+STORE, 140151464521728, 140151464550399,
+STORE, 140151464513536, 140151464521727,
+STORE, 140151458512896, 140151462309887,
+SNULL, 140151458512896, 140151460171775,
+STORE, 140151460171776, 140151462309887,
+STORE, 140151458512896, 140151460171775,
+SNULL, 140151462268927, 140151462309887,
+STORE, 140151460171776, 140151462268927,
+STORE, 140151462268928, 140151462309887,
+SNULL, 140151462268928, 140151462293503,
+STORE, 140151462293504, 140151462309887,
+STORE, 140151462268928, 140151462293503,
+ERASE, 140151462268928, 140151462293503,
+STORE, 140151462268928, 140151462293503,
+ERASE, 140151462293504, 140151462309887,
+STORE, 140151462293504, 140151462309887,
+SNULL, 140151462285311, 140151462293503,
+STORE, 140151462268928, 140151462285311,
+STORE, 140151462285312, 140151462293503,
+SNULL, 94324013522943, 94324013527039,
+STORE, 94324013514752, 94324013522943,
+STORE, 94324013522944, 94324013527039,
+SNULL, 140151464554495, 140151464558591,
+STORE, 140151464550400, 140151464554495,
+STORE, 140151464554496, 140151464558591,
+ERASE, 140151464521728, 140151464550399,
+STORE, 94324024778752, 94324024913919,
+STORE, 94899262967808, 94899263180799,
+STORE, 94899265277952, 94899265282047,
+STORE, 94899265282048, 94899265290239,
+STORE, 94899265290240, 94899265302527,
+STORE, 94899295469568, 94899298689023,
+STORE, 140434388418560, 140434390077439,
+STORE, 140434390077440, 140434392174591,
+STORE, 140434392174592, 140434392190975,
+STORE, 140434392190976, 140434392199167,
+STORE, 140434392199168, 140434392215551,
+STORE, 140434392215552, 140434392227839,
+STORE, 140434392227840, 140434394320895,
+STORE, 140434394320896, 140434394324991,
+STORE, 140434394324992, 140434394329087,
+STORE, 140434394329088, 140434394472447,
+STORE, 140434394841088, 140434396524543,
+STORE, 140434396524544, 140434396540927,
+STORE, 140434396569600, 140434396573695,
+STORE, 140434396573696, 140434396577791,
+STORE, 140434396577792, 140434396581887,
+STORE, 140720618135552, 140720618274815,
+STORE, 140720618418176, 140720618430463,
+STORE, 140720618430464, 140720618434559,
+STORE, 94425529798656, 94425530011647,
+STORE, 94425532108800, 94425532112895,
+STORE, 94425532112896, 94425532121087,
+STORE, 94425532121088, 94425532133375,
+STORE, 94425557753856, 94425566576639,
+STORE, 140600528470016, 140600530128895,
+STORE, 140600530128896, 140600532226047,
+STORE, 140600532226048, 140600532242431,
+STORE, 140600532242432, 140600532250623,
+STORE, 140600532250624, 140600532267007,
+STORE, 140600532267008, 140600532279295,
+STORE, 140600532279296, 140600534372351,
+STORE, 140600534372352, 140600534376447,
+STORE, 140600534376448, 140600534380543,
+STORE, 140600534380544, 140600534523903,
+STORE, 140600534892544, 140600536575999,
+STORE, 140600536576000, 140600536592383,
+STORE, 140600536621056, 140600536625151,
+STORE, 140600536625152, 140600536629247,
+STORE, 140600536629248, 140600536633343,
+STORE, 140721857785856, 140721857925119,
+STORE, 140721858068480, 140721858080767,
+STORE, 140721858080768, 140721858084863,
+STORE, 94425529798656, 94425530011647,
+STORE, 94425532108800, 94425532112895,
+STORE, 94425532112896, 94425532121087,
+STORE, 94425532121088, 94425532133375,
+STORE, 94425557753856, 94425568772095,
+STORE, 140600528470016, 140600530128895,
+STORE, 140600530128896, 140600532226047,
+STORE, 140600532226048, 140600532242431,
+STORE, 140600532242432, 140600532250623,
+STORE, 140600532250624, 140600532267007,
+STORE, 140600532267008, 140600532279295,
+STORE, 140600532279296, 140600534372351,
+STORE, 140600534372352, 140600534376447,
+STORE, 140600534376448, 140600534380543,
+STORE, 140600534380544, 140600534523903,
+STORE, 140600534892544, 140600536575999,
+STORE, 140600536576000, 140600536592383,
+STORE, 140600536621056, 140600536625151,
+STORE, 140600536625152, 140600536629247,
+STORE, 140600536629248, 140600536633343,
+STORE, 140721857785856, 140721857925119,
+STORE, 140721858068480, 140721858080767,
+STORE, 140721858080768, 140721858084863,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735611645952, 140737488351231,
+SNULL, 140735611654143, 140737488351231,
+STORE, 140735611645952, 140735611654143,
+STORE, 140735611514880, 140735611654143,
+STORE, 94592137641984, 94592139866111,
+SNULL, 94592137752575, 94592139866111,
+STORE, 94592137641984, 94592137752575,
+STORE, 94592137752576, 94592139866111,
+ERASE, 94592137752576, 94592139866111,
+STORE, 94592139845632, 94592139857919,
+STORE, 94592139857920, 94592139866111,
+STORE, 140350425030656, 140350427283455,
+SNULL, 140350425174015, 140350427283455,
+STORE, 140350425030656, 140350425174015,
+STORE, 140350425174016, 140350427283455,
+ERASE, 140350425174016, 140350427283455,
+STORE, 140350427271168, 140350427279359,
+STORE, 140350427279360, 140350427283455,
+STORE, 140735612043264, 140735612047359,
+STORE, 140735612030976, 140735612043263,
+STORE, 140350427242496, 140350427271167,
+STORE, 140350427234304, 140350427242495,
+STORE, 140350421233664, 140350425030655,
+SNULL, 140350421233664, 140350422892543,
+STORE, 140350422892544, 140350425030655,
+STORE, 140350421233664, 140350422892543,
+SNULL, 140350424989695, 140350425030655,
+STORE, 140350422892544, 140350424989695,
+STORE, 140350424989696, 140350425030655,
+SNULL, 140350424989696, 140350425014271,
+STORE, 140350425014272, 140350425030655,
+STORE, 140350424989696, 140350425014271,
+ERASE, 140350424989696, 140350425014271,
+STORE, 140350424989696, 140350425014271,
+ERASE, 140350425014272, 140350425030655,
+STORE, 140350425014272, 140350425030655,
+SNULL, 140350425006079, 140350425014271,
+STORE, 140350424989696, 140350425006079,
+STORE, 140350425006080, 140350425014271,
+SNULL, 94592139853823, 94592139857919,
+STORE, 94592139845632, 94592139853823,
+STORE, 94592139853824, 94592139857919,
+SNULL, 140350427275263, 140350427279359,
+STORE, 140350427271168, 140350427275263,
+STORE, 140350427275264, 140350427279359,
+ERASE, 140350427242496, 140350427271167,
+STORE, 94592164823040, 94592164958207,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140723500535808, 140737488351231,
+SNULL, 140723500543999, 140737488351231,
+STORE, 140723500535808, 140723500543999,
+STORE, 140723500404736, 140723500543999,
+STORE, 94458379010048, 94458381234175,
+SNULL, 94458379120639, 94458381234175,
+STORE, 94458379010048, 94458379120639,
+STORE, 94458379120640, 94458381234175,
+ERASE, 94458379120640, 94458381234175,
+STORE, 94458381213696, 94458381225983,
+STORE, 94458381225984, 94458381234175,
+STORE, 139771674230784, 139771676483583,
+SNULL, 139771674374143, 139771676483583,
+STORE, 139771674230784, 139771674374143,
+STORE, 139771674374144, 139771676483583,
+ERASE, 139771674374144, 139771676483583,
+STORE, 139771676471296, 139771676479487,
+STORE, 139771676479488, 139771676483583,
+STORE, 140723500769280, 140723500773375,
+STORE, 140723500756992, 140723500769279,
+STORE, 139771676442624, 139771676471295,
+STORE, 139771676434432, 139771676442623,
+STORE, 139771670433792, 139771674230783,
+SNULL, 139771670433792, 139771672092671,
+STORE, 139771672092672, 139771674230783,
+STORE, 139771670433792, 139771672092671,
+SNULL, 139771674189823, 139771674230783,
+STORE, 139771672092672, 139771674189823,
+STORE, 139771674189824, 139771674230783,
+SNULL, 139771674189824, 139771674214399,
+STORE, 139771674214400, 139771674230783,
+STORE, 139771674189824, 139771674214399,
+ERASE, 139771674189824, 139771674214399,
+STORE, 139771674189824, 139771674214399,
+ERASE, 139771674214400, 139771674230783,
+STORE, 139771674214400, 139771674230783,
+SNULL, 139771674206207, 139771674214399,
+STORE, 139771674189824, 139771674206207,
+STORE, 139771674206208, 139771674214399,
+SNULL, 94458381221887, 94458381225983,
+STORE, 94458381213696, 94458381221887,
+STORE, 94458381221888, 94458381225983,
+SNULL, 139771676475391, 139771676479487,
+STORE, 139771676471296, 139771676475391,
+STORE, 139771676475392, 139771676479487,
+ERASE, 139771676442624, 139771676471295,
+STORE, 94458401873920, 94458402009087,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140731316264960, 140737488351231,
+SNULL, 140731316273151, 140737488351231,
+STORE, 140731316264960, 140731316273151,
+STORE, 140731316133888, 140731316273151,
+STORE, 94437830881280, 94437833215999,
+SNULL, 94437831094271, 94437833215999,
+STORE, 94437830881280, 94437831094271,
+STORE, 94437831094272, 94437833215999,
+ERASE, 94437831094272, 94437833215999,
+STORE, 94437833191424, 94437833203711,
+STORE, 94437833203712, 94437833215999,
+STORE, 140265986031616, 140265988284415,
+SNULL, 140265986174975, 140265988284415,
+STORE, 140265986031616, 140265986174975,
+STORE, 140265986174976, 140265988284415,
+ERASE, 140265986174976, 140265988284415,
+STORE, 140265988272128, 140265988280319,
+STORE, 140265988280320, 140265988284415,
+STORE, 140731316318208, 140731316322303,
+STORE, 140731316305920, 140731316318207,
+STORE, 140265988243456, 140265988272127,
+STORE, 140265988235264, 140265988243455,
+STORE, 140265983918080, 140265986031615,
+SNULL, 140265983918080, 140265983930367,
+STORE, 140265983930368, 140265986031615,
+STORE, 140265983918080, 140265983930367,
+SNULL, 140265986023423, 140265986031615,
+STORE, 140265983930368, 140265986023423,
+STORE, 140265986023424, 140265986031615,
+ERASE, 140265986023424, 140265986031615,
+STORE, 140265986023424, 140265986031615,
+STORE, 140265980121088, 140265983918079,
+SNULL, 140265980121088, 140265981779967,
+STORE, 140265981779968, 140265983918079,
+STORE, 140265980121088, 140265981779967,
+SNULL, 140265983877119, 140265983918079,
+STORE, 140265981779968, 140265983877119,
+STORE, 140265983877120, 140265983918079,
+SNULL, 140265983877120, 140265983901695,
+STORE, 140265983901696, 140265983918079,
+STORE, 140265983877120, 140265983901695,
+ERASE, 140265983877120, 140265983901695,
+STORE, 140265983877120, 140265983901695,
+ERASE, 140265983901696, 140265983918079,
+STORE, 140265983901696, 140265983918079,
+STORE, 140265988227072, 140265988243455,
+SNULL, 140265983893503, 140265983901695,
+STORE, 140265983877120, 140265983893503,
+STORE, 140265983893504, 140265983901695,
+SNULL, 140265986027519, 140265986031615,
+STORE, 140265986023424, 140265986027519,
+STORE, 140265986027520, 140265986031615,
+SNULL, 94437833195519, 94437833203711,
+STORE, 94437833191424, 94437833195519,
+STORE, 94437833195520, 94437833203711,
+SNULL, 140265988276223, 140265988280319,
+STORE, 140265988272128, 140265988276223,
+STORE, 140265988276224, 140265988280319,
+ERASE, 140265988243456, 140265988272127,
+STORE, 94437847638016, 94437847773183,
+STORE, 140265986543616, 140265988227071,
+STORE, 94437847638016, 94437847908351,
+STORE, 94437847638016, 94437848043519,
+STORE, 94437847638016, 94437848190975,
+SNULL, 94437848178687, 94437848190975,
+STORE, 94437847638016, 94437848178687,
+STORE, 94437848178688, 94437848190975,
+ERASE, 94437848178688, 94437848190975,
+STORE, 94437847638016, 94437848330239,
+STORE, 94437847638016, 94437848465407,
+SNULL, 94437848444927, 94437848465407,
+STORE, 94437847638016, 94437848444927,
+STORE, 94437848444928, 94437848465407,
+ERASE, 94437848444928, 94437848465407,
+STORE, 94437847638016, 94437848584191,
+STORE, 94437847638016, 94437848719359,
+SNULL, 94437848678399, 94437848719359,
+STORE, 94437847638016, 94437848678399,
+STORE, 94437848678400, 94437848719359,
+ERASE, 94437848678400, 94437848719359,
+STORE, 94437847638016, 94437848842239,
+SNULL, 94437848825855, 94437848842239,
+STORE, 94437847638016, 94437848825855,
+STORE, 94437848825856, 94437848842239,
+ERASE, 94437848825856, 94437848842239,
+STORE, 94437847638016, 94437848961023,
+STORE, 94437847638016, 94437849096191,
+STORE, 94661814710272, 94661814923263,
+STORE, 94661817020416, 94661817024511,
+STORE, 94661817024512, 94661817032703,
+STORE, 94661817032704, 94661817044991,
+STORE, 94661840424960, 94661841240063,
+STORE, 140582259814400, 140582261473279,
+STORE, 140582261473280, 140582263570431,
+STORE, 140582263570432, 140582263586815,
+STORE, 140582263586816, 140582263595007,
+STORE, 140582263595008, 140582263611391,
+STORE, 140582263611392, 140582263623679,
+STORE, 140582263623680, 140582265716735,
+STORE, 140582265716736, 140582265720831,
+STORE, 140582265720832, 140582265724927,
+STORE, 140582265724928, 140582265868287,
+STORE, 140582266236928, 140582267920383,
+STORE, 140582267920384, 140582267936767,
+STORE, 140582267965440, 140582267969535,
+STORE, 140582267969536, 140582267973631,
+STORE, 140582267973632, 140582267977727,
+STORE, 140735472508928, 140735472648191,
+STORE, 140735472672768, 140735472685055,
+STORE, 140735472685056, 140735472689151,
+STORE, 94440069140480, 94440069353471,
+STORE, 94440071450624, 94440071454719,
+STORE, 94440071454720, 94440071462911,
+STORE, 94440071462912, 94440071475199,
+STORE, 94440072122368, 94440079048703,
+STORE, 140112218095616, 140112219754495,
+STORE, 140112219754496, 140112221851647,
+STORE, 140112221851648, 140112221868031,
+STORE, 140112221868032, 140112221876223,
+STORE, 140112221876224, 140112221892607,
+STORE, 140112221892608, 140112221904895,
+STORE, 140112221904896, 140112223997951,
+STORE, 140112223997952, 140112224002047,
+STORE, 140112224002048, 140112224006143,
+STORE, 140112224006144, 140112224149503,
+STORE, 140112224518144, 140112226201599,
+STORE, 140112226201600, 140112226217983,
+STORE, 140112226246656, 140112226250751,
+STORE, 140112226250752, 140112226254847,
+STORE, 140112226254848, 140112226258943,
+STORE, 140737460969472, 140737461108735,
+STORE, 140737462083584, 140737462095871,
+STORE, 140737462095872, 140737462099967,
+STORE, 94257654345728, 94257654390783,
+STORE, 94257656483840, 94257656487935,
+STORE, 94257656487936, 94257656492031,
+STORE, 94257656492032, 94257656496127,
+STORE, 94257665859584, 94257665994751,
+STORE, 140507070345216, 140507070386175,
+STORE, 140507070386176, 140507072483327,
+STORE, 140507072483328, 140507072487423,
+STORE, 140507072487424, 140507072491519,
+STORE, 140507072491520, 140507072516095,
+STORE, 140507072516096, 140507072561151,
+STORE, 140507072561152, 140507074654207,
+STORE, 140507074654208, 140507074658303,
+STORE, 140507074658304, 140507074662399,
+STORE, 140507074662400, 140507074744319,
+STORE, 140507074744320, 140507076841471,
+STORE, 140507076841472, 140507076845567,
+STORE, 140507076845568, 140507076849663,
+STORE, 140507076849664, 140507076857855,
+STORE, 140507076857856, 140507076886527,
+STORE, 140507076886528, 140507078979583,
+STORE, 140507078979584, 140507078983679,
+STORE, 140507078983680, 140507078987775,
+STORE, 140507078987776, 140507079086079,
+STORE, 140507079086080, 140507081179135,
+STORE, 140507081179136, 140507081183231,
+STORE, 140507081183232, 140507081187327,
+STORE, 140507081187328, 140507081203711,
+STORE, 140507081203712, 140507081220095,
+STORE, 140507081220096, 140507083317247,
+STORE, 140507083317248, 140507083321343,
+STORE, 140507083321344, 140507083325439,
+STORE, 140507083325440, 140507083792383,
+STORE, 140507083792384, 140507085885439,
+STORE, 140507085885440, 140507085889535,
+STORE, 140507085889536, 140507085893631,
+STORE, 140507085893632, 140507085905919,
+STORE, 140507085905920, 140507087998975,
+STORE, 140507087998976, 140507088003071,
+STORE, 140507088003072, 140507088007167,
+STORE, 140507088007168, 140507088125951,
+STORE, 140507088125952, 140507090219007,
+STORE, 140507090219008, 140507090223103,
+STORE, 140507090223104, 140507090227199,
+STORE, 140507090227200, 140507090268159,
+STORE, 140507090268160, 140507091927039,
+STORE, 140507091927040, 140507094024191,
+STORE, 140507094024192, 140507094040575,
+STORE, 140507094040576, 140507094048767,
+STORE, 140507094048768, 140507094065151,
+STORE, 140507094065152, 140507094216703,
+STORE, 140507094216704, 140507096309759,
+STORE, 140507096309760, 140507096313855,
+STORE, 140507096313856, 140507096317951,
+STORE, 140507096317952, 140507096326143,
+STORE, 140507096326144, 140507096379391,
+STORE, 140507096379392, 140507098472447,
+STORE, 140507098472448, 140507098476543,
+STORE, 140507098476544, 140507098480639,
+STORE, 140507098480640, 140507098623999,
+STORE, 140507098980352, 140507100663807,
+STORE, 140507100663808, 140507100692479,
+STORE, 140507100721152, 140507100725247,
+STORE, 140507100725248, 140507100729343,
+STORE, 140507100729344, 140507100733439,
+STORE, 140728152780800, 140728152915967,
+STORE, 140728153698304, 140728153710591,
+STORE, 140728153710592, 140728153714687,
+STORE, 140507068137472, 140507070345215,
+SNULL, 140507068137472, 140507068190719,
+STORE, 140507068190720, 140507070345215,
+STORE, 140507068137472, 140507068190719,
+SNULL, 140507070287871, 140507070345215,
+STORE, 140507068190720, 140507070287871,
+STORE, 140507070287872, 140507070345215,
+SNULL, 140507070287872, 140507070296063,
+STORE, 140507070296064, 140507070345215,
+STORE, 140507070287872, 140507070296063,
+ERASE, 140507070287872, 140507070296063,
+STORE, 140507070287872, 140507070296063,
+ERASE, 140507070296064, 140507070345215,
+STORE, 140507070296064, 140507070345215,
+STORE, 140507100692480, 140507100721151,
+STORE, 140507065810944, 140507068137471,
+SNULL, 140507065810944, 140507065843711,
+STORE, 140507065843712, 140507068137471,
+STORE, 140507065810944, 140507065843711,
+SNULL, 140507067940863, 140507068137471,
+STORE, 140507065843712, 140507067940863,
+STORE, 140507067940864, 140507068137471,
+SNULL, 140507067940864, 140507067949055,
+STORE, 140507067949056, 140507068137471,
+STORE, 140507067940864, 140507067949055,
+ERASE, 140507067940864, 140507067949055,
+STORE, 140507067940864, 140507067949055,
+ERASE, 140507067949056, 140507068137471,
+STORE, 140507067949056, 140507068137471,
+SNULL, 140507067944959, 140507067949055,
+STORE, 140507067940864, 140507067944959,
+STORE, 140507067944960, 140507067949055,
+SNULL, 140507070291967, 140507070296063,
+STORE, 140507070287872, 140507070291967,
+STORE, 140507070291968, 140507070296063,
+ERASE, 140507100692480, 140507100721151,
+STORE, 140507063705600, 140507065810943,
+SNULL, 140507063705600, 140507063709695,
+STORE, 140507063709696, 140507065810943,
+STORE, 140507063705600, 140507063709695,
+SNULL, 140507065802751, 140507065810943,
+STORE, 140507063709696, 140507065802751,
+STORE, 140507065802752, 140507065810943,
+ERASE, 140507065802752, 140507065810943,
+STORE, 140507065802752, 140507065810943,
+SNULL, 140507065806847, 140507065810943,
+STORE, 140507065802752, 140507065806847,
+STORE, 140507065806848, 140507065810943,
+STORE, 140507061600256, 140507063705599,
+SNULL, 140507061600256, 140507061604351,
+STORE, 140507061604352, 140507063705599,
+STORE, 140507061600256, 140507061604351,
+SNULL, 140507063697407, 140507063705599,
+STORE, 140507061604352, 140507063697407,
+STORE, 140507063697408, 140507063705599,
+ERASE, 140507063697408, 140507063705599,
+STORE, 140507063697408, 140507063705599,
+SNULL, 140507063701503, 140507063705599,
+STORE, 140507063697408, 140507063701503,
+STORE, 140507063701504, 140507063705599,
+STORE, 140507059490816, 140507061600255,
+SNULL, 140507059490816, 140507059499007,
+STORE, 140507059499008, 140507061600255,
+STORE, 140507059490816, 140507059499007,
+SNULL, 140507061592063, 140507061600255,
+STORE, 140507059499008, 140507061592063,
+STORE, 140507061592064, 140507061600255,
+ERASE, 140507061592064, 140507061600255,
+STORE, 140507061592064, 140507061600255,
+SNULL, 140507061596159, 140507061600255,
+STORE, 140507061592064, 140507061596159,
+STORE, 140507061596160, 140507061600255,
+STORE, 140507057377280, 140507059490815,
+SNULL, 140507057377280, 140507057389567,
+STORE, 140507057389568, 140507059490815,
+STORE, 140507057377280, 140507057389567,
+SNULL, 140507059482623, 140507059490815,
+STORE, 140507057389568, 140507059482623,
+STORE, 140507059482624, 140507059490815,
+ERASE, 140507059482624, 140507059490815,
+STORE, 140507059482624, 140507059490815,
+SNULL, 140507059486719, 140507059490815,
+STORE, 140507059482624, 140507059486719,
+STORE, 140507059486720, 140507059490815,
+STORE, 140507055255552, 140507057377279,
+SNULL, 140507055255552, 140507055276031,
+STORE, 140507055276032, 140507057377279,
+STORE, 140507055255552, 140507055276031,
+SNULL, 140507057369087, 140507057377279,
+STORE, 140507055276032, 140507057369087,
+STORE, 140507057369088, 140507057377279,
+ERASE, 140507057369088, 140507057377279,
+STORE, 140507057369088, 140507057377279,
+SNULL, 140507057373183, 140507057377279,
+STORE, 140507057369088, 140507057373183,
+STORE, 140507057373184, 140507057377279,
+STORE, 140507098693632, 140507098980351,
+SNULL, 140507098959871, 140507098980351,
+STORE, 140507098693632, 140507098959871,
+STORE, 140507098959872, 140507098980351,
+SNULL, 140507098959872, 140507098976255,
+STORE, 140507098976256, 140507098980351,
+STORE, 140507098959872, 140507098976255,
+ERASE, 140507098959872, 140507098976255,
+STORE, 140507098959872, 140507098976255,
+ERASE, 140507098976256, 140507098980351,
+STORE, 140507098976256, 140507098980351,
+STORE, 140507100692480, 140507100721151,
+STORE, 140507053125632, 140507055255551,
+SNULL, 140507053125632, 140507053154303,
+STORE, 140507053154304, 140507055255551,
+STORE, 140507053125632, 140507053154303,
+SNULL, 140507055247359, 140507055255551,
+STORE, 140507053154304, 140507055247359,
+STORE, 140507055247360, 140507055255551,
+ERASE, 140507055247360, 140507055255551,
+STORE, 140507055247360, 140507055255551,
+STORE, 140507051012096, 140507053125631,
+SNULL, 140507051012096, 140507051024383,
+STORE, 140507051024384, 140507053125631,
+STORE, 140507051012096, 140507051024383,
+SNULL, 140507053117439, 140507053125631,
+STORE, 140507051024384, 140507053117439,
+STORE, 140507053117440, 140507053125631,
+ERASE, 140507053117440, 140507053125631,
+STORE, 140507053117440, 140507053125631,
+SNULL, 140507053121535, 140507053125631,
+STORE, 140507053117440, 140507053121535,
+STORE, 140507053121536, 140507053125631,
+SNULL, 140507055251455, 140507055255551,
+STORE, 140507055247360, 140507055251455,
+STORE, 140507055251456, 140507055255551,
+SNULL, 140507098972159, 140507098976255,
+STORE, 140507098959872, 140507098972159,
+STORE, 140507098972160, 140507098976255,
+ERASE, 140507100692480, 140507100721151,
+STORE, 140507100717056, 140507100721151,
+ERASE, 140507100717056, 140507100721151,
+STORE, 140507100717056, 140507100721151,
+ERASE, 140507100717056, 140507100721151,
+STORE, 140507100717056, 140507100721151,
+ERASE, 140507100717056, 140507100721151,
+STORE, 140507100717056, 140507100721151,
+ERASE, 140507100717056, 140507100721151,
+STORE, 140507100692480, 140507100721151,
+ERASE, 140507068137472, 140507068190719,
+ERASE, 140507068190720, 140507070287871,
+ERASE, 140507070287872, 140507070291967,
+ERASE, 140507070291968, 140507070296063,
+ERASE, 140507070296064, 140507070345215,
+ERASE, 140507065810944, 140507065843711,
+ERASE, 140507065843712, 140507067940863,
+ERASE, 140507067940864, 140507067944959,
+ERASE, 140507067944960, 140507067949055,
+ERASE, 140507067949056, 140507068137471,
+ERASE, 140507063705600, 140507063709695,
+ERASE, 140507063709696, 140507065802751,
+ERASE, 140507065802752, 140507065806847,
+ERASE, 140507065806848, 140507065810943,
+ERASE, 140507061600256, 140507061604351,
+ERASE, 140507061604352, 140507063697407,
+ERASE, 140507063697408, 140507063701503,
+ERASE, 140507063701504, 140507063705599,
+ERASE, 140507059490816, 140507059499007,
+ERASE, 140507059499008, 140507061592063,
+ERASE, 140507061592064, 140507061596159,
+ERASE, 140507061596160, 140507061600255,
+ERASE, 140507057377280, 140507057389567,
+ERASE, 140507057389568, 140507059482623,
+ERASE, 140507059482624, 140507059486719,
+ERASE, 140507059486720, 140507059490815,
+ERASE, 140507055255552, 140507055276031,
+ERASE, 140507055276032, 140507057369087,
+ERASE, 140507057369088, 140507057373183,
+ERASE, 140507057373184, 140507057377279,
+ERASE, 140507098693632, 140507098959871,
+ERASE, 140507098959872, 140507098972159,
+ERASE, 140507098972160, 140507098976255,
+ERASE, 140507098976256, 140507098980351,
+ERASE, 140507051012096, 140507051024383,
+ERASE, 140507051024384, 140507053117439,
+ERASE, 140507053117440, 140507053121535,
+ERASE, 140507053121536, 140507053125631,
+STORE, 94036448296960, 94036448509951,
+STORE, 94036450607104, 94036450611199,
+STORE, 94036450611200, 94036450619391,
+STORE, 94036450619392, 94036450631679,
+STORE, 94036482445312, 94036502376447,
+STORE, 140469487013888, 140469488672767,
+STORE, 140469488672768, 140469490769919,
+STORE, 140469490769920, 140469490786303,
+STORE, 140469490786304, 140469490794495,
+STORE, 140469490794496, 140469490810879,
+STORE, 140469490810880, 140469490823167,
+STORE, 140469490823168, 140469492916223,
+STORE, 140469492916224, 140469492920319,
+STORE, 140469492920320, 140469492924415,
+STORE, 140469492924416, 140469493067775,
+STORE, 140469493436416, 140469495119871,
+STORE, 140469495119872, 140469495136255,
+STORE, 140469495164928, 140469495169023,
+STORE, 140469495169024, 140469495173119,
+STORE, 140469495173120, 140469495177215,
+STORE, 140732281446400, 140732281585663,
+STORE, 140732282736640, 140732282748927,
+STORE, 140732282748928, 140732282753023,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140723411931136, 140737488351231,
+SNULL, 140723411939327, 140737488351231,
+STORE, 140723411931136, 140723411939327,
+STORE, 140723411800064, 140723411939327,
+STORE, 93993768685568, 93993770909695,
+SNULL, 93993768796159, 93993770909695,
+STORE, 93993768685568, 93993768796159,
+STORE, 93993768796160, 93993770909695,
+ERASE, 93993768796160, 93993770909695,
+STORE, 93993770889216, 93993770901503,
+STORE, 93993770901504, 93993770909695,
+STORE, 140508681740288, 140508683993087,
+SNULL, 140508681883647, 140508683993087,
+STORE, 140508681740288, 140508681883647,
+STORE, 140508681883648, 140508683993087,
+ERASE, 140508681883648, 140508683993087,
+STORE, 140508683980800, 140508683988991,
+STORE, 140508683988992, 140508683993087,
+STORE, 140723412070400, 140723412074495,
+STORE, 140723412058112, 140723412070399,
+STORE, 140508683952128, 140508683980799,
+STORE, 140508683943936, 140508683952127,
+STORE, 140508677943296, 140508681740287,
+SNULL, 140508677943296, 140508679602175,
+STORE, 140508679602176, 140508681740287,
+STORE, 140508677943296, 140508679602175,
+SNULL, 140508681699327, 140508681740287,
+STORE, 140508679602176, 140508681699327,
+STORE, 140508681699328, 140508681740287,
+SNULL, 140508681699328, 140508681723903,
+STORE, 140508681723904, 140508681740287,
+STORE, 140508681699328, 140508681723903,
+ERASE, 140508681699328, 140508681723903,
+STORE, 140508681699328, 140508681723903,
+ERASE, 140508681723904, 140508681740287,
+STORE, 140508681723904, 140508681740287,
+SNULL, 140508681715711, 140508681723903,
+STORE, 140508681699328, 140508681715711,
+STORE, 140508681715712, 140508681723903,
+SNULL, 93993770897407, 93993770901503,
+STORE, 93993770889216, 93993770897407,
+STORE, 93993770897408, 93993770901503,
+SNULL, 140508683984895, 140508683988991,
+STORE, 140508683980800, 140508683984895,
+STORE, 140508683984896, 140508683988991,
+ERASE, 140508683952128, 140508683980799,
+STORE, 93993791582208, 93993791717375,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140734685458432, 140737488351231,
+SNULL, 140734685466623, 140737488351231,
+STORE, 140734685458432, 140734685466623,
+STORE, 140734685327360, 140734685466623,
+STORE, 93832321548288, 93832323772415,
+SNULL, 93832321658879, 93832323772415,
+STORE, 93832321548288, 93832321658879,
+STORE, 93832321658880, 93832323772415,
+ERASE, 93832321658880, 93832323772415,
+STORE, 93832323751936, 93832323764223,
+STORE, 93832323764224, 93832323772415,
+STORE, 140650945118208, 140650947371007,
+SNULL, 140650945261567, 140650947371007,
+STORE, 140650945118208, 140650945261567,
+STORE, 140650945261568, 140650947371007,
+ERASE, 140650945261568, 140650947371007,
+STORE, 140650947358720, 140650947366911,
+STORE, 140650947366912, 140650947371007,
+STORE, 140734686081024, 140734686085119,
+STORE, 140734686068736, 140734686081023,
+STORE, 140650947330048, 140650947358719,
+STORE, 140650947321856, 140650947330047,
+STORE, 140650941321216, 140650945118207,
+SNULL, 140650941321216, 140650942980095,
+STORE, 140650942980096, 140650945118207,
+STORE, 140650941321216, 140650942980095,
+SNULL, 140650945077247, 140650945118207,
+STORE, 140650942980096, 140650945077247,
+STORE, 140650945077248, 140650945118207,
+SNULL, 140650945077248, 140650945101823,
+STORE, 140650945101824, 140650945118207,
+STORE, 140650945077248, 140650945101823,
+ERASE, 140650945077248, 140650945101823,
+STORE, 140650945077248, 140650945101823,
+ERASE, 140650945101824, 140650945118207,
+STORE, 140650945101824, 140650945118207,
+SNULL, 140650945093631, 140650945101823,
+STORE, 140650945077248, 140650945093631,
+STORE, 140650945093632, 140650945101823,
+SNULL, 93832323760127, 93832323764223,
+STORE, 93832323751936, 93832323760127,
+STORE, 93832323760128, 93832323764223,
+SNULL, 140650947362815, 140650947366911,
+STORE, 140650947358720, 140650947362815,
+STORE, 140650947362816, 140650947366911,
+ERASE, 140650947330048, 140650947358719,
+STORE, 93832331890688, 93832332025855,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140728333520896, 140737488351231,
+SNULL, 140728333529087, 140737488351231,
+STORE, 140728333520896, 140728333529087,
+STORE, 140728333389824, 140728333529087,
+STORE, 94872734732288, 94872736956415,
+SNULL, 94872734842879, 94872736956415,
+STORE, 94872734732288, 94872734842879,
+STORE, 94872734842880, 94872736956415,
+ERASE, 94872734842880, 94872736956415,
+STORE, 94872736935936, 94872736948223,
+STORE, 94872736948224, 94872736956415,
+STORE, 139755193257984, 139755195510783,
+SNULL, 139755193401343, 139755195510783,
+STORE, 139755193257984, 139755193401343,
+STORE, 139755193401344, 139755195510783,
+ERASE, 139755193401344, 139755195510783,
+STORE, 139755195498496, 139755195506687,
+STORE, 139755195506688, 139755195510783,
+STORE, 140728333926400, 140728333930495,
+STORE, 140728333914112, 140728333926399,
+STORE, 139755195469824, 139755195498495,
+STORE, 139755195461632, 139755195469823,
+STORE, 139755189460992, 139755193257983,
+SNULL, 139755189460992, 139755191119871,
+STORE, 139755191119872, 139755193257983,
+STORE, 139755189460992, 139755191119871,
+SNULL, 139755193217023, 139755193257983,
+STORE, 139755191119872, 139755193217023,
+STORE, 139755193217024, 139755193257983,
+SNULL, 139755193217024, 139755193241599,
+STORE, 139755193241600, 139755193257983,
+STORE, 139755193217024, 139755193241599,
+ERASE, 139755193217024, 139755193241599,
+STORE, 139755193217024, 139755193241599,
+ERASE, 139755193241600, 139755193257983,
+STORE, 139755193241600, 139755193257983,
+SNULL, 139755193233407, 139755193241599,
+STORE, 139755193217024, 139755193233407,
+STORE, 139755193233408, 139755193241599,
+SNULL, 94872736944127, 94872736948223,
+STORE, 94872736935936, 94872736944127,
+STORE, 94872736944128, 94872736948223,
+SNULL, 139755195502591, 139755195506687,
+STORE, 139755195498496, 139755195502591,
+STORE, 139755195502592, 139755195506687,
+ERASE, 139755195469824, 139755195498495,
+STORE, 94872749744128, 94872749879295,
+STORE, 94720243642368, 94720243855359,
+STORE, 94720245952512, 94720245956607,
+STORE, 94720245956608, 94720245964799,
+STORE, 94720245964800, 94720245977087,
+STORE, 94720277745664, 94720278151167,
+STORE, 140453174497280, 140453176156159,
+STORE, 140453176156160, 140453178253311,
+STORE, 140453178253312, 140453178269695,
+STORE, 140453178269696, 140453178277887,
+STORE, 140453178277888, 140453178294271,
+STORE, 140453178294272, 140453178306559,
+STORE, 140453178306560, 140453180399615,
+STORE, 140453180399616, 140453180403711,
+STORE, 140453180403712, 140453180407807,
+STORE, 140453180407808, 140453180551167,
+STORE, 140453180919808, 140453182603263,
+STORE, 140453182603264, 140453182619647,
+STORE, 140453182648320, 140453182652415,
+STORE, 140453182652416, 140453182656511,
+STORE, 140453182656512, 140453182660607,
+STORE, 140733223923712, 140733224062975,
+STORE, 140733224808448, 140733224820735,
+STORE, 140733224820736, 140733224824831,
+STORE, 94321091141632, 94321091354623,
+STORE, 94321093451776, 94321093455871,
+STORE, 94321093455872, 94321093464063,
+STORE, 94321093464064, 94321093476351,
+STORE, 94321115873280, 94321117229055,
+STORE, 139695978840064, 139695980498943,
+STORE, 139695980498944, 139695982596095,
+STORE, 139695982596096, 139695982612479,
+STORE, 139695982612480, 139695982620671,
+STORE, 139695982620672, 139695982637055,
+STORE, 139695982637056, 139695982649343,
+STORE, 139695982649344, 139695984742399,
+STORE, 139695984742400, 139695984746495,
+STORE, 139695984746496, 139695984750591,
+STORE, 139695984750592, 139695984893951,
+STORE, 139695985262592, 139695986946047,
+STORE, 139695986946048, 139695986962431,
+STORE, 139695986991104, 139695986995199,
+STORE, 139695986995200, 139695986999295,
+STORE, 139695986999296, 139695987003391,
+STORE, 140734650564608, 140734650703871,
+STORE, 140734650785792, 140734650798079,
+STORE, 140734650798080, 140734650802175,
+STORE, 94523438456832, 94523438669823,
+STORE, 94523440766976, 94523440771071,
+STORE, 94523440771072, 94523440779263,
+STORE, 94523440779264, 94523440791551,
+STORE, 94523464544256, 94523465842687,
+STORE, 140453231493120, 140453233151999,
+STORE, 140453233152000, 140453235249151,
+STORE, 140453235249152, 140453235265535,
+STORE, 140453235265536, 140453235273727,
+STORE, 140453235273728, 140453235290111,
+STORE, 140453235290112, 140453235302399,
+STORE, 140453235302400, 140453237395455,
+STORE, 140453237395456, 140453237399551,
+STORE, 140453237399552, 140453237403647,
+STORE, 140453237403648, 140453237547007,
+STORE, 140453237915648, 140453239599103,
+STORE, 140453239599104, 140453239615487,
+STORE, 140453239644160, 140453239648255,
+STORE, 140453239648256, 140453239652351,
+STORE, 140453239652352, 140453239656447,
+STORE, 140734679445504, 140734679584767,
+STORE, 140734680018944, 140734680031231,
+STORE, 140734680031232, 140734680035327,
+STORE, 94614776987648, 94614777200639,
+STORE, 94614779297792, 94614779301887,
+STORE, 94614779301888, 94614779310079,
+STORE, 94614779310080, 94614779322367,
+STORE, 94614798467072, 94614800699391,
+STORE, 139677037182976, 139677038841855,
+STORE, 139677038841856, 139677040939007,
+STORE, 139677040939008, 139677040955391,
+STORE, 139677040955392, 139677040963583,
+STORE, 139677040963584, 139677040979967,
+STORE, 139677040979968, 139677040992255,
+STORE, 139677040992256, 139677043085311,
+STORE, 139677043085312, 139677043089407,
+STORE, 139677043089408, 139677043093503,
+STORE, 139677043093504, 139677043236863,
+STORE, 139677043605504, 139677045288959,
+STORE, 139677045288960, 139677045305343,
+STORE, 139677045334016, 139677045338111,
+STORE, 139677045338112, 139677045342207,
+STORE, 139677045342208, 139677045346303,
+STORE, 140721604411392, 140721604550655,
+STORE, 140721606135808, 140721606148095,
+STORE, 140721606148096, 140721606152191,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140729280544768, 140737488351231,
+SNULL, 140729280552959, 140737488351231,
+STORE, 140729280544768, 140729280552959,
+STORE, 140729280413696, 140729280552959,
+STORE, 94863939334144, 94863941558271,
+SNULL, 94863939444735, 94863941558271,
+STORE, 94863939334144, 94863939444735,
+STORE, 94863939444736, 94863941558271,
+ERASE, 94863939444736, 94863941558271,
+STORE, 94863941537792, 94863941550079,
+STORE, 94863941550080, 94863941558271,
+STORE, 139691047276544, 139691049529343,
+SNULL, 139691047419903, 139691049529343,
+STORE, 139691047276544, 139691047419903,
+STORE, 139691047419904, 139691049529343,
+ERASE, 139691047419904, 139691049529343,
+STORE, 139691049517056, 139691049525247,
+STORE, 139691049525248, 139691049529343,
+STORE, 140729281679360, 140729281683455,
+STORE, 140729281667072, 140729281679359,
+STORE, 139691049488384, 139691049517055,
+STORE, 139691049480192, 139691049488383,
+STORE, 139691043479552, 139691047276543,
+SNULL, 139691043479552, 139691045138431,
+STORE, 139691045138432, 139691047276543,
+STORE, 139691043479552, 139691045138431,
+SNULL, 139691047235583, 139691047276543,
+STORE, 139691045138432, 139691047235583,
+STORE, 139691047235584, 139691047276543,
+SNULL, 139691047235584, 139691047260159,
+STORE, 139691047260160, 139691047276543,
+STORE, 139691047235584, 139691047260159,
+ERASE, 139691047235584, 139691047260159,
+STORE, 139691047235584, 139691047260159,
+ERASE, 139691047260160, 139691047276543,
+STORE, 139691047260160, 139691047276543,
+SNULL, 139691047251967, 139691047260159,
+STORE, 139691047235584, 139691047251967,
+STORE, 139691047251968, 139691047260159,
+SNULL, 94863941545983, 94863941550079,
+STORE, 94863941537792, 94863941545983,
+STORE, 94863941545984, 94863941550079,
+SNULL, 139691049521151, 139691049525247,
+STORE, 139691049517056, 139691049521151,
+STORE, 139691049521152, 139691049525247,
+ERASE, 139691049488384, 139691049517055,
+STORE, 94863951294464, 94863951429631,
+STORE, 93998209294336, 93998209507327,
+STORE, 93998211604480, 93998211608575,
+STORE, 93998211608576, 93998211616767,
+STORE, 93998211616768, 93998211629055,
+STORE, 93998227210240, 93998227615743,
+STORE, 140243029913600, 140243031572479,
+STORE, 140243031572480, 140243033669631,
+STORE, 140243033669632, 140243033686015,
+STORE, 140243033686016, 140243033694207,
+STORE, 140243033694208, 140243033710591,
+STORE, 140243033710592, 140243033722879,
+STORE, 140243033722880, 140243035815935,
+STORE, 140243035815936, 140243035820031,
+STORE, 140243035820032, 140243035824127,
+STORE, 140243035824128, 140243035967487,
+STORE, 140243036336128, 140243038019583,
+STORE, 140243038019584, 140243038035967,
+STORE, 140243038064640, 140243038068735,
+STORE, 140243038068736, 140243038072831,
+STORE, 140243038072832, 140243038076927,
+STORE, 140734976479232, 140734976618495,
+STORE, 140734977978368, 140734977990655,
+STORE, 140734977990656, 140734977994751,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722742775808, 140737488351231,
+SNULL, 140722742783999, 140737488351231,
+STORE, 140722742775808, 140722742783999,
+STORE, 140722742644736, 140722742783999,
+STORE, 93857673662464, 93857675997183,
+SNULL, 93857673875455, 93857675997183,
+STORE, 93857673662464, 93857673875455,
+STORE, 93857673875456, 93857675997183,
+ERASE, 93857673875456, 93857675997183,
+STORE, 93857675972608, 93857675984895,
+STORE, 93857675984896, 93857675997183,
+STORE, 140629677498368, 140629679751167,
+SNULL, 140629677641727, 140629679751167,
+STORE, 140629677498368, 140629677641727,
+STORE, 140629677641728, 140629679751167,
+ERASE, 140629677641728, 140629679751167,
+STORE, 140629679738880, 140629679747071,
+STORE, 140629679747072, 140629679751167,
+STORE, 140722743222272, 140722743226367,
+STORE, 140722743209984, 140722743222271,
+STORE, 140629679710208, 140629679738879,
+STORE, 140629679702016, 140629679710207,
+STORE, 140629675384832, 140629677498367,
+SNULL, 140629675384832, 140629675397119,
+STORE, 140629675397120, 140629677498367,
+STORE, 140629675384832, 140629675397119,
+SNULL, 140629677490175, 140629677498367,
+STORE, 140629675397120, 140629677490175,
+STORE, 140629677490176, 140629677498367,
+ERASE, 140629677490176, 140629677498367,
+STORE, 140629677490176, 140629677498367,
+STORE, 140629671587840, 140629675384831,
+SNULL, 140629671587840, 140629673246719,
+STORE, 140629673246720, 140629675384831,
+STORE, 140629671587840, 140629673246719,
+SNULL, 140629675343871, 140629675384831,
+STORE, 140629673246720, 140629675343871,
+STORE, 140629675343872, 140629675384831,
+SNULL, 140629675343872, 140629675368447,
+STORE, 140629675368448, 140629675384831,
+STORE, 140629675343872, 140629675368447,
+ERASE, 140629675343872, 140629675368447,
+STORE, 140629675343872, 140629675368447,
+ERASE, 140629675368448, 140629675384831,
+STORE, 140629675368448, 140629675384831,
+STORE, 140629679693824, 140629679710207,
+SNULL, 140629675360255, 140629675368447,
+STORE, 140629675343872, 140629675360255,
+STORE, 140629675360256, 140629675368447,
+SNULL, 140629677494271, 140629677498367,
+STORE, 140629677490176, 140629677494271,
+STORE, 140629677494272, 140629677498367,
+SNULL, 93857675976703, 93857675984895,
+STORE, 93857675972608, 93857675976703,
+STORE, 93857675976704, 93857675984895,
+SNULL, 140629679742975, 140629679747071,
+STORE, 140629679738880, 140629679742975,
+STORE, 140629679742976, 140629679747071,
+ERASE, 140629679710208, 140629679738879,
+STORE, 93857705832448, 93857705967615,
+STORE, 140629678010368, 140629679693823,
+STORE, 93857705832448, 93857706102783,
+STORE, 93857705832448, 93857706237951,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735922421760, 140737488351231,
+SNULL, 140735922429951, 140737488351231,
+STORE, 140735922421760, 140735922429951,
+STORE, 140735922290688, 140735922429951,
+STORE, 94651136139264, 94651138363391,
+SNULL, 94651136249855, 94651138363391,
+STORE, 94651136139264, 94651136249855,
+STORE, 94651136249856, 94651138363391,
+ERASE, 94651136249856, 94651138363391,
+STORE, 94651138342912, 94651138355199,
+STORE, 94651138355200, 94651138363391,
+STORE, 140325788266496, 140325790519295,
+SNULL, 140325788409855, 140325790519295,
+STORE, 140325788266496, 140325788409855,
+STORE, 140325788409856, 140325790519295,
+ERASE, 140325788409856, 140325790519295,
+STORE, 140325790507008, 140325790515199,
+STORE, 140325790515200, 140325790519295,
+STORE, 140735923572736, 140735923576831,
+STORE, 140735923560448, 140735923572735,
+STORE, 140325790478336, 140325790507007,
+STORE, 140325790470144, 140325790478335,
+STORE, 140325784469504, 140325788266495,
+SNULL, 140325784469504, 140325786128383,
+STORE, 140325786128384, 140325788266495,
+STORE, 140325784469504, 140325786128383,
+SNULL, 140325788225535, 140325788266495,
+STORE, 140325786128384, 140325788225535,
+STORE, 140325788225536, 140325788266495,
+SNULL, 140325788225536, 140325788250111,
+STORE, 140325788250112, 140325788266495,
+STORE, 140325788225536, 140325788250111,
+ERASE, 140325788225536, 140325788250111,
+STORE, 140325788225536, 140325788250111,
+ERASE, 140325788250112, 140325788266495,
+STORE, 140325788250112, 140325788266495,
+SNULL, 140325788241919, 140325788250111,
+STORE, 140325788225536, 140325788241919,
+STORE, 140325788241920, 140325788250111,
+SNULL, 94651138351103, 94651138355199,
+STORE, 94651138342912, 94651138351103,
+STORE, 94651138351104, 94651138355199,
+SNULL, 140325790511103, 140325790515199,
+STORE, 140325790507008, 140325790511103,
+STORE, 140325790511104, 140325790515199,
+ERASE, 140325790478336, 140325790507007,
+STORE, 94651146297344, 94651146432511,
+STORE, 94212330168320, 94212330381311,
+STORE, 94212332478464, 94212332482559,
+STORE, 94212332482560, 94212332490751,
+STORE, 94212332490752, 94212332503039,
+STORE, 94212348891136, 94212349825023,
+STORE, 140611630604288, 140611632263167,
+STORE, 140611632263168, 140611634360319,
+STORE, 140611634360320, 140611634376703,
+STORE, 140611634376704, 140611634384895,
+STORE, 140611634384896, 140611634401279,
+STORE, 140611634401280, 140611634413567,
+STORE, 140611634413568, 140611636506623,
+STORE, 140611636506624, 140611636510719,
+STORE, 140611636510720, 140611636514815,
+STORE, 140611636514816, 140611636658175,
+STORE, 140611637026816, 140611638710271,
+STORE, 140611638710272, 140611638726655,
+STORE, 140611638755328, 140611638759423,
+STORE, 140611638759424, 140611638763519,
+STORE, 140611638763520, 140611638767615,
+STORE, 140726974533632, 140726974672895,
+STORE, 140726974943232, 140726974955519,
+STORE, 140726974955520, 140726974959615,
+STORE, 94572463521792, 94572463734783,
+STORE, 94572465831936, 94572465836031,
+STORE, 94572465836032, 94572465844223,
+STORE, 94572465844224, 94572465856511,
+STORE, 94572491534336, 94572492865535,
+STORE, 140644351492096, 140644353150975,
+STORE, 140644353150976, 140644355248127,
+STORE, 140644355248128, 140644355264511,
+STORE, 140644355264512, 140644355272703,
+STORE, 140644355272704, 140644355289087,
+STORE, 140644355289088, 140644355301375,
+STORE, 140644355301376, 140644357394431,
+STORE, 140644357394432, 140644357398527,
+STORE, 140644357398528, 140644357402623,
+STORE, 140644357402624, 140644357545983,
+STORE, 140644357914624, 140644359598079,
+STORE, 140644359598080, 140644359614463,
+STORE, 140644359643136, 140644359647231,
+STORE, 140644359647232, 140644359651327,
+STORE, 140644359651328, 140644359655423,
+STORE, 140727841824768, 140727841964031,
+STORE, 140727843188736, 140727843201023,
+STORE, 140727843201024, 140727843205119,
+STORE, 94144315457536, 94144315670527,
+STORE, 94144317767680, 94144317771775,
+STORE, 94144317771776, 94144317779967,
+STORE, 94144317779968, 94144317792255,
+STORE, 94144318369792, 94144320815103,
+STORE, 140316717645824, 140316719304703,
+STORE, 140316719304704, 140316721401855,
+STORE, 140316721401856, 140316721418239,
+STORE, 140316721418240, 140316721426431,
+STORE, 140316721426432, 140316721442815,
+STORE, 140316721442816, 140316721455103,
+STORE, 140316721455104, 140316723548159,
+STORE, 140316723548160, 140316723552255,
+STORE, 140316723552256, 140316723556351,
+STORE, 140316723556352, 140316723699711,
+STORE, 140316724068352, 140316725751807,
+STORE, 140316725751808, 140316725768191,
+STORE, 140316725796864, 140316725800959,
+STORE, 140316725800960, 140316725805055,
+STORE, 140316725805056, 140316725809151,
+STORE, 140725744283648, 140725744422911,
+STORE, 140725745852416, 140725745864703,
+STORE, 140725745864704, 140725745868799,
+STORE, 94646858846208, 94646859059199,
+STORE, 94646861156352, 94646861160447,
+STORE, 94646861160448, 94646861168639,
+STORE, 94646861168640, 94646861180927,
+STORE, 94646879805440, 94646881894399,
+STORE, 140435449745408, 140435451404287,
+STORE, 140435451404288, 140435453501439,
+STORE, 140435453501440, 140435453517823,
+STORE, 140435453517824, 140435453526015,
+STORE, 140435453526016, 140435453542399,
+STORE, 140435453542400, 140435453554687,
+STORE, 140435453554688, 140435455647743,
+STORE, 140435455647744, 140435455651839,
+STORE, 140435455651840, 140435455655935,
+STORE, 140435455655936, 140435455799295,
+STORE, 140435456167936, 140435457851391,
+STORE, 140435457851392, 140435457867775,
+STORE, 140435457896448, 140435457900543,
+STORE, 140435457900544, 140435457904639,
+STORE, 140435457904640, 140435457908735,
+STORE, 140721033818112, 140721033957375,
+STORE, 140721034018816, 140721034031103,
+STORE, 140721034031104, 140721034035199,
+STORE, 94872903438336, 94872903651327,
+STORE, 94872905748480, 94872905752575,
+STORE, 94872905752576, 94872905760767,
+STORE, 94872905760768, 94872905773055,
+STORE, 94872931246080, 94872931651583,
+STORE, 139771607810048, 139771609468927,
+STORE, 139771609468928, 139771611566079,
+STORE, 139771611566080, 139771611582463,
+STORE, 139771611582464, 139771611590655,
+STORE, 139771611590656, 139771611607039,
+STORE, 139771611607040, 139771611619327,
+STORE, 139771611619328, 139771613712383,
+STORE, 139771613712384, 139771613716479,
+STORE, 139771613716480, 139771613720575,
+STORE, 139771613720576, 139771613863935,
+STORE, 139771614232576, 139771615916031,
+STORE, 139771615916032, 139771615932415,
+STORE, 139771615961088, 139771615965183,
+STORE, 139771615965184, 139771615969279,
+STORE, 139771615969280, 139771615973375,
+STORE, 140725402931200, 140725403070463,
+STORE, 140725403852800, 140725403865087,
+STORE, 140725403865088, 140725403869183,
+STORE, 94740737736704, 94740737949695,
+STORE, 94740740046848, 94740740050943,
+STORE, 94740740050944, 94740740059135,
+STORE, 94740740059136, 94740740071423,
+STORE, 94740743249920, 94740744724479,
+STORE, 140640287010816, 140640288669695,
+STORE, 140640288669696, 140640290766847,
+STORE, 140640290766848, 140640290783231,
+STORE, 140640290783232, 140640290791423,
+STORE, 140640290791424, 140640290807807,
+STORE, 140640290807808, 140640290820095,
+STORE, 140640290820096, 140640292913151,
+STORE, 140640292913152, 140640292917247,
+STORE, 140640292917248, 140640292921343,
+STORE, 140640292921344, 140640293064703,
+STORE, 140640293433344, 140640295116799,
+STORE, 140640295116800, 140640295133183,
+STORE, 140640295161856, 140640295165951,
+STORE, 140640295165952, 140640295170047,
+STORE, 140640295170048, 140640295174143,
+STORE, 140725133303808, 140725133443071,
+STORE, 140725133684736, 140725133697023,
+STORE, 140725133697024, 140725133701119,
+STORE, 140737488347136, 140737488351231,
+STORE, 140722826371072, 140737488351231,
+SNULL, 140722826375167, 140737488351231,
+STORE, 140722826371072, 140722826375167,
+STORE, 140722826240000, 140722826375167,
+STORE, 94113818611712, 94113820835839,
+SNULL, 94113818722303, 94113820835839,
+STORE, 94113818611712, 94113818722303,
+STORE, 94113818722304, 94113820835839,
+ERASE, 94113818722304, 94113820835839,
+STORE, 94113820815360, 94113820827647,
+STORE, 94113820827648, 94113820835839,
+STORE, 139628194508800, 139628196761599,
+SNULL, 139628194652159, 139628196761599,
+STORE, 139628194508800, 139628194652159,
+STORE, 139628194652160, 139628196761599,
+ERASE, 139628194652160, 139628196761599,
+STORE, 139628196749312, 139628196757503,
+STORE, 139628196757504, 139628196761599,
+STORE, 140722826727424, 140722826731519,
+STORE, 140722826715136, 140722826727423,
+STORE, 139628196720640, 139628196749311,
+STORE, 139628196712448, 139628196720639,
+STORE, 139628190711808, 139628194508799,
+SNULL, 139628190711808, 139628192370687,
+STORE, 139628192370688, 139628194508799,
+STORE, 139628190711808, 139628192370687,
+SNULL, 139628194467839, 139628194508799,
+STORE, 139628192370688, 139628194467839,
+STORE, 139628194467840, 139628194508799,
+SNULL, 139628194467840, 139628194492415,
+STORE, 139628194492416, 139628194508799,
+STORE, 139628194467840, 139628194492415,
+ERASE, 139628194467840, 139628194492415,
+STORE, 139628194467840, 139628194492415,
+ERASE, 139628194492416, 139628194508799,
+STORE, 139628194492416, 139628194508799,
+SNULL, 139628194484223, 139628194492415,
+STORE, 139628194467840, 139628194484223,
+STORE, 139628194484224, 139628194492415,
+SNULL, 94113820823551, 94113820827647,
+STORE, 94113820815360, 94113820823551,
+STORE, 94113820823552, 94113820827647,
+SNULL, 139628196753407, 139628196757503,
+STORE, 139628196749312, 139628196753407,
+STORE, 139628196753408, 139628196757503,
+ERASE, 139628196720640, 139628196749311,
+STORE, 94113830850560, 94113830985727,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140731865833472, 140737488351231,
+SNULL, 140731865841663, 140737488351231,
+STORE, 140731865833472, 140731865841663,
+STORE, 140731865702400, 140731865841663,
+STORE, 94763339386880, 94763341611007,
+SNULL, 94763339497471, 94763341611007,
+STORE, 94763339386880, 94763339497471,
+STORE, 94763339497472, 94763341611007,
+ERASE, 94763339497472, 94763341611007,
+STORE, 94763341590528, 94763341602815,
+STORE, 94763341602816, 94763341611007,
+STORE, 139778398486528, 139778400739327,
+SNULL, 139778398629887, 139778400739327,
+STORE, 139778398486528, 139778398629887,
+STORE, 139778398629888, 139778400739327,
+ERASE, 139778398629888, 139778400739327,
+STORE, 139778400727040, 139778400735231,
+STORE, 139778400735232, 139778400739327,
+STORE, 140731865858048, 140731865862143,
+STORE, 140731865845760, 140731865858047,
+STORE, 139778400698368, 139778400727039,
+STORE, 139778400690176, 139778400698367,
+STORE, 139778394689536, 139778398486527,
+SNULL, 139778394689536, 139778396348415,
+STORE, 139778396348416, 139778398486527,
+STORE, 139778394689536, 139778396348415,
+SNULL, 139778398445567, 139778398486527,
+STORE, 139778396348416, 139778398445567,
+STORE, 139778398445568, 139778398486527,
+SNULL, 139778398445568, 139778398470143,
+STORE, 139778398470144, 139778398486527,
+STORE, 139778398445568, 139778398470143,
+ERASE, 139778398445568, 139778398470143,
+STORE, 139778398445568, 139778398470143,
+ERASE, 139778398470144, 139778398486527,
+STORE, 139778398470144, 139778398486527,
+SNULL, 139778398461951, 139778398470143,
+STORE, 139778398445568, 139778398461951,
+STORE, 139778398461952, 139778398470143,
+SNULL, 94763341598719, 94763341602815,
+STORE, 94763341590528, 94763341598719,
+STORE, 94763341598720, 94763341602815,
+SNULL, 139778400731135, 139778400735231,
+STORE, 139778400727040, 139778400731135,
+STORE, 139778400731136, 139778400735231,
+ERASE, 139778400698368, 139778400727039,
+STORE, 94763362197504, 94763362332671,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140737488338944, 140737488351231,
+STORE, 140732053192704, 140737488351231,
+SNULL, 140732053204991, 140737488351231,
+STORE, 140732053192704, 140732053204991,
+STORE, 140732053061632, 140732053204991,
+STORE, 4194304, 26279935,
+STORE, 28372992, 28454911,
+STORE, 28454912, 29806591,
+STORE, 140176018599936, 140176020852735,
+SNULL, 140176018743295, 140176020852735,
+STORE, 140176018599936, 140176018743295,
+STORE, 140176018743296, 140176020852735,
+ERASE, 140176018743296, 140176020852735,
+STORE, 140176020840448, 140176020848639,
+STORE, 140176020848640, 140176020852735,
+STORE, 140732053381120, 140732053385215,
+STORE, 140732053368832, 140732053381119,
+STORE, 140176020811776, 140176020840447,
+STORE, 140176020803584, 140176020811775,
+STORE, 140176014766080, 140176018599935,
+SNULL, 140176014766080, 140176016474111,
+STORE, 140176016474112, 140176018599935,
+STORE, 140176014766080, 140176016474111,
+SNULL, 140176018567167, 140176018599935,
+STORE, 140176016474112, 140176018567167,
+STORE, 140176018567168, 140176018599935,
+ERASE, 140176018567168, 140176018599935,
+STORE, 140176018567168, 140176018599935,
+STORE, 140176012570624, 140176014766079,
+SNULL, 140176012570624, 140176012664831,
+STORE, 140176012664832, 140176014766079,
+STORE, 140176012570624, 140176012664831,
+SNULL, 140176014757887, 140176014766079,
+STORE, 140176012664832, 140176014757887,
+STORE, 140176014757888, 140176014766079,
+ERASE, 140176014757888, 140176014766079,
+STORE, 140176014757888, 140176014766079,
+STORE, 140176010051584, 140176012570623,
+SNULL, 140176010051584, 140176010465279,
+STORE, 140176010465280, 140176012570623,
+STORE, 140176010051584, 140176010465279,
+SNULL, 140176012558335, 140176012570623,
+STORE, 140176010465280, 140176012558335,
+STORE, 140176012558336, 140176012570623,
+ERASE, 140176012558336, 140176012570623,
+STORE, 140176012558336, 140176012570623,
+STORE, 140176007417856, 140176010051583,
+SNULL, 140176007417856, 140176007946239,
+STORE, 140176007946240, 140176010051583,
+STORE, 140176007417856, 140176007946239,
+SNULL, 140176010043391, 140176010051583,
+STORE, 140176007946240, 140176010043391,
+STORE, 140176010043392, 140176010051583,
+ERASE, 140176010043392, 140176010051583,
+STORE, 140176010043392, 140176010051583,
+STORE, 140176005304320, 140176007417855,
+SNULL, 140176005304320, 140176005316607,
+STORE, 140176005316608, 140176007417855,
+STORE, 140176005304320, 140176005316607,
+SNULL, 140176007409663, 140176007417855,
+STORE, 140176005316608, 140176007409663,
+STORE, 140176007409664, 140176007417855,
+ERASE, 140176007409664, 140176007417855,
+STORE, 140176007409664, 140176007417855,
+STORE, 140176003100672, 140176005304319,
+SNULL, 140176003100672, 140176003203071,
+STORE, 140176003203072, 140176005304319,
+STORE, 140176003100672, 140176003203071,
+SNULL, 140176005296127, 140176005304319,
+STORE, 140176003203072, 140176005296127,
+STORE, 140176005296128, 140176005304319,
+ERASE, 140176005296128, 140176005304319,
+STORE, 140176005296128, 140176005304319,
+STORE, 140176020795392, 140176020811775,
+STORE, 140175999938560, 140176003100671,
+SNULL, 140175999938560, 140176000999423,
+STORE, 140176000999424, 140176003100671,
+STORE, 140175999938560, 140176000999423,
+SNULL, 140176003092479, 140176003100671,
+STORE, 140176000999424, 140176003092479,
+STORE, 140176003092480, 140176003100671,
+ERASE, 140176003092480, 140176003100671,
+STORE, 140176003092480, 140176003100671,
+STORE, 140175996141568, 140175999938559,
+SNULL, 140175996141568, 140175997800447,
+STORE, 140175997800448, 140175999938559,
+STORE, 140175996141568, 140175997800447,
+SNULL, 140175999897599, 140175999938559,
+STORE, 140175997800448, 140175999897599,
+STORE, 140175999897600, 140175999938559,
+SNULL, 140175999897600, 140175999922175,
+STORE, 140175999922176, 140175999938559,
+STORE, 140175999897600, 140175999922175,
+ERASE, 140175999897600, 140175999922175,
+STORE, 140175999897600, 140175999922175,
+ERASE, 140175999922176, 140175999938559,
+STORE, 140175999922176, 140175999938559,
+STORE, 140176020783104, 140176020811775,
+SNULL, 140175999913983, 140175999922175,
+STORE, 140175999897600, 140175999913983,
+STORE, 140175999913984, 140175999922175,
+SNULL, 140176003096575, 140176003100671,
+STORE, 140176003092480, 140176003096575,
+STORE, 140176003096576, 140176003100671,
+SNULL, 140176005300223, 140176005304319,
+STORE, 140176005296128, 140176005300223,
+STORE, 140176005300224, 140176005304319,
+SNULL, 140176007413759, 140176007417855,
+STORE, 140176007409664, 140176007413759,
+STORE, 140176007413760, 140176007417855,
+SNULL, 140176010047487, 140176010051583,
+STORE, 140176010043392, 140176010047487,
+STORE, 140176010047488, 140176010051583,
+SNULL, 140176012566527, 140176012570623,
+STORE, 140176012558336, 140176012566527,
+STORE, 140176012566528, 140176012570623,
+SNULL, 140176014761983, 140176014766079,
+STORE, 140176014757888, 140176014761983,
+STORE, 140176014761984, 140176014766079,
+SNULL, 140176018571263, 140176018599935,
+STORE, 140176018567168, 140176018571263,
+STORE, 140176018571264, 140176018599935,
+SNULL, 28405759, 28454911,
+STORE, 28372992, 28405759,
+STORE, 28405760, 28454911,
+SNULL, 140176020844543, 140176020848639,
+STORE, 140176020840448, 140176020844543,
+STORE, 140176020844544, 140176020848639,
+ERASE, 140176020811776, 140176020840447,
+STORE, 53080064, 53215231,
+STORE, 140176019099648, 140176020783103,
+STORE, 140176020836352, 140176020840447,
+STORE, 140176018964480, 140176019099647,
+STORE, 53080064, 53358591,
+STORE, 140175994044416, 140175996141567,
+STORE, 140176020828160, 140176020840447,
+STORE, 140176020819968, 140176020840447,
+STORE, 140176020783104, 140176020819967,
+STORE, 140176018948096, 140176019099647,
+STORE, 53080064, 53493759,
+STORE, 53080064, 53649407,
+STORE, 140176018939904, 140176019099647,
+STORE, 140176018931712, 140176019099647,
+STORE, 53080064, 53784575,
+STORE, 53080064, 53919743,
+STORE, 140176018915328, 140176019099647,
+STORE, 140176018907136, 140176019099647,
+STORE, 53080064, 54059007,
+STORE, 140175993769984, 140175996141567,
+STORE, 140176018747392, 140176019099647,
+STORE, 53080064, 54198271,
+SNULL, 54190079, 54198271,
+STORE, 53080064, 54190079,
+STORE, 54190080, 54198271,
+ERASE, 54190080, 54198271,
+SNULL, 54181887, 54190079,
+STORE, 53080064, 54181887,
+STORE, 54181888, 54190079,
+ERASE, 54181888, 54190079,
+SNULL, 54173695, 54181887,
+STORE, 53080064, 54173695,
+STORE, 54173696, 54181887,
+ERASE, 54173696, 54181887,
+SNULL, 54165503, 54173695,
+STORE, 53080064, 54165503,
+STORE, 54165504, 54173695,
+ERASE, 54165504, 54173695,
+STORE, 140175993753600, 140175996141567,
+STORE, 140175993688064, 140175996141567,
+STORE, 140175993655296, 140175996141567,
+STORE, 140175991558144, 140175996141567,
+STORE, 140175991492608, 140175996141567,
+STORE, 53080064, 54312959,
+STORE, 140175991361536, 140175996141567,
+STORE, 140175991099392, 140175996141567,
+STORE, 140175991091200, 140175996141567,
+STORE, 140175991074816, 140175996141567,
+STORE, 140175991066624, 140175996141567,
+STORE, 140175991058432, 140175996141567,
+STORE, 53080064, 54448127,
+SNULL, 54439935, 54448127,
+STORE, 53080064, 54439935,
+STORE, 54439936, 54448127,
+ERASE, 54439936, 54448127,
+SNULL, 54431743, 54439935,
+STORE, 53080064, 54431743,
+STORE, 54431744, 54439935,
+ERASE, 54431744, 54439935,
+SNULL, 54419455, 54431743,
+STORE, 53080064, 54419455,
+STORE, 54419456, 54431743,
+ERASE, 54419456, 54431743,
+SNULL, 54403071, 54419455,
+STORE, 53080064, 54403071,
+STORE, 54403072, 54419455,
+ERASE, 54403072, 54419455,
+STORE, 140175991042048, 140175996141567,
+STORE, 53080064, 54538239,
+SNULL, 54534143, 54538239,
+STORE, 53080064, 54534143,
+STORE, 54534144, 54538239,
+ERASE, 54534144, 54538239,
+SNULL, 54530047, 54534143,
+STORE, 53080064, 54530047,
+STORE, 54530048, 54534143,
+ERASE, 54530048, 54534143,
+SNULL, 54525951, 54530047,
+STORE, 53080064, 54525951,
+STORE, 54525952, 54530047,
+ERASE, 54525952, 54530047,
+SNULL, 54521855, 54525951,
+STORE, 53080064, 54521855,
+STORE, 54521856, 54525951,
+ERASE, 54521856, 54525951,
+SNULL, 54517759, 54521855,
+STORE, 53080064, 54517759,
+STORE, 54517760, 54521855,
+ERASE, 54517760, 54521855,
+SNULL, 54513663, 54517759,
+STORE, 53080064, 54513663,
+STORE, 54513664, 54517759,
+ERASE, 54513664, 54517759,
+SNULL, 54509567, 54513663,
+STORE, 53080064, 54509567,
+STORE, 54509568, 54513663,
+ERASE, 54509568, 54513663,
+STORE, 140175991025664, 140175996141567,
+STORE, 140175990992896, 140175996141567,
+STORE, 53080064, 54644735,
+SNULL, 54628351, 54644735,
+STORE, 53080064, 54628351,
+STORE, 54628352, 54644735,
+ERASE, 54628352, 54644735,
+SNULL, 54616063, 54628351,
+STORE, 53080064, 54616063,
+STORE, 54616064, 54628351,
+ERASE, 54616064, 54628351,
+STORE, 140175988895744, 140175996141567,
+STORE, 53080064, 54767615,
+STORE, 140175988879360, 140175996141567,
+STORE, 140175988617216, 140175996141567,
+STORE, 140175988609024, 140175996141567,
+STORE, 140175988600832, 140175996141567,
+STORE, 53080064, 54906879,
+SNULL, 54898687, 54906879,
+STORE, 53080064, 54898687,
+STORE, 54898688, 54906879,
+ERASE, 54898688, 54906879,
+SNULL, 54853631, 54898687,
+STORE, 53080064, 54853631,
+STORE, 54853632, 54898687,
+ERASE, 54853632, 54898687,
+STORE, 140175986503680, 140175996141567,
+STORE, 53080064, 54996991,
+STORE, 140175986495488, 140175996141567,
+STORE, 140175986487296, 140175996141567,
+STORE, 140175985438720, 140175996141567,
+STORE, 53080064, 55136255,
+STORE, 140175985405952, 140175996141567,
+STORE, 140175985139712, 140175996141567,
+SNULL, 140176018964479, 140176019099647,
+STORE, 140176018747392, 140176018964479,
+STORE, 140176018964480, 140176019099647,
+ERASE, 140176018964480, 140176019099647,
+STORE, 140175983042560, 140175996141567,
+STORE, 140175982518272, 140175996141567,
+STORE, 140175980421120, 140175996141567,
+STORE, 53080064, 55287807,
+STORE, 53080064, 55427071,
+STORE, 140176019091456, 140176019099647,
+STORE, 140176019083264, 140176019099647,
+STORE, 140176019075072, 140176019099647,
+STORE, 140176019066880, 140176019099647,
+STORE, 140176019058688, 140176019099647,
+STORE, 140175980158976, 140175996141567,
+STORE, 140176019050496, 140176019099647,
+STORE, 140176019042304, 140176019099647,
+STORE, 140176019034112, 140176019099647,
+STORE, 140176019025920, 140176019099647,
+STORE, 140176019017728, 140176019099647,
+STORE, 140176019009536, 140176019099647,
+STORE, 140176019001344, 140176019099647,
+STORE, 140176018993152, 140176019099647,
+STORE, 140176018984960, 140176019099647,
+STORE, 140176018976768, 140176019099647,
+STORE, 140176018968576, 140176019099647,
+STORE, 140175978061824, 140175996141567,
+STORE, 53080064, 55603199,
+STORE, 140175978029056, 140175996141567,
+STORE, 140175977996288, 140175996141567,
+STORE, 53080064, 55738367,
+STORE, 53080064, 55881727,
+STORE, 140175977963520, 140175996141567,
+STORE, 140175977930752, 140175996141567,
+STORE, 53080064, 56041471,
+STORE, 140175977897984, 140175996141567,
+STORE, 140175977865216, 140175996141567,
+SNULL, 55881727, 56041471,
+STORE, 53080064, 55881727,
+STORE, 55881728, 56041471,
+ERASE, 55881728, 56041471,
+SNULL, 55721983, 55881727,
+STORE, 53080064, 55721983,
+STORE, 55721984, 55881727,
+ERASE, 55721984, 55881727,
+SNULL, 55570431, 55721983,
+STORE, 53080064, 55570431,
+STORE, 55570432, 55721983,
+ERASE, 55570432, 55721983,
+STORE, 140175977857024, 140175996141567,
+STORE, 140175975759872, 140175996141567,
+STORE, 53080064, 55754751,
+STORE, 53080064, 55943167,
+STORE, 140175975751680, 140175996141567,
+STORE, 140175975743488, 140175996141567,
+STORE, 140175975735296, 140175996141567,
+STORE, 140175975727104, 140175996141567,
+STORE, 140175975718912, 140175996141567,
+STORE, 140175975710720, 140175996141567,
+STORE, 140175975702528, 140175996141567,
+STORE, 140175975694336, 140175996141567,
+STORE, 140175975686144, 140175996141567,
+STORE, 140175975677952, 140175996141567,
+STORE, 140175975669760, 140175996141567,
+STORE, 140175974621184, 140175996141567,
+STORE, 140175974612992, 140175996141567,
+STORE, 53080064, 56139775,
+STORE, 140175972515840, 140175996141567,
+STORE, 53080064, 56401919,
+STORE, 140175970418688, 140175996141567,
+STORE, 140175970410496, 140175996141567,
+STORE, 140175970402304, 140175996141567,
+STORE, 140175970394112, 140175996141567,
+STORE, 53080064, 56569855,
+STORE, 140175969865728, 140175996141567,
+SNULL, 140175985139711, 140175996141567,
+STORE, 140175969865728, 140175985139711,
+STORE, 140175985139712, 140175996141567,
+SNULL, 140175985139712, 140175985405951,
+STORE, 140175985405952, 140175996141567,
+STORE, 140175985139712, 140175985405951,
+ERASE, 140175985139712, 140175985405951,
+STORE, 140175965671424, 140175985139711,
+STORE, 140175985397760, 140175996141567,
+STORE, 140175985389568, 140175996141567,
+STORE, 140175985381376, 140175996141567,
+STORE, 140175985373184, 140175996141567,
+STORE, 140175985364992, 140175996141567,
+STORE, 140175985356800, 140175996141567,
+STORE, 140175985348608, 140175996141567,
+STORE, 140175985340416, 140175996141567,
+STORE, 140175985332224, 140175996141567,
+STORE, 140175985324032, 140175996141567,
+STORE, 140175985315840, 140175996141567,
+STORE, 140175985307648, 140175996141567,
+STORE, 140175985299456, 140175996141567,
+STORE, 140175985291264, 140175996141567,
+STORE, 140175985283072, 140175996141567,
+STORE, 140175985274880, 140175996141567,
+STORE, 140175963574272, 140175985139711,
+STORE, 140175985266688, 140175996141567,
+STORE, 140175961477120, 140175985139711,
+STORE, 53080064, 56831999,
+STORE, 140175959379968, 140175985139711,
+STORE, 140175985258496, 140175996141567,
+STORE, 140175957282816, 140175985139711,
+STORE, 140175985250304, 140175996141567,
+STORE, 140175985242112, 140175996141567,
+STORE, 140175985233920, 140175996141567,
+STORE, 140175985225728, 140175996141567,
+STORE, 140175985217536, 140175996141567,
+STORE, 140175957151744, 140175985139711,
+STORE, 140175956627456, 140175985139711,
+SNULL, 140175980158975, 140175985139711,
+STORE, 140175956627456, 140175980158975,
+STORE, 140175980158976, 140175985139711,
+SNULL, 140175980158976, 140175980421119,
+STORE, 140175980421120, 140175985139711,
+STORE, 140175980158976, 140175980421119,
+ERASE, 140175980158976, 140175980421119,
+STORE, 140175954530304, 140175980158975,
+STORE, 140175985209344, 140175996141567,
+STORE, 53080064, 57094143,
+STORE, 140175952433152, 140175980158975,
+STORE, 140175985192960, 140175996141567,
+STORE, 140175985184768, 140175996141567,
+STORE, 140175985176576, 140175996141567,
+STORE, 140175985168384, 140175996141567,
+STORE, 140175985160192, 140175996141567,
+STORE, 140175985152000, 140175996141567,
+STORE, 140175985143808, 140175996141567,
+STORE, 140175980412928, 140175985139711,
+STORE, 140175980404736, 140175985139711,
+STORE, 140175980396544, 140175985139711,
+STORE, 140175980388352, 140175985139711,
+STORE, 140175980380160, 140175985139711,
+STORE, 140175980371968, 140175985139711,
+STORE, 140175980363776, 140175985139711,
+STORE, 140175980355584, 140175985139711,
+STORE, 140175980347392, 140175985139711,
+STORE, 140175980339200, 140175985139711,
+STORE, 53080064, 57356287,
+SNULL, 140176018747392, 140176018907135,
+STORE, 140176018907136, 140176018964479,
+STORE, 140176018747392, 140176018907135,
+ERASE, 140176018747392, 140176018907135,
+STORE, 140175952146432, 140175980158975,
+STORE, 140175950049280, 140175980158975,
+SNULL, 140175952146431, 140175980158975,
+STORE, 140175950049280, 140175952146431,
+STORE, 140175952146432, 140175980158975,
+SNULL, 140175952146432, 140175952433151,
+STORE, 140175952433152, 140175980158975,
+STORE, 140175952146432, 140175952433151,
+ERASE, 140175952146432, 140175952433151,
+STORE, 140176018898944, 140176018964479,
+STORE, 53080064, 57749503,
+STORE, 140175949520896, 140175952146431,
+STORE, 140175947423744, 140175952146431,
+SNULL, 140175993769983, 140175996141567,
+STORE, 140175985143808, 140175993769983,
+STORE, 140175993769984, 140175996141567,
+SNULL, 140175993769984, 140175994044415,
+STORE, 140175994044416, 140175996141567,
+STORE, 140175993769984, 140175994044415,
+ERASE, 140175993769984, 140175994044415,
+STORE, 140176018890752, 140176018964479,
+STORE, 140176018882560, 140176018964479,
+STORE, 140176018874368, 140176018964479,
+STORE, 140176018866176, 140176018964479,
+STORE, 140176018849792, 140176018964479,
+STORE, 140176018841600, 140176018964479,
+STORE, 140176018825216, 140176018964479,
+STORE, 140176018817024, 140176018964479,
+STORE, 140176018800640, 140176018964479,
+STORE, 140176018792448, 140176018964479,
+STORE, 140176018759680, 140176018964479,
+STORE, 140176018751488, 140176018964479,
+STORE, 140175994028032, 140175996141567,
+STORE, 140176018743296, 140176018964479,
+STORE, 140175994011648, 140175996141567,
+STORE, 140175994003456, 140175996141567,
+STORE, 140175993987072, 140175996141567,
+STORE, 140175993978880, 140175996141567,
+STORE, 140175993946112, 140175996141567,
+STORE, 140175993937920, 140175996141567,
+STORE, 140175993921536, 140175996141567,
+STORE, 140175993913344, 140175996141567,
+STORE, 140175993896960, 140175996141567,
+STORE, 140175993888768, 140175996141567,
+STORE, 140175993872384, 140175996141567,
+STORE, 140175993864192, 140175996141567,
+STORE, 140175993831424, 140175996141567,
+STORE, 140175993823232, 140175996141567,
+STORE, 140175993806848, 140175996141567,
+STORE, 140175993798656, 140175996141567,
+STORE, 140175993782272, 140175996141567,
+STORE, 140175993774080, 140175996141567,
+STORE, 140175980322816, 140175985139711,
+STORE, 140175980314624, 140175985139711,
+STORE, 140175980281856, 140175985139711,
+STORE, 140175980273664, 140175985139711,
+STORE, 140175980257280, 140175985139711,
+STORE, 140175945326592, 140175952146431,
+STORE, 140175980249088, 140175985139711,
+STORE, 140175980232704, 140175985139711,
+STORE, 140175980224512, 140175985139711,
+STORE, 140175980208128, 140175985139711,
+STORE, 140175980199936, 140175985139711,
+STORE, 140175980167168, 140175985139711,
+STORE, 140175952433152, 140175985139711,
+STORE, 140175952416768, 140175985139711,
+STORE, 140175952408576, 140175985139711,
+STORE, 140175952392192, 140175985139711,
+STORE, 140175952384000, 140175985139711,
+STORE, 140175952367616, 140175985139711,
+STORE, 140175943229440, 140175952146431,
+STORE, 140175952359424, 140175985139711,
+STORE, 140175952326656, 140175985139711,
+STORE, 140175952318464, 140175985139711,
+STORE, 140175952302080, 140175985139711,
+STORE, 140175952293888, 140175985139711,
+STORE, 140175952277504, 140175985139711,
+STORE, 140175952269312, 140175985139711,
+STORE, 140175952252928, 140175985139711,
+STORE, 140175952244736, 140175985139711,
+STORE, 140175952211968, 140175985139711,
+STORE, 140175952203776, 140175985139711,
+STORE, 140175952187392, 140175985139711,
+STORE, 140175952179200, 140175985139711,
+STORE, 140175952162816, 140175985139711,
+STORE, 140175952154624, 140175985139711,
+STORE, 140175943213056, 140175952146431,
+STORE, 140175943213056, 140175985139711,
+STORE, 140175943180288, 140175985139711,
+STORE, 140175943172096, 140175985139711,
+STORE, 140175943155712, 140175985139711,
+STORE, 140175943147520, 140175985139711,
+STORE, 140175943131136, 140175985139711,
+STORE, 140175943122944, 140175985139711,
+STORE, 140175943106560, 140175985139711,
+STORE, 140175943098368, 140175985139711,
+STORE, 140175943065600, 140175985139711,
+STORE, 140175943057408, 140175985139711,
+STORE, 140175943041024, 140175985139711,
+STORE, 140175943032832, 140175985139711,
+STORE, 140175943016448, 140175985139711,
+STORE, 140175943008256, 140175985139711,
+STORE, 140175942991872, 140175985139711,
+STORE, 140175942983680, 140175985139711,
+STORE, 140175942950912, 140175985139711,
+STORE, 140175942942720, 140175985139711,
+STORE, 140175942926336, 140175985139711,
+STORE, 140175942918144, 140175985139711,
+STORE, 140175942901760, 140175985139711,
+STORE, 140175942893568, 140175985139711,
+STORE, 140175942877184, 140175985139711,
+STORE, 140175942868992, 140175985139711,
+STORE, 140175942836224, 140175985139711,
+STORE, 140175942828032, 140175985139711,
+STORE, 140175942811648, 140175985139711,
+STORE, 140175942803456, 140175985139711,
+STORE, 140175942787072, 140175985139711,
+STORE, 140175942778880, 140175985139711,
+STORE, 140175942762496, 140175985139711,
+STORE, 140175942754304, 140175985139711,
+STORE, 140175942721536, 140175985139711,
+STORE, 140175942713344, 140175985139711,
+STORE, 140175942696960, 140175985139711,
+STORE, 140175942688768, 140175985139711,
+STORE, 140175942672384, 140175985139711,
+STORE, 140175942664192, 140175985139711,
+STORE, 140175942647808, 140175985139711,
+STORE, 140175942639616, 140175985139711,
+STORE, 140175942606848, 140175985139711,
+STORE, 140175942598656, 140175985139711,
+STORE, 140175942582272, 140175985139711,
+STORE, 140175942574080, 140175985139711,
+STORE, 140175942557696, 140175985139711,
+STORE, 140175942549504, 140175985139711,
+STORE, 140175942533120, 140175985139711,
+STORE, 140175942524928, 140175985139711,
+STORE, 140175942492160, 140175985139711,
+STORE, 140175942483968, 140175985139711,
+STORE, 140175942467584, 140175985139711,
+STORE, 140175942459392, 140175985139711,
+STORE, 140175942443008, 140175985139711,
+STORE, 140175942434816, 140175985139711,
+STORE, 140175942418432, 140175985139711,
+STORE, 140175942410240, 140175985139711,
+STORE, 140175942377472, 140175985139711,
+STORE, 140175942369280, 140175985139711,
+STORE, 140175942352896, 140175985139711,
+STORE, 140175942344704, 140175985139711,
+STORE, 140175942328320, 140175985139711,
+STORE, 140175942320128, 140175985139711,
+STORE, 140175942303744, 140175985139711,
+STORE, 140175942295552, 140175985139711,
+STORE, 140175942262784, 140175985139711,
+STORE, 140175942254592, 140175985139711,
+STORE, 140175942238208, 140175985139711,
+STORE, 140175942230016, 140175985139711,
+STORE, 140175942213632, 140175985139711,
+STORE, 140175942205440, 140175985139711,
+STORE, 140175942189056, 140175985139711,
+STORE, 140175942180864, 140175985139711,
+STORE, 140175942148096, 140175985139711,
+STORE, 140175942139904, 140175985139711,
+STORE, 140175942123520, 140175985139711,
+STORE, 140175942115328, 140175985139711,
+STORE, 140175942098944, 140175985139711,
+STORE, 140175942090752, 140175985139711,
+STORE, 140175942074368, 140175985139711,
+STORE, 140175942066176, 140175985139711,
+STORE, 140175942033408, 140175985139711,
+STORE, 140175942025216, 140175985139711,
+STORE, 140175942008832, 140175985139711,
+STORE, 140175942000640, 140175985139711,
+STORE, 140175941984256, 140175985139711,
+STORE, 140175941976064, 140175985139711,
+STORE, 140175941959680, 140175985139711,
+STORE, 140175939862528, 140175985139711,
+STORE, 140175939854336, 140175985139711,
+STORE, 140175939821568, 140175985139711,
+STORE, 140175939813376, 140175985139711,
+STORE, 140175939796992, 140175985139711,
+STORE, 140175939788800, 140175985139711,
+STORE, 140175939772416, 140175985139711,
+STORE, 140175939764224, 140175985139711,
+STORE, 140175939747840, 140175985139711,
+STORE, 140175939739648, 140175985139711,
+STORE, 140175939706880, 140175985139711,
+STORE, 140175939698688, 140175985139711,
+STORE, 140175939682304, 140175985139711,
+STORE, 140175939674112, 140175985139711,
+STORE, 140175939657728, 140175985139711,
+STORE, 140175939649536, 140175985139711,
+STORE, 140175939633152, 140175985139711,
+STORE, 140175939624960, 140175985139711,
+STORE, 140175939592192, 140175985139711,
+STORE, 140175939584000, 140175985139711,
+STORE, 140175939567616, 140175985139711,
+STORE, 140175939559424, 140175985139711,
+STORE, 140175939543040, 140175985139711,
+STORE, 140175939534848, 140175985139711,
+STORE, 140175939518464, 140175985139711,
+STORE, 140175939510272, 140175985139711,
+STORE, 140175939477504, 140175985139711,
+STORE, 140175939469312, 140175985139711,
+STORE, 140175939452928, 140175985139711,
+STORE, 140175939444736, 140175985139711,
+STORE, 140175939428352, 140175985139711,
+STORE, 140175939420160, 140175985139711,
+STORE, 140175939403776, 140175985139711,
+STORE, 140175939395584, 140175985139711,
+STORE, 140175939362816, 140175985139711,
+STORE, 140175939354624, 140175985139711,
+STORE, 140175939338240, 140175985139711,
+STORE, 140175939330048, 140175985139711,
+STORE, 140175939313664, 140175985139711,
+STORE, 140175939305472, 140175985139711,
+STORE, 140175939289088, 140175985139711,
+STORE, 140175939280896, 140175985139711,
+STORE, 140175939248128, 140175985139711,
+STORE, 140175939239936, 140175985139711,
+STORE, 140175939223552, 140175985139711,
+STORE, 140175939215360, 140175985139711,
+STORE, 140175939198976, 140175985139711,
+STORE, 140175939190784, 140175985139711,
+STORE, 140175939174400, 140175985139711,
+STORE, 140175939166208, 140175985139711,
+STORE, 140175939133440, 140175985139711,
+STORE, 140175939125248, 140175985139711,
+STORE, 140175939108864, 140175985139711,
+STORE, 140175939100672, 140175985139711,
+STORE, 140175939084288, 140175985139711,
+STORE, 140175939076096, 140175985139711,
+STORE, 140175939059712, 140175985139711,
+STORE, 140175939051520, 140175985139711,
+STORE, 140175939018752, 140175985139711,
+STORE, 140175939010560, 140175985139711,
+STORE, 140175938994176, 140175985139711,
+STORE, 140175938985984, 140175985139711,
+STORE, 140175938969600, 140175985139711,
+STORE, 140175938961408, 140175985139711,
+STORE, 140175938945024, 140175985139711,
+STORE, 140175938936832, 140175985139711,
+STORE, 140175938904064, 140175985139711,
+STORE, 140175938895872, 140175985139711,
+STORE, 140175938879488, 140175985139711,
+STORE, 140175938871296, 140175985139711,
+STORE, 140175938854912, 140175985139711,
+STORE, 140175938846720, 140175985139711,
+STORE, 140175938830336, 140175985139711,
+STORE, 140175938822144, 140175985139711,
+STORE, 140175938789376, 140175985139711,
+STORE, 140175938781184, 140175985139711,
+STORE, 140175938764800, 140175985139711,
+STORE, 140175938756608, 140175985139711,
+STORE, 140175938740224, 140175985139711,
+STORE, 140175938732032, 140175985139711,
+STORE, 140175938715648, 140175985139711,
+STORE, 140175938707456, 140175985139711,
+STORE, 140175938674688, 140175985139711,
+STORE, 140175938666496, 140175985139711,
+STORE, 140175938650112, 140175985139711,
+STORE, 140175938641920, 140175985139711,
+STORE, 140175938625536, 140175985139711,
+STORE, 140175938617344, 140175985139711,
+STORE, 140175938600960, 140175985139711,
+STORE, 140175938592768, 140175985139711,
+STORE, 140175938560000, 140175985139711,
+STORE, 140175938551808, 140175985139711,
+STORE, 140175938535424, 140175985139711,
+STORE, 140175938527232, 140175985139711,
+STORE, 140175938510848, 140175985139711,
+STORE, 140175938502656, 140175985139711,
+STORE, 140175938486272, 140175985139711,
+STORE, 140175938478080, 140175985139711,
+STORE, 140175938445312, 140175985139711,
+STORE, 140175938437120, 140175985139711,
+STORE, 140175938420736, 140175985139711,
+STORE, 140175938412544, 140175985139711,
+STORE, 140175938396160, 140175985139711,
+STORE, 140175938387968, 140175985139711,
+STORE, 140175938371584, 140175985139711,
+STORE, 140175938363392, 140175985139711,
+STORE, 140175938330624, 140175985139711,
+STORE, 140175938322432, 140175985139711,
+STORE, 140175938306048, 140175985139711,
+STORE, 140175938297856, 140175985139711,
+STORE, 140175938281472, 140175985139711,
+STORE, 140175938273280, 140175985139711,
+STORE, 140175938256896, 140175985139711,
+STORE, 140175938248704, 140175985139711,
+STORE, 140175938215936, 140175985139711,
+STORE, 140175938207744, 140175985139711,
+STORE, 140175938191360, 140175985139711,
+STORE, 140175938183168, 140175985139711,
+STORE, 140175938166784, 140175985139711,
+STORE, 140175938158592, 140175985139711,
+STORE, 140175938142208, 140175985139711,
+STORE, 140175936045056, 140175985139711,
+STORE, 140175936036864, 140175985139711,
+STORE, 140175936004096, 140175985139711,
+STORE, 140175935995904, 140175985139711,
+STORE, 140175935979520, 140175985139711,
+STORE, 140175935971328, 140175985139711,
+STORE, 140175935954944, 140175985139711,
+STORE, 140175935946752, 140175985139711,
+STORE, 140175935930368, 140175985139711,
+STORE, 140175935922176, 140175985139711,
+STORE, 140175935889408, 140175985139711,
+STORE, 140175935881216, 140175985139711,
+STORE, 140175935864832, 140175985139711,
+STORE, 140175935856640, 140175985139711,
+STORE, 140175935840256, 140175985139711,
+STORE, 140175935832064, 140175985139711,
+STORE, 140175935815680, 140175985139711,
+STORE, 140175935807488, 140175985139711,
+STORE, 140175935774720, 140175985139711,
+STORE, 140175935766528, 140175985139711,
+STORE, 140175935750144, 140175985139711,
+STORE, 140175935741952, 140175985139711,
+STORE, 140175935725568, 140175985139711,
+STORE, 140175935717376, 140175985139711,
+STORE, 140175935700992, 140175985139711,
+STORE, 140175935692800, 140175985139711,
+STORE, 140175935660032, 140175985139711,
+STORE, 140175935651840, 140175985139711,
+STORE, 140175935635456, 140175985139711,
+STORE, 140175935627264, 140175985139711,
+STORE, 140175935610880, 140175985139711,
+STORE, 140175935602688, 140175985139711,
+STORE, 140175935586304, 140175985139711,
+STORE, 140175935578112, 140175985139711,
+STORE, 140175935545344, 140175985139711,
+STORE, 140175935537152, 140175985139711,
+STORE, 140175935520768, 140175985139711,
+STORE, 140175935512576, 140175985139711,
+STORE, 140175935496192, 140175985139711,
+STORE, 140175935488000, 140175985139711,
+STORE, 140175935471616, 140175985139711,
+STORE, 140175935463424, 140175985139711,
+STORE, 140175935430656, 140175985139711,
+STORE, 140175935422464, 140175985139711,
+STORE, 140175935406080, 140175985139711,
+STORE, 140175935397888, 140175985139711,
+STORE, 140175935381504, 140175985139711,
+STORE, 140175935373312, 140175985139711,
+STORE, 140175935356928, 140175985139711,
+STORE, 140175935348736, 140175985139711,
+STORE, 140175935315968, 140175985139711,
+STORE, 140175935307776, 140175985139711,
+STORE, 140175935291392, 140175985139711,
+STORE, 140175935283200, 140175985139711,
+STORE, 140175935266816, 140175985139711,
+STORE, 140175935258624, 140175985139711,
+STORE, 140175935242240, 140175985139711,
+STORE, 140175935234048, 140175985139711,
+STORE, 140175935201280, 140175985139711,
+STORE, 140175935193088, 140175985139711,
+STORE, 140175935176704, 140175985139711,
+STORE, 140175935168512, 140175985139711,
+STORE, 140175935152128, 140175985139711,
+STORE, 140175935143936, 140175985139711,
+STORE, 140175935127552, 140175985139711,
+STORE, 140175935119360, 140175985139711,
+STORE, 140175935086592, 140175985139711,
+STORE, 140175935078400, 140175985139711,
+STORE, 140175935062016, 140175985139711,
+STORE, 140175935053824, 140175985139711,
+STORE, 140175935037440, 140175985139711,
+STORE, 140175935029248, 140175985139711,
+STORE, 140175935012864, 140175985139711,
+STORE, 140175935004672, 140175985139711,
+STORE, 140175934971904, 140175985139711,
+STORE, 140175934963712, 140175985139711,
+STORE, 140175934947328, 140175985139711,
+STORE, 140175934939136, 140175985139711,
+STORE, 140175934922752, 140175985139711,
+STORE, 140175934914560, 140175985139711,
+STORE, 140175934898176, 140175985139711,
+STORE, 140175934889984, 140175985139711,
+STORE, 140175934857216, 140175985139711,
+STORE, 140175934849024, 140175985139711,
+STORE, 140175934832640, 140175985139711,
+STORE, 140175934824448, 140175985139711,
+STORE, 140175934808064, 140175985139711,
+STORE, 140175934799872, 140175985139711,
+STORE, 140175934783488, 140175985139711,
+STORE, 140175934775296, 140175985139711,
+STORE, 140175934742528, 140175985139711,
+STORE, 140175934734336, 140175985139711,
+STORE, 140175934717952, 140175985139711,
+STORE, 140175934709760, 140175985139711,
+STORE, 140175934693376, 140175985139711,
+STORE, 140175934685184, 140175985139711,
+STORE, 140175934668800, 140175985139711,
+STORE, 140175934660608, 140175985139711,
+STORE, 140175934627840, 140175985139711,
+STORE, 140175934619648, 140175985139711,
+STORE, 140175934603264, 140175985139711,
+STORE, 140175934595072, 140175985139711,
+STORE, 140175934578688, 140175985139711,
+STORE, 140175934570496, 140175985139711,
+STORE, 140175934554112, 140175985139711,
+STORE, 140175934545920, 140175985139711,
+STORE, 140175934513152, 140175985139711,
+STORE, 140175934504960, 140175985139711,
+STORE, 140175934488576, 140175985139711,
+STORE, 140175934480384, 140175985139711,
+STORE, 140175934464000, 140175985139711,
+STORE, 140175934455808, 140175985139711,
+STORE, 140175934439424, 140175985139711,
+STORE, 140175934431232, 140175985139711,
+STORE, 140175934398464, 140175985139711,
+STORE, 140175934390272, 140175985139711,
+STORE, 140175934373888, 140175985139711,
+STORE, 140175934365696, 140175985139711,
+STORE, 140175934349312, 140175985139711,
+STORE, 140175934341120, 140175985139711,
+STORE, 140175934324736, 140175985139711,
+STORE, 140175932227584, 140175985139711,
+STORE, 140175932219392, 140175985139711,
+STORE, 140175932186624, 140175985139711,
+STORE, 140175932178432, 140175985139711,
+STORE, 140175932162048, 140175985139711,
+STORE, 140175932153856, 140175985139711,
+STORE, 140175932137472, 140175985139711,
+STORE, 53080064, 57884671,
+STORE, 140175932129280, 140175985139711,
+STORE, 140175932112896, 140175985139711,
+STORE, 140175932104704, 140175985139711,
+STORE, 140175932071936, 140175985139711,
+STORE, 140175932063744, 140175985139711,
+STORE, 140175932047360, 140175985139711,
+STORE, 140175932039168, 140175985139711,
+STORE, 140175932022784, 140175985139711,
+STORE, 140175932014592, 140175985139711,
+STORE, 140175931998208, 140175985139711,
+STORE, 140175931990016, 140175985139711,
+STORE, 140175931957248, 140175985139711,
+STORE, 140175931949056, 140175985139711,
+STORE, 140175931932672, 140175985139711,
+STORE, 140175931924480, 140175985139711,
+STORE, 140175931908096, 140175985139711,
+STORE, 140175931899904, 140175985139711,
+STORE, 140175931883520, 140175985139711,
+STORE, 140175931875328, 140175985139711,
+STORE, 140175931842560, 140175985139711,
+STORE, 140175931834368, 140175985139711,
+STORE, 140175931817984, 140175985139711,
+STORE, 140175931809792, 140175985139711,
+STORE, 140175931793408, 140175985139711,
+STORE, 140175931785216, 140175985139711,
+STORE, 140175931768832, 140175985139711,
+STORE, 140175931760640, 140175985139711,
+STORE, 140175931727872, 140175985139711,
+STORE, 140175931719680, 140175985139711,
+STORE, 140175931703296, 140175985139711,
+STORE, 140175931695104, 140175985139711,
+STORE, 140175931678720, 140175985139711,
+STORE, 140175931670528, 140175985139711,
+STORE, 140175931654144, 140175985139711,
+STORE, 140175931645952, 140175985139711,
+STORE, 140175931613184, 140175985139711,
+STORE, 140175931604992, 140175985139711,
+STORE, 140175931588608, 140175985139711,
+STORE, 140175931580416, 140175985139711,
+STORE, 140175931564032, 140175985139711,
+STORE, 140175931555840, 140175985139711,
+STORE, 140175931539456, 140175985139711,
+STORE, 140175931531264, 140175985139711,
+STORE, 140175931498496, 140175985139711,
+STORE, 140175931490304, 140175985139711,
+STORE, 140175931473920, 140175985139711,
+STORE, 140175931465728, 140175985139711,
+STORE, 140175931449344, 140175985139711,
+STORE, 140175931441152, 140175985139711,
+STORE, 140175931424768, 140175985139711,
+STORE, 140175931416576, 140175985139711,
+STORE, 140175931383808, 140175985139711,
+STORE, 140175931375616, 140175985139711,
+STORE, 140175931359232, 140175985139711,
+STORE, 140175931351040, 140175985139711,
+STORE, 140175931334656, 140175985139711,
+STORE, 140175931326464, 140175985139711,
+STORE, 140175931310080, 140175985139711,
+STORE, 140175931301888, 140175985139711,
+STORE, 140175931269120, 140175985139711,
+STORE, 140175931260928, 140175985139711,
+STORE, 140175931244544, 140175985139711,
+STORE, 140175931236352, 140175985139711,
+STORE, 140175931219968, 140175985139711,
+STORE, 140175931211776, 140175985139711,
+STORE, 140175931195392, 140175985139711,
+STORE, 140175931187200, 140175985139711,
+STORE, 140175931154432, 140175985139711,
+STORE, 140175931146240, 140175985139711,
+STORE, 140175931129856, 140175985139711,
+STORE, 140175931121664, 140175985139711,
+STORE, 140175931105280, 140175985139711,
+STORE, 140175931097088, 140175985139711,
+STORE, 140175931080704, 140175985139711,
+STORE, 140175931072512, 140175985139711,
+STORE, 140175931039744, 140175985139711,
+STORE, 140175931031552, 140175985139711,
+STORE, 140175931015168, 140175985139711,
+STORE, 140175931006976, 140175985139711,
+STORE, 140175930990592, 140175985139711,
+STORE, 140175930982400, 140175985139711,
+STORE, 140175930966016, 140175985139711,
+STORE, 140175930957824, 140175985139711,
+STORE, 140175930925056, 140175985139711,
+STORE, 140175930916864, 140175985139711,
+STORE, 140175930900480, 140175985139711,
+STORE, 140175930892288, 140175985139711,
+STORE, 140175930875904, 140175985139711,
+STORE, 140175930867712, 140175985139711,
+STORE, 140175930851328, 140175985139711,
+STORE, 140175930843136, 140175985139711,
+STORE, 140175930810368, 140175985139711,
+STORE, 140175930802176, 140175985139711,
+STORE, 140175930785792, 140175985139711,
+STORE, 140175930777600, 140175985139711,
+STORE, 140175930761216, 140175985139711,
+STORE, 140175930753024, 140175985139711,
+STORE, 140175930736640, 140175985139711,
+STORE, 140175930728448, 140175985139711,
+STORE, 140175930695680, 140175985139711,
+STORE, 140175930687488, 140175985139711,
+STORE, 140175930671104, 140175985139711,
+STORE, 140175930662912, 140175985139711,
+STORE, 140175930646528, 140175985139711,
+STORE, 140175930638336, 140175985139711,
+STORE, 140175930621952, 140175985139711,
+STORE, 140175930613760, 140175985139711,
+STORE, 140175930580992, 140175985139711,
+STORE, 140175930572800, 140175985139711,
+STORE, 140175930556416, 140175985139711,
+STORE, 140175930548224, 140175985139711,
+STORE, 140175930531840, 140175985139711,
+STORE, 140175930523648, 140175985139711,
+STORE, 140175930507264, 140175985139711,
+STORE, 140175928410112, 140175985139711,
+STORE, 140175928401920, 140175985139711,
+STORE, 140175928369152, 140175985139711,
+STORE, 140175928360960, 140175985139711,
+STORE, 140175928344576, 140175985139711,
+STORE, 140175928336384, 140175985139711,
+STORE, 140175928320000, 140175985139711,
+STORE, 140175928311808, 140175985139711,
+STORE, 140175928295424, 140175985139711,
+STORE, 140175927242752, 140175985139711,
+SNULL, 140175956627455, 140175985139711,
+STORE, 140175927242752, 140175956627455,
+STORE, 140175956627456, 140175985139711,
+ };
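(For orientation, each `setNN[]` block in this hunk is a flat trace of maple-tree VMA operations: an opcode (`STORE`, `SNULL`, or `ERASE`) followed by the inclusive start and end of the affected byte range, i.e. three `unsigned long`s per record, which the test harness replays against the tree under test. The loop below is a minimal, self-contained sketch of that decoding step only; the enum values, the `replay()` helper, and the two-record sample trace are illustrative stand-ins, not part of this patch, and the real harness applies each record to a maple tree rather than printing it.)

#include <stdio.h>

/* Illustrative opcodes; the traces encode one of these per record. */
enum { SNULL, STORE, ERASE };

static const char *opname[] = { "SNULL", "STORE", "ERASE" };

/* Hypothetical replay helper: walk the trace three entries at a time,
 * decoding opcode, inclusive range start, inclusive range end. */
static void replay(const unsigned long *set, unsigned long count)
{
	unsigned long i;

	for (i = 0; i + 2 < count; i += 3)
		printf("%-5s [%lu, %lu]\n",
		       opname[set[i]], set[i + 1], set[i + 2]);
}

int main(void)
{
	/* Two-record sample with the same shape as the sets above. */
	static const unsigned long sample[] = {
		STORE, 140737488347136UL, 140737488351231UL,
		ERASE, 140737488347136UL, 140737488351231UL,
	};

	replay(sample, sizeof(sample) / sizeof(sample[0]));
	return 0;
}
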
+ static const unsigned long set24[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140735281639424, 140737488351231,
+SNULL, 140735281643519, 140737488351231,
+STORE, 140735281639424, 140735281643519,
+STORE, 140735281508352, 140735281643519,
+STORE, 94717834911744, 94717834928127,
+SNULL, 94717834915839, 94717834928127,
+STORE, 94717834911744, 94717834915839,
+STORE, 94717834915840, 94717834928127,
+ERASE, 94717834915840, 94717834928127,
+STORE, 94717834919936, 94717834928127,
+STORE, 140428246065152, 140428248317951,
+SNULL, 140428246208511, 140428248317951,
+STORE, 140428246065152, 140428246208511,
+STORE, 140428246208512, 140428248317951,
+ERASE, 140428246208512, 140428248317951,
+STORE, 140428248305664, 140428248313855,
+STORE, 140428248313856, 140428248317951,
+STORE, 140735281811456, 140735281815551,
+STORE, 140735281799168, 140735281811455,
+STORE, 140428248297472, 140428248305663,
+STORE, 140428243841024, 140428246065151,
+SNULL, 140428245491711, 140428246065151,
+STORE, 140428243841024, 140428245491711,
+STORE, 140428245491712, 140428246065151,
+SNULL, 140428245491712, 140428246061055,
+STORE, 140428246061056, 140428246065151,
+STORE, 140428245491712, 140428246061055,
+ERASE, 140428245491712, 140428246061055,
+STORE, 140428245491712, 140428246061055,
+ERASE, 140428246061056, 140428246065151,
+STORE, 140428246061056, 140428246065151,
+STORE, 140428248268800, 140428248297471,
+STORE, 140428241625088, 140428243841023,
+SNULL, 140428241625088, 140428241723391,
+STORE, 140428241723392, 140428243841023,
+STORE, 140428241625088, 140428241723391,
+SNULL, 140428243816447, 140428243841023,
+STORE, 140428241723392, 140428243816447,
+STORE, 140428243816448, 140428243841023,
+SNULL, 140428243816448, 140428243824639,
+STORE, 140428243824640, 140428243841023,
+STORE, 140428243816448, 140428243824639,
+ERASE, 140428243816448, 140428243824639,
+STORE, 140428243816448, 140428243824639,
+ERASE, 140428243824640, 140428243841023,
+STORE, 140428243824640, 140428243841023,
+STORE, 140428237828096, 140428241625087,
+SNULL, 140428237828096, 140428239486975,
+STORE, 140428239486976, 140428241625087,
+STORE, 140428237828096, 140428239486975,
+SNULL, 140428241584127, 140428241625087,
+STORE, 140428239486976, 140428241584127,
+STORE, 140428241584128, 140428241625087,
+SNULL, 140428241584128, 140428241608703,
+STORE, 140428241608704, 140428241625087,
+STORE, 140428241584128, 140428241608703,
+ERASE, 140428241584128, 140428241608703,
+STORE, 140428241584128, 140428241608703,
+ERASE, 140428241608704, 140428241625087,
+STORE, 140428241608704, 140428241625087,
+STORE, 140428235567104, 140428237828095,
+SNULL, 140428235567104, 140428235718655,
+STORE, 140428235718656, 140428237828095,
+STORE, 140428235567104, 140428235718655,
+SNULL, 140428237811711, 140428237828095,
+STORE, 140428235718656, 140428237811711,
+STORE, 140428237811712, 140428237828095,
+SNULL, 140428237811712, 140428237819903,
+STORE, 140428237819904, 140428237828095,
+STORE, 140428237811712, 140428237819903,
+ERASE, 140428237811712, 140428237819903,
+STORE, 140428237811712, 140428237819903,
+ERASE, 140428237819904, 140428237828095,
+STORE, 140428237819904, 140428237828095,
+STORE, 140428233445376, 140428235567103,
+SNULL, 140428233445376, 140428233461759,
+STORE, 140428233461760, 140428235567103,
+STORE, 140428233445376, 140428233461759,
+SNULL, 140428235558911, 140428235567103,
+STORE, 140428233461760, 140428235558911,
+STORE, 140428235558912, 140428235567103,
+ERASE, 140428235558912, 140428235567103,
+STORE, 140428235558912, 140428235567103,
+STORE, 140428231315456, 140428233445375,
+SNULL, 140428231315456, 140428231344127,
+STORE, 140428231344128, 140428233445375,
+STORE, 140428231315456, 140428231344127,
+SNULL, 140428233437183, 140428233445375,
+STORE, 140428231344128, 140428233437183,
+STORE, 140428233437184, 140428233445375,
+ERASE, 140428233437184, 140428233445375,
+STORE, 140428233437184, 140428233445375,
+STORE, 140428248260608, 140428248268799,
+STORE, 140428229062656, 140428231315455,
+SNULL, 140428229062656, 140428229214207,
+STORE, 140428229214208, 140428231315455,
+STORE, 140428229062656, 140428229214207,
+SNULL, 140428231307263, 140428231315455,
+STORE, 140428229214208, 140428231307263,
+STORE, 140428231307264, 140428231315455,
+ERASE, 140428231307264, 140428231315455,
+STORE, 140428231307264, 140428231315455,
+STORE, 140428226891776, 140428229062655,
+SNULL, 140428226891776, 140428226961407,
+STORE, 140428226961408, 140428229062655,
+STORE, 140428226891776, 140428226961407,
+SNULL, 140428229054463, 140428229062655,
+STORE, 140428226961408, 140428229054463,
+STORE, 140428229054464, 140428229062655,
+ERASE, 140428229054464, 140428229062655,
+STORE, 140428229054464, 140428229062655,
+STORE, 140428223680512, 140428226891775,
+SNULL, 140428223680512, 140428224757759,
+STORE, 140428224757760, 140428226891775,
+STORE, 140428223680512, 140428224757759,
+SNULL, 140428226854911, 140428226891775,
+STORE, 140428224757760, 140428226854911,
+STORE, 140428226854912, 140428226891775,
+ERASE, 140428226854912, 140428226891775,
+STORE, 140428226854912, 140428226891775,
+STORE, 140428221546496, 140428223680511,
+SNULL, 140428221546496, 140428221575167,
+STORE, 140428221575168, 140428223680511,
+STORE, 140428221546496, 140428221575167,
+SNULL, 140428223672319, 140428223680511,
+STORE, 140428221575168, 140428223672319,
+STORE, 140428223672320, 140428223680511,
+ERASE, 140428223672320, 140428223680511,
+STORE, 140428223672320, 140428223680511,
+STORE, 140428219236352, 140428221546495,
+SNULL, 140428219236352, 140428219441151,
+STORE, 140428219441152, 140428221546495,
+STORE, 140428219236352, 140428219441151,
+SNULL, 140428221538303, 140428221546495,
+STORE, 140428219441152, 140428221538303,
+STORE, 140428221538304, 140428221546495,
+ERASE, 140428221538304, 140428221546495,
+STORE, 140428221538304, 140428221546495,
+STORE, 140428216852480, 140428219236351,
+SNULL, 140428216852480, 140428217044991,
+STORE, 140428217044992, 140428219236351,
+STORE, 140428216852480, 140428217044991,
+SNULL, 140428219138047, 140428219236351,
+STORE, 140428217044992, 140428219138047,
+STORE, 140428219138048, 140428219236351,
+ERASE, 140428219138048, 140428219236351,
+STORE, 140428219138048, 140428219236351,
+STORE, 140428248252416, 140428248268799,
+STORE, 140428214284288, 140428216852479,
+SNULL, 140428214284288, 140428214751231,
+STORE, 140428214751232, 140428216852479,
+STORE, 140428214284288, 140428214751231,
+SNULL, 140428216844287, 140428216852479,
+STORE, 140428214751232, 140428216844287,
+STORE, 140428216844288, 140428216852479,
+ERASE, 140428216844288, 140428216852479,
+STORE, 140428216844288, 140428216852479,
+STORE, 140428212170752, 140428214284287,
+SNULL, 140428212170752, 140428212183039,
+STORE, 140428212183040, 140428214284287,
+STORE, 140428212170752, 140428212183039,
+SNULL, 140428214276095, 140428214284287,
+STORE, 140428212183040, 140428214276095,
+STORE, 140428214276096, 140428214284287,
+ERASE, 140428214276096, 140428214284287,
+STORE, 140428214276096, 140428214284287,
+STORE, 140428209991680, 140428212170751,
+SNULL, 140428209991680, 140428210069503,
+STORE, 140428210069504, 140428212170751,
+STORE, 140428209991680, 140428210069503,
+SNULL, 140428212162559, 140428212170751,
+STORE, 140428210069504, 140428212162559,
+STORE, 140428212162560, 140428212170751,
+ERASE, 140428212162560, 140428212170751,
+STORE, 140428212162560, 140428212170751,
+STORE, 140428207874048, 140428209991679,
+SNULL, 140428207874048, 140428207890431,
+STORE, 140428207890432, 140428209991679,
+STORE, 140428207874048, 140428207890431,
+SNULL, 140428209983487, 140428209991679,
+STORE, 140428207890432, 140428209983487,
+STORE, 140428209983488, 140428209991679,
+ERASE, 140428209983488, 140428209991679,
+STORE, 140428209983488, 140428209991679,
+STORE, 140428248244224, 140428248268799,
+STORE, 140428248231936, 140428248268799,
+SNULL, 140428241600511, 140428241608703,
+STORE, 140428241584128, 140428241600511,
+STORE, 140428241600512, 140428241608703,
+SNULL, 140428209987583, 140428209991679,
+STORE, 140428209983488, 140428209987583,
+STORE, 140428209987584, 140428209991679,
+SNULL, 140428212166655, 140428212170751,
+STORE, 140428212162560, 140428212166655,
+STORE, 140428212166656, 140428212170751,
+SNULL, 140428214280191, 140428214284287,
+STORE, 140428214276096, 140428214280191,
+STORE, 140428214280192, 140428214284287,
+SNULL, 140428243820543, 140428243824639,
+STORE, 140428243816448, 140428243820543,
+STORE, 140428243820544, 140428243824639,
+SNULL, 140428216848383, 140428216852479,
+STORE, 140428216844288, 140428216848383,
+STORE, 140428216848384, 140428216852479,
+SNULL, 140428219232255, 140428219236351,
+STORE, 140428219138048, 140428219232255,
+STORE, 140428219232256, 140428219236351,
+SNULL, 140428221542399, 140428221546495,
+STORE, 140428221538304, 140428221542399,
+STORE, 140428221542400, 140428221546495,
+SNULL, 140428223676415, 140428223680511,
+STORE, 140428223672320, 140428223676415,
+STORE, 140428223676416, 140428223680511,
+SNULL, 140428226863103, 140428226891775,
+STORE, 140428226854912, 140428226863103,
+STORE, 140428226863104, 140428226891775,
+SNULL, 140428229058559, 140428229062655,
+STORE, 140428229054464, 140428229058559,
+STORE, 140428229058560, 140428229062655,
+SNULL, 140428231311359, 140428231315455,
+STORE, 140428231307264, 140428231311359,
+STORE, 140428231311360, 140428231315455,
+SNULL, 140428233441279, 140428233445375,
+STORE, 140428233437184, 140428233441279,
+STORE, 140428233441280, 140428233445375,
+SNULL, 140428235563007, 140428235567103,
+STORE, 140428235558912, 140428235563007,
+STORE, 140428235563008, 140428235567103,
+SNULL, 140428237815807, 140428237819903,
+STORE, 140428237811712, 140428237815807,
+STORE, 140428237815808, 140428237819903,
+SNULL, 140428246056959, 140428246061055,
+STORE, 140428245491712, 140428246056959,
+STORE, 140428246056960, 140428246061055,
+SNULL, 94717834924031, 94717834928127,
+STORE, 94717834919936, 94717834924031,
+STORE, 94717834924032, 94717834928127,
+SNULL, 140428248309759, 140428248313855,
+STORE, 140428248305664, 140428248309759,
+STORE, 140428248309760, 140428248313855,
+ERASE, 140428248268800, 140428248297471,
+STORE, 94717843058688, 94717843193855,
+STORE, 94749677137920, 94749677559807,
+STORE, 94749677563904, 94749677604863,
+STORE, 94749677604864, 94749677608959,
+STORE, 94749710970880, 94749711241215,
+STORE, 140490884894720, 140490884935679,
+STORE, 140490884935680, 140490887032831,
+STORE, 140490887032832, 140490887036927,
+STORE, 140490887036928, 140490887041023,
+STORE, 140490887041024, 140490887065599,
+STORE, 140490887065600, 140490887110655,
+STORE, 140490887110656, 140490889203711,
+STORE, 140490889203712, 140490889207807,
+STORE, 140490889207808, 140490889211903,
+STORE, 140490889211904, 140490889293823,
+STORE, 140490889293824, 140490891390975,
+STORE, 140490891390976, 140490891395071,
+STORE, 140490891395072, 140490891399167,
+STORE, 140490891399168, 140490891407359,
+STORE, 140490891407360, 140490891436031,
+STORE, 140490891436032, 140490893529087,
+STORE, 140490893529088, 140490893533183,
+STORE, 140490893533184, 140490893537279,
+STORE, 140490893537280, 140490901979135,
+STORE, 140490901979136, 140490901991423,
+STORE, 140490901991424, 140490904084479,
+STORE, 140490904084480, 140490904088575,
+STORE, 140490904088576, 140490904092671,
+STORE, 140490904092672, 140490904559615,
+STORE, 140490904559616, 140490906652671,
+STORE, 140490906652672, 140490906656767,
+STORE, 140490906656768, 140490906660863,
+STORE, 140490906660864, 140490906677247,
+STORE, 140490906677248, 140490908770303,
+STORE, 140490908770304, 140490908774399,
+STORE, 140490908774400, 140490908778495,
+STORE, 140490908778496, 140490908794879,
+STORE, 140490908794880, 140490910887935,
+STORE, 140490910887936, 140490910892031,
+STORE, 140490910892032, 140490910896127,
+STORE, 140490910896128, 140490912555007,
+STORE, 140490912555008, 140490914652159,
+STORE, 140490914652160, 140490914668543,
+STORE, 140490914668544, 140490914676735,
+STORE, 140490914676736, 140490914693119,
+STORE, 140490914693120, 140490914791423,
+STORE, 140490914791424, 140490916884479,
+STORE, 140490916884480, 140490916888575,
+STORE, 140490916888576, 140490916892671,
+STORE, 140490916892672, 140490916909055,
+STORE, 140490916909056, 140490916937727,
+STORE, 140490916937728, 140490919030783,
+STORE, 140490919030784, 140490919034879,
+STORE, 140490919034880, 140490919038975,
+STORE, 140490919038976, 140490919190527,
+STORE, 140490919190528, 140490921283583,
+STORE, 140490921283584, 140490921287679,
+STORE, 140490921287680, 140490921291775,
+STORE, 140490921291776, 140490921299967,
+STORE, 140490921299968, 140490921390079,
+STORE, 140490921390080, 140490923483135,
+STORE, 140490923483136, 140490923487231,
+STORE, 140490923487232, 140490923491327,
+STORE, 140490923491328, 140490923757567,
+STORE, 140490923757568, 140490925850623,
+STORE, 140490925850624, 140490925867007,
+STORE, 140490925867008, 140490925871103,
+STORE, 140490925871104, 140490925875199,
+STORE, 140490925875200, 140490925903871,
+STORE, 140490925903872, 140490928001023,
+STORE, 140490928001024, 140490928005119,
+STORE, 140490928005120, 140490928009215,
+STORE, 140490928009216, 140490928152575,
+STORE, 140490930184192, 140490930221055,
+STORE, 140490930221056, 140490930237439,
+STORE, 140490930237440, 140490930241535,
+STORE, 140490930241536, 140490930245631,
+STORE, 140490930245632, 140490930249727,
+STORE, 140490930249728, 140490930253823,
+STORE, 140490930253824, 140490930257919,
+STORE, 140490930257920, 140490930262015,
+STORE, 140724611694592, 140724611829759,
+STORE, 140724612427776, 140724612440063,
+STORE, 140724612440064, 140724612444159,
+STORE, 94103163662336, 94103163772927,
+STORE, 94103165865984, 94103165874175,
+STORE, 94103165874176, 94103165878271,
+STORE, 94103165878272, 94103165886463,
+STORE, 94103182548992, 94103182684159,
+STORE, 140092694708224, 140092696367103,
+STORE, 140092696367104, 140092698464255,
+STORE, 140092698464256, 140092698480639,
+STORE, 140092698480640, 140092698488831,
+STORE, 140092698488832, 140092698505215,
+STORE, 140092698505216, 140092698648575,
+STORE, 140092700708864, 140092700717055,
+STORE, 140092700745728, 140092700749823,
+STORE, 140092700749824, 140092700753919,
+STORE, 140092700753920, 140092700758015,
+STORE, 140736800911360, 140736801046527,
+STORE, 140736802308096, 140736802320383,
+STORE, 140736802320384, 140736802324479,
+STORE, 93948802064384, 93948802174975,
+STORE, 93948804268032, 93948804276223,
+STORE, 93948804276224, 93948804280319,
+STORE, 93948804280320, 93948804288511,
+STORE, 93948806266880, 93948806402047,
+STORE, 140222999113728, 140223000772607,
+STORE, 140223000772608, 140223002869759,
+STORE, 140223002869760, 140223002886143,
+STORE, 140223002886144, 140223002894335,
+STORE, 140223002894336, 140223002910719,
+STORE, 140223002910720, 140223003054079,
+STORE, 140223005114368, 140223005122559,
+STORE, 140223005151232, 140223005155327,
+STORE, 140223005155328, 140223005159423,
+STORE, 140223005159424, 140223005163519,
+STORE, 140720877506560, 140720877641727,
+STORE, 140720878231552, 140720878243839,
+STORE, 140720878243840, 140720878247935,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733232087040, 140737488351231,
+SNULL, 140733232091135, 140737488351231,
+STORE, 140733232087040, 140733232091135,
+STORE, 140733231955968, 140733232091135,
+STORE, 4194304, 5128191,
+STORE, 7221248, 7241727,
+STORE, 7241728, 7249919,
+STORE, 140161681321984, 140161683574783,
+SNULL, 140161681465343, 140161683574783,
+STORE, 140161681321984, 140161681465343,
+STORE, 140161681465344, 140161683574783,
+ERASE, 140161681465344, 140161683574783,
+STORE, 140161683562496, 140161683570687,
+STORE, 140161683570688, 140161683574783,
+STORE, 140733232214016, 140733232218111,
+STORE, 140733232201728, 140733232214015,
+STORE, 140161683533824, 140161683562495,
+STORE, 140161683525632, 140161683533823,
+STORE, 140161678159872, 140161681321983,
+SNULL, 140161678159872, 140161679220735,
+STORE, 140161679220736, 140161681321983,
+STORE, 140161678159872, 140161679220735,
+SNULL, 140161681313791, 140161681321983,
+STORE, 140161679220736, 140161681313791,
+STORE, 140161681313792, 140161681321983,
+ERASE, 140161681313792, 140161681321983,
+STORE, 140161681313792, 140161681321983,
+STORE, 140161674362880, 140161678159871,
+SNULL, 140161674362880, 140161676021759,
+STORE, 140161676021760, 140161678159871,
+STORE, 140161674362880, 140161676021759,
+SNULL, 140161678118911, 140161678159871,
+STORE, 140161676021760, 140161678118911,
+STORE, 140161678118912, 140161678159871,
+SNULL, 140161678118912, 140161678143487,
+STORE, 140161678143488, 140161678159871,
+STORE, 140161678118912, 140161678143487,
+ERASE, 140161678118912, 140161678143487,
+STORE, 140161678118912, 140161678143487,
+ERASE, 140161678143488, 140161678159871,
+STORE, 140161678143488, 140161678159871,
+STORE, 140161683513344, 140161683533823,
+SNULL, 140161678135295, 140161678143487,
+STORE, 140161678118912, 140161678135295,
+STORE, 140161678135296, 140161678143487,
+SNULL, 140161681317887, 140161681321983,
+STORE, 140161681313792, 140161681317887,
+STORE, 140161681317888, 140161681321983,
+SNULL, 7233535, 7241727,
+STORE, 7221248, 7233535,
+STORE, 7233536, 7241727,
+SNULL, 140161683566591, 140161683570687,
+STORE, 140161683562496, 140161683566591,
+STORE, 140161683566592, 140161683570687,
+ERASE, 140161683533824, 140161683562495,
+STORE, 25477120, 25612287,
+STORE, 25477120, 25759743,
+STORE, 140161681829888, 140161683513343,
+STORE, 25477120, 25915391,
+STORE, 25477120, 26054655,
+SNULL, 25800703, 26054655,
+STORE, 25477120, 25800703,
+STORE, 25800704, 26054655,
+ERASE, 25800704, 26054655,
+STORE, 140737488347136, 140737488351231,
+STORE, 140723218452480, 140737488351231,
+SNULL, 140723218456575, 140737488351231,
+STORE, 140723218452480, 140723218456575,
+STORE, 140723218321408, 140723218456575,
+STORE, 4194304, 26279935,
+STORE, 28372992, 28454911,
+STORE, 28454912, 29806591,
+STORE, 140398872264704, 140398874517503,
+SNULL, 140398872408063, 140398874517503,
+STORE, 140398872264704, 140398872408063,
+STORE, 140398872408064, 140398874517503,
+ERASE, 140398872408064, 140398874517503,
+STORE, 140398874505216, 140398874513407,
+STORE, 140398874513408, 140398874517503,
+STORE, 140723219247104, 140723219251199,
+STORE, 140723219234816, 140723219247103,
+STORE, 140398874476544, 140398874505215,
+STORE, 140398874468352, 140398874476543,
+STORE, 140398868430848, 140398872264703,
+SNULL, 140398868430848, 140398870138879,
+STORE, 140398870138880, 140398872264703,
+STORE, 140398868430848, 140398870138879,
+SNULL, 140398872231935, 140398872264703,
+STORE, 140398870138880, 140398872231935,
+STORE, 140398872231936, 140398872264703,
+ERASE, 140398872231936, 140398872264703,
+STORE, 140398872231936, 140398872264703,
+STORE, 140398866235392, 140398868430847,
+SNULL, 140398866235392, 140398866329599,
+STORE, 140398866329600, 140398868430847,
+STORE, 140398866235392, 140398866329599,
+SNULL, 140398868422655, 140398868430847,
+STORE, 140398866329600, 140398868422655,
+STORE, 140398868422656, 140398868430847,
+ERASE, 140398868422656, 140398868430847,
+STORE, 140398868422656, 140398868430847,
+STORE, 140398863716352, 140398866235391,
+SNULL, 140398863716352, 140398864130047,
+STORE, 140398864130048, 140398866235391,
+STORE, 140398863716352, 140398864130047,
+SNULL, 140398866223103, 140398866235391,
+STORE, 140398864130048, 140398866223103,
+STORE, 140398866223104, 140398866235391,
+ERASE, 140398866223104, 140398866235391,
+STORE, 140398866223104, 140398866235391,
+STORE, 140398861082624, 140398863716351,
+SNULL, 140398861082624, 140398861611007,
+STORE, 140398861611008, 140398863716351,
+STORE, 140398861082624, 140398861611007,
+SNULL, 140398863708159, 140398863716351,
+STORE, 140398861611008, 140398863708159,
+STORE, 140398863708160, 140398863716351,
+ERASE, 140398863708160, 140398863716351,
+STORE, 140398863708160, 140398863716351,
+STORE, 140398858969088, 140398861082623,
+SNULL, 140398858969088, 140398858981375,
+STORE, 140398858981376, 140398861082623,
+STORE, 140398858969088, 140398858981375,
+SNULL, 140398861074431, 140398861082623,
+STORE, 140398858981376, 140398861074431,
+STORE, 140398861074432, 140398861082623,
+ERASE, 140398861074432, 140398861082623,
+STORE, 140398861074432, 140398861082623,
+STORE, 140398856765440, 140398858969087,
+SNULL, 140398856765440, 140398856867839,
+STORE, 140398856867840, 140398858969087,
+STORE, 140398856765440, 140398856867839,
+SNULL, 140398858960895, 140398858969087,
+STORE, 140398856867840, 140398858960895,
+STORE, 140398858960896, 140398858969087,
+ERASE, 140398858960896, 140398858969087,
+STORE, 140398858960896, 140398858969087,
+STORE, 140398874460160, 140398874476543,
+STORE, 140398853603328, 140398856765439,
+SNULL, 140398853603328, 140398854664191,
+STORE, 140398854664192, 140398856765439,
+STORE, 140398853603328, 140398854664191,
+SNULL, 140398856757247, 140398856765439,
+STORE, 140398854664192, 140398856757247,
+STORE, 140398856757248, 140398856765439,
+ERASE, 140398856757248, 140398856765439,
+STORE, 140398856757248, 140398856765439,
+STORE, 140398849806336, 140398853603327,
+SNULL, 140398849806336, 140398851465215,
+STORE, 140398851465216, 140398853603327,
+STORE, 140398849806336, 140398851465215,
+SNULL, 140398853562367, 140398853603327,
+STORE, 140398851465216, 140398853562367,
+STORE, 140398853562368, 140398853603327,
+SNULL, 140398853562368, 140398853586943,
+STORE, 140398853586944, 140398853603327,
+STORE, 140398853562368, 140398853586943,
+ERASE, 140398853562368, 140398853586943,
+STORE, 140398853562368, 140398853586943,
+ERASE, 140398853586944, 140398853603327,
+STORE, 140398853586944, 140398853603327,
+STORE, 140398874447872, 140398874476543,
+SNULL, 140398853578751, 140398853586943,
+STORE, 140398853562368, 140398853578751,
+STORE, 140398853578752, 140398853586943,
+SNULL, 140398856761343, 140398856765439,
+STORE, 140398856757248, 140398856761343,
+STORE, 140398856761344, 140398856765439,
+SNULL, 140398858964991, 140398858969087,
+STORE, 140398858960896, 140398858964991,
+STORE, 140398858964992, 140398858969087,
+SNULL, 140398861078527, 140398861082623,
+STORE, 140398861074432, 140398861078527,
+STORE, 140398861078528, 140398861082623,
+SNULL, 140398863712255, 140398863716351,
+STORE, 140398863708160, 140398863712255,
+STORE, 140398863712256, 140398863716351,
+SNULL, 140398866231295, 140398866235391,
+STORE, 140398866223104, 140398866231295,
+STORE, 140398866231296, 140398866235391,
+SNULL, 140398868426751, 140398868430847,
+STORE, 140398868422656, 140398868426751,
+STORE, 140398868426752, 140398868430847,
+SNULL, 140398872236031, 140398872264703,
+STORE, 140398872231936, 140398872236031,
+STORE, 140398872236032, 140398872264703,
+SNULL, 28405759, 28454911,
+STORE, 28372992, 28405759,
+STORE, 28405760, 28454911,
+SNULL, 140398874509311, 140398874513407,
+STORE, 140398874505216, 140398874509311,
+STORE, 140398874509312, 140398874513407,
+ERASE, 140398874476544, 140398874505215,
+STORE, 43278336, 43413503,
+STORE, 140398872764416, 140398874447871,
+STORE, 140398874501120, 140398874505215,
+STORE, 140398872629248, 140398872764415,
+STORE, 43278336, 43556863,
+STORE, 140398847709184, 140398849806335,
+STORE, 140398874492928, 140398874505215,
+STORE, 140398874484736, 140398874505215,
+STORE, 140398874447872, 140398874484735,
+STORE, 140398872612864, 140398872764415,
+STORE, 43278336, 43692031,
+STORE, 43278336, 43880447,
+STORE, 140398872604672, 140398872764415,
+STORE, 140398872596480, 140398872764415,
+STORE, 43278336, 44044287,
+STORE, 140398872580096, 140398872764415,
+STORE, 140737488347136, 140737488351231,
+STORE, 140734403092480, 140737488351231,
+SNULL, 140734403096575, 140737488351231,
+STORE, 140734403092480, 140734403096575,
+STORE, 140734402961408, 140734403096575,
+STORE, 4194304, 5128191,
+STORE, 7221248, 7241727,
+STORE, 7241728, 7249919,
+STORE, 140240662380544, 140240664633343,
+SNULL, 140240662523903, 140240664633343,
+STORE, 140240662380544, 140240662523903,
+STORE, 140240662523904, 140240664633343,
+ERASE, 140240662523904, 140240664633343,
+STORE, 140240664621056, 140240664629247,
+STORE, 140240664629248, 140240664633343,
+STORE, 140734403145728, 140734403149823,
+STORE, 140734403133440, 140734403145727,
+STORE, 140240664592384, 140240664621055,
+STORE, 140240664584192, 140240664592383,
+STORE, 140240659218432, 140240662380543,
+SNULL, 140240659218432, 140240660279295,
+STORE, 140240660279296, 140240662380543,
+STORE, 140240659218432, 140240660279295,
+SNULL, 140240662372351, 140240662380543,
+STORE, 140240660279296, 140240662372351,
+STORE, 140240662372352, 140240662380543,
+ERASE, 140240662372352, 140240662380543,
+STORE, 140240662372352, 140240662380543,
+STORE, 140240655421440, 140240659218431,
+SNULL, 140240655421440, 140240657080319,
+STORE, 140240657080320, 140240659218431,
+STORE, 140240655421440, 140240657080319,
+SNULL, 140240659177471, 140240659218431,
+STORE, 140240657080320, 140240659177471,
+STORE, 140240659177472, 140240659218431,
+SNULL, 140240659177472, 140240659202047,
+STORE, 140240659202048, 140240659218431,
+STORE, 140240659177472, 140240659202047,
+ERASE, 140240659177472, 140240659202047,
+STORE, 140240659177472, 140240659202047,
+ERASE, 140240659202048, 140240659218431,
+STORE, 140240659202048, 140240659218431,
+STORE, 140240664571904, 140240664592383,
+SNULL, 140240659193855, 140240659202047,
+STORE, 140240659177472, 140240659193855,
+STORE, 140240659193856, 140240659202047,
+SNULL, 140240662376447, 140240662380543,
+STORE, 140240662372352, 140240662376447,
+STORE, 140240662376448, 140240662380543,
+SNULL, 7233535, 7241727,
+STORE, 7221248, 7233535,
+STORE, 7233536, 7241727,
+SNULL, 140240664625151, 140240664629247,
+STORE, 140240664621056, 140240664625151,
+STORE, 140240664625152, 140240664629247,
+ERASE, 140240664592384, 140240664621055,
+STORE, 30646272, 30781439,
+STORE, 30646272, 30928895,
+STORE, 140240662888448, 140240664571903,
+STORE, 94256659468288, 94256659578879,
+STORE, 94256661671936, 94256661680127,
+STORE, 94256661680128, 94256661684223,
+STORE, 94256661684224, 94256661692415,
+STORE, 94256687980544, 94256688115711,
+STORE, 139801712504832, 139801714163711,
+STORE, 139801714163712, 139801716260863,
+STORE, 139801716260864, 139801716277247,
+STORE, 139801716277248, 139801716285439,
+STORE, 139801716285440, 139801716301823,
+STORE, 139801716301824, 139801716445183,
+STORE, 139801718505472, 139801718513663,
+STORE, 139801718542336, 139801718546431,
+STORE, 139801718546432, 139801718550527,
+STORE, 139801718550528, 139801718554623,
+STORE, 140721575538688, 140721575673855,
+STORE, 140721577013248, 140721577025535,
+STORE, 140721577025536, 140721577029631,
+STORE, 140737488347136, 140737488351231,
+STORE, 140729259393024, 140737488351231,
+SNULL, 140729259397119, 140737488351231,
+STORE, 140729259393024, 140729259397119,
+STORE, 140729259261952, 140729259397119,
+STORE, 4194304, 5128191,
+STORE, 7221248, 7241727,
+STORE, 7241728, 7249919,
+STORE, 139682376638464, 139682378891263,
+SNULL, 139682376781823, 139682378891263,
+STORE, 139682376638464, 139682376781823,
+STORE, 139682376781824, 139682378891263,
+ERASE, 139682376781824, 139682378891263,
+STORE, 139682378878976, 139682378887167,
+STORE, 139682378887168, 139682378891263,
+STORE, 140729260462080, 140729260466175,
+STORE, 140729260449792, 140729260462079,
+STORE, 139682378850304, 139682378878975,
+STORE, 139682378842112, 139682378850303,
+STORE, 139682373476352, 139682376638463,
+SNULL, 139682373476352, 139682374537215,
+STORE, 139682374537216, 139682376638463,
+STORE, 139682373476352, 139682374537215,
+SNULL, 139682376630271, 139682376638463,
+STORE, 139682374537216, 139682376630271,
+STORE, 139682376630272, 139682376638463,
+ERASE, 139682376630272, 139682376638463,
+STORE, 139682376630272, 139682376638463,
+STORE, 139682369679360, 139682373476351,
+SNULL, 139682369679360, 139682371338239,
+STORE, 139682371338240, 139682373476351,
+STORE, 139682369679360, 139682371338239,
+SNULL, 139682373435391, 139682373476351,
+STORE, 139682371338240, 139682373435391,
+STORE, 139682373435392, 139682373476351,
+SNULL, 139682373435392, 139682373459967,
+STORE, 139682373459968, 139682373476351,
+STORE, 139682373435392, 139682373459967,
+ERASE, 139682373435392, 139682373459967,
+STORE, 139682373435392, 139682373459967,
+ERASE, 139682373459968, 139682373476351,
+STORE, 139682373459968, 139682373476351,
+STORE, 139682378829824, 139682378850303,
+SNULL, 139682373451775, 139682373459967,
+STORE, 139682373435392, 139682373451775,
+STORE, 139682373451776, 139682373459967,
+SNULL, 139682376634367, 139682376638463,
+STORE, 139682376630272, 139682376634367,
+STORE, 139682376634368, 139682376638463,
+SNULL, 7233535, 7241727,
+STORE, 7221248, 7233535,
+STORE, 7233536, 7241727,
+SNULL, 139682378883071, 139682378887167,
+STORE, 139682378878976, 139682378883071,
+STORE, 139682378883072, 139682378887167,
+ERASE, 139682378850304, 139682378878975,
+STORE, 10022912, 10158079,
+STORE, 10022912, 10305535,
+STORE, 139682377146368, 139682378829823,
+STORE, 140737488347136, 140737488351231,
+STORE, 140731831926784, 140737488351231,
+SNULL, 140731831930879, 140737488351231,
+STORE, 140731831926784, 140731831930879,
+STORE, 140731831795712, 140731831930879,
+STORE, 94615305261056, 94615307485183,
+SNULL, 94615305371647, 94615307485183,
+STORE, 94615305261056, 94615305371647,
+STORE, 94615305371648, 94615307485183,
+ERASE, 94615305371648, 94615307485183,
+STORE, 94615307464704, 94615307476991,
+STORE, 94615307476992, 94615307485183,
+STORE, 140163912994816, 140163915247615,
+SNULL, 140163913138175, 140163915247615,
+STORE, 140163912994816, 140163913138175,
+STORE, 140163913138176, 140163915247615,
+ERASE, 140163913138176, 140163915247615,
+STORE, 140163915235328, 140163915243519,
+STORE, 140163915243520, 140163915247615,
+STORE, 140731832217600, 140731832221695,
+STORE, 140731832205312, 140731832217599,
+STORE, 140163915206656, 140163915235327,
+STORE, 140163915198464, 140163915206655,
+STORE, 140163909197824, 140163912994815,
+SNULL, 140163909197824, 140163910856703,
+STORE, 140163910856704, 140163912994815,
+STORE, 140163909197824, 140163910856703,
+SNULL, 140163912953855, 140163912994815,
+STORE, 140163910856704, 140163912953855,
+STORE, 140163912953856, 140163912994815,
+SNULL, 140163912953856, 140163912978431,
+STORE, 140163912978432, 140163912994815,
+STORE, 140163912953856, 140163912978431,
+ERASE, 140163912953856, 140163912978431,
+STORE, 140163912953856, 140163912978431,
+ERASE, 140163912978432, 140163912994815,
+STORE, 140163912978432, 140163912994815,
+SNULL, 140163912970239, 140163912978431,
+STORE, 140163912953856, 140163912970239,
+STORE, 140163912970240, 140163912978431,
+SNULL, 94615307472895, 94615307476991,
+STORE, 94615307464704, 94615307472895,
+STORE, 94615307472896, 94615307476991,
+SNULL, 140163915239423, 140163915243519,
+STORE, 140163915235328, 140163915239423,
+STORE, 140163915239424, 140163915243519,
+ERASE, 140163915206656, 140163915235327,
+STORE, 94615330672640, 94615330807807,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140725254479872, 140737488351231,
+SNULL, 140725254488063, 140737488351231,
+STORE, 140725254479872, 140725254488063,
+STORE, 140725254348800, 140725254488063,
+STORE, 94572781277184, 94572785741823,
+SNULL, 94572783312895, 94572785741823,
+STORE, 94572781277184, 94572783312895,
+STORE, 94572783312896, 94572785741823,
+ERASE, 94572783312896, 94572785741823,
+STORE, 94572785405952, 94572785455103,
+STORE, 94572785455104, 94572785741823,
+STORE, 139636001341440, 139636003594239,
+SNULL, 139636001484799, 139636003594239,
+STORE, 139636001341440, 139636001484799,
+STORE, 139636001484800, 139636003594239,
+ERASE, 139636001484800, 139636003594239,
+STORE, 139636003581952, 139636003590143,
+STORE, 139636003590144, 139636003594239,
+STORE, 140725255557120, 140725255561215,
+STORE, 140725255544832, 140725255557119,
+STORE, 139636003553280, 139636003581951,
+STORE, 139636003545088, 139636003553279,
+STORE, 139635998773248, 139636001341439,
+SNULL, 139635998773248, 139635999240191,
+STORE, 139635999240192, 139636001341439,
+STORE, 139635998773248, 139635999240191,
+SNULL, 139636001333247, 139636001341439,
+STORE, 139635999240192, 139636001333247,
+STORE, 139636001333248, 139636001341439,
+ERASE, 139636001333248, 139636001341439,
+STORE, 139636001333248, 139636001341439,
+STORE, 139635996569600, 139635998773247,
+SNULL, 139635996569600, 139635996671999,
+STORE, 139635996672000, 139635998773247,
+STORE, 139635996569600, 139635996671999,
+SNULL, 139635998765055, 139635998773247,
+STORE, 139635996672000, 139635998765055,
+STORE, 139635998765056, 139635998773247,
+ERASE, 139635998765056, 139635998773247,
+STORE, 139635998765056, 139635998773247,
+STORE, 139635994353664, 139635996569599,
+SNULL, 139635994353664, 139635994451967,
+STORE, 139635994451968, 139635996569599,
+STORE, 139635994353664, 139635994451967,
+SNULL, 139635996545023, 139635996569599,
+STORE, 139635994451968, 139635996545023,
+STORE, 139635996545024, 139635996569599,
+SNULL, 139635996545024, 139635996553215,
+STORE, 139635996553216, 139635996569599,
+STORE, 139635996545024, 139635996553215,
+ERASE, 139635996545024, 139635996553215,
+STORE, 139635996545024, 139635996553215,
+ERASE, 139635996553216, 139635996569599,
+STORE, 139635996553216, 139635996569599,
+STORE, 139635992223744, 139635994353663,
+SNULL, 139635992223744, 139635992252415,
+STORE, 139635992252416, 139635994353663,
+STORE, 139635992223744, 139635992252415,
+SNULL, 139635994345471, 139635994353663,
+STORE, 139635992252416, 139635994345471,
+STORE, 139635994345472, 139635994353663,
+ERASE, 139635994345472, 139635994353663,
+STORE, 139635994345472, 139635994353663,
+STORE, 139635988426752, 139635992223743,
+SNULL, 139635988426752, 139635990085631,
+STORE, 139635990085632, 139635992223743,
+STORE, 139635988426752, 139635990085631,
+SNULL, 139635992182783, 139635992223743,
+STORE, 139635990085632, 139635992182783,
+STORE, 139635992182784, 139635992223743,
+SNULL, 139635992182784, 139635992207359,
+STORE, 139635992207360, 139635992223743,
+STORE, 139635992182784, 139635992207359,
+ERASE, 139635992182784, 139635992207359,
+STORE, 139635992182784, 139635992207359,
+ERASE, 139635992207360, 139635992223743,
+STORE, 139635992207360, 139635992223743,
+STORE, 139636003536896, 139636003553279,
+SNULL, 139635992199167, 139635992207359,
+STORE, 139635992182784, 139635992199167,
+STORE, 139635992199168, 139635992207359,
+SNULL, 139635996549119, 139635996553215,
+STORE, 139635996545024, 139635996549119,
+STORE, 139635996549120, 139635996553215,
+SNULL, 139635994349567, 139635994353663,
+STORE, 139635994345472, 139635994349567,
+STORE, 139635994349568, 139635994353663,
+SNULL, 139635998769151, 139635998773247,
+STORE, 139635998765056, 139635998769151,
+STORE, 139635998769152, 139635998773247,
+SNULL, 139636001337343, 139636001341439,
+STORE, 139636001333248, 139636001337343,
+STORE, 139636001337344, 139636001341439,
+SNULL, 94572785418239, 94572785455103,
+STORE, 94572785405952, 94572785418239,
+STORE, 94572785418240, 94572785455103,
+SNULL, 139636003586047, 139636003590143,
+STORE, 139636003581952, 139636003586047,
+STORE, 139636003586048, 139636003590143,
+ERASE, 139636003553280, 139636003581951,
+STORE, 94572798435328, 94572798570495,
+STORE, 139636001853440, 139636003536895,
+STORE, 139635981426688, 139635988426751,
+STORE, 139635980615680, 139635981426687,
+STORE, 94572798435328, 94572798705663,
+STORE, 94572798435328, 94572798840831,
+STORE, 94572798435328, 94572798975999,
+STORE, 94572798435328, 94572799111167,
+STORE, 94572798435328, 94572799246335,
+STORE, 94572798435328, 94572799381503,
+STORE, 94572798435328, 94572799516671,
+STORE, 94572798435328, 94572799651839,
+STORE, 94572798435328, 94572799787007,
+STORE, 94572798435328, 94572799922175,
+STORE, 94572798435328, 94572800057343,
+STORE, 94572798435328, 94572800192511,
+STORE, 94572798435328, 94572800327679,
+STORE, 94572798435328, 94572800462847,
+STORE, 94572798435328, 94572800598015,
+STORE, 94572798435328, 94572800733183,
+STORE, 94572798435328, 94572800868351,
+STORE, 94572798435328, 94572801003519,
+STORE, 94572798435328, 94572801138687,
+STORE, 94572798435328, 94572801273855,
+STORE, 94572798435328, 94572801409023,
+STORE, 94572798435328, 94572801544191,
+STORE, 94572798435328, 94572801679359,
+STORE, 94572798435328, 94572801814527,
+STORE, 94572798435328, 94572801949695,
+STORE, 94572798435328, 94572802084863,
+STORE, 94572798435328, 94572802220031,
+STORE, 94572798435328, 94572802355199,
+STORE, 94572798435328, 94572802490367,
+STORE, 94572798435328, 94572802625535,
+STORE, 94572798435328, 94572802760703,
+STORE, 94572798435328, 94572802895871,
+STORE, 94572798435328, 94572803031039,
+STORE, 94572798435328, 94572803166207,
+STORE, 94572798435328, 94572803301375,
+STORE, 94572798435328, 94572803436543,
+STORE, 94572798435328, 94572803571711,
+STORE, 94572798435328, 94572803706879,
+STORE, 94572798435328, 94572803842047,
+STORE, 94572798435328, 94572803977215,
+STORE, 94572798435328, 94572804112383,
+STORE, 94572798435328, 94572804247551,
+STORE, 94572798435328, 94572804382719,
+STORE, 94572798435328, 94572804517887,
+STORE, 94572798435328, 94572804653055,
+STORE, 94572798435328, 94572804788223,
+STORE, 94572798435328, 94572804923391,
+STORE, 94572798435328, 94572805058559,
+STORE, 94572798435328, 94572805193727,
+STORE, 94572798435328, 94572805328895,
+STORE, 94572798435328, 94572805464063,
+STORE, 94572798435328, 94572805599231,
+STORE, 94572798435328, 94572805734399,
+STORE, 94572798435328, 94572805869567,
+STORE, 94572798435328, 94572806004735,
+STORE, 94572798435328, 94572806139903,
+STORE, 94572798435328, 94572806275071,
+STORE, 94572798435328, 94572806410239,
+STORE, 94572798435328, 94572806545407,
+STORE, 94572798435328, 94572806680575,
+STORE, 94572798435328, 94572806815743,
+STORE, 94572798435328, 94572806950911,
+STORE, 94572798435328, 94572807086079,
+STORE, 94572798435328, 94572807221247,
+STORE, 94572798435328, 94572807356415,
+STORE, 94572798435328, 94572807491583,
+STORE, 94572798435328, 94572807626751,
+STORE, 94572798435328, 94572807761919,
+STORE, 94572798435328, 94572807897087,
+STORE, 94572798435328, 94572808032255,
+STORE, 94572798435328, 94572808167423,
+STORE, 94572798435328, 94572808302591,
+STORE, 94572798435328, 94572808437759,
+STORE, 94572798435328, 94572808572927,
+ERASE, 139635981426688, 139635988426751,
+STORE, 139635985088512, 139635988426751,
+STORE, 139635778273280, 139635980615679,
+STORE, 139635567632384, 139635778273279,
+STORE, 94572798435328, 94572808716287,
+STORE, 139635984564224, 139635985088511,
+STORE, 139635559239680, 139635567632383,
+SNULL, 139635559243775, 139635567632383,
+STORE, 139635559239680, 139635559243775,
+STORE, 139635559243776, 139635567632383,
+STORE, 139635550846976, 139635559239679,
+SNULL, 139635550851071, 139635559239679,
+STORE, 139635550846976, 139635550851071,
+STORE, 139635550851072, 139635559239679,
+STORE, 139635542454272, 139635550846975,
+STORE, 139635408236544, 139635542454271,
+SNULL, 139635408236544, 139635426590719,
+STORE, 139635426590720, 139635542454271,
+STORE, 139635408236544, 139635426590719,
+ERASE, 139635408236544, 139635426590719,
+STORE, 139635292372992, 139635542454271,
+SNULL, 139635359481855, 139635542454271,
+STORE, 139635292372992, 139635359481855,
+STORE, 139635359481856, 139635542454271,
+SNULL, 139635359481856, 139635426590719,
+STORE, 139635426590720, 139635542454271,
+STORE, 139635359481856, 139635426590719,
+ERASE, 139635359481856, 139635426590719,
+SNULL, 139635542458367, 139635550846975,
+STORE, 139635542454272, 139635542458367,
+STORE, 139635542458368, 139635550846975,
+STORE, 139635418198016, 139635426590719,
+SNULL, 139635493699583, 139635542454271,
+STORE, 139635426590720, 139635493699583,
+STORE, 139635493699584, 139635542454271,
+ERASE, 139635493699584, 139635542454271,
+SNULL, 139635426725887, 139635493699583,
+STORE, 139635426590720, 139635426725887,
+STORE, 139635426725888, 139635493699583,
+SNULL, 139635292508159, 139635359481855,
+STORE, 139635292372992, 139635292508159,
+STORE, 139635292508160, 139635359481855,
+SNULL, 139635418202111, 139635426590719,
+STORE, 139635418198016, 139635418202111,
+STORE, 139635418202112, 139635426590719,
+STORE, 139635225264128, 139635292372991,
+STORE, 139635534061568, 139635542454271,
+SNULL, 139635534065663, 139635542454271,
+STORE, 139635534061568, 139635534065663,
+STORE, 139635534065664, 139635542454271,
+STORE, 139635525668864, 139635534061567,
+SNULL, 139635525672959, 139635534061567,
+STORE, 139635525668864, 139635525672959,
+STORE, 139635525672960, 139635534061567,
+SNULL, 139635225399295, 139635292372991,
+STORE, 139635225264128, 139635225399295,
+STORE, 139635225399296, 139635292372991,
+STORE, 139635091046400, 139635225264127,
+SNULL, 139635158155263, 139635225264127,
+STORE, 139635091046400, 139635158155263,
+STORE, 139635158155264, 139635225264127,
+ERASE, 139635158155264, 139635225264127,
+STORE, 139634956828672, 139635158155263,
+STORE, 139635517276160, 139635525668863,
+SNULL, 139635517280255, 139635525668863,
+STORE, 139635517276160, 139635517280255,
+STORE, 139635517280256, 139635525668863,
+SNULL, 139634956828672, 139635091046399,
+STORE, 139635091046400, 139635158155263,
+STORE, 139634956828672, 139635091046399,
+SNULL, 139635091181567, 139635158155263,
+STORE, 139635091046400, 139635091181567,
+STORE, 139635091181568, 139635158155263,
+SNULL, 139635023937535, 139635091046399,
+STORE, 139634956828672, 139635023937535,
+STORE, 139635023937536, 139635091046399,
+ERASE, 139635023937536, 139635091046399,
+STORE, 139634956828672, 139635091046399,
+SNULL, 139634956828672, 139635023937535,
+STORE, 139635023937536, 139635091046399,
+STORE, 139634956828672, 139635023937535,
+SNULL, 139635024072703, 139635091046399,
+STORE, 139635023937536, 139635024072703,
+STORE, 139635024072704, 139635091046399,
+STORE, 139635508883456, 139635517276159,
+SNULL, 139635508887551, 139635517276159,
+STORE, 139635508883456, 139635508887551,
+STORE, 139635508887552, 139635517276159,
+STORE, 139634822610944, 139635023937535,
+SNULL, 139634822610944, 139634956828671,
+STORE, 139634956828672, 139635023937535,
+STORE, 139634822610944, 139634956828671,
+SNULL, 139634956963839, 139635023937535,
+STORE, 139634956828672, 139634956963839,
+STORE, 139634956963840, 139635023937535,
+STORE, 139635500490752, 139635508883455,
+SNULL, 139634889719807, 139634956828671,
+STORE, 139634822610944, 139634889719807,
+STORE, 139634889719808, 139634956828671,
+ERASE, 139634889719808, 139634956828671,
+SNULL, 139635500494847, 139635508883455,
+STORE, 139635500490752, 139635500494847,
+STORE, 139635500494848, 139635508883455,
+SNULL, 139634822746111, 139634889719807,
+STORE, 139634822610944, 139634822746111,
+STORE, 139634822746112, 139634889719807,
+STORE, 139635409805312, 139635418198015,
+STORE, 139634822746112, 139634956828671,
+SNULL, 139634822746112, 139634889719807,
+STORE, 139634889719808, 139634956828671,
+STORE, 139634822746112, 139634889719807,
+SNULL, 139634889854975, 139634956828671,
+STORE, 139634889719808, 139634889854975,
+STORE, 139634889854976, 139634956828671,
+SNULL, 139635409809407, 139635418198015,
+STORE, 139635409805312, 139635409809407,
+STORE, 139635409809408, 139635418198015,
+STORE, 139635401412608, 139635409805311,
+STORE, 139634688393216, 139634822610943,
+SNULL, 139634755502079, 139634822610943,
+STORE, 139634688393216, 139634755502079,
+STORE, 139634755502080, 139634822610943,
+ERASE, 139634755502080, 139634822610943,
+SNULL, 139635401416703, 139635409805311,
+STORE, 139635401412608, 139635401416703,
+STORE, 139635401416704, 139635409805311,
+STORE, 139634554175488, 139634755502079,
+SNULL, 139634554175488, 139634688393215,
+STORE, 139634688393216, 139634755502079,
+STORE, 139634554175488, 139634688393215,
+SNULL, 139634688528383, 139634755502079,
+STORE, 139634688393216, 139634688528383,
+STORE, 139634688528384, 139634755502079,
+STORE, 139635393019904, 139635401412607,
+SNULL, 139634621284351, 139634688393215,
+STORE, 139634554175488, 139634621284351,
+STORE, 139634621284352, 139634688393215,
+ERASE, 139634621284352, 139634688393215,
+SNULL, 139634554310655, 139634621284351,
+STORE, 139634554175488, 139634554310655,
+STORE, 139634554310656, 139634621284351,
+STORE, 139634554310656, 139634688393215,
+SNULL, 139635393023999, 139635401412607,
+STORE, 139635393019904, 139635393023999,
+STORE, 139635393024000, 139635401412607,
+SNULL, 139634554310656, 139634621284351,
+STORE, 139634621284352, 139634688393215,
+STORE, 139634554310656, 139634621284351,
+SNULL, 139634621419519, 139634688393215,
+STORE, 139634621284352, 139634621419519,
+STORE, 139634621419520, 139634688393215,
+STORE, 139635384627200, 139635393019903,
+SNULL, 139635384631295, 139635393019903,
+STORE, 139635384627200, 139635384631295,
+STORE, 139635384631296, 139635393019903,
+STORE, 139635376234496, 139635384627199,
+SNULL, 139635376238591, 139635384627199,
+STORE, 139635376234496, 139635376238591,
+STORE, 139635376238592, 139635384627199,
+STORE, 139635367841792, 139635376234495,
+SNULL, 139635367845887, 139635376234495,
+STORE, 139635367841792, 139635367845887,
+STORE, 139635367845888, 139635376234495,
+STORE, 139634419957760, 139634554175487,
+SNULL, 139634487066623, 139634554175487,
+STORE, 139634419957760, 139634487066623,
+STORE, 139634487066624, 139634554175487,
+ERASE, 139634487066624, 139634554175487,
+STORE, 139635216871424, 139635225264127,
+SNULL, 139635216875519, 139635225264127,
+STORE, 139635216871424, 139635216875519,
+STORE, 139635216875520, 139635225264127,
+SNULL, 139634420092927, 139634487066623,
+STORE, 139634419957760, 139634420092927,
+STORE, 139634420092928, 139634487066623,
+STORE, 139635208478720, 139635216871423,
+SNULL, 139635208482815, 139635216871423,
+STORE, 139635208478720, 139635208482815,
+STORE, 139635208482816, 139635216871423,
+STORE, 139635200086016, 139635208478719,
+SNULL, 139635200090111, 139635208478719,
+STORE, 139635200086016, 139635200090111,
+STORE, 139635200090112, 139635208478719,
+STORE, 139635191693312, 139635200086015,
+SNULL, 139635191697407, 139635200086015,
+STORE, 139635191693312, 139635191697407,
+STORE, 139635191697408, 139635200086015,
+STORE, 139635183300608, 139635191693311,
+SNULL, 139635183304703, 139635191693311,
+STORE, 139635183300608, 139635183304703,
+STORE, 139635183304704, 139635191693311,
+STORE, 139634420092928, 139634554175487,
+SNULL, 139634420092928, 139634487066623,
+STORE, 139634487066624, 139634554175487,
+STORE, 139634420092928, 139634487066623,
+SNULL, 139634487201791, 139634554175487,
+STORE, 139634487066624, 139634487201791,
+STORE, 139634487201792, 139634554175487,
+ERASE, 139635559239680, 139635559243775,
+ERASE, 139635559243776, 139635567632383,
+ERASE, 139635550846976, 139635550851071,
+ERASE, 139635550851072, 139635559239679,
+ERASE, 139635542454272, 139635542458367,
+ERASE, 139635542458368, 139635550846975,
+ERASE, 139635418198016, 139635418202111,
+ERASE, 139635418202112, 139635426590719,
+ERASE, 139635534061568, 139635534065663,
+ERASE, 139635534065664, 139635542454271,
+ERASE, 139635525668864, 139635525672959,
+ERASE, 139635525672960, 139635534061567,
+ERASE, 139635517276160, 139635517280255,
+ERASE, 139635517280256, 139635525668863,
+ERASE, 139635508883456, 139635508887551,
+ERASE, 139635508887552, 139635517276159,
+ERASE, 139635500490752, 139635500494847,
+ERASE, 139635500494848, 139635508883455,
+ERASE, 139635409805312, 139635409809407,
+ERASE, 139635409809408, 139635418198015,
+ERASE, 139635401412608, 139635401416703,
+ERASE, 139635401416704, 139635409805311,
+ERASE, 139635393019904, 139635393023999,
+ERASE, 139635393024000, 139635401412607,
+ERASE, 139635384627200, 139635384631295,
+ERASE, 139635384631296, 139635393019903,
+ };
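(Illustrative aside, not part of the patch: each test set in this file is a flat array read three entries at a time — an opcode matching the STORE/SNULL/ERASE markers above, then the start and end of the affected address range, as captured from live mmap()/munmap() traffic. A minimal replay loop of the assumed form below shows how such triples decode; the opcode encoding and function names here are hypothetical, chosen only for the sketch.)

    /*
     * Sketch only: decode (opcode, start, end) triples like the test
     * sets above.  The enum values are an assumed encoding, not the
     * ones used by the real test harness.
     */
    #include <stdio.h>

    enum mt_op { STORE, SNULL, ERASE };  /* assumed opcode encoding */

    static void replay(const unsigned long *set, unsigned long count)
    {
            unsigned long i;

            /* Each logical record is an (opcode, start, end) triple. */
            for (i = 0; i + 2 < count; i += 3)
                    printf("op=%lu range=[0x%lx, 0x%lx]\n",
                           set[i], set[i + 1], set[i + 2]);
    }

    int main(void)
    {
            /* Sample triples lifted from the data above. */
            static const unsigned long demo[] = {
                    STORE, 140737488347136, 140737488351231,
                    SNULL, 140737488343040, 140737488351231,
                    ERASE, 140737488343040, 140737488351231,
            };

            replay(demo, sizeof(demo) / sizeof(demo[0]));
            return 0;
    }

(The end addresses consistently sit one byte below a page boundary, which is why they end in ...4095/...6143-style values: the ranges are inclusive page-aligned VMA extents.)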
+ static const unsigned long set25[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722547441664, 140737488351231,
+SNULL, 140722547449855, 140737488351231,
+STORE, 140722547441664, 140722547449855,
+STORE, 140722547310592, 140722547449855,
+STORE, 94827521732608, 94827523956735,
+SNULL, 94827521843199, 94827523956735,
+STORE, 94827521732608, 94827521843199,
+STORE, 94827521843200, 94827523956735,
+ERASE, 94827521843200, 94827523956735,
+STORE, 94827523936256, 94827523948543,
+STORE, 94827523948544, 94827523956735,
+STORE, 139816136847360, 139816139100159,
+SNULL, 139816136990719, 139816139100159,
+STORE, 139816136847360, 139816136990719,
+STORE, 139816136990720, 139816139100159,
+ERASE, 139816136990720, 139816139100159,
+STORE, 139816139087872, 139816139096063,
+STORE, 139816139096064, 139816139100159,
+STORE, 140722548142080, 140722548146175,
+STORE, 140722548129792, 140722548142079,
+STORE, 139816139059200, 139816139087871,
+STORE, 139816139051008, 139816139059199,
+STORE, 139816133050368, 139816136847359,
+SNULL, 139816133050368, 139816134709247,
+STORE, 139816134709248, 139816136847359,
+STORE, 139816133050368, 139816134709247,
+SNULL, 139816136806399, 139816136847359,
+STORE, 139816134709248, 139816136806399,
+STORE, 139816136806400, 139816136847359,
+SNULL, 139816136806400, 139816136830975,
+STORE, 139816136830976, 139816136847359,
+STORE, 139816136806400, 139816136830975,
+ERASE, 139816136806400, 139816136830975,
+STORE, 139816136806400, 139816136830975,
+ERASE, 139816136830976, 139816136847359,
+STORE, 139816136830976, 139816136847359,
+SNULL, 139816136822783, 139816136830975,
+STORE, 139816136806400, 139816136822783,
+STORE, 139816136822784, 139816136830975,
+SNULL, 94827523944447, 94827523948543,
+STORE, 94827523936256, 94827523944447,
+STORE, 94827523944448, 94827523948543,
+SNULL, 139816139091967, 139816139096063,
+STORE, 139816139087872, 139816139091967,
+STORE, 139816139091968, 139816139096063,
+ERASE, 139816139059200, 139816139087871,
+STORE, 94827534970880, 94827535106047,
+STORE, 94114394132480, 94114394345471,
+STORE, 94114396442624, 94114396446719,
+STORE, 94114396446720, 94114396454911,
+STORE, 94114396454912, 94114396467199,
+STORE, 94114421575680, 94114427715583,
+STORE, 139934313955328, 139934315614207,
+STORE, 139934315614208, 139934317711359,
+STORE, 139934317711360, 139934317727743,
+STORE, 139934317727744, 139934317735935,
+STORE, 139934317735936, 139934317752319,
+STORE, 139934317752320, 139934317764607,
+STORE, 139934317764608, 139934319857663,
+STORE, 139934319857664, 139934319861759,
+STORE, 139934319861760, 139934319865855,
+STORE, 139934319865856, 139934320009215,
+STORE, 139934320377856, 139934322061311,
+STORE, 139934322061312, 139934322077695,
+STORE, 139934322106368, 139934322110463,
+STORE, 139934322110464, 139934322114559,
+STORE, 139934322114560, 139934322118655,
+STORE, 140731200376832, 140731200516095,
+STORE, 140731200929792, 140731200942079,
+STORE, 140731200942080, 140731200946175,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140734133174272, 140737488351231,
+SNULL, 140734133182463, 140737488351231,
+STORE, 140734133174272, 140734133182463,
+STORE, 140734133043200, 140734133182463,
+STORE, 94412675600384, 94412677824511,
+SNULL, 94412675710975, 94412677824511,
+STORE, 94412675600384, 94412675710975,
+STORE, 94412675710976, 94412677824511,
+ERASE, 94412675710976, 94412677824511,
+STORE, 94412677804032, 94412677816319,
+STORE, 94412677816320, 94412677824511,
+STORE, 140320087945216, 140320090198015,
+SNULL, 140320088088575, 140320090198015,
+STORE, 140320087945216, 140320088088575,
+STORE, 140320088088576, 140320090198015,
+ERASE, 140320088088576, 140320090198015,
+STORE, 140320090185728, 140320090193919,
+STORE, 140320090193920, 140320090198015,
+STORE, 140734134591488, 140734134595583,
+STORE, 140734134579200, 140734134591487,
+STORE, 140320090157056, 140320090185727,
+STORE, 140320090148864, 140320090157055,
+STORE, 140320084148224, 140320087945215,
+SNULL, 140320084148224, 140320085807103,
+STORE, 140320085807104, 140320087945215,
+STORE, 140320084148224, 140320085807103,
+SNULL, 140320087904255, 140320087945215,
+STORE, 140320085807104, 140320087904255,
+STORE, 140320087904256, 140320087945215,
+SNULL, 140320087904256, 140320087928831,
+STORE, 140320087928832, 140320087945215,
+STORE, 140320087904256, 140320087928831,
+ERASE, 140320087904256, 140320087928831,
+STORE, 140320087904256, 140320087928831,
+ERASE, 140320087928832, 140320087945215,
+STORE, 140320087928832, 140320087945215,
+SNULL, 140320087920639, 140320087928831,
+STORE, 140320087904256, 140320087920639,
+STORE, 140320087920640, 140320087928831,
+SNULL, 94412677812223, 94412677816319,
+STORE, 94412677804032, 94412677812223,
+STORE, 94412677812224, 94412677816319,
+SNULL, 140320090189823, 140320090193919,
+STORE, 140320090185728, 140320090189823,
+STORE, 140320090189824, 140320090193919,
+ERASE, 140320090157056, 140320090185727,
+STORE, 94412684546048, 94412684681215,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140723005485056, 140737488351231,
+SNULL, 140723005493247, 140737488351231,
+STORE, 140723005485056, 140723005493247,
+STORE, 140723005353984, 140723005493247,
+STORE, 94387431936000, 94387434160127,
+SNULL, 94387432046591, 94387434160127,
+STORE, 94387431936000, 94387432046591,
+STORE, 94387432046592, 94387434160127,
+ERASE, 94387432046592, 94387434160127,
+STORE, 94387434139648, 94387434151935,
+STORE, 94387434151936, 94387434160127,
+STORE, 140151675392000, 140151677644799,
+SNULL, 140151675535359, 140151677644799,
+STORE, 140151675392000, 140151675535359,
+STORE, 140151675535360, 140151677644799,
+ERASE, 140151675535360, 140151677644799,
+STORE, 140151677632512, 140151677640703,
+STORE, 140151677640704, 140151677644799,
+STORE, 140723005784064, 140723005788159,
+STORE, 140723005771776, 140723005784063,
+STORE, 140151677603840, 140151677632511,
+STORE, 140151677595648, 140151677603839,
+STORE, 140151671595008, 140151675391999,
+SNULL, 140151671595008, 140151673253887,
+STORE, 140151673253888, 140151675391999,
+STORE, 140151671595008, 140151673253887,
+SNULL, 140151675351039, 140151675391999,
+STORE, 140151673253888, 140151675351039,
+STORE, 140151675351040, 140151675391999,
+SNULL, 140151675351040, 140151675375615,
+STORE, 140151675375616, 140151675391999,
+STORE, 140151675351040, 140151675375615,
+ERASE, 140151675351040, 140151675375615,
+STORE, 140151675351040, 140151675375615,
+ERASE, 140151675375616, 140151675391999,
+STORE, 140151675375616, 140151675391999,
+SNULL, 140151675367423, 140151675375615,
+STORE, 140151675351040, 140151675367423,
+STORE, 140151675367424, 140151675375615,
+SNULL, 94387434147839, 94387434151935,
+STORE, 94387434139648, 94387434147839,
+STORE, 94387434147840, 94387434151935,
+SNULL, 140151677636607, 140151677640703,
+STORE, 140151677632512, 140151677636607,
+STORE, 140151677636608, 140151677640703,
+ERASE, 140151677603840, 140151677632511,
+STORE, 94387458818048, 94387458953215,
+STORE, 94909010997248, 94909011210239,
+STORE, 94909013307392, 94909013311487,
+STORE, 94909013311488, 94909013319679,
+STORE, 94909013319680, 94909013331967,
+STORE, 94909014827008, 94909023371263,
+STORE, 140712411975680, 140712413634559,
+STORE, 140712413634560, 140712415731711,
+STORE, 140712415731712, 140712415748095,
+STORE, 140712415748096, 140712415756287,
+STORE, 140712415756288, 140712415772671,
+STORE, 140712415772672, 140712415784959,
+STORE, 140712415784960, 140712417878015,
+STORE, 140712417878016, 140712417882111,
+STORE, 140712417882112, 140712417886207,
+STORE, 140712417886208, 140712418029567,
+STORE, 140712418398208, 140712420081663,
+STORE, 140712420081664, 140712420098047,
+STORE, 140712420126720, 140712420130815,
+STORE, 140712420130816, 140712420134911,
+STORE, 140712420134912, 140712420139007,
+STORE, 140729293111296, 140729293250559,
+STORE, 140729293307904, 140729293320191,
+STORE, 140729293320192, 140729293324287,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140720541691904, 140737488351231,
+SNULL, 140720541700095, 140737488351231,
+STORE, 140720541691904, 140720541700095,
+STORE, 140720541560832, 140720541700095,
+STORE, 94203603419136, 94203605643263,
+SNULL, 94203603529727, 94203605643263,
+STORE, 94203603419136, 94203603529727,
+STORE, 94203603529728, 94203605643263,
+ERASE, 94203603529728, 94203605643263,
+STORE, 94203605622784, 94203605635071,
+STORE, 94203605635072, 94203605643263,
+STORE, 139847623081984, 139847625334783,
+SNULL, 139847623225343, 139847625334783,
+STORE, 139847623081984, 139847623225343,
+STORE, 139847623225344, 139847625334783,
+ERASE, 139847623225344, 139847625334783,
+STORE, 139847625322496, 139847625330687,
+STORE, 139847625330688, 139847625334783,
+STORE, 140720542547968, 140720542552063,
+STORE, 140720542535680, 140720542547967,
+STORE, 139847625293824, 139847625322495,
+STORE, 139847625285632, 139847625293823,
+STORE, 139847619284992, 139847623081983,
+SNULL, 139847619284992, 139847620943871,
+STORE, 139847620943872, 139847623081983,
+STORE, 139847619284992, 139847620943871,
+SNULL, 139847623041023, 139847623081983,
+STORE, 139847620943872, 139847623041023,
+STORE, 139847623041024, 139847623081983,
+SNULL, 139847623041024, 139847623065599,
+STORE, 139847623065600, 139847623081983,
+STORE, 139847623041024, 139847623065599,
+ERASE, 139847623041024, 139847623065599,
+STORE, 139847623041024, 139847623065599,
+ERASE, 139847623065600, 139847623081983,
+STORE, 139847623065600, 139847623081983,
+SNULL, 139847623057407, 139847623065599,
+STORE, 139847623041024, 139847623057407,
+STORE, 139847623057408, 139847623065599,
+SNULL, 94203605630975, 94203605635071,
+STORE, 94203605622784, 94203605630975,
+STORE, 94203605630976, 94203605635071,
+SNULL, 139847625326591, 139847625330687,
+STORE, 139847625322496, 139847625326591,
+STORE, 139847625326592, 139847625330687,
+ERASE, 139847625293824, 139847625322495,
+STORE, 94203634880512, 94203635015679,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140721428738048, 140737488351231,
+SNULL, 140721428746239, 140737488351231,
+STORE, 140721428738048, 140721428746239,
+STORE, 140721428606976, 140721428746239,
+STORE, 93968808378368, 93968810602495,
+SNULL, 93968808488959, 93968810602495,
+STORE, 93968808378368, 93968808488959,
+STORE, 93968808488960, 93968810602495,
+ERASE, 93968808488960, 93968810602495,
+STORE, 93968810582016, 93968810594303,
+STORE, 93968810594304, 93968810602495,
+STORE, 140397757026304, 140397759279103,
+SNULL, 140397757169663, 140397759279103,
+STORE, 140397757026304, 140397757169663,
+STORE, 140397757169664, 140397759279103,
+ERASE, 140397757169664, 140397759279103,
+STORE, 140397759266816, 140397759275007,
+STORE, 140397759275008, 140397759279103,
+STORE, 140721430368256, 140721430372351,
+STORE, 140721430355968, 140721430368255,
+STORE, 140397759238144, 140397759266815,
+STORE, 140397759229952, 140397759238143,
+STORE, 140397753229312, 140397757026303,
+SNULL, 140397753229312, 140397754888191,
+STORE, 140397754888192, 140397757026303,
+STORE, 140397753229312, 140397754888191,
+SNULL, 140397756985343, 140397757026303,
+STORE, 140397754888192, 140397756985343,
+STORE, 140397756985344, 140397757026303,
+SNULL, 140397756985344, 140397757009919,
+STORE, 140397757009920, 140397757026303,
+STORE, 140397756985344, 140397757009919,
+ERASE, 140397756985344, 140397757009919,
+STORE, 140397756985344, 140397757009919,
+ERASE, 140397757009920, 140397757026303,
+STORE, 140397757009920, 140397757026303,
+SNULL, 140397757001727, 140397757009919,
+STORE, 140397756985344, 140397757001727,
+STORE, 140397757001728, 140397757009919,
+SNULL, 93968810590207, 93968810594303,
+STORE, 93968810582016, 93968810590207,
+STORE, 93968810590208, 93968810594303,
+SNULL, 140397759270911, 140397759275007,
+STORE, 140397759266816, 140397759270911,
+STORE, 140397759270912, 140397759275007,
+ERASE, 140397759238144, 140397759266815,
+STORE, 93968837025792, 93968837160959,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140721751044096, 140737488351231,
+SNULL, 140721751052287, 140737488351231,
+STORE, 140721751044096, 140721751052287,
+STORE, 140721750913024, 140721751052287,
+STORE, 94426051657728, 94426053881855,
+SNULL, 94426051768319, 94426053881855,
+STORE, 94426051657728, 94426051768319,
+STORE, 94426051768320, 94426053881855,
+ERASE, 94426051768320, 94426053881855,
+STORE, 94426053861376, 94426053873663,
+STORE, 94426053873664, 94426053881855,
+STORE, 140228456181760, 140228458434559,
+SNULL, 140228456325119, 140228458434559,
+STORE, 140228456181760, 140228456325119,
+STORE, 140228456325120, 140228458434559,
+ERASE, 140228456325120, 140228458434559,
+STORE, 140228458422272, 140228458430463,
+STORE, 140228458430464, 140228458434559,
+STORE, 140721751117824, 140721751121919,
+STORE, 140721751105536, 140721751117823,
+STORE, 140228458393600, 140228458422271,
+STORE, 140228458385408, 140228458393599,
+STORE, 140228452384768, 140228456181759,
+SNULL, 140228452384768, 140228454043647,
+STORE, 140228454043648, 140228456181759,
+STORE, 140228452384768, 140228454043647,
+SNULL, 140228456140799, 140228456181759,
+STORE, 140228454043648, 140228456140799,
+STORE, 140228456140800, 140228456181759,
+SNULL, 140228456140800, 140228456165375,
+STORE, 140228456165376, 140228456181759,
+STORE, 140228456140800, 140228456165375,
+ERASE, 140228456140800, 140228456165375,
+STORE, 140228456140800, 140228456165375,
+ERASE, 140228456165376, 140228456181759,
+STORE, 140228456165376, 140228456181759,
+SNULL, 140228456157183, 140228456165375,
+STORE, 140228456140800, 140228456157183,
+STORE, 140228456157184, 140228456165375,
+SNULL, 94426053869567, 94426053873663,
+STORE, 94426053861376, 94426053869567,
+STORE, 94426053869568, 94426053873663,
+SNULL, 140228458426367, 140228458430463,
+STORE, 140228458422272, 140228458426367,
+STORE, 140228458426368, 140228458430463,
+ERASE, 140228458393600, 140228458422271,
+STORE, 94426073681920, 94426073817087,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140732727623680, 140737488351231,
+SNULL, 140732727631871, 140737488351231,
+STORE, 140732727623680, 140732727631871,
+STORE, 140732727492608, 140732727631871,
+STORE, 94537485996032, 94537488220159,
+SNULL, 94537486106623, 94537488220159,
+STORE, 94537485996032, 94537486106623,
+STORE, 94537486106624, 94537488220159,
+ERASE, 94537486106624, 94537488220159,
+STORE, 94537488199680, 94537488211967,
+STORE, 94537488211968, 94537488220159,
+STORE, 140446578036736, 140446580289535,
+SNULL, 140446578180095, 140446580289535,
+STORE, 140446578036736, 140446578180095,
+STORE, 140446578180096, 140446580289535,
+ERASE, 140446578180096, 140446580289535,
+STORE, 140446580277248, 140446580285439,
+STORE, 140446580285440, 140446580289535,
+STORE, 140732727758848, 140732727762943,
+STORE, 140732727746560, 140732727758847,
+STORE, 140446580248576, 140446580277247,
+STORE, 140446580240384, 140446580248575,
+STORE, 140446574239744, 140446578036735,
+SNULL, 140446574239744, 140446575898623,
+STORE, 140446575898624, 140446578036735,
+STORE, 140446574239744, 140446575898623,
+SNULL, 140446577995775, 140446578036735,
+STORE, 140446575898624, 140446577995775,
+STORE, 140446577995776, 140446578036735,
+SNULL, 140446577995776, 140446578020351,
+STORE, 140446578020352, 140446578036735,
+STORE, 140446577995776, 140446578020351,
+ERASE, 140446577995776, 140446578020351,
+STORE, 140446577995776, 140446578020351,
+ERASE, 140446578020352, 140446578036735,
+STORE, 140446578020352, 140446578036735,
+SNULL, 140446578012159, 140446578020351,
+STORE, 140446577995776, 140446578012159,
+STORE, 140446578012160, 140446578020351,
+SNULL, 94537488207871, 94537488211967,
+STORE, 94537488199680, 94537488207871,
+STORE, 94537488207872, 94537488211967,
+SNULL, 140446580281343, 140446580285439,
+STORE, 140446580277248, 140446580281343,
+STORE, 140446580281344, 140446580285439,
+ERASE, 140446580248576, 140446580277247,
+STORE, 94537489014784, 94537489149951,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140728766808064, 140737488351231,
+SNULL, 140728766816255, 140737488351231,
+STORE, 140728766808064, 140728766816255,
+STORE, 140728766676992, 140728766816255,
+STORE, 94418513866752, 94418516090879,
+SNULL, 94418513977343, 94418516090879,
+STORE, 94418513866752, 94418513977343,
+STORE, 94418513977344, 94418516090879,
+ERASE, 94418513977344, 94418516090879,
+STORE, 94418516070400, 94418516082687,
+STORE, 94418516082688, 94418516090879,
+STORE, 140556479520768, 140556481773567,
+SNULL, 140556479664127, 140556481773567,
+STORE, 140556479520768, 140556479664127,
+STORE, 140556479664128, 140556481773567,
+ERASE, 140556479664128, 140556481773567,
+STORE, 140556481761280, 140556481769471,
+STORE, 140556481769472, 140556481773567,
+STORE, 140728767148032, 140728767152127,
+STORE, 140728767135744, 140728767148031,
+STORE, 140556481732608, 140556481761279,
+STORE, 140556481724416, 140556481732607,
+STORE, 140556475723776, 140556479520767,
+SNULL, 140556475723776, 140556477382655,
+STORE, 140556477382656, 140556479520767,
+STORE, 140556475723776, 140556477382655,
+SNULL, 140556479479807, 140556479520767,
+STORE, 140556477382656, 140556479479807,
+STORE, 140556479479808, 140556479520767,
+SNULL, 140556479479808, 140556479504383,
+STORE, 140556479504384, 140556479520767,
+STORE, 140556479479808, 140556479504383,
+ERASE, 140556479479808, 140556479504383,
+STORE, 140556479479808, 140556479504383,
+ERASE, 140556479504384, 140556479520767,
+STORE, 140556479504384, 140556479520767,
+SNULL, 140556479496191, 140556479504383,
+STORE, 140556479479808, 140556479496191,
+STORE, 140556479496192, 140556479504383,
+SNULL, 94418516078591, 94418516082687,
+STORE, 94418516070400, 94418516078591,
+STORE, 94418516078592, 94418516082687,
+SNULL, 140556481765375, 140556481769471,
+STORE, 140556481761280, 140556481765375,
+STORE, 140556481765376, 140556481769471,
+ERASE, 140556481732608, 140556481761279,
+STORE, 94418541113344, 94418541248511,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140723945873408, 140737488351231,
+SNULL, 140723945881599, 140737488351231,
+STORE, 140723945873408, 140723945881599,
+STORE, 140723945742336, 140723945881599,
+STORE, 94543169773568, 94543171997695,
+SNULL, 94543169884159, 94543171997695,
+STORE, 94543169773568, 94543169884159,
+STORE, 94543169884160, 94543171997695,
+ERASE, 94543169884160, 94543171997695,
+STORE, 94543171977216, 94543171989503,
+STORE, 94543171989504, 94543171997695,
+STORE, 139890420883456, 139890423136255,
+SNULL, 139890421026815, 139890423136255,
+STORE, 139890420883456, 139890421026815,
+STORE, 139890421026816, 139890423136255,
+ERASE, 139890421026816, 139890423136255,
+STORE, 139890423123968, 139890423132159,
+STORE, 139890423132160, 139890423136255,
+STORE, 140723946102784, 140723946106879,
+STORE, 140723946090496, 140723946102783,
+STORE, 139890423095296, 139890423123967,
+STORE, 139890423087104, 139890423095295,
+STORE, 139890417086464, 139890420883455,
+SNULL, 139890417086464, 139890418745343,
+STORE, 139890418745344, 139890420883455,
+STORE, 139890417086464, 139890418745343,
+SNULL, 139890420842495, 139890420883455,
+STORE, 139890418745344, 139890420842495,
+STORE, 139890420842496, 139890420883455,
+SNULL, 139890420842496, 139890420867071,
+STORE, 139890420867072, 139890420883455,
+STORE, 139890420842496, 139890420867071,
+ERASE, 139890420842496, 139890420867071,
+STORE, 139890420842496, 139890420867071,
+ERASE, 139890420867072, 139890420883455,
+STORE, 139890420867072, 139890420883455,
+SNULL, 139890420858879, 139890420867071,
+STORE, 139890420842496, 139890420858879,
+STORE, 139890420858880, 139890420867071,
+SNULL, 94543171985407, 94543171989503,
+STORE, 94543171977216, 94543171985407,
+STORE, 94543171985408, 94543171989503,
+SNULL, 139890423128063, 139890423132159,
+STORE, 139890423123968, 139890423128063,
+STORE, 139890423128064, 139890423132159,
+ERASE, 139890423095296, 139890423123967,
+STORE, 94543197097984, 94543197233151,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140736205979648, 140737488351231,
+SNULL, 140736205987839, 140737488351231,
+STORE, 140736205979648, 140736205987839,
+STORE, 140736205848576, 140736205987839,
+STORE, 94913209913344, 94913212137471,
+SNULL, 94913210023935, 94913212137471,
+STORE, 94913209913344, 94913210023935,
+STORE, 94913210023936, 94913212137471,
+ERASE, 94913210023936, 94913212137471,
+STORE, 94913212116992, 94913212129279,
+STORE, 94913212129280, 94913212137471,
+STORE, 140006323052544, 140006325305343,
+SNULL, 140006323195903, 140006325305343,
+STORE, 140006323052544, 140006323195903,
+STORE, 140006323195904, 140006325305343,
+ERASE, 140006323195904, 140006325305343,
+STORE, 140006325293056, 140006325301247,
+STORE, 140006325301248, 140006325305343,
+STORE, 140736206716928, 140736206721023,
+STORE, 140736206704640, 140736206716927,
+STORE, 140006325264384, 140006325293055,
+STORE, 140006325256192, 140006325264383,
+STORE, 140006319255552, 140006323052543,
+SNULL, 140006319255552, 140006320914431,
+STORE, 140006320914432, 140006323052543,
+STORE, 140006319255552, 140006320914431,
+SNULL, 140006323011583, 140006323052543,
+STORE, 140006320914432, 140006323011583,
+STORE, 140006323011584, 140006323052543,
+SNULL, 140006323011584, 140006323036159,
+STORE, 140006323036160, 140006323052543,
+STORE, 140006323011584, 140006323036159,
+ERASE, 140006323011584, 140006323036159,
+STORE, 140006323011584, 140006323036159,
+ERASE, 140006323036160, 140006323052543,
+STORE, 140006323036160, 140006323052543,
+SNULL, 140006323027967, 140006323036159,
+STORE, 140006323011584, 140006323027967,
+STORE, 140006323027968, 140006323036159,
+SNULL, 94913212125183, 94913212129279,
+STORE, 94913212116992, 94913212125183,
+STORE, 94913212125184, 94913212129279,
+SNULL, 140006325297151, 140006325301247,
+STORE, 140006325293056, 140006325297151,
+STORE, 140006325297152, 140006325301247,
+ERASE, 140006325264384, 140006325293055,
+STORE, 94913239932928, 94913240068095,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140726926897152, 140737488351231,
+SNULL, 140726926905343, 140737488351231,
+STORE, 140726926897152, 140726926905343,
+STORE, 140726926766080, 140726926905343,
+STORE, 94213246820352, 94213249044479,
+SNULL, 94213246930943, 94213249044479,
+STORE, 94213246820352, 94213246930943,
+STORE, 94213246930944, 94213249044479,
+ERASE, 94213246930944, 94213249044479,
+STORE, 94213249024000, 94213249036287,
+STORE, 94213249036288, 94213249044479,
+STORE, 140368830242816, 140368832495615,
+SNULL, 140368830386175, 140368832495615,
+STORE, 140368830242816, 140368830386175,
+STORE, 140368830386176, 140368832495615,
+ERASE, 140368830386176, 140368832495615,
+STORE, 140368832483328, 140368832491519,
+STORE, 140368832491520, 140368832495615,
+STORE, 140726926999552, 140726927003647,
+STORE, 140726926987264, 140726926999551,
+STORE, 140368832454656, 140368832483327,
+STORE, 140368832446464, 140368832454655,
+STORE, 140368826445824, 140368830242815,
+SNULL, 140368826445824, 140368828104703,
+STORE, 140368828104704, 140368830242815,
+STORE, 140368826445824, 140368828104703,
+SNULL, 140368830201855, 140368830242815,
+STORE, 140368828104704, 140368830201855,
+STORE, 140368830201856, 140368830242815,
+SNULL, 140368830201856, 140368830226431,
+STORE, 140368830226432, 140368830242815,
+STORE, 140368830201856, 140368830226431,
+ERASE, 140368830201856, 140368830226431,
+STORE, 140368830201856, 140368830226431,
+ERASE, 140368830226432, 140368830242815,
+STORE, 140368830226432, 140368830242815,
+SNULL, 140368830218239, 140368830226431,
+STORE, 140368830201856, 140368830218239,
+STORE, 140368830218240, 140368830226431,
+SNULL, 94213249032191, 94213249036287,
+STORE, 94213249024000, 94213249032191,
+STORE, 94213249032192, 94213249036287,
+SNULL, 140368832487423, 140368832491519,
+STORE, 140368832483328, 140368832487423,
+STORE, 140368832487424, 140368832491519,
+ERASE, 140368832454656, 140368832483327,
+STORE, 94213267435520, 94213267570687,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140728954130432, 140737488351231,
+SNULL, 140728954138623, 140737488351231,
+STORE, 140728954130432, 140728954138623,
+STORE, 140728953999360, 140728954138623,
+STORE, 94672570966016, 94672573190143,
+SNULL, 94672571076607, 94672573190143,
+STORE, 94672570966016, 94672571076607,
+STORE, 94672571076608, 94672573190143,
+ERASE, 94672571076608, 94672573190143,
+STORE, 94672573169664, 94672573181951,
+STORE, 94672573181952, 94672573190143,
+STORE, 140201696735232, 140201698988031,
+SNULL, 140201696878591, 140201698988031,
+STORE, 140201696735232, 140201696878591,
+STORE, 140201696878592, 140201698988031,
+ERASE, 140201696878592, 140201698988031,
+STORE, 140201698975744, 140201698983935,
+STORE, 140201698983936, 140201698988031,
+STORE, 140728954163200, 140728954167295,
+STORE, 140728954150912, 140728954163199,
+STORE, 140201698947072, 140201698975743,
+STORE, 140201698938880, 140201698947071,
+STORE, 140201692938240, 140201696735231,
+SNULL, 140201692938240, 140201694597119,
+STORE, 140201694597120, 140201696735231,
+STORE, 140201692938240, 140201694597119,
+SNULL, 140201696694271, 140201696735231,
+STORE, 140201694597120, 140201696694271,
+STORE, 140201696694272, 140201696735231,
+SNULL, 140201696694272, 140201696718847,
+STORE, 140201696718848, 140201696735231,
+STORE, 140201696694272, 140201696718847,
+ERASE, 140201696694272, 140201696718847,
+STORE, 140201696694272, 140201696718847,
+ERASE, 140201696718848, 140201696735231,
+STORE, 140201696718848, 140201696735231,
+SNULL, 140201696710655, 140201696718847,
+STORE, 140201696694272, 140201696710655,
+STORE, 140201696710656, 140201696718847,
+SNULL, 94672573177855, 94672573181951,
+STORE, 94672573169664, 94672573177855,
+STORE, 94672573177856, 94672573181951,
+SNULL, 140201698979839, 140201698983935,
+STORE, 140201698975744, 140201698979839,
+STORE, 140201698979840, 140201698983935,
+ERASE, 140201698947072, 140201698975743,
+STORE, 94672595689472, 94672595824639,
+STORE, 94114394132480, 94114394345471,
+STORE, 94114396442624, 94114396446719,
+STORE, 94114396446720, 94114396454911,
+STORE, 94114396454912, 94114396467199,
+STORE, 94114421575680, 94114428256255,
+STORE, 139934313955328, 139934315614207,
+STORE, 139934315614208, 139934317711359,
+STORE, 139934317711360, 139934317727743,
+STORE, 139934317727744, 139934317735935,
+STORE, 139934317735936, 139934317752319,
+STORE, 139934317752320, 139934317764607,
+STORE, 139934317764608, 139934319857663,
+STORE, 139934319857664, 139934319861759,
+STORE, 139934319861760, 139934319865855,
+STORE, 139934319865856, 139934320009215,
+STORE, 139934320377856, 139934322061311,
+STORE, 139934322061312, 139934322077695,
+STORE, 139934322106368, 139934322110463,
+STORE, 139934322110464, 139934322114559,
+STORE, 139934322114560, 139934322118655,
+STORE, 140731200376832, 140731200516095,
+STORE, 140731200929792, 140731200942079,
+STORE, 140731200942080, 140731200946175,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140721532362752, 140737488351231,
+SNULL, 140721532370943, 140737488351231,
+STORE, 140721532362752, 140721532370943,
+STORE, 140721532231680, 140721532370943,
+STORE, 94467222597632, 94467224821759,
+SNULL, 94467222708223, 94467224821759,
+STORE, 94467222597632, 94467222708223,
+STORE, 94467222708224, 94467224821759,
+ERASE, 94467222708224, 94467224821759,
+STORE, 94467224801280, 94467224813567,
+STORE, 94467224813568, 94467224821759,
+STORE, 140191433543680, 140191435796479,
+SNULL, 140191433687039, 140191435796479,
+STORE, 140191433543680, 140191433687039,
+STORE, 140191433687040, 140191435796479,
+ERASE, 140191433687040, 140191435796479,
+STORE, 140191435784192, 140191435792383,
+STORE, 140191435792384, 140191435796479,
+STORE, 140721533034496, 140721533038591,
+STORE, 140721533022208, 140721533034495,
+STORE, 140191435755520, 140191435784191,
+STORE, 140191435747328, 140191435755519,
+STORE, 140191429746688, 140191433543679,
+SNULL, 140191429746688, 140191431405567,
+STORE, 140191431405568, 140191433543679,
+STORE, 140191429746688, 140191431405567,
+SNULL, 140191433502719, 140191433543679,
+STORE, 140191431405568, 140191433502719,
+STORE, 140191433502720, 140191433543679,
+SNULL, 140191433502720, 140191433527295,
+STORE, 140191433527296, 140191433543679,
+STORE, 140191433502720, 140191433527295,
+ERASE, 140191433502720, 140191433527295,
+STORE, 140191433502720, 140191433527295,
+ERASE, 140191433527296, 140191433543679,
+STORE, 140191433527296, 140191433543679,
+SNULL, 140191433519103, 140191433527295,
+STORE, 140191433502720, 140191433519103,
+STORE, 140191433519104, 140191433527295,
+SNULL, 94467224809471, 94467224813567,
+STORE, 94467224801280, 94467224809471,
+STORE, 94467224809472, 94467224813567,
+SNULL, 140191435788287, 140191435792383,
+STORE, 140191435784192, 140191435788287,
+STORE, 140191435788288, 140191435792383,
+ERASE, 140191435755520, 140191435784191,
+STORE, 94467251847168, 94467251982335,
+STORE, 94367895400448, 94367895613439,
+STORE, 94367897710592, 94367897714687,
+STORE, 94367897714688, 94367897722879,
+STORE, 94367897722880, 94367897735167,
+STORE, 94367925264384, 94367926861823,
+STORE, 139801317548032, 139801319206911,
+STORE, 139801319206912, 139801321304063,
+STORE, 139801321304064, 139801321320447,
+STORE, 139801321320448, 139801321328639,
+STORE, 139801321328640, 139801321345023,
+STORE, 139801321345024, 139801321357311,
+STORE, 139801321357312, 139801323450367,
+STORE, 139801323450368, 139801323454463,
+STORE, 139801323454464, 139801323458559,
+STORE, 139801323458560, 139801323601919,
+STORE, 139801323970560, 139801325654015,
+STORE, 139801325654016, 139801325670399,
+STORE, 139801325699072, 139801325703167,
+STORE, 139801325703168, 139801325707263,
+STORE, 139801325707264, 139801325711359,
+STORE, 140724442861568, 140724443000831,
+STORE, 140724443611136, 140724443623423,
+STORE, 140724443623424, 140724443627519,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140731353149440, 140737488351231,
+SNULL, 140731353157631, 140737488351231,
+STORE, 140731353149440, 140731353157631,
+STORE, 140731353018368, 140731353157631,
+STORE, 94310379503616, 94310381838335,
+SNULL, 94310379716607, 94310381838335,
+STORE, 94310379503616, 94310379716607,
+STORE, 94310379716608, 94310381838335,
+ERASE, 94310379716608, 94310381838335,
+STORE, 94310381813760, 94310381826047,
+STORE, 94310381826048, 94310381838335,
+STORE, 140515434659840, 140515436912639,
+SNULL, 140515434803199, 140515436912639,
+STORE, 140515434659840, 140515434803199,
+STORE, 140515434803200, 140515436912639,
+ERASE, 140515434803200, 140515436912639,
+STORE, 140515436900352, 140515436908543,
+STORE, 140515436908544, 140515436912639,
+STORE, 140731353886720, 140731353890815,
+STORE, 140731353874432, 140731353886719,
+STORE, 140515436871680, 140515436900351,
+STORE, 140515436863488, 140515436871679,
+STORE, 140515432546304, 140515434659839,
+SNULL, 140515432546304, 140515432558591,
+STORE, 140515432558592, 140515434659839,
+STORE, 140515432546304, 140515432558591,
+SNULL, 140515434651647, 140515434659839,
+STORE, 140515432558592, 140515434651647,
+STORE, 140515434651648, 140515434659839,
+ERASE, 140515434651648, 140515434659839,
+STORE, 140515434651648, 140515434659839,
+STORE, 140515428749312, 140515432546303,
+SNULL, 140515428749312, 140515430408191,
+STORE, 140515430408192, 140515432546303,
+STORE, 140515428749312, 140515430408191,
+SNULL, 140515432505343, 140515432546303,
+STORE, 140515430408192, 140515432505343,
+STORE, 140515432505344, 140515432546303,
+SNULL, 140515432505344, 140515432529919,
+STORE, 140515432529920, 140515432546303,
+STORE, 140515432505344, 140515432529919,
+ERASE, 140515432505344, 140515432529919,
+STORE, 140515432505344, 140515432529919,
+ERASE, 140515432529920, 140515432546303,
+STORE, 140515432529920, 140515432546303,
+STORE, 140515436855296, 140515436871679,
+SNULL, 140515432521727, 140515432529919,
+STORE, 140515432505344, 140515432521727,
+STORE, 140515432521728, 140515432529919,
+SNULL, 140515434655743, 140515434659839,
+STORE, 140515434651648, 140515434655743,
+STORE, 140515434655744, 140515434659839,
+SNULL, 94310381817855, 94310381826047,
+STORE, 94310381813760, 94310381817855,
+STORE, 94310381817856, 94310381826047,
+SNULL, 140515436904447, 140515436908543,
+STORE, 140515436900352, 140515436904447,
+STORE, 140515436904448, 140515436908543,
+ERASE, 140515436871680, 140515436900351,
+STORE, 94310395457536, 94310395592703,
+STORE, 140515435171840, 140515436855295,
+STORE, 94310395457536, 94310395727871,
+STORE, 94310395457536, 94310395863039,
+STORE, 94310395457536, 94310396047359,
+SNULL, 94310396022783, 94310396047359,
+STORE, 94310395457536, 94310396022783,
+STORE, 94310396022784, 94310396047359,
+ERASE, 94310396022784, 94310396047359,
+STORE, 94310395457536, 94310396157951,
+STORE, 94310395457536, 94310396293119,
+SNULL, 94310396276735, 94310396293119,
+STORE, 94310395457536, 94310396276735,
+STORE, 94310396276736, 94310396293119,
+ERASE, 94310396276736, 94310396293119,
+STORE, 94310395457536, 94310396411903,
+SNULL, 94310396383231, 94310396411903,
+STORE, 94310395457536, 94310396383231,
+STORE, 94310396383232, 94310396411903,
+ERASE, 94310396383232, 94310396411903,
+STORE, 94310395457536, 94310396522495,
+STORE, 94310395457536, 94310396674047,
+SNULL, 94310396657663, 94310396674047,
+STORE, 94310395457536, 94310396657663,
+STORE, 94310396657664, 94310396674047,
+ERASE, 94310396657664, 94310396674047,
+SNULL, 94310396624895, 94310396657663,
+STORE, 94310395457536, 94310396624895,
+STORE, 94310396624896, 94310396657663,
+ERASE, 94310396624896, 94310396657663,
+STORE, 94310395457536, 94310396776447,
+SNULL, 94310396764159, 94310396776447,
+STORE, 94310395457536, 94310396764159,
+STORE, 94310396764160, 94310396776447,
+ERASE, 94310396764160, 94310396776447,
+SNULL, 94310396739583, 94310396764159,
+STORE, 94310395457536, 94310396739583,
+STORE, 94310396739584, 94310396764159,
+ERASE, 94310396739584, 94310396764159,
+STORE, 94310395457536, 94310396882943,
+STORE, 94310395457536, 94310397018111,
+STORE, 94310395457536, 94310397161471,
+STORE, 94310395457536, 94310397300735,
+SNULL, 94310397292543, 94310397300735,
+STORE, 94310395457536, 94310397292543,
+STORE, 94310397292544, 94310397300735,
+ERASE, 94310397292544, 94310397300735,
+STORE, 94359222210560, 94359222423551,
+STORE, 94359224520704, 94359224524799,
+STORE, 94359224524800, 94359224532991,
+STORE, 94359224532992, 94359224545279,
+STORE, 94359238348800, 94359239385087,
+STORE, 140675699838976, 140675701497855,
+STORE, 140675701497856, 140675703595007,
+STORE, 140675703595008, 140675703611391,
+STORE, 140675703611392, 140675703619583,
+STORE, 140675703619584, 140675703635967,
+STORE, 140675703635968, 140675703648255,
+STORE, 140675703648256, 140675705741311,
+STORE, 140675705741312, 140675705745407,
+STORE, 140675705745408, 140675705749503,
+STORE, 140675705749504, 140675705892863,
+STORE, 140675706261504, 140675707944959,
+STORE, 140675707944960, 140675707961343,
+STORE, 140675707990016, 140675707994111,
+STORE, 140675707994112, 140675707998207,
+STORE, 140675707998208, 140675708002303,
+STORE, 140721324634112, 140721324773375,
+STORE, 140721324810240, 140721324822527,
+STORE, 140721324822528, 140721324826623,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140724099678208, 140737488351231,
+SNULL, 140724099686399, 140737488351231,
+STORE, 140724099678208, 140724099686399,
+STORE, 140724099547136, 140724099686399,
+STORE, 94586638516224, 94586640850943,
+SNULL, 94586638729215, 94586640850943,
+STORE, 94586638516224, 94586638729215,
+STORE, 94586638729216, 94586640850943,
+ERASE, 94586638729216, 94586640850943,
+STORE, 94586640826368, 94586640838655,
+STORE, 94586640838656, 94586640850943,
+STORE, 140371033796608, 140371036049407,
+SNULL, 140371033939967, 140371036049407,
+STORE, 140371033796608, 140371033939967,
+STORE, 140371033939968, 140371036049407,
+ERASE, 140371033939968, 140371036049407,
+STORE, 140371036037120, 140371036045311,
+STORE, 140371036045312, 140371036049407,
+STORE, 140724100001792, 140724100005887,
+STORE, 140724099989504, 140724100001791,
+STORE, 140371036008448, 140371036037119,
+STORE, 140371036000256, 140371036008447,
+STORE, 140371031683072, 140371033796607,
+SNULL, 140371031683072, 140371031695359,
+STORE, 140371031695360, 140371033796607,
+STORE, 140371031683072, 140371031695359,
+SNULL, 140371033788415, 140371033796607,
+STORE, 140371031695360, 140371033788415,
+STORE, 140371033788416, 140371033796607,
+ERASE, 140371033788416, 140371033796607,
+STORE, 140371033788416, 140371033796607,
+STORE, 140371027886080, 140371031683071,
+SNULL, 140371027886080, 140371029544959,
+STORE, 140371029544960, 140371031683071,
+STORE, 140371027886080, 140371029544959,
+SNULL, 140371031642111, 140371031683071,
+STORE, 140371029544960, 140371031642111,
+STORE, 140371031642112, 140371031683071,
+SNULL, 140371031642112, 140371031666687,
+STORE, 140371031666688, 140371031683071,
+STORE, 140371031642112, 140371031666687,
+ERASE, 140371031642112, 140371031666687,
+STORE, 140371031642112, 140371031666687,
+ERASE, 140371031666688, 140371031683071,
+STORE, 140371031666688, 140371031683071,
+STORE, 140371035992064, 140371036008447,
+SNULL, 140371031658495, 140371031666687,
+STORE, 140371031642112, 140371031658495,
+STORE, 140371031658496, 140371031666687,
+SNULL, 140371033792511, 140371033796607,
+STORE, 140371033788416, 140371033792511,
+STORE, 140371033792512, 140371033796607,
+SNULL, 94586640830463, 94586640838655,
+STORE, 94586640826368, 94586640830463,
+STORE, 94586640830464, 94586640838655,
+SNULL, 140371036041215, 140371036045311,
+STORE, 140371036037120, 140371036041215,
+STORE, 140371036041216, 140371036045311,
+ERASE, 140371036008448, 140371036037119,
+STORE, 94586663849984, 94586663985151,
+STORE, 140371034308608, 140371035992063,
+STORE, 94586663849984, 94586664120319,
+STORE, 94586663849984, 94586664255487,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140727532937216, 140737488351231,
+SNULL, 140727532945407, 140737488351231,
+STORE, 140727532937216, 140727532945407,
+STORE, 140727532806144, 140727532945407,
+STORE, 94849780191232, 94849782525951,
+SNULL, 94849780404223, 94849782525951,
+STORE, 94849780191232, 94849780404223,
+STORE, 94849780404224, 94849782525951,
+ERASE, 94849780404224, 94849782525951,
+STORE, 94849782501376, 94849782513663,
+STORE, 94849782513664, 94849782525951,
+STORE, 140382070218752, 140382072471551,
+SNULL, 140382070362111, 140382072471551,
+STORE, 140382070218752, 140382070362111,
+STORE, 140382070362112, 140382072471551,
+ERASE, 140382070362112, 140382072471551,
+STORE, 140382072459264, 140382072467455,
+STORE, 140382072467456, 140382072471551,
+STORE, 140727533092864, 140727533096959,
+STORE, 140727533080576, 140727533092863,
+STORE, 140382072430592, 140382072459263,
+STORE, 140382072422400, 140382072430591,
+STORE, 140382068105216, 140382070218751,
+SNULL, 140382068105216, 140382068117503,
+STORE, 140382068117504, 140382070218751,
+STORE, 140382068105216, 140382068117503,
+SNULL, 140382070210559, 140382070218751,
+STORE, 140382068117504, 140382070210559,
+STORE, 140382070210560, 140382070218751,
+ERASE, 140382070210560, 140382070218751,
+STORE, 140382070210560, 140382070218751,
+STORE, 140382064308224, 140382068105215,
+SNULL, 140382064308224, 140382065967103,
+STORE, 140382065967104, 140382068105215,
+STORE, 140382064308224, 140382065967103,
+SNULL, 140382068064255, 140382068105215,
+STORE, 140382065967104, 140382068064255,
+STORE, 140382068064256, 140382068105215,
+SNULL, 140382068064256, 140382068088831,
+STORE, 140382068088832, 140382068105215,
+STORE, 140382068064256, 140382068088831,
+ERASE, 140382068064256, 140382068088831,
+STORE, 140382068064256, 140382068088831,
+ERASE, 140382068088832, 140382068105215,
+STORE, 140382068088832, 140382068105215,
+STORE, 140382072414208, 140382072430591,
+SNULL, 140382068080639, 140382068088831,
+STORE, 140382068064256, 140382068080639,
+STORE, 140382068080640, 140382068088831,
+SNULL, 140382070214655, 140382070218751,
+STORE, 140382070210560, 140382070214655,
+STORE, 140382070214656, 140382070218751,
+SNULL, 94849782505471, 94849782513663,
+STORE, 94849782501376, 94849782505471,
+STORE, 94849782505472, 94849782513663,
+SNULL, 140382072463359, 140382072467455,
+STORE, 140382072459264, 140382072463359,
+STORE, 140382072463360, 140382072467455,
+ERASE, 140382072430592, 140382072459263,
+STORE, 94849782845440, 94849782980607,
+STORE, 140382070730752, 140382072414207,
+STORE, 94849782845440, 94849783115775,
+STORE, 94849782845440, 94849783250943,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722594377728, 140737488351231,
+SNULL, 140722594385919, 140737488351231,
+STORE, 140722594377728, 140722594385919,
+STORE, 140722594246656, 140722594385919,
+STORE, 94421466353664, 94421468577791,
+SNULL, 94421466464255, 94421468577791,
+STORE, 94421466353664, 94421466464255,
+STORE, 94421466464256, 94421468577791,
+ERASE, 94421466464256, 94421468577791,
+STORE, 94421468557312, 94421468569599,
+STORE, 94421468569600, 94421468577791,
+STORE, 140345458057216, 140345460310015,
+SNULL, 140345458200575, 140345460310015,
+STORE, 140345458057216, 140345458200575,
+STORE, 140345458200576, 140345460310015,
+ERASE, 140345458200576, 140345460310015,
+STORE, 140345460297728, 140345460305919,
+STORE, 140345460305920, 140345460310015,
+STORE, 140722595557376, 140722595561471,
+STORE, 140722595545088, 140722595557375,
+STORE, 140345460269056, 140345460297727,
+STORE, 140345460260864, 140345460269055,
+STORE, 140345454260224, 140345458057215,
+SNULL, 140345454260224, 140345455919103,
+STORE, 140345455919104, 140345458057215,
+STORE, 140345454260224, 140345455919103,
+SNULL, 140345458016255, 140345458057215,
+STORE, 140345455919104, 140345458016255,
+STORE, 140345458016256, 140345458057215,
+SNULL, 140345458016256, 140345458040831,
+STORE, 140345458040832, 140345458057215,
+STORE, 140345458016256, 140345458040831,
+ERASE, 140345458016256, 140345458040831,
+STORE, 140345458016256, 140345458040831,
+ERASE, 140345458040832, 140345458057215,
+STORE, 140345458040832, 140345458057215,
+SNULL, 140345458032639, 140345458040831,
+STORE, 140345458016256, 140345458032639,
+STORE, 140345458032640, 140345458040831,
+SNULL, 94421468565503, 94421468569599,
+STORE, 94421468557312, 94421468565503,
+STORE, 94421468565504, 94421468569599,
+SNULL, 140345460301823, 140345460305919,
+STORE, 140345460297728, 140345460301823,
+STORE, 140345460301824, 140345460305919,
+ERASE, 140345460269056, 140345460297727,
+STORE, 94421496004608, 94421496139775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140726096302080, 140737488351231,
+SNULL, 140726096310271, 140737488351231,
+STORE, 140726096302080, 140726096310271,
+STORE, 140726096171008, 140726096310271,
+STORE, 94101992124416, 94101994459135,
+SNULL, 94101992337407, 94101994459135,
+STORE, 94101992124416, 94101992337407,
+STORE, 94101992337408, 94101994459135,
+ERASE, 94101992337408, 94101994459135,
+STORE, 94101994434560, 94101994446847,
+STORE, 94101994446848, 94101994459135,
+STORE, 140192085594112, 140192087846911,
+SNULL, 140192085737471, 140192087846911,
+STORE, 140192085594112, 140192085737471,
+STORE, 140192085737472, 140192087846911,
+ERASE, 140192085737472, 140192087846911,
+STORE, 140192087834624, 140192087842815,
+STORE, 140192087842816, 140192087846911,
+STORE, 140726096375808, 140726096379903,
+STORE, 140726096363520, 140726096375807,
+STORE, 140192087805952, 140192087834623,
+STORE, 140192087797760, 140192087805951,
+STORE, 140192083480576, 140192085594111,
+SNULL, 140192083480576, 140192083492863,
+STORE, 140192083492864, 140192085594111,
+STORE, 140192083480576, 140192083492863,
+SNULL, 140192085585919, 140192085594111,
+STORE, 140192083492864, 140192085585919,
+STORE, 140192085585920, 140192085594111,
+ERASE, 140192085585920, 140192085594111,
+STORE, 140192085585920, 140192085594111,
+STORE, 140192079683584, 140192083480575,
+SNULL, 140192079683584, 140192081342463,
+STORE, 140192081342464, 140192083480575,
+STORE, 140192079683584, 140192081342463,
+SNULL, 140192083439615, 140192083480575,
+STORE, 140192081342464, 140192083439615,
+STORE, 140192083439616, 140192083480575,
+SNULL, 140192083439616, 140192083464191,
+STORE, 140192083464192, 140192083480575,
+STORE, 140192083439616, 140192083464191,
+ERASE, 140192083439616, 140192083464191,
+STORE, 140192083439616, 140192083464191,
+ERASE, 140192083464192, 140192083480575,
+STORE, 140192083464192, 140192083480575,
+STORE, 140192087789568, 140192087805951,
+SNULL, 140192083455999, 140192083464191,
+STORE, 140192083439616, 140192083455999,
+STORE, 140192083456000, 140192083464191,
+SNULL, 140192085590015, 140192085594111,
+STORE, 140192085585920, 140192085590015,
+STORE, 140192085590016, 140192085594111,
+SNULL, 94101994438655, 94101994446847,
+STORE, 94101994434560, 94101994438655,
+STORE, 94101994438656, 94101994446847,
+SNULL, 140192087838719, 140192087842815,
+STORE, 140192087834624, 140192087838719,
+STORE, 140192087838720, 140192087842815,
+ERASE, 140192087805952, 140192087834623,
+STORE, 94102011887616, 94102012022783,
+STORE, 140192086106112, 140192087789567,
+STORE, 94102011887616, 94102012157951,
+STORE, 94102011887616, 94102012293119,
+STORE, 94102011887616, 94102012440575,
+SNULL, 94102012428287, 94102012440575,
+STORE, 94102011887616, 94102012428287,
+STORE, 94102012428288, 94102012440575,
+ERASE, 94102012428288, 94102012440575,
+STORE, 94102011887616, 94102012579839,
+STORE, 94102011887616, 94102012715007,
+SNULL, 94102012694527, 94102012715007,
+STORE, 94102011887616, 94102012694527,
+STORE, 94102012694528, 94102012715007,
+ERASE, 94102012694528, 94102012715007,
+STORE, 94102011887616, 94102012833791,
+STORE, 94102011887616, 94102012968959,
+SNULL, 94102012927999, 94102012968959,
+STORE, 94102011887616, 94102012927999,
+STORE, 94102012928000, 94102012968959,
+ERASE, 94102012928000, 94102012968959,
+STORE, 94102011887616, 94102013091839,
+SNULL, 94102013075455, 94102013091839,
+STORE, 94102011887616, 94102013075455,
+STORE, 94102013075456, 94102013091839,
+ERASE, 94102013075456, 94102013091839,
+STORE, 94102011887616, 94102013210623,
+STORE, 94102011887616, 94102013345791,
+STORE, 93968727965696, 93968728178687,
+STORE, 93968730275840, 93968730279935,
+STORE, 93968730279936, 93968730288127,
+STORE, 93968730288128, 93968730300415,
+STORE, 93968731140096, 93968732704767,
+STORE, 140588443168768, 140588444827647,
+STORE, 140588444827648, 140588446924799,
+STORE, 140588446924800, 140588446941183,
+STORE, 140588446941184, 140588446949375,
+STORE, 140588446949376, 140588446965759,
+STORE, 140588446965760, 140588446978047,
+STORE, 140588446978048, 140588449071103,
+STORE, 140588449071104, 140588449075199,
+STORE, 140588449075200, 140588449079295,
+STORE, 140588449079296, 140588449222655,
+STORE, 140588449591296, 140588451274751,
+STORE, 140588451274752, 140588451291135,
+STORE, 140588451319808, 140588451323903,
+STORE, 140588451323904, 140588451327999,
+STORE, 140588451328000, 140588451332095,
+STORE, 140733877239808, 140733877379071,
+STORE, 140733878702080, 140733878714367,
+STORE, 140733878714368, 140733878718463,
+STORE, 93968727965696, 93968728178687,
+STORE, 93968730275840, 93968730279935,
+STORE, 93968730279936, 93968730288127,
+STORE, 93968730288128, 93968730300415,
+STORE, 93968731140096, 93968732991487,
+STORE, 140588443168768, 140588444827647,
+STORE, 140588444827648, 140588446924799,
+STORE, 140588446924800, 140588446941183,
+STORE, 140588446941184, 140588446949375,
+STORE, 140588446949376, 140588446965759,
+STORE, 140588446965760, 140588446978047,
+STORE, 140588446978048, 140588449071103,
+STORE, 140588449071104, 140588449075199,
+STORE, 140588449075200, 140588449079295,
+STORE, 140588449079296, 140588449222655,
+STORE, 140588449591296, 140588451274751,
+STORE, 140588451274752, 140588451291135,
+STORE, 140588451319808, 140588451323903,
+STORE, 140588451323904, 140588451327999,
+STORE, 140588451328000, 140588451332095,
+STORE, 140733877239808, 140733877379071,
+STORE, 140733878702080, 140733878714367,
+STORE, 140733878714368, 140733878718463,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733054472192, 140737488351231,
+SNULL, 140733054480383, 140737488351231,
+STORE, 140733054472192, 140733054480383,
+STORE, 140733054341120, 140733054480383,
+STORE, 93992873623552, 93992875847679,
+SNULL, 93992873734143, 93992875847679,
+STORE, 93992873623552, 93992873734143,
+STORE, 93992873734144, 93992875847679,
+ERASE, 93992873734144, 93992875847679,
+STORE, 93992875827200, 93992875839487,
+STORE, 93992875839488, 93992875847679,
+STORE, 139790881488896, 139790883741695,
+SNULL, 139790881632255, 139790883741695,
+STORE, 139790881488896, 139790881632255,
+STORE, 139790881632256, 139790883741695,
+ERASE, 139790881632256, 139790883741695,
+STORE, 139790883729408, 139790883737599,
+STORE, 139790883737600, 139790883741695,
+STORE, 140733054754816, 140733054758911,
+STORE, 140733054742528, 140733054754815,
+STORE, 139790883700736, 139790883729407,
+STORE, 139790883692544, 139790883700735,
+STORE, 139790877691904, 139790881488895,
+SNULL, 139790877691904, 139790879350783,
+STORE, 139790879350784, 139790881488895,
+STORE, 139790877691904, 139790879350783,
+SNULL, 139790881447935, 139790881488895,
+STORE, 139790879350784, 139790881447935,
+STORE, 139790881447936, 139790881488895,
+SNULL, 139790881447936, 139790881472511,
+STORE, 139790881472512, 139790881488895,
+STORE, 139790881447936, 139790881472511,
+ERASE, 139790881447936, 139790881472511,
+STORE, 139790881447936, 139790881472511,
+ERASE, 139790881472512, 139790881488895,
+STORE, 139790881472512, 139790881488895,
+SNULL, 139790881464319, 139790881472511,
+STORE, 139790881447936, 139790881464319,
+STORE, 139790881464320, 139790881472511,
+SNULL, 93992875835391, 93992875839487,
+STORE, 93992875827200, 93992875835391,
+STORE, 93992875835392, 93992875839487,
+SNULL, 139790883733503, 139790883737599,
+STORE, 139790883729408, 139790883733503,
+STORE, 139790883733504, 139790883737599,
+ERASE, 139790883700736, 139790883729407,
+STORE, 93992877031424, 93992877166591,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140728550887424, 140737488351231,
+SNULL, 140728550895615, 140737488351231,
+STORE, 140728550887424, 140728550895615,
+STORE, 140728550756352, 140728550895615,
+STORE, 94707634077696, 94707636301823,
+SNULL, 94707634188287, 94707636301823,
+STORE, 94707634077696, 94707634188287,
+STORE, 94707634188288, 94707636301823,
+ERASE, 94707634188288, 94707636301823,
+STORE, 94707636281344, 94707636293631,
+STORE, 94707636293632, 94707636301823,
+STORE, 140553545666560, 140553547919359,
+SNULL, 140553545809919, 140553547919359,
+STORE, 140553545666560, 140553545809919,
+STORE, 140553545809920, 140553547919359,
+ERASE, 140553545809920, 140553547919359,
+STORE, 140553547907072, 140553547915263,
+STORE, 140553547915264, 140553547919359,
+STORE, 140728552374272, 140728552378367,
+STORE, 140728552361984, 140728552374271,
+STORE, 140553547878400, 140553547907071,
+STORE, 140553547870208, 140553547878399,
+STORE, 140553541869568, 140553545666559,
+SNULL, 140553541869568, 140553543528447,
+STORE, 140553543528448, 140553545666559,
+STORE, 140553541869568, 140553543528447,
+SNULL, 140553545625599, 140553545666559,
+STORE, 140553543528448, 140553545625599,
+STORE, 140553545625600, 140553545666559,
+SNULL, 140553545625600, 140553545650175,
+STORE, 140553545650176, 140553545666559,
+STORE, 140553545625600, 140553545650175,
+ERASE, 140553545625600, 140553545650175,
+STORE, 140553545625600, 140553545650175,
+ERASE, 140553545650176, 140553545666559,
+STORE, 140553545650176, 140553545666559,
+SNULL, 140553545641983, 140553545650175,
+STORE, 140553545625600, 140553545641983,
+STORE, 140553545641984, 140553545650175,
+SNULL, 94707636289535, 94707636293631,
+STORE, 94707636281344, 94707636289535,
+STORE, 94707636289536, 94707636293631,
+SNULL, 140553547911167, 140553547915263,
+STORE, 140553547907072, 140553547911167,
+STORE, 140553547911168, 140553547915263,
+ERASE, 140553547878400, 140553547907071,
+STORE, 94707651411968, 94707651547135,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140732168695808, 140737488351231,
+SNULL, 140732168703999, 140737488351231,
+STORE, 140732168695808, 140732168703999,
+STORE, 140732168564736, 140732168703999,
+STORE, 94454287859712, 94454290083839,
+SNULL, 94454287970303, 94454290083839,
+STORE, 94454287859712, 94454287970303,
+STORE, 94454287970304, 94454290083839,
+ERASE, 94454287970304, 94454290083839,
+STORE, 94454290063360, 94454290075647,
+STORE, 94454290075648, 94454290083839,
+STORE, 140564947107840, 140564949360639,
+SNULL, 140564947251199, 140564949360639,
+STORE, 140564947107840, 140564947251199,
+STORE, 140564947251200, 140564949360639,
+ERASE, 140564947251200, 140564949360639,
+STORE, 140564949348352, 140564949356543,
+STORE, 140564949356544, 140564949360639,
+STORE, 140732168843264, 140732168847359,
+STORE, 140732168830976, 140732168843263,
+STORE, 140564949319680, 140564949348351,
+STORE, 140564949311488, 140564949319679,
+STORE, 140564943310848, 140564947107839,
+SNULL, 140564943310848, 140564944969727,
+STORE, 140564944969728, 140564947107839,
+STORE, 140564943310848, 140564944969727,
+SNULL, 140564947066879, 140564947107839,
+STORE, 140564944969728, 140564947066879,
+STORE, 140564947066880, 140564947107839,
+SNULL, 140564947066880, 140564947091455,
+STORE, 140564947091456, 140564947107839,
+STORE, 140564947066880, 140564947091455,
+ERASE, 140564947066880, 140564947091455,
+STORE, 140564947066880, 140564947091455,
+ERASE, 140564947091456, 140564947107839,
+STORE, 140564947091456, 140564947107839,
+SNULL, 140564947083263, 140564947091455,
+STORE, 140564947066880, 140564947083263,
+STORE, 140564947083264, 140564947091455,
+SNULL, 94454290071551, 94454290075647,
+STORE, 94454290063360, 94454290071551,
+STORE, 94454290071552, 94454290075647,
+SNULL, 140564949352447, 140564949356543,
+STORE, 140564949348352, 140564949352447,
+STORE, 140564949352448, 140564949356543,
+ERASE, 140564949319680, 140564949348351,
+STORE, 94454316236800, 94454316371967,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735155617792, 140737488351231,
+SNULL, 140735155625983, 140737488351231,
+STORE, 140735155617792, 140735155625983,
+STORE, 140735155486720, 140735155625983,
+STORE, 93915969556480, 93915971780607,
+SNULL, 93915969667071, 93915971780607,
+STORE, 93915969556480, 93915969667071,
+STORE, 93915969667072, 93915971780607,
+ERASE, 93915969667072, 93915971780607,
+STORE, 93915971760128, 93915971772415,
+STORE, 93915971772416, 93915971780607,
+STORE, 140141164605440, 140141166858239,
+SNULL, 140141164748799, 140141166858239,
+STORE, 140141164605440, 140141164748799,
+STORE, 140141164748800, 140141166858239,
+ERASE, 140141164748800, 140141166858239,
+STORE, 140141166845952, 140141166854143,
+STORE, 140141166854144, 140141166858239,
+STORE, 140735155691520, 140735155695615,
+STORE, 140735155679232, 140735155691519,
+STORE, 140141166817280, 140141166845951,
+STORE, 140141166809088, 140141166817279,
+STORE, 140141160808448, 140141164605439,
+SNULL, 140141160808448, 140141162467327,
+STORE, 140141162467328, 140141164605439,
+STORE, 140141160808448, 140141162467327,
+SNULL, 140141164564479, 140141164605439,
+STORE, 140141162467328, 140141164564479,
+STORE, 140141164564480, 140141164605439,
+SNULL, 140141164564480, 140141164589055,
+STORE, 140141164589056, 140141164605439,
+STORE, 140141164564480, 140141164589055,
+ERASE, 140141164564480, 140141164589055,
+STORE, 140141164564480, 140141164589055,
+ERASE, 140141164589056, 140141164605439,
+STORE, 140141164589056, 140141164605439,
+SNULL, 140141164580863, 140141164589055,
+STORE, 140141164564480, 140141164580863,
+STORE, 140141164580864, 140141164589055,
+SNULL, 93915971768319, 93915971772415,
+STORE, 93915971760128, 93915971768319,
+STORE, 93915971768320, 93915971772415,
+SNULL, 140141166850047, 140141166854143,
+STORE, 140141166845952, 140141166850047,
+STORE, 140141166850048, 140141166854143,
+ERASE, 140141166817280, 140141166845951,
+STORE, 93916002775040, 93916002910207,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140728988409856, 140737488351231,
+SNULL, 140728988418047, 140737488351231,
+STORE, 140728988409856, 140728988418047,
+STORE, 140728988278784, 140728988418047,
+STORE, 94021634813952, 94021637038079,
+SNULL, 94021634924543, 94021637038079,
+STORE, 94021634813952, 94021634924543,
+STORE, 94021634924544, 94021637038079,
+ERASE, 94021634924544, 94021637038079,
+STORE, 94021637017600, 94021637029887,
+STORE, 94021637029888, 94021637038079,
+STORE, 140638014038016, 140638016290815,
+SNULL, 140638014181375, 140638016290815,
+STORE, 140638014038016, 140638014181375,
+STORE, 140638014181376, 140638016290815,
+ERASE, 140638014181376, 140638016290815,
+STORE, 140638016278528, 140638016286719,
+STORE, 140638016286720, 140638016290815,
+STORE, 140728988536832, 140728988540927,
+STORE, 140728988524544, 140728988536831,
+STORE, 140638016249856, 140638016278527,
+STORE, 140638016241664, 140638016249855,
+STORE, 140638010241024, 140638014038015,
+SNULL, 140638010241024, 140638011899903,
+STORE, 140638011899904, 140638014038015,
+STORE, 140638010241024, 140638011899903,
+SNULL, 140638013997055, 140638014038015,
+STORE, 140638011899904, 140638013997055,
+STORE, 140638013997056, 140638014038015,
+SNULL, 140638013997056, 140638014021631,
+STORE, 140638014021632, 140638014038015,
+STORE, 140638013997056, 140638014021631,
+ERASE, 140638013997056, 140638014021631,
+STORE, 140638013997056, 140638014021631,
+ERASE, 140638014021632, 140638014038015,
+STORE, 140638014021632, 140638014038015,
+SNULL, 140638014013439, 140638014021631,
+STORE, 140638013997056, 140638014013439,
+STORE, 140638014013440, 140638014021631,
+SNULL, 94021637025791, 94021637029887,
+STORE, 94021637017600, 94021637025791,
+STORE, 94021637025792, 94021637029887,
+SNULL, 140638016282623, 140638016286719,
+STORE, 140638016278528, 140638016282623,
+STORE, 140638016282624, 140638016286719,
+ERASE, 140638016249856, 140638016278527,
+STORE, 94021643124736, 94021643259903,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140731219275776, 140737488351231,
+SNULL, 140731219283967, 140737488351231,
+STORE, 140731219275776, 140731219283967,
+STORE, 140731219144704, 140731219283967,
+STORE, 93888803647488, 93888805871615,
+SNULL, 93888803758079, 93888805871615,
+STORE, 93888803647488, 93888803758079,
+STORE, 93888803758080, 93888805871615,
+ERASE, 93888803758080, 93888805871615,
+STORE, 93888805851136, 93888805863423,
+STORE, 93888805863424, 93888805871615,
+STORE, 139630576934912, 139630579187711,
+SNULL, 139630577078271, 139630579187711,
+STORE, 139630576934912, 139630577078271,
+STORE, 139630577078272, 139630579187711,
+ERASE, 139630577078272, 139630579187711,
+STORE, 139630579175424, 139630579183615,
+STORE, 139630579183616, 139630579187711,
+STORE, 140731219718144, 140731219722239,
+STORE, 140731219705856, 140731219718143,
+STORE, 139630579146752, 139630579175423,
+STORE, 139630579138560, 139630579146751,
+STORE, 139630573137920, 139630576934911,
+SNULL, 139630573137920, 139630574796799,
+STORE, 139630574796800, 139630576934911,
+STORE, 139630573137920, 139630574796799,
+SNULL, 139630576893951, 139630576934911,
+STORE, 139630574796800, 139630576893951,
+STORE, 139630576893952, 139630576934911,
+SNULL, 139630576893952, 139630576918527,
+STORE, 139630576918528, 139630576934911,
+STORE, 139630576893952, 139630576918527,
+ERASE, 139630576893952, 139630576918527,
+STORE, 139630576893952, 139630576918527,
+ERASE, 139630576918528, 139630576934911,
+STORE, 139630576918528, 139630576934911,
+SNULL, 139630576910335, 139630576918527,
+STORE, 139630576893952, 139630576910335,
+STORE, 139630576910336, 139630576918527,
+SNULL, 93888805859327, 93888805863423,
+STORE, 93888805851136, 93888805859327,
+STORE, 93888805859328, 93888805863423,
+SNULL, 139630579179519, 139630579183615,
+STORE, 139630579175424, 139630579179519,
+STORE, 139630579179520, 139630579183615,
+ERASE, 139630579146752, 139630579175423,
+STORE, 93888822235136, 93888822370303,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733391151104, 140737488351231,
+SNULL, 140733391159295, 140737488351231,
+STORE, 140733391151104, 140733391159295,
+STORE, 140733391020032, 140733391159295,
+STORE, 94393875324928, 94393877549055,
+SNULL, 94393875435519, 94393877549055,
+STORE, 94393875324928, 94393875435519,
+STORE, 94393875435520, 94393877549055,
+ERASE, 94393875435520, 94393877549055,
+STORE, 94393877528576, 94393877540863,
+STORE, 94393877540864, 94393877549055,
+STORE, 140292111740928, 140292113993727,
+SNULL, 140292111884287, 140292113993727,
+STORE, 140292111740928, 140292111884287,
+STORE, 140292111884288, 140292113993727,
+ERASE, 140292111884288, 140292113993727,
+STORE, 140292113981440, 140292113989631,
+STORE, 140292113989632, 140292113993727,
+STORE, 140733391532032, 140733391536127,
+STORE, 140733391519744, 140733391532031,
+STORE, 140292113952768, 140292113981439,
+STORE, 140292113944576, 140292113952767,
+STORE, 140292107943936, 140292111740927,
+SNULL, 140292107943936, 140292109602815,
+STORE, 140292109602816, 140292111740927,
+STORE, 140292107943936, 140292109602815,
+SNULL, 140292111699967, 140292111740927,
+STORE, 140292109602816, 140292111699967,
+STORE, 140292111699968, 140292111740927,
+SNULL, 140292111699968, 140292111724543,
+STORE, 140292111724544, 140292111740927,
+STORE, 140292111699968, 140292111724543,
+ERASE, 140292111699968, 140292111724543,
+STORE, 140292111699968, 140292111724543,
+ERASE, 140292111724544, 140292111740927,
+STORE, 140292111724544, 140292111740927,
+SNULL, 140292111716351, 140292111724543,
+STORE, 140292111699968, 140292111716351,
+STORE, 140292111716352, 140292111724543,
+SNULL, 94393877536767, 94393877540863,
+STORE, 94393877528576, 94393877536767,
+STORE, 94393877536768, 94393877540863,
+SNULL, 140292113985535, 140292113989631,
+STORE, 140292113981440, 140292113985535,
+STORE, 140292113985536, 140292113989631,
+ERASE, 140292113952768, 140292113981439,
+STORE, 94393909342208, 94393909477375,
+STORE, 94458367512576, 94458367725567,
+STORE, 94458369822720, 94458369826815,
+STORE, 94458369826816, 94458369835007,
+STORE, 94458369835008, 94458369847295,
+STORE, 94458393292800, 94458399666175,
+STORE, 140619773841408, 140619775500287,
+STORE, 140619775500288, 140619777597439,
+STORE, 140619777597440, 140619777613823,
+STORE, 140619777613824, 140619777622015,
+STORE, 140619777622016, 140619777638399,
+STORE, 140619777638400, 140619777650687,
+STORE, 140619777650688, 140619779743743,
+STORE, 140619779743744, 140619779747839,
+STORE, 140619779747840, 140619779751935,
+STORE, 140619779751936, 140619779895295,
+STORE, 140619780263936, 140619781947391,
+STORE, 140619781947392, 140619781963775,
+STORE, 140619781992448, 140619781996543,
+STORE, 140619781996544, 140619782000639,
+STORE, 140619782000640, 140619782004735,
+STORE, 140725811675136, 140725811814399,
+STORE, 140725812813824, 140725812826111,
+STORE, 140725812826112, 140725812830207,
+STORE, 94458367512576, 94458367725567,
+STORE, 94458369822720, 94458369826815,
+STORE, 94458369826816, 94458369835007,
+STORE, 94458369835008, 94458369847295,
+STORE, 94458393292800, 94458400366591,
+STORE, 140619773841408, 140619775500287,
+STORE, 140619775500288, 140619777597439,
+STORE, 140619777597440, 140619777613823,
+STORE, 140619777613824, 140619777622015,
+STORE, 140619777622016, 140619777638399,
+STORE, 140619777638400, 140619777650687,
+STORE, 140619777650688, 140619779743743,
+STORE, 140619779743744, 140619779747839,
+STORE, 140619779747840, 140619779751935,
+STORE, 140619779751936, 140619779895295,
+STORE, 140619780263936, 140619781947391,
+STORE, 140619781947392, 140619781963775,
+STORE, 140619781992448, 140619781996543,
+STORE, 140619781996544, 140619782000639,
+STORE, 140619782000640, 140619782004735,
+STORE, 140725811675136, 140725811814399,
+STORE, 140725812813824, 140725812826111,
+STORE, 140725812826112, 140725812830207,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140728740679680, 140737488351231,
+SNULL, 140728740687871, 140737488351231,
+STORE, 140728740679680, 140728740687871,
+STORE, 140728740548608, 140728740687871,
+STORE, 94764075249664, 94764077473791,
+SNULL, 94764075360255, 94764077473791,
+STORE, 94764075249664, 94764075360255,
+STORE, 94764075360256, 94764077473791,
+ERASE, 94764075360256, 94764077473791,
+STORE, 94764077453312, 94764077465599,
+STORE, 94764077465600, 94764077473791,
+STORE, 139766406791168, 139766409043967,
+SNULL, 139766406934527, 139766409043967,
+STORE, 139766406791168, 139766406934527,
+STORE, 139766406934528, 139766409043967,
+ERASE, 139766406934528, 139766409043967,
+STORE, 139766409031680, 139766409039871,
+STORE, 139766409039872, 139766409043967,
+STORE, 140728740913152, 140728740917247,
+STORE, 140728740900864, 140728740913151,
+STORE, 139766409003008, 139766409031679,
+STORE, 139766408994816, 139766409003007,
+STORE, 139766402994176, 139766406791167,
+SNULL, 139766402994176, 139766404653055,
+STORE, 139766404653056, 139766406791167,
+STORE, 139766402994176, 139766404653055,
+SNULL, 139766406750207, 139766406791167,
+STORE, 139766404653056, 139766406750207,
+STORE, 139766406750208, 139766406791167,
+SNULL, 139766406750208, 139766406774783,
+STORE, 139766406774784, 139766406791167,
+STORE, 139766406750208, 139766406774783,
+ERASE, 139766406750208, 139766406774783,
+STORE, 139766406750208, 139766406774783,
+ERASE, 139766406774784, 139766406791167,
+STORE, 139766406774784, 139766406791167,
+SNULL, 139766406766591, 139766406774783,
+STORE, 139766406750208, 139766406766591,
+STORE, 139766406766592, 139766406774783,
+SNULL, 94764077461503, 94764077465599,
+STORE, 94764077453312, 94764077461503,
+STORE, 94764077461504, 94764077465599,
+SNULL, 139766409035775, 139766409039871,
+STORE, 139766409031680, 139766409035775,
+STORE, 139766409035776, 139766409039871,
+ERASE, 139766409003008, 139766409031679,
+STORE, 94764090458112, 94764090593279,
+STORE, 94758057480192, 94758057590783,
+STORE, 94758059683840, 94758059692031,
+STORE, 94758059692032, 94758059696127,
+STORE, 94758059696128, 94758059704319,
+STORE, 94758083215360, 94758083350527,
+STORE, 139951456772096, 139951458430975,
+STORE, 139951458430976, 139951460528127,
+STORE, 139951460528128, 139951460544511,
+STORE, 139951460544512, 139951460552703,
+STORE, 139951460552704, 139951460569087,
+STORE, 139951460569088, 139951460712447,
+STORE, 139951462772736, 139951462780927,
+STORE, 139951462809600, 139951462813695,
+STORE, 139951462813696, 139951462817791,
+STORE, 139951462817792, 139951462821887,
+STORE, 140734098313216, 140734098452479,
+STORE, 140734098911232, 140734098923519,
+STORE, 140734098923520, 140734098927615,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140724904095744, 140737488351231,
+SNULL, 140724904103935, 140737488351231,
+STORE, 140724904095744, 140724904103935,
+STORE, 140724903964672, 140724904103935,
+STORE, 4194304, 5128191,
+STORE, 7221248, 7241727,
+STORE, 7241728, 7249919,
+STORE, 140408497864704, 140408500117503,
+SNULL, 140408498008063, 140408500117503,
+STORE, 140408497864704, 140408498008063,
+STORE, 140408498008064, 140408500117503,
+ERASE, 140408498008064, 140408500117503,
+STORE, 140408500105216, 140408500113407,
+STORE, 140408500113408, 140408500117503,
+STORE, 140724905369600, 140724905373695,
+STORE, 140724905357312, 140724905369599,
+STORE, 140408500076544, 140408500105215,
+STORE, 140408500068352, 140408500076543,
+STORE, 140408494702592, 140408497864703,
+SNULL, 140408494702592, 140408495763455,
+STORE, 140408495763456, 140408497864703,
+STORE, 140408494702592, 140408495763455,
+SNULL, 140408497856511, 140408497864703,
+STORE, 140408495763456, 140408497856511,
+STORE, 140408497856512, 140408497864703,
+ERASE, 140408497856512, 140408497864703,
+STORE, 140408497856512, 140408497864703,
+STORE, 140408490905600, 140408494702591,
+SNULL, 140408490905600, 140408492564479,
+STORE, 140408492564480, 140408494702591,
+STORE, 140408490905600, 140408492564479,
+SNULL, 140408494661631, 140408494702591,
+STORE, 140408492564480, 140408494661631,
+STORE, 140408494661632, 140408494702591,
+SNULL, 140408494661632, 140408494686207,
+STORE, 140408494686208, 140408494702591,
+STORE, 140408494661632, 140408494686207,
+ERASE, 140408494661632, 140408494686207,
+STORE, 140408494661632, 140408494686207,
+ERASE, 140408494686208, 140408494702591,
+STORE, 140408494686208, 140408494702591,
+STORE, 140408500056064, 140408500076543,
+SNULL, 140408494678015, 140408494686207,
+STORE, 140408494661632, 140408494678015,
+STORE, 140408494678016, 140408494686207,
+SNULL, 140408497860607, 140408497864703,
+STORE, 140408497856512, 140408497860607,
+STORE, 140408497860608, 140408497864703,
+SNULL, 7233535, 7241727,
+STORE, 7221248, 7233535,
+STORE, 7233536, 7241727,
+SNULL, 140408500109311, 140408500113407,
+STORE, 140408500105216, 140408500109311,
+STORE, 140408500109312, 140408500113407,
+ERASE, 140408500076544, 140408500105215,
+STORE, 25235456, 25370623,
+STORE, 25235456, 25518079,
+STORE, 140408498372608, 140408500056063,
+STORE, 94543937388544, 94543937499135,
+STORE, 94543939592192, 94543939600383,
+STORE, 94543939600384, 94543939604479,
+STORE, 94543939604480, 94543939612671,
+STORE, 94543941447680, 94543941582847,
+STORE, 140282621947904, 140282623606783,
+STORE, 140282623606784, 140282625703935,
+STORE, 140282625703936, 140282625720319,
+STORE, 140282625720320, 140282625728511,
+STORE, 140282625728512, 140282625744895,
+STORE, 140282625744896, 140282625888255,
+STORE, 140282627948544, 140282627956735,
+STORE, 140282627985408, 140282627989503,
+STORE, 140282627989504, 140282627993599,
+STORE, 140282627993600, 140282627997695,
+STORE, 140728295723008, 140728295862271,
+STORE, 140728296476672, 140728296488959,
+STORE, 140728296488960, 140728296493055,
+STORE, 94431504838656, 94431505051647,
+STORE, 94431507148800, 94431507152895,
+STORE, 94431507152896, 94431507161087,
+STORE, 94431507161088, 94431507173375,
+STORE, 94431510286336, 94431510691839,
+STORE, 139818797948928, 139818799607807,
+STORE, 139818799607808, 139818801704959,
+STORE, 139818801704960, 139818801721343,
+STORE, 139818801721344, 139818801729535,
+STORE, 139818801729536, 139818801745919,
+STORE, 139818801745920, 139818801758207,
+STORE, 139818801758208, 139818803851263,
+STORE, 139818803851264, 139818803855359,
+STORE, 139818803855360, 139818803859455,
+STORE, 139818803859456, 139818804002815,
+STORE, 139818804371456, 139818806054911,
+STORE, 139818806054912, 139818806071295,
+STORE, 139818806099968, 139818806104063,
+STORE, 139818806104064, 139818806108159,
+STORE, 139818806108160, 139818806112255,
+STORE, 140731430457344, 140731430596607,
+STORE, 140731431227392, 140731431239679,
+STORE, 140731431239680, 140731431243775,
+STORE, 94431504838656, 94431505051647,
+STORE, 94431507148800, 94431507152895,
+STORE, 94431507152896, 94431507161087,
+STORE, 94431507161088, 94431507173375,
+STORE, 94431510286336, 94431510691839,
+STORE, 139818797948928, 139818799607807,
+STORE, 139818799607808, 139818801704959,
+STORE, 139818801704960, 139818801721343,
+STORE, 139818801721344, 139818801729535,
+STORE, 139818801729536, 139818801745919,
+STORE, 139818801745920, 139818801758207,
+STORE, 139818801758208, 139818803851263,
+STORE, 139818803851264, 139818803855359,
+STORE, 139818803855360, 139818803859455,
+STORE, 139818803859456, 139818804002815,
+STORE, 139818804371456, 139818806054911,
+STORE, 139818806054912, 139818806071295,
+STORE, 139818806099968, 139818806104063,
+STORE, 139818806104064, 139818806108159,
+STORE, 139818806108160, 139818806112255,
+STORE, 140731430457344, 140731430596607,
+STORE, 140731431227392, 140731431239679,
+STORE, 140731431239680, 140731431243775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140737488338944, 140737488351231,
+STORE, 140736944451584, 140737488351231,
+SNULL, 140736944463871, 140737488351231,
+STORE, 140736944451584, 140736944463871,
+STORE, 140736944320512, 140736944463871,
+STORE, 4194304, 26279935,
+STORE, 28372992, 28454911,
+STORE, 28454912, 29806591,
+STORE, 139693609893888, 139693612146687,
+SNULL, 139693610037247, 139693612146687,
+STORE, 139693609893888, 139693610037247,
+STORE, 139693610037248, 139693612146687,
+ERASE, 139693610037248, 139693612146687,
+STORE, 139693612134400, 139693612142591,
+STORE, 139693612142592, 139693612146687,
+STORE, 140736945152000, 140736945156095,
+STORE, 140736945139712, 140736945151999,
+STORE, 139693612105728, 139693612134399,
+STORE, 139693612097536, 139693612105727,
+STORE, 139693606060032, 139693609893887,
+SNULL, 139693606060032, 139693607768063,
+STORE, 139693607768064, 139693609893887,
+STORE, 139693606060032, 139693607768063,
+SNULL, 139693609861119, 139693609893887,
+STORE, 139693607768064, 139693609861119,
+STORE, 139693609861120, 139693609893887,
+ERASE, 139693609861120, 139693609893887,
+STORE, 139693609861120, 139693609893887,
+STORE, 139693603864576, 139693606060031,
+SNULL, 139693603864576, 139693603958783,
+STORE, 139693603958784, 139693606060031,
+STORE, 139693603864576, 139693603958783,
+SNULL, 139693606051839, 139693606060031,
+STORE, 139693603958784, 139693606051839,
+STORE, 139693606051840, 139693606060031,
+ERASE, 139693606051840, 139693606060031,
+STORE, 139693606051840, 139693606060031,
+STORE, 139693601345536, 139693603864575,
+SNULL, 139693601345536, 139693601759231,
+STORE, 139693601759232, 139693603864575,
+STORE, 139693601345536, 139693601759231,
+SNULL, 139693603852287, 139693603864575,
+STORE, 139693601759232, 139693603852287,
+STORE, 139693603852288, 139693603864575,
+ERASE, 139693603852288, 139693603864575,
+STORE, 139693603852288, 139693603864575,
+STORE, 139693598711808, 139693601345535,
+SNULL, 139693598711808, 139693599240191,
+STORE, 139693599240192, 139693601345535,
+STORE, 139693598711808, 139693599240191,
+SNULL, 139693601337343, 139693601345535,
+STORE, 139693599240192, 139693601337343,
+STORE, 139693601337344, 139693601345535,
+ERASE, 139693601337344, 139693601345535,
+STORE, 139693601337344, 139693601345535,
+STORE, 139693596598272, 139693598711807,
+SNULL, 139693596598272, 139693596610559,
+STORE, 139693596610560, 139693598711807,
+STORE, 139693596598272, 139693596610559,
+SNULL, 139693598703615, 139693598711807,
+STORE, 139693596610560, 139693598703615,
+STORE, 139693598703616, 139693598711807,
+ERASE, 139693598703616, 139693598711807,
+STORE, 139693598703616, 139693598711807,
+STORE, 139693594394624, 139693596598271,
+SNULL, 139693594394624, 139693594497023,
+STORE, 139693594497024, 139693596598271,
+STORE, 139693594394624, 139693594497023,
+SNULL, 139693596590079, 139693596598271,
+STORE, 139693594497024, 139693596590079,
+STORE, 139693596590080, 139693596598271,
+ERASE, 139693596590080, 139693596598271,
+STORE, 139693596590080, 139693596598271,
+STORE, 139693612089344, 139693612105727,
+STORE, 139693591232512, 139693594394623,
+SNULL, 139693591232512, 139693592293375,
+STORE, 139693592293376, 139693594394623,
+STORE, 139693591232512, 139693592293375,
+SNULL, 139693594386431, 139693594394623,
+STORE, 139693592293376, 139693594386431,
+STORE, 139693594386432, 139693594394623,
+ERASE, 139693594386432, 139693594394623,
+STORE, 139693594386432, 139693594394623,
+STORE, 139693587435520, 139693591232511,
+SNULL, 139693587435520, 139693589094399,
+STORE, 139693589094400, 139693591232511,
+STORE, 139693587435520, 139693589094399,
+SNULL, 139693591191551, 139693591232511,
+STORE, 139693589094400, 139693591191551,
+STORE, 139693591191552, 139693591232511,
+SNULL, 139693591191552, 139693591216127,
+STORE, 139693591216128, 139693591232511,
+STORE, 139693591191552, 139693591216127,
+ERASE, 139693591191552, 139693591216127,
+STORE, 139693591191552, 139693591216127,
+ERASE, 139693591216128, 139693591232511,
+STORE, 139693591216128, 139693591232511,
+STORE, 139693612077056, 139693612105727,
+SNULL, 139693591207935, 139693591216127,
+STORE, 139693591191552, 139693591207935,
+STORE, 139693591207936, 139693591216127,
+SNULL, 139693594390527, 139693594394623,
+STORE, 139693594386432, 139693594390527,
+STORE, 139693594390528, 139693594394623,
+SNULL, 139693596594175, 139693596598271,
+STORE, 139693596590080, 139693596594175,
+STORE, 139693596594176, 139693596598271,
+SNULL, 139693598707711, 139693598711807,
+STORE, 139693598703616, 139693598707711,
+STORE, 139693598707712, 139693598711807,
+SNULL, 139693601341439, 139693601345535,
+STORE, 139693601337344, 139693601341439,
+STORE, 139693601341440, 139693601345535,
+SNULL, 139693603860479, 139693603864575,
+STORE, 139693603852288, 139693603860479,
+STORE, 139693603860480, 139693603864575,
+SNULL, 139693606055935, 139693606060031,
+STORE, 139693606051840, 139693606055935,
+STORE, 139693606055936, 139693606060031,
+SNULL, 139693609865215, 139693609893887,
+STORE, 139693609861120, 139693609865215,
+STORE, 139693609865216, 139693609893887,
+SNULL, 28405759, 28454911,
+STORE, 28372992, 28405759,
+STORE, 28405760, 28454911,
+SNULL, 139693612138495, 139693612142591,
+STORE, 139693612134400, 139693612138495,
+STORE, 139693612138496, 139693612142591,
+ERASE, 139693612105728, 139693612134399,
+STORE, 39976960, 40112127,
+STORE, 139693610393600, 139693612077055,
+STORE, 139693612130304, 139693612134399,
+STORE, 139693610258432, 139693610393599,
+STORE, 39976960, 40255487,
+STORE, 139693585338368, 139693587435519,
+STORE, 139693612122112, 139693612134399,
+STORE, 139693612113920, 139693612134399,
+STORE, 139693612077056, 139693612113919,
+STORE, 139693610242048, 139693610393599,
+STORE, 39976960, 40390655,
+STORE, 39976960, 40546303,
+STORE, 139693610233856, 139693610393599,
+STORE, 139693610225664, 139693610393599,
+STORE, 39976960, 40714239,
+STORE, 139693610209280, 139693610393599,
+STORE, 39976960, 40861695,
+STORE, 94431504838656, 94431505051647,
+STORE, 94431507148800, 94431507152895,
+STORE, 94431507152896, 94431507161087,
+STORE, 94431507161088, 94431507173375,
+STORE, 94431510286336, 94431528759295,
+STORE, 139818797948928, 139818799607807,
+STORE, 139818799607808, 139818801704959,
+STORE, 139818801704960, 139818801721343,
+STORE, 139818801721344, 139818801729535,
+STORE, 139818801729536, 139818801745919,
+STORE, 139818801745920, 139818801758207,
+STORE, 139818801758208, 139818803851263,
+STORE, 139818803851264, 139818803855359,
+STORE, 139818803855360, 139818803859455,
+STORE, 139818803859456, 139818804002815,
+STORE, 139818804371456, 139818806054911,
+STORE, 139818806054912, 139818806071295,
+STORE, 139818806099968, 139818806104063,
+STORE, 139818806104064, 139818806108159,
+STORE, 139818806108160, 139818806112255,
+STORE, 140731430457344, 140731430596607,
+STORE, 140731431227392, 140731431239679,
+STORE, 140731431239680, 140731431243775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140729993904128, 140737488351231,
+SNULL, 140729993912319, 140737488351231,
+STORE, 140729993904128, 140729993912319,
+STORE, 140729993773056, 140729993912319,
+STORE, 93926271991808, 93926274215935,
+SNULL, 93926272102399, 93926274215935,
+STORE, 93926271991808, 93926272102399,
+STORE, 93926272102400, 93926274215935,
+ERASE, 93926272102400, 93926274215935,
+STORE, 93926274195456, 93926274207743,
+STORE, 93926274207744, 93926274215935,
+STORE, 139962167296000, 139962169548799,
+SNULL, 139962167439359, 139962169548799,
+STORE, 139962167296000, 139962167439359,
+STORE, 139962167439360, 139962169548799,
+ERASE, 139962167439360, 139962169548799,
+STORE, 139962169536512, 139962169544703,
+STORE, 139962169544704, 139962169548799,
+STORE, 140729995096064, 140729995100159,
+STORE, 140729995083776, 140729995096063,
+STORE, 139962169507840, 139962169536511,
+STORE, 139962169499648, 139962169507839,
+STORE, 139962163499008, 139962167295999,
+SNULL, 139962163499008, 139962165157887,
+STORE, 139962165157888, 139962167295999,
+STORE, 139962163499008, 139962165157887,
+SNULL, 139962167255039, 139962167295999,
+STORE, 139962165157888, 139962167255039,
+STORE, 139962167255040, 139962167295999,
+SNULL, 139962167255040, 139962167279615,
+STORE, 139962167279616, 139962167295999,
+STORE, 139962167255040, 139962167279615,
+ERASE, 139962167255040, 139962167279615,
+STORE, 139962167255040, 139962167279615,
+ERASE, 139962167279616, 139962167295999,
+STORE, 139962167279616, 139962167295999,
+SNULL, 139962167271423, 139962167279615,
+STORE, 139962167255040, 139962167271423,
+STORE, 139962167271424, 139962167279615,
+SNULL, 93926274203647, 93926274207743,
+STORE, 93926274195456, 93926274203647,
+STORE, 93926274203648, 93926274207743,
+SNULL, 139962169540607, 139962169544703,
+STORE, 139962169536512, 139962169540607,
+STORE, 139962169540608, 139962169544703,
+ERASE, 139962169507840, 139962169536511,
+STORE, 93926291120128, 93926291255295,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140724960579584, 140737488351231,
+SNULL, 140724960587775, 140737488351231,
+STORE, 140724960579584, 140724960587775,
+STORE, 140724960448512, 140724960587775,
+STORE, 94246489489408, 94246491713535,
+SNULL, 94246489599999, 94246491713535,
+STORE, 94246489489408, 94246489599999,
+STORE, 94246489600000, 94246491713535,
+ERASE, 94246489600000, 94246491713535,
+STORE, 94246491693056, 94246491705343,
+STORE, 94246491705344, 94246491713535,
+STORE, 140098174926848, 140098177179647,
+SNULL, 140098175070207, 140098177179647,
+STORE, 140098174926848, 140098175070207,
+STORE, 140098175070208, 140098177179647,
+ERASE, 140098175070208, 140098177179647,
+STORE, 140098177167360, 140098177175551,
+STORE, 140098177175552, 140098177179647,
+STORE, 140724961439744, 140724961443839,
+STORE, 140724961427456, 140724961439743,
+STORE, 140098177138688, 140098177167359,
+STORE, 140098177130496, 140098177138687,
+STORE, 140098171129856, 140098174926847,
+SNULL, 140098171129856, 140098172788735,
+STORE, 140098172788736, 140098174926847,
+STORE, 140098171129856, 140098172788735,
+SNULL, 140098174885887, 140098174926847,
+STORE, 140098172788736, 140098174885887,
+STORE, 140098174885888, 140098174926847,
+SNULL, 140098174885888, 140098174910463,
+STORE, 140098174910464, 140098174926847,
+STORE, 140098174885888, 140098174910463,
+ERASE, 140098174885888, 140098174910463,
+STORE, 140098174885888, 140098174910463,
+ERASE, 140098174910464, 140098174926847,
+STORE, 140098174910464, 140098174926847,
+SNULL, 140098174902271, 140098174910463,
+STORE, 140098174885888, 140098174902271,
+STORE, 140098174902272, 140098174910463,
+SNULL, 94246491701247, 94246491705343,
+STORE, 94246491693056, 94246491701247,
+STORE, 94246491701248, 94246491705343,
+SNULL, 140098177171455, 140098177175551,
+STORE, 140098177167360, 140098177171455,
+STORE, 140098177171456, 140098177175551,
+ERASE, 140098177138688, 140098177167359,
+STORE, 94246516998144, 94246517133311,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140730522918912, 140737488351231,
+SNULL, 140730522927103, 140737488351231,
+STORE, 140730522918912, 140730522927103,
+STORE, 140730522787840, 140730522927103,
+STORE, 94196043120640, 94196045344767,
+SNULL, 94196043231231, 94196045344767,
+STORE, 94196043120640, 94196043231231,
+STORE, 94196043231232, 94196045344767,
+ERASE, 94196043231232, 94196045344767,
+STORE, 94196045324288, 94196045336575,
+STORE, 94196045336576, 94196045344767,
+STORE, 139815918940160, 139815921192959,
+SNULL, 139815919083519, 139815921192959,
+STORE, 139815918940160, 139815919083519,
+STORE, 139815919083520, 139815921192959,
+ERASE, 139815919083520, 139815921192959,
+STORE, 139815921180672, 139815921188863,
+STORE, 139815921188864, 139815921192959,
+STORE, 140730523344896, 140730523348991,
+STORE, 140730523332608, 140730523344895,
+STORE, 139815921152000, 139815921180671,
+STORE, 139815921143808, 139815921151999,
+STORE, 139815915143168, 139815918940159,
+SNULL, 139815915143168, 139815916802047,
+STORE, 139815916802048, 139815918940159,
+STORE, 139815915143168, 139815916802047,
+SNULL, 139815918899199, 139815918940159,
+STORE, 139815916802048, 139815918899199,
+STORE, 139815918899200, 139815918940159,
+SNULL, 139815918899200, 139815918923775,
+STORE, 139815918923776, 139815918940159,
+STORE, 139815918899200, 139815918923775,
+ERASE, 139815918899200, 139815918923775,
+STORE, 139815918899200, 139815918923775,
+ERASE, 139815918923776, 139815918940159,
+STORE, 139815918923776, 139815918940159,
+SNULL, 139815918915583, 139815918923775,
+STORE, 139815918899200, 139815918915583,
+STORE, 139815918915584, 139815918923775,
+SNULL, 94196045332479, 94196045336575,
+STORE, 94196045324288, 94196045332479,
+STORE, 94196045332480, 94196045336575,
+SNULL, 139815921184767, 139815921188863,
+STORE, 139815921180672, 139815921184767,
+STORE, 139815921184768, 139815921188863,
+ERASE, 139815921152000, 139815921180671,
+STORE, 94196076183552, 94196076318719,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722460393472, 140737488351231,
+SNULL, 140722460401663, 140737488351231,
+STORE, 140722460393472, 140722460401663,
+STORE, 140722460262400, 140722460401663,
+STORE, 94569810399232, 94569812623359,
+SNULL, 94569810509823, 94569812623359,
+STORE, 94569810399232, 94569810509823,
+STORE, 94569810509824, 94569812623359,
+ERASE, 94569810509824, 94569812623359,
+STORE, 94569812602880, 94569812615167,
+STORE, 94569812615168, 94569812623359,
+STORE, 139681565450240, 139681567703039,
+SNULL, 139681565593599, 139681567703039,
+STORE, 139681565450240, 139681565593599,
+STORE, 139681565593600, 139681567703039,
+ERASE, 139681565593600, 139681567703039,
+STORE, 139681567690752, 139681567698943,
+STORE, 139681567698944, 139681567703039,
+STORE, 140722460569600, 140722460573695,
+STORE, 140722460557312, 140722460569599,
+STORE, 139681567662080, 139681567690751,
+STORE, 139681567653888, 139681567662079,
+STORE, 139681561653248, 139681565450239,
+SNULL, 139681561653248, 139681563312127,
+STORE, 139681563312128, 139681565450239,
+STORE, 139681561653248, 139681563312127,
+SNULL, 139681565409279, 139681565450239,
+STORE, 139681563312128, 139681565409279,
+STORE, 139681565409280, 139681565450239,
+SNULL, 139681565409280, 139681565433855,
+STORE, 139681565433856, 139681565450239,
+STORE, 139681565409280, 139681565433855,
+ERASE, 139681565409280, 139681565433855,
+STORE, 139681565409280, 139681565433855,
+ERASE, 139681565433856, 139681565450239,
+STORE, 139681565433856, 139681565450239,
+SNULL, 139681565425663, 139681565433855,
+STORE, 139681565409280, 139681565425663,
+STORE, 139681565425664, 139681565433855,
+SNULL, 94569812611071, 94569812615167,
+STORE, 94569812602880, 94569812611071,
+STORE, 94569812611072, 94569812615167,
+SNULL, 139681567694847, 139681567698943,
+STORE, 139681567690752, 139681567694847,
+STORE, 139681567694848, 139681567698943,
+ERASE, 139681567662080, 139681567690751,
+STORE, 94569818066944, 94569818202111,
+STORE, 94431504838656, 94431505051647,
+STORE, 94431507148800, 94431507152895,
+STORE, 94431507152896, 94431507161087,
+STORE, 94431507161088, 94431507173375,
+STORE, 94431510286336, 94431534280703,
+STORE, 139818797948928, 139818799607807,
+STORE, 139818799607808, 139818801704959,
+STORE, 139818801704960, 139818801721343,
+STORE, 139818801721344, 139818801729535,
+STORE, 139818801729536, 139818801745919,
+STORE, 139818801745920, 139818801758207,
+STORE, 139818801758208, 139818803851263,
+STORE, 139818803851264, 139818803855359,
+STORE, 139818803855360, 139818803859455,
+STORE, 139818803859456, 139818804002815,
+STORE, 139818804371456, 139818806054911,
+STORE, 139818806054912, 139818806071295,
+STORE, 139818806099968, 139818806104063,
+STORE, 139818806104064, 139818806108159,
+STORE, 139818806108160, 139818806112255,
+STORE, 140731430457344, 140731430596607,
+STORE, 140731431227392, 140731431239679,
+STORE, 140731431239680, 140731431243775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140725452365824, 140737488351231,
+SNULL, 140725452374015, 140737488351231,
+STORE, 140725452365824, 140725452374015,
+STORE, 140725452234752, 140725452374015,
+STORE, 94395067465728, 94395069689855,
+SNULL, 94395067576319, 94395069689855,
+STORE, 94395067465728, 94395067576319,
+STORE, 94395067576320, 94395069689855,
+ERASE, 94395067576320, 94395069689855,
+STORE, 94395069669376, 94395069681663,
+STORE, 94395069681664, 94395069689855,
+STORE, 140269941211136, 140269943463935,
+SNULL, 140269941354495, 140269943463935,
+STORE, 140269941211136, 140269941354495,
+STORE, 140269941354496, 140269943463935,
+ERASE, 140269941354496, 140269943463935,
+STORE, 140269943451648, 140269943459839,
+STORE, 140269943459840, 140269943463935,
+STORE, 140725452558336, 140725452562431,
+STORE, 140725452546048, 140725452558335,
+STORE, 140269943422976, 140269943451647,
+STORE, 140269943414784, 140269943422975,
+STORE, 140269937414144, 140269941211135,
+SNULL, 140269937414144, 140269939073023,
+STORE, 140269939073024, 140269941211135,
+STORE, 140269937414144, 140269939073023,
+SNULL, 140269941170175, 140269941211135,
+STORE, 140269939073024, 140269941170175,
+STORE, 140269941170176, 140269941211135,
+SNULL, 140269941170176, 140269941194751,
+STORE, 140269941194752, 140269941211135,
+STORE, 140269941170176, 140269941194751,
+ERASE, 140269941170176, 140269941194751,
+STORE, 140269941170176, 140269941194751,
+ERASE, 140269941194752, 140269941211135,
+STORE, 140269941194752, 140269941211135,
+SNULL, 140269941186559, 140269941194751,
+STORE, 140269941170176, 140269941186559,
+STORE, 140269941186560, 140269941194751,
+SNULL, 94395069677567, 94395069681663,
+STORE, 94395069669376, 94395069677567,
+STORE, 94395069677568, 94395069681663,
+SNULL, 140269943455743, 140269943459839,
+STORE, 140269943451648, 140269943455743,
+STORE, 140269943455744, 140269943459839,
+ERASE, 140269943422976, 140269943451647,
+STORE, 94395101691904, 94395101827071,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733860118528, 140737488351231,
+SNULL, 140733860126719, 140737488351231,
+STORE, 140733860118528, 140733860126719,
+STORE, 140733859987456, 140733860126719,
+STORE, 94484752990208, 94484755214335,
+SNULL, 94484753100799, 94484755214335,
+STORE, 94484752990208, 94484753100799,
+STORE, 94484753100800, 94484755214335,
+ERASE, 94484753100800, 94484755214335,
+STORE, 94484755193856, 94484755206143,
+STORE, 94484755206144, 94484755214335,
+STORE, 139958922309632, 139958924562431,
+SNULL, 139958922452991, 139958924562431,
+STORE, 139958922309632, 139958922452991,
+STORE, 139958922452992, 139958924562431,
+ERASE, 139958922452992, 139958924562431,
+STORE, 139958924550144, 139958924558335,
+STORE, 139958924558336, 139958924562431,
+STORE, 140733860253696, 140733860257791,
+STORE, 140733860241408, 140733860253695,
+STORE, 139958924521472, 139958924550143,
+STORE, 139958924513280, 139958924521471,
+STORE, 139958918512640, 139958922309631,
+SNULL, 139958918512640, 139958920171519,
+STORE, 139958920171520, 139958922309631,
+STORE, 139958918512640, 139958920171519,
+SNULL, 139958922268671, 139958922309631,
+STORE, 139958920171520, 139958922268671,
+STORE, 139958922268672, 139958922309631,
+SNULL, 139958922268672, 139958922293247,
+STORE, 139958922293248, 139958922309631,
+STORE, 139958922268672, 139958922293247,
+ERASE, 139958922268672, 139958922293247,
+STORE, 139958922268672, 139958922293247,
+ERASE, 139958922293248, 139958922309631,
+STORE, 139958922293248, 139958922309631,
+SNULL, 139958922285055, 139958922293247,
+STORE, 139958922268672, 139958922285055,
+STORE, 139958922285056, 139958922293247,
+SNULL, 94484755202047, 94484755206143,
+STORE, 94484755193856, 94484755202047,
+STORE, 94484755202048, 94484755206143,
+SNULL, 139958924554239, 139958924558335,
+STORE, 139958924550144, 139958924554239,
+STORE, 139958924554240, 139958924558335,
+ERASE, 139958924521472, 139958924550143,
+STORE, 94484777615360, 94484777750527,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140731051036672, 140737488351231,
+SNULL, 140731051044863, 140737488351231,
+STORE, 140731051036672, 140731051044863,
+STORE, 140731050905600, 140731051044863,
+STORE, 93945822998528, 93945825222655,
+SNULL, 93945823109119, 93945825222655,
+STORE, 93945822998528, 93945823109119,
+STORE, 93945823109120, 93945825222655,
+ERASE, 93945823109120, 93945825222655,
+STORE, 93945825202176, 93945825214463,
+STORE, 93945825214464, 93945825222655,
+STORE, 140153503997952, 140153506250751,
+SNULL, 140153504141311, 140153506250751,
+STORE, 140153503997952, 140153504141311,
+STORE, 140153504141312, 140153506250751,
+ERASE, 140153504141312, 140153506250751,
+STORE, 140153506238464, 140153506246655,
+STORE, 140153506246656, 140153506250751,
+STORE, 140731051331584, 140731051335679,
+STORE, 140731051319296, 140731051331583,
+STORE, 140153506209792, 140153506238463,
+STORE, 140153506201600, 140153506209791,
+STORE, 140153500200960, 140153503997951,
+SNULL, 140153500200960, 140153501859839,
+STORE, 140153501859840, 140153503997951,
+STORE, 140153500200960, 140153501859839,
+SNULL, 140153503956991, 140153503997951,
+STORE, 140153501859840, 140153503956991,
+STORE, 140153503956992, 140153503997951,
+SNULL, 140153503956992, 140153503981567,
+STORE, 140153503981568, 140153503997951,
+STORE, 140153503956992, 140153503981567,
+ERASE, 140153503956992, 140153503981567,
+STORE, 140153503956992, 140153503981567,
+ERASE, 140153503981568, 140153503997951,
+STORE, 140153503981568, 140153503997951,
+SNULL, 140153503973375, 140153503981567,
+STORE, 140153503956992, 140153503973375,
+STORE, 140153503973376, 140153503981567,
+SNULL, 93945825210367, 93945825214463,
+STORE, 93945825202176, 93945825210367,
+STORE, 93945825210368, 93945825214463,
+SNULL, 140153506242559, 140153506246655,
+STORE, 140153506238464, 140153506242559,
+STORE, 140153506242560, 140153506246655,
+ERASE, 140153506209792, 140153506238463,
+STORE, 93945854537728, 93945854672895,
+STORE, 94431504838656, 94431505051647,
+STORE, 94431507148800, 94431507152895,
+STORE, 94431507152896, 94431507161087,
+STORE, 94431507161088, 94431507173375,
+STORE, 94431510286336, 94431537885183,
+STORE, 139818797948928, 139818799607807,
+STORE, 139818799607808, 139818801704959,
+STORE, 139818801704960, 139818801721343,
+STORE, 139818801721344, 139818801729535,
+STORE, 139818801729536, 139818801745919,
+STORE, 139818801745920, 139818801758207,
+STORE, 139818801758208, 139818803851263,
+STORE, 139818803851264, 139818803855359,
+STORE, 139818803855360, 139818803859455,
+STORE, 139818803859456, 139818804002815,
+STORE, 139818804371456, 139818806054911,
+STORE, 139818806054912, 139818806071295,
+STORE, 139818806099968, 139818806104063,
+STORE, 139818806104064, 139818806108159,
+STORE, 139818806108160, 139818806112255,
+STORE, 140731430457344, 140731430596607,
+STORE, 140731431227392, 140731431239679,
+STORE, 140731431239680, 140731431243775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140736025325568, 140737488351231,
+SNULL, 140736025333759, 140737488351231,
+STORE, 140736025325568, 140736025333759,
+STORE, 140736025194496, 140736025333759,
+STORE, 94809095172096, 94809097396223,
+SNULL, 94809095282687, 94809097396223,
+STORE, 94809095172096, 94809095282687,
+STORE, 94809095282688, 94809097396223,
+ERASE, 94809095282688, 94809097396223,
+STORE, 94809097375744, 94809097388031,
+STORE, 94809097388032, 94809097396223,
+STORE, 140194992517120, 140194994769919,
+SNULL, 140194992660479, 140194994769919,
+STORE, 140194992517120, 140194992660479,
+STORE, 140194992660480, 140194994769919,
+ERASE, 140194992660480, 140194994769919,
+STORE, 140194994757632, 140194994765823,
+STORE, 140194994765824, 140194994769919,
+STORE, 140736026173440, 140736026177535,
+STORE, 140736026161152, 140736026173439,
+STORE, 140194994728960, 140194994757631,
+STORE, 140194994720768, 140194994728959,
+STORE, 140194988720128, 140194992517119,
+SNULL, 140194988720128, 140194990379007,
+STORE, 140194990379008, 140194992517119,
+STORE, 140194988720128, 140194990379007,
+SNULL, 140194992476159, 140194992517119,
+STORE, 140194990379008, 140194992476159,
+STORE, 140194992476160, 140194992517119,
+SNULL, 140194992476160, 140194992500735,
+STORE, 140194992500736, 140194992517119,
+STORE, 140194992476160, 140194992500735,
+ERASE, 140194992476160, 140194992500735,
+STORE, 140194992476160, 140194992500735,
+ERASE, 140194992500736, 140194992517119,
+STORE, 140194992500736, 140194992517119,
+SNULL, 140194992492543, 140194992500735,
+STORE, 140194992476160, 140194992492543,
+STORE, 140194992492544, 140194992500735,
+SNULL, 94809097383935, 94809097388031,
+STORE, 94809097375744, 94809097383935,
+STORE, 94809097383936, 94809097388031,
+SNULL, 140194994761727, 140194994765823,
+STORE, 140194994757632, 140194994761727,
+STORE, 140194994761728, 140194994765823,
+ERASE, 140194994728960, 140194994757631,
+STORE, 94809124286464, 94809124421631,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140726342660096, 140737488351231,
+SNULL, 140726342668287, 140737488351231,
+STORE, 140726342660096, 140726342668287,
+STORE, 140726342529024, 140726342668287,
+STORE, 94140331462656, 94140333686783,
+SNULL, 94140331573247, 94140333686783,
+STORE, 94140331462656, 94140331573247,
+STORE, 94140331573248, 94140333686783,
+ERASE, 94140331573248, 94140333686783,
+STORE, 94140333666304, 94140333678591,
+STORE, 94140333678592, 94140333686783,
+STORE, 140714077208576, 140714079461375,
+SNULL, 140714077351935, 140714079461375,
+STORE, 140714077208576, 140714077351935,
+STORE, 140714077351936, 140714079461375,
+ERASE, 140714077351936, 140714079461375,
+STORE, 140714079449088, 140714079457279,
+STORE, 140714079457280, 140714079461375,
+STORE, 140726343933952, 140726343938047,
+STORE, 140726343921664, 140726343933951,
+STORE, 140714079420416, 140714079449087,
+STORE, 140714079412224, 140714079420415,
+STORE, 140714073411584, 140714077208575,
+SNULL, 140714073411584, 140714075070463,
+STORE, 140714075070464, 140714077208575,
+STORE, 140714073411584, 140714075070463,
+SNULL, 140714077167615, 140714077208575,
+STORE, 140714075070464, 140714077167615,
+STORE, 140714077167616, 140714077208575,
+SNULL, 140714077167616, 140714077192191,
+STORE, 140714077192192, 140714077208575,
+STORE, 140714077167616, 140714077192191,
+ERASE, 140714077167616, 140714077192191,
+STORE, 140714077167616, 140714077192191,
+ERASE, 140714077192192, 140714077208575,
+STORE, 140714077192192, 140714077208575,
+SNULL, 140714077183999, 140714077192191,
+STORE, 140714077167616, 140714077183999,
+STORE, 140714077184000, 140714077192191,
+SNULL, 94140333674495, 94140333678591,
+STORE, 94140333666304, 94140333674495,
+STORE, 94140333674496, 94140333678591,
+SNULL, 140714079453183, 140714079457279,
+STORE, 140714079449088, 140714079453183,
+STORE, 140714079453184, 140714079457279,
+ERASE, 140714079420416, 140714079449087,
+STORE, 94140341432320, 94140341567487,
+STORE, 94431504838656, 94431505051647,
+STORE, 94431507148800, 94431507152895,
+STORE, 94431507152896, 94431507161087,
+STORE, 94431507161088, 94431507173375,
+STORE, 94431510286336, 94431539601407,
+STORE, 139818797948928, 139818799607807,
+STORE, 139818799607808, 139818801704959,
+STORE, 139818801704960, 139818801721343,
+STORE, 139818801721344, 139818801729535,
+STORE, 139818801729536, 139818801745919,
+STORE, 139818801745920, 139818801758207,
+STORE, 139818801758208, 139818803851263,
+STORE, 139818803851264, 139818803855359,
+STORE, 139818803855360, 139818803859455,
+STORE, 139818803859456, 139818804002815,
+STORE, 139818804371456, 139818806054911,
+STORE, 139818806054912, 139818806071295,
+STORE, 139818806099968, 139818806104063,
+STORE, 139818806104064, 139818806108159,
+STORE, 139818806108160, 139818806112255,
+STORE, 140731430457344, 140731430596607,
+STORE, 140731431227392, 140731431239679,
+STORE, 140731431239680, 140731431243775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140725843607552, 140737488351231,
+SNULL, 140725843615743, 140737488351231,
+STORE, 140725843607552, 140725843615743,
+STORE, 140725843476480, 140725843615743,
+STORE, 94889043505152, 94889045839871,
+SNULL, 94889043718143, 94889045839871,
+STORE, 94889043505152, 94889043718143,
+STORE, 94889043718144, 94889045839871,
+ERASE, 94889043718144, 94889045839871,
+STORE, 94889045815296, 94889045827583,
+STORE, 94889045827584, 94889045839871,
+STORE, 140250965946368, 140250968199167,
+SNULL, 140250966089727, 140250968199167,
+STORE, 140250965946368, 140250966089727,
+STORE, 140250966089728, 140250968199167,
+ERASE, 140250966089728, 140250968199167,
+STORE, 140250968186880, 140250968195071,
+STORE, 140250968195072, 140250968199167,
+STORE, 140725844500480, 140725844504575,
+STORE, 140725844488192, 140725844500479,
+STORE, 140250968158208, 140250968186879,
+STORE, 140250968150016, 140250968158207,
+STORE, 140250963832832, 140250965946367,
+SNULL, 140250963832832, 140250963845119,
+STORE, 140250963845120, 140250965946367,
+STORE, 140250963832832, 140250963845119,
+SNULL, 140250965938175, 140250965946367,
+STORE, 140250963845120, 140250965938175,
+STORE, 140250965938176, 140250965946367,
+ERASE, 140250965938176, 140250965946367,
+STORE, 140250965938176, 140250965946367,
+STORE, 140250960035840, 140250963832831,
+SNULL, 140250960035840, 140250961694719,
+STORE, 140250961694720, 140250963832831,
+STORE, 140250960035840, 140250961694719,
+SNULL, 140250963791871, 140250963832831,
+STORE, 140250961694720, 140250963791871,
+STORE, 140250963791872, 140250963832831,
+SNULL, 140250963791872, 140250963816447,
+STORE, 140250963816448, 140250963832831,
+STORE, 140250963791872, 140250963816447,
+ERASE, 140250963791872, 140250963816447,
+STORE, 140250963791872, 140250963816447,
+ERASE, 140250963816448, 140250963832831,
+STORE, 140250963816448, 140250963832831,
+STORE, 140250968141824, 140250968158207,
+SNULL, 140250963808255, 140250963816447,
+STORE, 140250963791872, 140250963808255,
+STORE, 140250963808256, 140250963816447,
+SNULL, 140250965942271, 140250965946367,
+STORE, 140250965938176, 140250965942271,
+STORE, 140250965942272, 140250965946367,
+SNULL, 94889045819391, 94889045827583,
+STORE, 94889045815296, 94889045819391,
+STORE, 94889045819392, 94889045827583,
+SNULL, 140250968190975, 140250968195071,
+STORE, 140250968186880, 140250968190975,
+STORE, 140250968190976, 140250968195071,
+ERASE, 140250968158208, 140250968186879,
+STORE, 94889052213248, 94889052348415,
+STORE, 140250966458368, 140250968141823,
+STORE, 94889052213248, 94889052483583,
+STORE, 94889052213248, 94889052618751,
+STORE, 94170851819520, 94170852032511,
+STORE, 94170854129664, 94170854133759,
+STORE, 94170854133760, 94170854141951,
+STORE, 94170854141952, 94170854154239,
+STORE, 94170866515968, 94170867740671,
+STORE, 140062030422016, 140062032080895,
+STORE, 140062032080896, 140062034178047,
+STORE, 140062034178048, 140062034194431,
+STORE, 140062034194432, 140062034202623,
+STORE, 140062034202624, 140062034219007,
+STORE, 140062034219008, 140062034231295,
+STORE, 140062034231296, 140062036324351,
+STORE, 140062036324352, 140062036328447,
+STORE, 140062036328448, 140062036332543,
+STORE, 140062036332544, 140062036475903,
+STORE, 140062036844544, 140062038527999,
+STORE, 140062038528000, 140062038544383,
+STORE, 140062038573056, 140062038577151,
+STORE, 140062038577152, 140062038581247,
+STORE, 140062038581248, 140062038585343,
+STORE, 140736210550784, 140736210690047,
+STORE, 140736210759680, 140736210771967,
+STORE, 140736210771968, 140736210776063,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140724272365568, 140737488351231,
+SNULL, 140724272373759, 140737488351231,
+STORE, 140724272365568, 140724272373759,
+STORE, 140724272234496, 140724272373759,
+STORE, 94607711965184, 94607714189311,
+SNULL, 94607712075775, 94607714189311,
+STORE, 94607711965184, 94607712075775,
+STORE, 94607712075776, 94607714189311,
+ERASE, 94607712075776, 94607714189311,
+STORE, 94607714168832, 94607714181119,
+STORE, 94607714181120, 94607714189311,
+STORE, 140054949253120, 140054951505919,
+SNULL, 140054949396479, 140054951505919,
+STORE, 140054949253120, 140054949396479,
+STORE, 140054949396480, 140054951505919,
+ERASE, 140054949396480, 140054951505919,
+STORE, 140054951493632, 140054951501823,
+STORE, 140054951501824, 140054951505919,
+STORE, 140724272992256, 140724272996351,
+STORE, 140724272979968, 140724272992255,
+STORE, 140054951464960, 140054951493631,
+STORE, 140054951456768, 140054951464959,
+STORE, 140054945456128, 140054949253119,
+SNULL, 140054945456128, 140054947115007,
+STORE, 140054947115008, 140054949253119,
+STORE, 140054945456128, 140054947115007,
+SNULL, 140054949212159, 140054949253119,
+STORE, 140054947115008, 140054949212159,
+STORE, 140054949212160, 140054949253119,
+SNULL, 140054949212160, 140054949236735,
+STORE, 140054949236736, 140054949253119,
+STORE, 140054949212160, 140054949236735,
+ERASE, 140054949212160, 140054949236735,
+STORE, 140054949212160, 140054949236735,
+ERASE, 140054949236736, 140054949253119,
+STORE, 140054949236736, 140054949253119,
+SNULL, 140054949228543, 140054949236735,
+STORE, 140054949212160, 140054949228543,
+STORE, 140054949228544, 140054949236735,
+SNULL, 94607714177023, 94607714181119,
+STORE, 94607714168832, 94607714177023,
+STORE, 94607714177024, 94607714181119,
+SNULL, 140054951497727, 140054951501823,
+STORE, 140054951493632, 140054951497727,
+STORE, 140054951497728, 140054951501823,
+ERASE, 140054951464960, 140054951493631,
+STORE, 94607733374976, 94607733510143,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733586923520, 140737488351231,
+SNULL, 140733586931711, 140737488351231,
+STORE, 140733586923520, 140733586931711,
+STORE, 140733586792448, 140733586931711,
+STORE, 93901634904064, 93901637128191,
+SNULL, 93901635014655, 93901637128191,
+STORE, 93901634904064, 93901635014655,
+STORE, 93901635014656, 93901637128191,
+ERASE, 93901635014656, 93901637128191,
+STORE, 93901637107712, 93901637119999,
+STORE, 93901637120000, 93901637128191,
+STORE, 140086104784896, 140086107037695,
+SNULL, 140086104928255, 140086107037695,
+STORE, 140086104784896, 140086104928255,
+STORE, 140086104928256, 140086107037695,
+ERASE, 140086104928256, 140086107037695,
+STORE, 140086107025408, 140086107033599,
+STORE, 140086107033600, 140086107037695,
+STORE, 140733587263488, 140733587267583,
+STORE, 140733587251200, 140733587263487,
+STORE, 140086106996736, 140086107025407,
+STORE, 140086106988544, 140086106996735,
+STORE, 140086100987904, 140086104784895,
+SNULL, 140086100987904, 140086102646783,
+STORE, 140086102646784, 140086104784895,
+STORE, 140086100987904, 140086102646783,
+SNULL, 140086104743935, 140086104784895,
+STORE, 140086102646784, 140086104743935,
+STORE, 140086104743936, 140086104784895,
+SNULL, 140086104743936, 140086104768511,
+STORE, 140086104768512, 140086104784895,
+STORE, 140086104743936, 140086104768511,
+ERASE, 140086104743936, 140086104768511,
+STORE, 140086104743936, 140086104768511,
+ERASE, 140086104768512, 140086104784895,
+STORE, 140086104768512, 140086104784895,
+SNULL, 140086104760319, 140086104768511,
+STORE, 140086104743936, 140086104760319,
+STORE, 140086104760320, 140086104768511,
+SNULL, 93901637115903, 93901637119999,
+STORE, 93901637107712, 93901637115903,
+STORE, 93901637115904, 93901637119999,
+SNULL, 140086107029503, 140086107033599,
+STORE, 140086107025408, 140086107029503,
+STORE, 140086107029504, 140086107033599,
+ERASE, 140086106996736, 140086107025407,
+STORE, 93901662715904, 93901662851071,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140723365613568, 140737488351231,
+SNULL, 140723365621759, 140737488351231,
+STORE, 140723365613568, 140723365621759,
+STORE, 140723365482496, 140723365621759,
+STORE, 94759193546752, 94759195770879,
+SNULL, 94759193657343, 94759195770879,
+STORE, 94759193546752, 94759193657343,
+STORE, 94759193657344, 94759195770879,
+ERASE, 94759193657344, 94759195770879,
+STORE, 94759195750400, 94759195762687,
+STORE, 94759195762688, 94759195770879,
+STORE, 140607636246528, 140607638499327,
+SNULL, 140607636389887, 140607638499327,
+STORE, 140607636246528, 140607636389887,
+STORE, 140607636389888, 140607638499327,
+ERASE, 140607636389888, 140607638499327,
+STORE, 140607638487040, 140607638495231,
+STORE, 140607638495232, 140607638499327,
+STORE, 140723365900288, 140723365904383,
+STORE, 140723365888000, 140723365900287,
+STORE, 140607638458368, 140607638487039,
+STORE, 140607638450176, 140607638458367,
+STORE, 140607632449536, 140607636246527,
+SNULL, 140607632449536, 140607634108415,
+STORE, 140607634108416, 140607636246527,
+STORE, 140607632449536, 140607634108415,
+SNULL, 140607636205567, 140607636246527,
+STORE, 140607634108416, 140607636205567,
+STORE, 140607636205568, 140607636246527,
+SNULL, 140607636205568, 140607636230143,
+STORE, 140607636230144, 140607636246527,
+STORE, 140607636205568, 140607636230143,
+ERASE, 140607636205568, 140607636230143,
+STORE, 140607636205568, 140607636230143,
+ERASE, 140607636230144, 140607636246527,
+STORE, 140607636230144, 140607636246527,
+SNULL, 140607636221951, 140607636230143,
+STORE, 140607636205568, 140607636221951,
+STORE, 140607636221952, 140607636230143,
+SNULL, 94759195758591, 94759195762687,
+STORE, 94759195750400, 94759195758591,
+STORE, 94759195758592, 94759195762687,
+SNULL, 140607638491135, 140607638495231,
+STORE, 140607638487040, 140607638491135,
+STORE, 140607638491136, 140607638495231,
+ERASE, 140607638458368, 140607638487039,
+STORE, 94759204995072, 94759205130239,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140732503789568, 140737488351231,
+SNULL, 140732503797759, 140737488351231,
+STORE, 140732503789568, 140732503797759,
+STORE, 140732503658496, 140732503797759,
+STORE, 94077792956416, 94077795180543,
+SNULL, 94077793067007, 94077795180543,
+STORE, 94077792956416, 94077793067007,
+STORE, 94077793067008, 94077795180543,
+ERASE, 94077793067008, 94077795180543,
+STORE, 94077795160064, 94077795172351,
+STORE, 94077795172352, 94077795180543,
+STORE, 140359874252800, 140359876505599,
+SNULL, 140359874396159, 140359876505599,
+STORE, 140359874252800, 140359874396159,
+STORE, 140359874396160, 140359876505599,
+ERASE, 140359874396160, 140359876505599,
+STORE, 140359876493312, 140359876501503,
+STORE, 140359876501504, 140359876505599,
+STORE, 140732504465408, 140732504469503,
+STORE, 140732504453120, 140732504465407,
+STORE, 140359876464640, 140359876493311,
+STORE, 140359876456448, 140359876464639,
+STORE, 140359870455808, 140359874252799,
+SNULL, 140359870455808, 140359872114687,
+STORE, 140359872114688, 140359874252799,
+STORE, 140359870455808, 140359872114687,
+SNULL, 140359874211839, 140359874252799,
+STORE, 140359872114688, 140359874211839,
+STORE, 140359874211840, 140359874252799,
+SNULL, 140359874211840, 140359874236415,
+STORE, 140359874236416, 140359874252799,
+STORE, 140359874211840, 140359874236415,
+ERASE, 140359874211840, 140359874236415,
+STORE, 140359874211840, 140359874236415,
+ERASE, 140359874236416, 140359874252799,
+STORE, 140359874236416, 140359874252799,
+SNULL, 140359874228223, 140359874236415,
+STORE, 140359874211840, 140359874228223,
+STORE, 140359874228224, 140359874236415,
+SNULL, 94077795168255, 94077795172351,
+STORE, 94077795160064, 94077795168255,
+STORE, 94077795168256, 94077795172351,
+SNULL, 140359876497407, 140359876501503,
+STORE, 140359876493312, 140359876497407,
+STORE, 140359876497408, 140359876501503,
+ERASE, 140359876464640, 140359876493311,
+STORE, 94077808717824, 94077808852991,
+STORE, 94549486252032, 94549486465023,
+STORE, 94549488562176, 94549488566271,
+STORE, 94549488566272, 94549488574463,
+STORE, 94549488574464, 94549488586751,
+STORE, 94549503492096, 94549506121727,
+STORE, 140085800894464, 140085802553343,
+STORE, 140085802553344, 140085804650495,
+STORE, 140085804650496, 140085804666879,
+STORE, 140085804666880, 140085804675071,
+STORE, 140085804675072, 140085804691455,
+STORE, 140085804691456, 140085804703743,
+STORE, 140085804703744, 140085806796799,
+STORE, 140085806796800, 140085806800895,
+STORE, 140085806800896, 140085806804991,
+STORE, 140085806804992, 140085806948351,
+STORE, 140085807316992, 140085809000447,
+STORE, 140085809000448, 140085809016831,
+STORE, 140085809045504, 140085809049599,
+STORE, 140085809049600, 140085809053695,
+STORE, 140085809053696, 140085809057791,
+STORE, 140731810545664, 140731810684927,
+STORE, 140731810967552, 140731810979839,
+STORE, 140731810979840, 140731810983935,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140724752330752, 140737488351231,
+SNULL, 140724752338943, 140737488351231,
+STORE, 140724752330752, 140724752338943,
+STORE, 140724752199680, 140724752338943,
+STORE, 94656357539840, 94656359874559,
+SNULL, 94656357752831, 94656359874559,
+STORE, 94656357539840, 94656357752831,
+STORE, 94656357752832, 94656359874559,
+ERASE, 94656357752832, 94656359874559,
+STORE, 94656359849984, 94656359862271,
+STORE, 94656359862272, 94656359874559,
+STORE, 139632585203712, 139632587456511,
+SNULL, 139632585347071, 139632587456511,
+STORE, 139632585203712, 139632585347071,
+STORE, 139632585347072, 139632587456511,
+ERASE, 139632585347072, 139632587456511,
+STORE, 139632587444224, 139632587452415,
+STORE, 139632587452416, 139632587456511,
+STORE, 139632587440128, 139632587444223,
+STORE, 139632587427840, 139632587440127,
+STORE, 139632587399168, 139632587427839,
+STORE, 139632587390976, 139632587399167,
+STORE, 139632583090176, 139632585203711,
+SNULL, 139632583090176, 139632583102463,
+STORE, 139632583102464, 139632585203711,
+STORE, 139632583090176, 139632583102463,
+SNULL, 139632585195519, 139632585203711,
+STORE, 139632583102464, 139632585195519,
+STORE, 139632585195520, 139632585203711,
+ERASE, 139632585195520, 139632585203711,
+STORE, 139632585195520, 139632585203711,
+STORE, 139632579293184, 139632583090175,
+SNULL, 139632579293184, 139632580952063,
+STORE, 139632580952064, 139632583090175,
+STORE, 139632579293184, 139632580952063,
+SNULL, 139632583049215, 139632583090175,
+STORE, 139632580952064, 139632583049215,
+STORE, 139632583049216, 139632583090175,
+SNULL, 139632583049216, 139632583073791,
+STORE, 139632583073792, 139632583090175,
+STORE, 139632583049216, 139632583073791,
+ERASE, 139632583049216, 139632583073791,
+STORE, 139632583049216, 139632583073791,
+ERASE, 139632583073792, 139632583090175,
+STORE, 139632583073792, 139632583090175,
+STORE, 139632587382784, 139632587399167,
+SNULL, 139632583065599, 139632583073791,
+STORE, 139632583049216, 139632583065599,
+STORE, 139632583065600, 139632583073791,
+SNULL, 139632585199615, 139632585203711,
+STORE, 139632585195520, 139632585199615,
+STORE, 139632585199616, 139632585203711,
+SNULL, 94656359854079, 94656359862271,
+STORE, 94656359849984, 94656359854079,
+STORE, 94656359854080, 94656359862271,
+SNULL, 139632587448319, 139632587452415,
+STORE, 139632587444224, 139632587448319,
+STORE, 139632587448320, 139632587452415,
+ERASE, 139632587399168, 139632587427839,
+STORE, 94656378912768, 94656379047935,
+STORE, 139632585699328, 139632587382783,
+STORE, 94656378912768, 94656379183103,
+STORE, 94656378912768, 94656379318271,
+STORE, 94656378912768, 94656379494399,
+SNULL, 94656379469823, 94656379494399,
+STORE, 94656378912768, 94656379469823,
+STORE, 94656379469824, 94656379494399,
+ERASE, 94656379469824, 94656379494399,
+STORE, 94656378912768, 94656379621375,
+STORE, 94656378912768, 94656379756543,
+STORE, 94656378912768, 94656379912191,
+STORE, 94656378912768, 94656380055551,
+STORE, 94656378912768, 94656380190719,
+STORE, 94656378912768, 94656380338175,
+SNULL, 94656380313599, 94656380338175,
+STORE, 94656378912768, 94656380313599,
+STORE, 94656380313600, 94656380338175,
+ERASE, 94656380313600, 94656380338175,
+STORE, 94656378912768, 94656380448767,
+SNULL, 94656380432383, 94656380448767,
+STORE, 94656378912768, 94656380432383,
+STORE, 94656380432384, 94656380448767,
+ERASE, 94656380432384, 94656380448767,
+STORE, 94656378912768, 94656380567551,
+STORE, 94656378912768, 94656380719103,
+STORE, 94656378912768, 94656380858367,
+STORE, 94656378912768, 94656380997631,
+STORE, 94656378912768, 94656381132799,
+SNULL, 94656381124607, 94656381132799,
+STORE, 94656378912768, 94656381124607,
+STORE, 94656381124608, 94656381132799,
+ERASE, 94656381124608, 94656381132799,
+STORE, 94656378912768, 94656381276159,
+STORE, 94656378912768, 94656381427711,
+STORE, 94604087611392, 94604087824383,
+STORE, 94604089921536, 94604089925631,
+STORE, 94604089925632, 94604089933823,
+STORE, 94604089933824, 94604089946111,
+STORE, 94604105125888, 94604106424319,
+STORE, 140454937694208, 140454939353087,
+STORE, 140454939353088, 140454941450239,
+STORE, 140454941450240, 140454941466623,
+STORE, 140454941466624, 140454941474815,
+STORE, 140454941474816, 140454941491199,
+STORE, 140454941491200, 140454941503487,
+STORE, 140454941503488, 140454943596543,
+STORE, 140454943596544, 140454943600639,
+STORE, 140454943600640, 140454943604735,
+STORE, 140454943604736, 140454943748095,
+STORE, 140454944116736, 140454945800191,
+STORE, 140454945800192, 140454945816575,
+STORE, 140454945845248, 140454945849343,
+STORE, 140454945849344, 140454945853439,
+STORE, 140454945853440, 140454945857535,
+STORE, 140728438214656, 140728438353919,
+STORE, 140728439095296, 140728439107583,
+STORE, 140728439107584, 140728439111679,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140727821099008, 140737488351231,
+SNULL, 140727821107199, 140737488351231,
+STORE, 140727821099008, 140727821107199,
+STORE, 140727820967936, 140727821107199,
+STORE, 94088457240576, 94088459575295,
+SNULL, 94088457453567, 94088459575295,
+STORE, 94088457240576, 94088457453567,
+STORE, 94088457453568, 94088459575295,
+ERASE, 94088457453568, 94088459575295,
+STORE, 94088459550720, 94088459563007,
+STORE, 94088459563008, 94088459575295,
+STORE, 140234378989568, 140234381242367,
+SNULL, 140234379132927, 140234381242367,
+STORE, 140234378989568, 140234379132927,
+STORE, 140234379132928, 140234381242367,
+ERASE, 140234379132928, 140234381242367,
+STORE, 140234381230080, 140234381238271,
+STORE, 140234381238272, 140234381242367,
+STORE, 140727822077952, 140727822082047,
+STORE, 140727822065664, 140727822077951,
+STORE, 140234381201408, 140234381230079,
+STORE, 140234381193216, 140234381201407,
+STORE, 140234376876032, 140234378989567,
+SNULL, 140234376876032, 140234376888319,
+STORE, 140234376888320, 140234378989567,
+STORE, 140234376876032, 140234376888319,
+SNULL, 140234378981375, 140234378989567,
+STORE, 140234376888320, 140234378981375,
+STORE, 140234378981376, 140234378989567,
+ERASE, 140234378981376, 140234378989567,
+STORE, 140234378981376, 140234378989567,
+STORE, 140234373079040, 140234376876031,
+SNULL, 140234373079040, 140234374737919,
+STORE, 140234374737920, 140234376876031,
+STORE, 140234373079040, 140234374737919,
+SNULL, 140234376835071, 140234376876031,
+STORE, 140234374737920, 140234376835071,
+STORE, 140234376835072, 140234376876031,
+SNULL, 140234376835072, 140234376859647,
+STORE, 140234376859648, 140234376876031,
+STORE, 140234376835072, 140234376859647,
+ERASE, 140234376835072, 140234376859647,
+STORE, 140234376835072, 140234376859647,
+ERASE, 140234376859648, 140234376876031,
+STORE, 140234376859648, 140234376876031,
+STORE, 140234381185024, 140234381201407,
+SNULL, 140234376851455, 140234376859647,
+STORE, 140234376835072, 140234376851455,
+STORE, 140234376851456, 140234376859647,
+SNULL, 140234378985471, 140234378989567,
+STORE, 140234378981376, 140234378985471,
+STORE, 140234378985472, 140234378989567,
+SNULL, 94088459554815, 94088459563007,
+STORE, 94088459550720, 94088459554815,
+STORE, 94088459554816, 94088459563007,
+SNULL, 140234381234175, 140234381238271,
+STORE, 140234381230080, 140234381234175,
+STORE, 140234381234176, 140234381238271,
+ERASE, 140234381201408, 140234381230079,
+STORE, 94088468852736, 94088468987903,
+STORE, 140234379501568, 140234381185023,
+STORE, 94088468852736, 94088469123071,
+STORE, 94088468852736, 94088469258239,
+STORE, 94110050402304, 94110050615295,
+STORE, 94110052712448, 94110052716543,
+STORE, 94110052716544, 94110052724735,
+STORE, 94110052724736, 94110052737023,
+STORE, 94110061875200, 94110062415871,
+STORE, 140139439357952, 140139441016831,
+STORE, 140139441016832, 140139443113983,
+STORE, 140139443113984, 140139443130367,
+STORE, 140139443130368, 140139443138559,
+STORE, 140139443138560, 140139443154943,
+STORE, 140139443154944, 140139443167231,
+STORE, 140139443167232, 140139445260287,
+STORE, 140139445260288, 140139445264383,
+STORE, 140139445264384, 140139445268479,
+STORE, 140139445268480, 140139445411839,
+STORE, 140139445780480, 140139447463935,
+STORE, 140139447463936, 140139447480319,
+STORE, 140139447508992, 140139447513087,
+STORE, 140139447513088, 140139447517183,
+STORE, 140139447517184, 140139447521279,
+STORE, 140731901427712, 140731901566975,
+STORE, 140731902259200, 140731902271487,
+STORE, 140731902271488, 140731902275583,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140727282622464, 140737488351231,
+SNULL, 140727282630655, 140737488351231,
+STORE, 140727282622464, 140727282630655,
+STORE, 140727282491392, 140727282630655,
+STORE, 94266649866240, 94266652200959,
+SNULL, 94266650079231, 94266652200959,
+STORE, 94266649866240, 94266650079231,
+STORE, 94266650079232, 94266652200959,
+ERASE, 94266650079232, 94266652200959,
+STORE, 94266652176384, 94266652188671,
+STORE, 94266652188672, 94266652200959,
+STORE, 139888497991680, 139888500244479,
+SNULL, 139888498135039, 139888500244479,
+STORE, 139888497991680, 139888498135039,
+STORE, 139888498135040, 139888500244479,
+ERASE, 139888498135040, 139888500244479,
+STORE, 139888500232192, 139888500240383,
+STORE, 139888500240384, 139888500244479,
+STORE, 140727283113984, 140727283118079,
+STORE, 140727283101696, 140727283113983,
+STORE, 139888500203520, 139888500232191,
+STORE, 139888500195328, 139888500203519,
+STORE, 139888495878144, 139888497991679,
+SNULL, 139888495878144, 139888495890431,
+STORE, 139888495890432, 139888497991679,
+STORE, 139888495878144, 139888495890431,
+SNULL, 139888497983487, 139888497991679,
+STORE, 139888495890432, 139888497983487,
+STORE, 139888497983488, 139888497991679,
+ERASE, 139888497983488, 139888497991679,
+STORE, 139888497983488, 139888497991679,
+STORE, 139888492081152, 139888495878143,
+SNULL, 139888492081152, 139888493740031,
+STORE, 139888493740032, 139888495878143,
+STORE, 139888492081152, 139888493740031,
+SNULL, 139888495837183, 139888495878143,
+STORE, 139888493740032, 139888495837183,
+STORE, 139888495837184, 139888495878143,
+SNULL, 139888495837184, 139888495861759,
+STORE, 139888495861760, 139888495878143,
+STORE, 139888495837184, 139888495861759,
+ERASE, 139888495837184, 139888495861759,
+STORE, 139888495837184, 139888495861759,
+ERASE, 139888495861760, 139888495878143,
+STORE, 139888495861760, 139888495878143,
+STORE, 139888500187136, 139888500203519,
+SNULL, 139888495853567, 139888495861759,
+STORE, 139888495837184, 139888495853567,
+STORE, 139888495853568, 139888495861759,
+SNULL, 139888497987583, 139888497991679,
+STORE, 139888497983488, 139888497987583,
+STORE, 139888497987584, 139888497991679,
+SNULL, 94266652180479, 94266652188671,
+STORE, 94266652176384, 94266652180479,
+STORE, 94266652180480, 94266652188671,
+SNULL, 139888500236287, 139888500240383,
+STORE, 139888500232192, 139888500236287,
+STORE, 139888500236288, 139888500240383,
+ERASE, 139888500203520, 139888500232191,
+STORE, 94266678542336, 94266678677503,
+STORE, 139888498503680, 139888500187135,
+STORE, 94266678542336, 94266678812671,
+STORE, 94266678542336, 94266678947839,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722507702272, 140737488351231,
+SNULL, 140722507710463, 140737488351231,
+STORE, 140722507702272, 140722507710463,
+STORE, 140722507571200, 140722507710463,
+STORE, 94313981394944, 94313983729663,
+SNULL, 94313981607935, 94313983729663,
+STORE, 94313981394944, 94313981607935,
+STORE, 94313981607936, 94313983729663,
+ERASE, 94313981607936, 94313983729663,
+STORE, 94313983705088, 94313983717375,
+STORE, 94313983717376, 94313983729663,
+STORE, 140456286076928, 140456288329727,
+SNULL, 140456286220287, 140456288329727,
+STORE, 140456286076928, 140456286220287,
+STORE, 140456286220288, 140456288329727,
+ERASE, 140456286220288, 140456288329727,
+STORE, 140456288317440, 140456288325631,
+STORE, 140456288325632, 140456288329727,
+STORE, 140722507997184, 140722508001279,
+STORE, 140722507984896, 140722507997183,
+STORE, 140456288288768, 140456288317439,
+STORE, 140456288280576, 140456288288767,
+STORE, 140456283963392, 140456286076927,
+SNULL, 140456283963392, 140456283975679,
+STORE, 140456283975680, 140456286076927,
+STORE, 140456283963392, 140456283975679,
+SNULL, 140456286068735, 140456286076927,
+STORE, 140456283975680, 140456286068735,
+STORE, 140456286068736, 140456286076927,
+ERASE, 140456286068736, 140456286076927,
+STORE, 140456286068736, 140456286076927,
+STORE, 140456280166400, 140456283963391,
+SNULL, 140456280166400, 140456281825279,
+STORE, 140456281825280, 140456283963391,
+STORE, 140456280166400, 140456281825279,
+SNULL, 140456283922431, 140456283963391,
+STORE, 140456281825280, 140456283922431,
+STORE, 140456283922432, 140456283963391,
+SNULL, 140456283922432, 140456283947007,
+STORE, 140456283947008, 140456283963391,
+STORE, 140456283922432, 140456283947007,
+ERASE, 140456283922432, 140456283947007,
+STORE, 140456283922432, 140456283947007,
+ERASE, 140456283947008, 140456283963391,
+STORE, 140456283947008, 140456283963391,
+STORE, 140456288272384, 140456288288767,
+SNULL, 140456283938815, 140456283947007,
+STORE, 140456283922432, 140456283938815,
+STORE, 140456283938816, 140456283947007,
+SNULL, 140456286072831, 140456286076927,
+STORE, 140456286068736, 140456286072831,
+STORE, 140456286072832, 140456286076927,
+SNULL, 94313983709183, 94313983717375,
+STORE, 94313983705088, 94313983709183,
+STORE, 94313983709184, 94313983717375,
+SNULL, 140456288321535, 140456288325631,
+STORE, 140456288317440, 140456288321535,
+STORE, 140456288321536, 140456288325631,
+ERASE, 140456288288768, 140456288317439,
+STORE, 94314006716416, 94314006851583,
+STORE, 140456286588928, 140456288272383,
+STORE, 94314006716416, 94314006986751,
+STORE, 94314006716416, 94314007121919,
+STORE, 93948644454400, 93948644667391,
+STORE, 93948646764544, 93948646768639,
+STORE, 93948646768640, 93948646776831,
+STORE, 93948646776832, 93948646789119,
+STORE, 93948664999936, 93948667142143,
+STORE, 140187350659072, 140187352317951,
+STORE, 140187352317952, 140187354415103,
+STORE, 140187354415104, 140187354431487,
+STORE, 140187354431488, 140187354439679,
+STORE, 140187354439680, 140187354456063,
+STORE, 140187354456064, 140187354468351,
+STORE, 140187354468352, 140187356561407,
+STORE, 140187356561408, 140187356565503,
+STORE, 140187356565504, 140187356569599,
+STORE, 140187356569600, 140187356712959,
+STORE, 140187357081600, 140187358765055,
+STORE, 140187358765056, 140187358781439,
+STORE, 140187358810112, 140187358814207,
+STORE, 140187358814208, 140187358818303,
+STORE, 140187358818304, 140187358822399,
+STORE, 140730484518912, 140730484658175,
+STORE, 140730485690368, 140730485702655,
+STORE, 140730485702656, 140730485706751,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140721211551744, 140737488351231,
+SNULL, 140721211559935, 140737488351231,
+STORE, 140721211551744, 140721211559935,
+STORE, 140721211420672, 140721211559935,
+STORE, 94105221423104, 94105223757823,
+SNULL, 94105221636095, 94105223757823,
+STORE, 94105221423104, 94105221636095,
+STORE, 94105221636096, 94105223757823,
+ERASE, 94105221636096, 94105223757823,
+STORE, 94105223733248, 94105223745535,
+STORE, 94105223745536, 94105223757823,
+STORE, 140474453676032, 140474455928831,
+SNULL, 140474453819391, 140474455928831,
+STORE, 140474453676032, 140474453819391,
+STORE, 140474453819392, 140474455928831,
+ERASE, 140474453819392, 140474455928831,
+STORE, 140474455916544, 140474455924735,
+STORE, 140474455924736, 140474455928831,
+STORE, 140721211703296, 140721211707391,
+STORE, 140721211691008, 140721211703295,
+STORE, 140474455887872, 140474455916543,
+STORE, 140474455879680, 140474455887871,
+STORE, 140474451562496, 140474453676031,
+SNULL, 140474451562496, 140474451574783,
+STORE, 140474451574784, 140474453676031,
+STORE, 140474451562496, 140474451574783,
+SNULL, 140474453667839, 140474453676031,
+STORE, 140474451574784, 140474453667839,
+STORE, 140474453667840, 140474453676031,
+ERASE, 140474453667840, 140474453676031,
+STORE, 140474453667840, 140474453676031,
+STORE, 140474447765504, 140474451562495,
+SNULL, 140474447765504, 140474449424383,
+STORE, 140474449424384, 140474451562495,
+STORE, 140474447765504, 140474449424383,
+SNULL, 140474451521535, 140474451562495,
+STORE, 140474449424384, 140474451521535,
+STORE, 140474451521536, 140474451562495,
+SNULL, 140474451521536, 140474451546111,
+STORE, 140474451546112, 140474451562495,
+STORE, 140474451521536, 140474451546111,
+ERASE, 140474451521536, 140474451546111,
+STORE, 140474451521536, 140474451546111,
+ERASE, 140474451546112, 140474451562495,
+STORE, 140474451546112, 140474451562495,
+STORE, 140474455871488, 140474455887871,
+SNULL, 140474451537919, 140474451546111,
+STORE, 140474451521536, 140474451537919,
+STORE, 140474451537920, 140474451546111,
+SNULL, 140474453671935, 140474453676031,
+STORE, 140474453667840, 140474453671935,
+STORE, 140474453671936, 140474453676031,
+SNULL, 94105223737343, 94105223745535,
+STORE, 94105223733248, 94105223737343,
+STORE, 94105223737344, 94105223745535,
+SNULL, 140474455920639, 140474455924735,
+STORE, 140474455916544, 140474455920639,
+STORE, 140474455920640, 140474455924735,
+ERASE, 140474455887872, 140474455916543,
+STORE, 94105238712320, 94105238847487,
+STORE, 140474454188032, 140474455871487,
+STORE, 94105238712320, 94105238982655,
+STORE, 94105238712320, 94105239117823,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140732356354048, 140737488351231,
+SNULL, 140732356362239, 140737488351231,
+STORE, 140732356354048, 140732356362239,
+STORE, 140732356222976, 140732356362239,
+STORE, 94461165989888, 94461168324607,
+SNULL, 94461166202879, 94461168324607,
+STORE, 94461165989888, 94461166202879,
+STORE, 94461166202880, 94461168324607,
+ERASE, 94461166202880, 94461168324607,
+STORE, 94461168300032, 94461168312319,
+STORE, 94461168312320, 94461168324607,
+STORE, 140317255110656, 140317257363455,
+SNULL, 140317255254015, 140317257363455,
+STORE, 140317255110656, 140317255254015,
+STORE, 140317255254016, 140317257363455,
+ERASE, 140317255254016, 140317257363455,
+STORE, 140317257351168, 140317257359359,
+STORE, 140317257359360, 140317257363455,
+STORE, 140732356583424, 140732356587519,
+STORE, 140732356571136, 140732356583423,
+STORE, 140317257322496, 140317257351167,
+STORE, 140317257314304, 140317257322495,
+STORE, 140317252997120, 140317255110655,
+SNULL, 140317252997120, 140317253009407,
+STORE, 140317253009408, 140317255110655,
+STORE, 140317252997120, 140317253009407,
+SNULL, 140317255102463, 140317255110655,
+STORE, 140317253009408, 140317255102463,
+STORE, 140317255102464, 140317255110655,
+ERASE, 140317255102464, 140317255110655,
+STORE, 140317255102464, 140317255110655,
+STORE, 140317249200128, 140317252997119,
+SNULL, 140317249200128, 140317250859007,
+STORE, 140317250859008, 140317252997119,
+STORE, 140317249200128, 140317250859007,
+SNULL, 140317252956159, 140317252997119,
+STORE, 140317250859008, 140317252956159,
+STORE, 140317252956160, 140317252997119,
+SNULL, 140317252956160, 140317252980735,
+STORE, 140317252980736, 140317252997119,
+STORE, 140317252956160, 140317252980735,
+ERASE, 140317252956160, 140317252980735,
+STORE, 140317252956160, 140317252980735,
+ERASE, 140317252980736, 140317252997119,
+STORE, 140317252980736, 140317252997119,
+STORE, 140317257306112, 140317257322495,
+SNULL, 140317252972543, 140317252980735,
+STORE, 140317252956160, 140317252972543,
+STORE, 140317252972544, 140317252980735,
+SNULL, 140317255106559, 140317255110655,
+STORE, 140317255102464, 140317255106559,
+STORE, 140317255106560, 140317255110655,
+SNULL, 94461168304127, 94461168312319,
+STORE, 94461168300032, 94461168304127,
+STORE, 94461168304128, 94461168312319,
+SNULL, 140317257355263, 140317257359359,
+STORE, 140317257351168, 140317257355263,
+STORE, 140317257355264, 140317257359359,
+ERASE, 140317257322496, 140317257351167,
+STORE, 94461195268096, 94461195403263,
+STORE, 140317255622656, 140317257306111,
+STORE, 94461195268096, 94461195538431,
+STORE, 94461195268096, 94461195673599,
+STORE, 94110050402304, 94110050615295,
+STORE, 94110052712448, 94110052716543,
+STORE, 94110052716544, 94110052724735,
+STORE, 94110052724736, 94110052737023,
+STORE, 94110061875200, 94110062415871,
+STORE, 140139439357952, 140139441016831,
+STORE, 140139441016832, 140139443113983,
+STORE, 140139443113984, 140139443130367,
+STORE, 140139443130368, 140139443138559,
+STORE, 140139443138560, 140139443154943,
+STORE, 140139443154944, 140139443167231,
+STORE, 140139443167232, 140139445260287,
+STORE, 140139445260288, 140139445264383,
+STORE, 140139445264384, 140139445268479,
+STORE, 140139445268480, 140139445411839,
+STORE, 140139445780480, 140139447463935,
+STORE, 140139447463936, 140139447480319,
+STORE, 140139447508992, 140139447513087,
+STORE, 140139447513088, 140139447517183,
+STORE, 140139447517184, 140139447521279,
+STORE, 140731901427712, 140731901566975,
+STORE, 140731902259200, 140731902271487,
+STORE, 140731902271488, 140731902275583,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140720941613056, 140737488351231,
+SNULL, 140720941621247, 140737488351231,
+STORE, 140720941613056, 140720941621247,
+STORE, 140720941481984, 140720941621247,
+STORE, 93902377721856, 93902379945983,
+SNULL, 93902377832447, 93902379945983,
+STORE, 93902377721856, 93902377832447,
+STORE, 93902377832448, 93902379945983,
+ERASE, 93902377832448, 93902379945983,
+STORE, 93902379925504, 93902379937791,
+STORE, 93902379937792, 93902379945983,
+STORE, 139836543635456, 139836545888255,
+SNULL, 139836543778815, 139836545888255,
+STORE, 139836543635456, 139836543778815,
+STORE, 139836543778816, 139836545888255,
+ERASE, 139836543778816, 139836545888255,
+STORE, 139836545875968, 139836545884159,
+STORE, 139836545884160, 139836545888255,
+STORE, 140720941711360, 140720941715455,
+STORE, 140720941699072, 140720941711359,
+STORE, 139836545847296, 139836545875967,
+STORE, 139836545839104, 139836545847295,
+STORE, 139836539838464, 139836543635455,
+SNULL, 139836539838464, 139836541497343,
+STORE, 139836541497344, 139836543635455,
+STORE, 139836539838464, 139836541497343,
+SNULL, 139836543594495, 139836543635455,
+STORE, 139836541497344, 139836543594495,
+STORE, 139836543594496, 139836543635455,
+SNULL, 139836543594496, 139836543619071,
+STORE, 139836543619072, 139836543635455,
+STORE, 139836543594496, 139836543619071,
+ERASE, 139836543594496, 139836543619071,
+STORE, 139836543594496, 139836543619071,
+ERASE, 139836543619072, 139836543635455,
+STORE, 139836543619072, 139836543635455,
+SNULL, 139836543610879, 139836543619071,
+STORE, 139836543594496, 139836543610879,
+STORE, 139836543610880, 139836543619071,
+SNULL, 93902379933695, 93902379937791,
+STORE, 93902379925504, 93902379933695,
+STORE, 93902379933696, 93902379937791,
+SNULL, 139836545880063, 139836545884159,
+STORE, 139836545875968, 139836545880063,
+STORE, 139836545880064, 139836545884159,
+ERASE, 139836545847296, 139836545875967,
+STORE, 93902396891136, 93902397026303,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140736538206208, 140737488351231,
+SNULL, 140736538214399, 140737488351231,
+STORE, 140736538206208, 140736538214399,
+STORE, 140736538075136, 140736538214399,
+STORE, 94173471399936, 94173473734655,
+SNULL, 94173471612927, 94173473734655,
+STORE, 94173471399936, 94173471612927,
+STORE, 94173471612928, 94173473734655,
+ERASE, 94173471612928, 94173473734655,
+STORE, 94173473710080, 94173473722367,
+STORE, 94173473722368, 94173473734655,
+STORE, 140035513556992, 140035515809791,
+SNULL, 140035513700351, 140035515809791,
+STORE, 140035513556992, 140035513700351,
+STORE, 140035513700352, 140035515809791,
+ERASE, 140035513700352, 140035515809791,
+STORE, 140035515797504, 140035515805695,
+STORE, 140035515805696, 140035515809791,
+STORE, 140736538329088, 140736538333183,
+STORE, 140736538316800, 140736538329087,
+STORE, 140035515768832, 140035515797503,
+STORE, 140035515760640, 140035515768831,
+STORE, 140035511443456, 140035513556991,
+SNULL, 140035511443456, 140035511455743,
+STORE, 140035511455744, 140035513556991,
+STORE, 140035511443456, 140035511455743,
+SNULL, 140035513548799, 140035513556991,
+STORE, 140035511455744, 140035513548799,
+STORE, 140035513548800, 140035513556991,
+ERASE, 140035513548800, 140035513556991,
+STORE, 140035513548800, 140035513556991,
+STORE, 140035507646464, 140035511443455,
+SNULL, 140035507646464, 140035509305343,
+STORE, 140035509305344, 140035511443455,
+STORE, 140035507646464, 140035509305343,
+SNULL, 140035511402495, 140035511443455,
+STORE, 140035509305344, 140035511402495,
+STORE, 140035511402496, 140035511443455,
+SNULL, 140035511402496, 140035511427071,
+STORE, 140035511427072, 140035511443455,
+STORE, 140035511402496, 140035511427071,
+ERASE, 140035511402496, 140035511427071,
+STORE, 140035511402496, 140035511427071,
+ERASE, 140035511427072, 140035511443455,
+STORE, 140035511427072, 140035511443455,
+STORE, 140035515752448, 140035515768831,
+SNULL, 140035511418879, 140035511427071,
+STORE, 140035511402496, 140035511418879,
+STORE, 140035511418880, 140035511427071,
+SNULL, 140035513552895, 140035513556991,
+STORE, 140035513548800, 140035513552895,
+STORE, 140035513552896, 140035513556991,
+SNULL, 94173473714175, 94173473722367,
+STORE, 94173473710080, 94173473714175,
+STORE, 94173473714176, 94173473722367,
+SNULL, 140035515801599, 140035515805695,
+STORE, 140035515797504, 140035515801599,
+STORE, 140035515801600, 140035515805695,
+ERASE, 140035515768832, 140035515797503,
+STORE, 94173478645760, 94173478780927,
+STORE, 140035514068992, 140035515752447,
+STORE, 94173478645760, 94173478916095,
+STORE, 94173478645760, 94173479051263,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140724216176640, 140737488351231,
+SNULL, 140724216184831, 140737488351231,
+STORE, 140724216176640, 140724216184831,
+STORE, 140724216045568, 140724216184831,
+STORE, 94870930628608, 94870932963327,
+SNULL, 94870930841599, 94870932963327,
+STORE, 94870930628608, 94870930841599,
+STORE, 94870930841600, 94870932963327,
+ERASE, 94870930841600, 94870932963327,
+STORE, 94870932938752, 94870932951039,
+STORE, 94870932951040, 94870932963327,
+STORE, 140453683736576, 140453685989375,
+SNULL, 140453683879935, 140453685989375,
+STORE, 140453683736576, 140453683879935,
+STORE, 140453683879936, 140453685989375,
+ERASE, 140453683879936, 140453685989375,
+STORE, 140453685977088, 140453685985279,
+STORE, 140453685985280, 140453685989375,
+STORE, 140724216832000, 140724216836095,
+STORE, 140724216819712, 140724216831999,
+STORE, 140453685948416, 140453685977087,
+STORE, 140453685940224, 140453685948415,
+STORE, 140453681623040, 140453683736575,
+SNULL, 140453681623040, 140453681635327,
+STORE, 140453681635328, 140453683736575,
+STORE, 140453681623040, 140453681635327,
+SNULL, 140453683728383, 140453683736575,
+STORE, 140453681635328, 140453683728383,
+STORE, 140453683728384, 140453683736575,
+ERASE, 140453683728384, 140453683736575,
+STORE, 140453683728384, 140453683736575,
+STORE, 140453677826048, 140453681623039,
+SNULL, 140453677826048, 140453679484927,
+STORE, 140453679484928, 140453681623039,
+STORE, 140453677826048, 140453679484927,
+SNULL, 140453681582079, 140453681623039,
+STORE, 140453679484928, 140453681582079,
+STORE, 140453681582080, 140453681623039,
+SNULL, 140453681582080, 140453681606655,
+STORE, 140453681606656, 140453681623039,
+STORE, 140453681582080, 140453681606655,
+ERASE, 140453681582080, 140453681606655,
+STORE, 140453681582080, 140453681606655,
+ERASE, 140453681606656, 140453681623039,
+STORE, 140453681606656, 140453681623039,
+STORE, 140453685932032, 140453685948415,
+SNULL, 140453681598463, 140453681606655,
+STORE, 140453681582080, 140453681598463,
+STORE, 140453681598464, 140453681606655,
+SNULL, 140453683732479, 140453683736575,
+STORE, 140453683728384, 140453683732479,
+STORE, 140453683732480, 140453683736575,
+SNULL, 94870932942847, 94870932951039,
+STORE, 94870932938752, 94870932942847,
+STORE, 94870932942848, 94870932951039,
+SNULL, 140453685981183, 140453685985279,
+STORE, 140453685977088, 140453685981183,
+STORE, 140453685981184, 140453685985279,
+ERASE, 140453685948416, 140453685977087,
+STORE, 94870940565504, 94870940700671,
+STORE, 140453684248576, 140453685932031,
+STORE, 94870940565504, 94870940835839,
+STORE, 94870940565504, 94870940971007,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140731275661312, 140737488351231,
+SNULL, 140731275669503, 140737488351231,
+STORE, 140731275661312, 140731275669503,
+STORE, 140731275530240, 140731275669503,
+STORE, 94642788548608, 94642790883327,
+SNULL, 94642788761599, 94642790883327,
+STORE, 94642788548608, 94642788761599,
+STORE, 94642788761600, 94642790883327,
+ERASE, 94642788761600, 94642790883327,
+STORE, 94642790858752, 94642790871039,
+STORE, 94642790871040, 94642790883327,
+STORE, 140228458749952, 140228461002751,
+SNULL, 140228458893311, 140228461002751,
+STORE, 140228458749952, 140228458893311,
+STORE, 140228458893312, 140228461002751,
+ERASE, 140228458893312, 140228461002751,
+STORE, 140228460990464, 140228460998655,
+STORE, 140228460998656, 140228461002751,
+STORE, 140731276349440, 140731276353535,
+STORE, 140731276337152, 140731276349439,
+STORE, 140228460961792, 140228460990463,
+STORE, 140228460953600, 140228460961791,
+STORE, 140228456636416, 140228458749951,
+SNULL, 140228456636416, 140228456648703,
+STORE, 140228456648704, 140228458749951,
+STORE, 140228456636416, 140228456648703,
+SNULL, 140228458741759, 140228458749951,
+STORE, 140228456648704, 140228458741759,
+STORE, 140228458741760, 140228458749951,
+ERASE, 140228458741760, 140228458749951,
+STORE, 140228458741760, 140228458749951,
+STORE, 140228452839424, 140228456636415,
+SNULL, 140228452839424, 140228454498303,
+STORE, 140228454498304, 140228456636415,
+STORE, 140228452839424, 140228454498303,
+SNULL, 140228456595455, 140228456636415,
+STORE, 140228454498304, 140228456595455,
+STORE, 140228456595456, 140228456636415,
+SNULL, 140228456595456, 140228456620031,
+STORE, 140228456620032, 140228456636415,
+STORE, 140228456595456, 140228456620031,
+ERASE, 140228456595456, 140228456620031,
+STORE, 140228456595456, 140228456620031,
+ERASE, 140228456620032, 140228456636415,
+STORE, 140228456620032, 140228456636415,
+STORE, 140228460945408, 140228460961791,
+SNULL, 140228456611839, 140228456620031,
+STORE, 140228456595456, 140228456611839,
+STORE, 140228456611840, 140228456620031,
+SNULL, 140228458745855, 140228458749951,
+STORE, 140228458741760, 140228458745855,
+STORE, 140228458745856, 140228458749951,
+SNULL, 94642790862847, 94642790871039,
+STORE, 94642790858752, 94642790862847,
+STORE, 94642790862848, 94642790871039,
+SNULL, 140228460994559, 140228460998655,
+STORE, 140228460990464, 140228460994559,
+STORE, 140228460994560, 140228460998655,
+ERASE, 140228460961792, 140228460990463,
+STORE, 94642801549312, 94642801684479,
+STORE, 140228459261952, 140228460945407,
+STORE, 94642801549312, 94642801819647,
+STORE, 94642801549312, 94642801954815,
+STORE, 94604087611392, 94604087824383,
+STORE, 94604089921536, 94604089925631,
+STORE, 94604089925632, 94604089933823,
+STORE, 94604089933824, 94604089946111,
+STORE, 94604105125888, 94604106424319,
+STORE, 140454937694208, 140454939353087,
+STORE, 140454939353088, 140454941450239,
+STORE, 140454941450240, 140454941466623,
+STORE, 140454941466624, 140454941474815,
+STORE, 140454941474816, 140454941491199,
+STORE, 140454941491200, 140454941503487,
+STORE, 140454941503488, 140454943596543,
+STORE, 140454943596544, 140454943600639,
+STORE, 140454943600640, 140454943604735,
+STORE, 140454943604736, 140454943748095,
+STORE, 140454944116736, 140454945800191,
+STORE, 140454945800192, 140454945816575,
+STORE, 140454945845248, 140454945849343,
+STORE, 140454945849344, 140454945853439,
+STORE, 140454945853440, 140454945857535,
+STORE, 140728438214656, 140728438353919,
+STORE, 140728439095296, 140728439107583,
+STORE, 140728439107584, 140728439111679,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140721843453952, 140737488351231,
+SNULL, 140721843462143, 140737488351231,
+STORE, 140721843453952, 140721843462143,
+STORE, 140721843322880, 140721843462143,
+STORE, 94465962455040, 94465964789759,
+SNULL, 94465962668031, 94465964789759,
+STORE, 94465962455040, 94465962668031,
+STORE, 94465962668032, 94465964789759,
+ERASE, 94465962668032, 94465964789759,
+STORE, 94465964765184, 94465964777471,
+STORE, 94465964777472, 94465964789759,
+STORE, 139913488314368, 139913490567167,
+SNULL, 139913488457727, 139913490567167,
+STORE, 139913488314368, 139913488457727,
+STORE, 139913488457728, 139913490567167,
+ERASE, 139913488457728, 139913490567167,
+STORE, 139913490554880, 139913490563071,
+STORE, 139913490563072, 139913490567167,
+STORE, 140721843503104, 140721843507199,
+STORE, 140721843490816, 140721843503103,
+STORE, 139913490526208, 139913490554879,
+STORE, 139913490518016, 139913490526207,
+STORE, 139913486200832, 139913488314367,
+SNULL, 139913486200832, 139913486213119,
+STORE, 139913486213120, 139913488314367,
+STORE, 139913486200832, 139913486213119,
+SNULL, 139913488306175, 139913488314367,
+STORE, 139913486213120, 139913488306175,
+STORE, 139913488306176, 139913488314367,
+ERASE, 139913488306176, 139913488314367,
+STORE, 139913488306176, 139913488314367,
+STORE, 139913482403840, 139913486200831,
+SNULL, 139913482403840, 139913484062719,
+STORE, 139913484062720, 139913486200831,
+STORE, 139913482403840, 139913484062719,
+SNULL, 139913486159871, 139913486200831,
+STORE, 139913484062720, 139913486159871,
+STORE, 139913486159872, 139913486200831,
+SNULL, 139913486159872, 139913486184447,
+STORE, 139913486184448, 139913486200831,
+STORE, 139913486159872, 139913486184447,
+ERASE, 139913486159872, 139913486184447,
+STORE, 139913486159872, 139913486184447,
+ERASE, 139913486184448, 139913486200831,
+STORE, 139913486184448, 139913486200831,
+STORE, 139913490509824, 139913490526207,
+SNULL, 139913486176255, 139913486184447,
+STORE, 139913486159872, 139913486176255,
+STORE, 139913486176256, 139913486184447,
+SNULL, 139913488310271, 139913488314367,
+STORE, 139913488306176, 139913488310271,
+STORE, 139913488310272, 139913488314367,
+SNULL, 94465964769279, 94465964777471,
+STORE, 94465964765184, 94465964769279,
+STORE, 94465964769280, 94465964777471,
+SNULL, 139913490558975, 139913490563071,
+STORE, 139913490554880, 139913490558975,
+STORE, 139913490558976, 139913490563071,
+ERASE, 139913490526208, 139913490554879,
+STORE, 94465970024448, 94465970159615,
+STORE, 139913488826368, 139913490509823,
+STORE, 94465970024448, 94465970294783,
+STORE, 94465970024448, 94465970429951,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140720583307264, 140737488351231,
+SNULL, 140720583315455, 140737488351231,
+STORE, 140720583307264, 140720583315455,
+STORE, 140720583176192, 140720583315455,
+STORE, 94212322082816, 94212324417535,
+SNULL, 94212322295807, 94212324417535,
+STORE, 94212322082816, 94212322295807,
+STORE, 94212322295808, 94212324417535,
+ERASE, 94212322295808, 94212324417535,
+STORE, 94212324392960, 94212324405247,
+STORE, 94212324405248, 94212324417535,
+STORE, 139659688538112, 139659690790911,
+SNULL, 139659688681471, 139659690790911,
+STORE, 139659688538112, 139659688681471,
+STORE, 139659688681472, 139659690790911,
+ERASE, 139659688681472, 139659690790911,
+STORE, 139659690778624, 139659690786815,
+STORE, 139659690786816, 139659690790911,
+STORE, 140720584781824, 140720584785919,
+STORE, 140720584769536, 140720584781823,
+STORE, 139659690749952, 139659690778623,
+STORE, 139659690741760, 139659690749951,
+STORE, 139659686424576, 139659688538111,
+SNULL, 139659686424576, 139659686436863,
+STORE, 139659686436864, 139659688538111,
+STORE, 139659686424576, 139659686436863,
+SNULL, 139659688529919, 139659688538111,
+STORE, 139659686436864, 139659688529919,
+STORE, 139659688529920, 139659688538111,
+ERASE, 139659688529920, 139659688538111,
+STORE, 139659688529920, 139659688538111,
+STORE, 139659682627584, 139659686424575,
+SNULL, 139659682627584, 139659684286463,
+STORE, 139659684286464, 139659686424575,
+STORE, 139659682627584, 139659684286463,
+SNULL, 139659686383615, 139659686424575,
+STORE, 139659684286464, 139659686383615,
+STORE, 139659686383616, 139659686424575,
+SNULL, 139659686383616, 139659686408191,
+STORE, 139659686408192, 139659686424575,
+STORE, 139659686383616, 139659686408191,
+ERASE, 139659686383616, 139659686408191,
+STORE, 139659686383616, 139659686408191,
+ERASE, 139659686408192, 139659686424575,
+STORE, 139659686408192, 139659686424575,
+STORE, 139659690733568, 139659690749951,
+SNULL, 139659686399999, 139659686408191,
+STORE, 139659686383616, 139659686399999,
+STORE, 139659686400000, 139659686408191,
+SNULL, 139659688534015, 139659688538111,
+STORE, 139659688529920, 139659688534015,
+STORE, 139659688534016, 139659688538111,
+SNULL, 94212324397055, 94212324405247,
+STORE, 94212324392960, 94212324397055,
+STORE, 94212324397056, 94212324405247,
+SNULL, 139659690782719, 139659690786815,
+STORE, 139659690778624, 139659690782719,
+STORE, 139659690782720, 139659690786815,
+ERASE, 139659690749952, 139659690778623,
+STORE, 94212355014656, 94212355149823,
+STORE, 139659689050112, 139659690733567,
+STORE, 94212355014656, 94212355284991,
+STORE, 94212355014656, 94212355420159,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140727689830400, 140737488351231,
+SNULL, 140727689838591, 140737488351231,
+STORE, 140727689830400, 140727689838591,
+STORE, 140727689699328, 140727689838591,
+STORE, 94572390281216, 94572392615935,
+SNULL, 94572390494207, 94572392615935,
+STORE, 94572390281216, 94572390494207,
+STORE, 94572390494208, 94572392615935,
+ERASE, 94572390494208, 94572392615935,
+STORE, 94572392591360, 94572392603647,
+STORE, 94572392603648, 94572392615935,
+STORE, 140575923769344, 140575926022143,
+SNULL, 140575923912703, 140575926022143,
+STORE, 140575923769344, 140575923912703,
+STORE, 140575923912704, 140575926022143,
+ERASE, 140575923912704, 140575926022143,
+STORE, 140575926009856, 140575926018047,
+STORE, 140575926018048, 140575926022143,
+STORE, 140727689871360, 140727689875455,
+STORE, 140727689859072, 140727689871359,
+STORE, 140575925981184, 140575926009855,
+STORE, 140575925972992, 140575925981183,
+STORE, 140575921655808, 140575923769343,
+SNULL, 140575921655808, 140575921668095,
+STORE, 140575921668096, 140575923769343,
+STORE, 140575921655808, 140575921668095,
+SNULL, 140575923761151, 140575923769343,
+STORE, 140575921668096, 140575923761151,
+STORE, 140575923761152, 140575923769343,
+ERASE, 140575923761152, 140575923769343,
+STORE, 140575923761152, 140575923769343,
+STORE, 140575917858816, 140575921655807,
+SNULL, 140575917858816, 140575919517695,
+STORE, 140575919517696, 140575921655807,
+STORE, 140575917858816, 140575919517695,
+SNULL, 140575921614847, 140575921655807,
+STORE, 140575919517696, 140575921614847,
+STORE, 140575921614848, 140575921655807,
+SNULL, 140575921614848, 140575921639423,
+STORE, 140575921639424, 140575921655807,
+STORE, 140575921614848, 140575921639423,
+ERASE, 140575921614848, 140575921639423,
+STORE, 140575921614848, 140575921639423,
+ERASE, 140575921639424, 140575921655807,
+STORE, 140575921639424, 140575921655807,
+STORE, 140575925964800, 140575925981183,
+SNULL, 140575921631231, 140575921639423,
+STORE, 140575921614848, 140575921631231,
+STORE, 140575921631232, 140575921639423,
+SNULL, 140575923765247, 140575923769343,
+STORE, 140575923761152, 140575923765247,
+STORE, 140575923765248, 140575923769343,
+SNULL, 94572392595455, 94572392603647,
+STORE, 94572392591360, 94572392595455,
+STORE, 94572392595456, 94572392603647,
+SNULL, 140575926013951, 140575926018047,
+STORE, 140575926009856, 140575926013951,
+STORE, 140575926013952, 140575926018047,
+ERASE, 140575925981184, 140575926009855,
+STORE, 94572402278400, 94572402413567,
+STORE, 140575924281344, 140575925964799,
+STORE, 94572402278400, 94572402548735,
+STORE, 94572402278400, 94572402683903,
+STORE, 94572402278400, 94572402851839,
+SNULL, 94572402827263, 94572402851839,
+STORE, 94572402278400, 94572402827263,
+STORE, 94572402827264, 94572402851839,
+ERASE, 94572402827264, 94572402851839,
+STORE, 94572402278400, 94572402966527,
+STORE, 94572402278400, 94572403109887,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140725520506880, 140737488351231,
+SNULL, 140725520515071, 140737488351231,
+STORE, 140725520506880, 140725520515071,
+STORE, 140725520375808, 140725520515071,
+STORE, 93829948788736, 93829951012863,
+SNULL, 93829948899327, 93829951012863,
+STORE, 93829948788736, 93829948899327,
+STORE, 93829948899328, 93829951012863,
+ERASE, 93829948899328, 93829951012863,
+STORE, 93829950992384, 93829951004671,
+STORE, 93829951004672, 93829951012863,
+STORE, 140133696794624, 140133699047423,
+SNULL, 140133696937983, 140133699047423,
+STORE, 140133696794624, 140133696937983,
+STORE, 140133696937984, 140133699047423,
+ERASE, 140133696937984, 140133699047423,
+STORE, 140133699035136, 140133699043327,
+STORE, 140133699043328, 140133699047423,
+STORE, 140725520875520, 140725520879615,
+STORE, 140725520863232, 140725520875519,
+STORE, 140133699006464, 140133699035135,
+STORE, 140133698998272, 140133699006463,
+STORE, 140133692997632, 140133696794623,
+SNULL, 140133692997632, 140133694656511,
+STORE, 140133694656512, 140133696794623,
+STORE, 140133692997632, 140133694656511,
+SNULL, 140133696753663, 140133696794623,
+STORE, 140133694656512, 140133696753663,
+STORE, 140133696753664, 140133696794623,
+SNULL, 140133696753664, 140133696778239,
+STORE, 140133696778240, 140133696794623,
+STORE, 140133696753664, 140133696778239,
+ERASE, 140133696753664, 140133696778239,
+STORE, 140133696753664, 140133696778239,
+ERASE, 140133696778240, 140133696794623,
+STORE, 140133696778240, 140133696794623,
+SNULL, 140133696770047, 140133696778239,
+STORE, 140133696753664, 140133696770047,
+STORE, 140133696770048, 140133696778239,
+SNULL, 93829951000575, 93829951004671,
+STORE, 93829950992384, 93829951000575,
+STORE, 93829951000576, 93829951004671,
+SNULL, 140133699039231, 140133699043327,
+STORE, 140133699035136, 140133699039231,
+STORE, 140133699039232, 140133699043327,
+ERASE, 140133699006464, 140133699035135,
+STORE, 93829978693632, 93829978828799,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140736118022144, 140737488351231,
+SNULL, 140736118030335, 140737488351231,
+STORE, 140736118022144, 140736118030335,
+STORE, 140736117891072, 140736118030335,
+STORE, 94467663982592, 94467666206719,
+SNULL, 94467664093183, 94467666206719,
+STORE, 94467663982592, 94467664093183,
+STORE, 94467664093184, 94467666206719,
+ERASE, 94467664093184, 94467666206719,
+STORE, 94467666186240, 94467666198527,
+STORE, 94467666198528, 94467666206719,
+STORE, 140525377327104, 140525379579903,
+SNULL, 140525377470463, 140525379579903,
+STORE, 140525377327104, 140525377470463,
+STORE, 140525377470464, 140525379579903,
+ERASE, 140525377470464, 140525379579903,
+STORE, 140525379567616, 140525379575807,
+STORE, 140525379575808, 140525379579903,
+STORE, 140736118771712, 140736118775807,
+STORE, 140736118759424, 140736118771711,
+STORE, 140525379538944, 140525379567615,
+STORE, 140525379530752, 140525379538943,
+STORE, 140525373530112, 140525377327103,
+SNULL, 140525373530112, 140525375188991,
+STORE, 140525375188992, 140525377327103,
+STORE, 140525373530112, 140525375188991,
+SNULL, 140525377286143, 140525377327103,
+STORE, 140525375188992, 140525377286143,
+STORE, 140525377286144, 140525377327103,
+SNULL, 140525377286144, 140525377310719,
+STORE, 140525377310720, 140525377327103,
+STORE, 140525377286144, 140525377310719,
+ERASE, 140525377286144, 140525377310719,
+STORE, 140525377286144, 140525377310719,
+ERASE, 140525377310720, 140525377327103,
+STORE, 140525377310720, 140525377327103,
+SNULL, 140525377302527, 140525377310719,
+STORE, 140525377286144, 140525377302527,
+STORE, 140525377302528, 140525377310719,
+SNULL, 94467666194431, 94467666198527,
+STORE, 94467666186240, 94467666194431,
+STORE, 94467666194432, 94467666198527,
+SNULL, 140525379571711, 140525379575807,
+STORE, 140525379567616, 140525379571711,
+STORE, 140525379571712, 140525379575807,
+ERASE, 140525379538944, 140525379567615,
+STORE, 94467693379584, 94467693514751,
+STORE, 94200172744704, 94200172957695,
+STORE, 94200175054848, 94200175058943,
+STORE, 94200175058944, 94200175067135,
+STORE, 94200175067136, 94200175079423,
+STORE, 94200196673536, 94200198905855,
+STORE, 140053867720704, 140053869379583,
+STORE, 140053869379584, 140053871476735,
+STORE, 140053871476736, 140053871493119,
+STORE, 140053871493120, 140053871501311,
+STORE, 140053871501312, 140053871517695,
+STORE, 140053871517696, 140053871529983,
+STORE, 140053871529984, 140053873623039,
+STORE, 140053873623040, 140053873627135,
+STORE, 140053873627136, 140053873631231,
+STORE, 140053873631232, 140053873774591,
+STORE, 140053874143232, 140053875826687,
+STORE, 140053875826688, 140053875843071,
+STORE, 140053875871744, 140053875875839,
+STORE, 140053875875840, 140053875879935,
+STORE, 140053875879936, 140053875884031,
+STORE, 140728538484736, 140728538623999,
+STORE, 140728538652672, 140728538664959,
+STORE, 140728538664960, 140728538669055,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140732307775488, 140737488351231,
+SNULL, 140732307783679, 140737488351231,
+STORE, 140732307775488, 140732307783679,
+STORE, 140732307644416, 140732307783679,
+STORE, 93831417630720, 93831419965439,
+SNULL, 93831417843711, 93831419965439,
+STORE, 93831417630720, 93831417843711,
+STORE, 93831417843712, 93831419965439,
+ERASE, 93831417843712, 93831419965439,
+STORE, 93831419940864, 93831419953151,
+STORE, 93831419953152, 93831419965439,
+STORE, 140241062088704, 140241064341503,
+SNULL, 140241062232063, 140241064341503,
+STORE, 140241062088704, 140241062232063,
+STORE, 140241062232064, 140241064341503,
+ERASE, 140241062232064, 140241064341503,
+STORE, 140241064329216, 140241064337407,
+STORE, 140241064337408, 140241064341503,
+STORE, 140732308140032, 140732308144127,
+STORE, 140732308127744, 140732308140031,
+STORE, 140241064300544, 140241064329215,
+STORE, 140241064292352, 140241064300543,
+STORE, 140241059975168, 140241062088703,
+SNULL, 140241059975168, 140241059987455,
+STORE, 140241059987456, 140241062088703,
+STORE, 140241059975168, 140241059987455,
+SNULL, 140241062080511, 140241062088703,
+STORE, 140241059987456, 140241062080511,
+STORE, 140241062080512, 140241062088703,
+ERASE, 140241062080512, 140241062088703,
+STORE, 140241062080512, 140241062088703,
+STORE, 140241056178176, 140241059975167,
+SNULL, 140241056178176, 140241057837055,
+STORE, 140241057837056, 140241059975167,
+STORE, 140241056178176, 140241057837055,
+SNULL, 140241059934207, 140241059975167,
+STORE, 140241057837056, 140241059934207,
+STORE, 140241059934208, 140241059975167,
+SNULL, 140241059934208, 140241059958783,
+STORE, 140241059958784, 140241059975167,
+STORE, 140241059934208, 140241059958783,
+ERASE, 140241059934208, 140241059958783,
+STORE, 140241059934208, 140241059958783,
+ERASE, 140241059958784, 140241059975167,
+STORE, 140241059958784, 140241059975167,
+STORE, 140241064284160, 140241064300543,
+SNULL, 140241059950591, 140241059958783,
+STORE, 140241059934208, 140241059950591,
+STORE, 140241059950592, 140241059958783,
+SNULL, 140241062084607, 140241062088703,
+STORE, 140241062080512, 140241062084607,
+STORE, 140241062084608, 140241062088703,
+SNULL, 93831419944959, 93831419953151,
+STORE, 93831419940864, 93831419944959,
+STORE, 93831419944960, 93831419953151,
+SNULL, 140241064333311, 140241064337407,
+STORE, 140241064329216, 140241064333311,
+STORE, 140241064333312, 140241064337407,
+ERASE, 140241064300544, 140241064329215,
+STORE, 93831435284480, 93831435419647,
+STORE, 140241062600704, 140241064284159,
+STORE, 93831435284480, 93831435554815,
+STORE, 93831435284480, 93831435689983,
+STORE, 93831435284480, 93831435862015,
+SNULL, 93831435837439, 93831435862015,
+STORE, 93831435284480, 93831435837439,
+STORE, 93831435837440, 93831435862015,
+ERASE, 93831435837440, 93831435862015,
+STORE, 93831435284480, 93831435972607,
+STORE, 93831435284480, 93831436107775,
+SNULL, 93831436091391, 93831436107775,
+STORE, 93831435284480, 93831436091391,
+STORE, 93831436091392, 93831436107775,
+ERASE, 93831436091392, 93831436107775,
+STORE, 93831435284480, 93831436226559,
+STORE, 93831435284480, 93831436361727,
+STORE, 93831435284480, 93831436505087,
+STORE, 93831435284480, 93831436652543,
+STORE, 93831435284480, 93831436787711,
+STORE, 93831435284480, 93831436926975,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140728546775040, 140737488351231,
+SNULL, 140728546783231, 140737488351231,
+STORE, 140728546775040, 140728546783231,
+STORE, 140728546643968, 140728546783231,
+STORE, 94456178786304, 94456181010431,
+SNULL, 94456178896895, 94456181010431,
+STORE, 94456178786304, 94456178896895,
+STORE, 94456178896896, 94456181010431,
+ERASE, 94456178896896, 94456181010431,
+STORE, 94456180989952, 94456181002239,
+STORE, 94456181002240, 94456181010431,
+STORE, 140221893091328, 140221895344127,
+SNULL, 140221893234687, 140221895344127,
+STORE, 140221893091328, 140221893234687,
+STORE, 140221893234688, 140221895344127,
+ERASE, 140221893234688, 140221895344127,
+STORE, 140221895331840, 140221895340031,
+STORE, 140221895340032, 140221895344127,
+STORE, 140728547803136, 140728547807231,
+STORE, 140728547790848, 140728547803135,
+STORE, 140221895303168, 140221895331839,
+STORE, 140221895294976, 140221895303167,
+STORE, 140221889294336, 140221893091327,
+SNULL, 140221889294336, 140221890953215,
+STORE, 140221890953216, 140221893091327,
+STORE, 140221889294336, 140221890953215,
+SNULL, 140221893050367, 140221893091327,
+STORE, 140221890953216, 140221893050367,
+STORE, 140221893050368, 140221893091327,
+SNULL, 140221893050368, 140221893074943,
+STORE, 140221893074944, 140221893091327,
+STORE, 140221893050368, 140221893074943,
+ERASE, 140221893050368, 140221893074943,
+STORE, 140221893050368, 140221893074943,
+ERASE, 140221893074944, 140221893091327,
+STORE, 140221893074944, 140221893091327,
+SNULL, 140221893066751, 140221893074943,
+STORE, 140221893050368, 140221893066751,
+STORE, 140221893066752, 140221893074943,
+SNULL, 94456180998143, 94456181002239,
+STORE, 94456180989952, 94456180998143,
+STORE, 94456180998144, 94456181002239,
+SNULL, 140221895335935, 140221895340031,
+STORE, 140221895331840, 140221895335935,
+STORE, 140221895335936, 140221895340031,
+ERASE, 140221895303168, 140221895331839,
+STORE, 94456203730944, 94456203866111,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140734438637568, 140737488351231,
+SNULL, 140734438645759, 140737488351231,
+STORE, 140734438637568, 140734438645759,
+STORE, 140734438506496, 140734438645759,
+STORE, 94652233351168, 94652235575295,
+SNULL, 94652233461759, 94652235575295,
+STORE, 94652233351168, 94652233461759,
+STORE, 94652233461760, 94652235575295,
+ERASE, 94652233461760, 94652235575295,
+STORE, 94652235554816, 94652235567103,
+STORE, 94652235567104, 94652235575295,
+STORE, 140536493195264, 140536495448063,
+SNULL, 140536493338623, 140536495448063,
+STORE, 140536493195264, 140536493338623,
+STORE, 140536493338624, 140536495448063,
+ERASE, 140536493338624, 140536495448063,
+STORE, 140536495435776, 140536495443967,
+STORE, 140536495443968, 140536495448063,
+STORE, 140734439002112, 140734439006207,
+STORE, 140734438989824, 140734439002111,
+STORE, 140536495407104, 140536495435775,
+STORE, 140536495398912, 140536495407103,
+STORE, 140536489398272, 140536493195263,
+SNULL, 140536489398272, 140536491057151,
+STORE, 140536491057152, 140536493195263,
+STORE, 140536489398272, 140536491057151,
+SNULL, 140536493154303, 140536493195263,
+STORE, 140536491057152, 140536493154303,
+STORE, 140536493154304, 140536493195263,
+SNULL, 140536493154304, 140536493178879,
+STORE, 140536493178880, 140536493195263,
+STORE, 140536493154304, 140536493178879,
+ERASE, 140536493154304, 140536493178879,
+STORE, 140536493154304, 140536493178879,
+ERASE, 140536493178880, 140536493195263,
+STORE, 140536493178880, 140536493195263,
+SNULL, 140536493170687, 140536493178879,
+STORE, 140536493154304, 140536493170687,
+STORE, 140536493170688, 140536493178879,
+SNULL, 94652235563007, 94652235567103,
+STORE, 94652235554816, 94652235563007,
+STORE, 94652235563008, 94652235567103,
+SNULL, 140536495439871, 140536495443967,
+STORE, 140536495435776, 140536495439871,
+STORE, 140536495439872, 140536495443967,
+ERASE, 140536495407104, 140536495435775,
+STORE, 94652265619456, 94652265754623,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140721814200320, 140737488351231,
+SNULL, 140721814208511, 140737488351231,
+STORE, 140721814200320, 140721814208511,
+STORE, 140721814069248, 140721814208511,
+STORE, 94062800691200, 94062802915327,
+SNULL, 94062800801791, 94062802915327,
+STORE, 94062800691200, 94062800801791,
+STORE, 94062800801792, 94062802915327,
+ERASE, 94062800801792, 94062802915327,
+STORE, 94062802894848, 94062802907135,
+STORE, 94062802907136, 94062802915327,
+STORE, 139717739700224, 139717741953023,
+SNULL, 139717739843583, 139717741953023,
+STORE, 139717739700224, 139717739843583,
+STORE, 139717739843584, 139717741953023,
+ERASE, 139717739843584, 139717741953023,
+STORE, 139717741940736, 139717741948927,
+STORE, 139717741948928, 139717741953023,
+STORE, 140721814224896, 140721814228991,
+STORE, 140721814212608, 140721814224895,
+STORE, 139717741912064, 139717741940735,
+STORE, 139717741903872, 139717741912063,
+STORE, 139717735903232, 139717739700223,
+SNULL, 139717735903232, 139717737562111,
+STORE, 139717737562112, 139717739700223,
+STORE, 139717735903232, 139717737562111,
+SNULL, 139717739659263, 139717739700223,
+STORE, 139717737562112, 139717739659263,
+STORE, 139717739659264, 139717739700223,
+SNULL, 139717739659264, 139717739683839,
+STORE, 139717739683840, 139717739700223,
+STORE, 139717739659264, 139717739683839,
+ERASE, 139717739659264, 139717739683839,
+STORE, 139717739659264, 139717739683839,
+ERASE, 139717739683840, 139717739700223,
+STORE, 139717739683840, 139717739700223,
+SNULL, 139717739675647, 139717739683839,
+STORE, 139717739659264, 139717739675647,
+STORE, 139717739675648, 139717739683839,
+SNULL, 94062802903039, 94062802907135,
+STORE, 94062802894848, 94062802903039,
+STORE, 94062802903040, 94062802907135,
+SNULL, 139717741944831, 139717741948927,
+STORE, 139717741940736, 139717741944831,
+STORE, 139717741944832, 139717741948927,
+ERASE, 139717741912064, 139717741940735,
+STORE, 94062814060544, 94062814195711,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140723945754624, 140737488351231,
+SNULL, 140723945762815, 140737488351231,
+STORE, 140723945754624, 140723945762815,
+STORE, 140723945623552, 140723945762815,
+STORE, 94886119305216, 94886121639935,
+SNULL, 94886119518207, 94886121639935,
+STORE, 94886119305216, 94886119518207,
+STORE, 94886119518208, 94886121639935,
+ERASE, 94886119518208, 94886121639935,
+STORE, 94886121615360, 94886121627647,
+STORE, 94886121627648, 94886121639935,
+STORE, 140152532131840, 140152534384639,
+SNULL, 140152532275199, 140152534384639,
+STORE, 140152532131840, 140152532275199,
+STORE, 140152532275200, 140152534384639,
+ERASE, 140152532275200, 140152534384639,
+STORE, 140152534372352, 140152534380543,
+STORE, 140152534380544, 140152534384639,
+STORE, 140723946213376, 140723946217471,
+STORE, 140723946201088, 140723946213375,
+STORE, 140152534343680, 140152534372351,
+STORE, 140152534335488, 140152534343679,
+STORE, 140152530018304, 140152532131839,
+SNULL, 140152530018304, 140152530030591,
+STORE, 140152530030592, 140152532131839,
+STORE, 140152530018304, 140152530030591,
+SNULL, 140152532123647, 140152532131839,
+STORE, 140152530030592, 140152532123647,
+STORE, 140152532123648, 140152532131839,
+ERASE, 140152532123648, 140152532131839,
+STORE, 140152532123648, 140152532131839,
+STORE, 140152526221312, 140152530018303,
+SNULL, 140152526221312, 140152527880191,
+STORE, 140152527880192, 140152530018303,
+STORE, 140152526221312, 140152527880191,
+SNULL, 140152529977343, 140152530018303,
+STORE, 140152527880192, 140152529977343,
+STORE, 140152529977344, 140152530018303,
+SNULL, 140152529977344, 140152530001919,
+STORE, 140152530001920, 140152530018303,
+STORE, 140152529977344, 140152530001919,
+ERASE, 140152529977344, 140152530001919,
+STORE, 140152529977344, 140152530001919,
+ERASE, 140152530001920, 140152530018303,
+STORE, 140152530001920, 140152530018303,
+STORE, 140152534327296, 140152534343679,
+SNULL, 140152529993727, 140152530001919,
+STORE, 140152529977344, 140152529993727,
+STORE, 140152529993728, 140152530001919,
+SNULL, 140152532127743, 140152532131839,
+STORE, 140152532123648, 140152532127743,
+STORE, 140152532127744, 140152532131839,
+SNULL, 94886121619455, 94886121627647,
+STORE, 94886121615360, 94886121619455,
+STORE, 94886121619456, 94886121627647,
+SNULL, 140152534376447, 140152534380543,
+STORE, 140152534372352, 140152534376447,
+STORE, 140152534376448, 140152534380543,
+ERASE, 140152534343680, 140152534372351,
+STORE, 94886129770496, 94886129905663,
+STORE, 140152532643840, 140152534327295,
+STORE, 94886129770496, 94886130040831,
+STORE, 94886129770496, 94886130175999,
+STORE, 94886129770496, 94886130348031,
+SNULL, 94886130323455, 94886130348031,
+STORE, 94886129770496, 94886130323455,
+STORE, 94886130323456, 94886130348031,
+ERASE, 94886130323456, 94886130348031,
+STORE, 94886129770496, 94886130458623,
+STORE, 94886129770496, 94886130606079,
+SNULL, 94886130573311, 94886130606079,
+STORE, 94886129770496, 94886130573311,
+STORE, 94886130573312, 94886130606079,
+ERASE, 94886130573312, 94886130606079,
+STORE, 94886129770496, 94886130724863,
+STORE, 94886129770496, 94886130876415,
+STORE, 94886129770496, 94886131023871,
+STORE, 94886129770496, 94886131175423,
+STORE, 94886129770496, 94886131318783,
+STORE, 94886129770496, 94886131453951,
+SNULL, 94886131449855, 94886131453951,
+STORE, 94886129770496, 94886131449855,
+STORE, 94886131449856, 94886131453951,
+ERASE, 94886131449856, 94886131453951,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735450779648, 140737488351231,
+SNULL, 140735450787839, 140737488351231,
+STORE, 140735450779648, 140735450787839,
+STORE, 140735450648576, 140735450787839,
+STORE, 93947794079744, 93947796414463,
+SNULL, 93947794292735, 93947796414463,
+STORE, 93947794079744, 93947794292735,
+STORE, 93947794292736, 93947796414463,
+ERASE, 93947794292736, 93947796414463,
+STORE, 93947796389888, 93947796402175,
+STORE, 93947796402176, 93947796414463,
+STORE, 139841993433088, 139841995685887,
+SNULL, 139841993576447, 139841995685887,
+STORE, 139841993433088, 139841993576447,
+STORE, 139841993576448, 139841995685887,
+ERASE, 139841993576448, 139841995685887,
+STORE, 139841995673600, 139841995681791,
+STORE, 139841995681792, 139841995685887,
+STORE, 140735451308032, 140735451312127,
+STORE, 140735451295744, 140735451308031,
+STORE, 139841995644928, 139841995673599,
+STORE, 139841995636736, 139841995644927,
+STORE, 139841991319552, 139841993433087,
+SNULL, 139841991319552, 139841991331839,
+STORE, 139841991331840, 139841993433087,
+STORE, 139841991319552, 139841991331839,
+SNULL, 139841993424895, 139841993433087,
+STORE, 139841991331840, 139841993424895,
+STORE, 139841993424896, 139841993433087,
+ERASE, 139841993424896, 139841993433087,
+STORE, 139841993424896, 139841993433087,
+STORE, 139841987522560, 139841991319551,
+SNULL, 139841987522560, 139841989181439,
+STORE, 139841989181440, 139841991319551,
+STORE, 139841987522560, 139841989181439,
+SNULL, 139841991278591, 139841991319551,
+STORE, 139841989181440, 139841991278591,
+STORE, 139841991278592, 139841991319551,
+SNULL, 139841991278592, 139841991303167,
+STORE, 139841991303168, 139841991319551,
+STORE, 139841991278592, 139841991303167,
+ERASE, 139841991278592, 139841991303167,
+STORE, 139841991278592, 139841991303167,
+ERASE, 139841991303168, 139841991319551,
+STORE, 139841991303168, 139841991319551,
+STORE, 139841995628544, 139841995644927,
+SNULL, 139841991294975, 139841991303167,
+STORE, 139841991278592, 139841991294975,
+STORE, 139841991294976, 139841991303167,
+SNULL, 139841993428991, 139841993433087,
+STORE, 139841993424896, 139841993428991,
+STORE, 139841993428992, 139841993433087,
+SNULL, 93947796393983, 93947796402175,
+STORE, 93947796389888, 93947796393983,
+STORE, 93947796393984, 93947796402175,
+SNULL, 139841995677695, 139841995681791,
+STORE, 139841995673600, 139841995677695,
+STORE, 139841995677696, 139841995681791,
+ERASE, 139841995644928, 139841995673599,
+STORE, 93947829739520, 93947829874687,
+STORE, 139841993945088, 139841995628543,
+STORE, 93947829739520, 93947830009855,
+STORE, 93947829739520, 93947830145023,
+STORE, 94659351814144, 94659352027135,
+STORE, 94659354124288, 94659354128383,
+STORE, 94659354128384, 94659354136575,
+STORE, 94659354136576, 94659354148863,
+STORE, 94659383476224, 94659385057279,
+STORE, 139959054557184, 139959056216063,
+STORE, 139959056216064, 139959058313215,
+STORE, 139959058313216, 139959058329599,
+STORE, 139959058329600, 139959058337791,
+STORE, 139959058337792, 139959058354175,
+STORE, 139959058354176, 139959058366463,
+STORE, 139959058366464, 139959060459519,
+STORE, 139959060459520, 139959060463615,
+STORE, 139959060463616, 139959060467711,
+STORE, 139959060467712, 139959060611071,
+STORE, 139959060979712, 139959062663167,
+STORE, 139959062663168, 139959062679551,
+STORE, 139959062708224, 139959062712319,
+STORE, 139959062712320, 139959062716415,
+STORE, 139959062716416, 139959062720511,
+STORE, 140735532539904, 140735532679167,
+STORE, 140735532830720, 140735532843007,
+STORE, 140735532843008, 140735532847103,
+STORE, 93894361829376, 93894362042367,
+STORE, 93894364139520, 93894364143615,
+STORE, 93894364143616, 93894364151807,
+STORE, 93894364151808, 93894364164095,
+STORE, 93894396944384, 93894397624319,
+STORE, 140075612573696, 140075614232575,
+STORE, 140075614232576, 140075616329727,
+STORE, 140075616329728, 140075616346111,
+STORE, 140075616346112, 140075616354303,
+STORE, 140075616354304, 140075616370687,
+STORE, 140075616370688, 140075616382975,
+STORE, 140075616382976, 140075618476031,
+STORE, 140075618476032, 140075618480127,
+STORE, 140075618480128, 140075618484223,
+STORE, 140075618484224, 140075618627583,
+STORE, 140075618996224, 140075620679679,
+STORE, 140075620679680, 140075620696063,
+STORE, 140075620724736, 140075620728831,
+STORE, 140075620728832, 140075620732927,
+STORE, 140075620732928, 140075620737023,
+STORE, 140720830312448, 140720830451711,
+STORE, 140720830631936, 140720830644223,
+STORE, 140720830644224, 140720830648319,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735116226560, 140737488351231,
+SNULL, 140735116234751, 140737488351231,
+STORE, 140735116226560, 140735116234751,
+STORE, 140735116095488, 140735116234751,
+STORE, 94873398054912, 94873400279039,
+SNULL, 94873398165503, 94873400279039,
+STORE, 94873398054912, 94873398165503,
+STORE, 94873398165504, 94873400279039,
+ERASE, 94873398165504, 94873400279039,
+STORE, 94873400258560, 94873400270847,
+STORE, 94873400270848, 94873400279039,
+STORE, 140303828606976, 140303830859775,
+SNULL, 140303828750335, 140303830859775,
+STORE, 140303828606976, 140303828750335,
+STORE, 140303828750336, 140303830859775,
+ERASE, 140303828750336, 140303830859775,
+STORE, 140303830847488, 140303830855679,
+STORE, 140303830855680, 140303830859775,
+STORE, 140735116251136, 140735116255231,
+STORE, 140735116238848, 140735116251135,
+STORE, 140303830818816, 140303830847487,
+STORE, 140303830810624, 140303830818815,
+STORE, 140303824809984, 140303828606975,
+SNULL, 140303824809984, 140303826468863,
+STORE, 140303826468864, 140303828606975,
+STORE, 140303824809984, 140303826468863,
+SNULL, 140303828566015, 140303828606975,
+STORE, 140303826468864, 140303828566015,
+STORE, 140303828566016, 140303828606975,
+SNULL, 140303828566016, 140303828590591,
+STORE, 140303828590592, 140303828606975,
+STORE, 140303828566016, 140303828590591,
+ERASE, 140303828566016, 140303828590591,
+STORE, 140303828566016, 140303828590591,
+ERASE, 140303828590592, 140303828606975,
+STORE, 140303828590592, 140303828606975,
+SNULL, 140303828582399, 140303828590591,
+STORE, 140303828566016, 140303828582399,
+STORE, 140303828582400, 140303828590591,
+SNULL, 94873400266751, 94873400270847,
+STORE, 94873400258560, 94873400266751,
+STORE, 94873400266752, 94873400270847,
+SNULL, 140303830851583, 140303830855679,
+STORE, 140303830847488, 140303830851583,
+STORE, 140303830851584, 140303830855679,
+ERASE, 140303830818816, 140303830847487,
+STORE, 94873413713920, 94873413849087,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140732349956096, 140737488351231,
+SNULL, 140732349964287, 140737488351231,
+STORE, 140732349956096, 140732349964287,
+STORE, 140732349825024, 140732349964287,
+STORE, 94009652736000, 94009655070719,
+SNULL, 94009652948991, 94009655070719,
+STORE, 94009652736000, 94009652948991,
+STORE, 94009652948992, 94009655070719,
+ERASE, 94009652948992, 94009655070719,
+STORE, 94009655046144, 94009655058431,
+STORE, 94009655058432, 94009655070719,
+STORE, 140295688531968, 140295690784767,
+SNULL, 140295688675327, 140295690784767,
+STORE, 140295688531968, 140295688675327,
+STORE, 140295688675328, 140295690784767,
+ERASE, 140295688675328, 140295690784767,
+STORE, 140295690772480, 140295690780671,
+STORE, 140295690780672, 140295690784767,
+STORE, 140732350005248, 140732350009343,
+STORE, 140732349992960, 140732350005247,
+STORE, 140295690743808, 140295690772479,
+STORE, 140295690735616, 140295690743807,
+STORE, 140295686418432, 140295688531967,
+SNULL, 140295686418432, 140295686430719,
+STORE, 140295686430720, 140295688531967,
+STORE, 140295686418432, 140295686430719,
+SNULL, 140295688523775, 140295688531967,
+STORE, 140295686430720, 140295688523775,
+STORE, 140295688523776, 140295688531967,
+ERASE, 140295688523776, 140295688531967,
+STORE, 140295688523776, 140295688531967,
+STORE, 140295682621440, 140295686418431,
+SNULL, 140295682621440, 140295684280319,
+STORE, 140295684280320, 140295686418431,
+STORE, 140295682621440, 140295684280319,
+SNULL, 140295686377471, 140295686418431,
+STORE, 140295684280320, 140295686377471,
+STORE, 140295686377472, 140295686418431,
+SNULL, 140295686377472, 140295686402047,
+STORE, 140295686402048, 140295686418431,
+STORE, 140295686377472, 140295686402047,
+ERASE, 140295686377472, 140295686402047,
+STORE, 140295686377472, 140295686402047,
+ERASE, 140295686402048, 140295686418431,
+STORE, 140295686402048, 140295686418431,
+STORE, 140295690727424, 140295690743807,
+SNULL, 140295686393855, 140295686402047,
+STORE, 140295686377472, 140295686393855,
+STORE, 140295686393856, 140295686402047,
+SNULL, 140295688527871, 140295688531967,
+STORE, 140295688523776, 140295688527871,
+STORE, 140295688527872, 140295688531967,
+SNULL, 94009655050239, 94009655058431,
+STORE, 94009655046144, 94009655050239,
+STORE, 94009655050240, 94009655058431,
+SNULL, 140295690776575, 140295690780671,
+STORE, 140295690772480, 140295690776575,
+STORE, 140295690776576, 140295690780671,
+ERASE, 140295690743808, 140295690772479,
+STORE, 94009672114176, 94009672249343,
+STORE, 140295689043968, 140295690727423,
+STORE, 94009672114176, 94009672384511,
+STORE, 94009672114176, 94009672519679,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722376515584, 140737488351231,
+SNULL, 140722376523775, 140737488351231,
+STORE, 140722376515584, 140722376523775,
+STORE, 140722376384512, 140722376523775,
+STORE, 94089815773184, 94089818107903,
+SNULL, 94089815986175, 94089818107903,
+STORE, 94089815773184, 94089815986175,
+STORE, 94089815986176, 94089818107903,
+ERASE, 94089815986176, 94089818107903,
+STORE, 94089818083328, 94089818095615,
+STORE, 94089818095616, 94089818107903,
+STORE, 140265595711488, 140265597964287,
+SNULL, 140265595854847, 140265597964287,
+STORE, 140265595711488, 140265595854847,
+STORE, 140265595854848, 140265597964287,
+ERASE, 140265595854848, 140265597964287,
+STORE, 140265597952000, 140265597960191,
+STORE, 140265597960192, 140265597964287,
+STORE, 140722378297344, 140722378301439,
+STORE, 140722378285056, 140722378297343,
+STORE, 140265597923328, 140265597951999,
+STORE, 140265597915136, 140265597923327,
+STORE, 140265593597952, 140265595711487,
+SNULL, 140265593597952, 140265593610239,
+STORE, 140265593610240, 140265595711487,
+STORE, 140265593597952, 140265593610239,
+SNULL, 140265595703295, 140265595711487,
+STORE, 140265593610240, 140265595703295,
+STORE, 140265595703296, 140265595711487,
+ERASE, 140265595703296, 140265595711487,
+STORE, 140265595703296, 140265595711487,
+STORE, 140265589800960, 140265593597951,
+SNULL, 140265589800960, 140265591459839,
+STORE, 140265591459840, 140265593597951,
+STORE, 140265589800960, 140265591459839,
+SNULL, 140265593556991, 140265593597951,
+STORE, 140265591459840, 140265593556991,
+STORE, 140265593556992, 140265593597951,
+SNULL, 140265593556992, 140265593581567,
+STORE, 140265593581568, 140265593597951,
+STORE, 140265593556992, 140265593581567,
+ERASE, 140265593556992, 140265593581567,
+STORE, 140265593556992, 140265593581567,
+ERASE, 140265593581568, 140265593597951,
+STORE, 140265593581568, 140265593597951,
+STORE, 140265597906944, 140265597923327,
+SNULL, 140265593573375, 140265593581567,
+STORE, 140265593556992, 140265593573375,
+STORE, 140265593573376, 140265593581567,
+SNULL, 140265595707391, 140265595711487,
+STORE, 140265595703296, 140265595707391,
+STORE, 140265595707392, 140265595711487,
+SNULL, 94089818087423, 94089818095615,
+STORE, 94089818083328, 94089818087423,
+STORE, 94089818087424, 94089818095615,
+SNULL, 140265597956095, 140265597960191,
+STORE, 140265597952000, 140265597956095,
+STORE, 140265597956096, 140265597960191,
+ERASE, 140265597923328, 140265597951999,
+STORE, 94089837146112, 94089837281279,
+STORE, 140265596223488, 140265597906943,
+STORE, 94089837146112, 94089837416447,
+STORE, 94089837146112, 94089837551615,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735265218560, 140737488351231,
+SNULL, 140735265226751, 140737488351231,
+STORE, 140735265218560, 140735265226751,
+STORE, 140735265087488, 140735265226751,
+STORE, 94250422370304, 94250424705023,
+SNULL, 94250422583295, 94250424705023,
+STORE, 94250422370304, 94250422583295,
+STORE, 94250422583296, 94250424705023,
+ERASE, 94250422583296, 94250424705023,
+STORE, 94250424680448, 94250424692735,
+STORE, 94250424692736, 94250424705023,
+STORE, 140344442474496, 140344444727295,
+SNULL, 140344442617855, 140344444727295,
+STORE, 140344442474496, 140344442617855,
+STORE, 140344442617856, 140344444727295,
+ERASE, 140344442617856, 140344444727295,
+STORE, 140344444715008, 140344444723199,
+STORE, 140344444723200, 140344444727295,
+STORE, 140735265341440, 140735265345535,
+STORE, 140735265329152, 140735265341439,
+STORE, 140344444686336, 140344444715007,
+STORE, 140344444678144, 140344444686335,
+STORE, 140344440360960, 140344442474495,
+SNULL, 140344440360960, 140344440373247,
+STORE, 140344440373248, 140344442474495,
+STORE, 140344440360960, 140344440373247,
+SNULL, 140344442466303, 140344442474495,
+STORE, 140344440373248, 140344442466303,
+STORE, 140344442466304, 140344442474495,
+ERASE, 140344442466304, 140344442474495,
+STORE, 140344442466304, 140344442474495,
+STORE, 140344436563968, 140344440360959,
+SNULL, 140344436563968, 140344438222847,
+STORE, 140344438222848, 140344440360959,
+STORE, 140344436563968, 140344438222847,
+SNULL, 140344440319999, 140344440360959,
+STORE, 140344438222848, 140344440319999,
+STORE, 140344440320000, 140344440360959,
+SNULL, 140344440320000, 140344440344575,
+STORE, 140344440344576, 140344440360959,
+STORE, 140344440320000, 140344440344575,
+ERASE, 140344440320000, 140344440344575,
+STORE, 140344440320000, 140344440344575,
+ERASE, 140344440344576, 140344440360959,
+STORE, 140344440344576, 140344440360959,
+STORE, 140344444669952, 140344444686335,
+SNULL, 140344440336383, 140344440344575,
+STORE, 140344440320000, 140344440336383,
+STORE, 140344440336384, 140344440344575,
+SNULL, 140344442470399, 140344442474495,
+STORE, 140344442466304, 140344442470399,
+STORE, 140344442470400, 140344442474495,
+SNULL, 94250424684543, 94250424692735,
+STORE, 94250424680448, 94250424684543,
+STORE, 94250424684544, 94250424692735,
+SNULL, 140344444719103, 140344444723199,
+STORE, 140344444715008, 140344444719103,
+STORE, 140344444719104, 140344444723199,
+ERASE, 140344444686336, 140344444715007,
+STORE, 94250445512704, 94250445647871,
+STORE, 140344442986496, 140344444669951,
+STORE, 94250445512704, 94250445783039,
+STORE, 94250445512704, 94250445918207,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140725762719744, 140737488351231,
+SNULL, 140725762727935, 140737488351231,
+STORE, 140725762719744, 140725762727935,
+STORE, 140725762588672, 140725762727935,
+STORE, 94819009097728, 94819011432447,
+SNULL, 94819009310719, 94819011432447,
+STORE, 94819009097728, 94819009310719,
+STORE, 94819009310720, 94819011432447,
+ERASE, 94819009310720, 94819011432447,
+STORE, 94819011407872, 94819011420159,
+STORE, 94819011420160, 94819011432447,
+STORE, 139987985596416, 139987987849215,
+SNULL, 139987985739775, 139987987849215,
+STORE, 139987985596416, 139987985739775,
+STORE, 139987985739776, 139987987849215,
+ERASE, 139987985739776, 139987987849215,
+STORE, 139987987836928, 139987987845119,
+STORE, 139987987845120, 139987987849215,
+STORE, 140725763072000, 140725763076095,
+STORE, 140725763059712, 140725763071999,
+STORE, 139987987808256, 139987987836927,
+STORE, 139987987800064, 139987987808255,
+STORE, 139987983482880, 139987985596415,
+SNULL, 139987983482880, 139987983495167,
+STORE, 139987983495168, 139987985596415,
+STORE, 139987983482880, 139987983495167,
+SNULL, 139987985588223, 139987985596415,
+STORE, 139987983495168, 139987985588223,
+STORE, 139987985588224, 139987985596415,
+ERASE, 139987985588224, 139987985596415,
+STORE, 139987985588224, 139987985596415,
+STORE, 139987979685888, 139987983482879,
+SNULL, 139987979685888, 139987981344767,
+STORE, 139987981344768, 139987983482879,
+STORE, 139987979685888, 139987981344767,
+SNULL, 139987983441919, 139987983482879,
+STORE, 139987981344768, 139987983441919,
+STORE, 139987983441920, 139987983482879,
+SNULL, 139987983441920, 139987983466495,
+STORE, 139987983466496, 139987983482879,
+STORE, 139987983441920, 139987983466495,
+ERASE, 139987983441920, 139987983466495,
+STORE, 139987983441920, 139987983466495,
+ERASE, 139987983466496, 139987983482879,
+STORE, 139987983466496, 139987983482879,
+STORE, 139987987791872, 139987987808255,
+SNULL, 139987983458303, 139987983466495,
+STORE, 139987983441920, 139987983458303,
+STORE, 139987983458304, 139987983466495,
+SNULL, 139987985592319, 139987985596415,
+STORE, 139987985588224, 139987985592319,
+STORE, 139987985592320, 139987985596415,
+SNULL, 94819011411967, 94819011420159,
+STORE, 94819011407872, 94819011411967,
+STORE, 94819011411968, 94819011420159,
+SNULL, 139987987841023, 139987987845119,
+STORE, 139987987836928, 139987987841023,
+STORE, 139987987841024, 139987987845119,
+ERASE, 139987987808256, 139987987836927,
+STORE, 94819028176896, 94819028312063,
+STORE, 139987986108416, 139987987791871,
+STORE, 94819028176896, 94819028447231,
+STORE, 94819028176896, 94819028582399,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722475413504, 140737488351231,
+SNULL, 140722475421695, 140737488351231,
+STORE, 140722475413504, 140722475421695,
+STORE, 140722475282432, 140722475421695,
+STORE, 94620599119872, 94620601343999,
+SNULL, 94620599230463, 94620601343999,
+STORE, 94620599119872, 94620599230463,
+STORE, 94620599230464, 94620601343999,
+ERASE, 94620599230464, 94620601343999,
+STORE, 94620601323520, 94620601335807,
+STORE, 94620601335808, 94620601343999,
+STORE, 139891763060736, 139891765313535,
+SNULL, 139891763204095, 139891765313535,
+STORE, 139891763060736, 139891763204095,
+STORE, 139891763204096, 139891765313535,
+ERASE, 139891763204096, 139891765313535,
+STORE, 139891765301248, 139891765309439,
+STORE, 139891765309440, 139891765313535,
+STORE, 140722475700224, 140722475704319,
+STORE, 140722475687936, 140722475700223,
+STORE, 139891765272576, 139891765301247,
+STORE, 139891765264384, 139891765272575,
+STORE, 139891759263744, 139891763060735,
+SNULL, 139891759263744, 139891760922623,
+STORE, 139891760922624, 139891763060735,
+STORE, 139891759263744, 139891760922623,
+SNULL, 139891763019775, 139891763060735,
+STORE, 139891760922624, 139891763019775,
+STORE, 139891763019776, 139891763060735,
+SNULL, 139891763019776, 139891763044351,
+STORE, 139891763044352, 139891763060735,
+STORE, 139891763019776, 139891763044351,
+ERASE, 139891763019776, 139891763044351,
+STORE, 139891763019776, 139891763044351,
+ERASE, 139891763044352, 139891763060735,
+STORE, 139891763044352, 139891763060735,
+SNULL, 139891763036159, 139891763044351,
+STORE, 139891763019776, 139891763036159,
+STORE, 139891763036160, 139891763044351,
+SNULL, 94620601331711, 94620601335807,
+STORE, 94620601323520, 94620601331711,
+STORE, 94620601331712, 94620601335807,
+SNULL, 139891765305343, 139891765309439,
+STORE, 139891765301248, 139891765305343,
+STORE, 139891765305344, 139891765309439,
+ERASE, 139891765272576, 139891765301247,
+STORE, 94620610027520, 94620610162687,
+STORE, 94031976210432, 94031976423423,
+STORE, 94031978520576, 94031978524671,
+STORE, 94031978524672, 94031978532863,
+STORE, 94031978532864, 94031978545151,
+STORE, 94031990398976, 94031992565759,
+STORE, 140336240640000, 140336242298879,
+STORE, 140336242298880, 140336244396031,
+STORE, 140336244396032, 140336244412415,
+STORE, 140336244412416, 140336244420607,
+STORE, 140336244420608, 140336244436991,
+STORE, 140336244436992, 140336244449279,
+STORE, 140336244449280, 140336246542335,
+STORE, 140336246542336, 140336246546431,
+STORE, 140336246546432, 140336246550527,
+STORE, 140336246550528, 140336246693887,
+STORE, 140336247062528, 140336248745983,
+STORE, 140336248745984, 140336248762367,
+STORE, 140336248791040, 140336248795135,
+STORE, 140336248795136, 140336248799231,
+STORE, 140336248799232, 140336248803327,
+STORE, 140728500064256, 140728500203519,
+STORE, 140728501501952, 140728501514239,
+STORE, 140728501514240, 140728501518335,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140730503987200, 140737488351231,
+SNULL, 140730503995391, 140737488351231,
+STORE, 140730503987200, 140730503995391,
+STORE, 140730503856128, 140730503995391,
+STORE, 93866544205824, 93866546429951,
+SNULL, 93866544316415, 93866546429951,
+STORE, 93866544205824, 93866544316415,
+STORE, 93866544316416, 93866546429951,
+ERASE, 93866544316416, 93866546429951,
+STORE, 93866546409472, 93866546421759,
+STORE, 93866546421760, 93866546429951,
+STORE, 140216311959552, 140216314212351,
+SNULL, 140216312102911, 140216314212351,
+STORE, 140216311959552, 140216312102911,
+STORE, 140216312102912, 140216314212351,
+ERASE, 140216312102912, 140216314212351,
+STORE, 140216314200064, 140216314208255,
+STORE, 140216314208256, 140216314212351,
+STORE, 140730504626176, 140730504630271,
+STORE, 140730504613888, 140730504626175,
+STORE, 140216314171392, 140216314200063,
+STORE, 140216314163200, 140216314171391,
+STORE, 140216308162560, 140216311959551,
+SNULL, 140216308162560, 140216309821439,
+STORE, 140216309821440, 140216311959551,
+STORE, 140216308162560, 140216309821439,
+SNULL, 140216311918591, 140216311959551,
+STORE, 140216309821440, 140216311918591,
+STORE, 140216311918592, 140216311959551,
+SNULL, 140216311918592, 140216311943167,
+STORE, 140216311943168, 140216311959551,
+STORE, 140216311918592, 140216311943167,
+ERASE, 140216311918592, 140216311943167,
+STORE, 140216311918592, 140216311943167,
+ERASE, 140216311943168, 140216311959551,
+STORE, 140216311943168, 140216311959551,
+SNULL, 140216311934975, 140216311943167,
+STORE, 140216311918592, 140216311934975,
+STORE, 140216311934976, 140216311943167,
+SNULL, 93866546417663, 93866546421759,
+STORE, 93866546409472, 93866546417663,
+STORE, 93866546417664, 93866546421759,
+SNULL, 140216314204159, 140216314208255,
+STORE, 140216314200064, 140216314204159,
+STORE, 140216314204160, 140216314208255,
+ERASE, 140216314171392, 140216314200063,
+STORE, 93866550386688, 93866550521855,
+STORE, 94074292674560, 94074292887551,
+STORE, 94074294984704, 94074294988799,
+STORE, 94074294988800, 94074294996991,
+STORE, 94074294996992, 94074295009279,
+STORE, 94074300219392, 94074301378559,
+STORE, 139781563256832, 139781564915711,
+STORE, 139781564915712, 139781567012863,
+STORE, 139781567012864, 139781567029247,
+STORE, 139781567029248, 139781567037439,
+STORE, 139781567037440, 139781567053823,
+STORE, 139781567053824, 139781567066111,
+STORE, 139781567066112, 139781569159167,
+STORE, 139781569159168, 139781569163263,
+STORE, 139781569163264, 139781569167359,
+STORE, 139781569167360, 139781569310719,
+STORE, 139781569679360, 139781571362815,
+STORE, 139781571362816, 139781571379199,
+STORE, 139781571407872, 139781571411967,
+STORE, 139781571411968, 139781571416063,
+STORE, 139781571416064, 139781571420159,
+STORE, 140723688488960, 140723688628223,
+STORE, 140723689005056, 140723689017343,
+STORE, 140723689017344, 140723689021439,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735189745664, 140737488351231,
+SNULL, 140735189753855, 140737488351231,
+STORE, 140735189745664, 140735189753855,
+STORE, 140735189614592, 140735189753855,
+STORE, 94172072177664, 94172074512383,
+SNULL, 94172072390655, 94172074512383,
+STORE, 94172072177664, 94172072390655,
+STORE, 94172072390656, 94172074512383,
+ERASE, 94172072390656, 94172074512383,
+STORE, 94172074487808, 94172074500095,
+STORE, 94172074500096, 94172074512383,
+STORE, 140687827263488, 140687829516287,
+SNULL, 140687827406847, 140687829516287,
+STORE, 140687827263488, 140687827406847,
+STORE, 140687827406848, 140687829516287,
+ERASE, 140687827406848, 140687829516287,
+STORE, 140687829504000, 140687829512191,
+STORE, 140687829512192, 140687829516287,
+STORE, 140735189766144, 140735189770239,
+STORE, 140735189753856, 140735189766143,
+STORE, 140687829475328, 140687829503999,
+STORE, 140687829467136, 140687829475327,
+STORE, 140687825149952, 140687827263487,
+SNULL, 140687825149952, 140687825162239,
+STORE, 140687825162240, 140687827263487,
+STORE, 140687825149952, 140687825162239,
+SNULL, 140687827255295, 140687827263487,
+STORE, 140687825162240, 140687827255295,
+STORE, 140687827255296, 140687827263487,
+ERASE, 140687827255296, 140687827263487,
+STORE, 140687827255296, 140687827263487,
+STORE, 140687821352960, 140687825149951,
+SNULL, 140687821352960, 140687823011839,
+STORE, 140687823011840, 140687825149951,
+STORE, 140687821352960, 140687823011839,
+SNULL, 140687825108991, 140687825149951,
+STORE, 140687823011840, 140687825108991,
+STORE, 140687825108992, 140687825149951,
+SNULL, 140687825108992, 140687825133567,
+STORE, 140687825133568, 140687825149951,
+STORE, 140687825108992, 140687825133567,
+ERASE, 140687825108992, 140687825133567,
+STORE, 140687825108992, 140687825133567,
+ERASE, 140687825133568, 140687825149951,
+STORE, 140687825133568, 140687825149951,
+STORE, 140687829458944, 140687829475327,
+SNULL, 140687825125375, 140687825133567,
+STORE, 140687825108992, 140687825125375,
+STORE, 140687825125376, 140687825133567,
+SNULL, 140687827259391, 140687827263487,
+STORE, 140687827255296, 140687827259391,
+STORE, 140687827259392, 140687827263487,
+SNULL, 94172074491903, 94172074500095,
+STORE, 94172074487808, 94172074491903,
+STORE, 94172074491904, 94172074500095,
+SNULL, 140687829508095, 140687829512191,
+STORE, 140687829504000, 140687829508095,
+STORE, 140687829508096, 140687829512191,
+ERASE, 140687829475328, 140687829503999,
+STORE, 94172092432384, 94172092567551,
+STORE, 140687827775488, 140687829458943,
+STORE, 94172092432384, 94172092702719,
+STORE, 94172092432384, 94172092837887,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140737229504512, 140737488351231,
+SNULL, 140737229512703, 140737488351231,
+STORE, 140737229504512, 140737229512703,
+STORE, 140737229373440, 140737229512703,
+STORE, 94155246866432, 94155249090559,
+SNULL, 94155246977023, 94155249090559,
+STORE, 94155246866432, 94155246977023,
+STORE, 94155246977024, 94155249090559,
+ERASE, 94155246977024, 94155249090559,
+STORE, 94155249070080, 94155249082367,
+STORE, 94155249082368, 94155249090559,
+STORE, 140640993693696, 140640995946495,
+SNULL, 140640993837055, 140640995946495,
+STORE, 140640993693696, 140640993837055,
+STORE, 140640993837056, 140640995946495,
+ERASE, 140640993837056, 140640995946495,
+STORE, 140640995934208, 140640995942399,
+STORE, 140640995942400, 140640995946495,
+STORE, 140737230004224, 140737230008319,
+STORE, 140737229991936, 140737230004223,
+STORE, 140640995905536, 140640995934207,
+STORE, 140640995897344, 140640995905535,
+STORE, 140640989896704, 140640993693695,
+SNULL, 140640989896704, 140640991555583,
+STORE, 140640991555584, 140640993693695,
+STORE, 140640989896704, 140640991555583,
+SNULL, 140640993652735, 140640993693695,
+STORE, 140640991555584, 140640993652735,
+STORE, 140640993652736, 140640993693695,
+SNULL, 140640993652736, 140640993677311,
+STORE, 140640993677312, 140640993693695,
+STORE, 140640993652736, 140640993677311,
+ERASE, 140640993652736, 140640993677311,
+STORE, 140640993652736, 140640993677311,
+ERASE, 140640993677312, 140640993693695,
+STORE, 140640993677312, 140640993693695,
+SNULL, 140640993669119, 140640993677311,
+STORE, 140640993652736, 140640993669119,
+STORE, 140640993669120, 140640993677311,
+SNULL, 94155249078271, 94155249082367,
+STORE, 94155249070080, 94155249078271,
+STORE, 94155249078272, 94155249082367,
+SNULL, 140640995938303, 140640995942399,
+STORE, 140640995934208, 140640995938303,
+STORE, 140640995938304, 140640995942399,
+ERASE, 140640995905536, 140640995934207,
+STORE, 94155281035264, 94155281170431,
+STORE, 94088066453504, 94088066564095,
+STORE, 94088068657152, 94088068665343,
+STORE, 94088068665344, 94088068669439,
+STORE, 94088068669440, 94088068677631,
+STORE, 94088090214400, 94088090349567,
+STORE, 140503024627712, 140503026286591,
+STORE, 140503026286592, 140503028383743,
+STORE, 140503028383744, 140503028400127,
+STORE, 140503028400128, 140503028408319,
+STORE, 140503028408320, 140503028424703,
+STORE, 140503028424704, 140503028568063,
+STORE, 140503030628352, 140503030636543,
+STORE, 140503030665216, 140503030669311,
+STORE, 140503030669312, 140503030673407,
+STORE, 140503030673408, 140503030677503,
+STORE, 140730894725120, 140730894864383,
+STORE, 140730894880768, 140730894893055,
+STORE, 140730894893056, 140730894897151,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140730434342912, 140737488351231,
+SNULL, 140730434351103, 140737488351231,
+STORE, 140730434342912, 140730434351103,
+STORE, 140730434211840, 140730434351103,
+STORE, 4194304, 5128191,
+STORE, 7221248, 7241727,
+STORE, 7241728, 7249919,
+STORE, 140109041938432, 140109044191231,
+SNULL, 140109042081791, 140109044191231,
+STORE, 140109041938432, 140109042081791,
+STORE, 140109042081792, 140109044191231,
+ERASE, 140109042081792, 140109044191231,
+STORE, 140109044178944, 140109044187135,
+STORE, 140109044187136, 140109044191231,
+STORE, 140730434850816, 140730434854911,
+STORE, 140730434838528, 140730434850815,
+STORE, 140109044150272, 140109044178943,
+STORE, 140109044142080, 140109044150271,
+STORE, 140109038776320, 140109041938431,
+SNULL, 140109038776320, 140109039837183,
+STORE, 140109039837184, 140109041938431,
+STORE, 140109038776320, 140109039837183,
+SNULL, 140109041930239, 140109041938431,
+STORE, 140109039837184, 140109041930239,
+STORE, 140109041930240, 140109041938431,
+ERASE, 140109041930240, 140109041938431,
+STORE, 140109041930240, 140109041938431,
+STORE, 140109034979328, 140109038776319,
+SNULL, 140109034979328, 140109036638207,
+STORE, 140109036638208, 140109038776319,
+STORE, 140109034979328, 140109036638207,
+SNULL, 140109038735359, 140109038776319,
+STORE, 140109036638208, 140109038735359,
+STORE, 140109038735360, 140109038776319,
+SNULL, 140109038735360, 140109038759935,
+STORE, 140109038759936, 140109038776319,
+STORE, 140109038735360, 140109038759935,
+ERASE, 140109038735360, 140109038759935,
+STORE, 140109038735360, 140109038759935,
+ERASE, 140109038759936, 140109038776319,
+STORE, 140109038759936, 140109038776319,
+STORE, 140109044129792, 140109044150271,
+SNULL, 140109038751743, 140109038759935,
+STORE, 140109038735360, 140109038751743,
+STORE, 140109038751744, 140109038759935,
+SNULL, 140109041934335, 140109041938431,
+STORE, 140109041930240, 140109041934335,
+STORE, 140109041934336, 140109041938431,
+SNULL, 7233535, 7241727,
+STORE, 7221248, 7233535,
+STORE, 7233536, 7241727,
+SNULL, 140109044183039, 140109044187135,
+STORE, 140109044178944, 140109044183039,
+STORE, 140109044183040, 140109044187135,
+ERASE, 140109044150272, 140109044178943,
+STORE, 20000768, 20135935,
+STORE, 20000768, 20283391,
+STORE, 140109042446336, 140109044129791,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140730853408768, 140737488351231,
+SNULL, 140730853416959, 140737488351231,
+STORE, 140730853408768, 140730853416959,
+STORE, 140730853277696, 140730853416959,
+STORE, 94865902977024, 94865905311743,
+SNULL, 94865903190015, 94865905311743,
+STORE, 94865902977024, 94865903190015,
+STORE, 94865903190016, 94865905311743,
+ERASE, 94865903190016, 94865905311743,
+STORE, 94865905287168, 94865905299455,
+STORE, 94865905299456, 94865905311743,
+STORE, 139768865738752, 139768867991551,
+SNULL, 139768865882111, 139768867991551,
+STORE, 139768865738752, 139768865882111,
+STORE, 139768865882112, 139768867991551,
+ERASE, 139768865882112, 139768867991551,
+STORE, 139768867979264, 139768867987455,
+STORE, 139768867987456, 139768867991551,
+STORE, 140730853957632, 140730853961727,
+STORE, 140730853945344, 140730853957631,
+STORE, 139768867950592, 139768867979263,
+STORE, 139768867942400, 139768867950591,
+STORE, 139768863625216, 139768865738751,
+SNULL, 139768863625216, 139768863637503,
+STORE, 139768863637504, 139768865738751,
+STORE, 139768863625216, 139768863637503,
+SNULL, 139768865730559, 139768865738751,
+STORE, 139768863637504, 139768865730559,
+STORE, 139768865730560, 139768865738751,
+ERASE, 139768865730560, 139768865738751,
+STORE, 139768865730560, 139768865738751,
+STORE, 139768859828224, 139768863625215,
+SNULL, 139768859828224, 139768861487103,
+STORE, 139768861487104, 139768863625215,
+STORE, 139768859828224, 139768861487103,
+SNULL, 139768863584255, 139768863625215,
+STORE, 139768861487104, 139768863584255,
+STORE, 139768863584256, 139768863625215,
+SNULL, 139768863584256, 139768863608831,
+STORE, 139768863608832, 139768863625215,
+STORE, 139768863584256, 139768863608831,
+ERASE, 139768863584256, 139768863608831,
+STORE, 139768863584256, 139768863608831,
+ERASE, 139768863608832, 139768863625215,
+STORE, 139768863608832, 139768863625215,
+STORE, 139768867934208, 139768867950591,
+SNULL, 139768863600639, 139768863608831,
+STORE, 139768863584256, 139768863600639,
+STORE, 139768863600640, 139768863608831,
+SNULL, 139768865734655, 139768865738751,
+STORE, 139768865730560, 139768865734655,
+STORE, 139768865734656, 139768865738751,
+SNULL, 94865905291263, 94865905299455,
+STORE, 94865905287168, 94865905291263,
+STORE, 94865905291264, 94865905299455,
+SNULL, 139768867983359, 139768867987455,
+STORE, 139768867979264, 139768867983359,
+STORE, 139768867983360, 139768867987455,
+ERASE, 139768867950592, 139768867979263,
+STORE, 94865923670016, 94865923805183,
+STORE, 139768866250752, 139768867934207,
+STORE, 94865923670016, 94865923940351,
+STORE, 94865923670016, 94865924075519,
+STORE, 94865923670016, 94865924222975,
+SNULL, 94865924210687, 94865924222975,
+STORE, 94865923670016, 94865924210687,
+STORE, 94865924210688, 94865924222975,
+ERASE, 94865924210688, 94865924222975,
+STORE, 94865923670016, 94865924349951,
+STORE, 94865923670016, 94865924493311,
+STORE, 94865923670016, 94865924640767,
+SNULL, 94865924603903, 94865924640767,
+STORE, 94865923670016, 94865924603903,
+STORE, 94865924603904, 94865924640767,
+ERASE, 94865924603904, 94865924640767,
+STORE, 94865923670016, 94865924747263,
+STORE, 94865923670016, 94865924898815,
+SNULL, 94865924874239, 94865924898815,
+STORE, 94865923670016, 94865924874239,
+STORE, 94865924874240, 94865924898815,
+ERASE, 94865924874240, 94865924898815,
+STORE, 94865923670016, 94865925025791,
+SNULL, 94865925013503, 94865925025791,
+STORE, 94865923670016, 94865925013503,
+STORE, 94865925013504, 94865925025791,
+ERASE, 94865925013504, 94865925025791,
+SNULL, 94865924988927, 94865925013503,
+STORE, 94865923670016, 94865924988927,
+STORE, 94865924988928, 94865925013503,
+ERASE, 94865924988928, 94865925013503,
+STORE, 94865923670016, 94865925152767,
+SNULL, 94865925136383, 94865925152767,
+STORE, 94865923670016, 94865925136383,
+STORE, 94865925136384, 94865925152767,
+ERASE, 94865925136384, 94865925152767,
+STORE, 94865923670016, 94865925292031,
+SNULL, 94865925279743, 94865925292031,
+STORE, 94865923670016, 94865925279743,
+STORE, 94865925279744, 94865925292031,
+ERASE, 94865925279744, 94865925292031,
+SNULL, 94865925255167, 94865925279743,
+STORE, 94865923670016, 94865925255167,
+STORE, 94865925255168, 94865925279743,
+ERASE, 94865925255168, 94865925279743,
+STORE, 94865923670016, 94865925406719,
+SNULL, 94865925394431, 94865925406719,
+STORE, 94865923670016, 94865925394431,
+STORE, 94865925394432, 94865925406719,
+ERASE, 94865925394432, 94865925406719,
+STORE, 94865923670016, 94865925545983,
+SNULL, 94865925533695, 94865925545983,
+STORE, 94865923670016, 94865925533695,
+STORE, 94865925533696, 94865925545983,
+ERASE, 94865925533696, 94865925545983,
+SNULL, 94865925492735, 94865925533695,
+STORE, 94865923670016, 94865925492735,
+STORE, 94865925492736, 94865925533695,
+ERASE, 94865925492736, 94865925533695,
+STORE, 94865923670016, 94865925627903,
+SNULL, 94865925599231, 94865925627903,
+STORE, 94865923670016, 94865925599231,
+STORE, 94865925599232, 94865925627903,
+ERASE, 94865925599232, 94865925627903,
+STORE, 94865923670016, 94865925738495,
+SNULL, 94865925726207, 94865925738495,
+STORE, 94865923670016, 94865925726207,
+STORE, 94865925726208, 94865925738495,
+ERASE, 94865925726208, 94865925738495,
+STORE, 94865923670016, 94865925877759,
+SNULL, 94865925865471, 94865925877759,
+STORE, 94865923670016, 94865925865471,
+STORE, 94865925865472, 94865925877759,
+ERASE, 94865925865472, 94865925877759,
+STORE, 94865923670016, 94865926021119,
+SNULL, 94865926008831, 94865926021119,
+STORE, 94865923670016, 94865926008831,
+STORE, 94865926008832, 94865926021119,
+ERASE, 94865926008832, 94865926021119,
+SNULL, 94865925971967, 94865926008831,
+STORE, 94865923670016, 94865925971967,
+STORE, 94865925971968, 94865926008831,
+ERASE, 94865925971968, 94865926008831,
+STORE, 94865923670016, 94865926115327,
+STORE, 94865923670016, 94865926254591,
+SNULL, 94865926246399, 94865926254591,
+STORE, 94865923670016, 94865926246399,
+STORE, 94865926246400, 94865926254591,
+ERASE, 94865926246400, 94865926254591,
+STORE, 94865923670016, 94865926385663,
+STORE, 94865923670016, 94865926537215,
+STORE, 94865923670016, 94865926672383,
+STORE, 94865923670016, 94865926815743,
+STORE, 94865923670016, 94865926955007,
+STORE, 94865923670016, 94865927094271,
+STORE, 94865923670016, 94865927233535,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140731148435456, 140737488351231,
+SNULL, 140731148443647, 140737488351231,
+STORE, 140731148435456, 140731148443647,
+STORE, 140731148304384, 140731148443647,
+STORE, 94090775400448, 94090777735167,
+SNULL, 94090775613439, 94090777735167,
+STORE, 94090775400448, 94090775613439,
+STORE, 94090775613440, 94090777735167,
+ERASE, 94090775613440, 94090777735167,
+STORE, 94090777710592, 94090777722879,
+STORE, 94090777722880, 94090777735167,
+STORE, 140301090283520, 140301092536319,
+SNULL, 140301090426879, 140301092536319,
+STORE, 140301090283520, 140301090426879,
+STORE, 140301090426880, 140301092536319,
+ERASE, 140301090426880, 140301092536319,
+STORE, 140301092524032, 140301092532223,
+STORE, 140301092532224, 140301092536319,
+STORE, 140731148570624, 140731148574719,
+STORE, 140731148558336, 140731148570623,
+STORE, 140301092495360, 140301092524031,
+STORE, 140301092487168, 140301092495359,
+STORE, 140301088169984, 140301090283519,
+SNULL, 140301088169984, 140301088182271,
+STORE, 140301088182272, 140301090283519,
+STORE, 140301088169984, 140301088182271,
+SNULL, 140301090275327, 140301090283519,
+STORE, 140301088182272, 140301090275327,
+STORE, 140301090275328, 140301090283519,
+ERASE, 140301090275328, 140301090283519,
+STORE, 140301090275328, 140301090283519,
+STORE, 140301084372992, 140301088169983,
+SNULL, 140301084372992, 140301086031871,
+STORE, 140301086031872, 140301088169983,
+STORE, 140301084372992, 140301086031871,
+SNULL, 140301088129023, 140301088169983,
+STORE, 140301086031872, 140301088129023,
+STORE, 140301088129024, 140301088169983,
+SNULL, 140301088129024, 140301088153599,
+STORE, 140301088153600, 140301088169983,
+STORE, 140301088129024, 140301088153599,
+ERASE, 140301088129024, 140301088153599,
+STORE, 140301088129024, 140301088153599,
+ERASE, 140301088153600, 140301088169983,
+STORE, 140301088153600, 140301088169983,
+STORE, 140301092478976, 140301092495359,
+SNULL, 140301088145407, 140301088153599,
+STORE, 140301088129024, 140301088145407,
+STORE, 140301088145408, 140301088153599,
+SNULL, 140301090279423, 140301090283519,
+STORE, 140301090275328, 140301090279423,
+STORE, 140301090279424, 140301090283519,
+SNULL, 94090777714687, 94090777722879,
+STORE, 94090777710592, 94090777714687,
+STORE, 94090777714688, 94090777722879,
+SNULL, 140301092528127, 140301092532223,
+STORE, 140301092524032, 140301092528127,
+STORE, 140301092528128, 140301092532223,
+ERASE, 140301092495360, 140301092524031,
+STORE, 94090794590208, 94090794725375,
+STORE, 140301090795520, 140301092478975,
+STORE, 94090794590208, 94090794860543,
+STORE, 94090794590208, 94090794995711,
+STORE, 94090794590208, 94090795163647,
+SNULL, 94090795139071, 94090795163647,
+STORE, 94090794590208, 94090795139071,
+STORE, 94090795139072, 94090795163647,
+ERASE, 94090795139072, 94090795163647,
+STORE, 94090794590208, 94090795278335,
+STORE, 94090794590208, 94090795425791,
+SNULL, 94090795388927, 94090795425791,
+STORE, 94090794590208, 94090795388927,
+STORE, 94090795388928, 94090795425791,
+ERASE, 94090795388928, 94090795425791,
+STORE, 94090794590208, 94090795528191,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733084430336, 140737488351231,
+SNULL, 140733084438527, 140737488351231,
+STORE, 140733084430336, 140733084438527,
+STORE, 140733084299264, 140733084438527,
+STORE, 94116169183232, 94116171517951,
+SNULL, 94116169396223, 94116171517951,
+STORE, 94116169183232, 94116169396223,
+STORE, 94116169396224, 94116171517951,
+ERASE, 94116169396224, 94116171517951,
+STORE, 94116171493376, 94116171505663,
+STORE, 94116171505664, 94116171517951,
+STORE, 139772214128640, 139772216381439,
+SNULL, 139772214271999, 139772216381439,
+STORE, 139772214128640, 139772214271999,
+STORE, 139772214272000, 139772216381439,
+ERASE, 139772214272000, 139772216381439,
+STORE, 139772216369152, 139772216377343,
+STORE, 139772216377344, 139772216381439,
+STORE, 140733085270016, 140733085274111,
+STORE, 140733085257728, 140733085270015,
+STORE, 139772216340480, 139772216369151,
+STORE, 139772216332288, 139772216340479,
+STORE, 139772212015104, 139772214128639,
+SNULL, 139772212015104, 139772212027391,
+STORE, 139772212027392, 139772214128639,
+STORE, 139772212015104, 139772212027391,
+SNULL, 139772214120447, 139772214128639,
+STORE, 139772212027392, 139772214120447,
+STORE, 139772214120448, 139772214128639,
+ERASE, 139772214120448, 139772214128639,
+STORE, 139772214120448, 139772214128639,
+STORE, 139772208218112, 139772212015103,
+SNULL, 139772208218112, 139772209876991,
+STORE, 139772209876992, 139772212015103,
+STORE, 139772208218112, 139772209876991,
+SNULL, 139772211974143, 139772212015103,
+STORE, 139772209876992, 139772211974143,
+STORE, 139772211974144, 139772212015103,
+SNULL, 139772211974144, 139772211998719,
+STORE, 139772211998720, 139772212015103,
+STORE, 139772211974144, 139772211998719,
+ERASE, 139772211974144, 139772211998719,
+STORE, 139772211974144, 139772211998719,
+ERASE, 139772211998720, 139772212015103,
+STORE, 139772211998720, 139772212015103,
+STORE, 139772216324096, 139772216340479,
+SNULL, 139772211990527, 139772211998719,
+STORE, 139772211974144, 139772211990527,
+STORE, 139772211990528, 139772211998719,
+SNULL, 139772214124543, 139772214128639,
+STORE, 139772214120448, 139772214124543,
+STORE, 139772214124544, 139772214128639,
+SNULL, 94116171497471, 94116171505663,
+STORE, 94116171493376, 94116171497471,
+STORE, 94116171497472, 94116171505663,
+SNULL, 139772216373247, 139772216377343,
+STORE, 139772216369152, 139772216373247,
+STORE, 139772216373248, 139772216377343,
+ERASE, 139772216340480, 139772216369151,
+STORE, 94116199383040, 94116199518207,
+STORE, 139772214640640, 139772216324095,
+STORE, 94116199383040, 94116199653375,
+STORE, 94116199383040, 94116199788543,
+STORE, 140737488347136, 140737488351231,
+STORE, 140726067826688, 140737488351231,
+SNULL, 140726067830783, 140737488351231,
+STORE, 140726067826688, 140726067830783,
+STORE, 140726067695616, 140726067830783,
+STORE, 94535150673920, 94535152898047,
+SNULL, 94535150784511, 94535152898047,
+STORE, 94535150673920, 94535150784511,
+STORE, 94535150784512, 94535152898047,
+ERASE, 94535150784512, 94535152898047,
+STORE, 94535152877568, 94535152889855,
+STORE, 94535152889856, 94535152898047,
+STORE, 140381257314304, 140381259567103,
+SNULL, 140381257457663, 140381259567103,
+STORE, 140381257314304, 140381257457663,
+STORE, 140381257457664, 140381259567103,
+ERASE, 140381257457664, 140381259567103,
+STORE, 140381259554816, 140381259563007,
+STORE, 140381259563008, 140381259567103,
+STORE, 140726068060160, 140726068064255,
+STORE, 140726068047872, 140726068060159,
+STORE, 140381259526144, 140381259554815,
+STORE, 140381259517952, 140381259526143,
+STORE, 140381253517312, 140381257314303,
+SNULL, 140381253517312, 140381255176191,
+STORE, 140381255176192, 140381257314303,
+STORE, 140381253517312, 140381255176191,
+SNULL, 140381257273343, 140381257314303,
+STORE, 140381255176192, 140381257273343,
+STORE, 140381257273344, 140381257314303,
+SNULL, 140381257273344, 140381257297919,
+STORE, 140381257297920, 140381257314303,
+STORE, 140381257273344, 140381257297919,
+ERASE, 140381257273344, 140381257297919,
+STORE, 140381257273344, 140381257297919,
+ERASE, 140381257297920, 140381257314303,
+STORE, 140381257297920, 140381257314303,
+SNULL, 140381257289727, 140381257297919,
+STORE, 140381257273344, 140381257289727,
+STORE, 140381257289728, 140381257297919,
+SNULL, 94535152885759, 94535152889855,
+STORE, 94535152877568, 94535152885759,
+STORE, 94535152885760, 94535152889855,
+SNULL, 140381259558911, 140381259563007,
+STORE, 140381259554816, 140381259558911,
+STORE, 140381259558912, 140381259563007,
+ERASE, 140381259526144, 140381259554815,
+STORE, 94535186296832, 94535186431999,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140729189425152, 140737488351231,
+SNULL, 140729189433343, 140737488351231,
+STORE, 140729189425152, 140729189433343,
+STORE, 140729189294080, 140729189433343,
+STORE, 94428200128512, 94428202352639,
+SNULL, 94428200239103, 94428202352639,
+STORE, 94428200128512, 94428200239103,
+STORE, 94428200239104, 94428202352639,
+ERASE, 94428200239104, 94428202352639,
+STORE, 94428202332160, 94428202344447,
+STORE, 94428202344448, 94428202352639,
+STORE, 139707216986112, 139707219238911,
+SNULL, 139707217129471, 139707219238911,
+STORE, 139707216986112, 139707217129471,
+STORE, 139707217129472, 139707219238911,
+ERASE, 139707217129472, 139707219238911,
+STORE, 139707219226624, 139707219234815,
+STORE, 139707219234816, 139707219238911,
+STORE, 140729189785600, 140729189789695,
+STORE, 140729189773312, 140729189785599,
+STORE, 139707219197952, 139707219226623,
+STORE, 139707219189760, 139707219197951,
+STORE, 139707213189120, 139707216986111,
+SNULL, 139707213189120, 139707214847999,
+STORE, 139707214848000, 139707216986111,
+STORE, 139707213189120, 139707214847999,
+SNULL, 139707216945151, 139707216986111,
+STORE, 139707214848000, 139707216945151,
+STORE, 139707216945152, 139707216986111,
+SNULL, 139707216945152, 139707216969727,
+STORE, 139707216969728, 139707216986111,
+STORE, 139707216945152, 139707216969727,
+ERASE, 139707216945152, 139707216969727,
+STORE, 139707216945152, 139707216969727,
+ERASE, 139707216969728, 139707216986111,
+STORE, 139707216969728, 139707216986111,
+SNULL, 139707216961535, 139707216969727,
+STORE, 139707216945152, 139707216961535,
+STORE, 139707216961536, 139707216969727,
+SNULL, 94428202340351, 94428202344447,
+STORE, 94428202332160, 94428202340351,
+STORE, 94428202340352, 94428202344447,
+SNULL, 139707219230719, 139707219234815,
+STORE, 139707219226624, 139707219230719,
+STORE, 139707219230720, 139707219234815,
+ERASE, 139707219197952, 139707219226623,
+STORE, 94428208599040, 94428208734207,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722000953344, 140737488351231,
+SNULL, 140722000961535, 140737488351231,
+STORE, 140722000953344, 140722000961535,
+STORE, 140722000822272, 140722000961535,
+STORE, 94636494757888, 94636496982015,
+SNULL, 94636494868479, 94636496982015,
+STORE, 94636494757888, 94636494868479,
+STORE, 94636494868480, 94636496982015,
+ERASE, 94636494868480, 94636496982015,
+STORE, 94636496961536, 94636496973823,
+STORE, 94636496973824, 94636496982015,
+STORE, 140142275100672, 140142277353471,
+SNULL, 140142275244031, 140142277353471,
+STORE, 140142275100672, 140142275244031,
+STORE, 140142275244032, 140142277353471,
+ERASE, 140142275244032, 140142277353471,
+STORE, 140142277341184, 140142277349375,
+STORE, 140142277349376, 140142277353471,
+STORE, 140722002747392, 140722002751487,
+STORE, 140722002735104, 140722002747391,
+STORE, 140142277312512, 140142277341183,
+STORE, 140142277304320, 140142277312511,
+STORE, 140142271303680, 140142275100671,
+SNULL, 140142271303680, 140142272962559,
+STORE, 140142272962560, 140142275100671,
+STORE, 140142271303680, 140142272962559,
+SNULL, 140142275059711, 140142275100671,
+STORE, 140142272962560, 140142275059711,
+STORE, 140142275059712, 140142275100671,
+SNULL, 140142275059712, 140142275084287,
+STORE, 140142275084288, 140142275100671,
+STORE, 140142275059712, 140142275084287,
+ERASE, 140142275059712, 140142275084287,
+STORE, 140142275059712, 140142275084287,
+ERASE, 140142275084288, 140142275100671,
+STORE, 140142275084288, 140142275100671,
+SNULL, 140142275076095, 140142275084287,
+STORE, 140142275059712, 140142275076095,
+STORE, 140142275076096, 140142275084287,
+SNULL, 94636496969727, 94636496973823,
+STORE, 94636496961536, 94636496969727,
+STORE, 94636496969728, 94636496973823,
+SNULL, 140142277345279, 140142277349375,
+STORE, 140142277341184, 140142277345279,
+STORE, 140142277345280, 140142277349375,
+ERASE, 140142277312512, 140142277341183,
+STORE, 94636516286464, 94636516421631,
+STORE, 94071103692800, 94071103905791,
+STORE, 94071106002944, 94071106007039,
+STORE, 94071106007040, 94071106015231,
+STORE, 94071106015232, 94071106027519,
+STORE, 94071138521088, 94071140368383,
+STORE, 140145668190208, 140145669849087,
+STORE, 140145669849088, 140145671946239,
+STORE, 140145671946240, 140145671962623,
+STORE, 140145671962624, 140145671970815,
+STORE, 140145671970816, 140145671987199,
+STORE, 140145671987200, 140145671999487,
+STORE, 140145671999488, 140145674092543,
+STORE, 140145674092544, 140145674096639,
+STORE, 140145674096640, 140145674100735,
+STORE, 140145674100736, 140145674244095,
+STORE, 140145674612736, 140145676296191,
+STORE, 140145676296192, 140145676312575,
+STORE, 140145676341248, 140145676345343,
+STORE, 140145676345344, 140145676349439,
+STORE, 140145676349440, 140145676353535,
+STORE, 140734927740928, 140734927880191,
+STORE, 140734928842752, 140734928855039,
+STORE, 140734928855040, 140734928859135,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722342535168, 140737488351231,
+SNULL, 140722342543359, 140737488351231,
+STORE, 140722342535168, 140722342543359,
+STORE, 140722342404096, 140722342543359,
+STORE, 94399699714048, 94399702048767,
+SNULL, 94399699927039, 94399702048767,
+STORE, 94399699714048, 94399699927039,
+STORE, 94399699927040, 94399702048767,
+ERASE, 94399699927040, 94399702048767,
+STORE, 94399702024192, 94399702036479,
+STORE, 94399702036480, 94399702048767,
+STORE, 139811024748544, 139811027001343,
+SNULL, 139811024891903, 139811027001343,
+STORE, 139811024748544, 139811024891903,
+STORE, 139811024891904, 139811027001343,
+ERASE, 139811024891904, 139811027001343,
+STORE, 139811026989056, 139811026997247,
+STORE, 139811026997248, 139811027001343,
+STORE, 140722342707200, 140722342711295,
+STORE, 140722342694912, 140722342707199,
+STORE, 139811026960384, 139811026989055,
+STORE, 139811026952192, 139811026960383,
+STORE, 139811022635008, 139811024748543,
+SNULL, 139811022635008, 139811022647295,
+STORE, 139811022647296, 139811024748543,
+STORE, 139811022635008, 139811022647295,
+SNULL, 139811024740351, 139811024748543,
+STORE, 139811022647296, 139811024740351,
+STORE, 139811024740352, 139811024748543,
+ERASE, 139811024740352, 139811024748543,
+STORE, 139811024740352, 139811024748543,
+STORE, 139811018838016, 139811022635007,
+SNULL, 139811018838016, 139811020496895,
+STORE, 139811020496896, 139811022635007,
+STORE, 139811018838016, 139811020496895,
+SNULL, 139811022594047, 139811022635007,
+STORE, 139811020496896, 139811022594047,
+STORE, 139811022594048, 139811022635007,
+SNULL, 139811022594048, 139811022618623,
+STORE, 139811022618624, 139811022635007,
+STORE, 139811022594048, 139811022618623,
+ERASE, 139811022594048, 139811022618623,
+STORE, 139811022594048, 139811022618623,
+ERASE, 139811022618624, 139811022635007,
+STORE, 139811022618624, 139811022635007,
+STORE, 139811026944000, 139811026960383,
+SNULL, 139811022610431, 139811022618623,
+STORE, 139811022594048, 139811022610431,
+STORE, 139811022610432, 139811022618623,
+SNULL, 139811024744447, 139811024748543,
+STORE, 139811024740352, 139811024744447,
+STORE, 139811024744448, 139811024748543,
+SNULL, 94399702028287, 94399702036479,
+STORE, 94399702024192, 94399702028287,
+STORE, 94399702028288, 94399702036479,
+SNULL, 139811026993151, 139811026997247,
+STORE, 139811026989056, 139811026993151,
+STORE, 139811026993152, 139811026997247,
+ERASE, 139811026960384, 139811026989055,
+STORE, 94399723880448, 94399724015615,
+STORE, 139811025260544, 139811026943999,
+STORE, 94399723880448, 94399724150783,
+STORE, 94399723880448, 94399724285951,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735364939776, 140737488351231,
+SNULL, 140735364947967, 140737488351231,
+STORE, 140735364939776, 140735364947967,
+STORE, 140735364808704, 140735364947967,
+STORE, 94421528674304, 94421531009023,
+SNULL, 94421528887295, 94421531009023,
+STORE, 94421528674304, 94421528887295,
+STORE, 94421528887296, 94421531009023,
+ERASE, 94421528887296, 94421531009023,
+STORE, 94421530984448, 94421530996735,
+STORE, 94421530996736, 94421531009023,
+STORE, 140162004742144, 140162006994943,
+SNULL, 140162004885503, 140162006994943,
+STORE, 140162004742144, 140162004885503,
+STORE, 140162004885504, 140162006994943,
+ERASE, 140162004885504, 140162006994943,
+STORE, 140162006982656, 140162006990847,
+STORE, 140162006990848, 140162006994943,
+STORE, 140735365402624, 140735365406719,
+STORE, 140735365390336, 140735365402623,
+STORE, 140162006953984, 140162006982655,
+STORE, 140162006945792, 140162006953983,
+STORE, 140162002628608, 140162004742143,
+SNULL, 140162002628608, 140162002640895,
+STORE, 140162002640896, 140162004742143,
+STORE, 140162002628608, 140162002640895,
+SNULL, 140162004733951, 140162004742143,
+STORE, 140162002640896, 140162004733951,
+STORE, 140162004733952, 140162004742143,
+ERASE, 140162004733952, 140162004742143,
+STORE, 140162004733952, 140162004742143,
+STORE, 140161998831616, 140162002628607,
+SNULL, 140161998831616, 140162000490495,
+STORE, 140162000490496, 140162002628607,
+STORE, 140161998831616, 140162000490495,
+SNULL, 140162002587647, 140162002628607,
+STORE, 140162000490496, 140162002587647,
+STORE, 140162002587648, 140162002628607,
+SNULL, 140162002587648, 140162002612223,
+STORE, 140162002612224, 140162002628607,
+STORE, 140162002587648, 140162002612223,
+ERASE, 140162002587648, 140162002612223,
+STORE, 140162002587648, 140162002612223,
+ERASE, 140162002612224, 140162002628607,
+STORE, 140162002612224, 140162002628607,
+STORE, 140162006937600, 140162006953983,
+SNULL, 140162002604031, 140162002612223,
+STORE, 140162002587648, 140162002604031,
+STORE, 140162002604032, 140162002612223,
+SNULL, 140162004738047, 140162004742143,
+STORE, 140162004733952, 140162004738047,
+STORE, 140162004738048, 140162004742143,
+SNULL, 94421530988543, 94421530996735,
+STORE, 94421530984448, 94421530988543,
+STORE, 94421530988544, 94421530996735,
+SNULL, 140162006986751, 140162006990847,
+STORE, 140162006982656, 140162006986751,
+STORE, 140162006986752, 140162006990847,
+ERASE, 140162006953984, 140162006982655,
+STORE, 94421551697920, 94421551833087,
+STORE, 140162005254144, 140162006937599,
+STORE, 94421551697920, 94421551968255,
+STORE, 94421551697920, 94421552103423,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733498486784, 140737488351231,
+SNULL, 140733498494975, 140737488351231,
+STORE, 140733498486784, 140733498494975,
+STORE, 140733498355712, 140733498494975,
+STORE, 94567985836032, 94567988170751,
+SNULL, 94567986049023, 94567988170751,
+STORE, 94567985836032, 94567986049023,
+STORE, 94567986049024, 94567988170751,
+ERASE, 94567986049024, 94567988170751,
+STORE, 94567988146176, 94567988158463,
+STORE, 94567988158464, 94567988170751,
+STORE, 139634278572032, 139634280824831,
+SNULL, 139634278715391, 139634280824831,
+STORE, 139634278572032, 139634278715391,
+STORE, 139634278715392, 139634280824831,
+ERASE, 139634278715392, 139634280824831,
+STORE, 139634280812544, 139634280820735,
+STORE, 139634280820736, 139634280824831,
+STORE, 140733498544128, 140733498548223,
+STORE, 140733498531840, 140733498544127,
+STORE, 139634280783872, 139634280812543,
+STORE, 139634280775680, 139634280783871,
+STORE, 139634276458496, 139634278572031,
+SNULL, 139634276458496, 139634276470783,
+STORE, 139634276470784, 139634278572031,
+STORE, 139634276458496, 139634276470783,
+SNULL, 139634278563839, 139634278572031,
+STORE, 139634276470784, 139634278563839,
+STORE, 139634278563840, 139634278572031,
+ERASE, 139634278563840, 139634278572031,
+STORE, 139634278563840, 139634278572031,
+STORE, 139634272661504, 139634276458495,
+SNULL, 139634272661504, 139634274320383,
+STORE, 139634274320384, 139634276458495,
+STORE, 139634272661504, 139634274320383,
+SNULL, 139634276417535, 139634276458495,
+STORE, 139634274320384, 139634276417535,
+STORE, 139634276417536, 139634276458495,
+SNULL, 139634276417536, 139634276442111,
+STORE, 139634276442112, 139634276458495,
+STORE, 139634276417536, 139634276442111,
+ERASE, 139634276417536, 139634276442111,
+STORE, 139634276417536, 139634276442111,
+ERASE, 139634276442112, 139634276458495,
+STORE, 139634276442112, 139634276458495,
+STORE, 139634280767488, 139634280783871,
+SNULL, 139634276433919, 139634276442111,
+STORE, 139634276417536, 139634276433919,
+STORE, 139634276433920, 139634276442111,
+SNULL, 139634278567935, 139634278572031,
+STORE, 139634278563840, 139634278567935,
+STORE, 139634278567936, 139634278572031,
+SNULL, 94567988150271, 94567988158463,
+STORE, 94567988146176, 94567988150271,
+STORE, 94567988150272, 94567988158463,
+SNULL, 139634280816639, 139634280820735,
+STORE, 139634280812544, 139634280816639,
+STORE, 139634280816640, 139634280820735,
+ERASE, 139634280783872, 139634280812543,
+STORE, 94567996379136, 94567996514303,
+STORE, 139634279084032, 139634280767487,
+STORE, 94567996379136, 94567996649471,
+STORE, 94567996379136, 94567996784639,
+STORE, 94567996379136, 94567996960767,
+SNULL, 94567996932095, 94567996960767,
+STORE, 94567996379136, 94567996932095,
+STORE, 94567996932096, 94567996960767,
+ERASE, 94567996932096, 94567996960767,
+STORE, 94567996379136, 94567997071359,
+STORE, 94567996379136, 94567997206527,
+SNULL, 94567997186047, 94567997206527,
+STORE, 94567996379136, 94567997186047,
+STORE, 94567997186048, 94567997206527,
+ERASE, 94567997186048, 94567997206527,
+STORE, 94567996379136, 94567997358079,
+STORE, 94567996379136, 94567997493247,
+SNULL, 94567997476863, 94567997493247,
+STORE, 94567996379136, 94567997476863,
+STORE, 94567997476864, 94567997493247,
+ERASE, 94567997476864, 94567997493247,
+STORE, 94567996379136, 94567997612031,
+STORE, 94567996379136, 94567997767679,
+SNULL, 94567997739007, 94567997767679,
+STORE, 94567996379136, 94567997739007,
+STORE, 94567997739008, 94567997767679,
+ERASE, 94567997739008, 94567997767679,
+SNULL, 94567997698047, 94567997739007,
+STORE, 94567996379136, 94567997698047,
+STORE, 94567997698048, 94567997739007,
+ERASE, 94567997698048, 94567997739007,
+STORE, 94567996379136, 94567997853695,
+STORE, 94567996379136, 94567997988863,
+STORE, 94567996379136, 94567998132223,
+STORE, 94567996379136, 94567998275583,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140723667759104, 140737488351231,
+SNULL, 140723667767295, 140737488351231,
+STORE, 140723667759104, 140723667767295,
+STORE, 140723667628032, 140723667767295,
+STORE, 94231598800896, 94231601135615,
+SNULL, 94231599013887, 94231601135615,
+STORE, 94231598800896, 94231599013887,
+STORE, 94231599013888, 94231601135615,
+ERASE, 94231599013888, 94231601135615,
+STORE, 94231601111040, 94231601123327,
+STORE, 94231601123328, 94231601135615,
+STORE, 140269472649216, 140269474902015,
+SNULL, 140269472792575, 140269474902015,
+STORE, 140269472649216, 140269472792575,
+STORE, 140269472792576, 140269474902015,
+ERASE, 140269472792576, 140269474902015,
+STORE, 140269474889728, 140269474897919,
+STORE, 140269474897920, 140269474902015,
+STORE, 140723667836928, 140723667841023,
+STORE, 140723667824640, 140723667836927,
+STORE, 140269474861056, 140269474889727,
+STORE, 140269474852864, 140269474861055,
+STORE, 140269470535680, 140269472649215,
+SNULL, 140269470535680, 140269470547967,
+STORE, 140269470547968, 140269472649215,
+STORE, 140269470535680, 140269470547967,
+SNULL, 140269472641023, 140269472649215,
+STORE, 140269470547968, 140269472641023,
+STORE, 140269472641024, 140269472649215,
+ERASE, 140269472641024, 140269472649215,
+STORE, 140269472641024, 140269472649215,
+STORE, 140269466738688, 140269470535679,
+SNULL, 140269466738688, 140269468397567,
+STORE, 140269468397568, 140269470535679,
+STORE, 140269466738688, 140269468397567,
+SNULL, 140269470494719, 140269470535679,
+STORE, 140269468397568, 140269470494719,
+STORE, 140269470494720, 140269470535679,
+SNULL, 140269470494720, 140269470519295,
+STORE, 140269470519296, 140269470535679,
+STORE, 140269470494720, 140269470519295,
+ERASE, 140269470494720, 140269470519295,
+STORE, 140269470494720, 140269470519295,
+ERASE, 140269470519296, 140269470535679,
+STORE, 140269470519296, 140269470535679,
+STORE, 140269474844672, 140269474861055,
+SNULL, 140269470511103, 140269470519295,
+STORE, 140269470494720, 140269470511103,
+STORE, 140269470511104, 140269470519295,
+SNULL, 140269472645119, 140269472649215,
+STORE, 140269472641024, 140269472645119,
+STORE, 140269472645120, 140269472649215,
+SNULL, 94231601115135, 94231601123327,
+STORE, 94231601111040, 94231601115135,
+STORE, 94231601115136, 94231601123327,
+SNULL, 140269474893823, 140269474897919,
+STORE, 140269474889728, 140269474893823,
+STORE, 140269474893824, 140269474897919,
+ERASE, 140269474861056, 140269474889727,
+STORE, 94231626592256, 94231626727423,
+STORE, 140269473161216, 140269474844671,
+STORE, 94231626592256, 94231626862591,
+STORE, 94231626592256, 94231626997759,
+STORE, 94327178862592, 94327179075583,
+STORE, 94327181172736, 94327181176831,
+STORE, 94327181176832, 94327181185023,
+STORE, 94327181185024, 94327181197311,
+STORE, 94327185715200, 94327186685951,
+STORE, 140172071755776, 140172073414655,
+STORE, 140172073414656, 140172075511807,
+STORE, 140172075511808, 140172075528191,
+STORE, 140172075528192, 140172075536383,
+STORE, 140172075536384, 140172075552767,
+STORE, 140172075552768, 140172075565055,
+STORE, 140172075565056, 140172077658111,
+STORE, 140172077658112, 140172077662207,
+STORE, 140172077662208, 140172077666303,
+STORE, 140172077666304, 140172077809663,
+STORE, 140172078178304, 140172079861759,
+STORE, 140172079861760, 140172079878143,
+STORE, 140172079878144, 140172079906815,
+STORE, 140172079906816, 140172079910911,
+STORE, 140172079910912, 140172079915007,
+STORE, 140172079915008, 140172079919103,
+STORE, 140720358359040, 140720358494207,
+STORE, 140720358498304, 140720358510591,
+STORE, 140720358510592, 140720358514687,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722548621312, 140737488351231,
+SNULL, 140722548629503, 140737488351231,
+STORE, 140722548621312, 140722548629503,
+STORE, 140722548490240, 140722548629503,
+STORE, 93949289504768, 93949291728895,
+SNULL, 93949289615359, 93949291728895,
+STORE, 93949289504768, 93949289615359,
+STORE, 93949289615360, 93949291728895,
+ERASE, 93949289615360, 93949291728895,
+STORE, 93949291708416, 93949291720703,
+STORE, 93949291720704, 93949291728895,
+STORE, 140305861902336, 140305864155135,
+SNULL, 140305862045695, 140305864155135,
+STORE, 140305861902336, 140305862045695,
+STORE, 140305862045696, 140305864155135,
+ERASE, 140305862045696, 140305864155135,
+STORE, 140305864142848, 140305864151039,
+STORE, 140305864151040, 140305864155135,
+STORE, 140722549821440, 140722549825535,
+STORE, 140722549809152, 140722549821439,
+STORE, 140305864114176, 140305864142847,
+STORE, 140305864105984, 140305864114175,
+STORE, 140305858105344, 140305861902335,
+SNULL, 140305858105344, 140305859764223,
+STORE, 140305859764224, 140305861902335,
+STORE, 140305858105344, 140305859764223,
+SNULL, 140305861861375, 140305861902335,
+STORE, 140305859764224, 140305861861375,
+STORE, 140305861861376, 140305861902335,
+SNULL, 140305861861376, 140305861885951,
+STORE, 140305861885952, 140305861902335,
+STORE, 140305861861376, 140305861885951,
+ERASE, 140305861861376, 140305861885951,
+STORE, 140305861861376, 140305861885951,
+ERASE, 140305861885952, 140305861902335,
+STORE, 140305861885952, 140305861902335,
+SNULL, 140305861877759, 140305861885951,
+STORE, 140305861861376, 140305861877759,
+STORE, 140305861877760, 140305861885951,
+SNULL, 93949291716607, 93949291720703,
+STORE, 93949291708416, 93949291716607,
+STORE, 93949291716608, 93949291720703,
+SNULL, 140305864146943, 140305864151039,
+STORE, 140305864142848, 140305864146943,
+STORE, 140305864146944, 140305864151039,
+ERASE, 140305864114176, 140305864142847,
+STORE, 93949324136448, 93949324271615,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140725754908672, 140737488351231,
+SNULL, 140725754916863, 140737488351231,
+STORE, 140725754908672, 140725754916863,
+STORE, 140725754777600, 140725754916863,
+STORE, 94831184375808, 94831186599935,
+SNULL, 94831184486399, 94831186599935,
+STORE, 94831184375808, 94831184486399,
+STORE, 94831184486400, 94831186599935,
+ERASE, 94831184486400, 94831186599935,
+STORE, 94831186579456, 94831186591743,
+STORE, 94831186591744, 94831186599935,
+STORE, 140605482479616, 140605484732415,
+SNULL, 140605482622975, 140605484732415,
+STORE, 140605482479616, 140605482622975,
+STORE, 140605482622976, 140605484732415,
+ERASE, 140605482622976, 140605484732415,
+STORE, 140605484720128, 140605484728319,
+STORE, 140605484728320, 140605484732415,
+STORE, 140725755670528, 140725755674623,
+STORE, 140725755658240, 140725755670527,
+STORE, 140605484691456, 140605484720127,
+STORE, 140605484683264, 140605484691455,
+STORE, 140605478682624, 140605482479615,
+SNULL, 140605478682624, 140605480341503,
+STORE, 140605480341504, 140605482479615,
+STORE, 140605478682624, 140605480341503,
+SNULL, 140605482438655, 140605482479615,
+STORE, 140605480341504, 140605482438655,
+STORE, 140605482438656, 140605482479615,
+SNULL, 140605482438656, 140605482463231,
+STORE, 140605482463232, 140605482479615,
+STORE, 140605482438656, 140605482463231,
+ERASE, 140605482438656, 140605482463231,
+STORE, 140605482438656, 140605482463231,
+ERASE, 140605482463232, 140605482479615,
+STORE, 140605482463232, 140605482479615,
+SNULL, 140605482455039, 140605482463231,
+STORE, 140605482438656, 140605482455039,
+STORE, 140605482455040, 140605482463231,
+SNULL, 94831186587647, 94831186591743,
+STORE, 94831186579456, 94831186587647,
+STORE, 94831186587648, 94831186591743,
+SNULL, 140605484724223, 140605484728319,
+STORE, 140605484720128, 140605484724223,
+STORE, 140605484724224, 140605484728319,
+ERASE, 140605484691456, 140605484720127,
+STORE, 94831217156096, 94831217291263,
+STORE, 94327178862592, 94327179075583,
+STORE, 94327181172736, 94327181176831,
+STORE, 94327181176832, 94327181185023,
+STORE, 94327181185024, 94327181197311,
+STORE, 94327185715200, 94327186685951,
+STORE, 140172071755776, 140172073414655,
+STORE, 140172073414656, 140172075511807,
+STORE, 140172075511808, 140172075528191,
+STORE, 140172075528192, 140172075536383,
+STORE, 140172075536384, 140172075552767,
+STORE, 140172075552768, 140172075565055,
+STORE, 140172075565056, 140172077658111,
+STORE, 140172077658112, 140172077662207,
+STORE, 140172077662208, 140172077666303,
+STORE, 140172077666304, 140172077809663,
+STORE, 140172078178304, 140172079861759,
+STORE, 140172079861760, 140172079878143,
+STORE, 140172079878144, 140172079906815,
+STORE, 140172079906816, 140172079910911,
+STORE, 140172079910912, 140172079915007,
+STORE, 140172079915008, 140172079919103,
+STORE, 140720358359040, 140720358494207,
+STORE, 140720358498304, 140720358510591,
+STORE, 140720358510592, 140720358514687,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140737488338944, 140737488351231,
+STORE, 140734529933312, 140737488351231,
+SNULL, 140734529945599, 140737488351231,
+STORE, 140734529933312, 140734529945599,
+STORE, 140734529802240, 140734529945599,
+STORE, 4194304, 26279935,
+STORE, 28372992, 28454911,
+STORE, 28454912, 29806591,
+STORE, 140249744060416, 140249746313215,
+SNULL, 140249744203775, 140249746313215,
+STORE, 140249744060416, 140249744203775,
+STORE, 140249744203776, 140249746313215,
+ERASE, 140249744203776, 140249746313215,
+STORE, 140249746300928, 140249746309119,
+STORE, 140249746309120, 140249746313215,
+STORE, 140734530174976, 140734530179071,
+STORE, 140734530162688, 140734530174975,
+STORE, 140249746272256, 140249746300927,
+STORE, 140249746264064, 140249746272255,
+STORE, 140249740226560, 140249744060415,
+SNULL, 140249740226560, 140249741934591,
+STORE, 140249741934592, 140249744060415,
+STORE, 140249740226560, 140249741934591,
+SNULL, 140249744027647, 140249744060415,
+STORE, 140249741934592, 140249744027647,
+STORE, 140249744027648, 140249744060415,
+ERASE, 140249744027648, 140249744060415,
+STORE, 140249744027648, 140249744060415,
+STORE, 140249738031104, 140249740226559,
+SNULL, 140249738031104, 140249738125311,
+STORE, 140249738125312, 140249740226559,
+STORE, 140249738031104, 140249738125311,
+SNULL, 140249740218367, 140249740226559,
+STORE, 140249738125312, 140249740218367,
+STORE, 140249740218368, 140249740226559,
+ERASE, 140249740218368, 140249740226559,
+STORE, 140249740218368, 140249740226559,
+STORE, 140249735512064, 140249738031103,
+SNULL, 140249735512064, 140249735925759,
+STORE, 140249735925760, 140249738031103,
+STORE, 140249735512064, 140249735925759,
+SNULL, 140249738018815, 140249738031103,
+STORE, 140249735925760, 140249738018815,
+STORE, 140249738018816, 140249738031103,
+ERASE, 140249738018816, 140249738031103,
+STORE, 140249738018816, 140249738031103,
+STORE, 140249732878336, 140249735512063,
+SNULL, 140249732878336, 140249733406719,
+STORE, 140249733406720, 140249735512063,
+STORE, 140249732878336, 140249733406719,
+SNULL, 140249735503871, 140249735512063,
+STORE, 140249733406720, 140249735503871,
+STORE, 140249735503872, 140249735512063,
+ERASE, 140249735503872, 140249735512063,
+STORE, 140249735503872, 140249735512063,
+STORE, 140249730764800, 140249732878335,
+SNULL, 140249730764800, 140249730777087,
+STORE, 140249730777088, 140249732878335,
+STORE, 140249730764800, 140249730777087,
+SNULL, 140249732870143, 140249732878335,
+STORE, 140249730777088, 140249732870143,
+STORE, 140249732870144, 140249732878335,
+ERASE, 140249732870144, 140249732878335,
+STORE, 140249732870144, 140249732878335,
+STORE, 140249728561152, 140249730764799,
+SNULL, 140249728561152, 140249728663551,
+STORE, 140249728663552, 140249730764799,
+STORE, 140249728561152, 140249728663551,
+SNULL, 140249730756607, 140249730764799,
+STORE, 140249728663552, 140249730756607,
+STORE, 140249730756608, 140249730764799,
+ERASE, 140249730756608, 140249730764799,
+STORE, 140249730756608, 140249730764799,
+STORE, 140249746255872, 140249746272255,
+STORE, 140249725399040, 140249728561151,
+SNULL, 140249725399040, 140249726459903,
+STORE, 140249726459904, 140249728561151,
+STORE, 140249725399040, 140249726459903,
+SNULL, 140249728552959, 140249728561151,
+STORE, 140249726459904, 140249728552959,
+STORE, 140249728552960, 140249728561151,
+ERASE, 140249728552960, 140249728561151,
+STORE, 140249728552960, 140249728561151,
+STORE, 140249721602048, 140249725399039,
+SNULL, 140249721602048, 140249723260927,
+STORE, 140249723260928, 140249725399039,
+STORE, 140249721602048, 140249723260927,
+SNULL, 140249725358079, 140249725399039,
+STORE, 140249723260928, 140249725358079,
+STORE, 140249725358080, 140249725399039,
+SNULL, 140249725358080, 140249725382655,
+STORE, 140249725382656, 140249725399039,
+STORE, 140249725358080, 140249725382655,
+ERASE, 140249725358080, 140249725382655,
+STORE, 140249725358080, 140249725382655,
+ERASE, 140249725382656, 140249725399039,
+STORE, 140249725382656, 140249725399039,
+STORE, 140249746243584, 140249746272255,
+SNULL, 140249725374463, 140249725382655,
+STORE, 140249725358080, 140249725374463,
+STORE, 140249725374464, 140249725382655,
+SNULL, 140249728557055, 140249728561151,
+STORE, 140249728552960, 140249728557055,
+STORE, 140249728557056, 140249728561151,
+SNULL, 140249730760703, 140249730764799,
+STORE, 140249730756608, 140249730760703,
+STORE, 140249730760704, 140249730764799,
+SNULL, 140249732874239, 140249732878335,
+STORE, 140249732870144, 140249732874239,
+STORE, 140249732874240, 140249732878335,
+SNULL, 140249735507967, 140249735512063,
+STORE, 140249735503872, 140249735507967,
+STORE, 140249735507968, 140249735512063,
+SNULL, 140249738027007, 140249738031103,
+STORE, 140249738018816, 140249738027007,
+STORE, 140249738027008, 140249738031103,
+SNULL, 140249740222463, 140249740226559,
+STORE, 140249740218368, 140249740222463,
+STORE, 140249740222464, 140249740226559,
+SNULL, 140249744031743, 140249744060415,
+STORE, 140249744027648, 140249744031743,
+STORE, 140249744031744, 140249744060415,
+SNULL, 28405759, 28454911,
+STORE, 28372992, 28405759,
+STORE, 28405760, 28454911,
+SNULL, 140249746305023, 140249746309119,
+STORE, 140249746300928, 140249746305023,
+STORE, 140249746305024, 140249746309119,
+ERASE, 140249746272256, 140249746300927,
+STORE, 33853440, 33988607,
+STORE, 140249744560128, 140249746243583,
+STORE, 140249746296832, 140249746300927,
+STORE, 140249744424960, 140249744560127,
+STORE, 33853440, 34131967,
+STORE, 140249719504896, 140249721602047,
+STORE, 140249746288640, 140249746300927,
+STORE, 140249746280448, 140249746300927,
+STORE, 140249746243584, 140249746280447,
+STORE, 140249744408576, 140249744560127,
+STORE, 33853440, 34267135,
+STORE, 33853440, 34422783,
+STORE, 140249744400384, 140249744560127,
+STORE, 140249744392192, 140249744560127,
+STORE, 33853440, 34557951,
+STORE, 33853440, 34693119,
+STORE, 140249744375808, 140249744560127,
+STORE, 140249744367616, 140249744560127,
+STORE, 33853440, 34832383,
+STORE, 140249719230464, 140249721602047,
+STORE, 140249744207872, 140249744560127,
+STORE, 33853440, 34971647,
+SNULL, 34963455, 34971647,
+STORE, 33853440, 34963455,
+STORE, 34963456, 34971647,
+ERASE, 34963456, 34971647,
+SNULL, 34955263, 34963455,
+STORE, 33853440, 34955263,
+STORE, 34955264, 34963455,
+ERASE, 34955264, 34963455,
+SNULL, 34947071, 34955263,
+STORE, 33853440, 34947071,
+STORE, 34947072, 34955263,
+ERASE, 34947072, 34955263,
+SNULL, 34938879, 34947071,
+STORE, 33853440, 34938879,
+STORE, 34938880, 34947071,
+ERASE, 34938880, 34947071,
+STORE, 140249719214080, 140249721602047,
+STORE, 140249719148544, 140249721602047,
+STORE, 140249719115776, 140249721602047,
+STORE, 140249717018624, 140249721602047,
+STORE, 140249716953088, 140249721602047,
+STORE, 33853440, 35086335,
+STORE, 140249716822016, 140249721602047,
+STORE, 140249716559872, 140249721602047,
+STORE, 140249716551680, 140249721602047,
+STORE, 140249716535296, 140249721602047,
+STORE, 140249716527104, 140249721602047,
+STORE, 140249716518912, 140249721602047,
+STORE, 33853440, 35221503,
+SNULL, 35213311, 35221503,
+STORE, 33853440, 35213311,
+STORE, 35213312, 35221503,
+ERASE, 35213312, 35221503,
+SNULL, 35205119, 35213311,
+STORE, 33853440, 35205119,
+STORE, 35205120, 35213311,
+ERASE, 35205120, 35213311,
+SNULL, 35192831, 35205119,
+STORE, 33853440, 35192831,
+STORE, 35192832, 35205119,
+ERASE, 35192832, 35205119,
+SNULL, 35176447, 35192831,
+STORE, 33853440, 35176447,
+STORE, 35176448, 35192831,
+ERASE, 35176448, 35192831,
+STORE, 140249716502528, 140249721602047,
+STORE, 33853440, 35311615,
+SNULL, 35307519, 35311615,
+STORE, 33853440, 35307519,
+STORE, 35307520, 35311615,
+ERASE, 35307520, 35311615,
+SNULL, 35303423, 35307519,
+STORE, 33853440, 35303423,
+STORE, 35303424, 35307519,
+ERASE, 35303424, 35307519,
+SNULL, 35299327, 35303423,
+STORE, 33853440, 35299327,
+STORE, 35299328, 35303423,
+ERASE, 35299328, 35303423,
+SNULL, 35295231, 35299327,
+STORE, 33853440, 35295231,
+STORE, 35295232, 35299327,
+ERASE, 35295232, 35299327,
+SNULL, 35291135, 35295231,
+STORE, 33853440, 35291135,
+STORE, 35291136, 35295231,
+ERASE, 35291136, 35295231,
+SNULL, 35287039, 35291135,
+STORE, 33853440, 35287039,
+STORE, 35287040, 35291135,
+ERASE, 35287040, 35291135,
+SNULL, 35282943, 35287039,
+STORE, 33853440, 35282943,
+STORE, 35282944, 35287039,
+ERASE, 35282944, 35287039,
+STORE, 140249716486144, 140249721602047,
+STORE, 140249716453376, 140249721602047,
+STORE, 33853440, 35418111,
+SNULL, 35401727, 35418111,
+STORE, 33853440, 35401727,
+STORE, 35401728, 35418111,
+ERASE, 35401728, 35418111,
+SNULL, 35389439, 35401727,
+STORE, 33853440, 35389439,
+STORE, 35389440, 35401727,
+ERASE, 35389440, 35401727,
+STORE, 140249714356224, 140249721602047,
+STORE, 33853440, 35540991,
+STORE, 140249714339840, 140249721602047,
+STORE, 140249714077696, 140249721602047,
+STORE, 140249714069504, 140249721602047,
+STORE, 140249714061312, 140249721602047,
+STORE, 33853440, 35680255,
+SNULL, 35672063, 35680255,
+STORE, 33853440, 35672063,
+STORE, 35672064, 35680255,
+ERASE, 35672064, 35680255,
+SNULL, 35627007, 35672063,
+STORE, 33853440, 35627007,
+STORE, 35627008, 35672063,
+ERASE, 35627008, 35672063,
+STORE, 140249711964160, 140249721602047,
+STORE, 33853440, 35762175,
+SNULL, 35753983, 35762175,
+STORE, 33853440, 35753983,
+STORE, 35753984, 35762175,
+ERASE, 35753984, 35762175,
+SNULL, 35745791, 35753983,
+STORE, 33853440, 35745791,
+STORE, 35745792, 35753983,
+ERASE, 35745792, 35753983,
+STORE, 140249711955968, 140249721602047,
+STORE, 140249711947776, 140249721602047,
+STORE, 140249710899200, 140249721602047,
+STORE, 140249710866432, 140249721602047,
+STORE, 140249710600192, 140249721602047,
+SNULL, 140249744424959, 140249744560127,
+STORE, 140249744207872, 140249744424959,
+STORE, 140249744424960, 140249744560127,
+ERASE, 140249744424960, 140249744560127,
+STORE, 140249708503040, 140249721602047,
+STORE, 33853440, 35885055,
+STORE, 140249707978752, 140249721602047,
+STORE, 140249705881600, 140249721602047,
+STORE, 33853440, 36036607,
+STORE, 33853440, 36175871,
+STORE, 140249744551936, 140249744560127,
+STORE, 140249744543744, 140249744560127,
+STORE, 140249744535552, 140249744560127,
+STORE, 140249744527360, 140249744560127,
+STORE, 140249744519168, 140249744560127,
+STORE, 140249705619456, 140249721602047,
+STORE, 140249744510976, 140249744560127,
+STORE, 140249744502784, 140249744560127,
+STORE, 140249744494592, 140249744560127,
+STORE, 140249744486400, 140249744560127,
+STORE, 140249744478208, 140249744560127,
+STORE, 140249744470016, 140249744560127,
+STORE, 140249744461824, 140249744560127,
+STORE, 140249744453632, 140249744560127,
+STORE, 140249744445440, 140249744560127,
+STORE, 140249744437248, 140249744560127,
+STORE, 140249744429056, 140249744560127,
+STORE, 140249703522304, 140249721602047,
+STORE, 33853440, 36311039,
+STORE, 140249703489536, 140249721602047,
+STORE, 33853440, 36474879,
+STORE, 140249703456768, 140249721602047,
+STORE, 33853440, 36622335,
+STORE, 140249703424000, 140249721602047,
+STORE, 140249703391232, 140249721602047,
+STORE, 33853440, 36810751,
+STORE, 140249703358464, 140249721602047,
+STORE, 140249703325696, 140249721602047,
+SNULL, 36655103, 36810751,
+STORE, 33853440, 36655103,
+STORE, 36655104, 36810751,
+ERASE, 36655104, 36810751,
+SNULL, 36438015, 36655103,
+STORE, 33853440, 36438015,
+STORE, 36438016, 36655103,
+ERASE, 36438016, 36655103,
+STORE, 140249703317504, 140249721602047,
+STORE, 140249701220352, 140249721602047,
+STORE, 33853440, 36585471,
+STORE, 33853440, 36782079,
+STORE, 140249701212160, 140249721602047,
+STORE, 140249701203968, 140249721602047,
+STORE, 140249701195776, 140249721602047,
+STORE, 140249701187584, 140249721602047,
+STORE, 140249701179392, 140249721602047,
+STORE, 140249701171200, 140249721602047,
+STORE, 140249701163008, 140249721602047,
+STORE, 140249701154816, 140249721602047,
+STORE, 140249701146624, 140249721602047,
+STORE, 140249701138432, 140249721602047,
+STORE, 140249701130240, 140249721602047,
+STORE, 140249700081664, 140249721602047,
+STORE, 140249700073472, 140249721602047,
+STORE, 33853440, 36978687,
+STORE, 140249697976320, 140249721602047,
+STORE, 33853440, 37240831,
+STORE, 140249695879168, 140249721602047,
+STORE, 140249695870976, 140249721602047,
+STORE, 140249695862784, 140249721602047,
+STORE, 140249695854592, 140249721602047,
+STORE, 140249695326208, 140249721602047,
+SNULL, 140249710600191, 140249721602047,
+STORE, 140249695326208, 140249710600191,
+STORE, 140249710600192, 140249721602047,
+SNULL, 140249710600192, 140249710866431,
+STORE, 140249710866432, 140249721602047,
+STORE, 140249710600192, 140249710866431,
+ERASE, 140249710600192, 140249710866431,
+STORE, 140249691131904, 140249710600191,
+STORE, 33853440, 37474303,
+STORE, 140249710858240, 140249721602047,
+STORE, 140249710850048, 140249721602047,
+STORE, 140249710841856, 140249721602047,
+STORE, 140249710833664, 140249721602047,
+STORE, 140249710825472, 140249721602047,
+STORE, 140249710817280, 140249721602047,
+STORE, 140249710809088, 140249721602047,
+STORE, 140249710800896, 140249721602047,
+STORE, 140249710792704, 140249721602047,
+STORE, 140249710784512, 140249721602047,
+STORE, 140249710776320, 140249721602047,
+STORE, 140249710768128, 140249721602047,
+STORE, 140249710759936, 140249721602047,
+STORE, 140249710751744, 140249721602047,
+STORE, 140249710743552, 140249721602047,
+STORE, 140249710735360, 140249721602047,
+STORE, 140249689034752, 140249710600191,
+STORE, 140249710727168, 140249721602047,
+STORE, 140249686937600, 140249710600191,
+STORE, 33853440, 37867519,
+STORE, 140249684840448, 140249710600191,
+STORE, 140249710718976, 140249721602047,
+STORE, 140249682743296, 140249710600191,
+STORE, 140249710710784, 140249721602047,
+STORE, 140249710702592, 140249721602047,
+STORE, 140249710694400, 140249721602047,
+STORE, 140249710686208, 140249721602047,
+STORE, 140249710678016, 140249721602047,
+STORE, 140249682612224, 140249710600191,
+STORE, 140249682087936, 140249710600191,
+SNULL, 140249705619455, 140249710600191,
+STORE, 140249682087936, 140249705619455,
+STORE, 140249705619456, 140249710600191,
+SNULL, 140249705619456, 140249705881599,
+STORE, 140249705881600, 140249710600191,
+STORE, 140249705619456, 140249705881599,
+ERASE, 140249705619456, 140249705881599,
+STORE, 140249679990784, 140249705619455,
+STORE, 140249710669824, 140249721602047,
+STORE, 140249677893632, 140249705619455,
+STORE, 140249710653440, 140249721602047,
+STORE, 140249710645248, 140249721602047,
+STORE, 140249710637056, 140249721602047,
+STORE, 140249710628864, 140249721602047,
+STORE, 140249710620672, 140249721602047,
+STORE, 140249710612480, 140249721602047,
+STORE, 140249710604288, 140249721602047,
+STORE, 140249705873408, 140249710600191,
+STORE, 140249705865216, 140249710600191,
+STORE, 140249705857024, 140249710600191,
+STORE, 140249705848832, 140249710600191,
+STORE, 140249705840640, 140249710600191,
+STORE, 140249705832448, 140249710600191,
+STORE, 140249705824256, 140249710600191,
+STORE, 140249705816064, 140249710600191,
+STORE, 140249705807872, 140249710600191,
+STORE, 140249705799680, 140249710600191,
+STORE, 33853440, 38129663,
+SNULL, 140249744207872, 140249744367615,
+STORE, 140249744367616, 140249744424959,
+STORE, 140249744207872, 140249744367615,
+ERASE, 140249744207872, 140249744367615,
+STORE, 140249677606912, 140249705619455,
+STORE, 140249675509760, 140249705619455,
+SNULL, 140249677606911, 140249705619455,
+STORE, 140249675509760, 140249677606911,
+STORE, 140249677606912, 140249705619455,
+SNULL, 140249677606912, 140249677893631,
+STORE, 140249677893632, 140249705619455,
+STORE, 140249677606912, 140249677893631,
+ERASE, 140249677606912, 140249677893631,
+STORE, 140249744359424, 140249744424959,
+STORE, 33853440, 38391807,
+STORE, 140249674981376, 140249677606911,
+STORE, 140249672884224, 140249677606911,
+SNULL, 140249719230463, 140249721602047,
+STORE, 140249710604288, 140249719230463,
+STORE, 140249719230464, 140249721602047,
+SNULL, 140249719230464, 140249719504895,
+STORE, 140249719504896, 140249721602047,
+STORE, 140249719230464, 140249719504895,
+ERASE, 140249719230464, 140249719504895,
+STORE, 140249744351232, 140249744424959,
+STORE, 140249744343040, 140249744424959,
+STORE, 140249744334848, 140249744424959,
+STORE, 140249744326656, 140249744424959,
+STORE, 140249744310272, 140249744424959,
+STORE, 140249744302080, 140249744424959,
+STORE, 140249744285696, 140249744424959,
+STORE, 140249744277504, 140249744424959,
+STORE, 140249744261120, 140249744424959,
+STORE, 140249744252928, 140249744424959,
+STORE, 140249744220160, 140249744424959,
+STORE, 140249744211968, 140249744424959,
+STORE, 140249719488512, 140249721602047,
+STORE, 140249744203776, 140249744424959,
+STORE, 140249719472128, 140249721602047,
+STORE, 140249719463936, 140249721602047,
+STORE, 140249719447552, 140249721602047,
+STORE, 140249719439360, 140249721602047,
+STORE, 140249719406592, 140249721602047,
+STORE, 140249719398400, 140249721602047,
+STORE, 140249719382016, 140249721602047,
+STORE, 140249719373824, 140249721602047,
+STORE, 140249719357440, 140249721602047,
+STORE, 140249719349248, 140249721602047,
+STORE, 140249719332864, 140249721602047,
+STORE, 140249719324672, 140249721602047,
+STORE, 140249719291904, 140249721602047,
+STORE, 140249719283712, 140249721602047,
+STORE, 140249719267328, 140249721602047,
+STORE, 140249719259136, 140249721602047,
+STORE, 140249719242752, 140249721602047,
+STORE, 140249719234560, 140249721602047,
+STORE, 140249705783296, 140249710600191,
+STORE, 140249705775104, 140249710600191,
+STORE, 140249705742336, 140249710600191,
+STORE, 140249705734144, 140249710600191,
+STORE, 140249705717760, 140249710600191,
+STORE, 140249670787072, 140249677606911,
+STORE, 140249705709568, 140249710600191,
+STORE, 140249705693184, 140249710600191,
+STORE, 140249705684992, 140249710600191,
+STORE, 140249705668608, 140249710600191,
+STORE, 140249705660416, 140249710600191,
+STORE, 140249705627648, 140249710600191,
+STORE, 140249677893632, 140249710600191,
+STORE, 140249677877248, 140249710600191,
+STORE, 140249677869056, 140249710600191,
+STORE, 140249677852672, 140249710600191,
+STORE, 140249677844480, 140249710600191,
+STORE, 140249677828096, 140249710600191,
+STORE, 140249668689920, 140249677606911,
+STORE, 140249677819904, 140249710600191,
+STORE, 140249677787136, 140249710600191,
+STORE, 140249677778944, 140249710600191,
+STORE, 140249677762560, 140249710600191,
+STORE, 140249677754368, 140249710600191,
+STORE, 140249677737984, 140249710600191,
+STORE, 140249677729792, 140249710600191,
+STORE, 140249677713408, 140249710600191,
+STORE, 140249677705216, 140249710600191,
+STORE, 140249677672448, 140249710600191,
+STORE, 140249677664256, 140249710600191,
+STORE, 140249677647872, 140249710600191,
+STORE, 140249677639680, 140249710600191,
+STORE, 140249677623296, 140249710600191,
+STORE, 140249677615104, 140249710600191,
+STORE, 140249668673536, 140249677606911,
+STORE, 140249668673536, 140249710600191,
+STORE, 140249668640768, 140249710600191,
+STORE, 140249668632576, 140249710600191,
+STORE, 140249668616192, 140249710600191,
+STORE, 140249668608000, 140249710600191,
+STORE, 140249668591616, 140249710600191,
+STORE, 140249668583424, 140249710600191,
+STORE, 140249668567040, 140249710600191,
+STORE, 140249668558848, 140249710600191,
+STORE, 140249668526080, 140249710600191,
+STORE, 140249668517888, 140249710600191,
+STORE, 140249668501504, 140249710600191,
+STORE, 140249668493312, 140249710600191,
+STORE, 140249668476928, 140249710600191,
+STORE, 140249668468736, 140249710600191,
+STORE, 140249668452352, 140249710600191,
+STORE, 140249668444160, 140249710600191,
+STORE, 140249668411392, 140249710600191,
+STORE, 140249668403200, 140249710600191,
+STORE, 140249668386816, 140249710600191,
+STORE, 140249668378624, 140249710600191,
+STORE, 140249668362240, 140249710600191,
+STORE, 140249668354048, 140249710600191,
+STORE, 140249668337664, 140249710600191,
+STORE, 140249668329472, 140249710600191,
+STORE, 140249668296704, 140249710600191,
+STORE, 140249668288512, 140249710600191,
+STORE, 140249668272128, 140249710600191,
+STORE, 140249668263936, 140249710600191,
+STORE, 140249668247552, 140249710600191,
+STORE, 140249668239360, 140249710600191,
+STORE, 140249668222976, 140249710600191,
+STORE, 140249668214784, 140249710600191,
+STORE, 140249668182016, 140249710600191,
+STORE, 140249668173824, 140249710600191,
+STORE, 140249668157440, 140249710600191,
+STORE, 140249668149248, 140249710600191,
+STORE, 140249668132864, 140249710600191,
+STORE, 140249668124672, 140249710600191,
+STORE, 140249668108288, 140249710600191,
+STORE, 140249668100096, 140249710600191,
+STORE, 140249668067328, 140249710600191,
+STORE, 140249668059136, 140249710600191,
+STORE, 140249668042752, 140249710600191,
+STORE, 140249668034560, 140249710600191,
+STORE, 140249668018176, 140249710600191,
+STORE, 140249668009984, 140249710600191,
+STORE, 140249667993600, 140249710600191,
+STORE, 140249667985408, 140249710600191,
+STORE, 140249667952640, 140249710600191,
+STORE, 140249667944448, 140249710600191,
+STORE, 140249667928064, 140249710600191,
+STORE, 140249667919872, 140249710600191,
+STORE, 140249667903488, 140249710600191,
+STORE, 140249667895296, 140249710600191,
+STORE, 140249667878912, 140249710600191,
+STORE, 140249667870720, 140249710600191,
+STORE, 140249667837952, 140249710600191,
+STORE, 140249667829760, 140249710600191,
+STORE, 140249667813376, 140249710600191,
+STORE, 140249667805184, 140249710600191,
+STORE, 140249667788800, 140249710600191,
+STORE, 140249667780608, 140249710600191,
+STORE, 140249667764224, 140249710600191,
+STORE, 140249667756032, 140249710600191,
+STORE, 140249667723264, 140249710600191,
+STORE, 140249667715072, 140249710600191,
+STORE, 140249667698688, 140249710600191,
+STORE, 140249667690496, 140249710600191,
+STORE, 140249667674112, 140249710600191,
+STORE, 140249667665920, 140249710600191,
+STORE, 140249667649536, 140249710600191,
+STORE, 140249667641344, 140249710600191,
+STORE, 140249667608576, 140249710600191,
+STORE, 140249667600384, 140249710600191,
+STORE, 140249667584000, 140249710600191,
+STORE, 140249667575808, 140249710600191,
+STORE, 140249667559424, 140249710600191,
+STORE, 140249667551232, 140249710600191,
+STORE, 140249667534848, 140249710600191,
+STORE, 140249667526656, 140249710600191,
+STORE, 140249667493888, 140249710600191,
+STORE, 140249667485696, 140249710600191,
+STORE, 140249667469312, 140249710600191,
+STORE, 140249667461120, 140249710600191,
+STORE, 140249667444736, 140249710600191,
+STORE, 140249667436544, 140249710600191,
+STORE, 140249667420160, 140249710600191,
+STORE, 140249665323008, 140249710600191,
+STORE, 140249665314816, 140249710600191,
+STORE, 140249665282048, 140249710600191,
+STORE, 140249665273856, 140249710600191,
+STORE, 140249665257472, 140249710600191,
+STORE, 140249665249280, 140249710600191,
+STORE, 140249665232896, 140249710600191,
+STORE, 140249665224704, 140249710600191,
+STORE, 140249665208320, 140249710600191,
+STORE, 140249665200128, 140249710600191,
+STORE, 140249665167360, 140249710600191,
+STORE, 140249665159168, 140249710600191,
+STORE, 140249665142784, 140249710600191,
+STORE, 140249665134592, 140249710600191,
+STORE, 140249665118208, 140249710600191,
+STORE, 140249665110016, 140249710600191,
+STORE, 140249665093632, 140249710600191,
+STORE, 140249665085440, 140249710600191,
+STORE, 140249665052672, 140249710600191,
+STORE, 140249665044480, 140249710600191,
+STORE, 140249665028096, 140249710600191,
+STORE, 140249665019904, 140249710600191,
+STORE, 140249665003520, 140249710600191,
+STORE, 140249664995328, 140249710600191,
+STORE, 140249664978944, 140249710600191,
+STORE, 140249664970752, 140249710600191,
+STORE, 140249664937984, 140249710600191,
+STORE, 140249664929792, 140249710600191,
+STORE, 140249664913408, 140249710600191,
+STORE, 140249664905216, 140249710600191,
+STORE, 140249664888832, 140249710600191,
+STORE, 140249664880640, 140249710600191,
+STORE, 140249664864256, 140249710600191,
+STORE, 140249664856064, 140249710600191,
+STORE, 140249664823296, 140249710600191,
+STORE, 140249664815104, 140249710600191,
+STORE, 140249664798720, 140249710600191,
+STORE, 140249664790528, 140249710600191,
+STORE, 140249664774144, 140249710600191,
+STORE, 140249664765952, 140249710600191,
+STORE, 140249664749568, 140249710600191,
+STORE, 140249664741376, 140249710600191,
+STORE, 140249664708608, 140249710600191,
+STORE, 140249664700416, 140249710600191,
+STORE, 140249664684032, 140249710600191,
+STORE, 140249664675840, 140249710600191,
+STORE, 140249664659456, 140249710600191,
+STORE, 140249664651264, 140249710600191,
+STORE, 140249664634880, 140249710600191,
+STORE, 140249664626688, 140249710600191,
+STORE, 140249664593920, 140249710600191,
+STORE, 140249664585728, 140249710600191,
+STORE, 140249664569344, 140249710600191,
+STORE, 140249664561152, 140249710600191,
+STORE, 140249664544768, 140249710600191,
+STORE, 140249664536576, 140249710600191,
+STORE, 140249664520192, 140249710600191,
+STORE, 140249664512000, 140249710600191,
+STORE, 140249664479232, 140249710600191,
+STORE, 140249664471040, 140249710600191,
+STORE, 140249664454656, 140249710600191,
+STORE, 140249664446464, 140249710600191,
+STORE, 140249664430080, 140249710600191,
+STORE, 140249664421888, 140249710600191,
+STORE, 140249664405504, 140249710600191,
+STORE, 140249664397312, 140249710600191,
+STORE, 140249664364544, 140249710600191,
+STORE, 140249664356352, 140249710600191,
+STORE, 140249664339968, 140249710600191,
+STORE, 140249664331776, 140249710600191,
+STORE, 140249664315392, 140249710600191,
+STORE, 140249664307200, 140249710600191,
+STORE, 140249664290816, 140249710600191,
+STORE, 140249664282624, 140249710600191,
+STORE, 140249664249856, 140249710600191,
+STORE, 140249664241664, 140249710600191,
+STORE, 140249664225280, 140249710600191,
+STORE, 140249664217088, 140249710600191,
+STORE, 140249664200704, 140249710600191,
+STORE, 140249664192512, 140249710600191,
+STORE, 140249664176128, 140249710600191,
+STORE, 140249664167936, 140249710600191,
+STORE, 140249664135168, 140249710600191,
+STORE, 140249664126976, 140249710600191,
+STORE, 140249664110592, 140249710600191,
+STORE, 140249664102400, 140249710600191,
+STORE, 140249664086016, 140249710600191,
+STORE, 140249664077824, 140249710600191,
+STORE, 140249664061440, 140249710600191,
+STORE, 140249664053248, 140249710600191,
+STORE, 140249664020480, 140249710600191,
+STORE, 140249664012288, 140249710600191,
+STORE, 140249663995904, 140249710600191,
+STORE, 140249663987712, 140249710600191,
+STORE, 140249663971328, 140249710600191,
+STORE, 140249663963136, 140249710600191,
+STORE, 140249663946752, 140249710600191,
+STORE, 140249663938560, 140249710600191,
+STORE, 140249663905792, 140249710600191,
+STORE, 140249663897600, 140249710600191,
+STORE, 140249663881216, 140249710600191,
+STORE, 140249663873024, 140249710600191,
+STORE, 140249663856640, 140249710600191,
+STORE, 140249663848448, 140249710600191,
+STORE, 140249663832064, 140249710600191,
+STORE, 140249663823872, 140249710600191,
+STORE, 140249663791104, 140249710600191,
+STORE, 140249663782912, 140249710600191,
+STORE, 140249663766528, 140249710600191,
+STORE, 140249663758336, 140249710600191,
+STORE, 140249663741952, 140249710600191,
+STORE, 140249663733760, 140249710600191,
+STORE, 140249663717376, 140249710600191,
+STORE, 140249663709184, 140249710600191,
+STORE, 140249663676416, 140249710600191,
+STORE, 140249663668224, 140249710600191,
+STORE, 140249663651840, 140249710600191,
+STORE, 140249663643648, 140249710600191,
+STORE, 140249663627264, 140249710600191,
+STORE, 33853440, 38526975,
+STORE, 140249663619072, 140249710600191,
+STORE, 140249663602688, 140249710600191,
+STORE, 140249661505536, 140249710600191,
+STORE, 140249661497344, 140249710600191,
+STORE, 140249661464576, 140249710600191,
+STORE, 140249661456384, 140249710600191,
+STORE, 140249661440000, 140249710600191,
+STORE, 140249661431808, 140249710600191,
+STORE, 140249661415424, 140249710600191,
+STORE, 140249661407232, 140249710600191,
+STORE, 140249661390848, 140249710600191,
+STORE, 140249661382656, 140249710600191,
+STORE, 140249661349888, 140249710600191,
+STORE, 140249661341696, 140249710600191,
+STORE, 140249661325312, 140249710600191,
+STORE, 140249661317120, 140249710600191,
+STORE, 140249661300736, 140249710600191,
+STORE, 140249661292544, 140249710600191,
+STORE, 140249661276160, 140249710600191,
+STORE, 140249661267968, 140249710600191,
+STORE, 140249661235200, 140249710600191,
+STORE, 140249661227008, 140249710600191,
+STORE, 140249661210624, 140249710600191,
+STORE, 140249661202432, 140249710600191,
+STORE, 140249661186048, 140249710600191,
+STORE, 140249661177856, 140249710600191,
+STORE, 140249661161472, 140249710600191,
+STORE, 140249661153280, 140249710600191,
+STORE, 140249661120512, 140249710600191,
+STORE, 140249661112320, 140249710600191,
+STORE, 140249661095936, 140249710600191,
+STORE, 140249661087744, 140249710600191,
+STORE, 140249661071360, 140249710600191,
+STORE, 140249661063168, 140249710600191,
+STORE, 140249661046784, 140249710600191,
+STORE, 140249661038592, 140249710600191,
+STORE, 140249661005824, 140249710600191,
+STORE, 140249660997632, 140249710600191,
+STORE, 140249660981248, 140249710600191,
+STORE, 140249660973056, 140249710600191,
+STORE, 140249660956672, 140249710600191,
+STORE, 140249660948480, 140249710600191,
+STORE, 140249660932096, 140249710600191,
+STORE, 140249660923904, 140249710600191,
+STORE, 140249660891136, 140249710600191,
+STORE, 140249660882944, 140249710600191,
+STORE, 140249660866560, 140249710600191,
+STORE, 140249660858368, 140249710600191,
+STORE, 140249660841984, 140249710600191,
+STORE, 140249660833792, 140249710600191,
+STORE, 140249660817408, 140249710600191,
+STORE, 140249660809216, 140249710600191,
+STORE, 140249660776448, 140249710600191,
+STORE, 140249660768256, 140249710600191,
+STORE, 140249660751872, 140249710600191,
+STORE, 140249660743680, 140249710600191,
+STORE, 140249660727296, 140249710600191,
+STORE, 140249660719104, 140249710600191,
+STORE, 140249660702720, 140249710600191,
+STORE, 140249660694528, 140249710600191,
+STORE, 140249660661760, 140249710600191,
+STORE, 140249660653568, 140249710600191,
+STORE, 140249660637184, 140249710600191,
+STORE, 140249660628992, 140249710600191,
+STORE, 140249660612608, 140249710600191,
+STORE, 140249660604416, 140249710600191,
+STORE, 140249660588032, 140249710600191,
+STORE, 140249660579840, 140249710600191,
+STORE, 140249660547072, 140249710600191,
+STORE, 140249660538880, 140249710600191,
+STORE, 140249660522496, 140249710600191,
+STORE, 140249660514304, 140249710600191,
+STORE, 140249660497920, 140249710600191,
+STORE, 140249660489728, 140249710600191,
+STORE, 140249660473344, 140249710600191,
+STORE, 140249660465152, 140249710600191,
+STORE, 140249660432384, 140249710600191,
+STORE, 140249660424192, 140249710600191,
+STORE, 140249660407808, 140249710600191,
+STORE, 140249660399616, 140249710600191,
+STORE, 140249660383232, 140249710600191,
+STORE, 140249660375040, 140249710600191,
+STORE, 140249660358656, 140249710600191,
+STORE, 140249660350464, 140249710600191,
+STORE, 140249660317696, 140249710600191,
+STORE, 140249660309504, 140249710600191,
+STORE, 140249660293120, 140249710600191,
+STORE, 140249660284928, 140249710600191,
+STORE, 140249660268544, 140249710600191,
+STORE, 140249660260352, 140249710600191,
+STORE, 140249660243968, 140249710600191,
+STORE, 140249660235776, 140249710600191,
+STORE, 140249660203008, 140249710600191,
+STORE, 140249660194816, 140249710600191,
+STORE, 140249660178432, 140249710600191,
+STORE, 140249660170240, 140249710600191,
+STORE, 140249660153856, 140249710600191,
+STORE, 140249660145664, 140249710600191,
+STORE, 140249660129280, 140249710600191,
+STORE, 140249660121088, 140249710600191,
+STORE, 140249660088320, 140249710600191,
+STORE, 140249660080128, 140249710600191,
+STORE, 140249660063744, 140249710600191,
+STORE, 140249660055552, 140249710600191,
+STORE, 140249660039168, 140249710600191,
+STORE, 140249660030976, 140249710600191,
+STORE, 140249660014592, 140249710600191,
+STORE, 140249660006400, 140249710600191,
+STORE, 140249659973632, 140249710600191,
+STORE, 140249659965440, 140249710600191,
+STORE, 140249659949056, 140249710600191,
+STORE, 140249659940864, 140249710600191,
+STORE, 140249659924480, 140249710600191,
+STORE, 140249659916288, 140249710600191,
+STORE, 140249659899904, 140249710600191,
+STORE, 140249659891712, 140249710600191,
+STORE, 140249659858944, 140249710600191,
+STORE, 140249659850752, 140249710600191,
+STORE, 140249659834368, 140249710600191,
+STORE, 140249659826176, 140249710600191,
+STORE, 140249659809792, 140249710600191,
+STORE, 140249659801600, 140249710600191,
+STORE, 140249659785216, 140249710600191,
+STORE, 140249657688064, 140249710600191,
+STORE, 140249657679872, 140249710600191,
+STORE, 140249657647104, 140249710600191,
+STORE, 140249657638912, 140249710600191,
+STORE, 140249657622528, 140249710600191,
+STORE, 140249657614336, 140249710600191,
+STORE, 140249657597952, 140249710600191,
+STORE, 140249657589760, 140249710600191,
+STORE, 140249657573376, 140249710600191,
+STORE, 140249657565184, 140249710600191,
+STORE, 140249657532416, 140249710600191,
+STORE, 140249657524224, 140249710600191,
+STORE, 140249657507840, 140249710600191,
+STORE, 140249657499648, 140249710600191,
+STORE, 140249657483264, 140249710600191,
+STORE, 140249657475072, 140249710600191,
+STORE, 140249657458688, 140249710600191,
+STORE, 140249657450496, 140249710600191,
+STORE, 140249657417728, 140249710600191,
+STORE, 140249657409536, 140249710600191,
+STORE, 140249657393152, 140249710600191,
+STORE, 140249657384960, 140249710600191,
+STORE, 140249657368576, 140249710600191,
+STORE, 140249657360384, 140249710600191,
+STORE, 140249657344000, 140249710600191,
+STORE, 140249657335808, 140249710600191,
+STORE, 140249657303040, 140249710600191,
+STORE, 140249657294848, 140249710600191,
+STORE, 140249657278464, 140249710600191,
+STORE, 140249657270272, 140249710600191,
+STORE, 140249657253888, 140249710600191,
+STORE, 140249657245696, 140249710600191,
+STORE, 140249657229312, 140249710600191,
+STORE, 140249657221120, 140249710600191,
+STORE, 140249657188352, 140249710600191,
+STORE, 140249657180160, 140249710600191,
+STORE, 140249657163776, 140249710600191,
+STORE, 140249657155584, 140249710600191,
+STORE, 140249657139200, 140249710600191,
+STORE, 140249657131008, 140249710600191,
+STORE, 140249657114624, 140249710600191,
+STORE, 140249657106432, 140249710600191,
+STORE, 140249657073664, 140249710600191,
+STORE, 140249657065472, 140249710600191,
+STORE, 140249657049088, 140249710600191,
+STORE, 140249657040896, 140249710600191,
+STORE, 140249657024512, 140249710600191,
+STORE, 140249657016320, 140249710600191,
+STORE, 140249656999936, 140249710600191,
+STORE, 140249656991744, 140249710600191,
+STORE, 140249656958976, 140249710600191,
+STORE, 140249656950784, 140249710600191,
+STORE, 140249656934400, 140249710600191,
+STORE, 140249656926208, 140249710600191,
+STORE, 140249656909824, 140249710600191,
+STORE, 140249656901632, 140249710600191,
+STORE, 140249656885248, 140249710600191,
+STORE, 140249656877056, 140249710600191,
+STORE, 140249656844288, 140249710600191,
+STORE, 140249656836096, 140249710600191,
+STORE, 140249656819712, 140249710600191,
+STORE, 140249656811520, 140249710600191,
+STORE, 140249656795136, 140249710600191,
+STORE, 33853440, 38662143,
+STORE, 140249656786944, 140249710600191,
+STORE, 140249656770560, 140249710600191,
+STORE, 140249656762368, 140249710600191,
+STORE, 140249656729600, 140249710600191,
+STORE, 140249656721408, 140249710600191,
+STORE, 140249656705024, 140249710600191,
+STORE, 140249656696832, 140249710600191,
+STORE, 140249656680448, 140249710600191,
+STORE, 140249656672256, 140249710600191,
+STORE, 140249656655872, 140249710600191,
+STORE, 140249656647680, 140249710600191,
+STORE, 140249656614912, 140249710600191,
+STORE, 140249656606720, 140249710600191,
+STORE, 140249656590336, 140249710600191,
+STORE, 140249656582144, 140249710600191,
+STORE, 140249656565760, 140249710600191,
+STORE, 140249656557568, 140249710600191,
+STORE, 140249656541184, 140249710600191,
+STORE, 140249656532992, 140249710600191,
+STORE, 140249656500224, 140249710600191,
+STORE, 140249656492032, 140249710600191,
+STORE, 140249656475648, 140249710600191,
+STORE, 140249656467456, 140249710600191,
+STORE, 140249656451072, 140249710600191,
+STORE, 140249656442880, 140249710600191,
+STORE, 140249656426496, 140249710600191,
+STORE, 140249656418304, 140249710600191,
+STORE, 140249656385536, 140249710600191,
+STORE, 140249656377344, 140249710600191,
+STORE, 140249656360960, 140249710600191,
+STORE, 140249656352768, 140249710600191,
+STORE, 140249656336384, 140249710600191,
+STORE, 140249656328192, 140249710600191,
+STORE, 140249656311808, 140249710600191,
+STORE, 140249656303616, 140249710600191,
+STORE, 140249656270848, 140249710600191,
+STORE, 140249656262656, 140249710600191,
+STORE, 140249656246272, 140249710600191,
+STORE, 140249656238080, 140249710600191,
+STORE, 140249656221696, 140249710600191,
+STORE, 140249656213504, 140249710600191,
+STORE, 140249656197120, 140249710600191,
+STORE, 140249656188928, 140249710600191,
+STORE, 140249656156160, 140249710600191,
+STORE, 140249656147968, 140249710600191,
+STORE, 140249656131584, 140249710600191,
+STORE, 140249656123392, 140249710600191,
+STORE, 140249656107008, 140249710600191,
+STORE, 140249656098816, 140249710600191,
+STORE, 140249656082432, 140249710600191,
+STORE, 140249656074240, 140249710600191,
+STORE, 140249656041472, 140249710600191,
+STORE, 140249656033280, 140249710600191,
+STORE, 140249656016896, 140249710600191,
+STORE, 140249656008704, 140249710600191,
+STORE, 140249655992320, 140249710600191,
+STORE, 140249655984128, 140249710600191,
+STORE, 140249655967744, 140249710600191,
+STORE, 140249653870592, 140249710600191,
+STORE, 140249653862400, 140249710600191,
+STORE, 140249653829632, 140249710600191,
+STORE, 140249653821440, 140249710600191,
+STORE, 140249653805056, 140249710600191,
+STORE, 140249653796864, 140249710600191,
+STORE, 140249653780480, 140249710600191,
+STORE, 140249653772288, 140249710600191,
+STORE, 140249653755904, 140249710600191,
+STORE, 140249652703232, 140249710600191,
+SNULL, 140249682087935, 140249710600191,
+STORE, 140249652703232, 140249682087935,
+STORE, 140249682087936, 140249710600191,
+ };
+
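+ /*
+  * set26: apparently a recorded address-range operation trace; values
+  * are consumed in (operation, start, end) triples, where the operation
+  * is one of STORE, SNULL or ERASE, and start/end look like raw
+  * user-space addresses from the original run.
+  */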
+ static const unsigned long set26[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140729464770560, 140737488351231,
+SNULL, 140729464774655, 140737488351231,
+STORE, 140729464770560, 140729464774655,
+STORE, 140729464639488, 140729464774655,
+STORE, 4194304, 5066751,
+STORE, 7159808, 7172095,
+STORE, 7172096, 7180287,
+STORE, 140729465114624, 140729465118719,
+STORE, 140729465102336, 140729465114623,
+STORE, 30867456, 30875647,
+STORE, 30867456, 31010815,
+STORE, 140109040988160, 140109042671615,
+STORE, 140109040959488, 140109040988159,
+STORE, 140109040943104, 140109040959487,
+ERASE, 140109040943104, 140109040959487,
+STORE, 140109040840704, 140109040959487,
+ERASE, 140109040840704, 140109040959487,
+STORE, 140109040951296, 140109040959487,
+ERASE, 140109040951296, 140109040959487,
+STORE, 140109040955392, 140109040959487,
+ERASE, 140109040955392, 140109040959487,
+ };
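+ /* set27: same (operation, start, end) triple format as the sets above. */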
+ static const unsigned long set27[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140726128070656, 140737488351231,
+SNULL, 140726128074751, 140737488351231,
+STORE, 140726128070656, 140726128074751,
+STORE, 140726127939584, 140726128074751,
+STORE, 94478497189888, 94478499303423,
+SNULL, 94478497202175, 94478499303423,
+STORE, 94478497189888, 94478497202175,
+STORE, 94478497202176, 94478499303423,
+ERASE, 94478497202176, 94478499303423,
+STORE, 94478499295232, 94478499303423,
+STORE, 140415605723136, 140415607975935,
+SNULL, 140415605866495, 140415607975935,
+STORE, 140415605723136, 140415605866495,
+STORE, 140415605866496, 140415607975935,
+ERASE, 140415605866496, 140415607975935,
+STORE, 140415607963648, 140415607971839,
+STORE, 140415607971840, 140415607975935,
+STORE, 140726130024448, 140726130028543,
+STORE, 140726130012160, 140726130024447,
+STORE, 140415607934976, 140415607963647,
+STORE, 140415607926784, 140415607934975,
+STORE, 140415603245056, 140415605723135,
+SNULL, 140415603245056, 140415603613695,
+STORE, 140415603613696, 140415605723135,
+STORE, 140415603245056, 140415603613695,
+SNULL, 140415605710847, 140415605723135,
+STORE, 140415603613696, 140415605710847,
+STORE, 140415605710848, 140415605723135,
+ERASE, 140415605710848, 140415605723135,
+STORE, 140415605710848, 140415605723135,
+STORE, 140415599370240, 140415603245055,
+SNULL, 140415599370240, 140415601111039,
+STORE, 140415601111040, 140415603245055,
+STORE, 140415599370240, 140415601111039,
+SNULL, 140415603208191, 140415603245055,
+STORE, 140415601111040, 140415603208191,
+STORE, 140415603208192, 140415603245055,
+ERASE, 140415603208192, 140415603245055,
+STORE, 140415603208192, 140415603245055,
+STORE, 140415595692032, 140415599370239,
+SNULL, 140415595692032, 140415597207551,
+STORE, 140415597207552, 140415599370239,
+STORE, 140415595692032, 140415597207551,
+SNULL, 140415599304703, 140415599370239,
+STORE, 140415597207552, 140415599304703,
+STORE, 140415599304704, 140415599370239,
+SNULL, 140415599304704, 140415599353855,
+STORE, 140415599353856, 140415599370239,
+STORE, 140415599304704, 140415599353855,
+ERASE, 140415599304704, 140415599353855,
+STORE, 140415599304704, 140415599353855,
+ERASE, 140415599353856, 140415599370239,
+STORE, 140415599353856, 140415599370239,
+STORE, 140415593500672, 140415595692031,
+SNULL, 140415593500672, 140415593590783,
+STORE, 140415593590784, 140415595692031,
+STORE, 140415593500672, 140415593590783,
+SNULL, 140415595683839, 140415595692031,
+STORE, 140415593590784, 140415595683839,
+STORE, 140415595683840, 140415595692031,
+ERASE, 140415595683840, 140415595692031,
+STORE, 140415595683840, 140415595692031,
+STORE, 140415589703680, 140415593500671,
+SNULL, 140415589703680, 140415591362559,
+STORE, 140415591362560, 140415593500671,
+STORE, 140415589703680, 140415591362559,
+SNULL, 140415593459711, 140415593500671,
+STORE, 140415591362560, 140415593459711,
+STORE, 140415593459712, 140415593500671,
+SNULL, 140415593459712, 140415593484287,
+STORE, 140415593484288, 140415593500671,
+STORE, 140415593459712, 140415593484287,
+ERASE, 140415593459712, 140415593484287,
+STORE, 140415593459712, 140415593484287,
+ERASE, 140415593484288, 140415593500671,
+STORE, 140415593484288, 140415593500671,
+STORE, 140415587590144, 140415589703679,
+SNULL, 140415587590144, 140415587602431,
+STORE, 140415587602432, 140415589703679,
+STORE, 140415587590144, 140415587602431,
+SNULL, 140415589695487, 140415589703679,
+STORE, 140415587602432, 140415589695487,
+STORE, 140415589695488, 140415589703679,
+ERASE, 140415589695488, 140415589703679,
+STORE, 140415589695488, 140415589703679,
+STORE, 140415607918592, 140415607934975,
+STORE, 140415585398784, 140415587590143,
+SNULL, 140415585398784, 140415585480703,
+STORE, 140415585480704, 140415587590143,
+STORE, 140415585398784, 140415585480703,
+SNULL, 140415587573759, 140415587590143,
+STORE, 140415585480704, 140415587573759,
+STORE, 140415587573760, 140415587590143,
+SNULL, 140415587573760, 140415587581951,
+STORE, 140415587581952, 140415587590143,
+STORE, 140415587573760, 140415587581951,
+ERASE, 140415587573760, 140415587581951,
+STORE, 140415587573760, 140415587581951,
+ERASE, 140415587581952, 140415587590143,
+STORE, 140415587581952, 140415587590143,
+STORE, 140415583182848, 140415585398783,
+SNULL, 140415583182848, 140415583281151,
+STORE, 140415583281152, 140415585398783,
+STORE, 140415583182848, 140415583281151,
+SNULL, 140415585374207, 140415585398783,
+STORE, 140415583281152, 140415585374207,
+STORE, 140415585374208, 140415585398783,
+SNULL, 140415585374208, 140415585382399,
+STORE, 140415585382400, 140415585398783,
+STORE, 140415585374208, 140415585382399,
+ERASE, 140415585374208, 140415585382399,
+STORE, 140415585374208, 140415585382399,
+ERASE, 140415585382400, 140415585398783,
+STORE, 140415585382400, 140415585398783,
+STORE, 140415580979200, 140415583182847,
+SNULL, 140415580979200, 140415581081599,
+STORE, 140415581081600, 140415583182847,
+STORE, 140415580979200, 140415581081599,
+SNULL, 140415583174655, 140415583182847,
+STORE, 140415581081600, 140415583174655,
+STORE, 140415583174656, 140415583182847,
+ERASE, 140415583174656, 140415583182847,
+STORE, 140415583174656, 140415583182847,
+STORE, 140415578816512, 140415580979199,
+SNULL, 140415578816512, 140415578877951,
+STORE, 140415578877952, 140415580979199,
+STORE, 140415578816512, 140415578877951,
+SNULL, 140415580971007, 140415580979199,
+STORE, 140415578877952, 140415580971007,
+STORE, 140415580971008, 140415580979199,
+ERASE, 140415580971008, 140415580979199,
+STORE, 140415580971008, 140415580979199,
+STORE, 140415576563712, 140415578816511,
+SNULL, 140415576563712, 140415576715263,
+STORE, 140415576715264, 140415578816511,
+STORE, 140415576563712, 140415576715263,
+SNULL, 140415578808319, 140415578816511,
+STORE, 140415576715264, 140415578808319,
+STORE, 140415578808320, 140415578816511,
+ERASE, 140415578808320, 140415578816511,
+STORE, 140415578808320, 140415578816511,
+STORE, 140415574392832, 140415576563711,
+SNULL, 140415574392832, 140415574462463,
+STORE, 140415574462464, 140415576563711,
+STORE, 140415574392832, 140415574462463,
+SNULL, 140415576555519, 140415576563711,
+STORE, 140415574462464, 140415576555519,
+STORE, 140415576555520, 140415576563711,
+ERASE, 140415576555520, 140415576563711,
+STORE, 140415576555520, 140415576563711,
+STORE, 140415607910400, 140415607934975,
+STORE, 140415571230720, 140415574392831,
+SNULL, 140415571230720, 140415572291583,
+STORE, 140415572291584, 140415574392831,
+STORE, 140415571230720, 140415572291583,
+SNULL, 140415574384639, 140415574392831,
+STORE, 140415572291584, 140415574384639,
+STORE, 140415574384640, 140415574392831,
+ERASE, 140415574384640, 140415574392831,
+STORE, 140415574384640, 140415574392831,
+STORE, 140415607902208, 140415607934975,
+SNULL, 140415593476095, 140415593484287,
+STORE, 140415593459712, 140415593476095,
+STORE, 140415593476096, 140415593484287,
+SNULL, 140415574388735, 140415574392831,
+STORE, 140415574384640, 140415574388735,
+STORE, 140415574388736, 140415574392831,
+SNULL, 140415576559615, 140415576563711,
+STORE, 140415576555520, 140415576559615,
+STORE, 140415576559616, 140415576563711,
+SNULL, 140415589699583, 140415589703679,
+STORE, 140415589695488, 140415589699583,
+STORE, 140415589699584, 140415589703679,
+SNULL, 140415585378303, 140415585382399,
+STORE, 140415585374208, 140415585378303,
+STORE, 140415585378304, 140415585382399,
+SNULL, 140415578812415, 140415578816511,
+STORE, 140415578808320, 140415578812415,
+STORE, 140415578812416, 140415578816511,
+SNULL, 140415580975103, 140415580979199,
+STORE, 140415580971008, 140415580975103,
+STORE, 140415580975104, 140415580979199,
+SNULL, 140415583178751, 140415583182847,
+STORE, 140415583174656, 140415583178751,
+STORE, 140415583178752, 140415583182847,
+SNULL, 140415587577855, 140415587581951,
+STORE, 140415587573760, 140415587577855,
+STORE, 140415587577856, 140415587581951,
+SNULL, 140415595687935, 140415595692031,
+STORE, 140415595683840, 140415595687935,
+STORE, 140415595687936, 140415595692031,
+STORE, 140415607894016, 140415607934975,
+SNULL, 140415599345663, 140415599353855,
+STORE, 140415599304704, 140415599345663,
+STORE, 140415599345664, 140415599353855,
+SNULL, 140415603240959, 140415603245055,
+STORE, 140415603208192, 140415603240959,
+STORE, 140415603240960, 140415603245055,
+SNULL, 140415605719039, 140415605723135,
+STORE, 140415605710848, 140415605719039,
+STORE, 140415605719040, 140415605723135,
+SNULL, 94478499299327, 94478499303423,
+STORE, 94478499295232, 94478499299327,
+STORE, 94478499299328, 94478499303423,
+SNULL, 140415607967743, 140415607971839,
+STORE, 140415607963648, 140415607967743,
+STORE, 140415607967744, 140415607971839,
+ERASE, 140415607934976, 140415607963647,
+STORE, 94478511173632, 94478511378431,
+STORE, 140415606210560, 140415607894015,
+STORE, 140415607934976, 140415607963647,
+STORE, 94478511173632, 94478511513599,
+STORE, 94478511173632, 94478511648767,
+SNULL, 94478511615999, 94478511648767,
+STORE, 94478511173632, 94478511615999,
+STORE, 94478511616000, 94478511648767,
+ERASE, 94478511616000, 94478511648767,
+STORE, 94478511173632, 94478511751167,
+SNULL, 94478511747071, 94478511751167,
+STORE, 94478511173632, 94478511747071,
+STORE, 94478511747072, 94478511751167,
+ERASE, 94478511747072, 94478511751167,
+STORE, 94478511173632, 94478511882239,
+SNULL, 94478511878143, 94478511882239,
+STORE, 94478511173632, 94478511878143,
+STORE, 94478511878144, 94478511882239,
+ERASE, 94478511878144, 94478511882239,
+STORE, 94478511173632, 94478512013311,
+SNULL, 94478512009215, 94478512013311,
+STORE, 94478511173632, 94478512009215,
+STORE, 94478512009216, 94478512013311,
+ERASE, 94478512009216, 94478512013311,
+STORE, 94478511173632, 94478512144383,
+STORE, 94478511173632, 94478512279551,
+STORE, 140415606181888, 140415606210559,
+STORE, 140415569100800, 140415571230719,
+SNULL, 140415569100800, 140415569129471,
+STORE, 140415569129472, 140415571230719,
+STORE, 140415569100800, 140415569129471,
+SNULL, 140415571222527, 140415571230719,
+STORE, 140415569129472, 140415571222527,
+STORE, 140415571222528, 140415571230719,
+ERASE, 140415571222528, 140415571230719,
+STORE, 140415571222528, 140415571230719,
+STORE, 140415566905344, 140415569100799,
+SNULL, 140415566905344, 140415566987263,
+STORE, 140415566987264, 140415569100799,
+STORE, 140415566905344, 140415566987263,
+SNULL, 140415569084415, 140415569100799,
+STORE, 140415566987264, 140415569084415,
+STORE, 140415569084416, 140415569100799,
+SNULL, 140415569084416, 140415569092607,
+STORE, 140415569092608, 140415569100799,
+STORE, 140415569084416, 140415569092607,
+ERASE, 140415569084416, 140415569092607,
+STORE, 140415569084416, 140415569092607,
+ERASE, 140415569092608, 140415569100799,
+STORE, 140415569092608, 140415569100799,
+SNULL, 140415569088511, 140415569092607,
+STORE, 140415569084416, 140415569088511,
+STORE, 140415569088512, 140415569092607,
+SNULL, 140415571226623, 140415571230719,
+STORE, 140415571222528, 140415571226623,
+STORE, 140415571226624, 140415571230719,
+ERASE, 140415606181888, 140415606210559,
+STORE, 140415606181888, 140415606210559,
+STORE, 140415564759040, 140415566905343,
+SNULL, 140415564759040, 140415564804095,
+STORE, 140415564804096, 140415566905343,
+STORE, 140415564759040, 140415564804095,
+SNULL, 140415566897151, 140415566905343,
+STORE, 140415564804096, 140415566897151,
+STORE, 140415566897152, 140415566905343,
+ERASE, 140415566897152, 140415566905343,
+STORE, 140415566897152, 140415566905343,
+STORE, 140415562588160, 140415564759039,
+SNULL, 140415562588160, 140415562629119,
+STORE, 140415562629120, 140415564759039,
+STORE, 140415562588160, 140415562629119,
+SNULL, 140415564726271, 140415564759039,
+STORE, 140415562629120, 140415564726271,
+STORE, 140415564726272, 140415564759039,
+SNULL, 140415564726272, 140415564734463,
+STORE, 140415564734464, 140415564759039,
+STORE, 140415564726272, 140415564734463,
+ERASE, 140415564726272, 140415564734463,
+STORE, 140415564726272, 140415564734463,
+ERASE, 140415564734464, 140415564759039,
+STORE, 140415564734464, 140415564759039,
+SNULL, 140415564730367, 140415564734463,
+STORE, 140415564726272, 140415564730367,
+STORE, 140415564730368, 140415564734463,
+SNULL, 140415566901247, 140415566905343,
+STORE, 140415566897152, 140415566901247,
+STORE, 140415566901248, 140415566905343,
+ERASE, 140415606181888, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415605944320, 140415606210559,
+ERASE, 140415605944320, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 94478511173632, 94478512414719,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 94478511173632, 94478512652287,
+STORE, 94478511173632, 94478512787455,
+STORE, 94478511173632, 94478512922623,
+STORE, 94478511173632, 94478513057791,
+STORE, 140415537422336, 140415562588159,
+STORE, 94478511173632, 94478513192959,
+STORE, 94478511173632, 94478513356799,
+STORE, 94478511173632, 94478513491967,
+STORE, 94478511173632, 94478513627135,
+STORE, 94478511173632, 94478513790975,
+STORE, 94478511173632, 94478513926143,
+STORE, 94478511173632, 94478514061311,
+STORE, 94478511173632, 94478514196479,
+STORE, 94478511173632, 94478514331647,
+STORE, 94478511173632, 94478514606079,
+STORE, 94478511173632, 94478514741247,
+STORE, 94478511173632, 94478514876415,
+STORE, 94478511173632, 94478515011583,
+STORE, 94478511173632, 94478515146751,
+STORE, 94478511173632, 94478515281919,
+STORE, 94478511173632, 94478515474431,
+STORE, 94478511173632, 94478515609599,
+STORE, 94478511173632, 94478515744767,
+STORE, 140415536922624, 140415562588159,
+STORE, 94478511173632, 94478515879935,
+STORE, 94478511173632, 94478516015103,
+STORE, 94478511173632, 94478516150271,
+STORE, 94478511173632, 94478516285439,
+STORE, 94478511173632, 94478516420607,
+STORE, 94478511173632, 94478516555775,
+STORE, 94478511173632, 94478516690943,
+STORE, 94478511173632, 94478516826111,
+STORE, 94478511173632, 94478516961279,
+STORE, 94478511173632, 94478517231615,
+STORE, 94478511173632, 94478517366783,
+STORE, 94478511173632, 94478517501951,
+STORE, 94478511173632, 94478517637119,
+STORE, 94478511173632, 94478517772287,
+STORE, 94478511173632, 94478517907455,
+STORE, 94478511173632, 94478518042623,
+STORE, 94478511173632, 94478518177791,
+STORE, 94478511173632, 94478518312959,
+STORE, 94478511173632, 94478518448127,
+STORE, 140415535910912, 140415562588159,
+SNULL, 140415536922623, 140415562588159,
+STORE, 140415535910912, 140415536922623,
+STORE, 140415536922624, 140415562588159,
+SNULL, 140415536922624, 140415537422335,
+STORE, 140415537422336, 140415562588159,
+STORE, 140415536922624, 140415537422335,
+ERASE, 140415536922624, 140415537422335,
+STORE, 94478511173632, 94478518583295,
+STORE, 94478511173632, 94478518718463,
+STORE, 94478511173632, 94478518853631,
+STORE, 94478511173632, 94478518988799,
+STORE, 94478511173632, 94478519123967,
+STORE, 94478511173632, 94478519259135,
+STORE, 140415509696512, 140415535910911,
+ERASE, 140415537422336, 140415562588159,
+STORE, 140415482433536, 140415509696511,
+ };
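+ /* set28: a shorter trace in the same (operation, start, end) format. */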
+ static const unsigned long set28[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140722475622400, 140737488351231,
+SNULL, 140722475626495, 140737488351231,
+STORE, 140722475622400, 140722475626495,
+STORE, 140722475491328, 140722475626495,
+STORE, 93865834291200, 93865836548095,
+SNULL, 93865834422271, 93865836548095,
+STORE, 93865834291200, 93865834422271,
+STORE, 93865834422272, 93865836548095,
+ERASE, 93865834422272, 93865836548095,
+STORE, 93865836519424, 93865836527615,
+STORE, 93865836527616, 93865836548095,
+STORE, 139918411104256, 139918413357055,
+SNULL, 139918411247615, 139918413357055,
+STORE, 139918411104256, 139918411247615,
+STORE, 139918411247616, 139918413357055,
+ERASE, 139918411247616, 139918413357055,
+STORE, 139918413344768, 139918413352959,
+STORE, 139918413352960, 139918413357055,
+STORE, 140722476642304, 140722476646399,
+STORE, 140722476630016, 140722476642303,
+STORE, 139918413316096, 139918413344767,
+STORE, 139918413307904, 139918413316095,
+STORE, 139918408888320, 139918411104255,
+SNULL, 139918408888320, 139918408986623,
+STORE, 139918408986624, 139918411104255,
+STORE, 139918408888320, 139918408986623,
+SNULL, 139918411079679, 139918411104255,
+STORE, 139918408986624, 139918411079679,
+STORE, 139918411079680, 139918411104255,
+SNULL, 139918411079680, 139918411087871,
+STORE, 139918411087872, 139918411104255,
+STORE, 139918411079680, 139918411087871,
+ERASE, 139918411079680, 139918411087871,
+STORE, 139918411079680, 139918411087871,
+ERASE, 139918411087872, 139918411104255,
+STORE, 139918411087872, 139918411104255,
+STORE, 139918405091328, 139918408888319,
+SNULL, 139918405091328, 139918406750207,
+STORE, 139918406750208, 139918408888319,
+STORE, 139918405091328, 139918406750207,
+SNULL, 139918408847359, 139918408888319,
+STORE, 139918406750208, 139918408847359,
+STORE, 139918408847360, 139918408888319,
+SNULL, 139918408847360, 139918408871935,
+STORE, 139918408871936, 139918408888319,
+STORE, 139918408847360, 139918408871935,
+ERASE, 139918408847360, 139918408871935,
+STORE, 139918408847360, 139918408871935,
+ERASE, 139918408871936, 139918408888319,
+STORE, 139918408871936, 139918408888319,
+STORE, 139918413299712, 139918413316095,
+SNULL, 139918408863743, 139918408871935,
+STORE, 139918408847360, 139918408863743,
+STORE, 139918408863744, 139918408871935,
+SNULL, 139918411083775, 139918411087871,
+STORE, 139918411079680, 139918411083775,
+STORE, 139918411083776, 139918411087871,
+SNULL, 93865836523519, 93865836527615,
+STORE, 93865836519424, 93865836523519,
+STORE, 93865836523520, 93865836527615,
+SNULL, 139918413348863, 139918413352959,
+STORE, 139918413344768, 139918413348863,
+STORE, 139918413348864, 139918413352959,
+ERASE, 139918413316096, 139918413344767,
+STORE, 93865848528896, 93865848664063,
+ };
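+ /* set29: same triple format; ends with a run of ERASE operations. */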
+ static const unsigned long set29[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140734467944448, 140737488351231,
+SNULL, 140734467948543, 140737488351231,
+STORE, 140734467944448, 140734467948543,
+STORE, 140734467813376, 140734467948543,
+STORE, 94880407924736, 94880410177535,
+SNULL, 94880408055807, 94880410177535,
+STORE, 94880407924736, 94880408055807,
+STORE, 94880408055808, 94880410177535,
+ERASE, 94880408055808, 94880410177535,
+STORE, 94880410148864, 94880410157055,
+STORE, 94880410157056, 94880410177535,
+STORE, 140143367815168, 140143370067967,
+SNULL, 140143367958527, 140143370067967,
+STORE, 140143367815168, 140143367958527,
+STORE, 140143367958528, 140143370067967,
+ERASE, 140143367958528, 140143370067967,
+STORE, 140143370055680, 140143370063871,
+STORE, 140143370063872, 140143370067967,
+STORE, 140734468329472, 140734468333567,
+STORE, 140734468317184, 140734468329471,
+STORE, 140143370027008, 140143370055679,
+STORE, 140143370018816, 140143370027007,
+STORE, 140143365599232, 140143367815167,
+SNULL, 140143365599232, 140143365697535,
+STORE, 140143365697536, 140143367815167,
+STORE, 140143365599232, 140143365697535,
+SNULL, 140143367790591, 140143367815167,
+STORE, 140143365697536, 140143367790591,
+STORE, 140143367790592, 140143367815167,
+SNULL, 140143367790592, 140143367798783,
+STORE, 140143367798784, 140143367815167,
+STORE, 140143367790592, 140143367798783,
+ERASE, 140143367790592, 140143367798783,
+STORE, 140143367790592, 140143367798783,
+ERASE, 140143367798784, 140143367815167,
+STORE, 140143367798784, 140143367815167,
+STORE, 140143361802240, 140143365599231,
+SNULL, 140143361802240, 140143363461119,
+STORE, 140143363461120, 140143365599231,
+STORE, 140143361802240, 140143363461119,
+SNULL, 140143365558271, 140143365599231,
+STORE, 140143363461120, 140143365558271,
+STORE, 140143365558272, 140143365599231,
+SNULL, 140143365558272, 140143365582847,
+STORE, 140143365582848, 140143365599231,
+STORE, 140143365558272, 140143365582847,
+ERASE, 140143365558272, 140143365582847,
+STORE, 140143365558272, 140143365582847,
+ERASE, 140143365582848, 140143365599231,
+STORE, 140143365582848, 140143365599231,
+STORE, 140143370010624, 140143370027007,
+SNULL, 140143365574655, 140143365582847,
+STORE, 140143365558272, 140143365574655,
+STORE, 140143365574656, 140143365582847,
+SNULL, 140143367794687, 140143367798783,
+STORE, 140143367790592, 140143367794687,
+STORE, 140143367794688, 140143367798783,
+SNULL, 94880410152959, 94880410157055,
+STORE, 94880410148864, 94880410152959,
+STORE, 94880410152960, 94880410157055,
+SNULL, 140143370059775, 140143370063871,
+STORE, 140143370055680, 140143370059775,
+STORE, 140143370059776, 140143370063871,
+ERASE, 140143370027008, 140143370055679,
+STORE, 94880442400768, 94880442535935,
+STORE, 140143353409536, 140143361802239,
+SNULL, 140143353413631, 140143361802239,
+STORE, 140143353409536, 140143353413631,
+STORE, 140143353413632, 140143361802239,
+STORE, 140143345016832, 140143353409535,
+STORE, 140143210799104, 140143345016831,
+SNULL, 140143210799104, 140143239364607,
+STORE, 140143239364608, 140143345016831,
+STORE, 140143210799104, 140143239364607,
+ERASE, 140143210799104, 140143239364607,
+SNULL, 140143306473471, 140143345016831,
+STORE, 140143239364608, 140143306473471,
+STORE, 140143306473472, 140143345016831,
+ERASE, 140143306473472, 140143345016831,
+SNULL, 140143239499775, 140143306473471,
+STORE, 140143239364608, 140143239499775,
+STORE, 140143239499776, 140143306473471,
+SNULL, 140143345020927, 140143353409535,
+STORE, 140143345016832, 140143345020927,
+STORE, 140143345020928, 140143353409535,
+STORE, 140143336624128, 140143345016831,
+SNULL, 140143336628223, 140143345016831,
+STORE, 140143336624128, 140143336628223,
+STORE, 140143336628224, 140143345016831,
+STORE, 140143328231424, 140143336624127,
+SNULL, 140143328235519, 140143336624127,
+STORE, 140143328231424, 140143328235519,
+STORE, 140143328235520, 140143336624127,
+STORE, 140143319838720, 140143328231423,
+SNULL, 140143319842815, 140143328231423,
+STORE, 140143319838720, 140143319842815,
+STORE, 140143319842816, 140143328231423,
+STORE, 140143311446016, 140143319838719,
+STORE, 140143105146880, 140143239364607,
+STORE, 140143096754176, 140143105146879,
+STORE, 140143029645312, 140143096754175,
+ERASE, 140143029645312, 140143096754175,
+STORE, 140142962536448, 140143096754175,
+SNULL, 140142962536448, 140142970929151,
+STORE, 140142970929152, 140143096754175,
+STORE, 140142962536448, 140142970929151,
+ERASE, 140142962536448, 140142970929151,
+STORE, 140142962536448, 140142970929151,
+STORE, 140142828318720, 140142962536447,
+STORE, 140142819926016, 140142828318719,
+SNULL, 140142828318720, 140142836711423,
+STORE, 140142836711424, 140142962536447,
+STORE, 140142828318720, 140142836711423,
+ERASE, 140142828318720, 140142836711423,
+SNULL, 140143172255743, 140143239364607,
+STORE, 140143105146880, 140143172255743,
+STORE, 140143172255744, 140143239364607,
+ERASE, 140143172255744, 140143239364607,
+SNULL, 140143105282047, 140143172255743,
+STORE, 140143105146880, 140143105282047,
+STORE, 140143105282048, 140143172255743,
+SNULL, 140143038038015, 140143096754175,
+STORE, 140142970929152, 140143038038015,
+STORE, 140143038038016, 140143096754175,
+ERASE, 140143038038016, 140143096754175,
+SNULL, 140142971064319, 140143038038015,
+STORE, 140142970929152, 140142971064319,
+STORE, 140142971064320, 140143038038015,
+SNULL, 140142903820287, 140142962536447,
+STORE, 140142836711424, 140142903820287,
+STORE, 140142903820288, 140142962536447,
+ERASE, 140142903820288, 140142962536447,
+SNULL, 140142836846591, 140142903820287,
+STORE, 140142836711424, 140142836846591,
+STORE, 140142836846592, 140142903820287,
+STORE, 140142685708288, 140142819926015,
+SNULL, 140143311450111, 140143319838719,
+STORE, 140143311446016, 140143311450111,
+STORE, 140143311450112, 140143319838719,
+SNULL, 140142962540543, 140142970929151,
+STORE, 140142962536448, 140142962540543,
+STORE, 140142962540544, 140142970929151,
+SNULL, 140142685708288, 140142702493695,
+STORE, 140142702493696, 140142819926015,
+STORE, 140142685708288, 140142702493695,
+ERASE, 140142685708288, 140142702493695,
+SNULL, 140142769602559, 140142819926015,
+STORE, 140142702493696, 140142769602559,
+STORE, 140142769602560, 140142819926015,
+ERASE, 140142769602560, 140142819926015,
+SNULL, 140142702628863, 140142769602559,
+STORE, 140142702493696, 140142702628863,
+STORE, 140142702628864, 140142769602559,
+STORE, 140143230971904, 140143239364607,
+SNULL, 140143230975999, 140143239364607,
+STORE, 140143230971904, 140143230975999,
+STORE, 140143230976000, 140143239364607,
+SNULL, 140143096758271, 140143105146879,
+STORE, 140143096754176, 140143096758271,
+STORE, 140143096758272, 140143105146879,
+STORE, 140143222579200, 140143230971903,
+SNULL, 140143222583295, 140143230971903,
+STORE, 140143222579200, 140143222583295,
+STORE, 140143222583296, 140143230971903,
+STORE, 140143214186496, 140143222579199,
+SNULL, 140142819930111, 140142828318719,
+STORE, 140142819926016, 140142819930111,
+STORE, 140142819930112, 140142828318719,
+STORE, 140143205793792, 140143222579199,
+SNULL, 140143205793792, 140143214186495,
+STORE, 140143214186496, 140143222579199,
+STORE, 140143205793792, 140143214186495,
+SNULL, 140143214190591, 140143222579199,
+STORE, 140143214186496, 140143214190591,
+STORE, 140143214190592, 140143222579199,
+SNULL, 140143205797887, 140143214186495,
+STORE, 140143205793792, 140143205797887,
+STORE, 140143205797888, 140143214186495,
+STORE, 140143197401088, 140143205793791,
+SNULL, 140143197405183, 140143205793791,
+STORE, 140143197401088, 140143197405183,
+STORE, 140143197405184, 140143205793791,
+STORE, 140143189008384, 140143197401087,
+STORE, 140143180615680, 140143197401087,
+STORE, 140143088361472, 140143096754175,
+SNULL, 140143180619775, 140143197401087,
+STORE, 140143180615680, 140143180619775,
+STORE, 140143180619776, 140143197401087,
+SNULL, 140143180619776, 140143189008383,
+STORE, 140143189008384, 140143197401087,
+STORE, 140143180619776, 140143189008383,
+SNULL, 140143189012479, 140143197401087,
+STORE, 140143189008384, 140143189012479,
+STORE, 140143189012480, 140143197401087,
+SNULL, 140143088365567, 140143096754175,
+STORE, 140143088361472, 140143088365567,
+STORE, 140143088365568, 140143096754175,
+STORE, 140143079968768, 140143088361471,
+SNULL, 140143079972863, 140143088361471,
+STORE, 140143079968768, 140143079972863,
+STORE, 140143079972864, 140143088361471,
+STORE, 140143071576064, 140143079968767,
+SNULL, 140143071580159, 140143079968767,
+STORE, 140143071576064, 140143071580159,
+STORE, 140143071580160, 140143079968767,
+STORE, 140143063183360, 140143071576063,
+STORE, 140143054790656, 140143071576063,
+SNULL, 140143054794751, 140143071576063,
+STORE, 140143054790656, 140143054794751,
+STORE, 140143054794752, 140143071576063,
+SNULL, 140143054794752, 140143063183359,
+STORE, 140143063183360, 140143071576063,
+STORE, 140143054794752, 140143063183359,
+SNULL, 140143063187455, 140143071576063,
+STORE, 140143063183360, 140143063187455,
+STORE, 140143063187456, 140143071576063,
+STORE, 140143046397952, 140143054790655,
+STORE, 140142954143744, 140142962536447,
+STORE, 140142945751040, 140142962536447,
+STORE, 140142937358336, 140142962536447,
+STORE, 140142928965632, 140142962536447,
+STORE, 140142568275968, 140142702493695,
+SNULL, 140142635384831, 140142702493695,
+STORE, 140142568275968, 140142635384831,
+STORE, 140142635384832, 140142702493695,
+ERASE, 140142635384832, 140142702493695,
+STORE, 140142920572928, 140142962536447,
+STORE, 140142912180224, 140142962536447,
+STORE, 140142568275968, 140142702493695,
+SNULL, 140142568275968, 140142635384831,
+STORE, 140142635384832, 140142702493695,
+STORE, 140142568275968, 140142635384831,
+SNULL, 140142635519999, 140142702493695,
+STORE, 140142635384832, 140142635519999,
+STORE, 140142635520000, 140142702493695,
+STORE, 140142819930112, 140142836711423,
+STORE, 140142811533312, 140142819926015,
+STORE, 140142434058240, 140142635384831,
+SNULL, 140142501167103, 140142635384831,
+STORE, 140142434058240, 140142501167103,
+STORE, 140142501167104, 140142635384831,
+SNULL, 140142501167104, 140142568275967,
+STORE, 140142568275968, 140142635384831,
+STORE, 140142501167104, 140142568275967,
+ERASE, 140142501167104, 140142568275967,
+STORE, 140142299840512, 140142501167103,
+STORE, 140142803140608, 140142819926015,
+SNULL, 140142366949375, 140142501167103,
+STORE, 140142299840512, 140142366949375,
+STORE, 140142366949376, 140142501167103,
+SNULL, 140142366949376, 140142434058239,
+STORE, 140142434058240, 140142501167103,
+STORE, 140142366949376, 140142434058239,
+ERASE, 140142366949376, 140142434058239,
+STORE, 140142794747904, 140142819926015,
+STORE, 140142786355200, 140142819926015,
+STORE, 140142299840512, 140142501167103,
+STORE, 140142777962496, 140142819926015,
+STORE, 140142559883264, 140142568275967,
+STORE, 140142232731648, 140142501167103,
+STORE, 140142551490560, 140142568275967,
+SNULL, 140142777962496, 140142803140607,
+STORE, 140142803140608, 140142819926015,
+STORE, 140142777962496, 140142803140607,
+SNULL, 140142803144703, 140142819926015,
+STORE, 140142803140608, 140142803144703,
+STORE, 140142803144704, 140142819926015,
+STORE, 140142543097856, 140142568275967,
+STORE, 140142098513920, 140142501167103,
+SNULL, 140142165622783, 140142501167103,
+STORE, 140142098513920, 140142165622783,
+STORE, 140142165622784, 140142501167103,
+SNULL, 140142165622784, 140142232731647,
+STORE, 140142232731648, 140142501167103,
+STORE, 140142165622784, 140142232731647,
+ERASE, 140142165622784, 140142232731647,
+SNULL, 140142568411135, 140142635384831,
+STORE, 140142568275968, 140142568411135,
+STORE, 140142568411136, 140142635384831,
+STORE, 140141964296192, 140142165622783,
+SNULL, 140142912180224, 140142928965631,
+STORE, 140142928965632, 140142962536447,
+STORE, 140142912180224, 140142928965631,
+SNULL, 140142928969727, 140142962536447,
+STORE, 140142928965632, 140142928969727,
+STORE, 140142928969728, 140142962536447,
+STORE, 140141830078464, 140142165622783,
+SNULL, 140142912184319, 140142928965631,
+STORE, 140142912180224, 140142912184319,
+STORE, 140142912184320, 140142928965631,
+SNULL, 140142232731648, 140142434058239,
+STORE, 140142434058240, 140142501167103,
+STORE, 140142232731648, 140142434058239,
+SNULL, 140142434193407, 140142501167103,
+STORE, 140142434058240, 140142434193407,
+STORE, 140142434193408, 140142501167103,
+SNULL, 140142232731648, 140142299840511,
+STORE, 140142299840512, 140142434058239,
+STORE, 140142232731648, 140142299840511,
+SNULL, 140142299975679, 140142434058239,
+STORE, 140142299840512, 140142299975679,
+STORE, 140142299975680, 140142434058239,
+SNULL, 140142928969728, 140142954143743,
+STORE, 140142954143744, 140142962536447,
+STORE, 140142928969728, 140142954143743,
+SNULL, 140142954147839, 140142962536447,
+STORE, 140142954143744, 140142954147839,
+STORE, 140142954147840, 140142962536447,
+STORE, 140141830078464, 140142299840511,
+SNULL, 140142543097856, 140142559883263,
+STORE, 140142559883264, 140142568275967,
+STORE, 140142543097856, 140142559883263,
+SNULL, 140142559887359, 140142568275967,
+STORE, 140142559883264, 140142559887359,
+STORE, 140142559887360, 140142568275967,
+STORE, 140142534705152, 140142559883263,
+SNULL, 140142928969728, 140142945751039,
+STORE, 140142945751040, 140142954143743,
+STORE, 140142928969728, 140142945751039,
+SNULL, 140142945755135, 140142954143743,
+STORE, 140142945751040, 140142945755135,
+STORE, 140142945755136, 140142954143743,
+SNULL, 140142299975680, 140142366949375,
+STORE, 140142366949376, 140142434058239,
+STORE, 140142299975680, 140142366949375,
+SNULL, 140142367084543, 140142434058239,
+STORE, 140142366949376, 140142367084543,
+STORE, 140142367084544, 140142434058239,
+SNULL, 140142928969728, 140142937358335,
+STORE, 140142937358336, 140142945751039,
+STORE, 140142928969728, 140142937358335,
+SNULL, 140142937362431, 140142945751039,
+STORE, 140142937358336, 140142937362431,
+STORE, 140142937362432, 140142945751039,
+SNULL, 140141830078464, 140142232731647,
+STORE, 140142232731648, 140142299840511,
+STORE, 140141830078464, 140142232731647,
+SNULL, 140142232866815, 140142299840511,
+STORE, 140142232731648, 140142232866815,
+STORE, 140142232866816, 140142299840511,
+SNULL, 140142534705152, 140142543097855,
+STORE, 140142543097856, 140142559883263,
+STORE, 140142534705152, 140142543097855,
+SNULL, 140142543101951, 140142559883263,
+STORE, 140142543097856, 140142543101951,
+STORE, 140142543101952, 140142559883263,
+STORE, 140142526312448, 140142543097855,
+STORE, 140142517919744, 140142543097855,
+SNULL, 140141830078464, 140142098513919,
+STORE, 140142098513920, 140142232731647,
+STORE, 140141830078464, 140142098513919,
+SNULL, 140142098649087, 140142232731647,
+STORE, 140142098513920, 140142098649087,
+STORE, 140142098649088, 140142232731647,
+SNULL, 140142031405055, 140142098513919,
+STORE, 140141830078464, 140142031405055,
+STORE, 140142031405056, 140142098513919,
+ERASE, 140142031405056, 140142098513919,
+SNULL, 140141830078464, 140141964296191,
+STORE, 140141964296192, 140142031405055,
+STORE, 140141830078464, 140141964296191,
+SNULL, 140141964431359, 140142031405055,
+STORE, 140141964296192, 140141964431359,
+STORE, 140141964431360, 140142031405055,
+STORE, 140142509527040, 140142543097855,
+SNULL, 140141897187327, 140141964296191,
+STORE, 140141830078464, 140141897187327,
+STORE, 140141897187328, 140141964296191,
+ERASE, 140141897187328, 140141964296191,
+SNULL, 140141830213631, 140141897187327,
+STORE, 140141830078464, 140141830213631,
+STORE, 140141830213632, 140141897187327,
+SNULL, 140142803144704, 140142811533311,
+STORE, 140142811533312, 140142819926015,
+STORE, 140142803144704, 140142811533311,
+SNULL, 140142811537407, 140142819926015,
+STORE, 140142811533312, 140142811537407,
+STORE, 140142811537408, 140142819926015,
+SNULL, 140142098649088, 140142165622783,
+STORE, 140142165622784, 140142232731647,
+STORE, 140142098649088, 140142165622783,
+SNULL, 140142165757951, 140142232731647,
+STORE, 140142165622784, 140142165757951,
+STORE, 140142165757952, 140142232731647,
+STORE, 140142090121216, 140142098513919,
+SNULL, 140142777962496, 140142786355199,
+STORE, 140142786355200, 140142803140607,
+STORE, 140142777962496, 140142786355199,
+SNULL, 140142786359295, 140142803140607,
+STORE, 140142786355200, 140142786359295,
+STORE, 140142786359296, 140142803140607,
+SNULL, 140142509527040, 140142534705151,
+STORE, 140142534705152, 140142543097855,
+STORE, 140142509527040, 140142534705151,
+SNULL, 140142534709247, 140142543097855,
+STORE, 140142534705152, 140142534709247,
+STORE, 140142534709248, 140142543097855,
+STORE, 140142081728512, 140142098513919,
+SNULL, 140142786359296, 140142794747903,
+STORE, 140142794747904, 140142803140607,
+STORE, 140142786359296, 140142794747903,
+SNULL, 140142794751999, 140142803140607,
+STORE, 140142794747904, 140142794751999,
+STORE, 140142794752000, 140142803140607,
+STORE, 140142073335808, 140142098513919,
+SNULL, 140142073339903, 140142098513919,
+STORE, 140142073335808, 140142073339903,
+STORE, 140142073339904, 140142098513919,
+SNULL, 140142543101952, 140142551490559,
+STORE, 140142551490560, 140142559883263,
+STORE, 140142543101952, 140142551490559,
+SNULL, 140142551494655, 140142559883263,
+STORE, 140142551490560, 140142551494655,
+STORE, 140142551494656, 140142559883263,
+SNULL, 140142509527040, 140142517919743,
+STORE, 140142517919744, 140142534705151,
+STORE, 140142509527040, 140142517919743,
+SNULL, 140142517923839, 140142534705151,
+STORE, 140142517919744, 140142517923839,
+STORE, 140142517923840, 140142534705151,
+STORE, 140142064943104, 140142073335807,
+SNULL, 140142073339904, 140142090121215,
+STORE, 140142090121216, 140142098513919,
+STORE, 140142073339904, 140142090121215,
+SNULL, 140142090125311, 140142098513919,
+STORE, 140142090121216, 140142090125311,
+STORE, 140142090125312, 140142098513919,
+STORE, 140142056550400, 140142073335807,
+SNULL, 140142056554495, 140142073335807,
+STORE, 140142056550400, 140142056554495,
+STORE, 140142056554496, 140142073335807,
+STORE, 140142048157696, 140142056550399,
+SNULL, 140142509531135, 140142517919743,
+STORE, 140142509527040, 140142509531135,
+STORE, 140142509531136, 140142517919743,
+SNULL, 140142777966591, 140142786355199,
+STORE, 140142777962496, 140142777966591,
+STORE, 140142777966592, 140142786355199,
+SNULL, 140143046402047, 140143054790655,
+STORE, 140143046397952, 140143046402047,
+STORE, 140143046402048, 140143054790655,
+SNULL, 140142912184320, 140142920572927,
+STORE, 140142920572928, 140142928965631,
+STORE, 140142912184320, 140142920572927,
+SNULL, 140142920577023, 140142928965631,
+STORE, 140142920572928, 140142920577023,
+STORE, 140142920577024, 140142928965631,
+STORE, 140142039764992, 140142056550399,
+STORE, 140141955903488, 140141964296191,
+SNULL, 140142819930112, 140142828318719,
+STORE, 140142828318720, 140142836711423,
+STORE, 140142819930112, 140142828318719,
+SNULL, 140142828322815, 140142836711423,
+STORE, 140142828318720, 140142828322815,
+STORE, 140142828322816, 140142836711423,
+SNULL, 140142517923840, 140142526312447,
+STORE, 140142526312448, 140142534705151,
+STORE, 140142517923840, 140142526312447,
+SNULL, 140142526316543, 140142534705151,
+STORE, 140142526312448, 140142526316543,
+STORE, 140142526316544, 140142534705151,
+STORE, 140141947510784, 140141964296191,
+SNULL, 140142056554496, 140142064943103,
+STORE, 140142064943104, 140142073335807,
+STORE, 140142056554496, 140142064943103,
+SNULL, 140142064947199, 140142073335807,
+STORE, 140142064943104, 140142064947199,
+STORE, 140142064947200, 140142073335807,
+SNULL, 140142073339904, 140142081728511,
+STORE, 140142081728512, 140142090121215,
+STORE, 140142073339904, 140142081728511,
+SNULL, 140142081732607, 140142090121215,
+STORE, 140142081728512, 140142081732607,
+STORE, 140142081732608, 140142090121215,
+STORE, 140141939118080, 140141964296191,
+STORE, 140141930725376, 140141964296191,
+STORE, 140141922332672, 140141964296191,
+STORE, 140141913939968, 140141964296191,
+SNULL, 140141913939968, 140141922332671,
+STORE, 140141922332672, 140141964296191,
+STORE, 140141913939968, 140141922332671,
+SNULL, 140141922336767, 140141964296191,
+STORE, 140141922332672, 140141922336767,
+STORE, 140141922336768, 140141964296191,
+STORE, 140141905547264, 140141922332671,
+SNULL, 140141905551359, 140141922332671,
+STORE, 140141905547264, 140141905551359,
+STORE, 140141905551360, 140141922332671,
+STORE, 140141821685760, 140141830078463,
+STORE, 140141813293056, 140141830078463,
+STORE, 140141804900352, 140141830078463,
+STORE, 140141796507648, 140141830078463,
+SNULL, 140141796511743, 140141830078463,
+STORE, 140141796507648, 140141796511743,
+STORE, 140141796511744, 140141830078463,
+SNULL, 140141922336768, 140141955903487,
+STORE, 140141955903488, 140141964296191,
+STORE, 140141922336768, 140141955903487,
+SNULL, 140141955907583, 140141964296191,
+STORE, 140141955903488, 140141955907583,
+STORE, 140141955907584, 140141964296191,
+STORE, 140141788114944, 140141796507647,
+STORE, 140141779722240, 140141796507647,
+SNULL, 140141779722240, 140141788114943,
+STORE, 140141788114944, 140141796507647,
+STORE, 140141779722240, 140141788114943,
+SNULL, 140141788119039, 140141796507647,
+STORE, 140141788114944, 140141788119039,
+STORE, 140141788119040, 140141796507647,
+SNULL, 140141922336768, 140141947510783,
+STORE, 140141947510784, 140141955903487,
+STORE, 140141922336768, 140141947510783,
+SNULL, 140141947514879, 140141955903487,
+STORE, 140141947510784, 140141947514879,
+STORE, 140141947514880, 140141955903487,
+SNULL, 140142039764992, 140142048157695,
+STORE, 140142048157696, 140142056550399,
+STORE, 140142039764992, 140142048157695,
+SNULL, 140142048161791, 140142056550399,
+STORE, 140142048157696, 140142048161791,
+STORE, 140142048161792, 140142056550399,
+SNULL, 140142039769087, 140142048157695,
+STORE, 140142039764992, 140142039769087,
+STORE, 140142039769088, 140142048157695,
+SNULL, 140141796511744, 140141804900351,
+STORE, 140141804900352, 140141830078463,
+STORE, 140141796511744, 140141804900351,
+SNULL, 140141804904447, 140141830078463,
+STORE, 140141804900352, 140141804904447,
+STORE, 140141804904448, 140141830078463,
+STORE, 140141771329536, 140141788114943,
+STORE, 140141762936832, 140141788114943,
+STORE, 140141754544128, 140141788114943,
+SNULL, 140141804904448, 140141821685759,
+STORE, 140141821685760, 140141830078463,
+STORE, 140141804904448, 140141821685759,
+SNULL, 140141821689855, 140141830078463,
+STORE, 140141821685760, 140141821689855,
+STORE, 140141821689856, 140141830078463,
+SNULL, 140141922336768, 140141939118079,
+STORE, 140141939118080, 140141947510783,
+STORE, 140141922336768, 140141939118079,
+SNULL, 140141939122175, 140141947510783,
+STORE, 140141939118080, 140141939122175,
+STORE, 140141939122176, 140141947510783,
+SNULL, 140141905551360, 140141913939967,
+STORE, 140141913939968, 140141922332671,
+STORE, 140141905551360, 140141913939967,
+SNULL, 140141913944063, 140141922332671,
+STORE, 140141913939968, 140141913944063,
+STORE, 140141913944064, 140141922332671,
+STORE, 140141746151424, 140141788114943,
+STORE, 140141737758720, 140141788114943,
+SNULL, 140141804904448, 140141813293055,
+STORE, 140141813293056, 140141821685759,
+STORE, 140141804904448, 140141813293055,
+SNULL, 140141813297151, 140141821685759,
+STORE, 140141813293056, 140141813297151,
+STORE, 140141813297152, 140141821685759,
+STORE, 140141729366016, 140141788114943,
+STORE, 140141720973312, 140141788114943,
+STORE, 140141712580608, 140141788114943,
+SNULL, 140141712584703, 140141788114943,
+STORE, 140141712580608, 140141712584703,
+STORE, 140141712584704, 140141788114943,
+SNULL, 140141922336768, 140141930725375,
+STORE, 140141930725376, 140141939118079,
+STORE, 140141922336768, 140141930725375,
+SNULL, 140141930729471, 140141939118079,
+STORE, 140141930725376, 140141930729471,
+STORE, 140141930729472, 140141939118079,
+STORE, 140141704187904, 140141712580607,
+SNULL, 140141704191999, 140141712580607,
+STORE, 140141704187904, 140141704191999,
+STORE, 140141704192000, 140141712580607,
+STORE, 140141695795200, 140141704187903,
+STORE, 140141687402496, 140141704187903,
+SNULL, 140141712584704, 140141771329535,
+STORE, 140141771329536, 140141788114943,
+STORE, 140141712584704, 140141771329535,
+SNULL, 140141771333631, 140141788114943,
+STORE, 140141771329536, 140141771333631,
+STORE, 140141771333632, 140141788114943,
+SNULL, 140141771333632, 140141779722239,
+STORE, 140141779722240, 140141788114943,
+STORE, 140141771333632, 140141779722239,
+SNULL, 140141779726335, 140141788114943,
+STORE, 140141779722240, 140141779726335,
+STORE, 140141779726336, 140141788114943,
+STORE, 140141679009792, 140141704187903,
+SNULL, 140141679013887, 140141704187903,
+STORE, 140141679009792, 140141679013887,
+STORE, 140141679013888, 140141704187903,
+STORE, 140141670617088, 140141679009791,
+SNULL, 140141670621183, 140141679009791,
+STORE, 140141670617088, 140141670621183,
+STORE, 140141670621184, 140141679009791,
+STORE, 140141662224384, 140141670617087,
+SNULL, 140141712584704, 140141737758719,
+STORE, 140141737758720, 140141771329535,
+STORE, 140141712584704, 140141737758719,
+SNULL, 140141737762815, 140141771329535,
+STORE, 140141737758720, 140141737762815,
+STORE, 140141737762816, 140141771329535,
+SNULL, 140141712584704, 140141729366015,
+STORE, 140141729366016, 140141737758719,
+STORE, 140141712584704, 140141729366015,
+SNULL, 140141729370111, 140141737758719,
+STORE, 140141729366016, 140141729370111,
+STORE, 140141729370112, 140141737758719,
+SNULL, 140141737762816, 140141746151423,
+STORE, 140141746151424, 140141771329535,
+STORE, 140141737762816, 140141746151423,
+SNULL, 140141746155519, 140141771329535,
+STORE, 140141746151424, 140141746155519,
+STORE, 140141746155520, 140141771329535,
+STORE, 140141653831680, 140141670617087,
+SNULL, 140141746155520, 140141762936831,
+STORE, 140141762936832, 140141771329535,
+STORE, 140141746155520, 140141762936831,
+SNULL, 140141762940927, 140141771329535,
+STORE, 140141762936832, 140141762940927,
+STORE, 140141762940928, 140141771329535,
+STORE, 140141645438976, 140141670617087,
+SNULL, 140141645443071, 140141670617087,
+STORE, 140141645438976, 140141645443071,
+STORE, 140141645443072, 140141670617087,
+SNULL, 140141712584704, 140141720973311,
+STORE, 140141720973312, 140141729366015,
+STORE, 140141712584704, 140141720973311,
+SNULL, 140141720977407, 140141729366015,
+STORE, 140141720973312, 140141720977407,
+STORE, 140141720977408, 140141729366015,
+STORE, 140141637046272, 140141645438975,
+SNULL, 140141637050367, 140141645438975,
+STORE, 140141637046272, 140141637050367,
+STORE, 140141637050368, 140141645438975,
+STORE, 140141628653568, 140141637046271,
+SNULL, 140141628657663, 140141637046271,
+STORE, 140141628653568, 140141628657663,
+STORE, 140141628657664, 140141637046271,
+STORE, 140141620260864, 140141628653567,
+SNULL, 140141679013888, 140141687402495,
+STORE, 140141687402496, 140141704187903,
+STORE, 140141679013888, 140141687402495,
+SNULL, 140141687406591, 140141704187903,
+STORE, 140141687402496, 140141687406591,
+STORE, 140141687406592, 140141704187903,
+SNULL, 140141746155520, 140141754544127,
+STORE, 140141754544128, 140141762936831,
+STORE, 140141746155520, 140141754544127,
+SNULL, 140141754548223, 140141762936831,
+STORE, 140141754544128, 140141754548223,
+STORE, 140141754548224, 140141762936831,
+SNULL, 140141687406592, 140141695795199,
+STORE, 140141695795200, 140141704187903,
+STORE, 140141687406592, 140141695795199,
+SNULL, 140141695799295, 140141704187903,
+STORE, 140141695795200, 140141695799295,
+STORE, 140141695799296, 140141704187903,
+STORE, 140141611868160, 140141628653567,
+SNULL, 140141611872255, 140141628653567,
+STORE, 140141611868160, 140141611872255,
+STORE, 140141611872256, 140141628653567,
+SNULL, 140141645443072, 140141662224383,
+STORE, 140141662224384, 140141670617087,
+STORE, 140141645443072, 140141662224383,
+SNULL, 140141662228479, 140141670617087,
+STORE, 140141662224384, 140141662228479,
+STORE, 140141662228480, 140141670617087,
+STORE, 140141603475456, 140141611868159,
+SNULL, 140141603479551, 140141611868159,
+STORE, 140141603475456, 140141603479551,
+STORE, 140141603479552, 140141611868159,
+STORE, 140141595082752, 140141603475455,
+SNULL, 140141645443072, 140141653831679,
+STORE, 140141653831680, 140141662224383,
+STORE, 140141645443072, 140141653831679,
+SNULL, 140141653835775, 140141662224383,
+STORE, 140141653831680, 140141653835775,
+STORE, 140141653835776, 140141662224383,
+STORE, 140141586690048, 140141603475455,
+SNULL, 140141611872256, 140141620260863,
+STORE, 140141620260864, 140141628653567,
+STORE, 140141611872256, 140141620260863,
+SNULL, 140141620264959, 140141628653567,
+STORE, 140141620260864, 140141620264959,
+STORE, 140141620264960, 140141628653567,
+SNULL, 140141586690048, 140141595082751,
+STORE, 140141595082752, 140141603475455,
+STORE, 140141586690048, 140141595082751,
+SNULL, 140141595086847, 140141603475455,
+STORE, 140141595082752, 140141595086847,
+STORE, 140141595086848, 140141603475455,
+STORE, 140141578297344, 140141595082751,
+SNULL, 140141578301439, 140141595082751,
+STORE, 140141578297344, 140141578301439,
+STORE, 140141578301440, 140141595082751,
+SNULL, 140141578301440, 140141586690047,
+STORE, 140141586690048, 140141595082751,
+STORE, 140141578301440, 140141586690047,
+SNULL, 140141586694143, 140141595082751,
+STORE, 140141586690048, 140141586694143,
+STORE, 140141586694144, 140141595082751,
+STORE, 140143370027008, 140143370055679,
+STORE, 140143309254656, 140143311446015,
+SNULL, 140143309254656, 140143309344767,
+STORE, 140143309344768, 140143311446015,
+STORE, 140143309254656, 140143309344767,
+SNULL, 140143311437823, 140143311446015,
+STORE, 140143309344768, 140143311437823,
+STORE, 140143311437824, 140143311446015,
+ERASE, 140143311437824, 140143311446015,
+STORE, 140143311437824, 140143311446015,
+SNULL, 140143311441919, 140143311446015,
+STORE, 140143311437824, 140143311441919,
+STORE, 140143311441920, 140143311446015,
+ERASE, 140143370027008, 140143370055679,
+ERASE, 140142912180224, 140142912184319,
+ERASE, 140142912184320, 140142920572927,
+ERASE, 140142945751040, 140142945755135,
+ERASE, 140142945755136, 140142954143743,
+ERASE, 140142090121216, 140142090125311,
+ERASE, 140142090125312, 140142098513919,
+ERASE, 140142794747904, 140142794751999,
+ERASE, 140142794752000, 140142803140607,
+ERASE, 140141913939968, 140141913944063,
+ERASE, 140141913944064, 140141922332671,
+ERASE, 140141746151424, 140141746155519,
+ERASE, 140141746155520, 140141754544127,
+ERASE, 140142954143744, 140142954147839,
+ERASE, 140142954147840, 140142962536447,
+ERASE, 140142081728512, 140142081732607,
+ERASE, 140142081732608, 140142090121215,
+ERASE, 140141905547264, 140141905551359,
+ERASE, 140141905551360, 140141913939967,
+ERASE, 140141729366016, 140141729370111,
+ERASE, 140141729370112, 140141737758719,
+ERASE, 140142920572928, 140142920577023,
+ERASE, 140142920577024, 140142928965631,
+ERASE, 140142039764992, 140142039769087,
+ERASE, 140142039769088, 140142048157695,
+ERASE, 140141679009792, 140141679013887,
+ERASE, 140141679013888, 140141687402495,
+ERASE, 140142551490560, 140142551494655,
+ERASE, 140142551494656, 140142559883263,
+ERASE, 140141947510784, 140141947514879,
+ERASE, 140141947514880, 140141955903487,
+ERASE, 140141771329536, 140141771333631,
+ERASE, 140141771333632, 140141779722239,
+ERASE, 140142928965632, 140142928969727,
+ERASE, 140142928969728, 140142937358335,
+ERASE, 140142073335808, 140142073339903,
+ERASE, 140142073339904, 140142081728511,
+ERASE, 140142543097856, 140142543101951,
+ERASE, 140142543101952, 140142551490559,
+ERASE, 140141955903488, 140141955907583,
+ERASE, 140141955907584, 140141964296191,
+ERASE, 140141704187904, 140141704191999,
+ERASE, 140141704192000, 140141712580607,
+ERASE, 140142786355200, 140142786359295,
+ERASE, 140142786359296, 140142794747903,
+ERASE, 140142056550400, 140142056554495,
+ERASE, 140142056554496, 140142064943103,
+ERASE, 140142828318720, 140142828322815,
+ERASE, 140142828322816, 140142836711423,
+ERASE, 140141788114944, 140141788119039,
+ERASE, 140141788119040, 140141796507647,
+ERASE, 140141695795200, 140141695799295,
+ERASE, 140141695799296, 140141704187903,
+ERASE, 140141578297344, 140141578301439,
+ERASE, 140141578301440, 140141586690047,
+ERASE, 140141611868160, 140141611872255,
+ERASE, 140141611872256, 140141620260863,
+ERASE, 140142811533312, 140142811537407,
+ERASE, 140142811537408, 140142819926015,
+ERASE, 140142064943104, 140142064947199,
+ERASE, 140142064947200, 140142073335807,
+ERASE, 140141628653568, 140141628657663,
+ERASE, 140141628657664, 140141637046271,
+ERASE, 140143046397952, 140143046402047,
+ERASE, 140143046402048, 140143054790655,
+ERASE, 140141796507648, 140141796511743,
+ERASE, 140141796511744, 140141804900351,
+ERASE, 140142803140608, 140142803144703,
+ERASE, 140142803144704, 140142811533311,
+ERASE, 140142509527040, 140142509531135,
+ERASE, 140142509531136, 140142517919743,
+ERASE, 140141821685760, 140141821689855,
+ERASE, 140141821689856, 140141830078463,
+ERASE, 140142777962496, 140142777966591,
+ERASE, 140142777966592, 140142786355199,
+ERASE, 140141804900352, 140141804904447,
+ERASE, 140141804904448, 140141813293055,
+ERASE, 140141930725376, 140141930729471,
+ERASE, 140141930729472, 140141939118079,
+ERASE, 140142937358336, 140142937362431,
+ERASE, 140142937362432, 140142945751039,
+ERASE, 140142559883264, 140142559887359,
+ERASE, 140142559887360, 140142568275967,
+ERASE, 140142534705152, 140142534709247,
+ERASE, 140142534709248, 140142543097855,
+ERASE, 140142048157696, 140142048161791,
+ERASE, 140142048161792, 140142056550399,
+ERASE, 140141754544128, 140141754548223,
+ERASE, 140141754548224, 140141762936831,
+ERASE, 140141939118080, 140141939122175,
+ERASE, 140141939122176, 140141947510783,
+ERASE, 140141653831680, 140141653835775,
+ERASE, 140141653835776, 140141662224383,
+ERASE, 140141712580608, 140141712584703,
+ERASE, 140141712584704, 140141720973311,
+ERASE, 140141645438976, 140141645443071,
+ERASE, 140141645443072, 140141653831679,
+ERASE, 140141687402496, 140141687406591,
+ERASE, 140141687406592, 140141695795199,
+ERASE, 140141662224384, 140141662228479,
+ERASE, 140141662228480, 140141670617087,
+ERASE, 140141922332672, 140141922336767,
+ERASE, 140141922336768, 140141930725375,
+ERASE, 140141737758720, 140141737762815,
+ERASE, 140141737762816, 140141746151423,
+ERASE, 140141637046272, 140141637050367,
+ERASE, 140141637050368, 140141645438975,
+ERASE, 140142517919744, 140142517923839,
+ERASE, 140142517923840, 140142526312447,
+ERASE, 140143096754176, 140143096758271,
+ERASE, 140143096758272, 140143105146879,
+ERASE, 140141595082752, 140141595086847,
+ERASE, 140141595086848, 140141603475455,
+ERASE, 140141762936832, 140141762940927,
+ERASE, 140141762940928, 140141771329535,
+ERASE, 140143311446016, 140143311450111,
+ERASE, 140143311450112, 140143319838719,
+ERASE, 140142526312448, 140142526316543,
+ERASE, 140142526316544, 140142534705151,
+ERASE, 140142819926016, 140142819930111,
+ERASE, 140142819930112, 140142828318719,
+ERASE, 140143180615680, 140143180619775,
+ERASE, 140143180619776, 140143189008383,
+ERASE, 140142962536448, 140142962540543,
+ERASE, 140142962540544, 140142970929151,
+ERASE, 140143214186496, 140143214190591,
+ERASE, 140143214190592, 140143222579199,
+ERASE, 140143088361472, 140143088365567,
+ERASE, 140143088365568, 140143096754175,
+ERASE, 140141586690048, 140141586694143,
+ERASE, 140141586694144, 140141595082751,
+ERASE, 140143230971904, 140143230975999,
+ERASE, 140143230976000, 140143239364607,
+ERASE, 140141779722240, 140141779726335,
+ERASE, 140141779726336, 140141788114943,
+ERASE, 140141670617088, 140141670621183,
+ERASE, 140141670621184, 140141679009791,
+ERASE, 140141813293056, 140141813297151,
+ERASE, 140141813297152, 140141821685759,
+ERASE, 140143222579200, 140143222583295,
+ERASE, 140143222583296, 140143230971903,
+ERASE, 140143189008384, 140143189012479,
+ERASE, 140143189012480, 140143197401087,
+ERASE, 140143071576064, 140143071580159,
+ERASE, 140143071580160, 140143079968767,
+ERASE, 140141620260864, 140141620264959,
+ERASE, 140141620264960, 140141628653567,
+ERASE, 140141603475456, 140141603479551,
+ERASE, 140141603479552, 140141611868159,
+ERASE, 140141720973312, 140141720977407,
+ERASE, 140141720977408, 140141729366015,
+ERASE, 140143079968768, 140143079972863,
+ERASE, 140143079972864, 140143088361471,
+ERASE, 140143205793792, 140143205797887,
+ERASE, 140143205797888, 140143214186495,
+ };
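+ /*
+  * Each set is a flat list of (operation, start, end) triples:
+  * STORE writes an entry covering [start, end], SNULL stores NULL
+  * over the range (splitting whatever it overlaps), and ERASE
+  * removes the entry containing the range. The address values
+  * appear to be recorded from live mmap()/munmap() traffic, which
+  * is what makes these sets useful regression fodder for
+  * range-splitting corner cases.
+  */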
+ static const unsigned long set30[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140733436743680, 140737488351231,
+SNULL, 140733436747775, 140737488351231,
+STORE, 140733436743680, 140733436747775,
+STORE, 140733436612608, 140733436747775,
+STORE, 94630728904704, 94630731157503,
+SNULL, 94630729035775, 94630731157503,
+STORE, 94630728904704, 94630729035775,
+STORE, 94630729035776, 94630731157503,
+ERASE, 94630729035776, 94630731157503,
+STORE, 94630731128832, 94630731137023,
+STORE, 94630731137024, 94630731157503,
+STORE, 140165750841344, 140165753094143,
+SNULL, 140165750984703, 140165753094143,
+STORE, 140165750841344, 140165750984703,
+STORE, 140165750984704, 140165753094143,
+ERASE, 140165750984704, 140165753094143,
+STORE, 140165753081856, 140165753090047,
+STORE, 140165753090048, 140165753094143,
+STORE, 140733436887040, 140733436891135,
+STORE, 140733436874752, 140733436887039,
+STORE, 140165753053184, 140165753081855,
+STORE, 140165753044992, 140165753053183,
+STORE, 140165748625408, 140165750841343,
+SNULL, 140165748625408, 140165748723711,
+STORE, 140165748723712, 140165750841343,
+STORE, 140165748625408, 140165748723711,
+SNULL, 140165750816767, 140165750841343,
+STORE, 140165748723712, 140165750816767,
+STORE, 140165750816768, 140165750841343,
+SNULL, 140165750816768, 140165750824959,
+STORE, 140165750824960, 140165750841343,
+STORE, 140165750816768, 140165750824959,
+ERASE, 140165750816768, 140165750824959,
+STORE, 140165750816768, 140165750824959,
+ERASE, 140165750824960, 140165750841343,
+STORE, 140165750824960, 140165750841343,
+STORE, 140165744828416, 140165748625407,
+SNULL, 140165744828416, 140165746487295,
+STORE, 140165746487296, 140165748625407,
+STORE, 140165744828416, 140165746487295,
+SNULL, 140165748584447, 140165748625407,
+STORE, 140165746487296, 140165748584447,
+STORE, 140165748584448, 140165748625407,
+SNULL, 140165748584448, 140165748609023,
+STORE, 140165748609024, 140165748625407,
+STORE, 140165748584448, 140165748609023,
+ERASE, 140165748584448, 140165748609023,
+STORE, 140165748584448, 140165748609023,
+ERASE, 140165748609024, 140165748625407,
+STORE, 140165748609024, 140165748625407,
+STORE, 140165753036800, 140165753053183,
+SNULL, 140165748600831, 140165748609023,
+STORE, 140165748584448, 140165748600831,
+STORE, 140165748600832, 140165748609023,
+SNULL, 140165750820863, 140165750824959,
+STORE, 140165750816768, 140165750820863,
+STORE, 140165750820864, 140165750824959,
+SNULL, 94630731132927, 94630731137023,
+STORE, 94630731128832, 94630731132927,
+STORE, 94630731132928, 94630731137023,
+SNULL, 140165753085951, 140165753090047,
+STORE, 140165753081856, 140165753085951,
+STORE, 140165753085952, 140165753090047,
+ERASE, 140165753053184, 140165753081855,
+STORE, 94630743547904, 94630743683071,
+STORE, 140165736435712, 140165744828415,
+SNULL, 140165736439807, 140165744828415,
+STORE, 140165736435712, 140165736439807,
+STORE, 140165736439808, 140165744828415,
+STORE, 140165728043008, 140165736435711,
+STORE, 140165593825280, 140165728043007,
+SNULL, 140165593825280, 140165653725183,
+STORE, 140165653725184, 140165728043007,
+STORE, 140165593825280, 140165653725183,
+ERASE, 140165593825280, 140165653725183,
+SNULL, 140165720834047, 140165728043007,
+STORE, 140165653725184, 140165720834047,
+STORE, 140165720834048, 140165728043007,
+ERASE, 140165720834048, 140165728043007,
+SNULL, 140165653860351, 140165720834047,
+STORE, 140165653725184, 140165653860351,
+STORE, 140165653860352, 140165720834047,
+SNULL, 140165728047103, 140165736435711,
+STORE, 140165728043008, 140165728047103,
+STORE, 140165728047104, 140165736435711,
+STORE, 140165645332480, 140165653725183,
+SNULL, 140165645336575, 140165653725183,
+STORE, 140165645332480, 140165645336575,
+STORE, 140165645336576, 140165653725183,
+STORE, 140165636939776, 140165645332479,
+SNULL, 140165636943871, 140165645332479,
+STORE, 140165636939776, 140165636943871,
+STORE, 140165636943872, 140165645332479,
+STORE, 140165628547072, 140165636939775,
+SNULL, 140165628551167, 140165636939775,
+STORE, 140165628547072, 140165628551167,
+STORE, 140165628551168, 140165636939775,
+STORE, 140165620154368, 140165628547071,
+STORE, 140165611761664, 140165628547071,
+STORE, 140165603368960, 140165628547071,
+STORE, 140165469151232, 140165603368959,
+SNULL, 140165469151232, 140165519507455,
+STORE, 140165519507456, 140165603368959,
+STORE, 140165469151232, 140165519507455,
+ERASE, 140165469151232, 140165519507455,
+SNULL, 140165586616319, 140165603368959,
+STORE, 140165519507456, 140165586616319,
+STORE, 140165586616320, 140165603368959,
+ERASE, 140165586616320, 140165603368959,
+STORE, 140165594976256, 140165628547071,
+STORE, 140165385289728, 140165586616319,
+SNULL, 140165452398591, 140165586616319,
+STORE, 140165385289728, 140165452398591,
+STORE, 140165452398592, 140165586616319,
+SNULL, 140165452398592, 140165519507455,
+STORE, 140165519507456, 140165586616319,
+STORE, 140165452398592, 140165519507455,
+ERASE, 140165452398592, 140165519507455,
+STORE, 140165251072000, 140165452398591,
+SNULL, 140165318180863, 140165452398591,
+STORE, 140165251072000, 140165318180863,
+STORE, 140165318180864, 140165452398591,
+SNULL, 140165318180864, 140165385289727,
+STORE, 140165385289728, 140165452398591,
+STORE, 140165318180864, 140165385289727,
+ERASE, 140165318180864, 140165385289727,
+SNULL, 140165519642623, 140165586616319,
+STORE, 140165519507456, 140165519642623,
+STORE, 140165519642624, 140165586616319,
+SNULL, 140165594976256, 140165611761663,
+STORE, 140165611761664, 140165628547071,
+STORE, 140165594976256, 140165611761663,
+SNULL, 140165611765759, 140165628547071,
+STORE, 140165611761664, 140165611765759,
+STORE, 140165611765760, 140165628547071,
+STORE, 140165385289728, 140165519507455,
+SNULL, 140165385424895, 140165519507455,
+STORE, 140165385289728, 140165385424895,
+STORE, 140165385424896, 140165519507455,
+SNULL, 140165594976256, 140165603368959,
+STORE, 140165603368960, 140165611761663,
+STORE, 140165594976256, 140165603368959,
+SNULL, 140165603373055, 140165611761663,
+STORE, 140165603368960, 140165603373055,
+STORE, 140165603373056, 140165611761663,
+SNULL, 140165251207167, 140165318180863,
+STORE, 140165251072000, 140165251207167,
+STORE, 140165251207168, 140165318180863,
+STORE, 140165376897024, 140165385289727,
+SNULL, 140165376901119, 140165385289727,
+STORE, 140165376897024, 140165376901119,
+STORE, 140165376901120, 140165385289727,
+SNULL, 140165385424896, 140165452398591,
+STORE, 140165452398592, 140165519507455,
+STORE, 140165385424896, 140165452398591,
+SNULL, 140165452533759, 140165519507455,
+STORE, 140165452398592, 140165452533759,
+STORE, 140165452533760, 140165519507455,
+STORE, 140165368504320, 140165376897023,
+SNULL, 140165594980351, 140165603368959,
+STORE, 140165594976256, 140165594980351,
+STORE, 140165594980352, 140165603368959,
+SNULL, 140165368508415, 140165376897023,
+STORE, 140165368504320, 140165368508415,
+STORE, 140165368508416, 140165376897023,
+SNULL, 140165611765760, 140165620154367,
+STORE, 140165620154368, 140165628547071,
+STORE, 140165611765760, 140165620154367,
+SNULL, 140165620158463, 140165628547071,
+STORE, 140165620154368, 140165620158463,
+STORE, 140165620158464, 140165628547071,
+STORE, 140165360111616, 140165368504319,
+STORE, 140165351718912, 140165368504319,
+STORE, 140165343326208, 140165368504319,
+SNULL, 140165343326208, 140165351718911,
+STORE, 140165351718912, 140165368504319,
+STORE, 140165343326208, 140165351718911,
+SNULL, 140165351723007, 140165368504319,
+STORE, 140165351718912, 140165351723007,
+STORE, 140165351723008, 140165368504319,
+SNULL, 140165343330303, 140165351718911,
+STORE, 140165343326208, 140165343330303,
+STORE, 140165343330304, 140165351718911,
+SNULL, 140165351723008, 140165360111615,
+STORE, 140165360111616, 140165368504319,
+STORE, 140165351723008, 140165360111615,
+SNULL, 140165360115711, 140165368504319,
+STORE, 140165360111616, 140165360115711,
+STORE, 140165360115712, 140165368504319,
+STORE, 140165334933504, 140165343326207,
+SNULL, 140165334937599, 140165343326207,
+STORE, 140165334933504, 140165334937599,
+STORE, 140165334937600, 140165343326207,
+STORE, 140165326540800, 140165334933503,
+STORE, 140165242679296, 140165251071999,
+SNULL, 140165242683391, 140165251071999,
+STORE, 140165242679296, 140165242683391,
+STORE, 140165242683392, 140165251071999,
+STORE, 140165234286592, 140165242679295,
+STORE, 140165225893888, 140165242679295,
+SNULL, 140165225897983, 140165242679295,
+STORE, 140165225893888, 140165225897983,
+STORE, 140165225897984, 140165242679295,
+SNULL, 140165225897984, 140165234286591,
+STORE, 140165234286592, 140165242679295,
+STORE, 140165225897984, 140165234286591,
+SNULL, 140165234290687, 140165242679295,
+STORE, 140165234286592, 140165234290687,
+STORE, 140165234290688, 140165242679295,
+SNULL, 140165326544895, 140165334933503,
+STORE, 140165326540800, 140165326544895,
+STORE, 140165326544896, 140165334933503,
+STORE, 140165217501184, 140165225893887,
+STORE, 140165209108480, 140165225893887,
+SNULL, 140165209108480, 140165217501183,
+STORE, 140165217501184, 140165225893887,
+STORE, 140165209108480, 140165217501183,
+SNULL, 140165217505279, 140165225893887,
+STORE, 140165217501184, 140165217505279,
+STORE, 140165217505280, 140165225893887,
+SNULL, 140165209112575, 140165217501183,
+STORE, 140165209108480, 140165209112575,
+STORE, 140165209112576, 140165217501183,
+STORE, 140165200715776, 140165209108479,
+STORE, 140165066498048, 140165200715775,
+SNULL, 140165066498048, 140165116854271,
+STORE, 140165116854272, 140165200715775,
+STORE, 140165066498048, 140165116854271,
+ERASE, 140165066498048, 140165116854271,
+SNULL, 140165183963135, 140165200715775,
+STORE, 140165116854272, 140165183963135,
+STORE, 140165183963136, 140165200715775,
+ERASE, 140165183963136, 140165200715775,
+SNULL, 140165116989439, 140165183963135,
+STORE, 140165116854272, 140165116989439,
+STORE, 140165116989440, 140165183963135,
+STORE, 140165192323072, 140165209108479,
+STORE, 140165108461568, 140165116854271,
+STORE, 140164974243840, 140165108461567,
+STORE, 140164965851136, 140164974243839,
+SNULL, 140164974243840, 140164982636543,
+STORE, 140164982636544, 140165108461567,
+STORE, 140164974243840, 140164982636543,
+ERASE, 140164974243840, 140164982636543,
+STORE, 140164965851136, 140164982636543,
+STORE, 140164957458432, 140164982636543,
+STORE, 140164949065728, 140164982636543,
+STORE, 140164940673024, 140164982636543,
+STORE, 140164806455296, 140164940673023,
+STORE, 140164798062592, 140164806455295,
+STORE, 140164789669888, 140164806455295,
+STORE, 140164655452160, 140164789669887,
+STORE, 140164647059456, 140164655452159,
+STORE, 140164638666752, 140164655452159,
+SNULL, 140164655452160, 140164714201087,
+STORE, 140164714201088, 140164789669887,
+STORE, 140164655452160, 140164714201087,
+ERASE, 140164655452160, 140164714201087,
+STORE, 140164705808384, 140164714201087,
+STORE, 140164697415680, 140164714201087,
+STORE, 140164504449024, 140164638666751,
+SNULL, 140164504449024, 140164512874495,
+STORE, 140164512874496, 140164638666751,
+STORE, 140164504449024, 140164512874495,
+ERASE, 140164504449024, 140164512874495,
+STORE, 140164689022976, 140164714201087,
+STORE, 140164680630272, 140164714201087,
+SNULL, 140164680634367, 140164714201087,
+STORE, 140164680630272, 140164680634367,
+STORE, 140164680634368, 140164714201087,
+STORE, 140164378656768, 140164638666751,
+SNULL, 140165192323072, 140165200715775,
+STORE, 140165200715776, 140165209108479,
+STORE, 140165192323072, 140165200715775,
+SNULL, 140165200719871, 140165209108479,
+STORE, 140165200715776, 140165200719871,
+STORE, 140165200719872, 140165209108479,
+SNULL, 140165049745407, 140165108461567,
+STORE, 140164982636544, 140165049745407,
+STORE, 140165049745408, 140165108461567,
+ERASE, 140165049745408, 140165108461567,
+SNULL, 140164982771711, 140165049745407,
+STORE, 140164982636544, 140164982771711,
+STORE, 140164982771712, 140165049745407,
+STORE, 140164244439040, 140164638666751,
+SNULL, 140164311547903, 140164638666751,
+STORE, 140164244439040, 140164311547903,
+STORE, 140164311547904, 140164638666751,
+SNULL, 140164311547904, 140164378656767,
+STORE, 140164378656768, 140164638666751,
+STORE, 140164311547904, 140164378656767,
+ERASE, 140164311547904, 140164378656767,
+SNULL, 140164806455296, 140164848418815,
+STORE, 140164848418816, 140164940673023,
+STORE, 140164806455296, 140164848418815,
+ERASE, 140164806455296, 140164848418815,
+SNULL, 140164915527679, 140164940673023,
+STORE, 140164848418816, 140164915527679,
+STORE, 140164915527680, 140164940673023,
+ERASE, 140164915527680, 140164940673023,
+STORE, 140164110221312, 140164311547903,
+SNULL, 140164177330175, 140164311547903,
+STORE, 140164110221312, 140164177330175,
+STORE, 140164177330176, 140164311547903,
+SNULL, 140164177330176, 140164244439039,
+STORE, 140164244439040, 140164311547903,
+STORE, 140164177330176, 140164244439039,
+ERASE, 140164177330176, 140164244439039,
+SNULL, 140164781309951, 140164789669887,
+STORE, 140164714201088, 140164781309951,
+STORE, 140164781309952, 140164789669887,
+ERASE, 140164781309952, 140164789669887,
+STORE, 140163976003584, 140164177330175,
+SNULL, 140164043112447, 140164177330175,
+STORE, 140163976003584, 140164043112447,
+STORE, 140164043112448, 140164177330175,
+SNULL, 140164043112448, 140164110221311,
+STORE, 140164110221312, 140164177330175,
+STORE, 140164043112448, 140164110221311,
+ERASE, 140164043112448, 140164110221311,
+SNULL, 140164579983359, 140164638666751,
+STORE, 140164378656768, 140164579983359,
+STORE, 140164579983360, 140164638666751,
+ERASE, 140164579983360, 140164638666751,
+STORE, 140163841785856, 140164043112447,
+SNULL, 140163908894719, 140164043112447,
+STORE, 140163841785856, 140163908894719,
+STORE, 140163908894720, 140164043112447,
+SNULL, 140163908894720, 140163976003583,
+STORE, 140163976003584, 140164043112447,
+STORE, 140163908894720, 140163976003583,
+ERASE, 140163908894720, 140163976003583,
+SNULL, 140164940673024, 140164965851135,
+STORE, 140164965851136, 140164982636543,
+STORE, 140164940673024, 140164965851135,
+SNULL, 140164965855231, 140164982636543,
+STORE, 140164965851136, 140164965855231,
+STORE, 140164965855232, 140164982636543,
+SNULL, 140164965855232, 140164974243839,
+STORE, 140164974243840, 140164982636543,
+STORE, 140164965855232, 140164974243839,
+SNULL, 140164974247935, 140164982636543,
+STORE, 140164974243840, 140164974247935,
+STORE, 140164974247936, 140164982636543,
+SNULL, 140164445765631, 140164579983359,
+STORE, 140164378656768, 140164445765631,
+STORE, 140164445765632, 140164579983359,
+SNULL, 140164445765632, 140164512874495,
+STORE, 140164512874496, 140164579983359,
+STORE, 140164445765632, 140164512874495,
+ERASE, 140164445765632, 140164512874495,
+SNULL, 140164378791935, 140164445765631,
+STORE, 140164378656768, 140164378791935,
+STORE, 140164378791936, 140164445765631,
+SNULL, 140164789673983, 140164806455295,
+STORE, 140164789669888, 140164789673983,
+STORE, 140164789673984, 140164806455295,
+SNULL, 140164789673984, 140164798062591,
+STORE, 140164798062592, 140164806455295,
+STORE, 140164789673984, 140164798062591,
+SNULL, 140164798066687, 140164806455295,
+STORE, 140164798062592, 140164798066687,
+STORE, 140164798066688, 140164806455295,
+SNULL, 140164638670847, 140164655452159,
+STORE, 140164638666752, 140164638670847,
+STORE, 140164638670848, 140164655452159,
+STORE, 140165100068864, 140165116854271,
+STORE, 140165091676160, 140165116854271,
+STORE, 140165083283456, 140165116854271,
+SNULL, 140164244574207, 140164311547903,
+STORE, 140164244439040, 140164244574207,
+STORE, 140164244574208, 140164311547903,
+SNULL, 140164848553983, 140164915527679,
+STORE, 140164848418816, 140164848553983,
+STORE, 140164848553984, 140164915527679,
+SNULL, 140164110356479, 140164177330175,
+STORE, 140164110221312, 140164110356479,
+STORE, 140164110356480, 140164177330175,
+SNULL, 140164714336255, 140164781309951,
+STORE, 140164714201088, 140164714336255,
+STORE, 140164714336256, 140164781309951,
+SNULL, 140163976138751, 140164043112447,
+STORE, 140163976003584, 140163976138751,
+STORE, 140163976138752, 140164043112447,
+SNULL, 140164513009663, 140164579983359,
+STORE, 140164512874496, 140164513009663,
+STORE, 140164513009664, 140164579983359,
+SNULL, 140163841921023, 140163908894719,
+STORE, 140163841785856, 140163841921023,
+STORE, 140163841921024, 140163908894719,
+SNULL, 140165083283456, 140165100068863,
+STORE, 140165100068864, 140165116854271,
+STORE, 140165083283456, 140165100068863,
+SNULL, 140165100072959, 140165116854271,
+STORE, 140165100068864, 140165100072959,
+STORE, 140165100072960, 140165116854271,
+SNULL, 140165100072960, 140165108461567,
+STORE, 140165108461568, 140165116854271,
+STORE, 140165100072960, 140165108461567,
+SNULL, 140165108465663, 140165116854271,
+STORE, 140165108461568, 140165108465663,
+STORE, 140165108465664, 140165116854271,
+STORE, 140165074890752, 140165100068863,
+SNULL, 140165074894847, 140165100068863,
+STORE, 140165074890752, 140165074894847,
+STORE, 140165074894848, 140165100068863,
+STORE, 140165066498048, 140165074890751,
+STORE, 140165058105344, 140165074890751,
+STORE, 140164932280320, 140164965851135,
+SNULL, 140165192327167, 140165200715775,
+STORE, 140165192323072, 140165192327167,
+STORE, 140165192327168, 140165200715775,
+STORE, 140164923887616, 140164965851135,
+SNULL, 140164923891711, 140164965851135,
+STORE, 140164923887616, 140164923891711,
+STORE, 140164923891712, 140164965851135,
+SNULL, 140164680634368, 140164705808383,
+STORE, 140164705808384, 140164714201087,
+STORE, 140164680634368, 140164705808383,
+SNULL, 140164705812479, 140164714201087,
+STORE, 140164705808384, 140164705812479,
+STORE, 140164705812480, 140164714201087,
+SNULL, 140164680634368, 140164697415679,
+STORE, 140164697415680, 140164705808383,
+STORE, 140164680634368, 140164697415679,
+SNULL, 140164697419775, 140164705808383,
+STORE, 140164697415680, 140164697419775,
+STORE, 140164697419776, 140164705808383,
+STORE, 140164840026112, 140164848418815,
+STORE, 140164831633408, 140164848418815,
+STORE, 140164823240704, 140164848418815,
+SNULL, 140165074894848, 140165083283455,
+STORE, 140165083283456, 140165100068863,
+STORE, 140165074894848, 140165083283455,
+SNULL, 140165083287551, 140165100068863,
+STORE, 140165083283456, 140165083287551,
+STORE, 140165083287552, 140165100068863,
+SNULL, 140165083287552, 140165091676159,
+STORE, 140165091676160, 140165100068863,
+STORE, 140165083287552, 140165091676159,
+SNULL, 140165091680255, 140165100068863,
+STORE, 140165091676160, 140165091680255,
+STORE, 140165091680256, 140165100068863,
+SNULL, 140164638670848, 140164647059455,
+STORE, 140164647059456, 140164655452159,
+STORE, 140164638670848, 140164647059455,
+SNULL, 140164647063551, 140164655452159,
+STORE, 140164647059456, 140164647063551,
+STORE, 140164647063552, 140164655452159,
+SNULL, 140164923891712, 140164940673023,
+STORE, 140164940673024, 140164965851135,
+STORE, 140164923891712, 140164940673023,
+SNULL, 140164940677119, 140164965851135,
+STORE, 140164940673024, 140164940677119,
+STORE, 140164940677120, 140164965851135,
+SNULL, 140164940677120, 140164949065727,
+STORE, 140164949065728, 140164965851135,
+STORE, 140164940677120, 140164949065727,
+SNULL, 140164949069823, 140164965851135,
+STORE, 140164949065728, 140164949069823,
+STORE, 140164949069824, 140164965851135,
+SNULL, 140164949069824, 140164957458431,
+STORE, 140164957458432, 140164965851135,
+STORE, 140164949069824, 140164957458431,
+SNULL, 140164957462527, 140164965851135,
+STORE, 140164957458432, 140164957462527,
+STORE, 140164957462528, 140164965851135,
+SNULL, 140164680634368, 140164689022975,
+STORE, 140164689022976, 140164697415679,
+STORE, 140164680634368, 140164689022975,
+SNULL, 140164689027071, 140164697415679,
+STORE, 140164689022976, 140164689027071,
+STORE, 140164689027072, 140164697415679,
+STORE, 140164814848000, 140164848418815,
+SNULL, 140165058105344, 140165066498047,
+STORE, 140165066498048, 140165074890751,
+STORE, 140165058105344, 140165066498047,
+SNULL, 140165066502143, 140165074890751,
+STORE, 140165066498048, 140165066502143,
+STORE, 140165066502144, 140165074890751,
+SNULL, 140165058109439, 140165066498047,
+STORE, 140165058105344, 140165058109439,
+STORE, 140165058109440, 140165066498047,
+STORE, 140164798066688, 140164814847999,
+SNULL, 140164798066688, 140164806455295,
+STORE, 140164806455296, 140164814847999,
+STORE, 140164798066688, 140164806455295,
+SNULL, 140164806459391, 140164814847999,
+STORE, 140164806455296, 140164806459391,
+STORE, 140164806459392, 140164814847999,
+SNULL, 140164923891712, 140164932280319,
+STORE, 140164932280320, 140164940673023,
+STORE, 140164923891712, 140164932280319,
+SNULL, 140164932284415, 140164940673023,
+STORE, 140164932280320, 140164932284415,
+STORE, 140164932284416, 140164940673023,
+STORE, 140164672237568, 140164680630271,
+STORE, 140164663844864, 140164680630271,
+STORE, 140164647063552, 140164680630271,
+SNULL, 140164647063552, 140164655452159,
+STORE, 140164655452160, 140164680630271,
+STORE, 140164647063552, 140164655452159,
+SNULL, 140164655456255, 140164680630271,
+STORE, 140164655452160, 140164655456255,
+STORE, 140164655456256, 140164680630271,
+STORE, 140164630274048, 140164638666751,
+SNULL, 140164814852095, 140164848418815,
+STORE, 140164814848000, 140164814852095,
+STORE, 140164814852096, 140164848418815,
+SNULL, 140164814852096, 140164831633407,
+STORE, 140164831633408, 140164848418815,
+STORE, 140164814852096, 140164831633407,
+SNULL, 140164831637503, 140164848418815,
+STORE, 140164831633408, 140164831637503,
+STORE, 140164831637504, 140164848418815,
+STORE, 140164621881344, 140164638666751,
+SNULL, 140164831637504, 140164840026111,
+STORE, 140164840026112, 140164848418815,
+STORE, 140164831637504, 140164840026111,
+SNULL, 140164840030207, 140164848418815,
+STORE, 140164840026112, 140164840030207,
+STORE, 140164840030208, 140164848418815,
+STORE, 140164613488640, 140164638666751,
+SNULL, 140164613492735, 140164638666751,
+STORE, 140164613488640, 140164613492735,
+STORE, 140164613492736, 140164638666751,
+STORE, 140164605095936, 140164613488639,
+SNULL, 140164605100031, 140164613488639,
+STORE, 140164605095936, 140164605100031,
+STORE, 140164605100032, 140164613488639,
+STORE, 140164596703232, 140164605095935,
+STORE, 140164588310528, 140164605095935,
+SNULL, 140164588314623, 140164605095935,
+STORE, 140164588310528, 140164588314623,
+STORE, 140164588314624, 140164605095935,
+STORE, 140164504481792, 140164512874495,
+STORE, 140164496089088, 140164512874495,
+SNULL, 140164496089088, 140164504481791,
+STORE, 140164504481792, 140164512874495,
+STORE, 140164496089088, 140164504481791,
+SNULL, 140164504485887, 140164512874495,
+STORE, 140164504481792, 140164504485887,
+STORE, 140164504485888, 140164512874495,
+SNULL, 140164613492736, 140164630274047,
+STORE, 140164630274048, 140164638666751,
+STORE, 140164613492736, 140164630274047,
+SNULL, 140164630278143, 140164638666751,
+STORE, 140164630274048, 140164630278143,
+STORE, 140164630278144, 140164638666751,
+STORE, 140164487696384, 140164504481791,
+STORE, 140164479303680, 140164504481791,
+SNULL, 140164814852096, 140164823240703,
+STORE, 140164823240704, 140164831633407,
+STORE, 140164814852096, 140164823240703,
+SNULL, 140164823244799, 140164831633407,
+STORE, 140164823240704, 140164823244799,
+STORE, 140164823244800, 140164831633407,
+STORE, 140164470910976, 140164504481791,
+SNULL, 140164470910976, 140164496089087,
+STORE, 140164496089088, 140164504481791,
+STORE, 140164470910976, 140164496089087,
+SNULL, 140164496093183, 140164504481791,
+STORE, 140164496089088, 140164496093183,
+STORE, 140164496093184, 140164504481791,
+SNULL, 140164655456256, 140164672237567,
+STORE, 140164672237568, 140164680630271,
+STORE, 140164655456256, 140164672237567,
+SNULL, 140164672241663, 140164680630271,
+STORE, 140164672237568, 140164672241663,
+STORE, 140164672241664, 140164680630271,
+STORE, 140164462518272, 140164496089087,
+STORE, 140164454125568, 140164496089087,
+SNULL, 140164655456256, 140164663844863,
+STORE, 140164663844864, 140164672237567,
+STORE, 140164655456256, 140164663844863,
+SNULL, 140164663848959, 140164672237567,
+STORE, 140164663844864, 140164663848959,
+STORE, 140164663848960, 140164672237567,
+STORE, 140164370264064, 140164378656767,
+STORE, 140164361871360, 140164378656767,
+STORE, 140164353478656, 140164378656767,
+STORE, 140164345085952, 140164378656767,
+SNULL, 140164345085952, 140164353478655,
+STORE, 140164353478656, 140164378656767,
+STORE, 140164345085952, 140164353478655,
+SNULL, 140164353482751, 140164378656767,
+STORE, 140164353478656, 140164353482751,
+STORE, 140164353482752, 140164378656767,
+SNULL, 140164454125568, 140164487696383,
+STORE, 140164487696384, 140164496089087,
+STORE, 140164454125568, 140164487696383,
+SNULL, 140164487700479, 140164496089087,
+STORE, 140164487696384, 140164487700479,
+STORE, 140164487700480, 140164496089087,
+STORE, 140164336693248, 140164353478655,
+SNULL, 140164336697343, 140164353478655,
+STORE, 140164336693248, 140164336697343,
+STORE, 140164336697344, 140164353478655,
+STORE, 140164328300544, 140164336693247,
+SNULL, 140164454125568, 140164479303679,
+STORE, 140164479303680, 140164487696383,
+STORE, 140164454125568, 140164479303679,
+SNULL, 140164479307775, 140164487696383,
+STORE, 140164479303680, 140164479307775,
+STORE, 140164479307776, 140164487696383,
+STORE, 140164319907840, 140164336693247,
+STORE, 140164236046336, 140164244439039,
+SNULL, 140164588314624, 140164596703231,
+STORE, 140164596703232, 140164605095935,
+STORE, 140164588314624, 140164596703231,
+SNULL, 140164596707327, 140164605095935,
+STORE, 140164596703232, 140164596707327,
+STORE, 140164596707328, 140164605095935,
+SNULL, 140164454125568, 140164462518271,
+STORE, 140164462518272, 140164479303679,
+STORE, 140164454125568, 140164462518271,
+SNULL, 140164462522367, 140164479303679,
+STORE, 140164462518272, 140164462522367,
+STORE, 140164462522368, 140164479303679,
+STORE, 140164227653632, 140164244439039,
+SNULL, 140164227657727, 140164244439039,
+STORE, 140164227653632, 140164227657727,
+STORE, 140164227657728, 140164244439039,
+SNULL, 140164462522368, 140164470910975,
+STORE, 140164470910976, 140164479303679,
+STORE, 140164462522368, 140164470910975,
+SNULL, 140164470915071, 140164479303679,
+STORE, 140164470910976, 140164470915071,
+STORE, 140164470915072, 140164479303679,
+SNULL, 140164613492736, 140164621881343,
+STORE, 140164621881344, 140164630274047,
+STORE, 140164613492736, 140164621881343,
+SNULL, 140164621885439, 140164630274047,
+STORE, 140164621881344, 140164621885439,
+STORE, 140164621885440, 140164630274047,
+SNULL, 140164353482752, 140164370264063,
+STORE, 140164370264064, 140164378656767,
+STORE, 140164353482752, 140164370264063,
+SNULL, 140164370268159, 140164378656767,
+STORE, 140164370264064, 140164370268159,
+STORE, 140164370268160, 140164378656767,
+STORE, 140164219260928, 140164227653631,
+SNULL, 140164319911935, 140164336693247,
+STORE, 140164319907840, 140164319911935,
+STORE, 140164319911936, 140164336693247,
+SNULL, 140164336697344, 140164345085951,
+STORE, 140164345085952, 140164353478655,
+STORE, 140164336697344, 140164345085951,
+SNULL, 140164345090047, 140164353478655,
+STORE, 140164345085952, 140164345090047,
+STORE, 140164345090048, 140164353478655,
+SNULL, 140164319911936, 140164328300543,
+STORE, 140164328300544, 140164336693247,
+STORE, 140164319911936, 140164328300543,
+SNULL, 140164328304639, 140164336693247,
+STORE, 140164328300544, 140164328304639,
+STORE, 140164328304640, 140164336693247,
+SNULL, 140164454129663, 140164462518271,
+STORE, 140164454125568, 140164454129663,
+STORE, 140164454129664, 140164462518271,
+STORE, 140164210868224, 140164227653631,
+STORE, 140164202475520, 140164227653631,
+STORE, 140164194082816, 140164227653631,
+SNULL, 140164194086911, 140164227653631,
+STORE, 140164194082816, 140164194086911,
+STORE, 140164194086912, 140164227653631,
+SNULL, 140164353482752, 140164361871359,
+STORE, 140164361871360, 140164370264063,
+STORE, 140164353482752, 140164361871359,
+SNULL, 140164361875455, 140164370264063,
+STORE, 140164361871360, 140164361875455,
+STORE, 140164361875456, 140164370264063,
+SNULL, 140164227657728, 140164236046335,
+STORE, 140164236046336, 140164244439039,
+STORE, 140164227657728, 140164236046335,
+SNULL, 140164236050431, 140164244439039,
+STORE, 140164236046336, 140164236050431,
+STORE, 140164236050432, 140164244439039,
+STORE, 140164185690112, 140164194082815,
+SNULL, 140164194086912, 140164219260927,
+STORE, 140164219260928, 140164227653631,
+STORE, 140164194086912, 140164219260927,
+SNULL, 140164219265023, 140164227653631,
+STORE, 140164219260928, 140164219265023,
+STORE, 140164219265024, 140164227653631,
+STORE, 140164101828608, 140164110221311,
+STORE, 140164093435904, 140164110221311,
+STORE, 140164085043200, 140164110221311,
+SNULL, 140164085047295, 140164110221311,
+STORE, 140164085043200, 140164085047295,
+STORE, 140164085047296, 140164110221311,
+STORE, 140164076650496, 140164085043199,
+SNULL, 140164185694207, 140164194082815,
+STORE, 140164185690112, 140164185694207,
+STORE, 140164185694208, 140164194082815,
+SNULL, 140164085047296, 140164101828607,
+STORE, 140164101828608, 140164110221311,
+STORE, 140164085047296, 140164101828607,
+SNULL, 140164101832703, 140164110221311,
+STORE, 140164101828608, 140164101832703,
+STORE, 140164101832704, 140164110221311,
+SNULL, 140164085047296, 140164093435903,
+STORE, 140164093435904, 140164101828607,
+STORE, 140164085047296, 140164093435903,
+SNULL, 140164093439999, 140164101828607,
+STORE, 140164093435904, 140164093439999,
+STORE, 140164093440000, 140164101828607,
+SNULL, 140164194086912, 140164202475519,
+STORE, 140164202475520, 140164219260927,
+STORE, 140164194086912, 140164202475519,
+SNULL, 140164202479615, 140164219260927,
+STORE, 140164202475520, 140164202479615,
+STORE, 140164202479616, 140164219260927,
+SNULL, 140164202479616, 140164210868223,
+STORE, 140164210868224, 140164219260927,
+STORE, 140164202479616, 140164210868223,
+SNULL, 140164210872319, 140164219260927,
+STORE, 140164210868224, 140164210872319,
+STORE, 140164210872320, 140164219260927,
+SNULL, 140164076654591, 140164085043199,
+STORE, 140164076650496, 140164076654591,
+STORE, 140164076654592, 140164085043199,
+STORE, 140164068257792, 140164076650495,
+SNULL, 140164068261887, 140164076650495,
+STORE, 140164068257792, 140164068261887,
+STORE, 140164068261888, 140164076650495,
+STORE, 140165753053184, 140165753081855,
+STORE, 140165725851648, 140165728043007,
+SNULL, 140165725851648, 140165725941759,
+STORE, 140165725941760, 140165728043007,
+STORE, 140165725851648, 140165725941759,
+SNULL, 140165728034815, 140165728043007,
+STORE, 140165725941760, 140165728034815,
+STORE, 140165728034816, 140165728043007,
+ERASE, 140165728034816, 140165728043007,
+STORE, 140165728034816, 140165728043007,
+SNULL, 140165728038911, 140165728043007,
+STORE, 140165728034816, 140165728038911,
+STORE, 140165728038912, 140165728043007,
+ERASE, 140165753053184, 140165753081855,
+ERASE, 140164638666752, 140164638670847,
+ERASE, 140164638670848, 140164647059455,
+ERASE, 140165091676160, 140165091680255,
+ERASE, 140165091680256, 140165100068863,
+ERASE, 140164613488640, 140164613492735,
+ERASE, 140164613492736, 140164621881343,
+ERASE, 140164319907840, 140164319911935,
+ERASE, 140164319911936, 140164328300543,
+ERASE, 140165620154368, 140165620158463,
+ERASE, 140165620158464, 140165628547071,
+ERASE, 140164798062592, 140164798066687,
+ERASE, 140164798066688, 140164806455295,
+ERASE, 140164789669888, 140164789673983,
+ERASE, 140164789673984, 140164798062591,
+ERASE, 140164965851136, 140164965855231,
+ERASE, 140164965855232, 140164974243839,
+ERASE, 140165074890752, 140165074894847,
+ERASE, 140165074894848, 140165083283455,
+ERASE, 140164672237568, 140164672241663,
+ERASE, 140164672241664, 140164680630271,
+ERASE, 140164454125568, 140164454129663,
+ERASE, 140164454129664, 140164462518271,
+ERASE, 140165200715776, 140165200719871,
+ERASE, 140165200719872, 140165209108479,
+ERASE, 140164932280320, 140164932284415,
+ERASE, 140164932284416, 140164940673023,
+ERASE, 140164663844864, 140164663848959,
+ERASE, 140164663848960, 140164672237567,
+ERASE, 140164697415680, 140164697419775,
+ERASE, 140164697419776, 140164705808383,
+ERASE, 140164831633408, 140164831637503,
+ERASE, 140164831637504, 140164840026111,
+ERASE, 140165192323072, 140165192327167,
+ERASE, 140165192327168, 140165200715775,
+ERASE, 140165108461568, 140165108465663,
+ERASE, 140165108465664, 140165116854271,
+ERASE, 140164840026112, 140164840030207,
+ERASE, 140164840030208, 140164848418815,
+ERASE, 140164647059456, 140164647063551,
+ERASE, 140164647063552, 140164655452159,
+ERASE, 140165083283456, 140165083287551,
+ERASE, 140165083287552, 140165091676159,
+ERASE, 140164923887616, 140164923891711,
+ERASE, 140164923891712, 140164932280319,
+ERASE, 140164823240704, 140164823244799,
+ERASE, 140164823244800, 140164831633407,
+ERASE, 140164227653632, 140164227657727,
+ERASE, 140164227657728, 140164236046335,
+ERASE, 140164957458432, 140164957462527,
+ERASE, 140164957462528, 140164965851135,
+ERASE, 140164680630272, 140164680634367,
+ERASE, 140164680634368, 140164689022975,
+ERASE, 140164974243840, 140164974247935,
+ERASE, 140164974247936, 140164982636543,
+ERASE, 140165066498048, 140165066502143,
+ERASE, 140165066502144, 140165074890751,
+ERASE, 140164621881344, 140164621885439,
+ERASE, 140164621885440, 140164630274047,
+ERASE, 140164949065728, 140164949069823,
+ERASE, 140164949069824, 140164957458431,
+ERASE, 140164588310528, 140164588314623,
+ERASE, 140164588314624, 140164596703231,
+ERASE, 140164806455296, 140164806459391,
+ERASE, 140164806459392, 140164814847999,
+ERASE, 140164940673024, 140164940677119,
+ERASE, 140164940677120, 140164949065727,
+ERASE, 140164596703232, 140164596707327,
+ERASE, 140164596707328, 140164605095935,
+ERASE, 140164605095936, 140164605100031,
+ERASE, 140164605100032, 140164613488639,
+ERASE, 140164655452160, 140164655456255,
+ERASE, 140164655456256, 140164663844863,
+ERASE, 140164705808384, 140164705812479,
+ERASE, 140164705812480, 140164714201087,
+ERASE, 140164689022976, 140164689027071,
+ERASE, 140164689027072, 140164697415679,
+ERASE, 140164630274048, 140164630278143,
+ERASE, 140164630278144, 140164638666751,
+ERASE, 140164479303680, 140164479307775,
+ERASE, 140164479307776, 140164487696383,
+ERASE, 140164236046336, 140164236050431,
+ERASE, 140164236050432, 140164244439039,
+ERASE, 140164085043200, 140164085047295,
+ERASE, 140164085047296, 140164093435903,
+ERASE, 140164345085952, 140164345090047,
+ERASE, 140164345090048, 140164353478655,
+ERASE, 140164101828608, 140164101832703,
+ERASE, 140164101832704, 140164110221311,
+ERASE, 140164370264064, 140164370268159,
+ERASE, 140164370268160, 140164378656767,
+ERASE, 140164336693248, 140164336697343,
+ERASE, 140164336697344, 140164345085951,
+ERASE, 140164194082816, 140164194086911,
+ERASE, 140164194086912, 140164202475519,
+ERASE, 140164353478656, 140164353482751,
+ERASE, 140164353482752, 140164361871359,
+ERASE, 140164210868224, 140164210872319,
+ERASE, 140164210872320, 140164219260927,
+ERASE, 140164814848000, 140164814852095,
+ERASE, 140164814852096, 140164823240703,
+ERASE, 140164504481792, 140164504485887,
+ERASE, 140164504485888, 140164512874495,
+ERASE, 140165100068864, 140165100072959,
+ERASE, 140165100072960, 140165108461567,
+ERASE, 140164361871360, 140164361875455,
+ERASE, 140164361875456, 140164370264063,
+ERASE, 140164470910976, 140164470915071,
+ERASE, 140164470915072, 140164479303679,
+ERASE, 140164076650496, 140164076654591,
+ERASE, 140164076654592, 140164085043199,
+ERASE, 140164202475520, 140164202479615,
+ERASE, 140164202479616, 140164210868223,
+ERASE, 140164462518272, 140164462522367,
+ERASE, 140164462522368, 140164470910975,
+ERASE, 140165351718912, 140165351723007,
+ERASE, 140165351723008, 140165360111615,
+ERASE, 140164328300544, 140164328304639,
+ERASE, 140164328304640, 140164336693247,
+ERASE, 140164093435904, 140164093439999,
+ERASE, 140164093440000, 140164101828607,
+ERASE, 140165603368960, 140165603373055,
+ERASE, 140165603373056, 140165611761663,
+ERASE, 140165368504320, 140165368508415,
+ERASE, 140165368508416, 140165376897023,
+ERASE, 140165334933504, 140165334937599,
+ERASE, 140165334937600, 140165343326207,
+ERASE, 140165594976256, 140165594980351,
+ERASE, 140165594980352, 140165603368959,
+ERASE, 140164487696384, 140164487700479,
+ERASE, 140164487700480, 140164496089087,
+ERASE, 140164219260928, 140164219265023,
+ERASE, 140164219265024, 140164227653631,
+ERASE, 140164185690112, 140164185694207,
+ERASE, 140164185694208, 140164194082815,
+ERASE, 140164068257792, 140164068261887,
+ERASE, 140164068261888, 140164076650495,
+ERASE, 140165225893888, 140165225897983,
+ERASE, 140165225897984, 140165234286591,
+ERASE, 140165058105344, 140165058109439,
+ };
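+ /*
+  * Sketch, not part of this patch: one plausible replay loop for a
+  * set like the one above, assuming the (operation, start, end)
+  * triple layout noted earlier and the kernel's public
+  * mtree_store_range()/mtree_erase() API. The helper name and the
+  * xa_mk_value() dummy entry are illustrative only.
+  *
+  *	static void replay_set(struct maple_tree *mt,
+  *			       const unsigned long *set,
+  *			       unsigned long count)
+  *	{
+  *		unsigned long i;
+  *
+  *		for (i = 0; i + 2 < count; i += 3) {
+  *			if (set[i] == STORE)
+  *				mtree_store_range(mt, set[i + 1], set[i + 2],
+  *						  xa_mk_value(set[i + 1]),
+  *						  GFP_KERNEL);
+  *			else if (set[i] == SNULL)
+  *				mtree_store_range(mt, set[i + 1], set[i + 2],
+  *						  NULL, GFP_KERNEL);
+  *			else if (set[i] == ERASE)
+  *				mtree_erase(mt, set[i + 1]);
+  *		}
+  *	}
+  */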
+ static const unsigned long set31[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140730890784768, 140737488351231,
+SNULL, 140730890788863, 140737488351231,
+STORE, 140730890784768, 140730890788863,
+STORE, 140730890653696, 140730890788863,
+STORE, 94577123659776, 94577125912575,
+SNULL, 94577123790847, 94577125912575,
+STORE, 94577123659776, 94577123790847,
+STORE, 94577123790848, 94577125912575,
+ERASE, 94577123790848, 94577125912575,
+STORE, 94577125883904, 94577125892095,
+STORE, 94577125892096, 94577125912575,
+STORE, 140624060407808, 140624062660607,
+SNULL, 140624060551167, 140624062660607,
+STORE, 140624060407808, 140624060551167,
+STORE, 140624060551168, 140624062660607,
+ERASE, 140624060551168, 140624062660607,
+STORE, 140624062648320, 140624062656511,
+STORE, 140624062656512, 140624062660607,
+STORE, 140730892140544, 140730892144639,
+STORE, 140730892128256, 140730892140543,
+STORE, 140624062619648, 140624062648319,
+STORE, 140624062611456, 140624062619647,
+STORE, 140624058191872, 140624060407807,
+SNULL, 140624058191872, 140624058290175,
+STORE, 140624058290176, 140624060407807,
+STORE, 140624058191872, 140624058290175,
+SNULL, 140624060383231, 140624060407807,
+STORE, 140624058290176, 140624060383231,
+STORE, 140624060383232, 140624060407807,
+SNULL, 140624060383232, 140624060391423,
+STORE, 140624060391424, 140624060407807,
+STORE, 140624060383232, 140624060391423,
+ERASE, 140624060383232, 140624060391423,
+STORE, 140624060383232, 140624060391423,
+ERASE, 140624060391424, 140624060407807,
+STORE, 140624060391424, 140624060407807,
+STORE, 140624054394880, 140624058191871,
+SNULL, 140624054394880, 140624056053759,
+STORE, 140624056053760, 140624058191871,
+STORE, 140624054394880, 140624056053759,
+SNULL, 140624058150911, 140624058191871,
+STORE, 140624056053760, 140624058150911,
+STORE, 140624058150912, 140624058191871,
+SNULL, 140624058150912, 140624058175487,
+STORE, 140624058175488, 140624058191871,
+STORE, 140624058150912, 140624058175487,
+ERASE, 140624058150912, 140624058175487,
+STORE, 140624058150912, 140624058175487,
+ERASE, 140624058175488, 140624058191871,
+STORE, 140624058175488, 140624058191871,
+STORE, 140624062603264, 140624062619647,
+SNULL, 140624058167295, 140624058175487,
+STORE, 140624058150912, 140624058167295,
+STORE, 140624058167296, 140624058175487,
+SNULL, 140624060387327, 140624060391423,
+STORE, 140624060383232, 140624060387327,
+STORE, 140624060387328, 140624060391423,
+SNULL, 94577125887999, 94577125892095,
+STORE, 94577125883904, 94577125887999,
+STORE, 94577125888000, 94577125892095,
+SNULL, 140624062652415, 140624062656511,
+STORE, 140624062648320, 140624062652415,
+STORE, 140624062652416, 140624062656511,
+ERASE, 140624062619648, 140624062648319,
+STORE, 94577157709824, 94577157844991,
+STORE, 140624046002176, 140624054394879,
+SNULL, 140624046006271, 140624054394879,
+STORE, 140624046002176, 140624046006271,
+STORE, 140624046006272, 140624054394879,
+STORE, 140624037609472, 140624046002175,
+STORE, 140623903391744, 140624037609471,
+SNULL, 140623903391744, 140623940157439,
+STORE, 140623940157440, 140624037609471,
+STORE, 140623903391744, 140623940157439,
+ERASE, 140623903391744, 140623940157439,
+SNULL, 140624007266303, 140624037609471,
+STORE, 140623940157440, 140624007266303,
+STORE, 140624007266304, 140624037609471,
+ERASE, 140624007266304, 140624037609471,
+SNULL, 140623940292607, 140624007266303,
+STORE, 140623940157440, 140623940292607,
+STORE, 140623940292608, 140624007266303,
+SNULL, 140624037613567, 140624046002175,
+STORE, 140624037609472, 140624037613567,
+STORE, 140624037613568, 140624046002175,
+STORE, 140624029216768, 140624037609471,
+SNULL, 140624029220863, 140624037609471,
+STORE, 140624029216768, 140624029220863,
+STORE, 140624029220864, 140624037609471,
+STORE, 140624020824064, 140624029216767,
+SNULL, 140624020828159, 140624029216767,
+STORE, 140624020824064, 140624020828159,
+STORE, 140624020828160, 140624029216767,
+STORE, 140624012431360, 140624020824063,
+SNULL, 140624012435455, 140624020824063,
+STORE, 140624012431360, 140624012435455,
+STORE, 140624012435456, 140624020824063,
+STORE, 140623931764736, 140623940157439,
+STORE, 140623797547008, 140623931764735,
+SNULL, 140623797547008, 140623805939711,
+STORE, 140623805939712, 140623931764735,
+STORE, 140623797547008, 140623805939711,
+ERASE, 140623797547008, 140623805939711,
+SNULL, 140623873048575, 140623931764735,
+STORE, 140623805939712, 140623873048575,
+STORE, 140623873048576, 140623931764735,
+ERASE, 140623873048576, 140623931764735,
+STORE, 140623923372032, 140623940157439,
+STORE, 140623914979328, 140623940157439,
+STORE, 140623906586624, 140623940157439,
+STORE, 140623671721984, 140623873048575,
+SNULL, 140623738830847, 140623873048575,
+STORE, 140623671721984, 140623738830847,
+STORE, 140623738830848, 140623873048575,
+SNULL, 140623738830848, 140623805939711,
+STORE, 140623805939712, 140623873048575,
+STORE, 140623738830848, 140623805939711,
+ERASE, 140623738830848, 140623805939711,
+SNULL, 140623806074879, 140623873048575,
+STORE, 140623805939712, 140623806074879,
+STORE, 140623806074880, 140623873048575,
+SNULL, 140623906586624, 140623931764735,
+STORE, 140623931764736, 140623940157439,
+STORE, 140623906586624, 140623931764735,
+SNULL, 140623931768831, 140623940157439,
+STORE, 140623931764736, 140623931768831,
+STORE, 140623931768832, 140623940157439,
+STORE, 140623537504256, 140623738830847,
+SNULL, 140623537504256, 140623671721983,
+STORE, 140623671721984, 140623738830847,
+STORE, 140623537504256, 140623671721983,
+SNULL, 140623671857151, 140623738830847,
+STORE, 140623671721984, 140623671857151,
+STORE, 140623671857152, 140623738830847,
+SNULL, 140623604613119, 140623671721983,
+STORE, 140623537504256, 140623604613119,
+STORE, 140623604613120, 140623671721983,
+ERASE, 140623604613120, 140623671721983,
+SNULL, 140623537639423, 140623604613119,
+STORE, 140623537504256, 140623537639423,
+STORE, 140623537639424, 140623604613119,
+STORE, 140623537639424, 140623671721983,
+SNULL, 140623537639424, 140623604613119,
+STORE, 140623604613120, 140623671721983,
+STORE, 140623537639424, 140623604613119,
+SNULL, 140623604748287, 140623671721983,
+STORE, 140623604613120, 140623604748287,
+STORE, 140623604748288, 140623671721983,
+STORE, 140623898193920, 140623931764735,
+SNULL, 140623898193920, 140623923372031,
+STORE, 140623923372032, 140623931764735,
+STORE, 140623898193920, 140623923372031,
+SNULL, 140623923376127, 140623931764735,
+STORE, 140623923372032, 140623923376127,
+STORE, 140623923376128, 140623931764735,
+STORE, 140623889801216, 140623923372031,
+SNULL, 140623889801216, 140623898193919,
+STORE, 140623898193920, 140623923372031,
+STORE, 140623889801216, 140623898193919,
+SNULL, 140623898198015, 140623923372031,
+STORE, 140623898193920, 140623898198015,
+STORE, 140623898198016, 140623923372031,
+SNULL, 140623889805311, 140623898193919,
+STORE, 140623889801216, 140623889805311,
+STORE, 140623889805312, 140623898193919,
+SNULL, 140623898198016, 140623906586623,
+STORE, 140623906586624, 140623923372031,
+STORE, 140623898198016, 140623906586623,
+SNULL, 140623906590719, 140623923372031,
+STORE, 140623906586624, 140623906590719,
+STORE, 140623906590720, 140623923372031,
+STORE, 140623881408512, 140623889801215,
+SNULL, 140623906590720, 140623914979327,
+STORE, 140623914979328, 140623923372031,
+STORE, 140623906590720, 140623914979327,
+SNULL, 140623914983423, 140623923372031,
+STORE, 140623914979328, 140623914983423,
+STORE, 140623914983424, 140623923372031,
+SNULL, 140623881412607, 140623889801215,
+STORE, 140623881408512, 140623881412607,
+STORE, 140623881412608, 140623889801215,
+STORE, 140623797547008, 140623805939711,
+STORE, 140623789154304, 140623805939711,
+STORE, 140623780761600, 140623805939711,
+SNULL, 140623780761600, 140623789154303,
+STORE, 140623789154304, 140623805939711,
+STORE, 140623780761600, 140623789154303,
+SNULL, 140623789158399, 140623805939711,
+STORE, 140623789154304, 140623789158399,
+STORE, 140623789158400, 140623805939711,
+STORE, 140623772368896, 140623789154303,
+STORE, 140623763976192, 140623789154303,
+SNULL, 140623763976192, 140623780761599,
+STORE, 140623780761600, 140623789154303,
+STORE, 140623763976192, 140623780761599,
+SNULL, 140623780765695, 140623789154303,
+STORE, 140623780761600, 140623780765695,
+STORE, 140623780765696, 140623789154303,
+SNULL, 140623789158400, 140623797547007,
+STORE, 140623797547008, 140623805939711,
+STORE, 140623789158400, 140623797547007,
+SNULL, 140623797551103, 140623805939711,
+STORE, 140623797547008, 140623797551103,
+STORE, 140623797551104, 140623805939711,
+SNULL, 140623763976192, 140623772368895,
+STORE, 140623772368896, 140623780761599,
+STORE, 140623763976192, 140623772368895,
+SNULL, 140623772372991, 140623780761599,
+STORE, 140623772368896, 140623772372991,
+STORE, 140623772372992, 140623780761599,
+SNULL, 140623763980287, 140623772368895,
+STORE, 140623763976192, 140623763980287,
+STORE, 140623763980288, 140623772368895,
+STORE, 140623755583488, 140623763976191,
+STORE, 140623747190784, 140623763976191,
+SNULL, 140623747190784, 140623755583487,
+STORE, 140623755583488, 140623763976191,
+STORE, 140623747190784, 140623755583487,
+SNULL, 140623755587583, 140623763976191,
+STORE, 140623755583488, 140623755587583,
+STORE, 140623755587584, 140623763976191,
+STORE, 140623529111552, 140623537504255,
+SNULL, 140623747194879, 140623755583487,
+STORE, 140623747190784, 140623747194879,
+STORE, 140623747194880, 140623755583487,
+SNULL, 140623529115647, 140623537504255,
+STORE, 140623529111552, 140623529115647,
+STORE, 140623529115648, 140623537504255,
+STORE, 140623520718848, 140623529111551,
+SNULL, 140623520722943, 140623529111551,
+STORE, 140623520718848, 140623520722943,
+STORE, 140623520722944, 140623529111551,
+STORE, 140623512326144, 140623520718847,
+STORE, 140623503933440, 140623520718847,
+STORE, 140623495540736, 140623520718847,
+STORE, 140623361323008, 140623495540735,
+STORE, 140623227105280, 140623495540735,
+STORE, 140623218712576, 140623227105279,
+STORE, 140623084494848, 140623218712575,
+STORE, 140623076102144, 140623084494847,
+STORE, 140622941884416, 140623076102143,
+SNULL, 140622941884416, 140623000633343,
+STORE, 140623000633344, 140623076102143,
+STORE, 140622941884416, 140623000633343,
+ERASE, 140622941884416, 140623000633343,
+STORE, 140622992240640, 140623000633343,
+STORE, 140622983847936, 140623000633343,
+STORE, 140622849630208, 140622983847935,
+STORE, 140622841237504, 140622849630207,
+SNULL, 140622849630208, 140622866415615,
+STORE, 140622866415616, 140622983847935,
+STORE, 140622849630208, 140622866415615,
+ERASE, 140622849630208, 140622866415615,
+STORE, 140622858022912, 140622866415615,
+SNULL, 140622933524479, 140622983847935,
+STORE, 140622866415616, 140622933524479,
+STORE, 140622933524480, 140622983847935,
+ERASE, 140622933524480, 140622983847935,
+STORE, 140622975455232, 140623000633343,
+STORE, 140622707019776, 140622841237503,
+STORE, 140622967062528, 140623000633343,
+STORE, 140622572802048, 140622841237503,
+STORE, 140622958669824, 140623000633343,
+STORE, 140622438584320, 140622841237503,
+STORE, 140622950277120, 140623000633343,
+SNULL, 140622858027007, 140622866415615,
+STORE, 140622858022912, 140622858027007,
+STORE, 140622858027008, 140622866415615,
+STORE, 140622941884416, 140623000633343,
+STORE, 140622841237504, 140622858022911,
+SNULL, 140622841237504, 140622849630207,
+STORE, 140622849630208, 140622858022911,
+STORE, 140622841237504, 140622849630207,
+SNULL, 140622849634303, 140622858022911,
+STORE, 140622849630208, 140622849634303,
+STORE, 140622849634304, 140622858022911,
+STORE, 140622430191616, 140622438584319,
+SNULL, 140622430195711, 140622438584319,
+STORE, 140622430191616, 140622430195711,
+STORE, 140622430195712, 140622438584319,
+SNULL, 140623361323007, 140623495540735,
+STORE, 140623227105280, 140623361323007,
+STORE, 140623361323008, 140623495540735,
+SNULL, 140623361323008, 140623403286527,
+STORE, 140623403286528, 140623495540735,
+STORE, 140623361323008, 140623403286527,
+ERASE, 140623361323008, 140623403286527,
+SNULL, 140623470395391, 140623495540735,
+STORE, 140623403286528, 140623470395391,
+STORE, 140623470395392, 140623495540735,
+ERASE, 140623470395392, 140623495540735,
+SNULL, 140623227105280, 140623269068799,
+STORE, 140623269068800, 140623361323007,
+STORE, 140623227105280, 140623269068799,
+ERASE, 140623227105280, 140623269068799,
+SNULL, 140623084494848, 140623134851071,
+STORE, 140623134851072, 140623218712575,
+STORE, 140623084494848, 140623134851071,
+ERASE, 140623084494848, 140623134851071,
+SNULL, 140623201959935, 140623218712575,
+STORE, 140623134851072, 140623201959935,
+STORE, 140623201959936, 140623218712575,
+ERASE, 140623201959936, 140623218712575,
+SNULL, 140623067742207, 140623076102143,
+STORE, 140623000633344, 140623067742207,
+STORE, 140623067742208, 140623076102143,
+ERASE, 140623067742208, 140623076102143,
+STORE, 140622295973888, 140622430191615,
+SNULL, 140622295973888, 140622329544703,
+STORE, 140622329544704, 140622430191615,
+STORE, 140622295973888, 140622329544703,
+ERASE, 140622295973888, 140622329544703,
+SNULL, 140622866550783, 140622933524479,
+STORE, 140622866415616, 140622866550783,
+STORE, 140622866550784, 140622933524479,
+SNULL, 140622707019775, 140622841237503,
+STORE, 140622438584320, 140622707019775,
+STORE, 140622707019776, 140622841237503,
+SNULL, 140622707019776, 140622732197887,
+STORE, 140622732197888, 140622841237503,
+STORE, 140622707019776, 140622732197887,
+ERASE, 140622707019776, 140622732197887,
+SNULL, 140622799306751, 140622841237503,
+STORE, 140622732197888, 140622799306751,
+STORE, 140622799306752, 140622841237503,
+ERASE, 140622799306752, 140622841237503,
+SNULL, 140622572802047, 140622707019775,
+STORE, 140622438584320, 140622572802047,
+STORE, 140622572802048, 140622707019775,
+SNULL, 140622572802048, 140622597980159,
+STORE, 140622597980160, 140622707019775,
+STORE, 140622572802048, 140622597980159,
+ERASE, 140622572802048, 140622597980159,
+SNULL, 140622438584320, 140622463762431,
+STORE, 140622463762432, 140622572802047,
+STORE, 140622438584320, 140622463762431,
+ERASE, 140622438584320, 140622463762431,
+SNULL, 140622530871295, 140622572802047,
+STORE, 140622463762432, 140622530871295,
+STORE, 140622530871296, 140622572802047,
+ERASE, 140622530871296, 140622572802047,
+STORE, 140622195326976, 140622430191615,
+SNULL, 140622262435839, 140622430191615,
+STORE, 140622195326976, 140622262435839,
+STORE, 140622262435840, 140622430191615,
+SNULL, 140622262435840, 140622329544703,
+STORE, 140622329544704, 140622430191615,
+STORE, 140622262435840, 140622329544703,
+ERASE, 140622262435840, 140622329544703,
+SNULL, 140622841241599, 140622849630207,
+STORE, 140622841237504, 140622841241599,
+STORE, 140622841241600, 140622849630207,
+STORE, 140623487148032, 140623520718847,
+STORE, 140623478755328, 140623520718847,
+SNULL, 140622941884416, 140622983847935,
+STORE, 140622983847936, 140623000633343,
+STORE, 140622941884416, 140622983847935,
+SNULL, 140622983852031, 140623000633343,
+STORE, 140622983847936, 140622983852031,
+STORE, 140622983852032, 140623000633343,
+STORE, 140623394893824, 140623403286527,
+SNULL, 140623394897919, 140623403286527,
+STORE, 140623394893824, 140623394897919,
+STORE, 140623394897920, 140623403286527,
+SNULL, 140623403421695, 140623470395391,
+STORE, 140623403286528, 140623403421695,
+STORE, 140623403421696, 140623470395391,
+SNULL, 140623478755328, 140623503933439,
+STORE, 140623503933440, 140623520718847,
+STORE, 140623478755328, 140623503933439,
+SNULL, 140623503937535, 140623520718847,
+STORE, 140623503933440, 140623503937535,
+STORE, 140623503937536, 140623520718847,
+SNULL, 140623336177663, 140623361323007,
+STORE, 140623269068800, 140623336177663,
+STORE, 140623336177664, 140623361323007,
+ERASE, 140623336177664, 140623361323007,
+SNULL, 140623269203967, 140623336177663,
+STORE, 140623269068800, 140623269203967,
+STORE, 140623269203968, 140623336177663,
+SNULL, 140623134986239, 140623201959935,
+STORE, 140623134851072, 140623134986239,
+STORE, 140623134986240, 140623201959935,
+SNULL, 140623000768511, 140623067742207,
+STORE, 140623000633344, 140623000768511,
+STORE, 140623000768512, 140623067742207,
+SNULL, 140622396653567, 140622430191615,
+STORE, 140622329544704, 140622396653567,
+STORE, 140622396653568, 140622430191615,
+ERASE, 140622396653568, 140622430191615,
+SNULL, 140622732333055, 140622799306751,
+STORE, 140622732197888, 140622732333055,
+STORE, 140622732333056, 140622799306751,
+SNULL, 140622941884416, 140622975455231,
+STORE, 140622975455232, 140622983847935,
+STORE, 140622941884416, 140622975455231,
+SNULL, 140622975459327, 140622983847935,
+STORE, 140622975455232, 140622975459327,
+STORE, 140622975459328, 140622983847935,
+SNULL, 140622665089023, 140622707019775,
+STORE, 140622597980160, 140622665089023,
+STORE, 140622665089024, 140622707019775,
+ERASE, 140622665089024, 140622707019775,
+SNULL, 140622598115327, 140622665089023,
+STORE, 140622597980160, 140622598115327,
+STORE, 140622598115328, 140622665089023,
+SNULL, 140622463897599, 140622530871295,
+STORE, 140622463762432, 140622463897599,
+STORE, 140622463897600, 140622530871295,
+SNULL, 140622195462143, 140622262435839,
+STORE, 140622195326976, 140622195462143,
+STORE, 140622195462144, 140622262435839,
+STORE, 140623386501120, 140623394893823,
+SNULL, 140622941884416, 140622950277119,
+STORE, 140622950277120, 140622975455231,
+STORE, 140622941884416, 140622950277119,
+SNULL, 140622950281215, 140622975455231,
+STORE, 140622950277120, 140622950281215,
+STORE, 140622950281216, 140622975455231,
+SNULL, 140622941888511, 140622950277119,
+STORE, 140622941884416, 140622941888511,
+STORE, 140622941888512, 140622950277119,
+STORE, 140623378108416, 140623394893823,
+SNULL, 140623478755328, 140623495540735,
+STORE, 140623495540736, 140623503933439,
+STORE, 140623478755328, 140623495540735,
+SNULL, 140623495544831, 140623503933439,
+STORE, 140623495540736, 140623495544831,
+STORE, 140623495544832, 140623503933439,
+SNULL, 140623478755328, 140623487148031,
+STORE, 140623487148032, 140623495540735,
+STORE, 140623478755328, 140623487148031,
+SNULL, 140623487152127, 140623495540735,
+STORE, 140623487148032, 140623487152127,
+STORE, 140623487152128, 140623495540735,
+SNULL, 140623218716671, 140623227105279,
+STORE, 140623218712576, 140623218716671,
+STORE, 140623218716672, 140623227105279,
+SNULL, 140623076106239, 140623084494847,
+STORE, 140623076102144, 140623076106239,
+STORE, 140623076106240, 140623084494847,
+SNULL, 140622329679871, 140622396653567,
+STORE, 140622329544704, 140622329679871,
+STORE, 140622329679872, 140622396653567,
+SNULL, 140622950281216, 140622958669823,
+STORE, 140622958669824, 140622975455231,
+STORE, 140622950281216, 140622958669823,
+SNULL, 140622958673919, 140622975455231,
+STORE, 140622958669824, 140622958673919,
+STORE, 140622958673920, 140622975455231,
+SNULL, 140623503937536, 140623512326143,
+STORE, 140623512326144, 140623520718847,
+STORE, 140623503937536, 140623512326143,
+SNULL, 140623512330239, 140623520718847,
+STORE, 140623512326144, 140623512330239,
+STORE, 140623512330240, 140623520718847,
+SNULL, 140623378108416, 140623386501119,
+STORE, 140623386501120, 140623394893823,
+STORE, 140623378108416, 140623386501119,
+SNULL, 140623386505215, 140623394893823,
+STORE, 140623386501120, 140623386505215,
+STORE, 140623386505216, 140623394893823,
+STORE, 140623369715712, 140623386501119,
+STORE, 140623361323008, 140623386501119,
+STORE, 140623352930304, 140623386501119,
+SNULL, 140623352930304, 140623361323007,
+STORE, 140623361323008, 140623386501119,
+STORE, 140623352930304, 140623361323007,
+SNULL, 140623361327103, 140623386501119,
+STORE, 140623361323008, 140623361327103,
+STORE, 140623361327104, 140623386501119,
+SNULL, 140623478759423, 140623487148031,
+STORE, 140623478755328, 140623478759423,
+STORE, 140623478759424, 140623487148031,
+STORE, 140623344537600, 140623361323007,
+STORE, 140623260676096, 140623269068799,
+SNULL, 140622958673920, 140622967062527,
+STORE, 140622967062528, 140622975455231,
+STORE, 140622958673920, 140622967062527,
+SNULL, 140622967066623, 140622975455231,
+STORE, 140622967062528, 140622967066623,
+STORE, 140622967066624, 140622975455231,
+STORE, 140623252283392, 140623269068799,
+STORE, 140623243890688, 140623269068799,
+SNULL, 140622983852032, 140622992240639,
+STORE, 140622992240640, 140623000633343,
+STORE, 140622983852032, 140622992240639,
+SNULL, 140622992244735, 140623000633343,
+STORE, 140622992240640, 140622992244735,
+STORE, 140622992244736, 140623000633343,
+STORE, 140623235497984, 140623269068799,
+STORE, 140623218716672, 140623235497983,
+STORE, 140623210319872, 140623218712575,
+STORE, 140623126458368, 140623134851071,
+SNULL, 140623210323967, 140623218712575,
+STORE, 140623210319872, 140623210323967,
+STORE, 140623210323968, 140623218712575,
+SNULL, 140623218716672, 140623227105279,
+STORE, 140623227105280, 140623235497983,
+STORE, 140623218716672, 140623227105279,
+SNULL, 140623227109375, 140623235497983,
+STORE, 140623227105280, 140623227109375,
+STORE, 140623227109376, 140623235497983,
+STORE, 140623118065664, 140623134851071,
+STORE, 140623109672960, 140623134851071,
+SNULL, 140623109677055, 140623134851071,
+STORE, 140623109672960, 140623109677055,
+STORE, 140623109677056, 140623134851071,
+STORE, 140623101280256, 140623109672959,
+STORE, 140623092887552, 140623109672959,
+SNULL, 140623092887552, 140623101280255,
+STORE, 140623101280256, 140623109672959,
+STORE, 140623092887552, 140623101280255,
+SNULL, 140623101284351, 140623109672959,
+STORE, 140623101280256, 140623101284351,
+STORE, 140623101284352, 140623109672959,
+SNULL, 140623361327104, 140623378108415,
+STORE, 140623378108416, 140623386501119,
+STORE, 140623361327104, 140623378108415,
+SNULL, 140623378112511, 140623386501119,
+STORE, 140623378108416, 140623378112511,
+STORE, 140623378112512, 140623386501119,
+SNULL, 140623235497984, 140623243890687,
+STORE, 140623243890688, 140623269068799,
+STORE, 140623235497984, 140623243890687,
+SNULL, 140623243894783, 140623269068799,
+STORE, 140623243890688, 140623243894783,
+STORE, 140623243894784, 140623269068799,
+SNULL, 140623361327104, 140623369715711,
+STORE, 140623369715712, 140623378108415,
+STORE, 140623361327104, 140623369715711,
+SNULL, 140623369719807, 140623378108415,
+STORE, 140623369715712, 140623369719807,
+STORE, 140623369719808, 140623378108415,
+SNULL, 140623243894784, 140623252283391,
+STORE, 140623252283392, 140623269068799,
+STORE, 140623243894784, 140623252283391,
+SNULL, 140623252287487, 140623269068799,
+STORE, 140623252283392, 140623252287487,
+STORE, 140623252287488, 140623269068799,
+SNULL, 140623235502079, 140623243890687,
+STORE, 140623235497984, 140623235502079,
+STORE, 140623235502080, 140623243890687,
+SNULL, 140623344541695, 140623361323007,
+STORE, 140623344537600, 140623344541695,
+STORE, 140623344541696, 140623361323007,
+STORE, 140623076106240, 140623092887551,
+SNULL, 140623076106240, 140623084494847,
+STORE, 140623084494848, 140623092887551,
+STORE, 140623076106240, 140623084494847,
+SNULL, 140623084498943, 140623092887551,
+STORE, 140623084494848, 140623084498943,
+STORE, 140623084498944, 140623092887551,
+SNULL, 140623344541696, 140623352930303,
+STORE, 140623352930304, 140623361323007,
+STORE, 140623344541696, 140623352930303,
+SNULL, 140623352934399, 140623361323007,
+STORE, 140623352930304, 140623352934399,
+STORE, 140623352934400, 140623361323007,
+SNULL, 140623109677056, 140623118065663,
+STORE, 140623118065664, 140623134851071,
+STORE, 140623109677056, 140623118065663,
+SNULL, 140623118069759, 140623134851071,
+STORE, 140623118065664, 140623118069759,
+STORE, 140623118069760, 140623134851071,
+STORE, 140622832844800, 140622841237503,
+STORE, 140622824452096, 140622841237503,
+SNULL, 140622824452096, 140622832844799,
+STORE, 140622832844800, 140622841237503,
+STORE, 140622824452096, 140622832844799,
+SNULL, 140622832848895, 140622841237503,
+STORE, 140622832844800, 140622832848895,
+STORE, 140622832848896, 140622841237503,
+STORE, 140622816059392, 140622832844799,
+SNULL, 140623092891647, 140623101280255,
+STORE, 140623092887552, 140623092891647,
+STORE, 140623092891648, 140623101280255,
+SNULL, 140623118069760, 140623126458367,
+STORE, 140623126458368, 140623134851071,
+STORE, 140623118069760, 140623126458367,
+SNULL, 140623126462463, 140623134851071,
+STORE, 140623126458368, 140623126462463,
+STORE, 140623126462464, 140623134851071,
+SNULL, 140623252287488, 140623260676095,
+STORE, 140623260676096, 140623269068799,
+STORE, 140623252287488, 140623260676095,
+SNULL, 140623260680191, 140623269068799,
+STORE, 140623260676096, 140623260680191,
+STORE, 140623260680192, 140623269068799,
+STORE, 140622807666688, 140622832844799,
+STORE, 140622723805184, 140622732197887,
+STORE, 140622715412480, 140622732197887,
+STORE, 140622707019776, 140622732197887,
+SNULL, 140622707023871, 140622732197887,
+STORE, 140622707019776, 140622707023871,
+STORE, 140622707023872, 140622732197887,
+STORE, 140622698627072, 140622707019775,
+STORE, 140622690234368, 140622707019775,
+SNULL, 140622690238463, 140622707019775,
+STORE, 140622690234368, 140622690238463,
+STORE, 140622690238464, 140622707019775,
+SNULL, 140622807666688, 140622816059391,
+STORE, 140622816059392, 140622832844799,
+STORE, 140622807666688, 140622816059391,
+SNULL, 140622816063487, 140622832844799,
+STORE, 140622816059392, 140622816063487,
+STORE, 140622816063488, 140622832844799,
+STORE, 140622681841664, 140622690234367,
+STORE, 140622673448960, 140622690234367,
+SNULL, 140622673453055, 140622690234367,
+STORE, 140622673448960, 140622673453055,
+STORE, 140622673453056, 140622690234367,
+STORE, 140622589587456, 140622597980159,
+SNULL, 140622807670783, 140622816059391,
+STORE, 140622807666688, 140622807670783,
+STORE, 140622807670784, 140622816059391,
+STORE, 140622581194752, 140622597980159,
+SNULL, 140622581198847, 140622597980159,
+STORE, 140622581194752, 140622581198847,
+STORE, 140622581198848, 140622597980159,
+SNULL, 140622816063488, 140622824452095,
+STORE, 140622824452096, 140622832844799,
+STORE, 140622816063488, 140622824452095,
+SNULL, 140622824456191, 140622832844799,
+STORE, 140622824452096, 140622824456191,
+STORE, 140622824456192, 140622832844799,
+STORE, 140622572802048, 140622581194751,
+SNULL, 140622572806143, 140622581194751,
+STORE, 140622572802048, 140622572806143,
+STORE, 140622572806144, 140622581194751,
+STORE, 140622564409344, 140622572802047,
+STORE, 140622556016640, 140622572802047,
+SNULL, 140622556016640, 140622564409343,
+STORE, 140622564409344, 140622572802047,
+STORE, 140622556016640, 140622564409343,
+SNULL, 140622564413439, 140622572802047,
+STORE, 140622564409344, 140622564413439,
+STORE, 140622564413440, 140622572802047,
+SNULL, 140622690238464, 140622698627071,
+STORE, 140622698627072, 140622707019775,
+STORE, 140622690238464, 140622698627071,
+SNULL, 140622698631167, 140622707019775,
+STORE, 140622698627072, 140622698631167,
+STORE, 140622698631168, 140622707019775,
+SNULL, 140622707023872, 140622723805183,
+STORE, 140622723805184, 140622732197887,
+STORE, 140622707023872, 140622723805183,
+SNULL, 140622723809279, 140622732197887,
+STORE, 140622723805184, 140622723809279,
+STORE, 140622723809280, 140622732197887,
+SNULL, 140622707023872, 140622715412479,
+STORE, 140622715412480, 140622723805183,
+STORE, 140622707023872, 140622715412479,
+SNULL, 140622715416575, 140622723805183,
+STORE, 140622715412480, 140622715416575,
+STORE, 140622715416576, 140622723805183,
+STORE, 140622547623936, 140622564409343,
+SNULL, 140622547628031, 140622564409343,
+STORE, 140622547623936, 140622547628031,
+STORE, 140622547628032, 140622564409343,
+STORE, 140622539231232, 140622547623935,
+SNULL, 140622539235327, 140622547623935,
+STORE, 140622539231232, 140622539235327,
+STORE, 140622539235328, 140622547623935,
+SNULL, 140622581198848, 140622589587455,
+STORE, 140622589587456, 140622597980159,
+STORE, 140622581198848, 140622589587455,
+SNULL, 140622589591551, 140622597980159,
+STORE, 140622589587456, 140622589591551,
+STORE, 140622589591552, 140622597980159,
+STORE, 140622455369728, 140622463762431,
+SNULL, 140622455373823, 140622463762431,
+STORE, 140622455369728, 140622455373823,
+STORE, 140622455373824, 140622463762431,
+STORE, 140622446977024, 140622455369727,
+SNULL, 140622446981119, 140622455369727,
+STORE, 140622446977024, 140622446981119,
+STORE, 140622446981120, 140622455369727,
+SNULL, 140622547628032, 140622556016639,
+STORE, 140622556016640, 140622564409343,
+STORE, 140622547628032, 140622556016639,
+SNULL, 140622556020735, 140622564409343,
+STORE, 140622556016640, 140622556020735,
+STORE, 140622556020736, 140622564409343,
+STORE, 140622430195712, 140622446977023,
+STORE, 140622421798912, 140622430191615,
+SNULL, 140622430195712, 140622438584319,
+STORE, 140622438584320, 140622446977023,
+STORE, 140622430195712, 140622438584319,
+SNULL, 140622438588415, 140622446977023,
+STORE, 140622438584320, 140622438588415,
+STORE, 140622438588416, 140622446977023,
+STORE, 140622413406208, 140622430191615,
+STORE, 140622405013504, 140622430191615,
+SNULL, 140622405013504, 140622413406207,
+STORE, 140622413406208, 140622430191615,
+STORE, 140622405013504, 140622413406207,
+SNULL, 140622413410303, 140622430191615,
+STORE, 140622413406208, 140622413410303,
+STORE, 140622413410304, 140622430191615,
+SNULL, 140622673453056, 140622681841663,
+STORE, 140622681841664, 140622690234367,
+STORE, 140622673453056, 140622681841663,
+SNULL, 140622681845759, 140622690234367,
+STORE, 140622681841664, 140622681845759,
+STORE, 140622681845760, 140622690234367,
+STORE, 140622321152000, 140622329544703,
+SNULL, 140622413410304, 140622421798911,
+STORE, 140622421798912, 140622430191615,
+STORE, 140622413410304, 140622421798911,
+SNULL, 140622421803007, 140622430191615,
+STORE, 140622421798912, 140622421803007,
+STORE, 140622421803008, 140622430191615,
+STORE, 140622312759296, 140622329544703,
+SNULL, 140622312763391, 140622329544703,
+STORE, 140622312759296, 140622312763391,
+STORE, 140622312763392, 140622329544703,
+SNULL, 140622405017599, 140622413406207,
+STORE, 140622405013504, 140622405017599,
+STORE, 140622405017600, 140622413406207,
+STORE, 140622304366592, 140622312759295,
+SNULL, 140622304370687, 140622312759295,
+STORE, 140622304366592, 140622304370687,
+STORE, 140622304370688, 140622312759295,
+SNULL, 140622312763392, 140622321151999,
+STORE, 140622321152000, 140622329544703,
+STORE, 140622312763392, 140622321151999,
+SNULL, 140622321156095, 140622329544703,
+STORE, 140622321152000, 140622321156095,
+STORE, 140622321156096, 140622329544703,
+STORE, 140624062619648, 140624062648319,
+STORE, 140624010240000, 140624012431359,
+SNULL, 140624010240000, 140624010330111,
+STORE, 140624010330112, 140624012431359,
+STORE, 140624010240000, 140624010330111,
+SNULL, 140624012423167, 140624012431359,
+STORE, 140624010330112, 140624012423167,
+STORE, 140624012423168, 140624012431359,
+ERASE, 140624012423168, 140624012431359,
+STORE, 140624012423168, 140624012431359,
+SNULL, 140624012427263, 140624012431359,
+STORE, 140624012423168, 140624012427263,
+STORE, 140624012427264, 140624012431359,
+ERASE, 140624062619648, 140624062648319,
+ERASE, 140622849630208, 140622849634303,
+ERASE, 140622849634304, 140622858022911,
+ERASE, 140623394893824, 140623394897919,
+ERASE, 140623394897920, 140623403286527,
+ERASE, 140623361323008, 140623361327103,
+ERASE, 140623361327104, 140623369715711,
+ERASE, 140623084494848, 140623084498943,
+ERASE, 140623084498944, 140623092887551,
+ERASE, 140623931764736, 140623931768831,
+ERASE, 140623931768832, 140623940157439,
+ERASE, 140622841237504, 140622841241599,
+ERASE, 140622841241600, 140622849630207,
+ERASE, 140623487148032, 140623487152127,
+ERASE, 140623487152128, 140623495540735,
+ERASE, 140623109672960, 140623109677055,
+ERASE, 140623109677056, 140623118065663,
+ERASE, 140622983847936, 140622983852031,
+ERASE, 140622983852032, 140622992240639,
+ERASE, 140623352930304, 140623352934399,
+ERASE, 140623352934400, 140623361323007,
+ERASE, 140622564409344, 140622564413439,
+ERASE, 140622564413440, 140622572802047,
+ERASE, 140622430191616, 140622430195711,
+ERASE, 140622430195712, 140622438584319,
+ERASE, 140622958669824, 140622958673919,
+ERASE, 140622958673920, 140622967062527,
+ERASE, 140622992240640, 140622992244735,
+ERASE, 140622992244736, 140623000633343,
+ERASE, 140623227105280, 140623227109375,
+ERASE, 140623227109376, 140623235497983,
+ERASE, 140622321152000, 140622321156095,
+ERASE, 140622321156096, 140622329544703,
+ERASE, 140622858022912, 140622858027007,
+ERASE, 140622858027008, 140622866415615,
+ERASE, 140622975455232, 140622975459327,
+ERASE, 140622975459328, 140622983847935,
+ERASE, 140623378108416, 140623378112511,
+ERASE, 140623378112512, 140623386501119,
+ERASE, 140623495540736, 140623495544831,
+ERASE, 140623495544832, 140623503933439,
+ERASE, 140623118065664, 140623118069759,
+ERASE, 140623118069760, 140623126458367,
+ERASE, 140622572802048, 140622572806143,
+ERASE, 140622572806144, 140622581194751,
+ERASE, 140622421798912, 140622421803007,
+ERASE, 140622421803008, 140622430191615,
+ERASE, 140622967062528, 140622967066623,
+ERASE, 140622967066624, 140622975455231,
+ERASE, 140623252283392, 140623252287487,
+ERASE, 140623252287488, 140623260676095,
+ERASE, 140622673448960, 140622673453055,
+ERASE, 140622673453056, 140622681841663,
+ERASE, 140623076102144, 140623076106239,
+ERASE, 140623076106240, 140623084494847,
+ERASE, 140623101280256, 140623101284351,
+ERASE, 140623101284352, 140623109672959,
+ERASE, 140622715412480, 140622715416575,
+ERASE, 140622715416576, 140622723805183,
+ERASE, 140622405013504, 140622405017599,
+ERASE, 140622405017600, 140622413406207,
+ERASE, 140623478755328, 140623478759423,
+ERASE, 140623478759424, 140623487148031,
+ERASE, 140623906586624, 140623906590719,
+ERASE, 140623906590720, 140623914979327,
+ERASE, 140622950277120, 140622950281215,
+ERASE, 140622950281216, 140622958669823,
+ };
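+ /*
+  * set32: one more recorded operation trace replayed against the tree.
+  * Each entry below is an (op, start, end) triple: STORE inserts the
+  * range, SNULL stores NULL over it, ERASE removes it. The address
+  * values are assumed to come from a captured live-process mmap trace
+  * and are replayed verbatim, not computed.
+  */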
+ static const unsigned long set32[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140731244212224, 140737488351231,
+SNULL, 140731244216319, 140737488351231,
+STORE, 140731244212224, 140731244216319,
+STORE, 140731244081152, 140731244216319,
+STORE, 94427773984768, 94427776237567,
+SNULL, 94427774115839, 94427776237567,
+STORE, 94427773984768, 94427774115839,
+STORE, 94427774115840, 94427776237567,
+ERASE, 94427774115840, 94427776237567,
+STORE, 94427776208896, 94427776217087,
+STORE, 94427776217088, 94427776237567,
+STORE, 140401464893440, 140401467146239,
+SNULL, 140401465036799, 140401467146239,
+STORE, 140401464893440, 140401465036799,
+STORE, 140401465036800, 140401467146239,
+ERASE, 140401465036800, 140401467146239,
+STORE, 140401467133952, 140401467142143,
+STORE, 140401467142144, 140401467146239,
+STORE, 140731244507136, 140731244511231,
+STORE, 140731244494848, 140731244507135,
+STORE, 140401467105280, 140401467133951,
+STORE, 140401467097088, 140401467105279,
+STORE, 140401462677504, 140401464893439,
+SNULL, 140401462677504, 140401462775807,
+STORE, 140401462775808, 140401464893439,
+STORE, 140401462677504, 140401462775807,
+SNULL, 140401464868863, 140401464893439,
+STORE, 140401462775808, 140401464868863,
+STORE, 140401464868864, 140401464893439,
+SNULL, 140401464868864, 140401464877055,
+STORE, 140401464877056, 140401464893439,
+STORE, 140401464868864, 140401464877055,
+ERASE, 140401464868864, 140401464877055,
+STORE, 140401464868864, 140401464877055,
+ERASE, 140401464877056, 140401464893439,
+STORE, 140401464877056, 140401464893439,
+STORE, 140401458880512, 140401462677503,
+SNULL, 140401458880512, 140401460539391,
+STORE, 140401460539392, 140401462677503,
+STORE, 140401458880512, 140401460539391,
+SNULL, 140401462636543, 140401462677503,
+STORE, 140401460539392, 140401462636543,
+STORE, 140401462636544, 140401462677503,
+SNULL, 140401462636544, 140401462661119,
+STORE, 140401462661120, 140401462677503,
+STORE, 140401462636544, 140401462661119,
+ERASE, 140401462636544, 140401462661119,
+STORE, 140401462636544, 140401462661119,
+ERASE, 140401462661120, 140401462677503,
+STORE, 140401462661120, 140401462677503,
+STORE, 140401467088896, 140401467105279,
+SNULL, 140401462652927, 140401462661119,
+STORE, 140401462636544, 140401462652927,
+STORE, 140401462652928, 140401462661119,
+SNULL, 140401464872959, 140401464877055,
+STORE, 140401464868864, 140401464872959,
+STORE, 140401464872960, 140401464877055,
+SNULL, 94427776212991, 94427776217087,
+STORE, 94427776208896, 94427776212991,
+STORE, 94427776212992, 94427776217087,
+SNULL, 140401467138047, 140401467142143,
+STORE, 140401467133952, 140401467138047,
+STORE, 140401467138048, 140401467142143,
+ERASE, 140401467105280, 140401467133951,
+STORE, 94427784683520, 94427784818687,
+STORE, 140401450487808, 140401458880511,
+SNULL, 140401450491903, 140401458880511,
+STORE, 140401450487808, 140401450491903,
+STORE, 140401450491904, 140401458880511,
+STORE, 140401442095104, 140401450487807,
+STORE, 140401307877376, 140401442095103,
+SNULL, 140401307877376, 140401340055551,
+STORE, 140401340055552, 140401442095103,
+STORE, 140401307877376, 140401340055551,
+ERASE, 140401307877376, 140401340055551,
+SNULL, 140401407164415, 140401442095103,
+STORE, 140401340055552, 140401407164415,
+STORE, 140401407164416, 140401442095103,
+ERASE, 140401407164416, 140401442095103,
+SNULL, 140401340190719, 140401407164415,
+STORE, 140401340055552, 140401340190719,
+STORE, 140401340190720, 140401407164415,
+SNULL, 140401442099199, 140401450487807,
+STORE, 140401442095104, 140401442099199,
+STORE, 140401442099200, 140401450487807,
+STORE, 140401433702400, 140401442095103,
+SNULL, 140401433706495, 140401442095103,
+STORE, 140401433702400, 140401433706495,
+STORE, 140401433706496, 140401442095103,
+STORE, 140401425309696, 140401433702399,
+SNULL, 140401425313791, 140401433702399,
+STORE, 140401425309696, 140401425313791,
+STORE, 140401425313792, 140401433702399,
+STORE, 140401416916992, 140401425309695,
+SNULL, 140401416921087, 140401425309695,
+STORE, 140401416916992, 140401416921087,
+STORE, 140401416921088, 140401425309695,
+STORE, 140401408524288, 140401416916991,
+STORE, 140401205837824, 140401340055551,
+SNULL, 140401272946687, 140401340055551,
+STORE, 140401205837824, 140401272946687,
+STORE, 140401272946688, 140401340055551,
+ERASE, 140401272946688, 140401340055551,
+SNULL, 140401205972991, 140401272946687,
+STORE, 140401205837824, 140401205972991,
+STORE, 140401205972992, 140401272946687,
+STORE, 140401331662848, 140401340055551,
+STORE, 140401323270144, 140401340055551,
+STORE, 140401138728960, 140401205837823,
+STORE, 140401314877440, 140401340055551,
+SNULL, 140401408528383, 140401416916991,
+STORE, 140401408524288, 140401408528383,
+STORE, 140401408528384, 140401416916991,
+SNULL, 140401138864127, 140401205837823,
+STORE, 140401138728960, 140401138864127,
+STORE, 140401138864128, 140401205837823,
+STORE, 140401004511232, 140401138728959,
+SNULL, 140401071620095, 140401138728959,
+STORE, 140401004511232, 140401071620095,
+STORE, 140401071620096, 140401138728959,
+ERASE, 140401071620096, 140401138728959,
+STORE, 140400870293504, 140401071620095,
+SNULL, 140400937402367, 140401071620095,
+STORE, 140400870293504, 140400937402367,
+STORE, 140400937402368, 140401071620095,
+SNULL, 140400937402368, 140401004511231,
+STORE, 140401004511232, 140401071620095,
+STORE, 140400937402368, 140401004511231,
+ERASE, 140400937402368, 140401004511231,
+STORE, 140401306484736, 140401340055551,
+SNULL, 140401306484736, 140401323270143,
+STORE, 140401323270144, 140401340055551,
+STORE, 140401306484736, 140401323270143,
+SNULL, 140401323274239, 140401340055551,
+STORE, 140401323270144, 140401323274239,
+STORE, 140401323274240, 140401340055551,
+SNULL, 140401004646399, 140401071620095,
+STORE, 140401004511232, 140401004646399,
+STORE, 140401004646400, 140401071620095,
+SNULL, 140400870428671, 140400937402367,
+STORE, 140400870293504, 140400870428671,
+STORE, 140400870428672, 140400937402367,
+SNULL, 140401306488831, 140401323270143,
+STORE, 140401306484736, 140401306488831,
+STORE, 140401306488832, 140401323270143,
+STORE, 140401298092032, 140401306484735,
+SNULL, 140401306488832, 140401314877439,
+STORE, 140401314877440, 140401323270143,
+STORE, 140401306488832, 140401314877439,
+SNULL, 140401314881535, 140401323270143,
+STORE, 140401314877440, 140401314881535,
+STORE, 140401314881536, 140401323270143,
+SNULL, 140401323274240, 140401331662847,
+STORE, 140401331662848, 140401340055551,
+STORE, 140401323274240, 140401331662847,
+SNULL, 140401331666943, 140401340055551,
+STORE, 140401331662848, 140401331666943,
+STORE, 140401331666944, 140401340055551,
+SNULL, 140401298096127, 140401306484735,
+STORE, 140401298092032, 140401298096127,
+STORE, 140401298096128, 140401306484735,
+STORE, 140401289699328, 140401298092031,
+STORE, 140401281306624, 140401298092031,
+STORE, 140401130336256, 140401138728959,
+SNULL, 140401281306624, 140401289699327,
+STORE, 140401289699328, 140401298092031,
+STORE, 140401281306624, 140401289699327,
+SNULL, 140401289703423, 140401298092031,
+STORE, 140401289699328, 140401289703423,
+STORE, 140401289703424, 140401298092031,
+STORE, 140401121943552, 140401138728959,
+STORE, 140401113550848, 140401138728959,
+SNULL, 140401281310719, 140401289699327,
+STORE, 140401281306624, 140401281310719,
+STORE, 140401281310720, 140401289699327,
+SNULL, 140401113550848, 140401121943551,
+STORE, 140401121943552, 140401138728959,
+STORE, 140401113550848, 140401121943551,
+SNULL, 140401121947647, 140401138728959,
+STORE, 140401121943552, 140401121947647,
+STORE, 140401121947648, 140401138728959,
+STORE, 140401105158144, 140401121943551,
+SNULL, 140401121947648, 140401130336255,
+STORE, 140401130336256, 140401138728959,
+STORE, 140401121947648, 140401130336255,
+SNULL, 140401130340351, 140401138728959,
+STORE, 140401130336256, 140401130340351,
+STORE, 140401130340352, 140401138728959,
+STORE, 140401096765440, 140401121943551,
+SNULL, 140401096765440, 140401113550847,
+STORE, 140401113550848, 140401121943551,
+STORE, 140401096765440, 140401113550847,
+SNULL, 140401113554943, 140401121943551,
+STORE, 140401113550848, 140401113554943,
+STORE, 140401113554944, 140401121943551,
+STORE, 140401088372736, 140401113550847,
+SNULL, 140401088372736, 140401096765439,
+STORE, 140401096765440, 140401113550847,
+STORE, 140401088372736, 140401096765439,
+SNULL, 140401096769535, 140401113550847,
+STORE, 140401096765440, 140401096769535,
+STORE, 140401096769536, 140401113550847,
+SNULL, 140401096769536, 140401105158143,
+STORE, 140401105158144, 140401113550847,
+STORE, 140401096769536, 140401105158143,
+SNULL, 140401105162239, 140401113550847,
+STORE, 140401105158144, 140401105162239,
+STORE, 140401105162240, 140401113550847,
+SNULL, 140401088376831, 140401096765439,
+STORE, 140401088372736, 140401088376831,
+STORE, 140401088376832, 140401096765439,
+STORE, 140401079980032, 140401088372735,
+STORE, 140400996118528, 140401004511231,
+SNULL, 140401079984127, 140401088372735,
+STORE, 140401079980032, 140401079984127,
+STORE, 140401079984128, 140401088372735,
+SNULL, 140400996122623, 140401004511231,
+STORE, 140400996118528, 140400996122623,
+STORE, 140400996122624, 140401004511231,
+STORE, 140400987725824, 140400996118527,
+STORE, 140400979333120, 140400996118527,
+STORE, 140400803184640, 140400870293503,
+SNULL, 140400803319807, 140400870293503,
+STORE, 140400803184640, 140400803319807,
+STORE, 140400803319808, 140400870293503,
+SNULL, 140400979333120, 140400987725823,
+STORE, 140400987725824, 140400996118527,
+STORE, 140400979333120, 140400987725823,
+SNULL, 140400987729919, 140400996118527,
+STORE, 140400987725824, 140400987729919,
+STORE, 140400987729920, 140400996118527,
+STORE, 140400970940416, 140400987725823,
+STORE, 140400962547712, 140400987725823,
+STORE, 140400668966912, 140400803184639,
+STORE, 140400954155008, 140400987725823,
+STORE, 140400945762304, 140400987725823,
+STORE, 140400660574208, 140400668966911,
+STORE, 140400593465344, 140400660574207,
+STORE, 140400585072640, 140400593465343,
+STORE, 140400450854912, 140400585072639,
+STORE, 140400442462208, 140400450854911,
+STORE, 140400434069504, 140400450854911,
+STORE, 140400299851776, 140400434069503,
+STORE, 140400291459072, 140400299851775,
+SNULL, 140400299851776, 140400333422591,
+STORE, 140400333422592, 140400434069503,
+STORE, 140400299851776, 140400333422591,
+ERASE, 140400299851776, 140400333422591,
+STORE, 140400325029888, 140400333422591,
+STORE, 140400157241344, 140400291459071,
+STORE, 140400316637184, 140400333422591,
+STORE, 140400308244480, 140400333422591,
+STORE, 140400023023616, 140400291459071,
+STORE, 140400291459072, 140400333422591,
+SNULL, 140400023023616, 140400064987135,
+STORE, 140400064987136, 140400291459071,
+STORE, 140400023023616, 140400064987135,
+ERASE, 140400023023616, 140400064987135,
+STORE, 140400056594432, 140400064987135,
+SNULL, 140400056598527, 140400064987135,
+STORE, 140400056594432, 140400056598527,
+STORE, 140400056598528, 140400064987135,
+STORE, 140399989485568, 140400056594431,
+SNULL, 140400291459072, 140400316637183,
+STORE, 140400316637184, 140400333422591,
+STORE, 140400291459072, 140400316637183,
+SNULL, 140400316641279, 140400333422591,
+STORE, 140400316637184, 140400316641279,
+STORE, 140400316641280, 140400333422591,
+STORE, 140399855267840, 140400056594431,
+SNULL, 140399855267840, 140399863660543,
+STORE, 140399863660544, 140400056594431,
+STORE, 140399855267840, 140399863660543,
+ERASE, 140399855267840, 140399863660543,
+SNULL, 140400736075775, 140400803184639,
+STORE, 140400668966912, 140400736075775,
+STORE, 140400736075776, 140400803184639,
+ERASE, 140400736075776, 140400803184639,
+SNULL, 140400669102079, 140400736075775,
+STORE, 140400668966912, 140400669102079,
+STORE, 140400669102080, 140400736075775,
+STORE, 140400669102080, 140400803184639,
+SNULL, 140400669102080, 140400736075775,
+STORE, 140400736075776, 140400803184639,
+STORE, 140400669102080, 140400736075775,
+SNULL, 140400736210943, 140400803184639,
+STORE, 140400736075776, 140400736210943,
+STORE, 140400736210944, 140400803184639,
+ERASE, 140400593465344, 140400660574207,
+SNULL, 140400450854912, 140400467640319,
+STORE, 140400467640320, 140400585072639,
+STORE, 140400450854912, 140400467640319,
+ERASE, 140400450854912, 140400467640319,
+STORE, 140399729442816, 140400056594431,
+SNULL, 140400400531455, 140400434069503,
+STORE, 140400333422592, 140400400531455,
+STORE, 140400400531456, 140400434069503,
+ERASE, 140400400531456, 140400434069503,
+SNULL, 140400333557759, 140400400531455,
+STORE, 140400333422592, 140400333557759,
+STORE, 140400333557760, 140400400531455,
+SNULL, 140400157241343, 140400291459071,
+STORE, 140400064987136, 140400157241343,
+STORE, 140400157241344, 140400291459071,
+SNULL, 140400157241344, 140400199204863,
+STORE, 140400199204864, 140400291459071,
+STORE, 140400157241344, 140400199204863,
+ERASE, 140400157241344, 140400199204863,
+SNULL, 140400266313727, 140400291459071,
+STORE, 140400199204864, 140400266313727,
+STORE, 140400266313728, 140400291459071,
+ERASE, 140400266313728, 140400291459071,
+SNULL, 140400132095999, 140400157241343,
+STORE, 140400064987136, 140400132095999,
+STORE, 140400132096000, 140400157241343,
+ERASE, 140400132096000, 140400157241343,
+SNULL, 140400065122303, 140400132095999,
+STORE, 140400064987136, 140400065122303,
+STORE, 140400065122304, 140400132095999,
+SNULL, 140400945762304, 140400954155007,
+STORE, 140400954155008, 140400987725823,
+STORE, 140400945762304, 140400954155007,
+SNULL, 140400954159103, 140400987725823,
+STORE, 140400954155008, 140400954159103,
+STORE, 140400954159104, 140400987725823,
+SNULL, 140400434069504, 140400442462207,
+STORE, 140400442462208, 140400450854911,
+STORE, 140400434069504, 140400442462207,
+SNULL, 140400442466303, 140400450854911,
+STORE, 140400442462208, 140400442466303,
+STORE, 140400442466304, 140400450854911,
+SNULL, 140400291463167, 140400316637183,
+STORE, 140400291459072, 140400291463167,
+STORE, 140400291463168, 140400316637183,
+STORE, 140400652181504, 140400668966911,
+STORE, 140400643788800, 140400668966911,
+SNULL, 140400291463168, 140400299851775,
+STORE, 140400299851776, 140400316637183,
+STORE, 140400291463168, 140400299851775,
+SNULL, 140400299855871, 140400316637183,
+STORE, 140400299851776, 140400299855871,
+STORE, 140400299855872, 140400316637183,
+STORE, 140400635396096, 140400668966911,
+SNULL, 140400635396096, 140400643788799,
+STORE, 140400643788800, 140400668966911,
+STORE, 140400635396096, 140400643788799,
+SNULL, 140400643792895, 140400668966911,
+STORE, 140400643788800, 140400643792895,
+STORE, 140400643792896, 140400668966911,
+SNULL, 140399989485567, 140400056594431,
+STORE, 140399729442816, 140399989485567,
+STORE, 140399989485568, 140400056594431,
+ERASE, 140399989485568, 140400056594431,
+SNULL, 140399930769407, 140399989485567,
+STORE, 140399729442816, 140399930769407,
+STORE, 140399930769408, 140399989485567,
+ERASE, 140399930769408, 140399989485567,
+SNULL, 140400945766399, 140400954155007,
+STORE, 140400945762304, 140400945766399,
+STORE, 140400945766400, 140400954155007,
+SNULL, 140400534749183, 140400585072639,
+STORE, 140400467640320, 140400534749183,
+STORE, 140400534749184, 140400585072639,
+ERASE, 140400534749184, 140400585072639,
+SNULL, 140399796551679, 140399930769407,
+STORE, 140399729442816, 140399796551679,
+STORE, 140399796551680, 140399930769407,
+SNULL, 140399796551680, 140399863660543,
+STORE, 140399863660544, 140399930769407,
+STORE, 140399796551680, 140399863660543,
+ERASE, 140399796551680, 140399863660543,
+SNULL, 140400199340031, 140400266313727,
+STORE, 140400199204864, 140400199340031,
+STORE, 140400199340032, 140400266313727,
+STORE, 140400627003392, 140400643788799,
+SNULL, 140400316641280, 140400325029887,
+STORE, 140400325029888, 140400333422591,
+STORE, 140400316641280, 140400325029887,
+SNULL, 140400325033983, 140400333422591,
+STORE, 140400325029888, 140400325033983,
+STORE, 140400325033984, 140400333422591,
+SNULL, 140400627003392, 140400635396095,
+STORE, 140400635396096, 140400643788799,
+STORE, 140400627003392, 140400635396095,
+SNULL, 140400635400191, 140400643788799,
+STORE, 140400635396096, 140400635400191,
+STORE, 140400635400192, 140400643788799,
+SNULL, 140400434073599, 140400442462207,
+STORE, 140400434069504, 140400434073599,
+STORE, 140400434073600, 140400442462207,
+STORE, 140400618610688, 140400635396095,
+STORE, 140400610217984, 140400635396095,
+SNULL, 140400954159104, 140400962547711,
+STORE, 140400962547712, 140400987725823,
+STORE, 140400954159104, 140400962547711,
+SNULL, 140400962551807, 140400987725823,
+STORE, 140400962547712, 140400962551807,
+STORE, 140400962551808, 140400987725823,
+SNULL, 140400299855872, 140400308244479,
+STORE, 140400308244480, 140400316637183,
+STORE, 140400299855872, 140400308244479,
+SNULL, 140400308248575, 140400316637183,
+STORE, 140400308244480, 140400308248575,
+STORE, 140400308248576, 140400316637183,
+STORE, 140400601825280, 140400635396095,
+SNULL, 140400601829375, 140400635396095,
+STORE, 140400601825280, 140400601829375,
+STORE, 140400601829376, 140400635396095,
+STORE, 140400576679936, 140400593465343,
+SNULL, 140400576684031, 140400593465343,
+STORE, 140400576679936, 140400576684031,
+STORE, 140400576684032, 140400593465343,
+SNULL, 140400643792896, 140400652181503,
+STORE, 140400652181504, 140400668966911,
+STORE, 140400643792896, 140400652181503,
+SNULL, 140400652185599, 140400668966911,
+STORE, 140400652181504, 140400652185599,
+STORE, 140400652185600, 140400668966911,
+STORE, 140399595225088, 140399796551679,
+SNULL, 140399662333951, 140399796551679,
+STORE, 140399595225088, 140399662333951,
+STORE, 140399662333952, 140399796551679,
+SNULL, 140399662333952, 140399729442815,
+STORE, 140399729442816, 140399796551679,
+STORE, 140399662333952, 140399729442815,
+ERASE, 140399662333952, 140399729442815,
+SNULL, 140399863795711, 140399930769407,
+STORE, 140399863660544, 140399863795711,
+STORE, 140399863795712, 140399930769407,
+STORE, 140400568287232, 140400576679935,
+SNULL, 140400568291327, 140400576679935,
+STORE, 140400568287232, 140400568291327,
+STORE, 140400568291328, 140400576679935,
+SNULL, 140400467775487, 140400534749183,
+STORE, 140400467640320, 140400467775487,
+STORE, 140400467775488, 140400534749183,
+SNULL, 140399729577983, 140399796551679,
+STORE, 140399729442816, 140399729577983,
+STORE, 140399729577984, 140399796551679,
+SNULL, 140400601829376, 140400627003391,
+STORE, 140400627003392, 140400635396095,
+STORE, 140400601829376, 140400627003391,
+SNULL, 140400627007487, 140400635396095,
+STORE, 140400627003392, 140400627007487,
+STORE, 140400627007488, 140400635396095,
+STORE, 140400559894528, 140400568287231,
+STORE, 140400551501824, 140400568287231,
+STORE, 140400543109120, 140400568287231,
+STORE, 140400459247616, 140400467640319,
+STORE, 140400442466304, 140400467640319,
+SNULL, 140399595360255, 140399662333951,
+STORE, 140399595225088, 140399595360255,
+STORE, 140399595360256, 140399662333951,
+SNULL, 140400962551808, 140400970940415,
+STORE, 140400970940416, 140400987725823,
+STORE, 140400962551808, 140400970940415,
+SNULL, 140400970944511, 140400987725823,
+STORE, 140400970940416, 140400970944511,
+STORE, 140400970944512, 140400987725823,
+SNULL, 140400652185600, 140400660574207,
+STORE, 140400660574208, 140400668966911,
+STORE, 140400652185600, 140400660574207,
+SNULL, 140400660578303, 140400668966911,
+STORE, 140400660574208, 140400660578303,
+STORE, 140400660578304, 140400668966911,
+SNULL, 140400576684032, 140400585072639,
+STORE, 140400585072640, 140400593465343,
+STORE, 140400576684032, 140400585072639,
+SNULL, 140400585076735, 140400593465343,
+STORE, 140400585072640, 140400585076735,
+STORE, 140400585076736, 140400593465343,
+STORE, 140400425676800, 140400434069503,
+STORE, 140400417284096, 140400434069503,
+STORE, 140400408891392, 140400434069503,
+SNULL, 140400408891392, 140400417284095,
+STORE, 140400417284096, 140400434069503,
+STORE, 140400408891392, 140400417284095,
+SNULL, 140400417288191, 140400434069503,
+STORE, 140400417284096, 140400417288191,
+STORE, 140400417288192, 140400434069503,
+STORE, 140400283066368, 140400291459071,
+SNULL, 140400601829376, 140400618610687,
+STORE, 140400618610688, 140400627003391,
+STORE, 140400601829376, 140400618610687,
+SNULL, 140400618614783, 140400627003391,
+STORE, 140400618610688, 140400618614783,
+STORE, 140400618614784, 140400627003391,
+SNULL, 140400601829376, 140400610217983,
+STORE, 140400610217984, 140400618610687,
+STORE, 140400601829376, 140400610217983,
+SNULL, 140400610222079, 140400618610687,
+STORE, 140400610217984, 140400610222079,
+STORE, 140400610222080, 140400618610687,
+STORE, 140400274673664, 140400291459071,
+STORE, 140400190812160, 140400199204863,
+STORE, 140400182419456, 140400199204863,
+SNULL, 140400442466304, 140400450854911,
+STORE, 140400450854912, 140400467640319,
+STORE, 140400442466304, 140400450854911,
+SNULL, 140400450859007, 140400467640319,
+STORE, 140400450854912, 140400450859007,
+STORE, 140400450859008, 140400467640319,
+SNULL, 140400543109120, 140400559894527,
+STORE, 140400559894528, 140400568287231,
+STORE, 140400543109120, 140400559894527,
+SNULL, 140400559898623, 140400568287231,
+STORE, 140400559894528, 140400559898623,
+STORE, 140400559898624, 140400568287231,
+SNULL, 140400450859008, 140400459247615,
+STORE, 140400459247616, 140400467640319,
+STORE, 140400450859008, 140400459247615,
+SNULL, 140400459251711, 140400467640319,
+STORE, 140400459247616, 140400459251711,
+STORE, 140400459251712, 140400467640319,
+SNULL, 140400543113215, 140400559894527,
+STORE, 140400543109120, 140400543113215,
+STORE, 140400543113216, 140400559894527,
+SNULL, 140400970944512, 140400979333119,
+STORE, 140400979333120, 140400987725823,
+STORE, 140400970944512, 140400979333119,
+SNULL, 140400979337215, 140400987725823,
+STORE, 140400979333120, 140400979337215,
+STORE, 140400979337216, 140400987725823,
+STORE, 140400174026752, 140400199204863,
+SNULL, 140400174030847, 140400199204863,
+STORE, 140400174026752, 140400174030847,
+STORE, 140400174030848, 140400199204863,
+SNULL, 140400274673664, 140400283066367,
+STORE, 140400283066368, 140400291459071,
+STORE, 140400274673664, 140400283066367,
+SNULL, 140400283070463, 140400291459071,
+STORE, 140400283066368, 140400283070463,
+STORE, 140400283070464, 140400291459071,
+STORE, 140400165634048, 140400174026751,
+SNULL, 140400165638143, 140400174026751,
+STORE, 140400165634048, 140400165638143,
+STORE, 140400165638144, 140400174026751,
+SNULL, 140400174030848, 140400182419455,
+STORE, 140400182419456, 140400199204863,
+STORE, 140400174030848, 140400182419455,
+SNULL, 140400182423551, 140400199204863,
+STORE, 140400182419456, 140400182423551,
+STORE, 140400182423552, 140400199204863,
+SNULL, 140400182423552, 140400190812159,
+STORE, 140400190812160, 140400199204863,
+STORE, 140400182423552, 140400190812159,
+SNULL, 140400190816255, 140400199204863,
+STORE, 140400190812160, 140400190816255,
+STORE, 140400190816256, 140400199204863,
+STORE, 140400157241344, 140400165634047,
+SNULL, 140400157245439, 140400165634047,
+STORE, 140400157241344, 140400157245439,
+STORE, 140400157245440, 140400165634047,
+SNULL, 140400408895487, 140400417284095,
+STORE, 140400408891392, 140400408895487,
+STORE, 140400408895488, 140400417284095,
+SNULL, 140400417288192, 140400425676799,
+STORE, 140400425676800, 140400434069503,
+STORE, 140400417288192, 140400425676799,
+SNULL, 140400425680895, 140400434069503,
+STORE, 140400425676800, 140400425680895,
+STORE, 140400425680896, 140400434069503,
+STORE, 140400148848640, 140400157241343,
+SNULL, 140400148852735, 140400157241343,
+STORE, 140400148848640, 140400148852735,
+STORE, 140400148852736, 140400157241343,
+SNULL, 140400543113216, 140400551501823,
+STORE, 140400551501824, 140400559894527,
+STORE, 140400543113216, 140400551501823,
+SNULL, 140400551505919, 140400559894527,
+STORE, 140400551501824, 140400551505919,
+STORE, 140400551505920, 140400559894527,
+STORE, 140400140455936, 140400148848639,
+STORE, 140400048201728, 140400056594431,
+SNULL, 140400140460031, 140400148848639,
+STORE, 140400140455936, 140400140460031,
+STORE, 140400140460032, 140400148848639,
+STORE, 140400039809024, 140400056594431,
+SNULL, 140400039813119, 140400056594431,
+STORE, 140400039809024, 140400039813119,
+STORE, 140400039813120, 140400056594431,
+STORE, 140400031416320, 140400039809023,
+STORE, 140400023023616, 140400039809023,
+SNULL, 140400274677759, 140400283066367,
+STORE, 140400274673664, 140400274677759,
+STORE, 140400274677760, 140400283066367,
+STORE, 140400014630912, 140400039809023,
+STORE, 140400006238208, 140400039809023,
+STORE, 140399997845504, 140400039809023,
+SNULL, 140399997849599, 140400039809023,
+STORE, 140399997845504, 140399997849599,
+STORE, 140399997849600, 140400039809023,
+STORE, 140399989452800, 140399997845503,
+SNULL, 140399989456895, 140399997845503,
+STORE, 140399989452800, 140399989456895,
+STORE, 140399989456896, 140399997845503,
+STORE, 140399981060096, 140399989452799,
+SNULL, 140399981064191, 140399989452799,
+STORE, 140399981060096, 140399981064191,
+STORE, 140399981064192, 140399989452799,
+STORE, 140399972667392, 140399981060095,
+STORE, 140399964274688, 140399981060095,
+SNULL, 140399964278783, 140399981060095,
+STORE, 140399964274688, 140399964278783,
+STORE, 140399964278784, 140399981060095,
+SNULL, 140400039813120, 140400048201727,
+STORE, 140400048201728, 140400056594431,
+STORE, 140400039813120, 140400048201727,
+SNULL, 140400048205823, 140400056594431,
+STORE, 140400048201728, 140400048205823,
+STORE, 140400048205824, 140400056594431,
+SNULL, 140399997849600, 140400031416319,
+STORE, 140400031416320, 140400039809023,
+STORE, 140399997849600, 140400031416319,
+SNULL, 140400031420415, 140400039809023,
+STORE, 140400031416320, 140400031420415,
+STORE, 140400031420416, 140400039809023,
+STORE, 140399955881984, 140399964274687,
+SNULL, 140399955886079, 140399964274687,
+STORE, 140399955881984, 140399955886079,
+STORE, 140399955886080, 140399964274687,
+STORE, 140399947489280, 140399955881983,
+STORE, 140399939096576, 140399955881983,
+STORE, 140399855267840, 140399863660543,
+SNULL, 140399939100671, 140399955881983,
+STORE, 140399939096576, 140399939100671,
+STORE, 140399939100672, 140399955881983,
+SNULL, 140399997849600, 140400014630911,
+STORE, 140400014630912, 140400031416319,
+STORE, 140399997849600, 140400014630911,
+SNULL, 140400014635007, 140400031416319,
+STORE, 140400014630912, 140400014635007,
+STORE, 140400014635008, 140400031416319,
+SNULL, 140400014635008, 140400023023615,
+STORE, 140400023023616, 140400031416319,
+STORE, 140400014635008, 140400023023615,
+SNULL, 140400023027711, 140400031416319,
+STORE, 140400023023616, 140400023027711,
+STORE, 140400023027712, 140400031416319,
+SNULL, 140399997849600, 140400006238207,
+STORE, 140400006238208, 140400014630911,
+STORE, 140399997849600, 140400006238207,
+SNULL, 140400006242303, 140400014630911,
+STORE, 140400006238208, 140400006242303,
+STORE, 140400006242304, 140400014630911,
+STORE, 140399846875136, 140399863660543,
+STORE, 140399838482432, 140399863660543,
+SNULL, 140399838486527, 140399863660543,
+STORE, 140399838482432, 140399838486527,
+STORE, 140399838486528, 140399863660543,
+SNULL, 140399939100672, 140399947489279,
+STORE, 140399947489280, 140399955881983,
+STORE, 140399939100672, 140399947489279,
+SNULL, 140399947493375, 140399955881983,
+STORE, 140399947489280, 140399947493375,
+STORE, 140399947493376, 140399955881983,
+SNULL, 140399964278784, 140399972667391,
+STORE, 140399972667392, 140399981060095,
+STORE, 140399964278784, 140399972667391,
+SNULL, 140399972671487, 140399981060095,
+STORE, 140399972667392, 140399972671487,
+STORE, 140399972671488, 140399981060095,
+SNULL, 140399838486528, 140399855267839,
+STORE, 140399855267840, 140399863660543,
+STORE, 140399838486528, 140399855267839,
+SNULL, 140399855271935, 140399863660543,
+STORE, 140399855267840, 140399855271935,
+STORE, 140399855271936, 140399863660543,
+STORE, 140399830089728, 140399838482431,
+SNULL, 140399830093823, 140399838482431,
+STORE, 140399830089728, 140399830093823,
+STORE, 140399830093824, 140399838482431,
+STORE, 140399821697024, 140399830089727,
+SNULL, 140399821701119, 140399830089727,
+STORE, 140399821697024, 140399821701119,
+STORE, 140399821701120, 140399830089727,
+SNULL, 140399838486528, 140399846875135,
+STORE, 140399846875136, 140399855267839,
+STORE, 140399838486528, 140399846875135,
+SNULL, 140399846879231, 140399855267839,
+STORE, 140399846875136, 140399846879231,
+STORE, 140399846879232, 140399855267839,
+STORE, 140399813304320, 140399821697023,
+STORE, 140399804911616, 140399821697023,
+SNULL, 140399804915711, 140399821697023,
+STORE, 140399804911616, 140399804915711,
+STORE, 140399804915712, 140399821697023,
+STORE, 140399721050112, 140399729442815,
+SNULL, 140399804915712, 140399813304319,
+STORE, 140399813304320, 140399821697023,
+STORE, 140399804915712, 140399813304319,
+SNULL, 140399813308415, 140399821697023,
+STORE, 140399813304320, 140399813308415,
+STORE, 140399813308416, 140399821697023,
+SNULL, 140399721054207, 140399729442815,
+STORE, 140399721050112, 140399721054207,
+STORE, 140399721054208, 140399729442815,
+STORE, 140401467105280, 140401467133951,
+STORE, 140401279115264, 140401281306623,
+SNULL, 140401279115264, 140401279205375,
+STORE, 140401279205376, 140401281306623,
+STORE, 140401279115264, 140401279205375,
+SNULL, 140401281298431, 140401281306623,
+STORE, 140401279205376, 140401281298431,
+STORE, 140401281298432, 140401281306623,
+ERASE, 140401281298432, 140401281306623,
+STORE, 140401281298432, 140401281306623,
+SNULL, 140401281302527, 140401281306623,
+STORE, 140401281298432, 140401281302527,
+STORE, 140401281302528, 140401281306623,
+ERASE, 140401467105280, 140401467133951,
+ERASE, 140400056594432, 140400056598527,
+ERASE, 140400056598528, 140400064987135,
+ERASE, 140400635396096, 140400635400191,
+ERASE, 140400635400192, 140400643788799,
+ERASE, 140400408891392, 140400408895487,
+ERASE, 140400408895488, 140400417284095,
+ERASE, 140400299851776, 140400299855871,
+ERASE, 140400299855872, 140400308244479,
+ERASE, 140400627003392, 140400627007487,
+ERASE, 140400627007488, 140400635396095,
+ERASE, 140400954155008, 140400954159103,
+ERASE, 140400954159104, 140400962547711,
+ERASE, 140400291459072, 140400291463167,
+ERASE, 140400291463168, 140400299851775,
+ERASE, 140400643788800, 140400643792895,
+ERASE, 140400643792896, 140400652181503,
+ERASE, 140400325029888, 140400325033983,
+ERASE, 140400325033984, 140400333422591,
+ERASE, 140400610217984, 140400610222079,
+ERASE, 140400610222080, 140400618610687,
+ERASE, 140400190812160, 140400190816255,
+ERASE, 140400190816256, 140400199204863,
+ERASE, 140399964274688, 140399964278783,
+ERASE, 140399964278784, 140399972667391,
+ERASE, 140400945762304, 140400945766399,
+ERASE, 140400945766400, 140400954155007,
+ERASE, 140400568287232, 140400568291327,
+ERASE, 140400568291328, 140400576679935,
+ERASE, 140399972667392, 140399972671487,
+ERASE, 140399972671488, 140399981060095,
+ERASE, 140400962547712, 140400962551807,
+ERASE, 140400962551808, 140400970940415,
+ERASE, 140400987725824, 140400987729919,
+ERASE, 140400987729920, 140400996118527,
+ERASE, 140400652181504, 140400652185599,
+ERASE, 140400652185600, 140400660574207,
+ERASE, 140400450854912, 140400450859007,
+ERASE, 140400450859008, 140400459247615,
+ERASE, 140400031416320, 140400031420415,
+ERASE, 140400031420416, 140400039809023,
+ERASE, 140400308244480, 140400308248575,
+ERASE, 140400308248576, 140400316637183,
+ERASE, 140400434069504, 140400434073599,
+ERASE, 140400434073600, 140400442462207,
+ERASE, 140400543109120, 140400543113215,
+ERASE, 140400543113216, 140400551501823,
+ERASE, 140400023023616, 140400023027711,
+ERASE, 140400023027712, 140400031416319,
+ERASE, 140399813304320, 140399813308415,
+ERASE, 140399813308416, 140399821697023,
+ERASE, 140400316637184, 140400316641279,
+ERASE, 140400316641280, 140400325029887,
+ERASE, 140400585072640, 140400585076735,
+ERASE, 140400585076736, 140400593465343,
+ERASE, 140400148848640, 140400148852735,
+ERASE, 140400148852736, 140400157241343,
+ERASE, 140399955881984, 140399955886079,
+ERASE, 140399955886080, 140399964274687,
+ERASE, 140399821697024, 140399821701119,
+ERASE, 140399821701120, 140399830089727,
+ERASE, 140400601825280, 140400601829375,
+ERASE, 140400601829376, 140400610217983,
+ERASE, 140400979333120, 140400979337215,
+ERASE, 140400979337216, 140400987725823,
+ERASE, 140399997845504, 140399997849599,
+ERASE, 140399997849600, 140400006238207,
+ERASE, 140400459247616, 140400459251711,
+ERASE, 140400459251712, 140400467640319,
+ERASE, 140400551501824, 140400551505919,
+ERASE, 140400551505920, 140400559894527,
+ERASE, 140399939096576, 140399939100671,
+ERASE, 140399939100672, 140399947489279,
+ERASE, 140400442462208, 140400442466303,
+ERASE, 140400442466304, 140400450854911,
+ERASE, 140400576679936, 140400576684031,
+ERASE, 140400576684032, 140400585072639,
+ERASE, 140400559894528, 140400559898623,
+ERASE, 140400559898624, 140400568287231,
+ERASE, 140400417284096, 140400417288191,
+ERASE, 140400417288192, 140400425676799,
+ERASE, 140400283066368, 140400283070463,
+ERASE, 140400283070464, 140400291459071,
+ };
+ static const unsigned long set33[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140734562918400, 140737488351231,
+SNULL, 140734562922495, 140737488351231,
+STORE, 140734562918400, 140734562922495,
+STORE, 140734562787328, 140734562922495,
+STORE, 94133878984704, 94133881237503,
+SNULL, 94133879115775, 94133881237503,
+STORE, 94133878984704, 94133879115775,
+STORE, 94133879115776, 94133881237503,
+ERASE, 94133879115776, 94133881237503,
+STORE, 94133881208832, 94133881217023,
+STORE, 94133881217024, 94133881237503,
+STORE, 140583654043648, 140583656296447,
+SNULL, 140583654187007, 140583656296447,
+STORE, 140583654043648, 140583654187007,
+STORE, 140583654187008, 140583656296447,
+ERASE, 140583654187008, 140583656296447,
+STORE, 140583656284160, 140583656292351,
+STORE, 140583656292352, 140583656296447,
+STORE, 140734564319232, 140734564323327,
+STORE, 140734564306944, 140734564319231,
+STORE, 140583656255488, 140583656284159,
+STORE, 140583656247296, 140583656255487,
+STORE, 140583651827712, 140583654043647,
+SNULL, 140583651827712, 140583651926015,
+STORE, 140583651926016, 140583654043647,
+STORE, 140583651827712, 140583651926015,
+SNULL, 140583654019071, 140583654043647,
+STORE, 140583651926016, 140583654019071,
+STORE, 140583654019072, 140583654043647,
+SNULL, 140583654019072, 140583654027263,
+STORE, 140583654027264, 140583654043647,
+STORE, 140583654019072, 140583654027263,
+ERASE, 140583654019072, 140583654027263,
+STORE, 140583654019072, 140583654027263,
+ERASE, 140583654027264, 140583654043647,
+STORE, 140583654027264, 140583654043647,
+STORE, 140583648030720, 140583651827711,
+SNULL, 140583648030720, 140583649689599,
+STORE, 140583649689600, 140583651827711,
+STORE, 140583648030720, 140583649689599,
+SNULL, 140583651786751, 140583651827711,
+STORE, 140583649689600, 140583651786751,
+STORE, 140583651786752, 140583651827711,
+SNULL, 140583651786752, 140583651811327,
+STORE, 140583651811328, 140583651827711,
+STORE, 140583651786752, 140583651811327,
+ERASE, 140583651786752, 140583651811327,
+STORE, 140583651786752, 140583651811327,
+ERASE, 140583651811328, 140583651827711,
+STORE, 140583651811328, 140583651827711,
+STORE, 140583656239104, 140583656255487,
+SNULL, 140583651803135, 140583651811327,
+STORE, 140583651786752, 140583651803135,
+STORE, 140583651803136, 140583651811327,
+SNULL, 140583654023167, 140583654027263,
+STORE, 140583654019072, 140583654023167,
+STORE, 140583654023168, 140583654027263,
+SNULL, 94133881212927, 94133881217023,
+STORE, 94133881208832, 94133881212927,
+STORE, 94133881212928, 94133881217023,
+SNULL, 140583656288255, 140583656292351,
+STORE, 140583656284160, 140583656288255,
+STORE, 140583656288256, 140583656292351,
+ERASE, 140583656255488, 140583656284159,
+STORE, 94133881733120, 94133881868287,
+STORE, 140583639638016, 140583648030719,
+SNULL, 140583639642111, 140583648030719,
+STORE, 140583639638016, 140583639642111,
+STORE, 140583639642112, 140583648030719,
+STORE, 140583631245312, 140583639638015,
+STORE, 140583497027584, 140583631245311,
+SNULL, 140583497027584, 140583540621311,
+STORE, 140583540621312, 140583631245311,
+STORE, 140583497027584, 140583540621311,
+ERASE, 140583497027584, 140583540621311,
+SNULL, 140583607730175, 140583631245311,
+STORE, 140583540621312, 140583607730175,
+STORE, 140583607730176, 140583631245311,
+ERASE, 140583607730176, 140583631245311,
+SNULL, 140583540756479, 140583607730175,
+STORE, 140583540621312, 140583540756479,
+STORE, 140583540756480, 140583607730175,
+SNULL, 140583631249407, 140583639638015,
+STORE, 140583631245312, 140583631249407,
+STORE, 140583631249408, 140583639638015,
+STORE, 140583622852608, 140583631245311,
+SNULL, 140583622856703, 140583631245311,
+STORE, 140583622852608, 140583622856703,
+STORE, 140583622856704, 140583631245311,
+STORE, 140583614459904, 140583622852607,
+SNULL, 140583614463999, 140583622852607,
+STORE, 140583614459904, 140583614463999,
+STORE, 140583614464000, 140583622852607,
+STORE, 140583532228608, 140583540621311,
+SNULL, 140583532232703, 140583540621311,
+STORE, 140583532228608, 140583532232703,
+STORE, 140583532232704, 140583540621311,
+STORE, 140583523835904, 140583532228607,
+STORE, 140583515443200, 140583532228607,
+STORE, 140583507050496, 140583532228607,
+STORE, 140583372832768, 140583507050495,
+STORE, 140583364440064, 140583372832767,
+STORE, 140583230222336, 140583364440063,
+STORE, 140583096004608, 140583364440063,
+SNULL, 140583230222335, 140583364440063,
+STORE, 140583096004608, 140583230222335,
+STORE, 140583230222336, 140583364440063,
+SNULL, 140583230222336, 140583272185855,
+STORE, 140583272185856, 140583364440063,
+STORE, 140583230222336, 140583272185855,
+ERASE, 140583230222336, 140583272185855,
+STORE, 140582961786880, 140583230222335,
+SNULL, 140583372832768, 140583406403583,
+STORE, 140583406403584, 140583507050495,
+STORE, 140583372832768, 140583406403583,
+ERASE, 140583372832768, 140583406403583,
+SNULL, 140583473512447, 140583507050495,
+STORE, 140583406403584, 140583473512447,
+STORE, 140583473512448, 140583507050495,
+ERASE, 140583473512448, 140583507050495,
+SNULL, 140583096004607, 140583230222335,
+STORE, 140582961786880, 140583096004607,
+STORE, 140583096004608, 140583230222335,
+SNULL, 140583096004608, 140583137968127,
+STORE, 140583137968128, 140583230222335,
+STORE, 140583096004608, 140583137968127,
+ERASE, 140583096004608, 140583137968127,
+SNULL, 140583339294719, 140583364440063,
+STORE, 140583272185856, 140583339294719,
+STORE, 140583339294720, 140583364440063,
+ERASE, 140583339294720, 140583364440063,
+SNULL, 140583272321023, 140583339294719,
+STORE, 140583272185856, 140583272321023,
+STORE, 140583272321024, 140583339294719,
+SNULL, 140582961786880, 140583003750399,
+STORE, 140583003750400, 140583096004607,
+STORE, 140582961786880, 140583003750399,
+ERASE, 140582961786880, 140583003750399,
+ };
+ static const unsigned long set34[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140731327180800, 140737488351231,
+SNULL, 140731327184895, 140737488351231,
+STORE, 140731327180800, 140731327184895,
+STORE, 140731327049728, 140731327184895,
+STORE, 94632924487680, 94632926740479,
+SNULL, 94632924618751, 94632926740479,
+STORE, 94632924487680, 94632924618751,
+STORE, 94632924618752, 94632926740479,
+ERASE, 94632924618752, 94632926740479,
+STORE, 94632926711808, 94632926719999,
+STORE, 94632926720000, 94632926740479,
+STORE, 140012544888832, 140012547141631,
+SNULL, 140012545032191, 140012547141631,
+STORE, 140012544888832, 140012545032191,
+STORE, 140012545032192, 140012547141631,
+ERASE, 140012545032192, 140012547141631,
+STORE, 140012547129344, 140012547137535,
+STORE, 140012547137536, 140012547141631,
+STORE, 140731327725568, 140731327729663,
+STORE, 140731327713280, 140731327725567,
+STORE, 140012547100672, 140012547129343,
+STORE, 140012547092480, 140012547100671,
+STORE, 140012542672896, 140012544888831,
+SNULL, 140012542672896, 140012542771199,
+STORE, 140012542771200, 140012544888831,
+STORE, 140012542672896, 140012542771199,
+SNULL, 140012544864255, 140012544888831,
+STORE, 140012542771200, 140012544864255,
+STORE, 140012544864256, 140012544888831,
+SNULL, 140012544864256, 140012544872447,
+STORE, 140012544872448, 140012544888831,
+STORE, 140012544864256, 140012544872447,
+ERASE, 140012544864256, 140012544872447,
+STORE, 140012544864256, 140012544872447,
+ERASE, 140012544872448, 140012544888831,
+STORE, 140012544872448, 140012544888831,
+STORE, 140012538875904, 140012542672895,
+SNULL, 140012538875904, 140012540534783,
+STORE, 140012540534784, 140012542672895,
+STORE, 140012538875904, 140012540534783,
+SNULL, 140012542631935, 140012542672895,
+STORE, 140012540534784, 140012542631935,
+STORE, 140012542631936, 140012542672895,
+SNULL, 140012542631936, 140012542656511,
+STORE, 140012542656512, 140012542672895,
+STORE, 140012542631936, 140012542656511,
+ERASE, 140012542631936, 140012542656511,
+STORE, 140012542631936, 140012542656511,
+ERASE, 140012542656512, 140012542672895,
+STORE, 140012542656512, 140012542672895,
+STORE, 140012547084288, 140012547100671,
+SNULL, 140012542648319, 140012542656511,
+STORE, 140012542631936, 140012542648319,
+STORE, 140012542648320, 140012542656511,
+SNULL, 140012544868351, 140012544872447,
+STORE, 140012544864256, 140012544868351,
+STORE, 140012544868352, 140012544872447,
+SNULL, 94632926715903, 94632926719999,
+STORE, 94632926711808, 94632926715903,
+STORE, 94632926715904, 94632926719999,
+SNULL, 140012547133439, 140012547137535,
+STORE, 140012547129344, 140012547133439,
+STORE, 140012547133440, 140012547137535,
+ERASE, 140012547100672, 140012547129343,
+STORE, 94632939606016, 94632939741183,
+STORE, 140012530483200, 140012538875903,
+SNULL, 140012530487295, 140012538875903,
+STORE, 140012530483200, 140012530487295,
+STORE, 140012530487296, 140012538875903,
+STORE, 140012522090496, 140012530483199,
+STORE, 140012387872768, 140012522090495,
+SNULL, 140012387872768, 140012444188671,
+STORE, 140012444188672, 140012522090495,
+STORE, 140012387872768, 140012444188671,
+ERASE, 140012387872768, 140012444188671,
+SNULL, 140012511297535, 140012522090495,
+STORE, 140012444188672, 140012511297535,
+STORE, 140012511297536, 140012522090495,
+ERASE, 140012511297536, 140012522090495,
+SNULL, 140012444323839, 140012511297535,
+STORE, 140012444188672, 140012444323839,
+STORE, 140012444323840, 140012511297535,
+SNULL, 140012522094591, 140012530483199,
+STORE, 140012522090496, 140012522094591,
+STORE, 140012522094592, 140012530483199,
+STORE, 140012513697792, 140012522090495,
+SNULL, 140012513701887, 140012522090495,
+STORE, 140012513697792, 140012513701887,
+STORE, 140012513701888, 140012522090495,
+STORE, 140012435795968, 140012444188671,
+SNULL, 140012435800063, 140012444188671,
+STORE, 140012435795968, 140012435800063,
+STORE, 140012435800064, 140012444188671,
+STORE, 140012427403264, 140012435795967,
+SNULL, 140012427407359, 140012435795967,
+STORE, 140012427403264, 140012427407359,
+STORE, 140012427407360, 140012435795967,
+STORE, 140012419010560, 140012427403263,
+STORE, 140012410617856, 140012427403263,
+STORE, 140012276400128, 140012410617855,
+STORE, 140012268007424, 140012276400127,
+STORE, 140012133789696, 140012268007423,
+SNULL, 140012133789696, 140012175753215,
+STORE, 140012175753216, 140012268007423,
+STORE, 140012133789696, 140012175753215,
+ERASE, 140012133789696, 140012175753215,
+STORE, 140012041535488, 140012268007423,
+SNULL, 140012108644351, 140012268007423,
+STORE, 140012041535488, 140012108644351,
+STORE, 140012108644352, 140012268007423,
+SNULL, 140012108644352, 140012175753215,
+STORE, 140012175753216, 140012268007423,
+STORE, 140012108644352, 140012175753215,
+ERASE, 140012108644352, 140012175753215,
+SNULL, 140012276400128, 140012309970943,
+STORE, 140012309970944, 140012410617855,
+STORE, 140012276400128, 140012309970943,
+ERASE, 140012276400128, 140012309970943,
+STORE, 140012301578240, 140012309970943,
+STORE, 140012041535488, 140012268007423,
+SNULL, 140012242862079, 140012268007423,
+STORE, 140012041535488, 140012242862079,
+STORE, 140012242862080, 140012268007423,
+ERASE, 140012242862080, 140012268007423,
+SNULL, 140012041670655, 140012242862079,
+STORE, 140012041535488, 140012041670655,
+STORE, 140012041670656, 140012242862079,
+SNULL, 140012041670656, 140012108644351,
+STORE, 140012108644352, 140012242862079,
+STORE, 140012041670656, 140012108644351,
+SNULL, 140012108779519, 140012242862079,
+STORE, 140012108644352, 140012108779519,
+STORE, 140012108779520, 140012242862079,
+SNULL, 140012377079807, 140012410617855,
+STORE, 140012309970944, 140012377079807,
+STORE, 140012377079808, 140012410617855,
+ERASE, 140012377079808, 140012410617855,
+SNULL, 140012310106111, 140012377079807,
+STORE, 140012309970944, 140012310106111,
+STORE, 140012310106112, 140012377079807,
+SNULL, 140012410621951, 140012427403263,
+STORE, 140012410617856, 140012410621951,
+STORE, 140012410621952, 140012427403263,
+SNULL, 140012108779520, 140012175753215,
+STORE, 140012175753216, 140012242862079,
+STORE, 140012108779520, 140012175753215,
+SNULL, 140012175888383, 140012242862079,
+STORE, 140012175753216, 140012175888383,
+STORE, 140012175888384, 140012242862079,
+SNULL, 140012301582335, 140012309970943,
+STORE, 140012301578240, 140012301582335,
+STORE, 140012301582336, 140012309970943,
+SNULL, 140012410621952, 140012419010559,
+STORE, 140012419010560, 140012427403263,
+STORE, 140012410621952, 140012419010559,
+SNULL, 140012419014655, 140012427403263,
+STORE, 140012419010560, 140012419014655,
+STORE, 140012419014656, 140012427403263,
+SNULL, 140012268011519, 140012276400127,
+STORE, 140012268007424, 140012268011519,
+STORE, 140012268011520, 140012276400127,
+STORE, 140012402225152, 140012410617855,
+STORE, 140012393832448, 140012410617855,
+SNULL, 140012393832448, 140012402225151,
+STORE, 140012402225152, 140012410617855,
+STORE, 140012393832448, 140012402225151,
+SNULL, 140012402229247, 140012410617855,
+STORE, 140012402225152, 140012402229247,
+STORE, 140012402229248, 140012410617855,
+STORE, 140012385439744, 140012402225151,
+SNULL, 140012385439744, 140012393832447,
+STORE, 140012393832448, 140012402225151,
+STORE, 140012385439744, 140012393832447,
+SNULL, 140012393836543, 140012402225151,
+STORE, 140012393832448, 140012393836543,
+STORE, 140012393836544, 140012402225151,
+STORE, 140012293185536, 140012301578239,
+STORE, 140012284792832, 140012301578239,
+SNULL, 140012284792832, 140012293185535,
+STORE, 140012293185536, 140012301578239,
+STORE, 140012284792832, 140012293185535,
+SNULL, 140012293189631, 140012301578239,
+STORE, 140012293185536, 140012293189631,
+STORE, 140012293189632, 140012301578239,
+STORE, 140012268011520, 140012284792831,
+SNULL, 140012385443839, 140012393832447,
+STORE, 140012385439744, 140012385443839,
+STORE, 140012385443840, 140012393832447,
+STORE, 140012259614720, 140012268007423,
+SNULL, 140012259618815, 140012268007423,
+STORE, 140012259614720, 140012259618815,
+STORE, 140012259618816, 140012268007423,
+STORE, 140012251222016, 140012259614719,
+SNULL, 140012251226111, 140012259614719,
+STORE, 140012251222016, 140012251226111,
+STORE, 140012251226112, 140012259614719,
+SNULL, 140012284796927, 140012293185535,
+STORE, 140012284792832, 140012284796927,
+STORE, 140012284796928, 140012293185535,
+SNULL, 140012268011520, 140012276400127,
+STORE, 140012276400128, 140012284792831,
+STORE, 140012268011520, 140012276400127,
+SNULL, 140012276404223, 140012284792831,
+STORE, 140012276400128, 140012276404223,
+STORE, 140012276404224, 140012284792831,
+STORE, 140012033142784, 140012041535487,
+SNULL, 140012033146879, 140012041535487,
+STORE, 140012033142784, 140012033146879,
+STORE, 140012033146880, 140012041535487,
+STORE, 140012024750080, 140012033142783,
+STORE, 140012016357376, 140012033142783,
+SNULL, 140012016357376, 140012024750079,
+STORE, 140012024750080, 140012033142783,
+STORE, 140012016357376, 140012024750079,
+SNULL, 140012024754175, 140012033142783,
+STORE, 140012024750080, 140012024754175,
+STORE, 140012024754176, 140012033142783,
+SNULL, 140012016361471, 140012024750079,
+STORE, 140012016357376, 140012016361471,
+STORE, 140012016361472, 140012024750079,
+STORE, 140012007964672, 140012016357375,
+SNULL, 140012007968767, 140012016357375,
+STORE, 140012007964672, 140012007968767,
+STORE, 140012007968768, 140012016357375,
+STORE, 140011999571968, 140012007964671,
+STORE, 140011991179264, 140012007964671,
+STORE, 140011856961536, 140011991179263,
+STORE, 140011848568832, 140011856961535,
+STORE, 140011714351104, 140011848568831,
+SNULL, 140011714351104, 140011773100031,
+STORE, 140011773100032, 140011848568831,
+STORE, 140011714351104, 140011773100031,
+ERASE, 140011714351104, 140011773100031,
+STORE, 140011764707328, 140011773100031,
+STORE, 140011756314624, 140011773100031,
+STORE, 140011622096896, 140011756314623,
+STORE, 140011613704192, 140011622096895,
+STORE, 140011479486464, 140011613704191,
+STORE, 140011471093760, 140011479486463,
+SNULL, 140011479486464, 140011504664575,
+STORE, 140011504664576, 140011613704191,
+STORE, 140011479486464, 140011504664575,
+ERASE, 140011479486464, 140011504664575,
+STORE, 140011496271872, 140011504664575,
+STORE, 140011487879168, 140011504664575,
+STORE, 140011336876032, 140011471093759,
+SNULL, 140011336876032, 140011370446847,
+STORE, 140011370446848, 140011471093759,
+STORE, 140011336876032, 140011370446847,
+ERASE, 140011336876032, 140011370446847,
+STORE, 140011471093760, 140011487879167,
+STORE, 140011362054144, 140011370446847,
+SNULL, 140011362058239, 140011370446847,
+STORE, 140011362054144, 140011362058239,
+STORE, 140011362058240, 140011370446847,
+STORE, 140011353661440, 140011362054143,
+STORE, 140011345268736, 140011362054143,
+SNULL, 140011345272831, 140011362054143,
+STORE, 140011345268736, 140011345272831,
+STORE, 140011345272832, 140011362054143,
+STORE, 140011336876032, 140011345268735,
+STORE, 140011328483328, 140011345268735,
+SNULL, 140011328487423, 140011345268735,
+STORE, 140011328483328, 140011328487423,
+STORE, 140011328487424, 140011345268735,
+STORE, 140011320090624, 140011328483327,
+STORE, 140011185872896, 140011320090623,
+SNULL, 140011185872896, 140011236229119,
+STORE, 140011236229120, 140011320090623,
+STORE, 140011185872896, 140011236229119,
+ERASE, 140011185872896, 140011236229119,
+SNULL, 140011856961536, 140011907317759,
+STORE, 140011907317760, 140011991179263,
+STORE, 140011856961536, 140011907317759,
+ERASE, 140011856961536, 140011907317759,
+SNULL, 140011974426623, 140011991179263,
+STORE, 140011907317760, 140011974426623,
+STORE, 140011974426624, 140011991179263,
+ERASE, 140011974426624, 140011991179263,
+SNULL, 140011840208895, 140011848568831,
+STORE, 140011773100032, 140011840208895,
+STORE, 140011840208896, 140011848568831,
+ERASE, 140011840208896, 140011848568831,
+SNULL, 140011773235199, 140011840208895,
+STORE, 140011773100032, 140011773235199,
+STORE, 140011773235200, 140011840208895,
+STORE, 140011102011392, 140011320090623,
+SNULL, 140011169120255, 140011320090623,
+STORE, 140011102011392, 140011169120255,
+STORE, 140011169120256, 140011320090623,
+SNULL, 140011169120256, 140011236229119,
+STORE, 140011236229120, 140011320090623,
+STORE, 140011169120256, 140011236229119,
+ERASE, 140011169120256, 140011236229119,
+SNULL, 140011622096896, 140011638882303,
+STORE, 140011638882304, 140011756314623,
+STORE, 140011622096896, 140011638882303,
+ERASE, 140011622096896, 140011638882303,
+SNULL, 140011705991167, 140011756314623,
+STORE, 140011638882304, 140011705991167,
+STORE, 140011705991168, 140011756314623,
+ERASE, 140011705991168, 140011756314623,
+SNULL, 140011571773439, 140011613704191,
+STORE, 140011504664576, 140011571773439,
+STORE, 140011571773440, 140011613704191,
+ERASE, 140011571773440, 140011613704191,
+STORE, 140010967793664, 140011169120255,
+SNULL, 140011034902527, 140011169120255,
+STORE, 140010967793664, 140011034902527,
+STORE, 140011034902528, 140011169120255,
+SNULL, 140011034902528, 140011102011391,
+STORE, 140011102011392, 140011169120255,
+STORE, 140011034902528, 140011102011391,
+ERASE, 140011034902528, 140011102011391,
+STORE, 140010833575936, 140011034902527,
+SNULL, 140011437555711, 140011471093759,
+STORE, 140011370446848, 140011437555711,
+STORE, 140011437555712, 140011471093759,
+ERASE, 140011437555712, 140011471093759,
+SNULL, 140011370582015, 140011437555711,
+STORE, 140011370446848, 140011370582015,
+STORE, 140011370582016, 140011437555711,
+STORE, 140010699358208, 140011034902527,
+SNULL, 140011487883263, 140011504664575,
+STORE, 140011487879168, 140011487883263,
+STORE, 140011487883264, 140011504664575,
+SNULL, 140011345272832, 140011353661439,
+STORE, 140011353661440, 140011362054143,
+STORE, 140011345272832, 140011353661439,
+SNULL, 140011353665535, 140011362054143,
+STORE, 140011353661440, 140011353665535,
+STORE, 140011353665536, 140011362054143,
+SNULL, 140011328487424, 140011336876031,
+STORE, 140011336876032, 140011345268735,
+STORE, 140011328487424, 140011336876031,
+SNULL, 140011336880127, 140011345268735,
+STORE, 140011336876032, 140011336880127,
+STORE, 140011336880128, 140011345268735,
+SNULL, 140011303337983, 140011320090623,
+STORE, 140011236229120, 140011303337983,
+STORE, 140011303337984, 140011320090623,
+ERASE, 140011303337984, 140011320090623,
+SNULL, 140011907452927, 140011974426623,
+STORE, 140011907317760, 140011907452927,
+STORE, 140011907452928, 140011974426623,
+SNULL, 140011102146559, 140011169120255,
+STORE, 140011102011392, 140011102146559,
+STORE, 140011102146560, 140011169120255,
+SNULL, 140011639017471, 140011705991167,
+STORE, 140011638882304, 140011639017471,
+STORE, 140011639017472, 140011705991167,
+SNULL, 140011504799743, 140011571773439,
+STORE, 140011504664576, 140011504799743,
+STORE, 140011504799744, 140011571773439,
+SNULL, 140011613708287, 140011622096895,
+STORE, 140011613704192, 140011613708287,
+STORE, 140011613708288, 140011622096895,
+SNULL, 140010699358208, 140010967793663,
+STORE, 140010967793664, 140011034902527,
+STORE, 140010699358208, 140010967793663,
+SNULL, 140010967928831, 140011034902527,
+STORE, 140010967793664, 140010967928831,
+STORE, 140010967928832, 140011034902527,
+SNULL, 140010900684799, 140010967793663,
+STORE, 140010699358208, 140010900684799,
+STORE, 140010900684800, 140010967793663,
+ERASE, 140010900684800, 140010967793663,
+SNULL, 140010766467071, 140010900684799,
+STORE, 140010699358208, 140010766467071,
+STORE, 140010766467072, 140010900684799,
+SNULL, 140010766467072, 140010833575935,
+STORE, 140010833575936, 140010900684799,
+STORE, 140010766467072, 140010833575935,
+ERASE, 140010766467072, 140010833575935,
+SNULL, 140010699493375, 140010766467071,
+STORE, 140010699358208, 140010699493375,
+STORE, 140010699493376, 140010766467071,
+SNULL, 140011848572927, 140011856961535,
+STORE, 140011848568832, 140011848572927,
+STORE, 140011848572928, 140011856961535,
+STORE, 140011982786560, 140012007964671,
+STORE, 140011898925056, 140011907317759,
+SNULL, 140011898929151, 140011907317759,
+STORE, 140011898925056, 140011898929151,
+STORE, 140011898929152, 140011907317759,
+SNULL, 140011320094719, 140011328483327,
+STORE, 140011320090624, 140011320094719,
+STORE, 140011320094720, 140011328483327,
+STORE, 140011890532352, 140011898925055,
+STORE, 140011882139648, 140011898925055,
+SNULL, 140011882143743, 140011898925055,
+STORE, 140011882139648, 140011882143743,
+STORE, 140011882143744, 140011898925055,
+STORE, 140011873746944, 140011882139647,
+SNULL, 140011873751039, 140011882139647,
+STORE, 140011873746944, 140011873751039,
+STORE, 140011873751040, 140011882139647,
+SNULL, 140011236364287, 140011303337983,
+STORE, 140011236229120, 140011236364287,
+STORE, 140011236364288, 140011303337983,
+SNULL, 140011756318719, 140011773100031,
+STORE, 140011756314624, 140011756318719,
+STORE, 140011756318720, 140011773100031,
+SNULL, 140011756318720, 140011764707327,
+STORE, 140011764707328, 140011773100031,
+STORE, 140011756318720, 140011764707327,
+SNULL, 140011764711423, 140011773100031,
+STORE, 140011764707328, 140011764711423,
+STORE, 140011764711424, 140011773100031,
+SNULL, 140011471097855, 140011487879167,
+STORE, 140011471093760, 140011471097855,
+STORE, 140011471097856, 140011487879167,
+SNULL, 140010833711103, 140010900684799,
+STORE, 140010833575936, 140010833711103,
+STORE, 140010833711104, 140010900684799,
+SNULL, 140011982790655, 140012007964671,
+STORE, 140011982786560, 140011982790655,
+STORE, 140011982790656, 140012007964671,
+STORE, 140011865354240, 140011873746943,
+STORE, 140011848572928, 140011865354239,
+SNULL, 140011848572928, 140011856961535,
+STORE, 140011856961536, 140011865354239,
+STORE, 140011848572928, 140011856961535,
+SNULL, 140011856965631, 140011865354239,
+STORE, 140011856961536, 140011856965631,
+STORE, 140011856965632, 140011865354239,
+STORE, 140011747921920, 140011756314623,
+STORE, 140011739529216, 140011756314623,
+SNULL, 140011471097856, 140011479486463,
+STORE, 140011479486464, 140011487879167,
+STORE, 140011471097856, 140011479486463,
+SNULL, 140011479490559, 140011487879167,
+STORE, 140011479486464, 140011479490559,
+STORE, 140011479490560, 140011487879167,
+STORE, 140011731136512, 140011756314623,
+STORE, 140011722743808, 140011756314623,
+SNULL, 140011982790656, 140011999571967,
+STORE, 140011999571968, 140012007964671,
+STORE, 140011982790656, 140011999571967,
+SNULL, 140011999576063, 140012007964671,
+STORE, 140011999571968, 140011999576063,
+STORE, 140011999576064, 140012007964671,
+STORE, 140011714351104, 140011756314623,
+SNULL, 140011882143744, 140011890532351,
+STORE, 140011890532352, 140011898925055,
+STORE, 140011882143744, 140011890532351,
+SNULL, 140011890536447, 140011898925055,
+STORE, 140011890532352, 140011890536447,
+STORE, 140011890536448, 140011898925055,
+STORE, 140011630489600, 140011638882303,
+STORE, 140011613708288, 140011638882303,
+STORE, 140011605311488, 140011613704191,
+STORE, 140011596918784, 140011613704191,
+STORE, 140011588526080, 140011613704191,
+SNULL, 140011487883264, 140011496271871,
+STORE, 140011496271872, 140011504664575,
+STORE, 140011487883264, 140011496271871,
+SNULL, 140011496275967, 140011504664575,
+STORE, 140011496271872, 140011496275967,
+STORE, 140011496275968, 140011504664575,
+STORE, 140011580133376, 140011613704191,
+SNULL, 140011580137471, 140011613704191,
+STORE, 140011580133376, 140011580137471,
+STORE, 140011580137472, 140011613704191,
+SNULL, 140011982790656, 140011991179263,
+STORE, 140011991179264, 140011999571967,
+STORE, 140011982790656, 140011991179263,
+SNULL, 140011991183359, 140011999571967,
+STORE, 140011991179264, 140011991183359,
+STORE, 140011991183360, 140011999571967,
+SNULL, 140011865358335, 140011873746943,
+STORE, 140011865354240, 140011865358335,
+STORE, 140011865358336, 140011873746943,
+STORE, 140011462701056, 140011471093759,
+SNULL, 140011714351104, 140011739529215,
+STORE, 140011739529216, 140011756314623,
+STORE, 140011714351104, 140011739529215,
+SNULL, 140011739533311, 140011756314623,
+STORE, 140011739529216, 140011739533311,
+STORE, 140011739533312, 140011756314623,
+SNULL, 140011739533312, 140011747921919,
+STORE, 140011747921920, 140011756314623,
+STORE, 140011739533312, 140011747921919,
+SNULL, 140011747926015, 140011756314623,
+STORE, 140011747921920, 140011747926015,
+STORE, 140011747926016, 140011756314623,
+SNULL, 140011613708288, 140011630489599,
+STORE, 140011630489600, 140011638882303,
+STORE, 140011613708288, 140011630489599,
+SNULL, 140011630493695, 140011638882303,
+STORE, 140011630489600, 140011630493695,
+STORE, 140011630493696, 140011638882303,
+SNULL, 140011714351104, 140011722743807,
+STORE, 140011722743808, 140011739529215,
+STORE, 140011714351104, 140011722743807,
+SNULL, 140011722747903, 140011739529215,
+STORE, 140011722743808, 140011722747903,
+STORE, 140011722747904, 140011739529215,
+SNULL, 140011714355199, 140011722743807,
+STORE, 140011714351104, 140011714355199,
+STORE, 140011714355200, 140011722743807,
+SNULL, 140011722747904, 140011731136511,
+STORE, 140011731136512, 140011739529215,
+STORE, 140011722747904, 140011731136511,
+SNULL, 140011731140607, 140011739529215,
+STORE, 140011731136512, 140011731140607,
+STORE, 140011731140608, 140011739529215,
+STORE, 140011454308352, 140011471093759,
+STORE, 140011445915648, 140011471093759,
+SNULL, 140011580137472, 140011588526079,
+STORE, 140011588526080, 140011613704191,
+STORE, 140011580137472, 140011588526079,
+SNULL, 140011588530175, 140011613704191,
+STORE, 140011588526080, 140011588530175,
+STORE, 140011588530176, 140011613704191,
+SNULL, 140011445915648, 140011462701055,
+STORE, 140011462701056, 140011471093759,
+STORE, 140011445915648, 140011462701055,
+SNULL, 140011462705151, 140011471093759,
+STORE, 140011462701056, 140011462705151,
+STORE, 140011462705152, 140011471093759,
+SNULL, 140011588530176, 140011596918783,
+STORE, 140011596918784, 140011613704191,
+STORE, 140011588530176, 140011596918783,
+SNULL, 140011596922879, 140011613704191,
+STORE, 140011596918784, 140011596922879,
+STORE, 140011596922880, 140011613704191,
+SNULL, 140011596922880, 140011605311487,
+STORE, 140011605311488, 140011613704191,
+STORE, 140011596922880, 140011605311487,
+SNULL, 140011605315583, 140011613704191,
+STORE, 140011605311488, 140011605315583,
+STORE, 140011605315584, 140011613704191,
+SNULL, 140011613708288, 140011622096895,
+STORE, 140011622096896, 140011630489599,
+STORE, 140011613708288, 140011622096895,
+SNULL, 140011622100991, 140011630489599,
+STORE, 140011622096896, 140011622100991,
+STORE, 140011622100992, 140011630489599,
+STORE, 140011311697920, 140011320090623,
+STORE, 140011227836416, 140011236229119,
+STORE, 140011219443712, 140011236229119,
+SNULL, 140011219447807, 140011236229119,
+STORE, 140011219443712, 140011219447807,
+STORE, 140011219447808, 140011236229119,
+STORE, 140011211051008, 140011219443711,
+STORE, 140011202658304, 140011219443711,
+SNULL, 140011202662399, 140011219443711,
+STORE, 140011202658304, 140011202662399,
+STORE, 140011202662400, 140011219443711,
+STORE, 140011194265600, 140011202658303,
+STORE, 140011185872896, 140011202658303,
+STORE, 140011177480192, 140011202658303,
+STORE, 140011093618688, 140011102011391,
+SNULL, 140011445915648, 140011454308351,
+STORE, 140011454308352, 140011462701055,
+STORE, 140011445915648, 140011454308351,
+SNULL, 140011454312447, 140011462701055,
+STORE, 140011454308352, 140011454312447,
+STORE, 140011454312448, 140011462701055,
+STORE, 140011085225984, 140011102011391,
+SNULL, 140011085230079, 140011102011391,
+STORE, 140011085225984, 140011085230079,
+STORE, 140011085230080, 140011102011391,
+SNULL, 140011177484287, 140011202658303,
+STORE, 140011177480192, 140011177484287,
+STORE, 140011177484288, 140011202658303,
+SNULL, 140011445919743, 140011454308351,
+STORE, 140011445915648, 140011445919743,
+STORE, 140011445919744, 140011454308351,
+SNULL, 140011177484288, 140011185872895,
+STORE, 140011185872896, 140011202658303,
+STORE, 140011177484288, 140011185872895,
+SNULL, 140011185876991, 140011202658303,
+STORE, 140011185872896, 140011185876991,
+STORE, 140011185876992, 140011202658303,
+STORE, 140011076833280, 140011085225983,
+SNULL, 140011202662400, 140011211051007,
+STORE, 140011211051008, 140011219443711,
+STORE, 140011202662400, 140011211051007,
+SNULL, 140011211055103, 140011219443711,
+STORE, 140011211051008, 140011211055103,
+STORE, 140011211055104, 140011219443711,
+SNULL, 140011185876992, 140011194265599,
+STORE, 140011194265600, 140011202658303,
+STORE, 140011185876992, 140011194265599,
+SNULL, 140011194269695, 140011202658303,
+STORE, 140011194265600, 140011194269695,
+STORE, 140011194269696, 140011202658303,
+STORE, 140011068440576, 140011085225983,
+SNULL, 140011311702015, 140011320090623,
+STORE, 140011311697920, 140011311702015,
+STORE, 140011311702016, 140011320090623,
+STORE, 140011060047872, 140011085225983,
+SNULL, 140011060051967, 140011085225983,
+STORE, 140011060047872, 140011060051967,
+STORE, 140011060051968, 140011085225983,
+STORE, 140011051655168, 140011060047871,
+STORE, 140011043262464, 140011060047871,
+SNULL, 140011043266559, 140011060047871,
+STORE, 140011043262464, 140011043266559,
+STORE, 140011043266560, 140011060047871,
+SNULL, 140011219447808, 140011227836415,
+STORE, 140011227836416, 140011236229119,
+STORE, 140011219447808, 140011227836415,
+SNULL, 140011227840511, 140011236229119,
+STORE, 140011227836416, 140011227840511,
+STORE, 140011227840512, 140011236229119,
+SNULL, 140011085230080, 140011093618687,
+STORE, 140011093618688, 140011102011391,
+STORE, 140011085230080, 140011093618687,
+SNULL, 140011093622783, 140011102011391,
+STORE, 140011093618688, 140011093622783,
+STORE, 140011093622784, 140011102011391,
+STORE, 140010959400960, 140010967793663,
+STORE, 140010951008256, 140010967793663,
+SNULL, 140010951008256, 140010959400959,
+STORE, 140010959400960, 140010967793663,
+STORE, 140010951008256, 140010959400959,
+SNULL, 140010959405055, 140010967793663,
+STORE, 140010959400960, 140010959405055,
+STORE, 140010959405056, 140010967793663,
+STORE, 140010942615552, 140010959400959,
+STORE, 140010934222848, 140010959400959,
+SNULL, 140011060051968, 140011076833279,
+STORE, 140011076833280, 140011085225983,
+STORE, 140011060051968, 140011076833279,
+SNULL, 140011076837375, 140011085225983,
+STORE, 140011076833280, 140011076837375,
+STORE, 140011076837376, 140011085225983,
+SNULL, 140011043266560, 140011051655167,
+STORE, 140011051655168, 140011060047871,
+STORE, 140011043266560, 140011051655167,
+SNULL, 140011051659263, 140011060047871,
+STORE, 140011051655168, 140011051659263,
+STORE, 140011051659264, 140011060047871,
+STORE, 140010925830144, 140010959400959,
+SNULL, 140011060051968, 140011068440575,
+STORE, 140011068440576, 140011076833279,
+STORE, 140011060051968, 140011068440575,
+SNULL, 140011068444671, 140011076833279,
+STORE, 140011068440576, 140011068444671,
+STORE, 140011068444672, 140011076833279,
+STORE, 140010917437440, 140010959400959,
+STORE, 140010909044736, 140010959400959,
+STORE, 140010825183232, 140010833575935,
+SNULL, 140010909044736, 140010942615551,
+STORE, 140010942615552, 140010959400959,
+STORE, 140010909044736, 140010942615551,
+SNULL, 140010942619647, 140010959400959,
+STORE, 140010942615552, 140010942619647,
+STORE, 140010942619648, 140010959400959,
+SNULL, 140010909044736, 140010934222847,
+STORE, 140010934222848, 140010942615551,
+STORE, 140010909044736, 140010934222847,
+SNULL, 140010934226943, 140010942615551,
+STORE, 140010934222848, 140010934226943,
+STORE, 140010934226944, 140010942615551,
+SNULL, 140010909048831, 140010934222847,
+STORE, 140010909044736, 140010909048831,
+STORE, 140010909048832, 140010934222847,
+STORE, 140010816790528, 140010833575935,
+SNULL, 140010816794623, 140010833575935,
+STORE, 140010816790528, 140010816794623,
+STORE, 140010816794624, 140010833575935,
+STORE, 140010808397824, 140010816790527,
+SNULL, 140010942619648, 140010951008255,
+STORE, 140010951008256, 140010959400959,
+STORE, 140010942619648, 140010951008255,
+SNULL, 140010951012351, 140010959400959,
+STORE, 140010951008256, 140010951012351,
+STORE, 140010951012352, 140010959400959,
+STORE, 140010800005120, 140010816790527,
+SNULL, 140010800009215, 140010816790527,
+STORE, 140010800005120, 140010800009215,
+STORE, 140010800009216, 140010816790527,
+SNULL, 140010909048832, 140010925830143,
+STORE, 140010925830144, 140010934222847,
+STORE, 140010909048832, 140010925830143,
+SNULL, 140010925834239, 140010934222847,
+STORE, 140010925830144, 140010925834239,
+STORE, 140010925834240, 140010934222847,
+SNULL, 140010816794624, 140010825183231,
+STORE, 140010825183232, 140010833575935,
+STORE, 140010816794624, 140010825183231,
+SNULL, 140010825187327, 140010833575935,
+STORE, 140010825183232, 140010825187327,
+STORE, 140010825187328, 140010833575935,
+SNULL, 140010909048832, 140010917437439,
+STORE, 140010917437440, 140010925830143,
+STORE, 140010909048832, 140010917437439,
+SNULL, 140010917441535, 140010925830143,
+STORE, 140010917437440, 140010917441535,
+STORE, 140010917441536, 140010925830143,
+SNULL, 140010800009216, 140010808397823,
+STORE, 140010808397824, 140010816790527,
+STORE, 140010800009216, 140010808397823,
+SNULL, 140010808401919, 140010816790527,
+STORE, 140010808397824, 140010808401919,
+STORE, 140010808401920, 140010816790527,
+STORE, 140010791612416, 140010800005119,
+SNULL, 140010791616511, 140010800005119,
+STORE, 140010791612416, 140010791616511,
+STORE, 140010791616512, 140010800005119,
+STORE, 140012547100672, 140012547129343,
+STORE, 140012511506432, 140012513697791,
+SNULL, 140012511506432, 140012511596543,
+STORE, 140012511596544, 140012513697791,
+STORE, 140012511506432, 140012511596543,
+SNULL, 140012513689599, 140012513697791,
+STORE, 140012511596544, 140012513689599,
+STORE, 140012513689600, 140012513697791,
+ERASE, 140012513689600, 140012513697791,
+STORE, 140012513689600, 140012513697791,
+SNULL, 140012513693695, 140012513697791,
+STORE, 140012513689600, 140012513693695,
+STORE, 140012513693696, 140012513697791,
+ERASE, 140012547100672, 140012547129343,
+ERASE, 140011362054144, 140011362058239,
+ERASE, 140011362058240, 140011370446847,
+ERASE, 140011882139648, 140011882143743,
+ERASE, 140011882143744, 140011890532351,
+ERASE, 140011873746944, 140011873751039,
+ERASE, 140011873751040, 140011882139647,
+ERASE, 140011588526080, 140011588530175,
+ERASE, 140011588530176, 140011596918783,
+ERASE, 140011328483328, 140011328487423,
+ERASE, 140011328487424, 140011336876031,
+ERASE, 140011898925056, 140011898929151,
+ERASE, 140011898929152, 140011907317759,
+ERASE, 140011353661440, 140011353665535,
+ERASE, 140011353665536, 140011362054143,
+ERASE, 140011336876032, 140011336880127,
+ERASE, 140011336880128, 140011345268735,
+ERASE, 140011731136512, 140011731140607,
+ERASE, 140011731140608, 140011739529215,
+ERASE, 140011479486464, 140011479490559,
+ERASE, 140011479490560, 140011487879167,
+ERASE, 140011756314624, 140011756318719,
+ERASE, 140011756318720, 140011764707327,
+ERASE, 140011580133376, 140011580137471,
+ERASE, 140011580137472, 140011588526079,
+ERASE, 140011219443712, 140011219447807,
+ERASE, 140011219447808, 140011227836415,
+ERASE, 140011051655168, 140011051659263,
+ERASE, 140011051659264, 140011060047871,
+ERASE, 140011999571968, 140011999576063,
+ERASE, 140011999576064, 140012007964671,
+ERASE, 140011714351104, 140011714355199,
+ERASE, 140011714355200, 140011722743807,
+ERASE, 140011739529216, 140011739533311,
+ERASE, 140011739533312, 140011747921919,
+ERASE, 140011320090624, 140011320094719,
+ERASE, 140011320094720, 140011328483327,
+ERASE, 140011630489600, 140011630493695,
+ERASE, 140011630493696, 140011638882303,
+ERASE, 140011345268736, 140011345272831,
+ERASE, 140011345272832, 140011353661439,
+ERASE, 140011496271872, 140011496275967,
+ERASE, 140011496275968, 140011504664575,
+ERASE, 140011194265600, 140011194269695,
+ERASE, 140011194269696, 140011202658303,
+ERASE, 140011068440576, 140011068444671,
+ERASE, 140011068444672, 140011076833279,
+ERASE, 140010909044736, 140010909048831,
+ERASE, 140010909048832, 140010917437439,
+ERASE, 140011764707328, 140011764711423,
+ERASE, 140011764711424, 140011773100031,
+ERASE, 140011462701056, 140011462705151,
+ERASE, 140011462705152, 140011471093759,
+ERASE, 140011076833280, 140011076837375,
+ERASE, 140011076837376, 140011085225983,
+ERASE, 140011991179264, 140011991183359,
+ERASE, 140011991183360, 140011999571967,
+ERASE, 140011211051008, 140011211055103,
+ERASE, 140011211055104, 140011219443711,
+ERASE, 140010917437440, 140010917441535,
+ERASE, 140010917441536, 140010925830143,
+ERASE, 140011085225984, 140011085230079,
+ERASE, 140011085230080, 140011093618687,
+ERASE, 140011487879168, 140011487883263,
+ERASE, 140011487883264, 140011496271871,
+ERASE, 140011856961536, 140011856965631,
+ERASE, 140011856965632, 140011865354239,
+ERASE, 140011982786560, 140011982790655,
+ERASE, 140011982790656, 140011991179263,
+ERASE, 140011722743808, 140011722747903,
+ERASE, 140011722747904, 140011731136511,
+ERASE, 140011177480192, 140011177484287,
+ERASE, 140011177484288, 140011185872895,
+ERASE, 140011848568832, 140011848572927,
+ERASE, 140011848572928, 140011856961535,
+ERASE, 140011890532352, 140011890536447,
+ERASE, 140011890536448, 140011898925055,
+ERASE, 140011622096896, 140011622100991,
+ERASE, 140011622100992, 140011630489599,
+ERASE, 140011311697920, 140011311702015,
+ERASE, 140011311702016, 140011320090623,
+ERASE, 140011471093760, 140011471097855,
+ERASE, 140011471097856, 140011479486463,
+ERASE, 140011605311488, 140011605315583,
+ERASE, 140011605315584, 140011613704191,
+ERASE, 140010791612416, 140010791616511,
+ERASE, 140010791616512, 140010800005119,
+ERASE, 140010959400960, 140010959405055,
+ERASE, 140010959405056, 140010967793663,
+ERASE, 140011185872896, 140011185876991,
+ERASE, 140011185876992, 140011194265599,
+ERASE, 140011454308352, 140011454312447,
+ERASE, 140011454312448, 140011462701055,
+ERASE, 140011596918784, 140011596922879,
+ERASE, 140011596922880, 140011605311487,
+ERASE, 140011060047872, 140011060051967,
+ERASE, 140011060051968, 140011068440575,
+ERASE, 140010925830144, 140010925834239,
+ERASE, 140010925834240, 140010934222847,
+ERASE, 140011747921920, 140011747926015,
+ERASE, 140011747926016, 140011756314623,
+ERASE, 140011202658304, 140011202662399,
+ERASE, 140011202662400, 140011211051007,
+ERASE, 140010800005120, 140010800009215,
+ERASE, 140010800009216, 140010808397823,
+ERASE, 140011093618688, 140011093622783,
+ERASE, 140011093622784, 140011102011391,
+ERASE, 140010808397824, 140010808401919,
+ERASE, 140010808401920, 140010816790527,
+ERASE, 140012419010560, 140012419014655,
+ERASE, 140012419014656, 140012427403263,
+ERASE, 140010934222848, 140010934226943,
+ERASE, 140010934226944, 140010942615551,
+ERASE, 140010942615552, 140010942619647,
+ERASE, 140010942619648, 140010951008255,
+ERASE, 140011613704192, 140011613708287,
+ERASE, 140011613708288, 140011622096895,
+ERASE, 140011865354240, 140011865358335,
+ERASE, 140011865358336, 140011873746943,
+ERASE, 140012301578240, 140012301582335,
+ERASE, 140012301582336, 140012309970943,
+ERASE, 140012393832448, 140012393836543,
+ERASE, 140012393836544, 140012402225151,
+ERASE, 140012410617856, 140012410621951,
+ERASE, 140012410621952, 140012419010559,
+ERASE, 140012402225152, 140012402229247,
+ERASE, 140012402229248, 140012410617855,
+ERASE, 140012259614720, 140012259618815,
+ERASE, 140012259618816, 140012268007423,
+ERASE, 140012251222016, 140012251226111,
+ERASE, 140012251226112, 140012259614719,
+ERASE, 140012284792832, 140012284796927,
+ERASE, 140012284796928, 140012293185535,
+ERASE, 140011445915648, 140011445919743,
+ERASE, 140011445919744, 140011454308351,
+ERASE, 140010951008256, 140010951012351,
+ERASE, 140010951012352, 140010959400959,
+ERASE, 140011043262464, 140011043266559,
+ERASE, 140011043266560, 140011051655167,
+ERASE, 140010825183232, 140010825187327,
+ERASE, 140010825187328, 140010833575935,
+ERASE, 140012293185536, 140012293189631,
+ERASE, 140012293189632, 140012301578239,
+ERASE, 140012276400128, 140012276404223,
+ERASE, 140012276404224, 140012284792831,
+ERASE, 140012016357376, 140012016361471,
+ERASE, 140012016361472, 140012024750079,
+ERASE, 140012024750080, 140012024754175,
+ERASE, 140012024754176, 140012033142783,
+ERASE, 140011227836416, 140011227840511,
+ERASE, 140011227840512, 140011236229119,
+ERASE, 140010816790528, 140010816794623,
+ERASE, 140010816794624, 140010825183231,
+ERASE, 140012268007424, 140012268011519,
+ERASE, 140012268011520, 140012276400127,
+ERASE, 140012385439744, 140012385443839,
+ERASE, 140012385443840, 140012393832447,
+ERASE, 140012522090496, 140012522094591,
+ERASE, 140012522094592, 140012530483199,
+ERASE, 140012033142784, 140012033146879,
+ERASE, 140012033146880, 140012041535487,
+ };
+ static const unsigned long set35[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140730536939520, 140737488351231,
+SNULL, 140730536943615, 140737488351231,
+STORE, 140730536939520, 140730536943615,
+STORE, 140730536808448, 140730536943615,
+STORE, 94245239877632, 94245242130431,
+SNULL, 94245240008703, 94245242130431,
+STORE, 94245239877632, 94245240008703,
+STORE, 94245240008704, 94245242130431,
+ERASE, 94245240008704, 94245242130431,
+STORE, 94245242101760, 94245242109951,
+STORE, 94245242109952, 94245242130431,
+STORE, 140475575263232, 140475577516031,
+SNULL, 140475575406591, 140475577516031,
+STORE, 140475575263232, 140475575406591,
+STORE, 140475575406592, 140475577516031,
+ERASE, 140475575406592, 140475577516031,
+STORE, 140475577503744, 140475577511935,
+STORE, 140475577511936, 140475577516031,
+STORE, 140730538164224, 140730538168319,
+STORE, 140730538151936, 140730538164223,
+STORE, 140475577475072, 140475577503743,
+STORE, 140475577466880, 140475577475071,
+STORE, 140475573047296, 140475575263231,
+SNULL, 140475573047296, 140475573145599,
+STORE, 140475573145600, 140475575263231,
+STORE, 140475573047296, 140475573145599,
+SNULL, 140475575238655, 140475575263231,
+STORE, 140475573145600, 140475575238655,
+STORE, 140475575238656, 140475575263231,
+SNULL, 140475575238656, 140475575246847,
+STORE, 140475575246848, 140475575263231,
+STORE, 140475575238656, 140475575246847,
+ERASE, 140475575238656, 140475575246847,
+STORE, 140475575238656, 140475575246847,
+ERASE, 140475575246848, 140475575263231,
+STORE, 140475575246848, 140475575263231,
+STORE, 140475569250304, 140475573047295,
+SNULL, 140475569250304, 140475570909183,
+STORE, 140475570909184, 140475573047295,
+STORE, 140475569250304, 140475570909183,
+SNULL, 140475573006335, 140475573047295,
+STORE, 140475570909184, 140475573006335,
+STORE, 140475573006336, 140475573047295,
+SNULL, 140475573006336, 140475573030911,
+STORE, 140475573030912, 140475573047295,
+STORE, 140475573006336, 140475573030911,
+ERASE, 140475573006336, 140475573030911,
+STORE, 140475573006336, 140475573030911,
+ERASE, 140475573030912, 140475573047295,
+STORE, 140475573030912, 140475573047295,
+STORE, 140475577458688, 140475577475071,
+SNULL, 140475573022719, 140475573030911,
+STORE, 140475573006336, 140475573022719,
+STORE, 140475573022720, 140475573030911,
+SNULL, 140475575242751, 140475575246847,
+STORE, 140475575238656, 140475575242751,
+STORE, 140475575242752, 140475575246847,
+SNULL, 94245242105855, 94245242109951,
+STORE, 94245242101760, 94245242105855,
+STORE, 94245242105856, 94245242109951,
+SNULL, 140475577507839, 140475577511935,
+STORE, 140475577503744, 140475577507839,
+STORE, 140475577507840, 140475577511935,
+ERASE, 140475577475072, 140475577503743,
+STORE, 94245271216128, 94245271351295,
+STORE, 140475560857600, 140475569250303,
+SNULL, 140475560861695, 140475569250303,
+STORE, 140475560857600, 140475560861695,
+STORE, 140475560861696, 140475569250303,
+STORE, 140475552464896, 140475560857599,
+STORE, 140475418247168, 140475552464895,
+SNULL, 140475418247168, 140475428241407,
+STORE, 140475428241408, 140475552464895,
+STORE, 140475418247168, 140475428241407,
+ERASE, 140475418247168, 140475428241407,
+SNULL, 140475495350271, 140475552464895,
+STORE, 140475428241408, 140475495350271,
+STORE, 140475495350272, 140475552464895,
+ERASE, 140475495350272, 140475552464895,
+SNULL, 140475428376575, 140475495350271,
+STORE, 140475428241408, 140475428376575,
+STORE, 140475428376576, 140475495350271,
+SNULL, 140475552468991, 140475560857599,
+STORE, 140475552464896, 140475552468991,
+STORE, 140475552468992, 140475560857599,
+STORE, 140475544072192, 140475552464895,
+SNULL, 140475544076287, 140475552464895,
+STORE, 140475544072192, 140475544076287,
+STORE, 140475544076288, 140475552464895,
+STORE, 140475535679488, 140475544072191,
+SNULL, 140475535683583, 140475544072191,
+STORE, 140475535679488, 140475535683583,
+STORE, 140475535683584, 140475544072191,
+STORE, 140475527286784, 140475535679487,
+SNULL, 140475527290879, 140475535679487,
+STORE, 140475527286784, 140475527290879,
+STORE, 140475527290880, 140475535679487,
+STORE, 140475518894080, 140475527286783,
+STORE, 140475510501376, 140475527286783,
+STORE, 140475502108672, 140475527286783,
+STORE, 140475419848704, 140475428241407,
+STORE, 140475285630976, 140475419848703,
+SNULL, 140475285630976, 140475294023679,
+STORE, 140475294023680, 140475419848703,
+STORE, 140475285630976, 140475294023679,
+ERASE, 140475285630976, 140475294023679,
+STORE, 140475159805952, 140475419848703,
+STORE, 140475025588224, 140475419848703,
+SNULL, 140475092697087, 140475419848703,
+STORE, 140475025588224, 140475092697087,
+STORE, 140475092697088, 140475419848703,
+SNULL, 140475092697088, 140475159805951,
+STORE, 140475159805952, 140475419848703,
+STORE, 140475092697088, 140475159805951,
+ERASE, 140475092697088, 140475159805951,
+STORE, 140474891370496, 140475092697087,
+SNULL, 140474958479359, 140475092697087,
+STORE, 140474891370496, 140474958479359,
+STORE, 140474958479360, 140475092697087,
+SNULL, 140474958479360, 140475025588223,
+STORE, 140475025588224, 140475092697087,
+STORE, 140474958479360, 140475025588223,
+ERASE, 140474958479360, 140475025588223,
+SNULL, 140475361132543, 140475419848703,
+STORE, 140475159805952, 140475361132543,
+STORE, 140475361132544, 140475419848703,
+ERASE, 140475361132544, 140475419848703,
+SNULL, 140475159805952, 140475294023679,
+STORE, 140475294023680, 140475361132543,
+STORE, 140475159805952, 140475294023679,
+SNULL, 140475294158847, 140475361132543,
+STORE, 140475294023680, 140475294158847,
+STORE, 140475294158848, 140475361132543,
+SNULL, 140475226914815, 140475294023679,
+STORE, 140475159805952, 140475226914815,
+STORE, 140475226914816, 140475294023679,
+ERASE, 140475226914816, 140475294023679,
+SNULL, 140475025723391, 140475092697087,
+STORE, 140475025588224, 140475025723391,
+STORE, 140475025723392, 140475092697087,
+SNULL, 140475159941119, 140475226914815,
+STORE, 140475159805952, 140475159941119,
+STORE, 140475159941120, 140475226914815,
+SNULL, 140474891505663, 140474958479359,
+STORE, 140474891370496, 140474891505663,
+STORE, 140474891505664, 140474958479359,
+SNULL, 140475502108672, 140475518894079,
+STORE, 140475518894080, 140475527286783,
+STORE, 140475502108672, 140475518894079,
+SNULL, 140475518898175, 140475527286783,
+STORE, 140475518894080, 140475518898175,
+STORE, 140475518898176, 140475527286783,
+STORE, 140475411456000, 140475428241407,
+SNULL, 140475502112767, 140475518894079,
+STORE, 140475502108672, 140475502112767,
+STORE, 140475502112768, 140475518894079,
+SNULL, 140475411460095, 140475428241407,
+STORE, 140475411456000, 140475411460095,
+STORE, 140475411460096, 140475428241407,
+SNULL, 140475411460096, 140475419848703,
+STORE, 140475419848704, 140475428241407,
+STORE, 140475411460096, 140475419848703,
+SNULL, 140475419852799, 140475428241407,
+STORE, 140475419848704, 140475419852799,
+STORE, 140475419852800, 140475428241407,
+STORE, 140475403063296, 140475411455999,
+SNULL, 140475502112768, 140475510501375,
+STORE, 140475510501376, 140475518894079,
+STORE, 140475502112768, 140475510501375,
+SNULL, 140475510505471, 140475518894079,
+STORE, 140475510501376, 140475510505471,
+STORE, 140475510505472, 140475518894079,
+SNULL, 140475403067391, 140475411455999,
+STORE, 140475403063296, 140475403067391,
+STORE, 140475403067392, 140475411455999,
+STORE, 140475394670592, 140475403063295,
+SNULL, 140475394674687, 140475403063295,
+STORE, 140475394670592, 140475394674687,
+STORE, 140475394674688, 140475403063295,
+STORE, 140475386277888, 140475394670591,
+STORE, 140475377885184, 140475394670591,
+STORE, 140475369492480, 140475394670591,
+SNULL, 140475369496575, 140475394670591,
+STORE, 140475369492480, 140475369496575,
+STORE, 140475369496576, 140475394670591,
+SNULL, 140475369496576, 140475377885183,
+STORE, 140475377885184, 140475394670591,
+STORE, 140475369496576, 140475377885183,
+SNULL, 140475377889279, 140475394670591,
+STORE, 140475377885184, 140475377889279,
+STORE, 140475377889280, 140475394670591,
+STORE, 140475285630976, 140475294023679,
+SNULL, 140475377889280, 140475386277887,
+STORE, 140475386277888, 140475394670591,
+STORE, 140475377889280, 140475386277887,
+SNULL, 140475386281983, 140475394670591,
+STORE, 140475386277888, 140475386281983,
+STORE, 140475386281984, 140475394670591,
+SNULL, 140475285635071, 140475294023679,
+STORE, 140475285630976, 140475285635071,
+STORE, 140475285635072, 140475294023679,
+STORE, 140475277238272, 140475285630975,
+STORE, 140475268845568, 140475285630975,
+SNULL, 140475268845568, 140475277238271,
+STORE, 140475277238272, 140475285630975,
+STORE, 140475268845568, 140475277238271,
+SNULL, 140475277242367, 140475285630975,
+STORE, 140475277238272, 140475277242367,
+STORE, 140475277242368, 140475285630975,
+STORE, 140475260452864, 140475277238271,
+SNULL, 140475260452864, 140475268845567,
+STORE, 140475268845568, 140475277238271,
+STORE, 140475260452864, 140475268845567,
+SNULL, 140475268849663, 140475277238271,
+STORE, 140475268845568, 140475268849663,
+STORE, 140475268849664, 140475277238271,
+SNULL, 140475260456959, 140475268845567,
+STORE, 140475260452864, 140475260456959,
+STORE, 140475260456960, 140475268845567,
+STORE, 140475252060160, 140475260452863,
+SNULL, 140475252064255, 140475260452863,
+STORE, 140475252060160, 140475252064255,
+STORE, 140475252064256, 140475260452863,
+STORE, 140475243667456, 140475252060159,
+SNULL, 140475243671551, 140475252060159,
+STORE, 140475243667456, 140475243671551,
+STORE, 140475243671552, 140475252060159,
+STORE, 140475235274752, 140475243667455,
+STORE, 140475151413248, 140475159805951,
+STORE, 140474891505664, 140475025588223,
+STORE, 140475143020544, 140475159805951,
+SNULL, 140474891505664, 140474958479359,
+STORE, 140474958479360, 140475025588223,
+STORE, 140474891505664, 140474958479359,
+SNULL, 140474958614527, 140475025588223,
+STORE, 140474958479360, 140474958614527,
+STORE, 140474958614528, 140475025588223,
+STORE, 140474824261632, 140474891370495,
+SNULL, 140474824396799, 140474891370495,
+STORE, 140474824261632, 140474824396799,
+STORE, 140474824396800, 140474891370495,
+STORE, 140475134627840, 140475159805951,
+STORE, 140474690043904, 140474824261631,
+STORE, 140475126235136, 140475159805951,
+STORE, 140475117842432, 140475159805951,
+STORE, 140474622935040, 140474824261631,
+STORE, 140475109449728, 140475159805951,
+STORE, 140474488717312, 140474824261631,
+STORE, 140475101057024, 140475159805951,
+STORE, 140474480324608, 140474488717311,
+STORE, 140474413215744, 140474480324607,
+STORE, 140474404823040, 140474413215743,
+ERASE, 140474413215744, 140474480324607,
+STORE, 140474471931904, 140474488717311,
+STORE, 140474270605312, 140474404823039,
+SNULL, 140475101057024, 140475126235135,
+STORE, 140475126235136, 140475159805951,
+STORE, 140475101057024, 140475126235135,
+SNULL, 140475126239231, 140475159805951,
+STORE, 140475126235136, 140475126239231,
+STORE, 140475126239232, 140475159805951,
+STORE, 140474463539200, 140474488717311,
+STORE, 140474455146496, 140474488717311,
+SNULL, 140474455150591, 140474488717311,
+STORE, 140474455146496, 140474455150591,
+STORE, 140474455150592, 140474488717311,
+STORE, 140474446753792, 140474455146495,
+SNULL, 140474446757887, 140474455146495,
+STORE, 140474446753792, 140474446757887,
+STORE, 140474446757888, 140474455146495,
+STORE, 140474438361088, 140474446753791,
+STORE, 140474429968384, 140474446753791,
+SNULL, 140474429972479, 140474446753791,
+STORE, 140474429968384, 140474429972479,
+STORE, 140474429972480, 140474446753791,
+SNULL, 140475235278847, 140475243667455,
+STORE, 140475235274752, 140475235278847,
+STORE, 140475235278848, 140475243667455,
+SNULL, 140474757152767, 140474824261631,
+STORE, 140474488717312, 140474757152767,
+STORE, 140474757152768, 140474824261631,
+ERASE, 140474757152768, 140474824261631,
+SNULL, 140474488717312, 140474690043903,
+STORE, 140474690043904, 140474757152767,
+STORE, 140474488717312, 140474690043903,
+SNULL, 140474690179071, 140474757152767,
+STORE, 140474690043904, 140474690179071,
+STORE, 140474690179072, 140474757152767,
+SNULL, 140474488717312, 140474622935039,
+STORE, 140474622935040, 140474690043903,
+STORE, 140474488717312, 140474622935039,
+SNULL, 140474623070207, 140474690043903,
+STORE, 140474622935040, 140474623070207,
+STORE, 140474623070208, 140474690043903,
+SNULL, 140475101057024, 140475117842431,
+STORE, 140475117842432, 140475126235135,
+STORE, 140475101057024, 140475117842431,
+SNULL, 140475117846527, 140475126235135,
+STORE, 140475117842432, 140475117846527,
+STORE, 140475117846528, 140475126235135,
+SNULL, 140474555826175, 140474622935039,
+STORE, 140474488717312, 140474555826175,
+STORE, 140474555826176, 140474622935039,
+ERASE, 140474555826176, 140474622935039,
+STORE, 140474136387584, 140474404823039,
+SNULL, 140474136387584, 140474153172991,
+STORE, 140474153172992, 140474404823039,
+STORE, 140474136387584, 140474153172991,
+ERASE, 140474136387584, 140474153172991,
+STORE, 140474018955264, 140474404823039,
+STORE, 140473884737536, 140474404823039,
+SNULL, 140474086064127, 140474404823039,
+STORE, 140473884737536, 140474086064127,
+STORE, 140474086064128, 140474404823039,
+SNULL, 140474086064128, 140474153172991,
+STORE, 140474153172992, 140474404823039,
+STORE, 140474086064128, 140474153172991,
+ERASE, 140474086064128, 140474153172991,
+STORE, 140473750519808, 140474086064127,
+SNULL, 140473817628671, 140474086064127,
+STORE, 140473750519808, 140473817628671,
+STORE, 140473817628672, 140474086064127,
+SNULL, 140473817628672, 140473884737535,
+STORE, 140473884737536, 140474086064127,
+STORE, 140473817628672, 140473884737535,
+ERASE, 140473817628672, 140473884737535,
+SNULL, 140475126239232, 140475151413247,
+STORE, 140475151413248, 140475159805951,
+STORE, 140475126239232, 140475151413247,
+SNULL, 140475151417343, 140475159805951,
+STORE, 140475151413248, 140475151417343,
+STORE, 140475151417344, 140475159805951,
+SNULL, 140474270605311, 140474404823039,
+STORE, 140474153172992, 140474270605311,
+STORE, 140474270605312, 140474404823039,
+SNULL, 140474270605312, 140474287390719,
+STORE, 140474287390720, 140474404823039,
+STORE, 140474270605312, 140474287390719,
+ERASE, 140474270605312, 140474287390719,
+SNULL, 140474429972480, 140474438361087,
+STORE, 140474438361088, 140474446753791,
+STORE, 140474429972480, 140474438361087,
+SNULL, 140474438365183, 140474446753791,
+STORE, 140474438361088, 140474438365183,
+STORE, 140474438365184, 140474446753791,
+STORE, 140474815868928, 140474824261631,
+SNULL, 140474815873023, 140474824261631,
+STORE, 140474815868928, 140474815873023,
+STORE, 140474815873024, 140474824261631,
+SNULL, 140474220281855, 140474270605311,
+STORE, 140474153172992, 140474220281855,
+STORE, 140474220281856, 140474270605311,
+ERASE, 140474220281856, 140474270605311,
+SNULL, 140474488852479, 140474555826175,
+STORE, 140474488717312, 140474488852479,
+STORE, 140474488852480, 140474555826175,
+SNULL, 140475101057024, 140475109449727,
+STORE, 140475109449728, 140475117842431,
+STORE, 140475101057024, 140475109449727,
+SNULL, 140475109453823, 140475117842431,
+STORE, 140475109449728, 140475109453823,
+STORE, 140475109453824, 140475117842431,
+SNULL, 140473951846399, 140474086064127,
+STORE, 140473884737536, 140473951846399,
+STORE, 140473951846400, 140474086064127,
+SNULL, 140473951846400, 140474018955263,
+STORE, 140474018955264, 140474086064127,
+STORE, 140473951846400, 140474018955263,
+ERASE, 140473951846400, 140474018955263,
+SNULL, 140473884872703, 140473951846399,
+STORE, 140473884737536, 140473884872703,
+STORE, 140473884872704, 140473951846399,
+SNULL, 140474019090431, 140474086064127,
+STORE, 140474018955264, 140474019090431,
+STORE, 140474019090432, 140474086064127,
+SNULL, 140473750654975, 140473817628671,
+STORE, 140473750519808, 140473750654975,
+STORE, 140473750654976, 140473817628671,
+SNULL, 140474455150592, 140474463539199,
+STORE, 140474463539200, 140474488717311,
+STORE, 140474455150592, 140474463539199,
+SNULL, 140474463543295, 140474488717311,
+STORE, 140474463539200, 140474463543295,
+STORE, 140474463543296, 140474488717311,
+STORE, 140474807476224, 140474815868927,
+SNULL, 140474463543296, 140474471931903,
+STORE, 140474471931904, 140474488717311,
+STORE, 140474463543296, 140474471931903,
+SNULL, 140474471935999, 140474488717311,
+STORE, 140474471931904, 140474471935999,
+STORE, 140474471936000, 140474488717311,
+STORE, 140474799083520, 140474815868927,
+STORE, 140474790690816, 140474815868927,
+SNULL, 140474790690816, 140474799083519,
+STORE, 140474799083520, 140474815868927,
+STORE, 140474790690816, 140474799083519,
+SNULL, 140474799087615, 140474815868927,
+STORE, 140474799083520, 140474799087615,
+STORE, 140474799087616, 140474815868927,
+SNULL, 140474354499583, 140474404823039,
+STORE, 140474287390720, 140474354499583,
+STORE, 140474354499584, 140474404823039,
+ERASE, 140474354499584, 140474404823039,
+SNULL, 140474287525887, 140474354499583,
+STORE, 140474287390720, 140474287525887,
+STORE, 140474287525888, 140474354499583,
+STORE, 140474782298112, 140474799083519,
+STORE, 140474773905408, 140474799083519,
+SNULL, 140474773909503, 140474799083519,
+STORE, 140474773905408, 140474773909503,
+STORE, 140474773909504, 140474799083519,
+SNULL, 140475126239232, 140475134627839,
+STORE, 140475134627840, 140475151413247,
+STORE, 140475126239232, 140475134627839,
+SNULL, 140475134631935, 140475151413247,
+STORE, 140475134627840, 140475134631935,
+STORE, 140475134631936, 140475151413247,
+STORE, 140474765512704, 140474773905407,
+STORE, 140474614542336, 140474622935039,
+SNULL, 140474153308159, 140474220281855,
+STORE, 140474153172992, 140474153308159,
+STORE, 140474153308160, 140474220281855,
+SNULL, 140474404827135, 140474413215743,
+STORE, 140474404823040, 140474404827135,
+STORE, 140474404827136, 140474413215743,
+STORE, 140474606149632, 140474622935039,
+SNULL, 140474606153727, 140474622935039,
+STORE, 140474606149632, 140474606153727,
+STORE, 140474606153728, 140474622935039,
+STORE, 140474597756928, 140474606149631,
+SNULL, 140474597761023, 140474606149631,
+STORE, 140474597756928, 140474597761023,
+STORE, 140474597761024, 140474606149631,
+SNULL, 140475134631936, 140475143020543,
+STORE, 140475143020544, 140475151413247,
+STORE, 140475134631936, 140475143020543,
+SNULL, 140475143024639, 140475151413247,
+STORE, 140475143020544, 140475143024639,
+STORE, 140475143024640, 140475151413247,
+STORE, 140474589364224, 140474597756927,
+SNULL, 140474606153728, 140474614542335,
+STORE, 140474614542336, 140474622935039,
+STORE, 140474606153728, 140474614542335,
+SNULL, 140474614546431, 140474622935039,
+STORE, 140474614542336, 140474614546431,
+STORE, 140474614546432, 140474622935039,
+SNULL, 140474765516799, 140474773905407,
+STORE, 140474765512704, 140474765516799,
+STORE, 140474765516800, 140474773905407,
+STORE, 140474580971520, 140474597756927,
+SNULL, 140474773909504, 140474782298111,
+STORE, 140474782298112, 140474799083519,
+STORE, 140474773909504, 140474782298111,
+SNULL, 140474782302207, 140474799083519,
+STORE, 140474782298112, 140474782302207,
+STORE, 140474782302208, 140474799083519,
+SNULL, 140474471936000, 140474480324607,
+STORE, 140474480324608, 140474488717311,
+STORE, 140474471936000, 140474480324607,
+SNULL, 140474480328703, 140474488717311,
+STORE, 140474480324608, 140474480328703,
+STORE, 140474480328704, 140474488717311,
+STORE, 140474572578816, 140474597756927,
+SNULL, 140474572582911, 140474597756927,
+STORE, 140474572578816, 140474572582911,
+STORE, 140474572582912, 140474597756927,
+SNULL, 140474782302208, 140474790690815,
+STORE, 140474790690816, 140474799083519,
+STORE, 140474782302208, 140474790690815,
+SNULL, 140474790694911, 140474799083519,
+STORE, 140474790690816, 140474790694911,
+STORE, 140474790694912, 140474799083519,
+STORE, 140474564186112, 140474572578815,
+STORE, 140474421575680, 140474429968383,
+STORE, 140474396430336, 140474404823039,
+SNULL, 140474396434431, 140474404823039,
+STORE, 140474396430336, 140474396434431,
+STORE, 140474396434432, 140474404823039,
+STORE, 140474388037632, 140474396430335,
+SNULL, 140474799087616, 140474807476223,
+STORE, 140474807476224, 140474815868927,
+STORE, 140474799087616, 140474807476223,
+SNULL, 140474807480319, 140474815868927,
+STORE, 140474807476224, 140474807480319,
+STORE, 140474807480320, 140474815868927,
+SNULL, 140475101061119, 140475109449727,
+STORE, 140475101057024, 140475101061119,
+STORE, 140475101061120, 140475109449727,
+STORE, 140474379644928, 140474396430335,
+SNULL, 140474572582912, 140474589364223,
+STORE, 140474589364224, 140474597756927,
+STORE, 140474572582912, 140474589364223,
+SNULL, 140474589368319, 140474597756927,
+STORE, 140474589364224, 140474589368319,
+STORE, 140474589368320, 140474597756927,
+STORE, 140474371252224, 140474396430335,
+STORE, 140474362859520, 140474396430335,
+STORE, 140474278998016, 140474287390719,
+STORE, 140474270605312, 140474287390719,
+STORE, 140474262212608, 140474287390719,
+SNULL, 140474262216703, 140474287390719,
+STORE, 140474262212608, 140474262216703,
+STORE, 140474262216704, 140474287390719,
+STORE, 140474253819904, 140474262212607,
+SNULL, 140474253823999, 140474262212607,
+STORE, 140474253819904, 140474253823999,
+STORE, 140474253824000, 140474262212607,
+SNULL, 140474362859520, 140474388037631,
+STORE, 140474388037632, 140474396430335,
+STORE, 140474362859520, 140474388037631,
+SNULL, 140474388041727, 140474396430335,
+STORE, 140474388037632, 140474388041727,
+STORE, 140474388041728, 140474396430335,
+SNULL, 140474362859520, 140474379644927,
+STORE, 140474379644928, 140474388037631,
+STORE, 140474362859520, 140474379644927,
+SNULL, 140474379649023, 140474388037631,
+STORE, 140474379644928, 140474379649023,
+STORE, 140474379649024, 140474388037631,
+STORE, 140474245427200, 140474253819903,
+STORE, 140474237034496, 140474253819903,
+STORE, 140474228641792, 140474253819903,
+STORE, 140474144780288, 140474153172991,
+SNULL, 140474228645887, 140474253819903,
+STORE, 140474228641792, 140474228645887,
+STORE, 140474228645888, 140474253819903,
+SNULL, 140474564190207, 140474572578815,
+STORE, 140474564186112, 140474564190207,
+STORE, 140474564190208, 140474572578815,
+STORE, 140474136387584, 140474153172991,
+SNULL, 140474362859520, 140474371252223,
+STORE, 140474371252224, 140474379644927,
+STORE, 140474362859520, 140474371252223,
+SNULL, 140474371256319, 140474379644927,
+STORE, 140474371252224, 140474371256319,
+STORE, 140474371256320, 140474379644927,
+STORE, 140474127994880, 140474153172991,
+STORE, 140474119602176, 140474153172991,
+SNULL, 140474421579775, 140474429968383,
+STORE, 140474421575680, 140474421579775,
+STORE, 140474421579776, 140474429968383,
+STORE, 140474111209472, 140474153172991,
+SNULL, 140474111213567, 140474153172991,
+STORE, 140474111209472, 140474111213567,
+STORE, 140474111213568, 140474153172991,
+SNULL, 140474262216704, 140474270605311,
+STORE, 140474270605312, 140474287390719,
+STORE, 140474262216704, 140474270605311,
+SNULL, 140474270609407, 140474287390719,
+STORE, 140474270605312, 140474270609407,
+STORE, 140474270609408, 140474287390719,
+STORE, 140474102816768, 140474111209471,
+SNULL, 140474102820863, 140474111209471,
+STORE, 140474102816768, 140474102820863,
+STORE, 140474102820864, 140474111209471,
+SNULL, 140474270609408, 140474278998015,
+STORE, 140474278998016, 140474287390719,
+STORE, 140474270609408, 140474278998015,
+SNULL, 140474279002111, 140474287390719,
+STORE, 140474278998016, 140474279002111,
+STORE, 140474279002112, 140474287390719,
+STORE, 140474094424064, 140474102816767,
+SNULL, 140474572582912, 140474580971519,
+STORE, 140474580971520, 140474589364223,
+STORE, 140474572582912, 140474580971519,
+SNULL, 140474580975615, 140474589364223,
+STORE, 140474580971520, 140474580975615,
+STORE, 140474580975616, 140474589364223,
+SNULL, 140474362863615, 140474371252223,
+STORE, 140474362859520, 140474362863615,
+STORE, 140474362863616, 140474371252223,
+STORE, 140474010562560, 140474018955263,
+SNULL, 140474228645888, 140474245427199,
+STORE, 140474245427200, 140474253819903,
+STORE, 140474228645888, 140474245427199,
+SNULL, 140474245431295, 140474253819903,
+STORE, 140474245427200, 140474245431295,
+STORE, 140474245431296, 140474253819903,
+SNULL, 140474111213568, 140474136387583,
+STORE, 140474136387584, 140474153172991,
+STORE, 140474111213568, 140474136387583,
+SNULL, 140474136391679, 140474153172991,
+STORE, 140474136387584, 140474136391679,
+STORE, 140474136391680, 140474153172991,
+STORE, 140474002169856, 140474018955263,
+STORE, 140473993777152, 140474018955263,
+SNULL, 140474111213568, 140474127994879,
+STORE, 140474127994880, 140474136387583,
+STORE, 140474111213568, 140474127994879,
+SNULL, 140474127998975, 140474136387583,
+STORE, 140474127994880, 140474127998975,
+STORE, 140474127998976, 140474136387583,
+SNULL, 140474228645888, 140474237034495,
+STORE, 140474237034496, 140474245427199,
+STORE, 140474228645888, 140474237034495,
+SNULL, 140474237038591, 140474245427199,
+STORE, 140474237034496, 140474237038591,
+STORE, 140474237038592, 140474245427199,
+SNULL, 140474136391680, 140474144780287,
+STORE, 140474144780288, 140474153172991,
+STORE, 140474136391680, 140474144780287,
+SNULL, 140474144784383, 140474153172991,
+STORE, 140474144780288, 140474144784383,
+STORE, 140474144784384, 140474153172991,
+STORE, 140473985384448, 140474018955263,
+STORE, 140473976991744, 140474018955263,
+STORE, 140473968599040, 140474018955263,
+SNULL, 140473968603135, 140474018955263,
+STORE, 140473968599040, 140473968603135,
+STORE, 140473968603136, 140474018955263,
+SNULL, 140474111213568, 140474119602175,
+STORE, 140474119602176, 140474127994879,
+STORE, 140474111213568, 140474119602175,
+SNULL, 140474119606271, 140474127994879,
+STORE, 140474119602176, 140474119606271,
+STORE, 140474119606272, 140474127994879,
+STORE, 140473960206336, 140473968599039,
+SNULL, 140474094428159, 140474102816767,
+STORE, 140474094424064, 140474094428159,
+STORE, 140474094428160, 140474102816767,
+STORE, 140473876344832, 140473884737535,
+STORE, 140473867952128, 140473884737535,
+STORE, 140473859559424, 140473884737535,
+SNULL, 140473859563519, 140473884737535,
+STORE, 140473859559424, 140473859563519,
+STORE, 140473859563520, 140473884737535,
+SNULL, 140473968603136, 140473993777151,
+STORE, 140473993777152, 140474018955263,
+STORE, 140473968603136, 140473993777151,
+SNULL, 140473993781247, 140474018955263,
+STORE, 140473993777152, 140473993781247,
+STORE, 140473993781248, 140474018955263,
+SNULL, 140473960210431, 140473968599039,
+STORE, 140473960206336, 140473960210431,
+STORE, 140473960210432, 140473968599039,
+SNULL, 140473993781248, 140474010562559,
+STORE, 140474010562560, 140474018955263,
+STORE, 140473993781248, 140474010562559,
+SNULL, 140474010566655, 140474018955263,
+STORE, 140474010562560, 140474010566655,
+STORE, 140474010566656, 140474018955263,
+SNULL, 140473968603136, 140473985384447,
+STORE, 140473985384448, 140473993777151,
+STORE, 140473968603136, 140473985384447,
+SNULL, 140473985388543, 140473993777151,
+STORE, 140473985384448, 140473985388543,
+STORE, 140473985388544, 140473993777151,
+SNULL, 140473993781248, 140474002169855,
+STORE, 140474002169856, 140474010562559,
+STORE, 140473993781248, 140474002169855,
+SNULL, 140474002173951, 140474010562559,
+STORE, 140474002169856, 140474002173951,
+STORE, 140474002173952, 140474010562559,
+STORE, 140473851166720, 140473859559423,
+SNULL, 140473851170815, 140473859559423,
+STORE, 140473851166720, 140473851170815,
+STORE, 140473851170816, 140473859559423,
+SNULL, 140473968603136, 140473976991743,
+STORE, 140473976991744, 140473985384447,
+STORE, 140473968603136, 140473976991743,
+SNULL, 140473976995839, 140473985384447,
+STORE, 140473976991744, 140473976995839,
+STORE, 140473976995840, 140473985384447,
+STORE, 140473842774016, 140473851166719,
+SNULL, 140473859563520, 140473867952127,
+STORE, 140473867952128, 140473884737535,
+STORE, 140473859563520, 140473867952127,
+SNULL, 140473867956223, 140473884737535,
+STORE, 140473867952128, 140473867956223,
+STORE, 140473867956224, 140473884737535,
+SNULL, 140473867956224, 140473876344831,
+STORE, 140473876344832, 140473884737535,
+STORE, 140473867956224, 140473876344831,
+SNULL, 140473876348927, 140473884737535,
+STORE, 140473876344832, 140473876348927,
+STORE, 140473876348928, 140473884737535,
+STORE, 140473834381312, 140473851166719,
+SNULL, 140473834385407, 140473851166719,
+STORE, 140473834381312, 140473834385407,
+STORE, 140473834385408, 140473851166719,
+SNULL, 140473834385408, 140473842774015,
+STORE, 140473842774016, 140473851166719,
+STORE, 140473834385408, 140473842774015,
+SNULL, 140473842778111, 140473851166719,
+STORE, 140473842774016, 140473842778111,
+STORE, 140473842778112, 140473851166719,
+STORE, 140473825988608, 140473834381311,
+SNULL, 140473825992703, 140473834381311,
+STORE, 140473825988608, 140473825992703,
+STORE, 140473825992704, 140473834381311,
+STORE, 140475577475072, 140475577503743,
+STORE, 140475499917312, 140475502108671,
+SNULL, 140475499917312, 140475500007423,
+STORE, 140475500007424, 140475502108671,
+STORE, 140475499917312, 140475500007423,
+SNULL, 140475502100479, 140475502108671,
+STORE, 140475500007424, 140475502100479,
+STORE, 140475502100480, 140475502108671,
+ERASE, 140475502100480, 140475502108671,
+STORE, 140475502100480, 140475502108671,
+SNULL, 140475502104575, 140475502108671,
+STORE, 140475502100480, 140475502104575,
+STORE, 140475502104576, 140475502108671,
+ERASE, 140475577475072, 140475577503743,
+ERASE, 140475235274752, 140475235278847,
+ERASE, 140475235278848, 140475243667455,
+ERASE, 140474815868928, 140474815873023,
+ERASE, 140474815873024, 140474824261631,
+ERASE, 140474606149632, 140474606153727,
+ERASE, 140474606153728, 140474614542335,
+ERASE, 140474270605312, 140474270609407,
+ERASE, 140474270609408, 140474278998015,
+ERASE, 140474438361088, 140474438365183,
+ERASE, 140474438365184, 140474446753791,
+ERASE, 140474597756928, 140474597761023,
+ERASE, 140474597761024, 140474606149631,
+ERASE, 140475126235136, 140475126239231,
+ERASE, 140475126239232, 140475134627839,
+ERASE, 140474463539200, 140474463543295,
+ERASE, 140474463543296, 140474471931903,
+ERASE, 140474388037632, 140474388041727,
+ERASE, 140474388041728, 140474396430335,
+ERASE, 140474404823040, 140474404827135,
+ERASE, 140474404827136, 140474413215743,
+ERASE, 140474278998016, 140474279002111,
+ERASE, 140474279002112, 140474287390719,
+ERASE, 140474094424064, 140474094428159,
+ERASE, 140474094428160, 140474102816767,
+ERASE, 140473867952128, 140473867956223,
+ERASE, 140473867956224, 140473876344831,
+ERASE, 140475151413248, 140475151417343,
+ERASE, 140475151417344, 140475159805951,
+ERASE, 140474455146496, 140474455150591,
+ERASE, 140474455150592, 140474463539199,
+ERASE, 140474807476224, 140474807480319,
+ERASE, 140474807480320, 140474815868927,
+ERASE, 140475117842432, 140475117846527,
+ERASE, 140475117846528, 140475126235135,
+ERASE, 140474446753792, 140474446757887,
+ERASE, 140474446757888, 140474455146495,
+ERASE, 140474429968384, 140474429972479,
+ERASE, 140474429972480, 140474438361087,
+ERASE, 140474782298112, 140474782302207,
+ERASE, 140474782302208, 140474790690815,
+ERASE, 140474136387584, 140474136391679,
+ERASE, 140474136391680, 140474144780287,
+ERASE, 140474002169856, 140474002173951,
+ERASE, 140474002173952, 140474010562559,
+ERASE, 140475134627840, 140475134631935,
+ERASE, 140475134631936, 140475143020543,
+ERASE, 140474471931904, 140474471935999,
+ERASE, 140474471936000, 140474480324607,
+ERASE, 140474396430336, 140474396434431,
+ERASE, 140474396434432, 140474404823039,
+ };
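+/*
+ * Reading of the tables above and below (an interpretation of the data,
+ * not stated in this hunk): each set is a flat array consumed three
+ * entries at a time as (op, start, end), where STORE inserts a range,
+ * SNULL stores NULL over part of an existing range (splitting it), and
+ * ERASE removes one. The end addresses are inclusive last bytes: end + 1
+ * is page aligned throughout, and the ranges run up to 0x7fffffffffff,
+ * so these look like traces captured from mmap()-heavy userspace runs
+ * and replayed by the surrounding test harness.
+ */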
+ static const unsigned long set36[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140723893125120, 140737488351231,
+SNULL, 140723893129215, 140737488351231,
+STORE, 140723893125120, 140723893129215,
+STORE, 140723892994048, 140723893129215,
+STORE, 94076829786112, 94076832038911,
+SNULL, 94076829917183, 94076832038911,
+STORE, 94076829786112, 94076829917183,
+STORE, 94076829917184, 94076832038911,
+ERASE, 94076829917184, 94076832038911,
+STORE, 94076832010240, 94076832018431,
+STORE, 94076832018432, 94076832038911,
+STORE, 140122444345344, 140122446598143,
+SNULL, 140122444488703, 140122446598143,
+STORE, 140122444345344, 140122444488703,
+STORE, 140122444488704, 140122446598143,
+ERASE, 140122444488704, 140122446598143,
+STORE, 140122446585856, 140122446594047,
+STORE, 140122446594048, 140122446598143,
+STORE, 140723893538816, 140723893542911,
+STORE, 140723893526528, 140723893538815,
+STORE, 140122446557184, 140122446585855,
+STORE, 140122446548992, 140122446557183,
+STORE, 140122442129408, 140122444345343,
+SNULL, 140122442129408, 140122442227711,
+STORE, 140122442227712, 140122444345343,
+STORE, 140122442129408, 140122442227711,
+SNULL, 140122444320767, 140122444345343,
+STORE, 140122442227712, 140122444320767,
+STORE, 140122444320768, 140122444345343,
+SNULL, 140122444320768, 140122444328959,
+STORE, 140122444328960, 140122444345343,
+STORE, 140122444320768, 140122444328959,
+ERASE, 140122444320768, 140122444328959,
+STORE, 140122444320768, 140122444328959,
+ERASE, 140122444328960, 140122444345343,
+STORE, 140122444328960, 140122444345343,
+STORE, 140122438332416, 140122442129407,
+SNULL, 140122438332416, 140122439991295,
+STORE, 140122439991296, 140122442129407,
+STORE, 140122438332416, 140122439991295,
+SNULL, 140122442088447, 140122442129407,
+STORE, 140122439991296, 140122442088447,
+STORE, 140122442088448, 140122442129407,
+SNULL, 140122442088448, 140122442113023,
+STORE, 140122442113024, 140122442129407,
+STORE, 140122442088448, 140122442113023,
+ERASE, 140122442088448, 140122442113023,
+STORE, 140122442088448, 140122442113023,
+ERASE, 140122442113024, 140122442129407,
+STORE, 140122442113024, 140122442129407,
+STORE, 140122446540800, 140122446557183,
+SNULL, 140122442104831, 140122442113023,
+STORE, 140122442088448, 140122442104831,
+STORE, 140122442104832, 140122442113023,
+SNULL, 140122444324863, 140122444328959,
+STORE, 140122444320768, 140122444324863,
+STORE, 140122444324864, 140122444328959,
+SNULL, 94076832014335, 94076832018431,
+STORE, 94076832010240, 94076832014335,
+STORE, 94076832014336, 94076832018431,
+SNULL, 140122446589951, 140122446594047,
+STORE, 140122446585856, 140122446589951,
+STORE, 140122446589952, 140122446594047,
+ERASE, 140122446557184, 140122446585855,
+STORE, 94076845723648, 94076845858815,
+STORE, 140122429939712, 140122438332415,
+SNULL, 140122429943807, 140122438332415,
+STORE, 140122429939712, 140122429943807,
+STORE, 140122429943808, 140122438332415,
+STORE, 140122421547008, 140122429939711,
+STORE, 140122287329280, 140122421547007,
+SNULL, 140122287329280, 140122301399039,
+STORE, 140122301399040, 140122421547007,
+STORE, 140122287329280, 140122301399039,
+ERASE, 140122287329280, 140122301399039,
+SNULL, 140122368507903, 140122421547007,
+STORE, 140122301399040, 140122368507903,
+STORE, 140122368507904, 140122421547007,
+ERASE, 140122368507904, 140122421547007,
+SNULL, 140122301534207, 140122368507903,
+STORE, 140122301399040, 140122301534207,
+STORE, 140122301534208, 140122368507903,
+SNULL, 140122421551103, 140122429939711,
+STORE, 140122421547008, 140122421551103,
+STORE, 140122421551104, 140122429939711,
+STORE, 140122413154304, 140122421547007,
+SNULL, 140122413158399, 140122421547007,
+STORE, 140122413154304, 140122413158399,
+STORE, 140122413158400, 140122421547007,
+STORE, 140122404761600, 140122413154303,
+SNULL, 140122404765695, 140122413154303,
+STORE, 140122404761600, 140122404765695,
+STORE, 140122404765696, 140122413154303,
+STORE, 140122396368896, 140122404761599,
+SNULL, 140122396372991, 140122404761599,
+STORE, 140122396368896, 140122396372991,
+STORE, 140122396372992, 140122404761599,
+STORE, 140122387976192, 140122396368895,
+STORE, 140122167181312, 140122301399039,
+SNULL, 140122234290175, 140122301399039,
+STORE, 140122167181312, 140122234290175,
+STORE, 140122234290176, 140122301399039,
+ERASE, 140122234290176, 140122301399039,
+SNULL, 140122167316479, 140122234290175,
+STORE, 140122167181312, 140122167316479,
+STORE, 140122167316480, 140122234290175,
+STORE, 140122379583488, 140122396368895,
+STORE, 140122371190784, 140122396368895,
+STORE, 140122167316480, 140122301399039,
+STORE, 140122158788608, 140122167181311,
+SNULL, 140122371190784, 140122387976191,
+STORE, 140122387976192, 140122396368895,
+STORE, 140122371190784, 140122387976191,
+SNULL, 140122387980287, 140122396368895,
+STORE, 140122387976192, 140122387980287,
+STORE, 140122387980288, 140122396368895,
+SNULL, 140122167316480, 140122234290175,
+STORE, 140122234290176, 140122301399039,
+STORE, 140122167316480, 140122234290175,
+SNULL, 140122234425343, 140122301399039,
+STORE, 140122234290176, 140122234425343,
+STORE, 140122234425344, 140122301399039,
+STORE, 140122024570880, 140122158788607,
+SNULL, 140122024570880, 140122032963583,
+STORE, 140122032963584, 140122158788607,
+STORE, 140122024570880, 140122032963583,
+ERASE, 140122024570880, 140122032963583,
+STORE, 140121898745856, 140122158788607,
+STORE, 140121890353152, 140121898745855,
+SNULL, 140122100072447, 140122158788607,
+STORE, 140121898745856, 140122100072447,
+STORE, 140122100072448, 140122158788607,
+ERASE, 140122100072448, 140122158788607,
+SNULL, 140121965854719, 140122100072447,
+STORE, 140121898745856, 140121965854719,
+STORE, 140121965854720, 140122100072447,
+SNULL, 140121965854720, 140122032963583,
+STORE, 140122032963584, 140122100072447,
+STORE, 140121965854720, 140122032963583,
+ERASE, 140121965854720, 140122032963583,
+SNULL, 140121898881023, 140121965854719,
+STORE, 140121898745856, 140121898881023,
+STORE, 140121898881024, 140121965854719,
+SNULL, 140121890357247, 140121898745855,
+STORE, 140121890353152, 140121890357247,
+STORE, 140121890357248, 140121898745855,
+SNULL, 140122371190784, 140122379583487,
+STORE, 140122379583488, 140122387976191,
+STORE, 140122371190784, 140122379583487,
+SNULL, 140122379587583, 140122387976191,
+STORE, 140122379583488, 140122379587583,
+STORE, 140122379587584, 140122387976191,
+SNULL, 140122033098751, 140122100072447,
+STORE, 140122032963584, 140122033098751,
+STORE, 140122033098752, 140122100072447,
+SNULL, 140122158792703, 140122167181311,
+STORE, 140122158788608, 140122158792703,
+STORE, 140122158792704, 140122167181311,
+STORE, 140122150395904, 140122158788607,
+STORE, 140122142003200, 140122158788607,
+SNULL, 140122142007295, 140122158788607,
+STORE, 140122142003200, 140122142007295,
+STORE, 140122142007296, 140122158788607,
+SNULL, 140122371194879, 140122379583487,
+STORE, 140122371190784, 140122371194879,
+STORE, 140122371194880, 140122379583487,
+SNULL, 140122142007296, 140122150395903,
+STORE, 140122150395904, 140122158788607,
+STORE, 140122142007296, 140122150395903,
+SNULL, 140122150399999, 140122158788607,
+STORE, 140122150395904, 140122150399999,
+STORE, 140122150400000, 140122158788607,
+STORE, 140122133610496, 140122142003199,
+STORE, 140122125217792, 140122142003199,
+STORE, 140122116825088, 140122142003199,
+SNULL, 140122116829183, 140122142003199,
+STORE, 140122116825088, 140122116829183,
+STORE, 140122116829184, 140122142003199,
+SNULL, 140122116829184, 140122133610495,
+STORE, 140122133610496, 140122142003199,
+STORE, 140122116829184, 140122133610495,
+SNULL, 140122133614591, 140122142003199,
+STORE, 140122133610496, 140122133614591,
+STORE, 140122133614592, 140122142003199,
+SNULL, 140122116829184, 140122125217791,
+STORE, 140122125217792, 140122133610495,
+STORE, 140122116829184, 140122125217791,
+SNULL, 140122125221887, 140122133610495,
+STORE, 140122125217792, 140122125221887,
+STORE, 140122125221888, 140122133610495,
+STORE, 140122108432384, 140122116825087,
+SNULL, 140122108436479, 140122116825087,
+STORE, 140122108432384, 140122108436479,
+STORE, 140122108436480, 140122116825087,
+STORE, 140122024570880, 140122032963583,
+STORE, 140122016178176, 140122032963583,
+SNULL, 140122016182271, 140122032963583,
+STORE, 140122016178176, 140122016182271,
+STORE, 140122016182272, 140122032963583,
+SNULL, 140122016182272, 140122024570879,
+STORE, 140122024570880, 140122032963583,
+STORE, 140122016182272, 140122024570879,
+SNULL, 140122024574975, 140122032963583,
+STORE, 140122024570880, 140122024574975,
+STORE, 140122024574976, 140122032963583,
+STORE, 140122007785472, 140122016178175,
+SNULL, 140122007789567, 140122016178175,
+STORE, 140122007785472, 140122007789567,
+STORE, 140122007789568, 140122016178175,
+STORE, 140121999392768, 140122007785471,
+STORE, 140121991000064, 140122007785471,
+SNULL, 140121991004159, 140122007785471,
+STORE, 140121991000064, 140121991004159,
+STORE, 140121991004160, 140122007785471,
+SNULL, 140121991004160, 140121999392767,
+STORE, 140121999392768, 140122007785471,
+STORE, 140121991004160, 140121999392767,
+SNULL, 140121999396863, 140122007785471,
+STORE, 140121999392768, 140121999396863,
+STORE, 140121999396864, 140122007785471,
+STORE, 140121982607360, 140121991000063,
+STORE, 140121823244288, 140121890353151,
+ERASE, 140121823244288, 140121890353151,
+STORE, 140121756135424, 140121890353151,
+SNULL, 140121756135424, 140121764528127,
+STORE, 140121764528128, 140121890353151,
+STORE, 140121756135424, 140121764528127,
+ERASE, 140121756135424, 140121764528127,
+SNULL, 140121831636991, 140121890353151,
+STORE, 140121764528128, 140121831636991,
+STORE, 140121831636992, 140121890353151,
+ERASE, 140121831636992, 140121890353151,
+STORE, 140121974214656, 140121991000063,
+STORE, 140121630310400, 140121831636991,
+SNULL, 140121697419263, 140121831636991,
+STORE, 140121630310400, 140121697419263,
+STORE, 140121697419264, 140121831636991,
+SNULL, 140121697419264, 140121764528127,
+STORE, 140121764528128, 140121831636991,
+STORE, 140121697419264, 140121764528127,
+ERASE, 140121697419264, 140121764528127,
+STORE, 140121881960448, 140121890353151,
+STORE, 140121630310400, 140121831636991,
+STORE, 140121873567744, 140121890353151,
+SNULL, 140121630310400, 140121697419263,
+STORE, 140121697419264, 140121831636991,
+STORE, 140121630310400, 140121697419263,
+SNULL, 140121697554431, 140121831636991,
+STORE, 140121697419264, 140121697554431,
+STORE, 140121697554432, 140121831636991,
+STORE, 140121865175040, 140121890353151,
+STORE, 140121856782336, 140121890353151,
+STORE, 140121848389632, 140121890353151,
+STORE, 140121839996928, 140121890353151,
+STORE, 140121496092672, 140121697419263,
+STORE, 140121487699968, 140121496092671,
+STORE, 140121420591104, 140121487699967,
+STORE, 140121412198400, 140121420591103,
+ERASE, 140121420591104, 140121487699967,
+STORE, 140121479307264, 140121496092671,
+STORE, 140121277980672, 140121412198399,
+SNULL, 140121277980672, 140121294766079,
+STORE, 140121294766080, 140121412198399,
+STORE, 140121277980672, 140121294766079,
+ERASE, 140121277980672, 140121294766079,
+STORE, 140121470914560, 140121496092671,
+STORE, 140121462521856, 140121496092671,
+STORE, 140121160548352, 140121412198399,
+STORE, 140121454129152, 140121496092671,
+SNULL, 140121227657215, 140121412198399,
+STORE, 140121160548352, 140121227657215,
+STORE, 140121227657216, 140121412198399,
+SNULL, 140121227657216, 140121294766079,
+STORE, 140121294766080, 140121412198399,
+STORE, 140121227657216, 140121294766079,
+ERASE, 140121227657216, 140121294766079,
+STORE, 140121445736448, 140121496092671,
+STORE, 140121437343744, 140121496092671,
+SNULL, 140121437343744, 140121445736447,
+STORE, 140121445736448, 140121496092671,
+STORE, 140121437343744, 140121445736447,
+SNULL, 140121445740543, 140121496092671,
+STORE, 140121445736448, 140121445740543,
+STORE, 140121445740544, 140121496092671,
+SNULL, 140121697554432, 140121764528127,
+STORE, 140121764528128, 140121831636991,
+STORE, 140121697554432, 140121764528127,
+SNULL, 140121764663295, 140121831636991,
+STORE, 140121764528128, 140121764663295,
+STORE, 140121764663296, 140121831636991,
+SNULL, 140121496092672, 140121630310399,
+STORE, 140121630310400, 140121697419263,
+STORE, 140121496092672, 140121630310399,
+SNULL, 140121630445567, 140121697419263,
+STORE, 140121630310400, 140121630445567,
+STORE, 140121630445568, 140121697419263,
+SNULL, 140121445740544, 140121454129151,
+STORE, 140121454129152, 140121496092671,
+STORE, 140121445740544, 140121454129151,
+SNULL, 140121454133247, 140121496092671,
+STORE, 140121454129152, 140121454133247,
+STORE, 140121454133248, 140121496092671,
+STORE, 140121026330624, 140121227657215,
+SNULL, 140121093439487, 140121227657215,
+STORE, 140121026330624, 140121093439487,
+STORE, 140121093439488, 140121227657215,
+SNULL, 140121093439488, 140121160548351,
+STORE, 140121160548352, 140121227657215,
+STORE, 140121093439488, 140121160548351,
+ERASE, 140121093439488, 140121160548351,
+SNULL, 140121563201535, 140121630310399,
+STORE, 140121496092672, 140121563201535,
+STORE, 140121563201536, 140121630310399,
+ERASE, 140121563201536, 140121630310399,
+STORE, 140120892112896, 140121093439487,
+SNULL, 140120959221759, 140121093439487,
+STORE, 140120892112896, 140120959221759,
+STORE, 140120959221760, 140121093439487,
+SNULL, 140120959221760, 140121026330623,
+STORE, 140121026330624, 140121093439487,
+STORE, 140120959221760, 140121026330623,
+ERASE, 140120959221760, 140121026330623,
+STORE, 140120757895168, 140120959221759,
+SNULL, 140121361874943, 140121412198399,
+STORE, 140121294766080, 140121361874943,
+STORE, 140121361874944, 140121412198399,
+ERASE, 140121361874944, 140121412198399,
+SNULL, 140121294901247, 140121361874943,
+STORE, 140121294766080, 140121294901247,
+STORE, 140121294901248, 140121361874943,
+STORE, 140120623677440, 140120959221759,
+SNULL, 140120690786303, 140120959221759,
+STORE, 140120623677440, 140120690786303,
+STORE, 140120690786304, 140120959221759,
+SNULL, 140120690786304, 140120757895167,
+STORE, 140120757895168, 140120959221759,
+STORE, 140120690786304, 140120757895167,
+ERASE, 140120690786304, 140120757895167,
+SNULL, 140121160683519, 140121227657215,
+STORE, 140121160548352, 140121160683519,
+STORE, 140121160683520, 140121227657215,
+SNULL, 140121974214656, 140121982607359,
+STORE, 140121982607360, 140121991000063,
+STORE, 140121974214656, 140121982607359,
+SNULL, 140121982611455, 140121991000063,
+STORE, 140121982607360, 140121982611455,
+STORE, 140121982611456, 140121991000063,
+SNULL, 140121839996928, 140121873567743,
+STORE, 140121873567744, 140121890353151,
+STORE, 140121839996928, 140121873567743,
+SNULL, 140121873571839, 140121890353151,
+STORE, 140121873567744, 140121873571839,
+STORE, 140121873571840, 140121890353151,
+SNULL, 140121873571840, 140121881960447,
+STORE, 140121881960448, 140121890353151,
+STORE, 140121873571840, 140121881960447,
+SNULL, 140121881964543, 140121890353151,
+STORE, 140121881960448, 140121881964543,
+STORE, 140121881964544, 140121890353151,
+SNULL, 140121840001023, 140121873567743,
+STORE, 140121839996928, 140121840001023,
+STORE, 140121840001024, 140121873567743,
+SNULL, 140121840001024, 140121865175039,
+STORE, 140121865175040, 140121873567743,
+STORE, 140121840001024, 140121865175039,
+SNULL, 140121865179135, 140121873567743,
+STORE, 140121865175040, 140121865179135,
+STORE, 140121865179136, 140121873567743,
+SNULL, 140121437347839, 140121445736447,
+STORE, 140121437343744, 140121437347839,
+STORE, 140121437347840, 140121445736447,
+STORE, 140121621917696, 140121630310399,
+STORE, 140121613524992, 140121630310399,
+SNULL, 140121026465791, 140121093439487,
+STORE, 140121026330624, 140121026465791,
+STORE, 140121026465792, 140121093439487,
+SNULL, 140121496227839, 140121563201535,
+STORE, 140121496092672, 140121496227839,
+STORE, 140121496227840, 140121563201535,
+SNULL, 140120757895168, 140120892112895,
+STORE, 140120892112896, 140120959221759,
+STORE, 140120757895168, 140120892112895,
+SNULL, 140120892248063, 140120959221759,
+STORE, 140120892112896, 140120892248063,
+STORE, 140120892248064, 140120959221759,
+SNULL, 140120825004031, 140120892112895,
+STORE, 140120757895168, 140120825004031,
+STORE, 140120825004032, 140120892112895,
+ERASE, 140120825004032, 140120892112895,
+SNULL, 140120623812607, 140120690786303,
+STORE, 140120623677440, 140120623812607,
+STORE, 140120623812608, 140120690786303,
+SNULL, 140120758030335, 140120825004031,
+STORE, 140120757895168, 140120758030335,
+STORE, 140120758030336, 140120825004031,
+SNULL, 140121454133248, 140121462521855,
+STORE, 140121462521856, 140121496092671,
+STORE, 140121454133248, 140121462521855,
+SNULL, 140121462525951, 140121496092671,
+STORE, 140121462521856, 140121462525951,
+STORE, 140121462525952, 140121496092671,
+STORE, 140121605132288, 140121630310399,
+SNULL, 140121605136383, 140121630310399,
+STORE, 140121605132288, 140121605136383,
+STORE, 140121605136384, 140121630310399,
+STORE, 140121596739584, 140121605132287,
+SNULL, 140121605136384, 140121621917695,
+STORE, 140121621917696, 140121630310399,
+STORE, 140121605136384, 140121621917695,
+SNULL, 140121621921791, 140121630310399,
+STORE, 140121621917696, 140121621921791,
+STORE, 140121621921792, 140121630310399,
+STORE, 140121588346880, 140121605132287,
+STORE, 140121579954176, 140121605132287,
+SNULL, 140121412202495, 140121420591103,
+STORE, 140121412198400, 140121412202495,
+STORE, 140121412202496, 140121420591103,
+SNULL, 140121974218751, 140121982607359,
+STORE, 140121974214656, 140121974218751,
+STORE, 140121974218752, 140121982607359,
+SNULL, 140121462525952, 140121479307263,
+STORE, 140121479307264, 140121496092671,
+STORE, 140121462525952, 140121479307263,
+SNULL, 140121479311359, 140121496092671,
+STORE, 140121479307264, 140121479311359,
+STORE, 140121479311360, 140121496092671,
+STORE, 140121571561472, 140121605132287,
+SNULL, 140121571565567, 140121605132287,
+STORE, 140121571561472, 140121571565567,
+STORE, 140121571565568, 140121605132287,
+STORE, 140121428951040, 140121437343743,
+SNULL, 140121428955135, 140121437343743,
+STORE, 140121428951040, 140121428955135,
+STORE, 140121428955136, 140121437343743,
+SNULL, 140121840001024, 140121856782335,
+STORE, 140121856782336, 140121865175039,
+STORE, 140121840001024, 140121856782335,
+SNULL, 140121856786431, 140121865175039,
+STORE, 140121856782336, 140121856786431,
+STORE, 140121856786432, 140121865175039,
+STORE, 140121403805696, 140121412198399,
+SNULL, 140121840001024, 140121848389631,
+STORE, 140121848389632, 140121856782335,
+STORE, 140121840001024, 140121848389631,
+SNULL, 140121848393727, 140121856782335,
+STORE, 140121848389632, 140121848393727,
+STORE, 140121848393728, 140121856782335,
+SNULL, 140121479311360, 140121487699967,
+STORE, 140121487699968, 140121496092671,
+STORE, 140121479311360, 140121487699967,
+SNULL, 140121487704063, 140121496092671,
+STORE, 140121487699968, 140121487704063,
+STORE, 140121487704064, 140121496092671,
+STORE, 140121395412992, 140121412198399,
+STORE, 140121387020288, 140121412198399,
+SNULL, 140121387024383, 140121412198399,
+STORE, 140121387020288, 140121387024383,
+STORE, 140121387024384, 140121412198399,
+SNULL, 140121605136384, 140121613524991,
+STORE, 140121613524992, 140121621917695,
+STORE, 140121605136384, 140121613524991,
+SNULL, 140121613529087, 140121621917695,
+STORE, 140121613524992, 140121613529087,
+STORE, 140121613529088, 140121621917695,
+SNULL, 140121462525952, 140121470914559,
+STORE, 140121470914560, 140121479307263,
+STORE, 140121462525952, 140121470914559,
+SNULL, 140121470918655, 140121479307263,
+STORE, 140121470914560, 140121470918655,
+STORE, 140121470918656, 140121479307263,
+STORE, 140121378627584, 140121387020287,
+SNULL, 140121378631679, 140121387020287,
+STORE, 140121378627584, 140121378631679,
+STORE, 140121378631680, 140121387020287,
+SNULL, 140121571565568, 140121596739583,
+STORE, 140121596739584, 140121605132287,
+STORE, 140121571565568, 140121596739583,
+SNULL, 140121596743679, 140121605132287,
+STORE, 140121596739584, 140121596743679,
+STORE, 140121596743680, 140121605132287,
+SNULL, 140121387024384, 140121403805695,
+STORE, 140121403805696, 140121412198399,
+STORE, 140121387024384, 140121403805695,
+SNULL, 140121403809791, 140121412198399,
+STORE, 140121403805696, 140121403809791,
+STORE, 140121403809792, 140121412198399,
+STORE, 140121370234880, 140121378627583,
+SNULL, 140121387024384, 140121395412991,
+STORE, 140121395412992, 140121403805695,
+STORE, 140121387024384, 140121395412991,
+SNULL, 140121395417087, 140121403805695,
+STORE, 140121395412992, 140121395417087,
+STORE, 140121395417088, 140121403805695,
+SNULL, 140121571565568, 140121588346879,
+STORE, 140121588346880, 140121596739583,
+STORE, 140121571565568, 140121588346879,
+SNULL, 140121588350975, 140121596739583,
+STORE, 140121588346880, 140121588350975,
+STORE, 140121588350976, 140121596739583,
+SNULL, 140121571565568, 140121579954175,
+STORE, 140121579954176, 140121588346879,
+STORE, 140121571565568, 140121579954175,
+SNULL, 140121579958271, 140121588346879,
+STORE, 140121579954176, 140121579958271,
+STORE, 140121579958272, 140121588346879,
+STORE, 140121286373376, 140121294766079,
+STORE, 140121277980672, 140121294766079,
+SNULL, 140121277980672, 140121286373375,
+STORE, 140121286373376, 140121294766079,
+STORE, 140121277980672, 140121286373375,
+SNULL, 140121286377471, 140121294766079,
+STORE, 140121286373376, 140121286377471,
+STORE, 140121286377472, 140121294766079,
+STORE, 140121269587968, 140121286373375,
+STORE, 140121261195264, 140121286373375,
+SNULL, 140121261195264, 140121269587967,
+STORE, 140121269587968, 140121286373375,
+STORE, 140121261195264, 140121269587967,
+SNULL, 140121269592063, 140121286373375,
+STORE, 140121269587968, 140121269592063,
+STORE, 140121269592064, 140121286373375,
+STORE, 140121252802560, 140121269587967,
+SNULL, 140121252806655, 140121269587967,
+STORE, 140121252802560, 140121252806655,
+STORE, 140121252806656, 140121269587967,
+STORE, 140121244409856, 140121252802559,
+STORE, 140121236017152, 140121252802559,
+SNULL, 140121236017152, 140121244409855,
+STORE, 140121244409856, 140121252802559,
+STORE, 140121236017152, 140121244409855,
+SNULL, 140121244413951, 140121252802559,
+STORE, 140121244409856, 140121244413951,
+STORE, 140121244413952, 140121252802559,
+SNULL, 140121370238975, 140121378627583,
+STORE, 140121370234880, 140121370238975,
+STORE, 140121370238976, 140121378627583,
+STORE, 140121152155648, 140121160548351,
+STORE, 140121143762944, 140121160548351,
+STORE, 140121135370240, 140121160548351,
+SNULL, 140121135374335, 140121160548351,
+STORE, 140121135370240, 140121135374335,
+STORE, 140121135374336, 140121160548351,
+STORE, 140121126977536, 140121135370239,
+STORE, 140121118584832, 140121135370239,
+STORE, 140121110192128, 140121135370239,
+SNULL, 140121110192128, 140121118584831,
+STORE, 140121118584832, 140121135370239,
+STORE, 140121110192128, 140121118584831,
+SNULL, 140121118588927, 140121135370239,
+STORE, 140121118584832, 140121118588927,
+STORE, 140121118588928, 140121135370239,
+STORE, 140121101799424, 140121118584831,
+STORE, 140121017937920, 140121026330623,
+STORE, 140121009545216, 140121026330623,
+SNULL, 140121009545216, 140121017937919,
+STORE, 140121017937920, 140121026330623,
+STORE, 140121009545216, 140121017937919,
+SNULL, 140121017942015, 140121026330623,
+STORE, 140121017937920, 140121017942015,
+STORE, 140121017942016, 140121026330623,
+SNULL, 140121269592064, 140121277980671,
+STORE, 140121277980672, 140121286373375,
+STORE, 140121269592064, 140121277980671,
+SNULL, 140121277984767, 140121286373375,
+STORE, 140121277980672, 140121277984767,
+STORE, 140121277984768, 140121286373375,
+STORE, 140121001152512, 140121017937919,
+SNULL, 140121252806656, 140121261195263,
+STORE, 140121261195264, 140121269587967,
+STORE, 140121252806656, 140121261195263,
+SNULL, 140121261199359, 140121269587967,
+STORE, 140121261195264, 140121261199359,
+STORE, 140121261199360, 140121269587967,
+SNULL, 140121135374336, 140121152155647,
+STORE, 140121152155648, 140121160548351,
+STORE, 140121135374336, 140121152155647,
+SNULL, 140121152159743, 140121160548351,
+STORE, 140121152155648, 140121152159743,
+STORE, 140121152159744, 140121160548351,
+STORE, 140120992759808, 140121017937919,
+STORE, 140120984367104, 140121017937919,
+STORE, 140120975974400, 140121017937919,
+SNULL, 140121101799424, 140121110192127,
+STORE, 140121110192128, 140121118584831,
+STORE, 140121101799424, 140121110192127,
+SNULL, 140121110196223, 140121118584831,
+STORE, 140121110192128, 140121110196223,
+STORE, 140121110196224, 140121118584831,
+SNULL, 140121118588928, 140121126977535,
+STORE, 140121126977536, 140121135370239,
+STORE, 140121118588928, 140121126977535,
+SNULL, 140121126981631, 140121135370239,
+STORE, 140121126977536, 140121126981631,
+STORE, 140121126981632, 140121135370239,
+STORE, 140120967581696, 140121017937919,
+STORE, 140120883720192, 140120892112895,
+SNULL, 140120883724287, 140120892112895,
+STORE, 140120883720192, 140120883724287,
+STORE, 140120883724288, 140120892112895,
+STORE, 140120875327488, 140120883720191,
+SNULL, 140121101803519, 140121110192127,
+STORE, 140121101799424, 140121101803519,
+STORE, 140121101803520, 140121110192127,
+SNULL, 140121135374336, 140121143762943,
+STORE, 140121143762944, 140121152155647,
+STORE, 140121135374336, 140121143762943,
+SNULL, 140121143767039, 140121152155647,
+STORE, 140121143762944, 140121143767039,
+STORE, 140121143767040, 140121152155647,
+STORE, 140120866934784, 140120883720191,
+SNULL, 140120967581696, 140120984367103,
+STORE, 140120984367104, 140121017937919,
+STORE, 140120967581696, 140120984367103,
+SNULL, 140120984371199, 140121017937919,
+STORE, 140120984367104, 140120984371199,
+STORE, 140120984371200, 140121017937919,
+STORE, 140120858542080, 140120883720191,
+SNULL, 140121236021247, 140121244409855,
+STORE, 140121236017152, 140121236021247,
+STORE, 140121236021248, 140121244409855,
+SNULL, 140120984371200, 140121009545215,
+STORE, 140121009545216, 140121017937919,
+STORE, 140120984371200, 140121009545215,
+SNULL, 140121009549311, 140121017937919,
+STORE, 140121009545216, 140121009549311,
+STORE, 140121009549312, 140121017937919,
+SNULL, 140120984371200, 140120992759807,
+STORE, 140120992759808, 140121009545215,
+STORE, 140120984371200, 140120992759807,
+SNULL, 140120992763903, 140121009545215,
+STORE, 140120992759808, 140120992763903,
+STORE, 140120992763904, 140121009545215,
+SNULL, 140120992763904, 140121001152511,
+STORE, 140121001152512, 140121009545215,
+STORE, 140120992763904, 140121001152511,
+SNULL, 140121001156607, 140121009545215,
+STORE, 140121001152512, 140121001156607,
+STORE, 140121001156608, 140121009545215,
+STORE, 140120850149376, 140120883720191,
+SNULL, 140120850153471, 140120883720191,
+STORE, 140120850149376, 140120850153471,
+STORE, 140120850153472, 140120883720191,
+SNULL, 140120967585791, 140120984367103,
+STORE, 140120967581696, 140120967585791,
+STORE, 140120967585792, 140120984367103,
+SNULL, 140120850153472, 140120866934783,
+STORE, 140120866934784, 140120883720191,
+STORE, 140120850153472, 140120866934783,
+SNULL, 140120866938879, 140120883720191,
+STORE, 140120866934784, 140120866938879,
+STORE, 140120866938880, 140120883720191,
+STORE, 140120841756672, 140120850149375,
+SNULL, 140120967585792, 140120975974399,
+STORE, 140120975974400, 140120984367103,
+STORE, 140120967585792, 140120975974399,
+SNULL, 140120975978495, 140120984367103,
+STORE, 140120975974400, 140120975978495,
+STORE, 140120975978496, 140120984367103,
+SNULL, 140120866938880, 140120875327487,
+STORE, 140120875327488, 140120883720191,
+STORE, 140120866938880, 140120875327487,
+SNULL, 140120875331583, 140120883720191,
+STORE, 140120875327488, 140120875331583,
+STORE, 140120875331584, 140120883720191,
+STORE, 140120833363968, 140120850149375,
+STORE, 140120749502464, 140120757895167,
+STORE, 140120741109760, 140120757895167,
+STORE, 140120732717056, 140120757895167,
+STORE, 140120724324352, 140120757895167,
+SNULL, 140120724324352, 140120732717055,
+STORE, 140120732717056, 140120757895167,
+STORE, 140120724324352, 140120732717055,
+SNULL, 140120732721151, 140120757895167,
+STORE, 140120732717056, 140120732721151,
+STORE, 140120732721152, 140120757895167,
+STORE, 140120715931648, 140120732717055,
+SNULL, 140120715935743, 140120732717055,
+STORE, 140120715931648, 140120715935743,
+STORE, 140120715935744, 140120732717055,
+SNULL, 140120850153472, 140120858542079,
+STORE, 140120858542080, 140120866934783,
+STORE, 140120850153472, 140120858542079,
+SNULL, 140120858546175, 140120866934783,
+STORE, 140120858542080, 140120858546175,
+STORE, 140120858546176, 140120866934783,
+STORE, 140120707538944, 140120715931647,
+SNULL, 140120707543039, 140120715931647,
+STORE, 140120707538944, 140120707543039,
+STORE, 140120707543040, 140120715931647,
+SNULL, 140120833368063, 140120850149375,
+STORE, 140120833363968, 140120833368063,
+STORE, 140120833368064, 140120850149375,
+SNULL, 140120833368064, 140120841756671,
+STORE, 140120841756672, 140120850149375,
+STORE, 140120833368064, 140120841756671,
+SNULL, 140120841760767, 140120850149375,
+STORE, 140120841756672, 140120841760767,
+STORE, 140120841760768, 140120850149375,
+STORE, 140120699146240, 140120707538943,
+SNULL, 140120715935744, 140120724324351,
+STORE, 140120724324352, 140120732717055,
+STORE, 140120715935744, 140120724324351,
+SNULL, 140120724328447, 140120732717055,
+STORE, 140120724324352, 140120724328447,
+STORE, 140120724328448, 140120732717055,
+SNULL, 140120732721152, 140120741109759,
+STORE, 140120741109760, 140120757895167,
+STORE, 140120732721152, 140120741109759,
+SNULL, 140120741113855, 140120757895167,
+STORE, 140120741109760, 140120741113855,
+STORE, 140120741113856, 140120757895167,
+SNULL, 140120741113856, 140120749502463,
+STORE, 140120749502464, 140120757895167,
+STORE, 140120741113856, 140120749502463,
+SNULL, 140120749506559, 140120757895167,
+STORE, 140120749502464, 140120749506559,
+STORE, 140120749506560, 140120757895167,
+SNULL, 140120699150335, 140120707538943,
+STORE, 140120699146240, 140120699150335,
+STORE, 140120699150336, 140120707538943,
+STORE, 140122446557184, 140122446585855,
+STORE, 140122368999424, 140122371190783,
+SNULL, 140122368999424, 140122369089535,
+STORE, 140122369089536, 140122371190783,
+STORE, 140122368999424, 140122369089535,
+SNULL, 140122371182591, 140122371190783,
+STORE, 140122369089536, 140122371182591,
+STORE, 140122371182592, 140122371190783,
+ERASE, 140122371182592, 140122371190783,
+STORE, 140122371182592, 140122371190783,
+SNULL, 140122371186687, 140122371190783,
+STORE, 140122371182592, 140122371186687,
+STORE, 140122371186688, 140122371190783,
+ERASE, 140122446557184, 140122446585855,
+ERASE, 140121445736448, 140121445740543,
+ERASE, 140121445740544, 140121454129151,
+ERASE, 140121621917696, 140121621921791,
+ERASE, 140121621921792, 140121630310399,
+ERASE, 140121579954176, 140121579958271,
+ERASE, 140121579958272, 140121588346879,
+ERASE, 140121261195264, 140121261199359,
+ERASE, 140121261199360, 140121269587967,
+ERASE, 140121454129152, 140121454133247,
+ERASE, 140121454133248, 140121462521855,
+ERASE, 140121588346880, 140121588350975,
+ERASE, 140121588350976, 140121596739583,
+ERASE, 140121135370240, 140121135374335,
+ERASE, 140121135374336, 140121143762943,
+ERASE, 140121881960448, 140121881964543,
+ERASE, 140121881964544, 140121890353151,
+ERASE, 140121428951040, 140121428955135,
+ERASE, 140121428955136, 140121437343743,
+ERASE, 140121387020288, 140121387024383,
+ERASE, 140121387024384, 140121395412991,
+ERASE, 140121487699968, 140121487704063,
+ERASE, 140121487704064, 140121496092671,
+ERASE, 140121437343744, 140121437347839,
+ERASE, 140121437347840, 140121445736447,
+ERASE, 140121613524992, 140121613529087,
+ERASE, 140121613529088, 140121621917695,
+ERASE, 140121856782336, 140121856786431,
+ERASE, 140121856786432, 140121865175039,
+ERASE, 140121252802560, 140121252806655,
+ERASE, 140121252806656, 140121261195263,
+ERASE, 140121839996928, 140121840001023,
+ERASE, 140121840001024, 140121848389631,
+ERASE, 140121596739584, 140121596743679,
+ERASE, 140121596743680, 140121605132287,
+ERASE, 140121009545216, 140121009549311,
+ERASE, 140121009549312, 140121017937919,
+ERASE, 140120724324352, 140120724328447,
+ERASE, 140120724328448, 140120732717055,
+ERASE, 140120883720192, 140120883724287,
+ERASE, 140120883724288, 140120892112895,
+ERASE, 140121982607360, 140121982611455,
+ERASE, 140121982611456, 140121991000063,
+ERASE, 140121571561472, 140121571565567,
+ERASE, 140121571565568, 140121579954175,
+ERASE, 140121286373376, 140121286377471,
+ERASE, 140121286377472, 140121294766079,
+ERASE, 140120875327488, 140120875331583,
+ERASE, 140120875331584, 140120883720191,
+ERASE, 140121848389632, 140121848393727,
+ERASE, 140121848393728, 140121856782335,
+ERASE, 140121370234880, 140121370238975,
+ERASE, 140121370238976, 140121378627583,
+ERASE, 140121143762944, 140121143767039,
+ERASE, 140121143767040, 140121152155647,
+ERASE, 140121118584832, 140121118588927,
+ERASE, 140121118588928, 140121126977535,
+ERASE, 140120866934784, 140120866938879,
+ERASE, 140120866938880, 140120875327487,
+ERASE, 140120741109760, 140120741113855,
+ERASE, 140120741113856, 140120749502463,
+ERASE, 140121865175040, 140121865179135,
+ERASE, 140121865179136, 140121873567743,
+ERASE, 140121403805696, 140121403809791,
+ERASE, 140121403809792, 140121412198399,
+ERASE, 140121236017152, 140121236021247,
+ERASE, 140121236021248, 140121244409855,
+ERASE, 140120732717056, 140120732721151,
+ERASE, 140120732721152, 140120741109759,
+ERASE, 140121017937920, 140121017942015,
+ERASE, 140121017942016, 140121026330623,
+ERASE, 140121873567744, 140121873571839,
+ERASE, 140121873571840, 140121881960447,
+ERASE, 140121470914560, 140121470918655,
+ERASE, 140121470918656, 140121479307263,
+ERASE, 140121126977536, 140121126981631,
+ERASE, 140121126981632, 140121135370239,
+ERASE, 140120850149376, 140120850153471,
+ERASE, 140120850153472, 140120858542079,
+ERASE, 140120707538944, 140120707543039,
+ERASE, 140120707543040, 140120715931647,
+ERASE, 140121479307264, 140121479311359,
+ERASE, 140121479311360, 140121487699967,
+ERASE, 140120967581696, 140120967585791,
+ERASE, 140120967585792, 140120975974399,
+ERASE, 140120841756672, 140120841760767,
+ERASE, 140120841760768, 140120850149375,
+ERASE, 140121412198400, 140121412202495,
+ERASE, 140121412202496, 140121420591103,
+ERASE, 140122158788608, 140122158792703,
+ERASE, 140122158792704, 140122167181311,
+ERASE, 140122142003200, 140122142007295,
+ERASE, 140122142007296, 140122150395903,
+ERASE, 140121101799424, 140121101803519,
+ERASE, 140121101803520, 140121110192127,
+ERASE, 140120858542080, 140120858546175,
+ERASE, 140120858546176, 140120866934783,
+ERASE, 140120833363968, 140120833368063,
+ERASE, 140120833368064, 140120841756671,
+ERASE, 140121277980672, 140121277984767,
+ERASE, 140121277984768, 140121286373375,
+ERASE, 140121001152512, 140121001156607,
+ERASE, 140121001156608, 140121009545215,
+ERASE, 140120749502464, 140120749506559,
+ERASE, 140120749506560, 140120757895167,
+ERASE, 140121605132288, 140121605136383,
+ERASE, 140121605136384, 140121613524991,
+ERASE, 140121378627584, 140121378631679,
+ERASE, 140121378631680, 140121387020287,
+ERASE, 140121110192128, 140121110196223,
+ERASE, 140121110196224, 140121118584831,
+ERASE, 140121462521856, 140121462525951,
+ERASE, 140121462525952, 140121470914559,
+ERASE, 140121395412992, 140121395417087,
+ERASE, 140121395417088, 140121403805695,
+ERASE, 140121152155648, 140121152159743,
+ERASE, 140121152159744, 140121160548351,
+ERASE, 140120992759808, 140120992763903,
+ERASE, 140120992763904, 140121001152511,
+ERASE, 140122387976192, 140122387980287,
+ERASE, 140122387980288, 140122396368895,
+ERASE, 140121890353152, 140121890357247,
+ERASE, 140121890357248, 140121898745855,
+ERASE, 140121269587968, 140121269592063,
+ERASE, 140121269592064, 140121277980671,
+ };
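+/*
+ * A minimal sketch of how a set of this shape could be replayed,
+ * assuming the maple-tree API this data resembles. This is illustrative
+ * only: the real harness also validates the tree after each step, and
+ * mt/ptr here are assumed locals, not names from this patch.
+ *
+ *	for (i = 0; i + 2 < ARRAY_SIZE(set37); i += 3) {
+ *		unsigned long start = set37[i + 1];
+ *		unsigned long end = set37[i + 2];	// inclusive last byte
+ *
+ *		switch (set37[i]) {
+ *		case STORE:	// insert/overwrite [start, end]
+ *			mtree_store_range(mt, start, end, ptr, GFP_KERNEL);
+ *			break;
+ *		case SNULL:	// clear [start, end], splitting neighbours
+ *			mtree_store_range(mt, start, end, NULL, GFP_KERNEL);
+ *			break;
+ *		case ERASE:	// drop the entry containing start
+ *			mtree_erase(mt, start);
+ *			break;
+ *		}
+ *	}
+ */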
+ static const unsigned long set37[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140722404016128, 140737488351231,
+SNULL, 140722404020223, 140737488351231,
+STORE, 140722404016128, 140722404020223,
+STORE, 140722403885056, 140722404020223,
+STORE, 94637010001920, 94637012254719,
+SNULL, 94637010132991, 94637012254719,
+STORE, 94637010001920, 94637010132991,
+STORE, 94637010132992, 94637012254719,
+ERASE, 94637010132992, 94637012254719,
+STORE, 94637012226048, 94637012234239,
+STORE, 94637012234240, 94637012254719,
+STORE, 139760240594944, 139760242847743,
+SNULL, 139760240738303, 139760242847743,
+STORE, 139760240594944, 139760240738303,
+STORE, 139760240738304, 139760242847743,
+ERASE, 139760240738304, 139760242847743,
+STORE, 139760242835456, 139760242843647,
+STORE, 139760242843648, 139760242847743,
+STORE, 140722405232640, 140722405236735,
+STORE, 140722405220352, 140722405232639,
+STORE, 139760242806784, 139760242835455,
+STORE, 139760242798592, 139760242806783,
+STORE, 139760238379008, 139760240594943,
+SNULL, 139760238379008, 139760238477311,
+STORE, 139760238477312, 139760240594943,
+STORE, 139760238379008, 139760238477311,
+SNULL, 139760240570367, 139760240594943,
+STORE, 139760238477312, 139760240570367,
+STORE, 139760240570368, 139760240594943,
+SNULL, 139760240570368, 139760240578559,
+STORE, 139760240578560, 139760240594943,
+STORE, 139760240570368, 139760240578559,
+ERASE, 139760240570368, 139760240578559,
+STORE, 139760240570368, 139760240578559,
+ERASE, 139760240578560, 139760240594943,
+STORE, 139760240578560, 139760240594943,
+STORE, 139760234582016, 139760238379007,
+SNULL, 139760234582016, 139760236240895,
+STORE, 139760236240896, 139760238379007,
+STORE, 139760234582016, 139760236240895,
+SNULL, 139760238338047, 139760238379007,
+STORE, 139760236240896, 139760238338047,
+STORE, 139760238338048, 139760238379007,
+SNULL, 139760238338048, 139760238362623,
+STORE, 139760238362624, 139760238379007,
+STORE, 139760238338048, 139760238362623,
+ERASE, 139760238338048, 139760238362623,
+STORE, 139760238338048, 139760238362623,
+ERASE, 139760238362624, 139760238379007,
+STORE, 139760238362624, 139760238379007,
+STORE, 139760242790400, 139760242806783,
+SNULL, 139760238354431, 139760238362623,
+STORE, 139760238338048, 139760238354431,
+STORE, 139760238354432, 139760238362623,
+SNULL, 139760240574463, 139760240578559,
+STORE, 139760240570368, 139760240574463,
+STORE, 139760240574464, 139760240578559,
+SNULL, 94637012230143, 94637012234239,
+STORE, 94637012226048, 94637012230143,
+STORE, 94637012230144, 94637012234239,
+SNULL, 139760242839551, 139760242843647,
+STORE, 139760242835456, 139760242839551,
+STORE, 139760242839552, 139760242843647,
+ERASE, 139760242806784, 139760242835455,
+STORE, 94637033324544, 94637033459711,
+STORE, 139760226189312, 139760234582015,
+SNULL, 139760226193407, 139760234582015,
+STORE, 139760226189312, 139760226193407,
+STORE, 139760226193408, 139760234582015,
+STORE, 139760217796608, 139760226189311,
+STORE, 139760083578880, 139760217796607,
+SNULL, 139760083578880, 139760114860031,
+STORE, 139760114860032, 139760217796607,
+STORE, 139760083578880, 139760114860031,
+ERASE, 139760083578880, 139760114860031,
+SNULL, 139760181968895, 139760217796607,
+STORE, 139760114860032, 139760181968895,
+STORE, 139760181968896, 139760217796607,
+ERASE, 139760181968896, 139760217796607,
+SNULL, 139760114995199, 139760181968895,
+STORE, 139760114860032, 139760114995199,
+STORE, 139760114995200, 139760181968895,
+SNULL, 139760217800703, 139760226189311,
+STORE, 139760217796608, 139760217800703,
+STORE, 139760217800704, 139760226189311,
+STORE, 139760209403904, 139760217796607,
+SNULL, 139760209407999, 139760217796607,
+STORE, 139760209403904, 139760209407999,
+STORE, 139760209408000, 139760217796607,
+STORE, 139760201011200, 139760209403903,
+SNULL, 139760201015295, 139760209403903,
+STORE, 139760201011200, 139760201015295,
+STORE, 139760201015296, 139760209403903,
+STORE, 139760192618496, 139760201011199,
+SNULL, 139760192622591, 139760201011199,
+STORE, 139760192618496, 139760192622591,
+STORE, 139760192622592, 139760201011199,
+STORE, 139760184225792, 139760192618495,
+STORE, 139759980642304, 139760114860031,
+STORE, 139759972249600, 139759980642303,
+STORE, 139759963856896, 139759980642303,
+STORE, 139759955464192, 139759980642303,
+STORE, 139759888355328, 139759955464191,
+SNULL, 139760047751167, 139760114860031,
+STORE, 139759980642304, 139760047751167,
+STORE, 139760047751168, 139760114860031,
+ERASE, 139760047751168, 139760114860031,
+SNULL, 139759980777471, 139760047751167,
+STORE, 139759980642304, 139759980777471,
+STORE, 139759980777472, 139760047751167,
+STORE, 139759980777472, 139760114860031,
+SNULL, 139759980777472, 139760047751167,
+STORE, 139760047751168, 139760114860031,
+STORE, 139759980777472, 139760047751167,
+SNULL, 139760047886335, 139760114860031,
+STORE, 139760047751168, 139760047886335,
+STORE, 139760047886336, 139760114860031,
+STORE, 139759821246464, 139759955464191,
+SNULL, 139759821246464, 139759888355327,
+STORE, 139759888355328, 139759955464191,
+STORE, 139759821246464, 139759888355327,
+ERASE, 139759821246464, 139759888355327,
+ERASE, 139759888355328, 139759955464191,
+ };
+ static const unsigned long set38[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140730666221568, 140737488351231,
+SNULL, 140730666225663, 140737488351231,
+STORE, 140730666221568, 140730666225663,
+STORE, 140730666090496, 140730666225663,
+STORE, 94177584803840, 94177587056639,
+SNULL, 94177584934911, 94177587056639,
+STORE, 94177584803840, 94177584934911,
+STORE, 94177584934912, 94177587056639,
+ERASE, 94177584934912, 94177587056639,
+STORE, 94177587027968, 94177587036159,
+STORE, 94177587036160, 94177587056639,
+STORE, 140614382714880, 140614384967679,
+SNULL, 140614382858239, 140614384967679,
+STORE, 140614382714880, 140614382858239,
+STORE, 140614382858240, 140614384967679,
+ERASE, 140614382858240, 140614384967679,
+STORE, 140614384955392, 140614384963583,
+STORE, 140614384963584, 140614384967679,
+STORE, 140730666315776, 140730666319871,
+STORE, 140730666303488, 140730666315775,
+STORE, 140614384926720, 140614384955391,
+STORE, 140614384918528, 140614384926719,
+STORE, 140614380498944, 140614382714879,
+SNULL, 140614380498944, 140614380597247,
+STORE, 140614380597248, 140614382714879,
+STORE, 140614380498944, 140614380597247,
+SNULL, 140614382690303, 140614382714879,
+STORE, 140614380597248, 140614382690303,
+STORE, 140614382690304, 140614382714879,
+SNULL, 140614382690304, 140614382698495,
+STORE, 140614382698496, 140614382714879,
+STORE, 140614382690304, 140614382698495,
+ERASE, 140614382690304, 140614382698495,
+STORE, 140614382690304, 140614382698495,
+ERASE, 140614382698496, 140614382714879,
+STORE, 140614382698496, 140614382714879,
+STORE, 140614376701952, 140614380498943,
+SNULL, 140614376701952, 140614378360831,
+STORE, 140614378360832, 140614380498943,
+STORE, 140614376701952, 140614378360831,
+SNULL, 140614380457983, 140614380498943,
+STORE, 140614378360832, 140614380457983,
+STORE, 140614380457984, 140614380498943,
+SNULL, 140614380457984, 140614380482559,
+STORE, 140614380482560, 140614380498943,
+STORE, 140614380457984, 140614380482559,
+ERASE, 140614380457984, 140614380482559,
+STORE, 140614380457984, 140614380482559,
+ERASE, 140614380482560, 140614380498943,
+STORE, 140614380482560, 140614380498943,
+STORE, 140614384910336, 140614384926719,
+SNULL, 140614380474367, 140614380482559,
+STORE, 140614380457984, 140614380474367,
+STORE, 140614380474368, 140614380482559,
+SNULL, 140614382694399, 140614382698495,
+STORE, 140614382690304, 140614382694399,
+STORE, 140614382694400, 140614382698495,
+SNULL, 94177587032063, 94177587036159,
+STORE, 94177587027968, 94177587032063,
+STORE, 94177587032064, 94177587036159,
+SNULL, 140614384959487, 140614384963583,
+STORE, 140614384955392, 140614384959487,
+STORE, 140614384959488, 140614384963583,
+ERASE, 140614384926720, 140614384955391,
+STORE, 94177619791872, 94177619927039,
+STORE, 140614368309248, 140614376701951,
+SNULL, 140614368313343, 140614376701951,
+STORE, 140614368309248, 140614368313343,
+STORE, 140614368313344, 140614376701951,
+STORE, 140614359916544, 140614368309247,
+STORE, 140614225698816, 140614359916543,
+SNULL, 140614225698816, 140614276481023,
+STORE, 140614276481024, 140614359916543,
+STORE, 140614225698816, 140614276481023,
+ERASE, 140614225698816, 140614276481023,
+SNULL, 140614343589887, 140614359916543,
+STORE, 140614276481024, 140614343589887,
+STORE, 140614343589888, 140614359916543,
+ERASE, 140614343589888, 140614359916543,
+SNULL, 140614276616191, 140614343589887,
+STORE, 140614276481024, 140614276616191,
+STORE, 140614276616192, 140614343589887,
+SNULL, 140614359920639, 140614368309247,
+STORE, 140614359916544, 140614359920639,
+STORE, 140614359920640, 140614368309247,
+STORE, 140614351523840, 140614359916543,
+SNULL, 140614351527935, 140614359916543,
+STORE, 140614351523840, 140614351527935,
+STORE, 140614351527936, 140614359916543,
+STORE, 140614268088320, 140614276481023,
+SNULL, 140614268092415, 140614276481023,
+STORE, 140614268088320, 140614268092415,
+STORE, 140614268092416, 140614276481023,
+STORE, 140614259695616, 140614268088319,
+SNULL, 140614259699711, 140614268088319,
+STORE, 140614259695616, 140614259699711,
+STORE, 140614259699712, 140614268088319,
+STORE, 140614251302912, 140614259695615,
+STORE, 140614242910208, 140614259695615,
+STORE, 140614108692480, 140614242910207,
+SNULL, 140614108692480, 140614142263295,
+STORE, 140614142263296, 140614242910207,
+STORE, 140614108692480, 140614142263295,
+ERASE, 140614108692480, 140614142263295,
+STORE, 140614133870592, 140614142263295,
+STORE, 140613999652864, 140614133870591,
+SNULL, 140613999652864, 140614008045567,
+STORE, 140614008045568, 140614133870591,
+STORE, 140613999652864, 140614008045567,
+ERASE, 140613999652864, 140614008045567,
+STORE, 140613999652864, 140614008045567,
+STORE, 140613865435136, 140613999652863,
+SNULL, 140613865435136, 140613873827839,
+STORE, 140613873827840, 140613999652863,
+STORE, 140613865435136, 140613873827839,
+ERASE, 140613865435136, 140613873827839,
+SNULL, 140614209372159, 140614242910207,
+STORE, 140614142263296, 140614209372159,
+STORE, 140614209372160, 140614242910207,
+ERASE, 140614209372160, 140614242910207,
+SNULL, 140614142398463, 140614209372159,
+STORE, 140614142263296, 140614142398463,
+STORE, 140614142398464, 140614209372159,
+SNULL, 140614075154431, 140614133870591,
+STORE, 140614008045568, 140614075154431,
+STORE, 140614075154432, 140614133870591,
+ERASE, 140614075154432, 140614133870591,
+SNULL, 140614008180735, 140614075154431,
+STORE, 140614008045568, 140614008180735,
+STORE, 140614008180736, 140614075154431,
+SNULL, 140613940936703, 140613999652863,
+STORE, 140613873827840, 140613940936703,
+STORE, 140613940936704, 140613999652863,
+ERASE, 140613940936704, 140613999652863,
+SNULL, 140614242914303, 140614259695615,
+STORE, 140614242910208, 140614242914303,
+STORE, 140614242914304, 140614259695615,
+STORE, 140613739610112, 140613940936703,
+STORE, 140614234517504, 140614242910207,
+SNULL, 140614242914304, 140614251302911,
+STORE, 140614251302912, 140614259695615,
+STORE, 140614242914304, 140614251302911,
+SNULL, 140614251307007, 140614259695615,
+STORE, 140614251302912, 140614251307007,
+STORE, 140614251307008, 140614259695615,
+SNULL, 140613739610112, 140613873827839,
+STORE, 140613873827840, 140613940936703,
+STORE, 140613739610112, 140613873827839,
+SNULL, 140613873963007, 140613940936703,
+STORE, 140613873827840, 140613873963007,
+STORE, 140613873963008, 140613940936703,
+SNULL, 140614133874687, 140614142263295,
+STORE, 140614133870592, 140614133874687,
+STORE, 140614133874688, 140614142263295,
+SNULL, 140613806718975, 140613873827839,
+STORE, 140613739610112, 140613806718975,
+STORE, 140613806718976, 140613873827839,
+ERASE, 140613806718976, 140613873827839,
+STORE, 140614226124800, 140614242910207,
+SNULL, 140613739745279, 140613806718975,
+STORE, 140613739610112, 140613739745279,
+STORE, 140613739745280, 140613806718975,
+SNULL, 140613999656959, 140614008045567,
+STORE, 140613999652864, 140613999656959,
+STORE, 140613999656960, 140614008045567,
+SNULL, 140614226124800, 140614234517503,
+STORE, 140614234517504, 140614242910207,
+STORE, 140614226124800, 140614234517503,
+SNULL, 140614234521599, 140614242910207,
+STORE, 140614234517504, 140614234521599,
+STORE, 140614234521600, 140614242910207,
+STORE, 140614217732096, 140614234517503,
+STORE, 140614125477888, 140614133870591,
+SNULL, 140614125481983, 140614133870591,
+STORE, 140614125477888, 140614125481983,
+STORE, 140614125481984, 140614133870591,
+STORE, 140614117085184, 140614125477887,
+SNULL, 140614217736191, 140614234517503,
+STORE, 140614217732096, 140614217736191,
+STORE, 140614217736192, 140614234517503,
+SNULL, 140614117089279, 140614125477887,
+STORE, 140614117085184, 140614117089279,
+STORE, 140614117089280, 140614125477887,
+SNULL, 140614217736192, 140614226124799,
+STORE, 140614226124800, 140614234517503,
+STORE, 140614217736192, 140614226124799,
+SNULL, 140614226128895, 140614234517503,
+STORE, 140614226124800, 140614226128895,
+STORE, 140614226128896, 140614234517503,
+STORE, 140614108692480, 140614117085183,
+STORE, 140614100299776, 140614117085183,
+STORE, 140614091907072, 140614117085183,
+SNULL, 140614091907072, 140614108692479,
+STORE, 140614108692480, 140614117085183,
+STORE, 140614091907072, 140614108692479,
+SNULL, 140614108696575, 140614117085183,
+STORE, 140614108692480, 140614108696575,
+STORE, 140614108696576, 140614117085183,
+SNULL, 140614091907072, 140614100299775,
+STORE, 140614100299776, 140614108692479,
+STORE, 140614091907072, 140614100299775,
+SNULL, 140614100303871, 140614108692479,
+STORE, 140614100299776, 140614100303871,
+STORE, 140614100303872, 140614108692479,
+STORE, 140614083514368, 140614100299775,
+SNULL, 140614083518463, 140614100299775,
+STORE, 140614083514368, 140614083518463,
+STORE, 140614083518464, 140614100299775,
+STORE, 140613991260160, 140613999652863,
+SNULL, 140614083518464, 140614091907071,
+STORE, 140614091907072, 140614100299775,
+STORE, 140614083518464, 140614091907071,
+SNULL, 140614091911167, 140614100299775,
+STORE, 140614091907072, 140614091911167,
+STORE, 140614091911168, 140614100299775,
+SNULL, 140613991264255, 140613999652863,
+STORE, 140613991260160, 140613991264255,
+STORE, 140613991264256, 140613999652863,
+STORE, 140613982867456, 140613991260159,
+SNULL, 140613982871551, 140613991260159,
+STORE, 140613982867456, 140613982871551,
+STORE, 140613982871552, 140613991260159,
+STORE, 140613974474752, 140613982867455,
+SNULL, 140613974478847, 140613982867455,
+STORE, 140613974474752, 140613974478847,
+STORE, 140613974478848, 140613982867455,
+STORE, 140613966082048, 140613974474751,
+STORE, 140613739745280, 140613873827839,
+SNULL, 140613739745280, 140613806718975,
+STORE, 140613806718976, 140613873827839,
+STORE, 140613739745280, 140613806718975,
+SNULL, 140613806854143, 140613873827839,
+STORE, 140613806718976, 140613806854143,
+STORE, 140613806854144, 140613873827839,
+SNULL, 140613966086143, 140613974474751,
+STORE, 140613966082048, 140613966086143,
+STORE, 140613966086144, 140613974474751,
+STORE, 140613957689344, 140613966082047,
+STORE, 140613605392384, 140613739610111,
+STORE, 140613949296640, 140613966082047,
+STORE, 140613596999680, 140613605392383,
+STORE, 140613529890816, 140613596999679,
+STORE, 140613521498112, 140613529890815,
+STORE, 140613513105408, 140613529890815,
+STORE, 140613378887680, 140613513105407,
+SNULL, 140613378887680, 140613404065791,
+STORE, 140613404065792, 140613513105407,
+STORE, 140613378887680, 140613404065791,
+ERASE, 140613378887680, 140613404065791,
+STORE, 140613395673088, 140613404065791,
+STORE, 140613261455360, 140613395673087,
+SNULL, 140613261455360, 140613269848063,
+STORE, 140613269848064, 140613395673087,
+STORE, 140613261455360, 140613269848063,
+ERASE, 140613261455360, 140613269848063,
+STORE, 140613261455360, 140613269848063,
+STORE, 140613253062656, 140613269848063,
+STORE, 140613118844928, 140613253062655,
+STORE, 140613110452224, 140613118844927,
+SNULL, 140613118844928, 140613135630335,
+STORE, 140613135630336, 140613253062655,
+STORE, 140613118844928, 140613135630335,
+ERASE, 140613118844928, 140613135630335,
+STORE, 140613127237632, 140613135630335,
+STORE, 140613110452224, 140613135630335,
+STORE, 140612976234496, 140613110452223,
+STORE, 140612967841792, 140612976234495,
+STORE, 140612833624064, 140612967841791,
+STORE, 140612825231360, 140612833624063,
+STORE, 140612816838656, 140612833624063,
+STORE, 140612682620928, 140612816838655,
+STORE, 140612674228224, 140612682620927,
+SNULL, 140612682620928, 140612732977151,
+STORE, 140612732977152, 140612816838655,
+STORE, 140612682620928, 140612732977151,
+ERASE, 140612682620928, 140612732977151,
+SNULL, 140613672501247, 140613739610111,
+STORE, 140613605392384, 140613672501247,
+STORE, 140613672501248, 140613739610111,
+ERASE, 140613672501248, 140613739610111,
+SNULL, 140613605527551, 140613672501247,
+STORE, 140613605392384, 140613605527551,
+STORE, 140613605527552, 140613672501247,
+ERASE, 140613529890816, 140613596999679,
+STORE, 140612540010496, 140612674228223,
+SNULL, 140612540010496, 140612598759423,
+STORE, 140612598759424, 140612674228223,
+STORE, 140612540010496, 140612598759423,
+ERASE, 140612540010496, 140612598759423,
+SNULL, 140613471174655, 140613513105407,
+STORE, 140613404065792, 140613471174655,
+STORE, 140613471174656, 140613513105407,
+ERASE, 140613471174656, 140613513105407,
+SNULL, 140613404200959, 140613471174655,
+STORE, 140613404065792, 140613404200959,
+STORE, 140613404200960, 140613471174655,
+SNULL, 140613336956927, 140613395673087,
+STORE, 140613269848064, 140613336956927,
+STORE, 140613336956928, 140613395673087,
+ERASE, 140613336956928, 140613395673087,
+SNULL, 140612833624064, 140612867194879,
+STORE, 140612867194880, 140612967841791,
+STORE, 140612833624064, 140612867194879,
+ERASE, 140612833624064, 140612867194879,
+SNULL, 140612976234496, 140613001412607,
+STORE, 140613001412608, 140613110452223,
+STORE, 140612976234496, 140613001412607,
+ERASE, 140612976234496, 140613001412607,
+SNULL, 140613202739199, 140613253062655,
+STORE, 140613135630336, 140613202739199,
+STORE, 140613202739200, 140613253062655,
+ERASE, 140613202739200, 140613253062655,
+SNULL, 140613135765503, 140613202739199,
+STORE, 140613135630336, 140613135765503,
+STORE, 140613135765504, 140613202739199,
+SNULL, 140612816842751, 140612833624063,
+STORE, 140612816838656, 140612816842751,
+STORE, 140612816842752, 140612833624063,
+SNULL, 140613110456319, 140613135630335,
+STORE, 140613110452224, 140613110456319,
+STORE, 140613110456320, 140613135630335,
+SNULL, 140613949300735, 140613966082047,
+STORE, 140613949296640, 140613949300735,
+STORE, 140613949300736, 140613966082047,
+SNULL, 140613110456320, 140613118844927,
+STORE, 140613118844928, 140613135630335,
+STORE, 140613110456320, 140613118844927,
+SNULL, 140613118849023, 140613135630335,
+STORE, 140613118844928, 140613118849023,
+STORE, 140613118849024, 140613135630335,
+SNULL, 140612800086015, 140612816838655,
+STORE, 140612732977152, 140612800086015,
+STORE, 140612800086016, 140612816838655,
+ERASE, 140612800086016, 140612816838655,
+SNULL, 140613253062656, 140613261455359,
+STORE, 140613261455360, 140613269848063,
+STORE, 140613253062656, 140613261455359,
+SNULL, 140613261459455, 140613269848063,
+STORE, 140613261455360, 140613261459455,
+STORE, 140613261459456, 140613269848063,
+SNULL, 140612674232319, 140612682620927,
+STORE, 140612674228224, 140612674232319,
+STORE, 140612674232320, 140612682620927,
+STORE, 140613731217408, 140613739610111,
+STORE, 140613722824704, 140613739610111,
+SNULL, 140613949300736, 140613957689343,
+STORE, 140613957689344, 140613966082047,
+STORE, 140613949300736, 140613957689343,
+SNULL, 140613957693439, 140613966082047,
+STORE, 140613957689344, 140613957693439,
+STORE, 140613957693440, 140613966082047,
+STORE, 140612464541696, 140612674228223,
+SNULL, 140612531650559, 140612674228223,
+STORE, 140612464541696, 140612531650559,
+STORE, 140612531650560, 140612674228223,
+SNULL, 140612531650560, 140612598759423,
+STORE, 140612598759424, 140612674228223,
+STORE, 140612531650560, 140612598759423,
+ERASE, 140612531650560, 140612598759423,
+SNULL, 140612665868287, 140612674228223,
+STORE, 140612598759424, 140612665868287,
+STORE, 140612665868288, 140612674228223,
+ERASE, 140612665868288, 140612674228223,
+SNULL, 140613269983231, 140613336956927,
+STORE, 140613269848064, 140613269983231,
+STORE, 140613269983232, 140613336956927,
+SNULL, 140612934303743, 140612967841791,
+STORE, 140612867194880, 140612934303743,
+STORE, 140612934303744, 140612967841791,
+ERASE, 140612934303744, 140612967841791,
+SNULL, 140613068521471, 140613110452223,
+STORE, 140613001412608, 140613068521471,
+STORE, 140613068521472, 140613110452223,
+ERASE, 140613068521472, 140613110452223,
+STORE, 140613714432000, 140613739610111,
+SNULL, 140613001547775, 140613068521471,
+STORE, 140613001412608, 140613001547775,
+STORE, 140613001547776, 140613068521471,
+SNULL, 140612733112319, 140612800086015,
+STORE, 140612732977152, 140612733112319,
+STORE, 140612733112320, 140612800086015,
+SNULL, 140613513109503, 140613529890815,
+STORE, 140613513105408, 140613513109503,
+STORE, 140613513109504, 140613529890815,
+STORE, 140613706039296, 140613739610111,
+STORE, 140613697646592, 140613739610111,
+STORE, 140613689253888, 140613739610111,
+SNULL, 140613689257983, 140613739610111,
+STORE, 140613689253888, 140613689257983,
+STORE, 140613689257984, 140613739610111,
+SNULL, 140613253066751, 140613261455359,
+STORE, 140613253062656, 140613253066751,
+STORE, 140613253066752, 140613261455359,
+STORE, 140613680861184, 140613689253887,
+STORE, 140613588606976, 140613605392383,
+SNULL, 140613689257984, 140613731217407,
+STORE, 140613731217408, 140613739610111,
+STORE, 140613689257984, 140613731217407,
+SNULL, 140613731221503, 140613739610111,
+STORE, 140613731217408, 140613731221503,
+STORE, 140613731221504, 140613739610111,
+STORE, 140613580214272, 140613605392383,
+SNULL, 140612464676863, 140612531650559,
+STORE, 140612464541696, 140612464676863,
+STORE, 140612464676864, 140612531650559,
+SNULL, 140612598894591, 140612665868287,
+STORE, 140612598759424, 140612598894591,
+STORE, 140612598894592, 140612665868287,
+SNULL, 140612867330047, 140612934303743,
+STORE, 140612867194880, 140612867330047,
+STORE, 140612867330048, 140612934303743,
+STORE, 140613571821568, 140613605392383,
+SNULL, 140613571825663, 140613605392383,
+STORE, 140613571821568, 140613571825663,
+STORE, 140613571825664, 140613605392383,
+SNULL, 140613689257984, 140613722824703,
+STORE, 140613722824704, 140613731217407,
+STORE, 140613689257984, 140613722824703,
+SNULL, 140613722828799, 140613731217407,
+STORE, 140613722824704, 140613722828799,
+STORE, 140613722828800, 140613731217407,
+SNULL, 140613689257984, 140613714431999,
+STORE, 140613714432000, 140613722824703,
+STORE, 140613689257984, 140613714431999,
+SNULL, 140613714436095, 140613722824703,
+STORE, 140613714432000, 140613714436095,
+STORE, 140613714436096, 140613722824703,
+SNULL, 140612816842752, 140612825231359,
+STORE, 140612825231360, 140612833624063,
+STORE, 140612816842752, 140612825231359,
+SNULL, 140612825235455, 140612833624063,
+STORE, 140612825231360, 140612825235455,
+STORE, 140612825235456, 140612833624063,
+SNULL, 140613395677183, 140613404065791,
+STORE, 140613395673088, 140613395677183,
+STORE, 140613395677184, 140613404065791,
+SNULL, 140613689257984, 140613706039295,
+STORE, 140613706039296, 140613714431999,
+STORE, 140613689257984, 140613706039295,
+SNULL, 140613706043391, 140613714431999,
+STORE, 140613706039296, 140613706043391,
+STORE, 140613706043392, 140613714431999,
+SNULL, 140613118849024, 140613127237631,
+STORE, 140613127237632, 140613135630335,
+STORE, 140613118849024, 140613127237631,
+SNULL, 140613127241727, 140613135630335,
+STORE, 140613127237632, 140613127241727,
+STORE, 140613127241728, 140613135630335,
+SNULL, 140613571825664, 140613580214271,
+STORE, 140613580214272, 140613605392383,
+STORE, 140613571825664, 140613580214271,
+SNULL, 140613580218367, 140613605392383,
+STORE, 140613580214272, 140613580218367,
+STORE, 140613580218368, 140613605392383,
+SNULL, 140613689257984, 140613697646591,
+STORE, 140613697646592, 140613706039295,
+STORE, 140613689257984, 140613697646591,
+SNULL, 140613697650687, 140613706039295,
+STORE, 140613697646592, 140613697650687,
+STORE, 140613697650688, 140613706039295,
+SNULL, 140613680865279, 140613689253887,
+STORE, 140613680861184, 140613680865279,
+STORE, 140613680865280, 140613689253887,
+STORE, 140613563428864, 140613571821567,
+SNULL, 140613563432959, 140613571821567,
+STORE, 140613563428864, 140613563432959,
+STORE, 140613563432960, 140613571821567,
+SNULL, 140613580218368, 140613588606975,
+STORE, 140613588606976, 140613605392383,
+STORE, 140613580218368, 140613588606975,
+SNULL, 140613588611071, 140613605392383,
+STORE, 140613588606976, 140613588611071,
+STORE, 140613588611072, 140613605392383,
+SNULL, 140613513109504, 140613521498111,
+STORE, 140613521498112, 140613529890815,
+STORE, 140613513109504, 140613521498111,
+SNULL, 140613521502207, 140613529890815,
+STORE, 140613521498112, 140613521502207,
+STORE, 140613521502208, 140613529890815,
+SNULL, 140613588611072, 140613596999679,
+STORE, 140613596999680, 140613605392383,
+STORE, 140613588611072, 140613596999679,
+SNULL, 140613597003775, 140613605392383,
+STORE, 140613596999680, 140613597003775,
+STORE, 140613597003776, 140613605392383,
+STORE, 140613555036160, 140613563428863,
+SNULL, 140613555040255, 140613563428863,
+STORE, 140613555036160, 140613555040255,
+STORE, 140613555040256, 140613563428863,
+STORE, 140613546643456, 140613555036159,
+STORE, 140613538250752, 140613555036159,
+SNULL, 140613538250752, 140613546643455,
+STORE, 140613546643456, 140613555036159,
+STORE, 140613538250752, 140613546643455,
+SNULL, 140613546647551, 140613555036159,
+STORE, 140613546643456, 140613546647551,
+STORE, 140613546647552, 140613555036159,
+STORE, 140613504712704, 140613513105407,
+STORE, 140613496320000, 140613513105407,
+SNULL, 140613496324095, 140613513105407,
+STORE, 140613496320000, 140613496324095,
+STORE, 140613496324096, 140613513105407,
+STORE, 140613487927296, 140613496319999,
+SNULL, 140613487931391, 140613496319999,
+STORE, 140613487927296, 140613487931391,
+STORE, 140613487931392, 140613496319999,
+STORE, 140613479534592, 140613487927295,
+SNULL, 140612967845887, 140612976234495,
+STORE, 140612967841792, 140612967845887,
+STORE, 140612967845888, 140612976234495,
+STORE, 140613387280384, 140613395673087,
+STORE, 140613378887680, 140613395673087,
+SNULL, 140613378887680, 140613387280383,
+STORE, 140613387280384, 140613395673087,
+STORE, 140613378887680, 140613387280383,
+SNULL, 140613387284479, 140613395673087,
+STORE, 140613387280384, 140613387284479,
+STORE, 140613387284480, 140613395673087,
+STORE, 140613370494976, 140613387280383,
+STORE, 140613362102272, 140613387280383,
+SNULL, 140613479538687, 140613487927295,
+STORE, 140613479534592, 140613479538687,
+STORE, 140613479538688, 140613487927295,
+STORE, 140613353709568, 140613387280383,
+STORE, 140613345316864, 140613387280383,
+STORE, 140613244669952, 140613253062655,
+SNULL, 140613345320959, 140613387280383,
+STORE, 140613345316864, 140613345320959,
+STORE, 140613345320960, 140613387280383,
+SNULL, 140613538254847, 140613546643455,
+STORE, 140613538250752, 140613538254847,
+STORE, 140613538254848, 140613546643455,
+STORE, 140613236277248, 140613253062655,
+STORE, 140613227884544, 140613253062655,
+STORE, 140613219491840, 140613253062655,
+STORE, 140613211099136, 140613253062655,
+SNULL, 140613211103231, 140613253062655,
+STORE, 140613211099136, 140613211103231,
+STORE, 140613211103232, 140613253062655,
+STORE, 140613102059520, 140613110452223,
+STORE, 140613093666816, 140613110452223,
+SNULL, 140613093670911, 140613110452223,
+STORE, 140613093666816, 140613093670911,
+STORE, 140613093670912, 140613110452223,
+STORE, 140613085274112, 140613093666815,
+SNULL, 140613496324096, 140613504712703,
+STORE, 140613504712704, 140613513105407,
+STORE, 140613496324096, 140613504712703,
+SNULL, 140613504716799, 140613513105407,
+STORE, 140613504712704, 140613504716799,
+STORE, 140613504716800, 140613513105407,
+SNULL, 140613345320960, 140613378887679,
+STORE, 140613378887680, 140613387280383,
+STORE, 140613345320960, 140613378887679,
+SNULL, 140613378891775, 140613387280383,
+STORE, 140613378887680, 140613378891775,
+STORE, 140613378891776, 140613387280383,
+SNULL, 140613345320960, 140613362102271,
+STORE, 140613362102272, 140613378887679,
+STORE, 140613345320960, 140613362102271,
+SNULL, 140613362106367, 140613378887679,
+STORE, 140613362102272, 140613362106367,
+STORE, 140613362106368, 140613378887679,
+SNULL, 140613362106368, 140613370494975,
+STORE, 140613370494976, 140613378887679,
+STORE, 140613362106368, 140613370494975,
+SNULL, 140613370499071, 140613378887679,
+STORE, 140613370494976, 140613370499071,
+STORE, 140613370499072, 140613378887679,
+STORE, 140613076881408, 140613093666815,
+STORE, 140612993019904, 140613001412607,
+SNULL, 140613076885503, 140613093666815,
+STORE, 140613076881408, 140613076885503,
+STORE, 140613076885504, 140613093666815,
+SNULL, 140613093670912, 140613102059519,
+STORE, 140613102059520, 140613110452223,
+STORE, 140613093670912, 140613102059519,
+SNULL, 140613102063615, 140613110452223,
+STORE, 140613102059520, 140613102063615,
+STORE, 140613102063616, 140613110452223,
+SNULL, 140613076885504, 140613085274111,
+STORE, 140613085274112, 140613093666815,
+STORE, 140613076885504, 140613085274111,
+SNULL, 140613085278207, 140613093666815,
+STORE, 140613085274112, 140613085278207,
+STORE, 140613085278208, 140613093666815,
+STORE, 140612984627200, 140613001412607,
+STORE, 140612967845888, 140612984627199,
+SNULL, 140613211103232, 140613219491839,
+STORE, 140613219491840, 140613253062655,
+STORE, 140613211103232, 140613219491839,
+SNULL, 140613219495935, 140613253062655,
+STORE, 140613219491840, 140613219495935,
+STORE, 140613219495936, 140613253062655,
+STORE, 140612959449088, 140612967841791,
+STORE, 140612951056384, 140612967841791,
+SNULL, 140612951060479, 140612967841791,
+STORE, 140612951056384, 140612951060479,
+STORE, 140612951060480, 140612967841791,
+SNULL, 140613345320960, 140613353709567,
+STORE, 140613353709568, 140613362102271,
+STORE, 140613345320960, 140613353709567,
+SNULL, 140613353713663, 140613362102271,
+STORE, 140613353709568, 140613353713663,
+STORE, 140613353713664, 140613362102271,
+SNULL, 140613219495936, 140613244669951,
+STORE, 140613244669952, 140613253062655,
+STORE, 140613219495936, 140613244669951,
+SNULL, 140613244674047, 140613253062655,
+STORE, 140613244669952, 140613244674047,
+STORE, 140613244674048, 140613253062655,
+STORE, 140612942663680, 140612951056383,
+SNULL, 140613219495936, 140613236277247,
+STORE, 140613236277248, 140613244669951,
+STORE, 140613219495936, 140613236277247,
+SNULL, 140613236281343, 140613244669951,
+STORE, 140613236277248, 140613236281343,
+STORE, 140613236281344, 140613244669951,
+SNULL, 140613219495936, 140613227884543,
+STORE, 140613227884544, 140613236277247,
+STORE, 140613219495936, 140613227884543,
+SNULL, 140613227888639, 140613236277247,
+STORE, 140613227884544, 140613227888639,
+STORE, 140613227888640, 140613236277247,
+SNULL, 140612984627200, 140612993019903,
+STORE, 140612993019904, 140613001412607,
+STORE, 140612984627200, 140612993019903,
+SNULL, 140612993023999, 140613001412607,
+STORE, 140612993019904, 140612993023999,
+STORE, 140612993024000, 140613001412607,
+STORE, 140612858802176, 140612867194879,
+STORE, 140612850409472, 140612867194879,
+SNULL, 140612951060480, 140612959449087,
+STORE, 140612959449088, 140612967841791,
+STORE, 140612951060480, 140612959449087,
+SNULL, 140612959453183, 140612967841791,
+STORE, 140612959449088, 140612959453183,
+STORE, 140612959453184, 140612967841791,
+SNULL, 140612967845888, 140612976234495,
+STORE, 140612976234496, 140612984627199,
+STORE, 140612967845888, 140612976234495,
+SNULL, 140612976238591, 140612984627199,
+STORE, 140612976234496, 140612976238591,
+STORE, 140612976238592, 140612984627199,
+STORE, 140612842016768, 140612867194879,
+SNULL, 140612842020863, 140612867194879,
+STORE, 140612842016768, 140612842020863,
+STORE, 140612842020864, 140612867194879,
+SNULL, 140612984631295, 140612993019903,
+STORE, 140612984627200, 140612984631295,
+STORE, 140612984631296, 140612993019903,
+STORE, 140612825235456, 140612842016767,
+STORE, 140612808445952, 140612816838655,
+SNULL, 140612942667775, 140612951056383,
+STORE, 140612942663680, 140612942667775,
+STORE, 140612942667776, 140612951056383,
+STORE, 140612724584448, 140612732977151,
+SNULL, 140612724588543, 140612732977151,
+STORE, 140612724584448, 140612724588543,
+STORE, 140612724588544, 140612732977151,
+STORE, 140612716191744, 140612724584447,
+SNULL, 140612842020864, 140612850409471,
+STORE, 140612850409472, 140612867194879,
+STORE, 140612842020864, 140612850409471,
+SNULL, 140612850413567, 140612867194879,
+STORE, 140612850409472, 140612850413567,
+STORE, 140612850413568, 140612867194879,
+SNULL, 140612850413568, 140612858802175,
+STORE, 140612858802176, 140612867194879,
+STORE, 140612850413568, 140612858802175,
+SNULL, 140612858806271, 140612867194879,
+STORE, 140612858802176, 140612858806271,
+STORE, 140612858806272, 140612867194879,
+STORE, 140612707799040, 140612724584447,
+SNULL, 140612707803135, 140612724584447,
+STORE, 140612707799040, 140612707803135,
+STORE, 140612707803136, 140612724584447,
+SNULL, 140612707803136, 140612716191743,
+STORE, 140612716191744, 140612724584447,
+STORE, 140612707803136, 140612716191743,
+SNULL, 140612716195839, 140612724584447,
+STORE, 140612716191744, 140612716195839,
+STORE, 140612716195840, 140612724584447,
+SNULL, 140612808450047, 140612816838655,
+STORE, 140612808445952, 140612808450047,
+STORE, 140612808450048, 140612816838655,
+SNULL, 140612825235456, 140612833624063,
+STORE, 140612833624064, 140612842016767,
+STORE, 140612825235456, 140612833624063,
+SNULL, 140612833628159, 140612842016767,
+STORE, 140612833624064, 140612833628159,
+STORE, 140612833628160, 140612842016767,
+STORE, 140612699406336, 140612707799039,
+SNULL, 140612699410431, 140612707799039,
+STORE, 140612699406336, 140612699410431,
+STORE, 140612699410432, 140612707799039,
+STORE, 140614384926720, 140614384955391,
+STORE, 140614349332480, 140614351523839,
+SNULL, 140614349332480, 140614349422591,
+STORE, 140614349422592, 140614351523839,
+STORE, 140614349332480, 140614349422591,
+SNULL, 140614351515647, 140614351523839,
+STORE, 140614349422592, 140614351515647,
+STORE, 140614351515648, 140614351523839,
+ERASE, 140614351515648, 140614351523839,
+STORE, 140614351515648, 140614351523839,
+SNULL, 140614351519743, 140614351523839,
+STORE, 140614351515648, 140614351519743,
+STORE, 140614351519744, 140614351523839,
+ERASE, 140614384926720, 140614384955391,
+ERASE, 140613949296640, 140613949300735,
+ERASE, 140613949300736, 140613957689343,
+ERASE, 140613689253888, 140613689257983,
+ERASE, 140613689257984, 140613697646591,
+ERASE, 140613563428864, 140613563432959,
+ERASE, 140613563432960, 140613571821567,
+ERASE, 140613211099136, 140613211103231,
+ERASE, 140613211103232, 140613219491839,
+ERASE, 140614133870592, 140614133874687,
+ERASE, 140614133874688, 140614142263295,
+ERASE, 140612967841792, 140612967845887,
+ERASE, 140612967845888, 140612976234495,
+ERASE, 140613076881408, 140613076885503,
+ERASE, 140613076885504, 140613085274111,
+ERASE, 140612850409472, 140612850413567,
+ERASE, 140612850413568, 140612858802175,
+ERASE, 140613110452224, 140613110456319,
+ERASE, 140613110456320, 140613118844927,
+ERASE, 140613706039296, 140613706043391,
+ERASE, 140613706043392, 140613714431999,
+ERASE, 140613521498112, 140613521502207,
+ERASE, 140613521502208, 140613529890815,
+ERASE, 140613362102272, 140613362106367,
+ERASE, 140613362106368, 140613370494975,
+ERASE, 140613253062656, 140613253066751,
+ERASE, 140613253066752, 140613261455359,
+ERASE, 140612816838656, 140612816842751,
+ERASE, 140612816842752, 140612825231359,
+ERASE, 140613261455360, 140613261459455,
+ERASE, 140613261459456, 140613269848063,
+ERASE, 140613118844928, 140613118849023,
+ERASE, 140613118849024, 140613127237631,
+ERASE, 140613714432000, 140613714436095,
+ERASE, 140613714436096, 140613722824703,
+ERASE, 140613496320000, 140613496324095,
+ERASE, 140613496324096, 140613504712703,
+ERASE, 140613513105408, 140613513109503,
+ERASE, 140613513109504, 140613521498111,
+ERASE, 140613697646592, 140613697650687,
+ERASE, 140613697650688, 140613706039295,
+ERASE, 140613093666816, 140613093670911,
+ERASE, 140613093670912, 140613102059519,
+ERASE, 140612993019904, 140612993023999,
+ERASE, 140612993024000, 140613001412607,
+ERASE, 140613127237632, 140613127241727,
+ERASE, 140613127241728, 140613135630335,
+ERASE, 140613957689344, 140613957693439,
+ERASE, 140613957693440, 140613966082047,
+ERASE, 140613571821568, 140613571825663,
+ERASE, 140613571825664, 140613580214271,
+ERASE, 140613479534592, 140613479538687,
+ERASE, 140613479538688, 140613487927295,
+ERASE, 140612984627200, 140612984631295,
+ERASE, 140612984631296, 140612993019903,
+ERASE, 140613588606976, 140613588611071,
+ERASE, 140613588611072, 140613596999679,
+ERASE, 140613680861184, 140613680865279,
+ERASE, 140613680865280, 140613689253887,
+ERASE, 140613345316864, 140613345320959,
+ERASE, 140613345320960, 140613353709567,
+ERASE, 140613596999680, 140613597003775,
+ERASE, 140613597003776, 140613605392383,
+ERASE, 140613966082048, 140613966086143,
+ERASE, 140613966086144, 140613974474751,
+ERASE, 140613731217408, 140613731221503,
+ERASE, 140613731221504, 140613739610111,
+ERASE, 140613395673088, 140613395677183,
+ERASE, 140613395677184, 140613404065791,
+ERASE, 140612825231360, 140612825235455,
+ERASE, 140612825235456, 140612833624063,
+ERASE, 140612674228224, 140612674232319,
+ERASE, 140612674232320, 140612682620927,
+ERASE, 140613722824704, 140613722828799,
+ERASE, 140613722828800, 140613731217407,
+ERASE, 140613487927296, 140613487931391,
+ERASE, 140613487931392, 140613496319999,
+ERASE, 140613102059520, 140613102063615,
+ERASE, 140613102063616, 140613110452223,
+ERASE, 140614242910208, 140614242914303,
+ERASE, 140614242914304, 140614251302911,
+ERASE, 140612808445952, 140612808450047,
+ERASE, 140612808450048, 140612816838655,
+ERASE, 140613236277248, 140613236281343,
+ERASE, 140613236281344, 140613244669951,
+ERASE, 140613580214272, 140613580218367,
+ERASE, 140613580218368, 140613588606975,
+ERASE, 140613370494976, 140613370499071,
+ERASE, 140613370499072, 140613378887679,
+ERASE, 140613244669952, 140613244674047,
+ERASE, 140613244674048, 140613253062655,
+ERASE, 140612724584448, 140612724588543,
+ERASE, 140612724588544, 140612732977151,
+ERASE, 140612707799040, 140612707803135,
+ERASE, 140612707803136, 140612716191743,
+ERASE, 140613504712704, 140613504716799,
+ERASE, 140613504716800, 140613513105407,
+ };
+
+ static const unsigned long set39[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140736271417344, 140737488351231,
+SNULL, 140736271421439, 140737488351231,
+STORE, 140736271417344, 140736271421439,
+STORE, 140736271286272, 140736271421439,
+STORE, 94412930822144, 94412933074943,
+SNULL, 94412930953215, 94412933074943,
+STORE, 94412930822144, 94412930953215,
+STORE, 94412930953216, 94412933074943,
+ERASE, 94412930953216, 94412933074943,
+STORE, 94412933046272, 94412933054463,
+STORE, 94412933054464, 94412933074943,
+STORE, 140326136901632, 140326139154431,
+SNULL, 140326137044991, 140326139154431,
+STORE, 140326136901632, 140326137044991,
+STORE, 140326137044992, 140326139154431,
+ERASE, 140326137044992, 140326139154431,
+STORE, 140326139142144, 140326139150335,
+STORE, 140326139150336, 140326139154431,
+STORE, 140736271585280, 140736271589375,
+STORE, 140736271572992, 140736271585279,
+STORE, 140326139113472, 140326139142143,
+STORE, 140326139105280, 140326139113471,
+STORE, 140326134685696, 140326136901631,
+SNULL, 140326134685696, 140326134783999,
+STORE, 140326134784000, 140326136901631,
+STORE, 140326134685696, 140326134783999,
+SNULL, 140326136877055, 140326136901631,
+STORE, 140326134784000, 140326136877055,
+STORE, 140326136877056, 140326136901631,
+SNULL, 140326136877056, 140326136885247,
+STORE, 140326136885248, 140326136901631,
+STORE, 140326136877056, 140326136885247,
+ERASE, 140326136877056, 140326136885247,
+STORE, 140326136877056, 140326136885247,
+ERASE, 140326136885248, 140326136901631,
+STORE, 140326136885248, 140326136901631,
+STORE, 140326130888704, 140326134685695,
+SNULL, 140326130888704, 140326132547583,
+STORE, 140326132547584, 140326134685695,
+STORE, 140326130888704, 140326132547583,
+SNULL, 140326134644735, 140326134685695,
+STORE, 140326132547584, 140326134644735,
+STORE, 140326134644736, 140326134685695,
+SNULL, 140326134644736, 140326134669311,
+STORE, 140326134669312, 140326134685695,
+STORE, 140326134644736, 140326134669311,
+ERASE, 140326134644736, 140326134669311,
+STORE, 140326134644736, 140326134669311,
+ERASE, 140326134669312, 140326134685695,
+STORE, 140326134669312, 140326134685695,
+STORE, 140326139097088, 140326139113471,
+SNULL, 140326134661119, 140326134669311,
+STORE, 140326134644736, 140326134661119,
+STORE, 140326134661120, 140326134669311,
+SNULL, 140326136881151, 140326136885247,
+STORE, 140326136877056, 140326136881151,
+STORE, 140326136881152, 140326136885247,
+SNULL, 94412933050367, 94412933054463,
+STORE, 94412933046272, 94412933050367,
+STORE, 94412933050368, 94412933054463,
+SNULL, 140326139146239, 140326139150335,
+STORE, 140326139142144, 140326139146239,
+STORE, 140326139146240, 140326139150335,
+ERASE, 140326139113472, 140326139142143,
+STORE, 94412939493376, 94412939628543,
+STORE, 140326122496000, 140326130888703,
+SNULL, 140326122500095, 140326130888703,
+STORE, 140326122496000, 140326122500095,
+STORE, 140326122500096, 140326130888703,
+STORE, 140326114103296, 140326122495999,
+STORE, 140325979885568, 140326114103295,
+SNULL, 140325979885568, 140326043910143,
+STORE, 140326043910144, 140326114103295,
+STORE, 140325979885568, 140326043910143,
+ERASE, 140325979885568, 140326043910143,
+SNULL, 140326111019007, 140326114103295,
+STORE, 140326043910144, 140326111019007,
+STORE, 140326111019008, 140326114103295,
+ERASE, 140326111019008, 140326114103295,
+SNULL, 140326044045311, 140326111019007,
+STORE, 140326043910144, 140326044045311,
+STORE, 140326044045312, 140326111019007,
+SNULL, 140326114107391, 140326122495999,
+STORE, 140326114103296, 140326114107391,
+STORE, 140326114107392, 140326122495999,
+STORE, 140326035517440, 140326043910143,
+SNULL, 140326035521535, 140326043910143,
+STORE, 140326035517440, 140326035521535,
+STORE, 140326035521536, 140326043910143,
+STORE, 140326027124736, 140326035517439,
+SNULL, 140326027128831, 140326035517439,
+STORE, 140326027124736, 140326027128831,
+STORE, 140326027128832, 140326035517439,
+STORE, 140326018732032, 140326027124735,
+SNULL, 140326018736127, 140326027124735,
+STORE, 140326018732032, 140326018736127,
+STORE, 140326018736128, 140326027124735,
+STORE, 140326010339328, 140326018732031,
+STORE, 140326001946624, 140326018732031,
+STORE, 140325993553920, 140326018732031,
+STORE, 140325859336192, 140325993553919,
+SNULL, 140325859336192, 140325909692415,
+STORE, 140325909692416, 140325993553919,
+STORE, 140325859336192, 140325909692415,
+ERASE, 140325859336192, 140325909692415,
+SNULL, 140325976801279, 140325993553919,
+STORE, 140325909692416, 140325976801279,
+STORE, 140325976801280, 140325993553919,
+ERASE, 140325976801280, 140325993553919,
+STORE, 140325985161216, 140326018732031,
+STORE, 140325775474688, 140325976801279,
+STORE, 140325708365824, 140325976801279,
+SNULL, 140325708500991, 140325976801279,
+STORE, 140325708365824, 140325708500991,
+STORE, 140325708500992, 140325976801279,
+SNULL, 140325708500992, 140325909692415,
+STORE, 140325909692416, 140325976801279,
+STORE, 140325708500992, 140325909692415,
+SNULL, 140325909827583, 140325976801279,
+STORE, 140325909692416, 140325909827583,
+STORE, 140325909827584, 140325976801279,
+SNULL, 140325842583551, 140325909692415,
+STORE, 140325708500992, 140325842583551,
+STORE, 140325842583552, 140325909692415,
+ERASE, 140325842583552, 140325909692415,
+SNULL, 140325708500992, 140325775474687,
+STORE, 140325775474688, 140325842583551,
+STORE, 140325708500992, 140325775474687,
+SNULL, 140325775609855, 140325842583551,
+STORE, 140325775474688, 140325775609855,
+STORE, 140325775609856, 140325842583551,
+STORE, 140325775609856, 140325909692415,
+SNULL, 140325775609856, 140325842583551,
+STORE, 140325842583552, 140325909692415,
+STORE, 140325775609856, 140325842583551,
+SNULL, 140325842718719, 140325909692415,
+STORE, 140325842583552, 140325842718719,
+STORE, 140325842718720, 140325909692415,
+SNULL, 140325985161216, 140325993553919,
+STORE, 140325993553920, 140326018732031,
+STORE, 140325985161216, 140325993553919,
+SNULL, 140325993558015, 140326018732031,
+STORE, 140325993553920, 140325993558015,
+STORE, 140325993558016, 140326018732031,
+SNULL, 140325985165311, 140325993553919,
+STORE, 140325985161216, 140325985165311,
+STORE, 140325985165312, 140325993553919,
+SNULL, 140325993558016, 140326001946623,
+STORE, 140326001946624, 140326018732031,
+STORE, 140325993558016, 140326001946623,
+SNULL, 140326001950719, 140326018732031,
+STORE, 140326001946624, 140326001950719,
+STORE, 140326001950720, 140326018732031,
+SNULL, 140326001950720, 140326010339327,
+STORE, 140326010339328, 140326018732031,
+STORE, 140326001950720, 140326010339327,
+SNULL, 140326010343423, 140326018732031,
+STORE, 140326010339328, 140326010343423,
+STORE, 140326010343424, 140326018732031,
+STORE, 140325699973120, 140325708365823,
+STORE, 140325691580416, 140325708365823,
+STORE, 140325683187712, 140325708365823,
+SNULL, 140325683191807, 140325708365823,
+STORE, 140325683187712, 140325683191807,
+STORE, 140325683191808, 140325708365823,
+SNULL, 140325683191808, 140325699973119,
+STORE, 140325699973120, 140325708365823,
+STORE, 140325683191808, 140325699973119,
+SNULL, 140325699977215, 140325708365823,
+STORE, 140325699973120, 140325699977215,
+STORE, 140325699977216, 140325708365823,
+STORE, 140325674795008, 140325683187711,
+STORE, 140325666402304, 140325683187711,
+STORE, 140325658009600, 140325683187711,
+SNULL, 140325658009600, 140325666402303,
+STORE, 140325666402304, 140325683187711,
+STORE, 140325658009600, 140325666402303,
+SNULL, 140325666406399, 140325683187711,
+STORE, 140325666402304, 140325666406399,
+STORE, 140325666406400, 140325683187711,
+SNULL, 140325683191808, 140325691580415,
+STORE, 140325691580416, 140325699973119,
+STORE, 140325683191808, 140325691580415,
+SNULL, 140325691584511, 140325699973119,
+STORE, 140325691580416, 140325691584511,
+STORE, 140325691584512, 140325699973119,
+SNULL, 140325666406400, 140325674795007,
+STORE, 140325674795008, 140325683187711,
+STORE, 140325666406400, 140325674795007,
+SNULL, 140325674799103, 140325683187711,
+STORE, 140325674795008, 140325674799103,
+STORE, 140325674799104, 140325683187711,
+STORE, 140325649616896, 140325666402303,
+SNULL, 140325649616896, 140325658009599,
+STORE, 140325658009600, 140325666402303,
+STORE, 140325649616896, 140325658009599,
+SNULL, 140325658013695, 140325666402303,
+STORE, 140325658009600, 140325658013695,
+STORE, 140325658013696, 140325666402303,
+SNULL, 140325649620991, 140325658009599,
+STORE, 140325649616896, 140325649620991,
+STORE, 140325649620992, 140325658009599,
+STORE, 140325641224192, 140325649616895,
+STORE, 140325632831488, 140325649616895,
+SNULL, 140325632835583, 140325649616895,
+STORE, 140325632831488, 140325632835583,
+STORE, 140325632835584, 140325649616895,
+STORE, 140325624438784, 140325632831487,
+SNULL, 140325624442879, 140325632831487,
+STORE, 140325624438784, 140325624442879,
+STORE, 140325624442880, 140325632831487,
+SNULL, 140325632835584, 140325641224191,
+STORE, 140325641224192, 140325649616895,
+STORE, 140325632835584, 140325641224191,
+SNULL, 140325641228287, 140325649616895,
+STORE, 140325641224192, 140325641228287,
+STORE, 140325641228288, 140325649616895,
+STORE, 140325616046080, 140325624438783,
+SNULL, 140325616050175, 140325624438783,
+STORE, 140325616046080, 140325616050175,
+STORE, 140325616050176, 140325624438783,
+STORE, 140325607653376, 140325616046079,
+SNULL, 140325607657471, 140325616046079,
+STORE, 140325607653376, 140325607657471,
+STORE, 140325607657472, 140325616046079,
+STORE, 140325599260672, 140325607653375,
+STORE, 140325590867968, 140325607653375,
+STORE, 140325456650240, 140325590867967,
+SNULL, 140325456650240, 140325507039231,
+STORE, 140325507039232, 140325590867967,
+STORE, 140325456650240, 140325507039231,
+ERASE, 140325456650240, 140325507039231,
+STORE, 140325498646528, 140325507039231,
+STORE, 140325364428800, 140325498646527,
+SNULL, 140325364428800, 140325372821503,
+STORE, 140325372821504, 140325498646527,
+STORE, 140325364428800, 140325372821503,
+ERASE, 140325364428800, 140325372821503,
+STORE, 140325364428800, 140325372821503,
+STORE, 140325356036096, 140325372821503,
+STORE, 140325221818368, 140325356036095,
+SNULL, 140325221818368, 140325238603775,
+STORE, 140325238603776, 140325356036095,
+STORE, 140325221818368, 140325238603775,
+ERASE, 140325221818368, 140325238603775,
+STORE, 140325230211072, 140325238603775,
+STORE, 140325221818368, 140325238603775,
+STORE, 140325087600640, 140325221818367,
+STORE, 140325079207936, 140325087600639,
+SNULL, 140325087600640, 140325104386047,
+STORE, 140325104386048, 140325221818367,
+STORE, 140325087600640, 140325104386047,
+ERASE, 140325087600640, 140325104386047,
+STORE, 140325095993344, 140325104386047,
+STORE, 140325079207936, 140325104386047,
+STORE, 140324944990208, 140325079207935,
+SNULL, 140324944990208, 140324970168319,
+STORE, 140324970168320, 140325079207935,
+STORE, 140324944990208, 140324970168319,
+ERASE, 140324944990208, 140324970168319,
+STORE, 140324961775616, 140324970168319,
+STORE, 140324953382912, 140324970168319,
+STORE, 140324819165184, 140324953382911,
+STORE, 140324684947456, 140324953382911,
+STORE, 140324676554752, 140324684947455,
+STORE, 140324668162048, 140324684947455,
+STORE, 140324533944320, 140324668162047,
+STORE, 140324525551616, 140324533944319,
+SNULL, 140324533944320, 140324567515135,
+STORE, 140324567515136, 140324668162047,
+STORE, 140324533944320, 140324567515135,
+ERASE, 140324533944320, 140324567515135,
+STORE, 140324559122432, 140324567515135,
+STORE, 140324391333888, 140324525551615,
+SNULL, 140325574148095, 140325590867967,
+STORE, 140325507039232, 140325574148095,
+STORE, 140325574148096, 140325590867967,
+ERASE, 140325574148096, 140325590867967,
+SNULL, 140325439930367, 140325498646527,
+STORE, 140325372821504, 140325439930367,
+STORE, 140325439930368, 140325498646527,
+ERASE, 140325439930368, 140325498646527,
+SNULL, 140325305712639, 140325356036095,
+STORE, 140325238603776, 140325305712639,
+STORE, 140325305712640, 140325356036095,
+ERASE, 140325305712640, 140325356036095,
+SNULL, 140325171494911, 140325221818367,
+STORE, 140325104386048, 140325171494911,
+STORE, 140325171494912, 140325221818367,
+ERASE, 140325171494912, 140325221818367,
+SNULL, 140325104521215, 140325171494911,
+STORE, 140325104386048, 140325104521215,
+STORE, 140325104521216, 140325171494911,
+STORE, 140324257116160, 140324525551615,
+SNULL, 140324257116160, 140324299079679,
+STORE, 140324299079680, 140324525551615,
+STORE, 140324257116160, 140324299079679,
+ERASE, 140324257116160, 140324299079679,
+SNULL, 140325037277183, 140325079207935,
+STORE, 140324970168320, 140325037277183,
+STORE, 140325037277184, 140325079207935,
+ERASE, 140325037277184, 140325079207935,
+SNULL, 140324819165183, 140324953382911,
+STORE, 140324684947456, 140324819165183,
+STORE, 140324819165184, 140324953382911,
+SNULL, 140324819165184, 140324835950591,
+STORE, 140324835950592, 140324953382911,
+STORE, 140324819165184, 140324835950591,
+ERASE, 140324819165184, 140324835950591,
+SNULL, 140324903059455, 140324953382911,
+STORE, 140324835950592, 140324903059455,
+STORE, 140324903059456, 140324953382911,
+ERASE, 140324903059456, 140324953382911,
+SNULL, 140324684947456, 140324701732863,
+STORE, 140324701732864, 140324819165183,
+STORE, 140324684947456, 140324701732863,
+ERASE, 140324684947456, 140324701732863,
+SNULL, 140324768841727, 140324819165183,
+STORE, 140324701732864, 140324768841727,
+STORE, 140324768841728, 140324819165183,
+ERASE, 140324768841728, 140324819165183,
+SNULL, 140324634623999, 140324668162047,
+STORE, 140324567515136, 140324634623999,
+STORE, 140324634624000, 140324668162047,
+ERASE, 140324634624000, 140324668162047,
+SNULL, 140324391333887, 140324525551615,
+STORE, 140324299079680, 140324391333887,
+STORE, 140324391333888, 140324525551615,
+SNULL, 140324391333888, 140324433297407,
+STORE, 140324433297408, 140324525551615,
+STORE, 140324391333888, 140324433297407,
+ERASE, 140324391333888, 140324433297407,
+SNULL, 140325507174399, 140325574148095,
+STORE, 140325507039232, 140325507174399,
+STORE, 140325507174400, 140325574148095,
+SNULL, 140325590867968, 140325599260671,
+STORE, 140325599260672, 140325607653375,
+STORE, 140325590867968, 140325599260671,
+SNULL, 140325599264767, 140325607653375,
+STORE, 140325599260672, 140325599264767,
+STORE, 140325599264768, 140325607653375,
+SNULL, 140325372956671, 140325439930367,
+STORE, 140325372821504, 140325372956671,
+STORE, 140325372956672, 140325439930367,
+SNULL, 140324668166143, 140324684947455,
+STORE, 140324668162048, 140324668166143,
+STORE, 140324668166144, 140324684947455,
+SNULL, 140324525555711, 140324533944319,
+STORE, 140324525551616, 140324525555711,
+STORE, 140324525555712, 140324533944319,
+SNULL, 140324953382912, 140324961775615,
+STORE, 140324961775616, 140324970168319,
+STORE, 140324953382912, 140324961775615,
+SNULL, 140324961779711, 140324970168319,
+STORE, 140324961775616, 140324961779711,
+STORE, 140324961779712, 140324970168319,
+SNULL, 140325079212031, 140325104386047,
+STORE, 140325079207936, 140325079212031,
+STORE, 140325079212032, 140325104386047,
+SNULL, 140325221818368, 140325230211071,
+STORE, 140325230211072, 140325238603775,
+STORE, 140325221818368, 140325230211071,
+SNULL, 140325230215167, 140325238603775,
+STORE, 140325230211072, 140325230215167,
+STORE, 140325230215168, 140325238603775,
+SNULL, 140325356036096, 140325364428799,
+STORE, 140325364428800, 140325372821503,
+STORE, 140325356036096, 140325364428799,
+SNULL, 140325364432895, 140325372821503,
+ };
+ static const unsigned long set40[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140734309167104, 140737488351231,
+SNULL, 140734309171199, 140737488351231,
+STORE, 140734309167104, 140734309171199,
+STORE, 140734309036032, 140734309171199,
+STORE, 94270500081664, 94270502334463,
+SNULL, 94270500212735, 94270502334463,
+STORE, 94270500081664, 94270500212735,
+STORE, 94270500212736, 94270502334463,
+ERASE, 94270500212736, 94270502334463,
+STORE, 94270502305792, 94270502313983,
+STORE, 94270502313984, 94270502334463,
+STORE, 140321935110144, 140321937362943,
+SNULL, 140321935253503, 140321937362943,
+STORE, 140321935110144, 140321935253503,
+STORE, 140321935253504, 140321937362943,
+ERASE, 140321935253504, 140321937362943,
+STORE, 140321937350656, 140321937358847,
+STORE, 140321937358848, 140321937362943,
+STORE, 140734309625856, 140734309629951,
+STORE, 140734309613568, 140734309625855,
+STORE, 140321937321984, 140321937350655,
+STORE, 140321937313792, 140321937321983,
+STORE, 140321932894208, 140321935110143,
+SNULL, 140321932894208, 140321932992511,
+STORE, 140321932992512, 140321935110143,
+STORE, 140321932894208, 140321932992511,
+SNULL, 140321935085567, 140321935110143,
+STORE, 140321932992512, 140321935085567,
+STORE, 140321935085568, 140321935110143,
+SNULL, 140321935085568, 140321935093759,
+STORE, 140321935093760, 140321935110143,
+STORE, 140321935085568, 140321935093759,
+ERASE, 140321935085568, 140321935093759,
+STORE, 140321935085568, 140321935093759,
+ERASE, 140321935093760, 140321935110143,
+STORE, 140321935093760, 140321935110143,
+STORE, 140321929097216, 140321932894207,
+SNULL, 140321929097216, 140321930756095,
+STORE, 140321930756096, 140321932894207,
+STORE, 140321929097216, 140321930756095,
+SNULL, 140321932853247, 140321932894207,
+STORE, 140321930756096, 140321932853247,
+STORE, 140321932853248, 140321932894207,
+SNULL, 140321932853248, 140321932877823,
+STORE, 140321932877824, 140321932894207,
+STORE, 140321932853248, 140321932877823,
+ERASE, 140321932853248, 140321932877823,
+STORE, 140321932853248, 140321932877823,
+ERASE, 140321932877824, 140321932894207,
+STORE, 140321932877824, 140321932894207,
+STORE, 140321937305600, 140321937321983,
+SNULL, 140321932869631, 140321932877823,
+STORE, 140321932853248, 140321932869631,
+STORE, 140321932869632, 140321932877823,
+SNULL, 140321935089663, 140321935093759,
+STORE, 140321935085568, 140321935089663,
+STORE, 140321935089664, 140321935093759,
+SNULL, 94270502309887, 94270502313983,
+STORE, 94270502305792, 94270502309887,
+STORE, 94270502309888, 94270502313983,
+SNULL, 140321937354751, 140321937358847,
+STORE, 140321937350656, 140321937354751,
+STORE, 140321937354752, 140321937358847,
+ERASE, 140321937321984, 140321937350655,
+STORE, 94270507364352, 94270507499519,
+STORE, 140321920704512, 140321929097215,
+SNULL, 140321920708607, 140321929097215,
+STORE, 140321920704512, 140321920708607,
+STORE, 140321920708608, 140321929097215,
+STORE, 140321912311808, 140321920704511,
+STORE, 140321778094080, 140321912311807,
+SNULL, 140321778094080, 140321816051711,
+STORE, 140321816051712, 140321912311807,
+STORE, 140321778094080, 140321816051711,
+ERASE, 140321778094080, 140321816051711,
+SNULL, 140321883160575, 140321912311807,
+STORE, 140321816051712, 140321883160575,
+STORE, 140321883160576, 140321912311807,
+ERASE, 140321883160576, 140321912311807,
+SNULL, 140321816186879, 140321883160575,
+STORE, 140321816051712, 140321816186879,
+STORE, 140321816186880, 140321883160575,
+SNULL, 140321912315903, 140321920704511,
+STORE, 140321912311808, 140321912315903,
+STORE, 140321912315904, 140321920704511,
+STORE, 140321903919104, 140321912311807,
+SNULL, 140321903923199, 140321912311807,
+STORE, 140321903919104, 140321903923199,
+STORE, 140321903923200, 140321912311807,
+STORE, 140321895526400, 140321903919103,
+SNULL, 140321895530495, 140321903919103,
+STORE, 140321895526400, 140321895530495,
+STORE, 140321895530496, 140321903919103,
+STORE, 140321887133696, 140321895526399,
+SNULL, 140321887137791, 140321895526399,
+STORE, 140321887133696, 140321887137791,
+STORE, 140321887137792, 140321895526399,
+STORE, 140321807659008, 140321816051711,
+STORE, 140321673441280, 140321807659007,
+SNULL, 140321673441280, 140321681833983,
+STORE, 140321681833984, 140321807659007,
+STORE, 140321673441280, 140321681833983,
+ERASE, 140321673441280, 140321681833983,
+SNULL, 140321748942847, 140321807659007,
+STORE, 140321681833984, 140321748942847,
+STORE, 140321748942848, 140321807659007,
+ERASE, 140321748942848, 140321807659007,
+STORE, 140321799266304, 140321816051711,
+STORE, 140321790873600, 140321816051711,
+STORE, 140321782480896, 140321816051711,
+STORE, 140321547616256, 140321748942847,
+SNULL, 140321614725119, 140321748942847,
+STORE, 140321547616256, 140321614725119,
+STORE, 140321614725120, 140321748942847,
+SNULL, 140321614725120, 140321681833983,
+STORE, 140321681833984, 140321748942847,
+STORE, 140321614725120, 140321681833983,
+ERASE, 140321614725120, 140321681833983,
+SNULL, 140321681969151, 140321748942847,
+STORE, 140321681833984, 140321681969151,
+STORE, 140321681969152, 140321748942847,
+STORE, 140321547616256, 140321681833983,
+SNULL, 140321547616256, 140321614725119,
+STORE, 140321614725120, 140321681833983,
+STORE, 140321547616256, 140321614725119,
+SNULL, 140321614860287, 140321681833983,
+STORE, 140321614725120, 140321614860287,
+STORE, 140321614860288, 140321681833983,
+SNULL, 140321547751423, 140321614725119,
+STORE, 140321547616256, 140321547751423,
+STORE, 140321547751424, 140321614725119,
+STORE, 140321480507392, 140321547616255,
+SNULL, 140321782480896, 140321799266303,
+STORE, 140321799266304, 140321816051711,
+STORE, 140321782480896, 140321799266303,
+SNULL, 140321799270399, 140321816051711,
+STORE, 140321799266304, 140321799270399,
+STORE, 140321799270400, 140321816051711,
+STORE, 140321774088192, 140321799266303,
+SNULL, 140321774088192, 140321790873599,
+STORE, 140321790873600, 140321799266303,
+STORE, 140321774088192, 140321790873599,
+SNULL, 140321790877695, 140321799266303,
+STORE, 140321790873600, 140321790877695,
+STORE, 140321790877696, 140321799266303,
+SNULL, 140321480642559, 140321547616255,
+STORE, 140321480507392, 140321480642559,
+STORE, 140321480642560, 140321547616255,
+SNULL, 140321774088192, 140321782480895,
+STORE, 140321782480896, 140321790873599,
+STORE, 140321774088192, 140321782480895,
+SNULL, 140321782484991, 140321790873599,
+STORE, 140321782480896, 140321782484991,
+STORE, 140321782484992, 140321790873599,
+SNULL, 140321799270400, 140321807659007,
+STORE, 140321807659008, 140321816051711,
+STORE, 140321799270400, 140321807659007,
+SNULL, 140321807663103, 140321816051711,
+STORE, 140321807659008, 140321807663103,
+STORE, 140321807663104, 140321816051711,
+STORE, 140321765695488, 140321782480895,
+STORE, 140321757302784, 140321782480895,
+SNULL, 140321757306879, 140321782480895,
+STORE, 140321757302784, 140321757306879,
+STORE, 140321757306880, 140321782480895,
+STORE, 140321472114688, 140321480507391,
+STORE, 140321463721984, 140321480507391,
+SNULL, 140321463726079, 140321480507391,
+STORE, 140321463721984, 140321463726079,
+STORE, 140321463726080, 140321480507391,
+SNULL, 140321757306880, 140321774088191,
+STORE, 140321774088192, 140321782480895,
+STORE, 140321757306880, 140321774088191,
+SNULL, 140321774092287, 140321782480895,
+STORE, 140321774088192, 140321774092287,
+STORE, 140321774092288, 140321782480895,
+SNULL, 140321463726080, 140321472114687,
+STORE, 140321472114688, 140321480507391,
+STORE, 140321463726080, 140321472114687,
+SNULL, 140321472118783, 140321480507391,
+STORE, 140321472114688, 140321472118783,
+STORE, 140321472118784, 140321480507391,
+SNULL, 140321757306880, 140321765695487,
+STORE, 140321765695488, 140321774088191,
+STORE, 140321757306880, 140321765695487,
+SNULL, 140321765699583, 140321774088191,
+STORE, 140321765695488, 140321765699583,
+STORE, 140321765699584, 140321774088191,
+STORE, 140321455329280, 140321463721983,
+SNULL, 140321455333375, 140321463721983,
+STORE, 140321455329280, 140321455333375,
+STORE, 140321455333376, 140321463721983,
+STORE, 140321446936576, 140321455329279,
+STORE, 140321438543872, 140321455329279,
+STORE, 140321430151168, 140321455329279,
+SNULL, 140321430155263, 140321455329279,
+STORE, 140321430151168, 140321430155263,
+STORE, 140321430155264, 140321455329279,
+SNULL, 140321430155264, 140321446936575,
+STORE, 140321446936576, 140321455329279,
+STORE, 140321430155264, 140321446936575,
+SNULL, 140321446940671, 140321455329279,
+STORE, 140321446936576, 140321446940671,
+STORE, 140321446940672, 140321455329279,
+SNULL, 140321430155264, 140321438543871,
+STORE, 140321438543872, 140321446936575,
+STORE, 140321430155264, 140321438543871,
+SNULL, 140321438547967, 140321446936575,
+STORE, 140321438543872, 140321438547967,
+STORE, 140321438547968, 140321446936575,
+STORE, 140321421758464, 140321430151167,
+SNULL, 140321421762559, 140321430151167,
+STORE, 140321421758464, 140321421762559,
+STORE, 140321421762560, 140321430151167,
+STORE, 140321413365760, 140321421758463,
+SNULL, 140321413369855, 140321421758463,
+STORE, 140321413365760, 140321413369855,
+STORE, 140321413369856, 140321421758463,
+STORE, 140321404973056, 140321413365759,
+SNULL, 140321404977151, 140321413365759,
+STORE, 140321404973056, 140321404977151,
+STORE, 140321404977152, 140321413365759,
+STORE, 140321396580352, 140321404973055,
+STORE, 140321388187648, 140321404973055,
+STORE, 140321253969920, 140321388187647,
+SNULL, 140321253969920, 140321279180799,
+STORE, 140321279180800, 140321388187647,
+STORE, 140321253969920, 140321279180799,
+ERASE, 140321253969920, 140321279180799,
+SNULL, 140321346289663, 140321388187647,
+STORE, 140321279180800, 140321346289663,
+STORE, 140321346289664, 140321388187647,
+ERASE, 140321346289664, 140321388187647,
+STORE, 140321144963072, 140321346289663,
+STORE, 140321379794944, 140321404973055,
+STORE, 140321371402240, 140321404973055,
+STORE, 140321010745344, 140321346289663,
+STORE, 140321363009536, 140321404973055,
+SNULL, 140321077854207, 140321346289663,
+STORE, 140321010745344, 140321077854207,
+STORE, 140321077854208, 140321346289663,
+SNULL, 140321077854208, 140321144963071,
+STORE, 140321144963072, 140321346289663,
+STORE, 140321077854208, 140321144963071,
+ERASE, 140321077854208, 140321144963071,
+STORE, 140321354616832, 140321404973055,
+STORE, 140321136570368, 140321144963071,
+STORE, 140320943636480, 140321077854207,
+STORE, 140320876527616, 140321077854207,
+STORE, 140321128177664, 140321144963071,
+SNULL, 140320876662783, 140321077854207,
+STORE, 140320876527616, 140320876662783,
+STORE, 140320876662784, 140321077854207,
+STORE, 140321119784960, 140321144963071,
+STORE, 140321111392256, 140321144963071,
+STORE, 140320742309888, 140320876527615,
+STORE, 140321102999552, 140321144963071,
+STORE, 140320608092160, 140320876527615,
+SNULL, 140320675201023, 140320876527615,
+STORE, 140320608092160, 140320675201023,
+STORE, 140320675201024, 140320876527615,
+SNULL, 140320675201024, 140320742309887,
+STORE, 140320742309888, 140320876527615,
+STORE, 140320675201024, 140320742309887,
+ERASE, 140320675201024, 140320742309887,
+STORE, 140321094606848, 140321144963071,
+STORE, 140321086214144, 140321144963071,
+STORE, 140320608092160, 140320876527615,
+SNULL, 140320608092160, 140320675201023,
+STORE, 140320675201024, 140320876527615,
+STORE, 140320608092160, 140320675201023,
+SNULL, 140320675336191, 140320876527615,
+STORE, 140320675201024, 140320675336191,
+STORE, 140320675336192, 140320876527615,
+STORE, 140320599699456, 140320608092159,
+STORE, 140320591306752, 140320608092159,
+STORE, 140320457089024, 140320591306751,
+STORE, 140320448696320, 140320457089023,
+STORE, 140320314478592, 140320448696319,
+SNULL, 140321144963072, 140321279180799,
+STORE, 140321279180800, 140321346289663,
+STORE, 140321144963072, 140321279180799,
+SNULL, 140321279315967, 140321346289663,
+STORE, 140321279180800, 140321279315967,
+STORE, 140321279315968, 140321346289663,
+SNULL, 140321086214144, 140321136570367,
+STORE, 140321136570368, 140321144963071,
+STORE, 140321086214144, 140321136570367,
+SNULL, 140321136574463, 140321144963071,
+STORE, 140321136570368, 140321136574463,
+STORE, 140321136574464, 140321144963071,
+SNULL, 140321212071935, 140321279180799,
+STORE, 140321144963072, 140321212071935,
+STORE, 140321212071936, 140321279180799,
+ERASE, 140321212071936, 140321279180799,
+SNULL, 140321145098239, 140321212071935,
+STORE, 140321144963072, 140321145098239,
+STORE, 140321145098240, 140321212071935,
+SNULL, 140320876662784, 140321010745343,
+STORE, 140321010745344, 140321077854207,
+STORE, 140320876662784, 140321010745343,
+SNULL, 140321010880511, 140321077854207,
+STORE, 140321010745344, 140321010880511,
+STORE, 140321010880512, 140321077854207,
+SNULL, 140321354616832, 140321379794943,
+STORE, 140321379794944, 140321404973055,
+STORE, 140321354616832, 140321379794943,
+SNULL, 140321379799039, 140321404973055,
+STORE, 140321379794944, 140321379799039,
+STORE, 140321379799040, 140321404973055,
+SNULL, 140320876662784, 140320943636479,
+STORE, 140320943636480, 140321010745343,
+STORE, 140320876662784, 140320943636479,
+SNULL, 140320943771647, 140321010745343,
+STORE, 140320943636480, 140320943771647,
+STORE, 140320943771648, 140321010745343,
+SNULL, 140320809418751, 140320876527615,
+STORE, 140320675336192, 140320809418751,
+STORE, 140320809418752, 140320876527615,
+ERASE, 140320809418752, 140320876527615,
+SNULL, 140320675336192, 140320742309887,
+STORE, 140320742309888, 140320809418751,
+STORE, 140320675336192, 140320742309887,
+SNULL, 140320742445055, 140320809418751,
+STORE, 140320742309888, 140320742445055,
+STORE, 140320742445056, 140320809418751,
+SNULL, 140320608227327, 140320675201023,
+STORE, 140320608092160, 140320608227327,
+STORE, 140320608227328, 140320675201023,
+SNULL, 140320457089024, 140320473874431,
+STORE, 140320473874432, 140320591306751,
+STORE, 140320457089024, 140320473874431,
+ERASE, 140320457089024, 140320473874431,
+SNULL, 140320540983295, 140320591306751,
+STORE, 140320473874432, 140320540983295,
+STORE, 140320540983296, 140320591306751,
+ERASE, 140320540983296, 140320591306751,
+SNULL, 140320314478592, 140320339656703,
+STORE, 140320339656704, 140320448696319,
+STORE, 140320314478592, 140320339656703,
+ERASE, 140320314478592, 140320339656703,
+SNULL, 140321086214144, 140321128177663,
+STORE, 140321128177664, 140321136570367,
+STORE, 140321086214144, 140321128177663,
+SNULL, 140321128181759, 140321136570367,
+STORE, 140321128177664, 140321128181759,
+STORE, 140321128181760, 140321136570367,
+SNULL, 140321354616832, 140321371402239,
+STORE, 140321371402240, 140321379794943,
+STORE, 140321354616832, 140321371402239,
+SNULL, 140321371406335, 140321379794943,
+STORE, 140321371402240, 140321371406335,
+STORE, 140321371406336, 140321379794943,
+SNULL, 140320591310847, 140320608092159,
+STORE, 140320591306752, 140320591310847,
+STORE, 140320591310848, 140320608092159,
+SNULL, 140321354616832, 140321363009535,
+STORE, 140321363009536, 140321371402239,
+STORE, 140321354616832, 140321363009535,
+SNULL, 140321363013631, 140321371402239,
+STORE, 140321363009536, 140321363013631,
+STORE, 140321363013632, 140321371402239,
+SNULL, 140321086214144, 140321119784959,
+STORE, 140321119784960, 140321128177663,
+STORE, 140321086214144, 140321119784959,
+SNULL, 140321119789055, 140321128177663,
+STORE, 140321119784960, 140321119789055,
+STORE, 140321119789056, 140321128177663,
+SNULL, 140321086218239, 140321119784959,
+STORE, 140321086214144, 140321086218239,
+STORE, 140321086218240, 140321119784959,
+SNULL, 140321086218240, 140321094606847,
+STORE, 140321094606848, 140321119784959,
+STORE, 140321086218240, 140321094606847,
+SNULL, 140321094610943, 140321119784959,
+STORE, 140321094606848, 140321094610943,
+STORE, 140321094610944, 140321119784959,
+SNULL, 140320474009599, 140320540983295,
+STORE, 140320473874432, 140320474009599,
+STORE, 140320474009600, 140320540983295,
+SNULL, 140320406765567, 140320448696319,
+STORE, 140320339656704, 140320406765567,
+STORE, 140320406765568, 140320448696319,
+ERASE, 140320406765568, 140320448696319,
+SNULL, 140320339791871, 140320406765567,
+STORE, 140320339656704, 140320339791871,
+STORE, 140320339791872, 140320406765567,
+STORE, 140321270788096, 140321279180799,
+STORE, 140321262395392, 140321279180799,
+STORE, 140321254002688, 140321279180799,
+SNULL, 140321254002688, 140321262395391,
+STORE, 140321262395392, 140321279180799,
+STORE, 140321254002688, 140321262395391,
+SNULL, 140321262399487, 140321279180799,
+STORE, 140321262395392, 140321262399487,
+STORE, 140321262399488, 140321279180799,
+STORE, 140321245609984, 140321262395391,
+STORE, 140321237217280, 140321262395391,
+SNULL, 140321237217280, 140321245609983,
+STORE, 140321245609984, 140321262395391,
+STORE, 140321237217280, 140321245609983,
+SNULL, 140321245614079, 140321262395391,
+STORE, 140321245609984, 140321245614079,
+STORE, 140321245614080, 140321262395391,
+SNULL, 140321379799040, 140321388187647,
+STORE, 140321388187648, 140321404973055,
+STORE, 140321379799040, 140321388187647,
+SNULL, 140321388191743, 140321404973055,
+STORE, 140321388187648, 140321388191743,
+STORE, 140321388191744, 140321404973055,
+SNULL, 140321354620927, 140321363009535,
+STORE, 140321354616832, 140321354620927,
+STORE, 140321354620928, 140321363009535,
+SNULL, 140321388191744, 140321396580351,
+STORE, 140321396580352, 140321404973055,
+STORE, 140321388191744, 140321396580351,
+SNULL, 140321396584447, 140321404973055,
+STORE, 140321396580352, 140321396584447,
+STORE, 140321396584448, 140321404973055,
+SNULL, 140321094610944, 140321111392255,
+STORE, 140321111392256, 140321119784959,
+STORE, 140321094610944, 140321111392255,
+SNULL, 140321111396351, 140321119784959,
+STORE, 140321111392256, 140321111396351,
+STORE, 140321111396352, 140321119784959,
+STORE, 140321228824576, 140321245609983,
+SNULL, 140321094610944, 140321102999551,
+STORE, 140321102999552, 140321111392255,
+STORE, 140321094610944, 140321102999551,
+SNULL, 140321103003647, 140321111392255,
+STORE, 140321102999552, 140321103003647,
+STORE, 140321103003648, 140321111392255,
+STORE, 140321220431872, 140321245609983,
+SNULL, 140321220435967, 140321245609983,
+STORE, 140321220431872, 140321220435967,
+STORE, 140321220435968, 140321245609983,
+STORE, 140320868134912, 140320876527615,
+SNULL, 140320868139007, 140320876527615,
+STORE, 140320868134912, 140320868139007,
+STORE, 140320868139008, 140320876527615,
+SNULL, 140320591310848, 140320599699455,
+STORE, 140320599699456, 140320608092159,
+STORE, 140320591310848, 140320599699455,
+SNULL, 140320599703551, 140320608092159,
+STORE, 140320599699456, 140320599703551,
+STORE, 140320599703552, 140320608092159,
+STORE, 140320859742208, 140320868134911,
+SNULL, 140321262399488, 140321270788095,
+STORE, 140321270788096, 140321279180799,
+STORE, 140321262399488, 140321270788095,
+SNULL, 140321270792191, 140321279180799,
+STORE, 140321270788096, 140321270792191,
+STORE, 140321270792192, 140321279180799,
+STORE, 140320851349504, 140320868134911,
+STORE, 140320842956800, 140320868134911,
+STORE, 140320834564096, 140320868134911,
+STORE, 140320826171392, 140320868134911,
+SNULL, 140320826171392, 140320834564095,
+STORE, 140320834564096, 140320868134911,
+STORE, 140320826171392, 140320834564095,
+SNULL, 140320834568191, 140320868134911,
+STORE, 140320834564096, 140320834568191,
+STORE, 140320834568192, 140320868134911,
+SNULL, 140321220435968, 140321228824575,
+STORE, 140321228824576, 140321245609983,
+STORE, 140321220435968, 140321228824575,
+SNULL, 140321228828671, 140321245609983,
+STORE, 140321228824576, 140321228828671,
+STORE, 140321228828672, 140321245609983,
+STORE, 140320817778688, 140320834564095,
+SNULL, 140320817782783, 140320834564095,
+STORE, 140320817778688, 140320817782783,
+STORE, 140320817782784, 140320834564095,
+STORE, 140320582914048, 140320591306751,
+SNULL, 140321228828672, 140321237217279,
+STORE, 140321237217280, 140321245609983,
+STORE, 140321228828672, 140321237217279,
+SNULL, 140321237221375, 140321245609983,
+STORE, 140321237217280, 140321237221375,
+STORE, 140321237221376, 140321245609983,
+SNULL, 140320448700415, 140320457089023,
+STORE, 140320448696320, 140320448700415,
+STORE, 140320448700416, 140320457089023,
+SNULL, 140321245614080, 140321254002687,
+STORE, 140321254002688, 140321262395391,
+STORE, 140321245614080, 140321254002687,
+SNULL, 140321254006783, 140321262395391,
+STORE, 140321254002688, 140321254006783,
+STORE, 140321254006784, 140321262395391,
+STORE, 140320574521344, 140320591306751,
+SNULL, 140320574525439, 140320591306751,
+STORE, 140320574521344, 140320574525439,
+STORE, 140320574525440, 140320591306751,
+STORE, 140320566128640, 140320574521343,
+SNULL, 140320566132735, 140320574521343,
+STORE, 140320566128640, 140320566132735,
+STORE, 140320566132736, 140320574521343,
+SNULL, 140320574525440, 140320582914047,
+STORE, 140320582914048, 140320591306751,
+STORE, 140320574525440, 140320582914047,
+SNULL, 140320582918143, 140320591306751,
+STORE, 140320582914048, 140320582918143,
+STORE, 140320582918144, 140320591306751,
+STORE, 140320557735936, 140320566128639,
+SNULL, 140320557740031, 140320566128639,
+STORE, 140320557735936, 140320557740031,
+STORE, 140320557740032, 140320566128639,
+STORE, 140320549343232, 140320557735935,
+STORE, 140320465481728, 140320473874431,
+STORE, 140320448700416, 140320473874431,
+SNULL, 140320834568192, 140320859742207,
+STORE, 140320859742208, 140320868134911,
+STORE, 140320834568192, 140320859742207,
+SNULL, 140320859746303, 140320868134911,
+STORE, 140320859742208, 140320859746303,
+STORE, 140320859746304, 140320868134911,
+STORE, 140320440303616, 140320448696319,
+STORE, 140320431910912, 140320448696319,
+SNULL, 140320834568192, 140320851349503,
+STORE, 140320851349504, 140320859742207,
+STORE, 140320834568192, 140320851349503,
+SNULL, 140320851353599, 140320859742207,
+STORE, 140320851349504, 140320851353599,
+STORE, 140320851353600, 140320859742207,
+SNULL, 140320817782784, 140320826171391,
+STORE, 140320826171392, 140320834564095,
+STORE, 140320817782784, 140320826171391,
+SNULL, 140320826175487, 140320834564095,
+STORE, 140320826171392, 140320826175487,
+STORE, 140320826175488, 140320834564095,
+SNULL, 140320834568192, 140320842956799,
+STORE, 140320842956800, 140320851349503,
+STORE, 140320834568192, 140320842956799,
+SNULL, 140320842960895, 140320851349503,
+STORE, 140320842956800, 140320842960895,
+STORE, 140320842960896, 140320851349503,
+STORE, 140320423518208, 140320448696319,
+SNULL, 140320423522303, 140320448696319,
+STORE, 140320423518208, 140320423522303,
+STORE, 140320423522304, 140320448696319,
+STORE, 140320415125504, 140320423518207,
+STORE, 140320331264000, 140320339656703,
+STORE, 140320322871296, 140320339656703,
+STORE, 140320314478592, 140320339656703,
+SNULL, 140320314482687, 140320339656703,
+STORE, 140320314478592, 140320314482687,
+STORE, 140320314482688, 140320339656703,
+STORE, 140320306085888, 140320314478591,
+SNULL, 140320306089983, 140320314478591,
+STORE, 140320306085888, 140320306089983,
+STORE, 140320306089984, 140320314478591,
+STORE, 140320297693184, 140320306085887,
+SNULL, 140320297697279, 140320306085887,
+STORE, 140320297693184, 140320297697279,
+STORE, 140320297697280, 140320306085887,
+STORE, 140320289300480, 140320297693183,
+STORE, 140320280907776, 140320297693183,
+SNULL, 140320280911871, 140320297693183,
+STORE, 140320280907776, 140320280911871,
+STORE, 140320280911872, 140320297693183,
+SNULL, 140320423522304, 140320431910911,
+STORE, 140320431910912, 140320448696319,
+STORE, 140320423522304, 140320431910911,
+SNULL, 140320431915007, 140320448696319,
+STORE, 140320431910912, 140320431915007,
+STORE, 140320431915008, 140320448696319,
+SNULL, 140320549347327, 140320557735935,
+STORE, 140320549343232, 140320549347327,
+STORE, 140320549347328, 140320557735935,
+STORE, 140320272515072, 140320280907775,
+SNULL, 140320448700416, 140320457089023,
+STORE, 140320457089024, 140320473874431,
+STORE, 140320448700416, 140320457089023,
+SNULL, 140320457093119, 140320473874431,
+STORE, 140320457089024, 140320457093119,
+STORE, 140320457093120, 140320473874431,
+STORE, 140320264122368, 140320280907775,
+SNULL, 140320457093120, 140320465481727,
+STORE, 140320465481728, 140320473874431,
+STORE, 140320457093120, 140320465481727,
+SNULL, 140320465485823, 140320473874431,
+STORE, 140320465481728, 140320465485823,
+STORE, 140320465485824, 140320473874431,
+SNULL, 140320431915008, 140320440303615,
+STORE, 140320440303616, 140320448696319,
+STORE, 140320431915008, 140320440303615,
+SNULL, 140320440307711, 140320448696319,
+STORE, 140320440303616, 140320440307711,
+STORE, 140320440307712, 140320448696319,
+STORE, 140320255729664, 140320280907775,
+STORE, 140320247336960, 140320280907775,
+SNULL, 140320247341055, 140320280907775,
+STORE, 140320247336960, 140320247341055,
+STORE, 140320247341056, 140320280907775,
+STORE, 140320238944256, 140320247336959,
+STORE, 140320230551552, 140320247336959,
+SNULL, 140320230551552, 140320238944255,
+STORE, 140320238944256, 140320247336959,
+STORE, 140320230551552, 140320238944255,
+SNULL, 140320238948351, 140320247336959,
+STORE, 140320238944256, 140320238948351,
+STORE, 140320238948352, 140320247336959,
+SNULL, 140320314482688, 140320331263999,
+STORE, 140320331264000, 140320339656703,
+STORE, 140320314482688, 140320331263999,
+SNULL, 140320331268095, 140320339656703,
+STORE, 140320331264000, 140320331268095,
+STORE, 140320331268096, 140320339656703,
+SNULL, 140320280911872, 140320289300479,
+STORE, 140320289300480, 140320297693183,
+STORE, 140320280911872, 140320289300479,
+SNULL, 140320289304575, 140320297693183,
+STORE, 140320289300480, 140320289304575,
+STORE, 140320289304576, 140320297693183,
+SNULL, 140320415129599, 140320423518207,
+STORE, 140320415125504, 140320415129599,
+STORE, 140320415129600, 140320423518207,
+STORE, 140320222158848, 140320238944255,
+STORE, 140320213766144, 140320238944255,
+STORE, 140320205373440, 140320238944255,
+SNULL, 140320205377535, 140320238944255,
+STORE, 140320205373440, 140320205377535,
+STORE, 140320205377536, 140320238944255,
+SNULL, 140320314482688, 140320322871295,
+STORE, 140320322871296, 140320331263999,
+STORE, 140320314482688, 140320322871295,
+SNULL, 140320322875391, 140320331263999,
+STORE, 140320322871296, 140320322875391,
+STORE, 140320322875392, 140320331263999,
+SNULL, 140320247341056, 140320272515071,
+STORE, 140320272515072, 140320280907775,
+STORE, 140320247341056, 140320272515071,
+SNULL, 140320272519167, 140320280907775,
+STORE, 140320272515072, 140320272519167,
+STORE, 140320272519168, 140320280907775,
+SNULL, 140320247341056, 140320264122367,
+STORE, 140320264122368, 140320272515071,
+STORE, 140320247341056, 140320264122367,
+SNULL, 140320264126463, 140320272515071,
+STORE, 140320264122368, 140320264126463,
+STORE, 140320264126464, 140320272515071,
+SNULL, 140320205377536, 140320230551551,
+STORE, 140320230551552, 140320238944255,
+STORE, 140320205377536, 140320230551551,
+SNULL, 140320230555647, 140320238944255,
+STORE, 140320230551552, 140320230555647,
+STORE, 140320230555648, 140320238944255,
+STORE, 140320196980736, 140320205373439,
+SNULL, 140320196984831, 140320205373439,
+STORE, 140320196980736, 140320196984831,
+STORE, 140320196984832, 140320205373439,
+STORE, 140320188588032, 140320196980735,
+SNULL, 140320247341056, 140320255729663,
+STORE, 140320255729664, 140320264122367,
+STORE, 140320247341056, 140320255729663,
+SNULL, 140320255733759, 140320264122367,
+STORE, 140320255729664, 140320255733759,
+STORE, 140320255733760, 140320264122367,
+STORE, 140320180195328, 140320196980735,
+SNULL, 140320180199423, 140320196980735,
+STORE, 140320180195328, 140320180199423,
+STORE, 140320180199424, 140320196980735,
+STORE, 140320171802624, 140320180195327,
+STORE, 140320163409920, 140320180195327,
+SNULL, 140320163414015, 140320180195327,
+STORE, 140320163409920, 140320163414015,
+STORE, 140320163414016, 140320180195327,
+SNULL, 140320205377536, 140320222158847,
+STORE, 140320222158848, 140320230551551,
+STORE, 140320205377536, 140320222158847,
+SNULL, 140320222162943, 140320230551551,
+STORE, 140320222158848, 140320222162943,
+STORE, 140320222162944, 140320230551551,
+SNULL, 140320205377536, 140320213766143,
+STORE, 140320213766144, 140320222158847,
+STORE, 140320205377536, 140320213766143,
+SNULL, 140320213770239, 140320222158847,
+STORE, 140320213766144, 140320213770239,
+STORE, 140320213770240, 140320222158847,
+STORE, 140320155017216, 140320163409919,
+SNULL, 140320180199424, 140320188588031,
+STORE, 140320188588032, 140320196980735,
+STORE, 140320180199424, 140320188588031,
+SNULL, 140320188592127, 140320196980735,
+STORE, 140320188588032, 140320188592127,
+STORE, 140320188592128, 140320196980735,
+SNULL, 140320155021311, 140320163409919,
+STORE, 140320155017216, 140320155021311,
+STORE, 140320155021312, 140320163409919,
+SNULL, 140320163414016, 140320171802623,
+STORE, 140320171802624, 140320180195327,
+STORE, 140320163414016, 140320171802623,
+SNULL, 140320171806719, 140320180195327,
+STORE, 140320171802624, 140320171806719,
+STORE, 140320171806720, 140320180195327,
+STORE, 140320146624512, 140320155017215,
+SNULL, 140320146628607, 140320155017215,
+STORE, 140320146624512, 140320146628607,
+STORE, 140320146628608, 140320155017215,
+STORE, 140321937321984, 140321937350655,
+STORE, 140321884942336, 140321887133695,
+SNULL, 140321884942336, 140321885032447,
+STORE, 140321885032448, 140321887133695,
+STORE, 140321884942336, 140321885032447,
+SNULL, 140321887125503, 140321887133695,
+STORE, 140321885032448, 140321887125503,
+STORE, 140321887125504, 140321887133695,
+ERASE, 140321887125504, 140321887133695,
+STORE, 140321887125504, 140321887133695,
+SNULL, 140321887129599, 140321887133695,
+STORE, 140321887125504, 140321887129599,
+STORE, 140321887129600, 140321887133695,
+ERASE, 140321937321984, 140321937350655,
+ERASE, 140321086214144, 140321086218239,
+ERASE, 140321086218240, 140321094606847,
+ERASE, 140321119784960, 140321119789055,
+ERASE, 140321119789056, 140321128177663,
+ERASE, 140321245609984, 140321245614079,
+ERASE, 140321245614080, 140321254002687,
+ERASE, 140320574521344, 140320574525439,
+ERASE, 140320574525440, 140320582914047,
+ERASE, 140320297693184, 140320297697279,
+ERASE, 140320297697280, 140320306085887,
+ERASE, 140321354616832, 140321354620927,
+ERASE, 140321354620928, 140321363009535,
+ERASE, 140320834564096, 140320834568191,
+ERASE, 140320834568192, 140320842956799,
+ERASE, 140320591306752, 140320591310847,
+ERASE, 140320591310848, 140320599699455,
+ERASE, 140321136570368, 140321136574463,
+ERASE, 140321136574464, 140321144963071,
+ERASE, 140321237217280, 140321237221375,
+ERASE, 140321237221376, 140321245609983,
+ERASE, 140321363009536, 140321363013631,
+ERASE, 140321363013632, 140321371402239,
+ERASE, 140320599699456, 140320599703551,
+ERASE, 140320599703552, 140320608092159,
+ERASE, 140321396580352, 140321396584447,
+ERASE, 140321396584448, 140321404973055,
+ERASE, 140320566128640, 140320566132735,
+ERASE, 140320566132736, 140320574521343,
+ERASE, 140321094606848, 140321094610943,
+ERASE, 140321094610944, 140321102999551,
+ERASE, 140320582914048, 140320582918143,
+ERASE, 140320582918144, 140320591306751,
+ERASE, 140320289300480, 140320289304575,
+ERASE, 140320289304576, 140320297693183,
+ERASE, 140320163409920, 140320163414015,
+ };
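+ /*
+  * set41 (below): like the trace above, presumably a verbatim recording of
+  * (op, start, end) triplets -- STORE/SNULL/ERASE events captured from live
+  * VMA activity and replayed as a regression fixture. The addresses are raw
+  * recorded data, not hand-chosen values, so they are left untouched.
+  */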
+ static const unsigned long set41[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140728157171712, 140737488351231,
+SNULL, 140728157175807, 140737488351231,
+STORE, 140728157171712, 140728157175807,
+STORE, 140728157040640, 140728157175807,
+STORE, 94376106364928, 94376108613631,
+SNULL, 94376106487807, 94376108613631,
+STORE, 94376106364928, 94376106487807,
+STORE, 94376106487808, 94376108613631,
+SNULL, 94376106487808, 94376108613631,
+STORE, 94376108584960, 94376108593151,
+STORE, 94376108593152, 94376108613631,
+STORE, 140113496432640, 140113498685439,
+SNULL, 140113496575999, 140113498685439,
+STORE, 140113496432640, 140113496575999,
+STORE, 140113496576000, 140113498685439,
+SNULL, 140113496576000, 140113498685439,
+STORE, 140113498673152, 140113498681343,
+STORE, 140113498681344, 140113498685439,
+STORE, 140728157609984, 140728157618175,
+STORE, 140728157593600, 140728157609983,
+STORE, 140113498636288, 140113498673151,
+STORE, 140113498628096, 140113498636287,
+STORE, 140113492635648, 140113496432639,
+SNULL, 140113492635648, 140113494294527,
+STORE, 140113494294528, 140113496432639,
+STORE, 140113492635648, 140113494294527,
+SNULL, 140113496391679, 140113496432639,
+STORE, 140113494294528, 140113496391679,
+STORE, 140113496391680, 140113496432639,
+SNULL, 140113496391680, 140113496416255,
+STORE, 140113496416256, 140113496432639,
+STORE, 140113496391680, 140113496416255,
+SNULL, 140113496391680, 140113496416255,
+STORE, 140113496391680, 140113496416255,
+SNULL, 140113496416256, 140113496432639,
+STORE, 140113496416256, 140113496432639,
+SNULL, 140113496408063, 140113496416255,
+STORE, 140113496391680, 140113496408063,
+STORE, 140113496408064, 140113496416255,
+SNULL, 94376108589055, 94376108593151,
+STORE, 94376108584960, 94376108589055,
+STORE, 94376108589056, 94376108593151,
+SNULL, 140113498677247, 140113498681343,
+STORE, 140113498673152, 140113498677247,
+STORE, 140113498677248, 140113498681343,
+SNULL, 140113498636288, 140113498673151,
+STORE, 94376135090176, 94376135094271,
+STORE, 94376135090176, 94376135098367,
+STORE, 94376139288576, 94376139292671,
+STORE, 94376143482880, 94376143486975,
+STORE, 94376147677184, 94376147681279,
+STORE, 94376151871488, 94376151875583,
+STORE, 94376156065792, 94376156069887,
+STORE, 94376160260096, 94376160264191,
+STORE, 94376164454400, 94376164458495,
+STORE, 94376168648704, 94376168652799,
+STORE, 94376172843008, 94376172847103,
+STORE, 94376177037312, 94376177041407,
+STORE, 94376181231616, 94376181235711,
+STORE, 94376185425920, 94376185430015,
+STORE, 94376189620224, 94376189624319,
+STORE, 94376193814528, 94376193818623,
+STORE, 94376198008832, 94376198012927,
+STORE, 94376202203136, 94376202207231,
+STORE, 94376206397440, 94376206401535,
+STORE, 94376210591744, 94376210595839,
+STORE, 94376214786048, 94376214790143,
+STORE, 94376218980352, 94376218984447,
+STORE, 94376223174656, 94376223178751,
+STORE, 94376227368960, 94376227373055,
+STORE, 94376231563264, 94376231567359,
+STORE, 94376235757568, 94376235761663,
+STORE, 94376239951872, 94376239955967,
+STORE, 94376244146176, 94376244150271,
+STORE, 94376248340480, 94376248344575,
+STORE, 94376252534784, 94376252538879,
+STORE, 94376256729088, 94376256733183,
+STORE, 94376260923392, 94376260927487,
+STORE, 94376265117696, 94376265121791,
+STORE, 94376269312000, 94376269316095,
+STORE, 94376273506304, 94376273510399,
+STORE, 94376277700608, 94376277704703,
+STORE, 94376281894912, 94376281899007,
+STORE, 94376286089216, 94376286093311,
+STORE, 94376290283520, 94376290287615,
+STORE, 94376294477824, 94376294481919,
+STORE, 94376298672128, 94376298676223,
+STORE, 94376302866432, 94376302870527,
+STORE, 94376307060736, 94376307064831,
+STORE, 94376311255040, 94376311259135,
+STORE, 94376315449344, 94376315453439,
+STORE, 94376319643648, 94376319647743,
+STORE, 94376323837952, 94376323842047,
+STORE, 94376328032256, 94376328036351,
+STORE, 94376332226560, 94376332230655,
+STORE, 94376336420864, 94376336424959,
+STORE, 94376340615168, 94376340619263,
+STORE, 94376344809472, 94376344813567,
+STORE, 94376349003776, 94376349007871,
+STORE, 94376353198080, 94376353202175,
+STORE, 94376357392384, 94376357396479,
+STORE, 94376361586688, 94376361590783,
+STORE, 94376365780992, 94376365785087,
+STORE, 94376369975296, 94376369979391,
+STORE, 94376374169600, 94376374173695,
+STORE, 94376378363904, 94376378367999,
+STORE, 94376382558208, 94376382562303,
+STORE, 94376386752512, 94376386756607,
+STORE, 94376390946816, 94376390950911,
+STORE, 94376395141120, 94376395145215,
+STORE, 94376399335424, 94376399339519,
+STORE, 94376403529728, 94376403533823,
+STORE, 94376407724032, 94376407728127,
+STORE, 94376411918336, 94376411922431,
+STORE, 94376416112640, 94376416116735,
+STORE, 94376420306944, 94376420311039,
+STORE, 94376424501248, 94376424505343,
+STORE, 94376428695552, 94376428699647,
+STORE, 94376432889856, 94376432893951,
+STORE, 94376437084160, 94376437088255,
+STORE, 94376441278464, 94376441282559,
+STORE, 94376445472768, 94376445476863,
+STORE, 94376449667072, 94376449671167,
+STORE, 94376453861376, 94376453865471,
+STORE, 94376458055680, 94376458059775,
+STORE, 94376462249984, 94376462254079,
+STORE, 94376466444288, 94376466448383,
+STORE, 94376470638592, 94376470642687,
+STORE, 94376474832896, 94376474836991,
+STORE, 94376479027200, 94376479031295,
+STORE, 94376483221504, 94376483225599,
+STORE, 94376487415808, 94376487419903,
+STORE, 94376491610112, 94376491614207,
+STORE, 94376495804416, 94376495808511,
+STORE, 94376499998720, 94376500002815,
+STORE, 94376504193024, 94376504197119,
+STORE, 94376508387328, 94376508391423,
+STORE, 94376512581632, 94376512585727,
+STORE, 94376516775936, 94376516780031,
+STORE, 94376520970240, 94376520974335,
+STORE, 94376525164544, 94376525168639,
+STORE, 94376529358848, 94376529362943,
+STORE, 94376533553152, 94376533557247,
+STORE, 94376537747456, 94376537751551,
+STORE, 94376541941760, 94376541945855,
+STORE, 94376546136064, 94376546140159,
+STORE, 94376550330368, 94376550334463,
+STORE, 94376554524672, 94376554528767,
+STORE, 94376558718976, 94376558723071,
+STORE, 94376562913280, 94376562917375,
+STORE, 94376567107584, 94376567111679,
+STORE, 94376571301888, 94376571305983,
+STORE, 94376575496192, 94376575500287,
+STORE, 94376579690496, 94376579694591,
+STORE, 94376583884800, 94376583888895,
+STORE, 94376588079104, 94376588083199,
+STORE, 94376592273408, 94376592277503,
+STORE, 94376596467712, 94376596471807,
+STORE, 94376600662016, 94376600666111,
+STORE, 94376604856320, 94376604860415,
+STORE, 94376609050624, 94376609054719,
+STORE, 94376613244928, 94376613249023,
+STORE, 94376617439232, 94376617443327,
+STORE, 94376621633536, 94376621637631,
+STORE, 94376625827840, 94376625831935,
+STORE, 94376630022144, 94376630026239,
+STORE, 94376634216448, 94376634220543,
+STORE, 94376638410752, 94376638414847,
+STORE, 94376642605056, 94376642609151,
+STORE, 94376646799360, 94376646803455,
+STORE, 94376650993664, 94376650997759,
+STORE, 94376655187968, 94376655192063,
+STORE, 94376659382272, 94376659386367,
+STORE, 94376663576576, 94376663580671,
+STORE, 94376667770880, 94376667774975,
+STORE, 94376671965184, 94376671969279,
+STORE, 94376676159488, 94376676163583,
+STORE, 94376680353792, 94376680357887,
+STORE, 94376684548096, 94376684552191,
+STORE, 94376688742400, 94376688746495,
+STORE, 94376692936704, 94376692940799,
+STORE, 94376697131008, 94376697135103,
+STORE, 94376701325312, 94376701329407,
+STORE, 94376705519616, 94376705523711,
+STORE, 94376709713920, 94376709718015,
+STORE, 94376713908224, 94376713912319,
+STORE, 94376718102528, 94376718106623,
+STORE, 94376722296832, 94376722300927,
+STORE, 94376726491136, 94376726495231,
+STORE, 94376730685440, 94376730689535,
+STORE, 94376734879744, 94376734883839,
+STORE, 94376739074048, 94376739078143,
+STORE, 94376743268352, 94376743272447,
+STORE, 94376747462656, 94376747466751,
+STORE, 94376751656960, 94376751661055,
+STORE, 94376755851264, 94376755855359,
+STORE, 94376760045568, 94376760049663,
+STORE, 94376764239872, 94376764243967,
+STORE, 94376768434176, 94376768438271,
+STORE, 94376772628480, 94376772632575,
+STORE, 94376776822784, 94376776826879,
+STORE, 94376781017088, 94376781021183,
+STORE, 94376785211392, 94376785215487,
+STORE, 94376789405696, 94376789409791,
+STORE, 94376793600000, 94376793604095,
+STORE, 94376797794304, 94376797798399,
+STORE, 94376801988608, 94376801992703,
+STORE, 94376806182912, 94376806187007,
+STORE, 94376810377216, 94376810381311,
+STORE, 94376814571520, 94376814575615,
+STORE, 94376818765824, 94376818769919,
+STORE, 94376822960128, 94376822964223,
+STORE, 94376827154432, 94376827158527,
+STORE, 94376831348736, 94376831352831,
+STORE, 94376835543040, 94376835547135,
+STORE, 94376839737344, 94376839741439,
+STORE, 94376843931648, 94376843935743,
+STORE, 94376848125952, 94376848130047,
+STORE, 94376852320256, 94376852324351,
+STORE, 94376856514560, 94376856518655,
+STORE, 94376860708864, 94376860712959,
+STORE, 94376864903168, 94376864907263,
+STORE, 94376869097472, 94376869101567,
+STORE, 94376873291776, 94376873295871,
+STORE, 94376877486080, 94376877490175,
+STORE, 94376881680384, 94376881684479,
+STORE, 94376885874688, 94376885878783,
+STORE, 94376890068992, 94376890073087,
+STORE, 94376894263296, 94376894267391,
+STORE, 94376898457600, 94376898461695,
+STORE, 94376902651904, 94376902655999,
+STORE, 94376906846208, 94376906850303,
+STORE, 94376911040512, 94376911044607,
+STORE, 94376915234816, 94376915238911,
+STORE, 94376919429120, 94376919433215,
+STORE, 94376923623424, 94376923627519,
+STORE, 94376927817728, 94376927821823,
+STORE, 94376932012032, 94376932016127,
+STORE, 94376936206336, 94376936210431,
+STORE, 94376940400640, 94376940404735,
+STORE, 94376944594944, 94376944599039,
+STORE, 94376948789248, 94376948793343,
+STORE, 94376952983552, 94376952987647,
+STORE, 94376957177856, 94376957181951,
+STORE, 94376961372160, 94376961376255,
+STORE, 94376965566464, 94376965570559,
+STORE, 94376969760768, 94376969764863,
+STORE, 94376973955072, 94376973959167,
+STORE, 94376978149376, 94376978153471,
+STORE, 94376982343680, 94376982347775,
+STORE, 94376986537984, 94376986542079,
+STORE, 94376990732288, 94376990736383,
+STORE, 94376994926592, 94376994930687,
+STORE, 94376999120896, 94376999124991,
+STORE, 94377003315200, 94377003319295,
+STORE, 94377007509504, 94377007513599,
+STORE, 94377011703808, 94377011707903,
+STORE, 94377015898112, 94377015902207,
+STORE, 94377020092416, 94377020096511,
+STORE, 94377024286720, 94377024290815,
+STORE, 94377028481024, 94377028485119,
+STORE, 94377032675328, 94377032679423,
+STORE, 94377036869632, 94377036873727,
+STORE, 94377041063936, 94377041068031,
+STORE, 94377045258240, 94377045262335,
+STORE, 94377049452544, 94377049456639,
+STORE, 94377053646848, 94377053650943,
+STORE, 94377057841152, 94377057845247,
+STORE, 94377062035456, 94377062039551,
+STORE, 94377066229760, 94377066233855,
+STORE, 94377070424064, 94377070428159,
+STORE, 94377074618368, 94377074622463,
+STORE, 94377078812672, 94377078816767,
+STORE, 94377083006976, 94377083011071,
+STORE, 94377087201280, 94377087205375,
+STORE, 94377091395584, 94377091399679,
+STORE, 94377095589888, 94377095593983,
+STORE, 94377099784192, 94377099788287,
+STORE, 94377103978496, 94377103982591,
+STORE, 94377108172800, 94377108176895,
+STORE, 94377112367104, 94377112371199,
+STORE, 94377116561408, 94377116565503,
+STORE, 94377120755712, 94377120759807,
+STORE, 94377124950016, 94377124954111,
+STORE, 94377129144320, 94377129148415,
+STORE, 94377133338624, 94377133342719,
+STORE, 94377137532928, 94377137537023,
+STORE, 94377141727232, 94377141731327,
+STORE, 94377145921536, 94377145925631,
+STORE, 94377150115840, 94377150119935,
+STORE, 94377154310144, 94377154314239,
+STORE, 94377158504448, 94377158508543,
+STORE, 94377162698752, 94377162702847,
+STORE, 94377166893056, 94377166897151,
+STORE, 94377171087360, 94377171091455,
+STORE, 94377175281664, 94377175285759,
+STORE, 94377179475968, 94377179480063,
+STORE, 94377183670272, 94377183674367,
+STORE, 94377187864576, 94377187868671,
+STORE, 94377192058880, 94377192062975,
+STORE, 94377196253184, 94377196257279,
+STORE, 94377200447488, 94377200451583,
+STORE, 94377204641792, 94377204645887,
+SNULL, 94376135094271, 94376135098367,
+STORE, 94376135090176, 94376135094271,
+STORE, 94376135094272, 94376135098367,
+SNULL, 94376135094272, 94377208836095,
+ };
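+ /*
+  * set42 (below): unlike the 64-bit traces above, every range here fits in
+  * the 32-bit address space, so this set presumably replays a recording from
+  * a 32-bit (or compat) process; the originating workload is undocumented.
+  */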
+ static const unsigned long set42[] = {
+STORE, 314572800, 1388314623,
+STORE, 1462157312, 1462169599,
+STORE, 1462169600, 1462185983,
+STORE, 1462185984, 1462190079,
+STORE, 1462190080, 1462194175,
+STORE, 1462194176, 1462198271,
+STORE, 1879986176, 1881800703,
+STORE, 1881800704, 1882034175,
+STORE, 1882034176, 1882193919,
+STORE, 1882193920, 1882406911,
+STORE, 1882406912, 1882451967,
+STORE, 1882451968, 1882996735,
+STORE, 1882996736, 1885892607,
+STORE, 1885892608, 1885896703,
+STORE, 1885896704, 1885904895,
+STORE, 1885904896, 1885908991,
+STORE, 1885908992, 1885913087,
+STORE, 1885913088, 1885966335,
+STORE, 1885966336, 1886232575,
+STORE, 1886232576, 1886236671,
+STORE, 1886236672, 1886240767,
+STORE, 1886240768, 1886244863,
+STORE, 1886244864, 1886248959,
+STORE, 1886248960, 1886294015,
+STORE, 1886294016, 1886494719,
+STORE, 1886494720, 1886498815,
+STORE, 1886498816, 1886502911,
+STORE, 1886502912, 1886507007,
+STORE, 1886507008, 1886511103,
+STORE, 1886511104, 1886556159,
+STORE, 1886556160, 1886629887,
+STORE, 1886629888, 1886633983,
+STORE, 1886633984, 1886638079,
+STORE, 1886638080, 1886642175,
+STORE, 1886642176, 1886646271,
+STORE, 1886646272, 1886666751,
+STORE, 1886666752, 1886670847,
+STORE, 1886670848, 1886674943,
+STORE, 1886674944, 1886679039,
+STORE, 1886679040, 1895419903,
+STORE, 1895419904, 1895550975,
+STORE, 1895550976, 1896148991,
+STORE, 1896148992, 1897189375,
+STORE, 1897189376, 1897701375,
+STORE, 1897701376, 1897803775,
+STORE, 1897803776, 1897816063,
+STORE, 1897816064, 1899913215,
+STORE, 1899913216, 1909379071,
+STORE, 1909379072, 1909387263,
+STORE, 1909387264, 1909391359,
+STORE, 1909391360, 1909432319,
+STORE, 1909432320, 1909436415,
+STORE, 1909436416, 1909440511,
+STORE, 1909440512, 1909460991,
+STORE, 1909460992, 1909547007,
+STORE, 1909547008, 1909551103,
+STORE, 1909551104, 1909555199,
+STORE, 1909555200, 1909559295,
+STORE, 1909559296, 1909563391,
+STORE, 1909563392, 1909739519,
+STORE, 1909739520, 1910566911,
+STORE, 1910566912, 1910571007,
+STORE, 1910571008, 1910575103,
+STORE, 1910575104, 1910579199,
+STORE, 1910579200, 1910583295,
+STORE, 1910583296, 1910587391,
+STORE, 1910587392, 1910620159,
+STORE, 1910620160, 1910624255,
+STORE, 1910624256, 1910628351,
+STORE, 1910628352, 1910632447,
+STORE, 1910632448, 1910652927,
+STORE, 1910652928, 1910657023,
+STORE, 1910657024, 1910661119,
+STORE, 1910661120, 1910665215,
+STORE, 1910665216, 1910669311,
+STORE, 1910669312, 1910677503,
+STORE, 1910677504, 1910681599,
+STORE, 1910681600, 1910685695,
+STORE, 1910685696, 1910689791,
+STORE, 1910689792, 1910697983,
+STORE, 1910697984, 1910702079,
+STORE, 1910702080, 1910706175,
+STORE, 1910706176, 1910710271,
+STORE, 1910710272, 1914093567,
+STORE, 1914093568, 1914097663,
+STORE, 1914097664, 1969434623,
+STORE, 1969434624, 1977819135,
+STORE, 3290435584, 3426750463,
+STORE, 3426750464, 3426754559,
+STORE, 3426754560, 3426762751,
+STORE, 3426762752, 3426766847,
+STORE, 3426766848, 3426770943,
+STORE, 3427037184, 3427061759,
+STORE, 3427061760, 3427135487,
+STORE, 3427135488, 3427143679,
+STORE, 3427143680, 3427147775,
+STORE, 3427147776, 3427209215,
+STORE, 3427319808, 3432116223,
+STORE, 3432116224, 3450130431,
+STORE, 3450130432, 3451027455,
+STORE, 3451027456, 3451031551,
+STORE, 3451031552, 3451461631,
+STORE, 3451736064, 3456688127,
+STORE, 3456688128, 3475222527,
+STORE, 3475222528, 3476119551,
+STORE, 3476119552, 3476127743,
+STORE, 3476127744, 3476553727,
+STORE, 3476631552, 3477315583,
+STORE, 3477315584, 3479949311,
+STORE, 3479949312, 3480002559,
+STORE, 3480002560, 3480006655,
+STORE, 3480006656, 3480432639,
+STORE, 3480539136, 3480543231,
+STORE, 3480543232, 3480547327,
+STORE, 3480547328, 3480555519,
+STORE, 3480854528, 3480903679,
+STORE, 3480903680, 3480969215,
+STORE, 3480969216, 3480977407,
+STORE, 3480977408, 3480981503,
+STORE, 3481030656, 3481092095,
+STORE, 3481092096, 3481235455,
+STORE, 3481235456, 3481243647,
+STORE, 3481243648, 3481247743,
+STORE, 3481436160, 3481444351,
+STORE, 3481444352, 3481456639,
+STORE, 3481456640, 3481460735,
+STORE, 3481460736, 3481464831,
+STORE, 3481587712, 3481645055,
+STORE, 3481645056, 3481772031,
+STORE, 3481772032, 3481776127,
+STORE, 3481776128, 3481780223,
+STORE, 3481874432, 3481935871,
+STORE, 3481935872, 3482030079,
+STORE, 3482030080, 3482038271,
+STORE, 3482038272, 3482042367,
+STORE, 3482198016, 3482230783,
+STORE, 3482230784, 3482271743,
+STORE, 3482271744, 3482279935,
+STORE, 3482279936, 3482284031,
+STORE, 3482562560, 3482566655,
+STORE, 3482566656, 3482570751,
+STORE, 3482570752, 3482574847,
+STORE, 3482636288, 3482689535,
+STORE, 3482689536, 3482746879,
+STORE, 3482746880, 3482755071,
+STORE, 3482755072, 3482759167,
+STORE, 3482972160, 3483062271,
+STORE, 3483062272, 3483242495,
+STORE, 3483242496, 3483246591,
+STORE, 3483246592, 3483250687,
+STORE, 3483398144, 3483688959,
+STORE, 3483688960, 3484114943,
+STORE, 3484114944, 3484131327,
+STORE, 3484131328, 3484135423,
+STORE, 3484135424, 3484143615,
+STORE, 3484184576, 3484475391,
+STORE, 3484475392, 3485028351,
+STORE, 3485028352, 3485057023,
+STORE, 3485057024, 3485061119,
+STORE, 3485360128, 3485364223,
+STORE, 3485364224, 3485368319,
+STORE, 3485368320, 3485372415,
+STORE, 3485589504, 3485593599,
+STORE, 3485593600, 3485597695,
+STORE, 3485597696, 3485601791,
+STORE, 3485913088, 3485937663,
+STORE, 3485937664, 3485974527,
+STORE, 3485974528, 3485982719,
+STORE, 3485982720, 3485986815,
+STORE, 3486052352, 3486056447,
+STORE, 3486056448, 3486064639,
+STORE, 3486064640, 3486068735,
+STORE, 3486068736, 3486072831,
+STORE, 3486294016, 3486302207,
+STORE, 3486302208, 3486306303,
+STORE, 3486306304, 3486310399,
+STORE, 3486310400, 3486314495,
+STORE, 3486670848, 3486679039,
+STORE, 3486679040, 3486683135,
+STORE, 3486683136, 3486687231,
+STORE, 3486687232, 3486691327,
+STORE, 3486863360, 3486871551,
+STORE, 3486871552, 3486875647,
+STORE, 3486875648, 3486879743,
+STORE, 3486879744, 3486883839,
+STORE, 3487584256, 3522543615,
+STORE, 3522543616, 3523321855,
+STORE, 3523321856, 3523342335,
+STORE, 3523342336, 3523387391,
+STORE, 3523387392, 3523391487,
+STORE, 3523391488, 3523395583,
+STORE, 3523477504, 3523686399,
+STORE, 3523686400, 3523981311,
+STORE, 3523981312, 3523997695,
+STORE, 3523997696, 3524001791,
+STORE, 3524177920, 3525013503,
+STORE, 3525013504, 3526582271,
+STORE, 3526582272, 3526606847,
+STORE, 3526606848, 3526610943,
+STORE, 3526610944, 3526615039,
+STORE, 3526672384, 3526746111,
+STORE, 3526746112, 3526860799,
+STORE, 3526860800, 3526868991,
+STORE, 3526868992, 3526873087,
+STORE, 3527000064, 3527475199,
+STORE, 3527475200, 3527479295,
+STORE, 3527479296, 3527573503,
+STORE, 3527573504, 3527581695,
+STORE, 3527581696, 3527585791,
+STORE, 3527585792, 3527606271,
+STORE, 3527909376, 3527913471,
+STORE, 3527913472, 3527917567,
+STORE, 3527917568, 3527921663,
+STORE, 3527950336, 3528011775,
+STORE, 3528011776, 3528093695,
+STORE, 3528093696, 3528101887,
+STORE, 3528101888, 3528105983,
+STORE, 3528228864, 3528241151,
+STORE, 3528241152, 3528261631,
+STORE, 3528261632, 3528265727,
+STORE, 3528273920, 3528593407,
+STORE, 3528593408, 3528609791,
+STORE, 3528609792, 3528638463,
+STORE, 3528638464, 3528642559,
+STORE, 3528642560, 3528646655,
+STORE, 3528880128, 3528912895,
+STORE, 3528912896, 3528962047,
+STORE, 3528962048, 3528966143,
+STORE, 3528966144, 3528970239,
+STORE, 3528982528, 3530293247,
+STORE, 3530366976, 3530825727,
+STORE, 3530825728, 3531317247,
+STORE, 3531317248, 3541041151,
+STORE, 3541041152, 3541303295,
+STORE, 3541430272, 3566206975,
+STORE, 3566206976, 3566993407,
+STORE, 3567239168, 3587571711,
+STORE, 3587571712, 3588284415,
+STORE, 3588284416, 3588661247,
+STORE, 3588661248, 3589066751,
+STORE, 3589066752, 3589574655,
+STORE, 3589574656, 3590078463,
+STORE, 3590078464, 3590373375,
+STORE, 3590373376, 3590668287,
+STORE, 3590668288, 3590963199,
+STORE, 3590963200, 3591294975,
+STORE, 3591294976, 3591602175,
+STORE, 3591602176, 3591933951,
+STORE, 3591933952, 3592241151,
+STORE, 3592241152, 3592572927,
+STORE, 3592572928, 3592876031,
+STORE, 3592876032, 3593211903,
+STORE, 3593211904, 3593547775,
+STORE, 3593547776, 3593650175,
+STORE, 3593650176, 3593928703,
+STORE, 3593928704, 3593936895,
+STORE, 3593936896, 3593940991,
+STORE, 3594006528, 3594301439,
+STORE, 3594301440, 3594739711,
+STORE, 3594739712, 3594756095,
+STORE, 3594756096, 3594760191,
+STORE, 3594760192, 3594768383,
+STORE, 3594952704, 3595051007,
+STORE, 3595051008, 3595223039,
+STORE, 3595223040, 3595227135,
+STORE, 3595227136, 3595235327,
+STORE, 3595431936, 3595775999,
+STORE, 3595776000, 3596701695,
+STORE, 3596701696, 3596742655,
+STORE, 3596742656, 3596746751,
+STORE, 3596746752, 3596750847,
+STORE, 3596767232, 3597070335,
+STORE, 3597070336, 3597402111,
+STORE, 3597402112, 3598188543,
+STORE, 3598262272, 3623428095,
+STORE, 3623428096, 3623432191,
+STORE, 3623432192, 3623436287,
+STORE, 3623436288, 3623440383,
+STORE, 3623616512, 3623878655,
+STORE, 3624169472, 3624300543,
+STORE, 3627524096, 3628523519,
+STORE, 3628523520, 3629522943,
+STORE, 3696631808, 3730186239,
+STORE, 3730186240, 3763740671,
+STORE, 3763740672, 3764027391,
+STORE, 3764027392, 3765133311,
+STORE, 3765133312, 3765145599,
+STORE, 3765145600, 3765149695,
+STORE, 3765178368, 3766022143,
+STORE, 3766022144, 3768791039,
+STORE, 3768791040, 3768840191,
+STORE, 3768840192, 3768844287,
+STORE, 3768897536, 3768913919,
+STORE, 3768913920, 3768934399,
+STORE, 3768934400, 3768938495,
+STORE, 3769016320, 3769147391,
+STORE, 3769147392, 3769233407,
+STORE, 3769233408, 3769356287,
+STORE, 3769356288, 3769360383,
+STORE, 3769360384, 3769368575,
+STORE, 3769376768, 3794542591,
+STORE, 3794542592, 3794599935,
+STORE, 3794599936, 3794731007,
+STORE, 3794731008, 3794735103,
+STORE, 3794735104, 3794743295,
+STORE, 3794849792, 3794980863,
+STORE, 3794980864, 3794984959,
+STORE, 3794984960, 3794989055,
+STORE, 3794989056, 3794993151,
+STORE, 3794993152, 3794997247,
+STORE, 3795103744, 3795128319,
+STORE, 3795128320, 3795165183,
+STORE, 3795165184, 3795169279,
+STORE, 3795169280, 3795173375,
+STORE, 3795210240, 3795357695,
+STORE, 3795357696, 3795365887,
+STORE, 3795365888, 3795374079,
+STORE, 3795374080, 3795378175,
+STORE, 3795378176, 3795382271,
+STORE, 3795406848, 3795738623,
+STORE, 3795738624, 3795742719,
+STORE, 3795742720, 3795755007,
+STORE, 3795755008, 3795759103,
+STORE, 3795763200, 3795894271,
+STORE, 3795894272, 3796041727,
+STORE, 3796041728, 3796054015,
+STORE, 3796054016, 3796066303,
+STORE, 3796066304, 3796070399,
+STORE, 3796176896, 3796205567,
+STORE, 3796205568, 3796250623,
+STORE, 3796250624, 3796254719,
+STORE, 3796254720, 3796258815,
+STORE, 3796262912, 3796393983,
+STORE, 3796393984, 3796516863,
+STORE, 3796516864, 3796873215,
+STORE, 3796873216, 3796885503,
+STORE, 3796885504, 3796889599,
+STORE, 3796963328, 3796967423,
+STORE, 3796967424, 3796975615,
+STORE, 3796975616, 3796979711,
+STORE, 3797000192, 3797307391,
+STORE, 3797307392, 3797311487,
+STORE, 3797311488, 3797315583,
+STORE, 3797315584, 3797323775,
+STORE, 3797327872, 3797450751,
+STORE, 3797450752, 3797458943,
+STORE, 3797458944, 3797471231,
+STORE, 3797471232, 3797475327,
+STORE, 3797577728, 3797700607,
+STORE, 3797700608, 3797721087,
+STORE, 3797721088, 3797733375,
+STORE, 3797733376, 3797741567,
+STORE, 3797741568, 3797864447,
+STORE, 3797864448, 3797995519,
+STORE, 3797995520, 3798048767,
+STORE, 3798048768, 3798179839,
+STORE, 3798179840, 3798188031,
+STORE, 3798188032, 3798192127,
+STORE, 3798290432, 3798302719,
+STORE, 3798302720, 3798323199,
+STORE, 3798323200, 3798327295,
+STORE, 3798327296, 3798331391,
+STORE, 3798429696, 3798433791,
+STORE, 3798433792, 3798552575,
+STORE, 3798552576, 3798556671,
+STORE, 3798556672, 3798568959,
+STORE, 3798568960, 3798573055,
+STORE, 3798573056, 3798581247,
+STORE, 3798618112, 3798749183,
+STORE, 3798749184, 3798855679,
+STORE, 3798855680, 3798966271,
+STORE, 3798966272, 3798982655,
+STORE, 3798982656, 3798986751,
+STORE, 3799101440, 3799171071,
+STORE, 3799171072, 3799240703,
+STORE, 3799240704, 3799248895,
+STORE, 3799248896, 3799252991,
+STORE, 3799326720, 3799650303,
+STORE, 3799650304, 3800629247,
+STORE, 3800629248, 3800641535,
+STORE, 3800641536, 3800645631,
+STORE, 3800645632, 3800649727,
+STORE, 3800649728, 3800903679,
+STORE, 3800903680, 3800936447,
+STORE, 3800936448, 3800969215,
+STORE, 3800969216, 3800981503,
+STORE, 3800981504, 3800985599,
+STORE, 3801001984, 3801133055,
+STORE, 3801133056, 3801202687,
+STORE, 3801202688, 3801591807,
+STORE, 3801591808, 3801599999,
+STORE, 3801600000, 3801604095,
+STORE, 3801604096, 3801608191,
+STORE, 3801608192, 3801739263,
+STORE, 3801739264, 3801755647,
+STORE, 3801755648, 3801796607,
+STORE, 3801796608, 3801804799,
+STORE, 3801804800, 3801808895,
+STORE, 3801878528, 3801944063,
+STORE, 3801944064, 3802116095,
+STORE, 3802116096, 3802124287,
+STORE, 3802124288, 3802128383,
+STORE, 3802136576, 3803447295,
+STORE, 3803492352, 3803553791,
+STORE, 3803553792, 3804233727,
+STORE, 3804233728, 3806068735,
+STORE, 3806121984, 3806253055,
+STORE, 3806253056, 3806674943,
+STORE, 3806674944, 3807117311,
+STORE, 3807117312, 3807379455,
+STORE, 3807379456, 3807432703,
+STORE, 3807432704, 3807563775,
+STORE, 3807563776, 3809202175,
+STORE, 3809202176, 3810250751,
+STORE, 3810250752, 3827027967,
+STORE, 3827027968, 3829125119,
+STORE, 3829125120, 3837513727,
+STORE, 3837513728, 3839610879,
+STORE, 3839610880, 3847999487,
+STORE, 3847999488, 3856392191,
+STORE, 3856392192, 3864784895,
+STORE, 3864784896, 3868983295,
+STORE, 3868983296, 3885760511,
+STORE, 3885760512, 3886809087,
+STORE, 3886809088, 3887857663,
+STORE, 3887857664, 3888119807,
+STORE, 3888144384, 3888148479,
+STORE, 3888148480, 3888218111,
+STORE, 3888218112, 3888222207,
+STORE, 3888222208, 3888353279,
+STORE, 3888353280, 3889172479,
+STORE, 3889172480, 3892314111,
+STORE, 3892314112, 3892576255,
+STORE, 3892588544, 3892637695,
+STORE, 3892637696, 3892686847,
+STORE, 3892686848, 3892744191,
+STORE, 3892748288, 3892785151,
+STORE, 3892785152, 3895459839,
+STORE, 3895459840, 3895721983,
+STORE, 3895738368, 3895885823,
+STORE, 3895885824, 3897081855,
+STORE, 3897081856, 3906482175,
+STORE, 3906482176, 3916144639,
+STORE, 3916144640, 3925766143,
+STORE, 3925766144, 3926974463,
+STORE, 3926974464, 3928367103,
+STORE, 3928367104, 3928911871,
+STORE, 3928911872, 3933995007,
+STORE, 3933995008, 3935830015,
+STORE, 3935830016, 3935846399,
+STORE, 3935879168, 3936010239,
+STORE, 3936010240, 3936026623,
+STORE, 3936026624, 3936034815,
+STORE, 3936034816, 3936051199,
+STORE, 3936051200, 3936055295,
+STORE, 3936071680, 3936137215,
+STORE, 3936137216, 3936202751,
+STORE, 3936202752, 3936219135,
+STORE, 3936235520, 3936251903,
+STORE, 3936268288, 3936276479,
+STORE, 3936276480, 3936284671,
+STORE, 3936284672, 3936288767,
+STORE, 3936288768, 3936292863,
+STORE, 3936296960, 3936354303,
+STORE, 3936354304, 3936616447,
+STORE, 3936628736, 3936669695,
+STORE, 3936669696, 3936747519,
+STORE, 3936747520, 3936870399,
+STORE, 3936870400, 3936874495,
+STORE, 3936874496, 3936878591,
+STORE, 3936882688, 3936903167,
+STORE, 3936911360, 3936948223,
+STORE, 3936948224, 3936964607,
+STORE, 3936964608, 3937103871,
+STORE, 3937103872, 3937107967,
+STORE, 3937132544, 3937161215,
+STORE, 3937189888, 3937255423,
+STORE, 3937255424, 3938512895,
+STORE, 3938512896, 3945435135,
+STORE, 3945435136, 3945476095,
+STORE, 3945476096, 3945484287,
+STORE, 3945484288, 3945496575,
+STORE, 3945500672, 3945541631,
+STORE, 3945558016, 3945566207,
+STORE, 3945566208, 3945594879,
+STORE, 3945594880, 3945598975,
+STORE, 3945598976, 3945603071,
+STORE, 3945611264, 3945742335,
+STORE, 3945742336, 3945844735,
+STORE, 3945844736, 3945848831,
+STORE, 3945848832, 3945861119,
+STORE, 3945861120, 3945865215,
+STORE, 3945869312, 3945897983,
+STORE, 3945897984, 3946303487,
+STORE, 3946303488, 3946397695,
+STORE, 3946397696, 3946569727,
+STORE, 3946569728, 3946573823,
+STORE, 3946573824, 3946594303,
+STORE, 3946594304, 3946663935,
+STORE, 3946663936, 3946708991,
+STORE, 3946708992, 3946823679,
+STORE, 3946823680, 3946827775,
+STORE, 3946827776, 3946831871,
+STORE, 3946831872, 3946860543,
+STORE, 3946893312, 3946897407,
+STORE, 3946897408, 3946905599,
+STORE, 3946905600, 3946909695,
+STORE, 3946909696, 3946913791,
+STORE, 3946913792, 3946930175,
+STORE, 3946930176, 3946967039,
+STORE, 3946967040, 3947102207,
+STORE, 3947102208, 3948412927,
+STORE, 3948441600, 3948556287,
+STORE, 3948556288, 3948576767,
+STORE, 3948576768, 3948597247,
+STORE, 3948597248, 3948605439,
+STORE, 3948605440, 3948609535,
+STORE, 3948609536, 3948654591,
+STORE, 3948654592, 3948781567,
+STORE, 3948781568, 3948822527,
+STORE, 3948822528, 3948904447,
+STORE, 3948904448, 3948908543,
+STORE, 3948908544, 3948912639,
+STORE, 3948945408, 3949043711,
+STORE, 3949043712, 3949174783,
+STORE, 3949174784, 3949191167,
+STORE, 3949191168, 3949195263,
+STORE, 3949207552, 3949252607,
+STORE, 3949252608, 3949256703,
+STORE, 3949256704, 3949363199,
+STORE, 3949363200, 3949367295,
+STORE, 3949367296, 3949379583,
+STORE, 3949379584, 3949383679,
+STORE, 3949383680, 3949400063,
+STORE, 3949400064, 3949404159,
+STORE, 3949416448, 3949481983,
+STORE, 3949481984, 3949486079,
+STORE, 3949486080, 3949592575,
+STORE, 3949592576, 3949596671,
+STORE, 3949596672, 3949621247,
+STORE, 3949621248, 3949662207,
+STORE, 3949662208, 3949666303,
+STORE, 3949694976, 3949727743,
+STORE, 3949727744, 3949731839,
+STORE, 3949731840, 3949838335,
+STORE, 3949838336, 3949842431,
+STORE, 3949842432, 3949846527,
+STORE, 3949846528, 3949854719,
+STORE, 3949854720, 3949858815,
+STORE, 3949858816, 3949862911,
+STORE, 3949867008, 3949891583,
+STORE, 3949891584, 3949928447,
+STORE, 3949928448, 3949993983,
+STORE, 3949993984, 3950043135,
+STORE, 3950043136, 3950059519,
+STORE, 3950059520, 3950096383,
+STORE, 3950096384, 3950100479,
+STORE, 3950100480, 3950104575,
+STORE, 3950104576, 3950157823,
+STORE, 3950157824, 3950292991,
+STORE, 3950292992, 3950346239,
+STORE, 3950346240, 3950477311,
+STORE, 3950477312, 3950485503,
+STORE, 3950485504, 3950489599,
+STORE, 3950493696, 3950510079,
+STORE, 3950510080, 3950661631,
+STORE, 3950661632, 3951005695,
+STORE, 3951005696, 3951026175,
+STORE, 3951026176, 3951030271,
+STORE, 3951030272, 3951054847,
+STORE, 3951054848, 3951116287,
+STORE, 3951116288, 3951144959,
+STORE, 3951144960, 3951149055,
+STORE, 3951149056, 3951194111,
+STORE, 3951194112, 3951202303,
+STORE, 3951202304, 3951206399,
+STORE, 3951210496, 3951226879,
+STORE, 3951226880, 3951329279,
+STORE, 3951329280, 3951366143,
+STORE, 3951366144, 3951411199,
+STORE, 3951411200, 3951415295,
+STORE, 3951415296, 3951419391,
+STORE, 3951419392, 3951452159,
+STORE, 3951452160, 3951566847,
+STORE, 3951566848, 3951812607,
+STORE, 3951812608, 3952173055,
+STORE, 3952173056, 3952214015,
+STORE, 3952214016, 3952218111,
+STORE, 3952222208, 3952250879,
+STORE, 3952250880, 3952369663,
+STORE, 3952369664, 3952488447,
+STORE, 3952488448, 3952627711,
+STORE, 3952627712, 3952635903,
+STORE, 3952635904, 3952639999,
+STORE, 3952652288, 3952668671,
+STORE, 3952668672, 3953000447,
+STORE, 3953000448, 3953004543,
+STORE, 3953004544, 3953008639,
+STORE, 3953008640, 3953012735,
+STORE, 3953012736, 3953037311,
+STORE, 3953037312, 3953151999,
+STORE, 3953152000, 3953291263,
+STORE, 3953291264, 3953324031,
+STORE, 3953324032, 3953364991,
+STORE, 3953364992, 3953373183,
+STORE, 3953373184, 3953377279,
+STORE, 3953381376, 3953410047,
+STORE, 3953410048, 3953491967,
+STORE, 3953491968, 3953643519,
+STORE, 3953643520, 3953651711,
+STORE, 3953651712, 3953655807,
+STORE, 3953659904, 3953766399,
+STORE, 3953766400, 3953774591,
+STORE, 3953774592, 3953786879,
+STORE, 3953786880, 3953790975,
+STORE, 3953790976, 3953823743,
+STORE, 3953823744, 3953963007,
+STORE, 3953963008, 3954024447,
+STORE, 3954024448, 3954118655,
+STORE, 3954118656, 3954122751,
+STORE, 3954122752, 3954126847,
+STORE, 3954130944, 3954184191,
+STORE, 3954184192, 3954294783,
+STORE, 3954294784, 3954323455,
+STORE, 3954323456, 3954393087,
+STORE, 3954393088, 3954397183,
+STORE, 3954397184, 3954401279,
+STORE, 3954401280, 3954405375,
+STORE, 3954409472, 3954528255,
+STORE, 3954528256, 3954737151,
+STORE, 3954737152, 3955052543,
+STORE, 3955052544, 3955060735,
+STORE, 3955060736, 3955064831,
+STORE, 3955068928, 3955105791,
+STORE, 3955105792, 3955167231,
+STORE, 3955167232, 3955277823,
+STORE, 3955277824, 3955310591,
+STORE, 3955310592, 3955351551,
+STORE, 3955351552, 3955359743,
+STORE, 3955359744, 3955363839,
+STORE, 3955363840, 3955392511,
+STORE, 3955392512, 3955453951,
+STORE, 3955453952, 3955601407,
+STORE, 3955601408, 3955777535,
+STORE, 3955777536, 3955982335,
+STORE, 3955982336, 3956011007,
+STORE, 3956011008, 3956015103,
+STORE, 3956023296, 3956039679,
+STORE, 3956039680, 3956125695,
+STORE, 3956125696, 3956129791,
+STORE, 3956129792, 3956133887,
+STORE, 3956133888, 3956137983,
+STORE, 3956142080, 3956449279,
+STORE, 3956449280, 3956543487,
+STORE, 3956543488, 3956719615,
+STORE, 3956719616, 3956731903,
+STORE, 3956731904, 3956735999,
+STORE, 3956744192, 3956793343,
+STORE, 3956793344, 3956887551,
+STORE, 3956887552, 3956953087,
+STORE, 3956953088, 3957035007,
+STORE, 3957035008, 3957039103,
+STORE, 3957039104, 3957047295,
+STORE, 3957047296, 3957071871,
+STORE, 3957071872, 3957231615,
+STORE, 3957231616, 3957563391,
+STORE, 3957563392, 3957579775,
+STORE, 3957579776, 3957583871,
+STORE, 3957592064, 3957608447,
+STORE, 3957608448, 3957878783,
+STORE, 3957878784, 3958591487,
+STORE, 3958591488, 3958599679,
+STORE, 3958599680, 3958607871,
+STORE, 3958607872, 3958620159,
+STORE, 3958620160, 3958624255,
+STORE, 3958624256, 3963199487,
+STORE, 3963199488, 3963285503,
+STORE, 3963285504, 3963371519,
+STORE, 3963371520, 3963428863,
+STORE, 3963428864, 3963555839,
+STORE, 3963555840, 3963559935,
+STORE, 3963559936, 3963564031,
+STORE, 3963568128, 3963596799,
+STORE, 3963596800, 3963682815,
+STORE, 3963682816, 3963695103,
+STORE, 3963695104, 3963711487,
+STORE, 3963711488, 3963715583,
+STORE, 3963719680, 3963752447,
+STORE, 3963752448, 3963846655,
+STORE, 3963846656, 3963932671,
+STORE, 3963932672, 3964444671,
+STORE, 3964444672, 3964448767,
+STORE, 3964448768, 3965808639,
+STORE, 3965808640, 3965845503,
+STORE, 3965845504, 3965849599,
+STORE, 3965853696, 3965935615,
+STORE, 3965935616, 3966017535,
+STORE, 3966017536, 3966103551,
+STORE, 3966103552, 3966685183,
+STORE, 3966685184, 3967705087,
+STORE, 3967705088, 3967758335,
+STORE, 3967758336, 3967762431,
+STORE, 3967762432, 3967770623,
+STORE, 3967770624, 3967799295,
+STORE, 3967799296, 3967848447,
+STORE, 3967848448, 3967868927,
+STORE, 3967868928, 3967901695,
+STORE, 3967901696, 3967905791,
+STORE, 3967905792, 3967909887,
+STORE, 3967909888, 3967995903,
+STORE, 3967995904, 3968077823,
+STORE, 3968077824, 3968159743,
+STORE, 3968159744, 3968167935,
+STORE, 3968167936, 3968172031,
+STORE, 3968172032, 3968192511,
+STORE, 3968192512, 3968196607,
+STORE, 3968196608, 3968200703,
+STORE, 3968208896, 3968516095,
+STORE, 3968516096, 3968528383,
+STORE, 3968528384, 3968552959,
+STORE, 3968552960, 3968557055,
+STORE, 3968561152, 3968593919,
+STORE, 3968593920, 3968626687,
+STORE, 3968626688, 3971153919,
+STORE, 3971153920, 3973754879,
+STORE, 3973754880, 3973804031,
+STORE, 3973804032, 3973820415,
+STORE, 3973820416, 3973832703,
+STORE, 3973840896, 3973873663,
+STORE, 3973873664, 3973967871,
+STORE, 3973967872, 3973976063,
+STORE, 3973976064, 3973984255,
+STORE, 3973984256, 3973988351,
+STORE, 3973988352, 3973992447,
+STORE, 3973996544, 3974008831,
+STORE, 3974008832, 3974045695,
+STORE, 3974045696, 3974139903,
+STORE, 3974139904, 3974254591,
+STORE, 3974254592, 3974275071,
+STORE, 3974275072, 3974291455,
+STORE, 3974291456, 3974295551,
+STORE, 3974295552, 3974373375,
+STORE, 3974373376, 3974524927,
+STORE, 3974524928, 3974529023,
+STORE, 3974529024, 3974537215,
+STORE, 3974537216, 3974541311,
+STORE, 3974541312, 3974545407,
+STORE, 3974545408, 3974627327,
+STORE, 3974627328, 3974680575,
+STORE, 3974680576, 3974811647,
+STORE, 3974811648, 3974819839,
+STORE, 3974819840, 3974823935,
+STORE, 3974832128, 3974918143,
+STORE, 3974918144, 3974963199,
+STORE, 3974963200, 3975077887,
+STORE, 3975077888, 3975090175,
+STORE, 3975090176, 3975094271,
+STORE, 3975094272, 3975102463,
+STORE, 3975102464, 3975114751,
+STORE, 3975114752, 3975266303,
+STORE, 3975266304, 3975274495,
+STORE, 3975274496, 3975286783,
+STORE, 3975286784, 3975290879,
+STORE, 3975290880, 3975299071,
+STORE, 3975299072, 3975315455,
+STORE, 3975315456, 3975430143,
+STORE, 3975430144, 3975536639,
+STORE, 3975536640, 3975651327,
+STORE, 3975651328, 3975655423,
+STORE, 3975655424, 3975659519,
+STORE, 3975659520, 3975770111,
+STORE, 3975770112, 3975778303,
+STORE, 3975778304, 3975790591,
+STORE, 3975790592, 3975794687,
+STORE, 3975794688, 3975798783,
+STORE, 3975798784, 3975831551,
+STORE, 3975831552, 3975872511,
+STORE, 3975872512, 3975987199,
+STORE, 3975987200, 3976134655,
+STORE, 3976134656, 3977175039,
+STORE, 3977175040, 3977183231,
+STORE, 3977183232, 3977191423,
+STORE, 3977191424, 3977195519,
+STORE, 3977199616, 3977248767,
+STORE, 3977248768, 3977539583,
+STORE, 3977539584, 3977965567,
+STORE, 3977965568, 3977981951,
+STORE, 3977981952, 3977986047,
+STORE, 3977986048, 3977994239,
+STORE, 3977994240, 3978002431,
+STORE, 3978002432, 3978084351,
+STORE, 3978084352, 3978125311,
+STORE, 3978125312, 3978174463,
+STORE, 3978174464, 3978178559,
+STORE, 3978178560, 3978182655,
+STORE, 3978182656, 3978207231,
+STORE, 3978207232, 3978297343,
+STORE, 3978297344, 3978301439,
+STORE, 3978301440, 3978305535,
+STORE, 3978305536, 3978309631,
+STORE, 3978309632, 3978317823,
+STORE, 3978317824, 3978625023,
+STORE, 3978625024, 3978657791,
+STORE, 3978657792, 3978727423,
+STORE, 3978727424, 3978735615,
+STORE, 3978735616, 3978739711,
+STORE, 3978739712, 3978760191,
+STORE, 3978760192, 3978842111,
+STORE, 3978842112, 3978850303,
+STORE, 3978850304, 3978858495,
+STORE, 3978858496, 3978862591,
+STORE, 3978862592, 3978895359,
+STORE, 3978895360, 3979014143,
+STORE, 3979014144, 3979132927,
+STORE, 3979132928, 3979288575,
+STORE, 3979288576, 3979481087,
+STORE, 3979481088, 3979489279,
+STORE, 3979489280, 3979493375,
+STORE, 3979497472, 3979583487,
+STORE, 3979583488, 3979673599,
+STORE, 3979673600, 3979718655,
+STORE, 3979718656, 3979829247,
+STORE, 3979829248, 3979841535,
+STORE, 3979841536, 3979882495,
+STORE, 3979882496, 3979964415,
+STORE, 3979964416, 3980013567,
+STORE, 3980013568, 3980148735,
+STORE, 3980148736, 3980152831,
+STORE, 3980152832, 3980320767,
+STORE, 3980320768, 3980337151,
+STORE, 3980337152, 3980341247,
+STORE, 3980345344, 3980365823,
+STORE, 3980365824, 3980423167,
+STORE, 3980423168, 3980460031,
+STORE, 3980460032, 3980500991,
+STORE, 3980500992, 3980509183,
+STORE, 3980509184, 3980513279,
+STORE, 3980513280, 3980546047,
+STORE, 3980546048, 3980660735,
+STORE, 3980660736, 3980951551,
+STORE, 3980951552, 3981500415,
+STORE, 3981500416, 3981529087,
+STORE, 3981529088, 3981533183,
+STORE, 3981537280, 3981549567,
+STORE, 3981549568, 3981598719,
+STORE, 3981598720, 3981717503,
+STORE, 3981717504, 3982127103,
+STORE, 3982127104, 3982675967,
+STORE, 3982675968, 3982733311,
+STORE, 3982733312, 3982737407,
+STORE, 3982741504, 3982860287,
+STORE, 3982860288, 3982905343,
+STORE, 3982905344, 3982966783,
+STORE, 3982966784, 3982974975,
+STORE, 3982974976, 3982979071,
+STORE, 3982979072, 3983032319,
+STORE, 3983032320, 3983085567,
+STORE, 3983085568, 3983208447,
+STORE, 3983208448, 3983212543,
+STORE, 3983212544, 3983220735,
+STORE, 3983220736, 3983224831,
+STORE, 3983224832, 3983237119,
+STORE, 3983237120, 3983351807,
+STORE, 3983351808, 3983376383,
+STORE, 3983376384, 3983392767,
+STORE, 3983392768, 3983396863,
+STORE, 3983396864, 3983400959,
+STORE, 3983400960, 3983417343,
+STORE, 3983417344, 3983753215,
+STORE, 3983753216, 3983757311,
+STORE, 3983757312, 3983761407,
+STORE, 3983761408, 3983765503,
+STORE, 3983765504, 3983769599,
+STORE, 3983769600, 3983880191,
+STORE, 3983880192, 3983892479,
+STORE, 3983892480, 3983900671,
+STORE, 3983900672, 3983904767,
+STORE, 3983904768, 3983908863,
+STORE, 3983908864, 3983941631,
+STORE, 3983941632, 3983990783,
+STORE, 3983990784, 3984097279,
+STORE, 3984097280, 3984105471,
+STORE, 3984105472, 3984117759,
+STORE, 3984117760, 3984121855,
+STORE, 3984121856, 3984125951,
+STORE, 3984125952, 3984134143,
+STORE, 3984134144, 3984150527,
+STORE, 3984150528, 3984416767,
+STORE, 3984416768, 3984470015,
+STORE, 3984470016, 3984564223,
+STORE, 3984564224, 3984568319,
+STORE, 3984572416, 3984629759,
+STORE, 3984629760, 3984805887,
+STORE, 3984805888, 3985096703,
+STORE, 3985096704, 3985104895,
+STORE, 3985104896, 3985108991,
+STORE, 3985113088, 3986862079,
+STORE, 3986862080, 3993640959,
+STORE, 3993640960, 3993739263,
+STORE, 3993739264, 3993743359,
+STORE, 3993743360, 3993759743,
+STORE, 3993759744, 3993780223,
+STORE, 3993780224, 3993784319,
+STORE, 3993784320, 3993792511,
+STORE, 3993792512, 3993796607,
+STORE, 3993796608, 3993800703,
+STORE, 3993804800, 3994214399,
+STORE, 3994214400, 3994218495,
+STORE, 3994218496, 3994222591,
+STORE, 3994222592, 3994226687,
+STORE, 3994230784, 3994243071,
+STORE, 3994243072, 3994255359,
+STORE, 3994255360, 3994304511,
+STORE, 3994304512, 3994386431,
+STORE, 3994386432, 3994509311,
+STORE, 3994509312, 3994521599,
+STORE, 3994521600, 3994525695,
+STORE, 3994529792, 3994542079,
+STORE, 3994542080, 3994660863,
+STORE, 3994660864, 3994705919,
+STORE, 3994705920, 3994796031,
+STORE, 3994796032, 3994800127,
+STORE, 3994800128, 3994804223,
+STORE, 3994804224, 3994812415,
+STORE, 3994812416, 3994845183,
+STORE, 3994845184, 3994898431,
+STORE, 3994898432, 3994902527,
+STORE, 3994902528, 3994906623,
+STORE, 3994910720, 3994931199,
+STORE, 3994931200, 3995181055,
+STORE, 3995181056, 3995222015,
+STORE, 3995222016, 3995275263,
+STORE, 3995275264, 3995279359,
+STORE, 3995279360, 3995283455,
+STORE, 3995283456, 3995291647,
+STORE, 3995291648, 3995324415,
+STORE, 3995324416, 3995451391,
+STORE, 3995451392, 3995697151,
+STORE, 3995697152, 3996078079,
+STORE, 3996078080, 3996086271,
+STORE, 3996086272, 3996090367,
+STORE, 3996094464, 3996119039,
+STORE, 3996119040, 3996200959,
+STORE, 3996200960, 3996229631,
+STORE, 3996229632, 3996233727,
+STORE, 3996233728, 3996282879,
+STORE, 3996282880, 3996291071,
+STORE, 3996291072, 3996295167,
+STORE, 3996299264, 3996311551,
+STORE, 3996311552, 3996430335,
+STORE, 3996430336, 3996467199,
+STORE, 3996467200, 3996504063,
+STORE, 3996504064, 3996512255,
+STORE, 3996512256, 3996516351,
+STORE, 3996516352, 3996540927,
+STORE, 3996540928, 3996671999,
+STORE, 3996672000, 3996676095,
+STORE, 3996676096, 3996684287,
+STORE, 3996684288, 3996688383,
+STORE, 3996688384, 3996692479,
+STORE, 3996692480, 3996717055,
+STORE, 3996717056, 3997048831,
+STORE, 3997048832, 3997057023,
+STORE, 3997057024, 3997073407,
+STORE, 3997073408, 3997077503,
+STORE, 3997077504, 3997081599,
+STORE, 3997081600, 3997097983,
+STORE, 3997097984, 3997179903,
+STORE, 3997179904, 3997356031,
+STORE, 3997356032, 3997650943,
+STORE, 3997650944, 3997675519,
+STORE, 3997675520, 3997679615,
+STORE, 3997683712, 3997700095,
+STORE, 3997700096, 3997745151,
+STORE, 3997745152, 3997802495,
+STORE, 3997802496, 3997810687,
+STORE, 3997810688, 3997814783,
+STORE, 3997814784, 3998064639,
+STORE, 3998064640, 3998081023,
+STORE, 3998081024, 3998085119,
+STORE, 3998085120, 3998130175,
+STORE, 3998130176, 3998134271,
+STORE, 3998134272, 3998142463,
+STORE, 3998142464, 3998179327,
+STORE, 3998179328, 3998212095,
+STORE, 3998212096, 3998326783,
+STORE, 3998326784, 3998351359,
+STORE, 3998351360, 3998392319,
+STORE, 3998392320, 3998396415,
+STORE, 3998396416, 3998400511,
+STORE, 3998400512, 3998433279,
+STORE, 3998433280, 3998466047,
+STORE, 3998466048, 3998613503,
+STORE, 3998613504, 3998666751,
+STORE, 3998666752, 3998724095,
+STORE, 3998724096, 3998732287,
+STORE, 3998732288, 3998736383,
+STORE, 3998736384, 3998760959,
+STORE, 3998760960, 3998777343,
+STORE, 3998777344, 3998822399,
+STORE, 3998822400, 3998826495,
+STORE, 3998826496, 3998830591,
+STORE, 3998830592, 3998863359,
+STORE, 3998863360, 3998900223,
+STORE, 3998900224, 3999043583,
+STORE, 3999043584, 3999121407,
+STORE, 3999121408, 3999215615,
+STORE, 3999215616, 3999223807,
+STORE, 3999223808, 3999227903,
+STORE, 3999227904, 3999236095,
+STORE, 3999236096, 3999268863,
+STORE, 3999268864, 3999301631,
+STORE, 3999301632, 3999354879,
+STORE, 3999354880, 3999428607,
+STORE, 3999428608, 3999436799,
+STORE, 3999436800, 3999440895,
+STORE, 3999444992, 3999461375,
+STORE, 3999461376, 3999584255,
+STORE, 3999584256, 3999760383,
+STORE, 3999760384, 4000219135,
+STORE, 4000219136, 4000235519,
+STORE, 4000235520, 4000251903,
+STORE, 4000251904, 4000501759,
+STORE, 4000501760, 4000505855,
+STORE, 4000505856, 4000509951,
+STORE, 4000509952, 4000518143,
+STORE, 4000518144, 4000522239,
+STORE, 4000522240, 4000587775,
+STORE, 4000587776, 4000645119,
+STORE, 4000645120, 4000813055,
+STORE, 4000813056, 4000817151,
+STORE, 4000821248, 4000837631,
+STORE, 4000837632, 4000870399,
+STORE, 4000870400, 4000874495,
+STORE, 4000874496, 4000878591,
+STORE, 4000878592, 4000882687,
+STORE, 4000882688, 4000886783,
+STORE, 4000886784, 4000890879,
+STORE, 4000890880, 4000907263,
+STORE, 4000907264, 4001214463,
+STORE, 4001214464, 4001558527,
+STORE, 4001558528, 4002484223,
+STORE, 4002484224, 4002525183,
+STORE, 4002525184, 4002529279,
+STORE, 4002529280, 4002533375,
+STORE, 4002533376, 4002537471,
+STORE, 4002537472, 4002660351,
+STORE, 4002660352, 4002779135,
+STORE, 4002779136, 4002791423,
+STORE, 4002791424, 4002799615,
+STORE, 4002799616, 4002807807,
+STORE, 4002807808, 4002811903,
+STORE, 4002811904, 4002828287,
+STORE, 4002828288, 4002910207,
+STORE, 4002910208, 4003028991,
+STORE, 4003028992, 4003037183,
+STORE, 4003037184, 4003045375,
+STORE, 4003045376, 4003049471,
+STORE, 4003049472, 4003053567,
+STORE, 4003053568, 4003057663,
+STORE, 4003057664, 4003065855,
+STORE, 4003065856, 4003135487,
+STORE, 4003135488, 4003446783,
+STORE, 4003446784, 4003450879,
+STORE, 4003450880, 4003454975,
+STORE, 4003454976, 4003459071,
+STORE, 4003459072, 4003463167,
+STORE, 4003463168, 4003495935,
+STORE, 4003495936, 4003569663,
+STORE, 4003569664, 4003573759,
+STORE, 4003573760, 4003704831,
+STORE, 4003704832, 4003708927,
+STORE, 4003708928, 4003713023,
+STORE, 4003713024, 4003737599,
+STORE, 4003737600, 4003770367,
+STORE, 4003770368, 4003876863,
+STORE, 4003876864, 4003880959,
+STORE, 4003880960, 4003885055,
+STORE, 4003885056, 4003889151,
+STORE, 4003889152, 4003893247,
+STORE, 4003893248, 4003897343,
+STORE, 4003897344, 4003962879,
+STORE, 4003962880, 4004069375,
+STORE, 4004069376, 4004093951,
+STORE, 4004093952, 4004118527,
+STORE, 4004118528, 4004122623,
+STORE, 4004122624, 4004126719,
+STORE, 4004126720, 4004155391,
+STORE, 4004155392, 4004286463,
+STORE, 4004286464, 4004384767,
+STORE, 4004384768, 4004388863,
+STORE, 4004388864, 4004646911,
+STORE, 4004646912, 4004655103,
+STORE, 4004655104, 4004659199,
+STORE, 4004659200, 4004667391,
+STORE, 4004667392, 4004683775,
+STORE, 4004683776, 4004814847,
+STORE, 4004814848, 4004818943,
+STORE, 4004818944, 4004823039,
+STORE, 4004823040, 4004827135,
+STORE, 4004827136, 4004835327,
+STORE, 4004835328, 4004954111,
+STORE, 4004954112, 4005085183,
+STORE, 4005085184, 4005306367,
+STORE, 4005306368, 4005765119,
+STORE, 4005765120, 4005789695,
+STORE, 4005789696, 4005793791,
+STORE, 4005793792, 4005801983,
+STORE, 4005801984, 4005920767,
+STORE, 4005920768, 4005945343,
+STORE, 4005945344, 4005949439,
+STORE, 4005949440, 4005986303,
+STORE, 4005986304, 4005990399,
+STORE, 4005990400, 4005994495,
+STORE, 4005994496, 4006002687,
+STORE, 4006002688, 4006109183,
+STORE, 4006109184, 4006117375,
+STORE, 4006117376, 4006121471,
+STORE, 4006121472, 4006133759,
+STORE, 4006133760, 4006137855,
+STORE, 4006137856, 4006141951,
+STORE, 4006141952, 4006150143,
+STORE, 4006150144, 4006391807,
+STORE, 4006391808, 4006445055,
+STORE, 4006445056, 4006563839,
+STORE, 4006563840, 4006572031,
+STORE, 4006572032, 4006576127,
+STORE, 4006576128, 4006584319,
+STORE, 4006584320, 4006694911,
+STORE, 4006694912, 4006739967,
+STORE, 4006739968, 4006776831,
+STORE, 4006776832, 4006785023,
+STORE, 4006785024, 4006789119,
+STORE, 4006789120, 4006797311,
+STORE, 4006797312, 4006813695,
+STORE, 4006813696, 4006846463,
+STORE, 4006846464, 4006977535,
+STORE, 4006977536, 4007006207,
+STORE, 4007006208, 4007010303,
+STORE, 4007010304, 4007067647,
+STORE, 4007067648, 4007075839,
+STORE, 4007075840, 4007084031,
+STORE, 4007084032, 4007100415,
+STORE, 4007100416, 4007116799,
+STORE, 4007116800, 4007133183,
+STORE, 4007133184, 4007153663,
+STORE, 4007153664, 4007178239,
+STORE, 4007178240, 4007202815,
+STORE, 4007202816, 4007206911,
+STORE, 4007206912, 4007272447,
+STORE, 4007272448, 4007276543,
+STORE, 4007276544, 4007280639,
+STORE, 4007280640, 4007284735,
+STORE, 4007284736, 4007292927,
+STORE, 4007292928, 4007423999,
+STORE, 4007424000, 4007448575,
+STORE, 4007448576, 4007452671,
+STORE, 4007452672, 4007505919,
+STORE, 4007505920, 4007510015,
+STORE, 4007510016, 4007514111,
+STORE, 4007514112, 4007645183,
+STORE, 4007645184, 4007776255,
+STORE, 4007776256, 4007780351,
+STORE, 4007780352, 4007784447,
+STORE, 4007784448, 4007788543,
+STORE, 4007788544, 4007809023,
+STORE, 4007809024, 4007829503,
+STORE, 4007829504, 4007960575,
+STORE, 4007960576, 4008091647,
+STORE, 4008091648, 4008296447,
+STORE, 4008296448, 4008890367,
+STORE, 4008890368, 4008898559,
+STORE, 4008898560, 4008902655,
+STORE, 4008902656, 4008996863,
+STORE, 4008996864, 4009041919,
+STORE, 4009041920, 4009082879,
+STORE, 4009082880, 4009091071,
+STORE, 4009091072, 4009107455,
+STORE, 4009107456, 4009349119,
+STORE, 4009349120, 4009373695,
+STORE, 4009373696, 4009414655,
+STORE, 4009414656, 4009422847,
+STORE, 4009422848, 4009426943,
+STORE, 4009426944, 4009447423,
+STORE, 4009447424, 4009471999,
+STORE, 4009472000, 4009512959,
+STORE, 4009512960, 4009594879,
+STORE, 4009594880, 4009598975,
+STORE, 4009598976, 4009697279,
+STORE, 4009697280, 4009713663,
+STORE, 4009713664, 4009717759,
+STORE, 4009717760, 4009721855,
+STORE, 4009721856, 4009730047,
+STORE, 4009730048, 4009861119,
+STORE, 4009861120, 4009951231,
+STORE, 4009951232, 4010131455,
+STORE, 4010131456, 4010135551,
+STORE, 4010135552, 4010139647,
+STORE, 4010139648, 4010143743,
+STORE, 4010143744, 4010164223,
+STORE, 4010164224, 4010295295,
+STORE, 4010295296, 4010299391,
+STORE, 4010299392, 4010491903,
+STORE, 4010491904, 4010495999,
+STORE, 4010496000, 4010668031,
+STORE, 4010668032, 4011028479,
+STORE, 4011028480, 4011053055,
+STORE, 4011053056, 4011057151,
+STORE, 4011057152, 4011118591,
+STORE, 4011118592, 4011126783,
+STORE, 4011126784, 4011130879,
+STORE, 4011130880, 4011143167,
+STORE, 4011143168, 4011147263,
+STORE, 4011147264, 4011167743,
+STORE, 4011167744, 4011171839,
+STORE, 4011171840, 4011360255,
+STORE, 4011360256, 4011364351,
+STORE, 4011364352, 4011626495,
+STORE, 4011626496, 4012216319,
+STORE, 4012216320, 4012228607,
+STORE, 4012228608, 4012232703,
+STORE, 4012232704, 4012236799,
+STORE, 4012236800, 4012240895,
+STORE, 4012240896, 4012261375,
+STORE, 4012261376, 4012392447,
+STORE, 4012392448, 4012466175,
+STORE, 4012466176, 4012597247,
+STORE, 4012597248, 4012601343,
+STORE, 4012601344, 4012605439,
+STORE, 4012605440, 4012609535,
+STORE, 4012609536, 4012679167,
+STORE, 4012679168, 4013563903,
+STORE, 4013563904, 4015366143,
+STORE, 4015366144, 4015411199,
+STORE, 4015411200, 4015415295,
+STORE, 4015415296, 4015419391,
+STORE, 4015419392, 4015542271,
+STORE, 4015542272, 4015550463,
+STORE, 4015550464, 4015558655,
+STORE, 4015558656, 4015562751,
+STORE, 4015562752, 4015583231,
+STORE, 4015583232, 4015587327,
+STORE, 4015587328, 4015603711,
+STORE, 4015665152, 4015669247,
+STORE, 4015669248, 4015812607,
+STORE, 4015812608, 4015816703,
+STORE, 4015816704, 4016111615,
+STORE, 4016111616, 4016467967,
+STORE, 4016467968, 4016508927,
+STORE, 4016508928, 4016517119,
+STORE, 4016517120, 4016525311,
+STORE, 4016525312, 4016586751,
+STORE, 4016586752, 4016664575,
+STORE, 4016664576, 4016697343,
+STORE, 4016697344, 4016742399,
+STORE, 4016742400, 4016746495,
+STORE, 4016746496, 4016750591,
+STORE, 4016750592, 4016758783,
+STORE, 4016799744, 4016844799,
+STORE, 4016844800, 4016902143,
+STORE, 4016902144, 4016992255,
+STORE, 4016992256, 4017000447,
+STORE, 4017000448, 4017004543,
+STORE, 4017004544, 4017008639,
+STORE, 4017008640, 4017016831,
+STORE, 4017016832, 4017020927,
+STORE, 4017020928, 4017127423,
+STORE, 4017127424, 4017131519,
+STORE, 4017131520, 4017229823,
+STORE, 4017229824, 4017422335,
+STORE, 4017422336, 4017438719,
+STORE, 4017438720, 4017442815,
+STORE, 4017442816, 4017446911,
+STORE, 4017446912, 4017455103,
+STORE, 4017455104, 4017766399,
+STORE, 4017766400, 4017909759,
+STORE, 4017909760, 4018081791,
+STORE, 4018081792, 4018089983,
+STORE, 4018089984, 4018094079,
+STORE, 4018094080, 4018098175,
+STORE, 4018098176, 4018327551,
+STORE, 4018327552, 4018331647,
+STORE, 4018331648, 4018339839,
+STORE, 4018339840, 4018348031,
+STORE, 4018348032, 4018610175,
+STORE, 4018610176, 4018626559,
+STORE, 4018626560, 4018647039,
+STORE, 4018647040, 4018651135,
+STORE, 4018651136, 4018749439,
+STORE, 4018749440, 4018761727,
+STORE, 4018761728, 4018802687,
+STORE, 4018802688, 4018806783,
+STORE, 4018806784, 4018810879,
+STORE, 4018810880, 4018814975,
+STORE, 4018814976, 4018823167,
+STORE, 4018823168, 4018954239,
+STORE, 4018954240, 4019007487,
+STORE, 4019007488, 4019068927,
+STORE, 4019068928, 4019077119,
+STORE, 4019077120, 4019081215,
+STORE, 4019081216, 4019093503,
+STORE, 4019093504, 4019208191,
+STORE, 4019208192, 4019232767,
+STORE, 4019232768, 4019265535,
+STORE, 4019265536, 4019269631,
+STORE, 4019269632, 4019277823,
+STORE, 4019277824, 4019458047,
+STORE, 4019458048, 4019519487,
+STORE, 4019519488, 4019613695,
+STORE, 4019613696, 4019621887,
+STORE, 4019621888, 4019625983,
+STORE, 4019625984, 4019630079,
+STORE, 4019630080, 4019744767,
+STORE, 4019744768, 4019822591,
+STORE, 4019822592, 4019929087,
+STORE, 4019929088, 4019941375,
+STORE, 4019941376, 4019945471,
+STORE, 4019945472, 4019961855,
+STORE, 4019961856, 4019994623,
+STORE, 4019994624, 4019998719,
+STORE, 4019998720, 4020002815,
+STORE, 4020002816, 4020006911,
+STORE, 4020006912, 4020011007,
+STORE, 4020011008, 4020256767,
+STORE, 4020256768, 4020326399,
+STORE, 4020326400, 4020457471,
+STORE, 4020457472, 4020469759,
+STORE, 4020469760, 4020473855,
+STORE, 4020473856, 4020482047,
+STORE, 4020482048, 4020711423,
+STORE, 4020711424, 4020715519,
+STORE, 4020715520, 4020719615,
+STORE, 4020719616, 4020723711,
+STORE, 4020723712, 4020805631,
+STORE, 4020805632, 4021051391,
+STORE, 4021051392, 4021460991,
+STORE, 4021460992, 4021469183,
+STORE, 4021469184, 4021473279,
+STORE, 4021473280, 4021571583,
+STORE, 4021571584, 4021633023,
+STORE, 4021633024, 4021727231,
+STORE, 4021727232, 4021735423,
+STORE, 4021735424, 4021739519,
+STORE, 4021739520, 4021747711,
+STORE, 4021747712, 4021829631,
+STORE, 4021829632, 4021866495,
+STORE, 4021866496, 4021919743,
+STORE, 4021919744, 4021927935,
+STORE, 4021927936, 4021932031,
+STORE, 4021932032, 4021944319,
+STORE, 4021944320, 4022157311,
+STORE, 4022157312, 4022161407,
+STORE, 4022161408, 4022173695,
+STORE, 4022173696, 4022177791,
+STORE, 4022177792, 4022472703,
+STORE, 4022472704, 4022509567,
+STORE, 4022509568, 4022583295,
+STORE, 4022583296, 4022587391,
+STORE, 4022587392, 4022591487,
+STORE, 4022591488, 4022607871,
+STORE, 4022607872, 4022657023,
+STORE, 4022657024, 4022722559,
+STORE, 4022722560, 4022730751,
+STORE, 4022730752, 4022734847,
+STORE, 4022734848, 4022865919,
+STORE, 4022865920, 4022943743,
+STORE, 4022943744, 4023062527,
+STORE, 4023062528, 4023074815,
+STORE, 4023074816, 4023078911,
+STORE, 4023078912, 4023128063,
+STORE, 4023128064, 4023218175,
+STORE, 4023218176, 4023361535,
+STORE, 4023361536, 4023373823,
+STORE, 4023373824, 4023377919,
+STORE, 4023377920, 4023558143,
+STORE, 4023558144, 4023631871,
+STORE, 4023631872, 4023816191,
+STORE, 4023816192, 4023820287,
+STORE, 4023820288, 4023824383,
+STORE, 4023824384, 4023832575,
+STORE, 4023832576, 4024078335,
+STORE, 4024078336, 4024197119,
+STORE, 4024197120, 4024389631,
+STORE, 4024389632, 4024406015,
+STORE, 4024406016, 4024410111,
+STORE, 4024410112, 4024422399,
+STORE, 4024422400, 4024619007,
+STORE, 4024619008, 4024639487,
+STORE, 4024639488, 4024655871,
+STORE, 4024655872, 4024664063,
+STORE, 4024664064, 4024668159,
+STORE, 4024668160, 4024676351,
+STORE, 4024676352, 4024905727,
+STORE, 4024905728, 4024909823,
+STORE, 4024909824, 4024918015,
+STORE, 4024918016, 4024922111,
+STORE, 4024922112, 4024930303,
+STORE, 4024930304, 4025110527,
+STORE, 4025110528, 4025176063,
+STORE, 4025176064, 4025208831,
+STORE, 4025208832, 4025212927,
+STORE, 4025212928, 4025217023,
+STORE, 4025217024, 4025348095,
+STORE, 4025348096, 4025372671,
+STORE, 4025372672, 4025458687,
+STORE, 4025458688, 4025466879,
+STORE, 4025466880, 4025565183,
+STORE, 4025565184, 4025757695,
+STORE, 4025757696, 4026249215,
+STORE, 4026249216, 4026261503,
+STORE, 4026261504, 4026265599,
+STORE, 4026265600, 4026269695,
+STORE, 4026269696, 4026302463,
+STORE, 4026302464, 4026306559,
+STORE, 4026306560, 4026314751,
+STORE, 4026314752, 4026318847,
+STORE, 4026318848, 4026322943,
+STORE, 4026322944, 4026327039,
+STORE, 4026327040, 4026654719,
+STORE, 4026654720, 4026671103,
+STORE, 4026671104, 4026720255,
+STORE, 4026720256, 4026724351,
+STORE, 4026724352, 4026728447,
+STORE, 4026728448, 4026732543,
+STORE, 4026732544, 4026863615,
+STORE, 4026863616, 4027027455,
+STORE, 4027027456, 4027031551,
+STORE, 4027031552, 4027514879,
+STORE, 4027514880, 4027531263,
+STORE, 4027531264, 4027535359,
+STORE, 4027535360, 4027539455,
+STORE, 4027539456, 4027785215,
+STORE, 4027785216, 4027789311,
+STORE, 4027789312, 4027793407,
+STORE, 4027793408, 4027797503,
+STORE, 4027797504, 4027863039,
+STORE, 4027863040, 4027899903,
+STORE, 4027899904, 4027949055,
+STORE, 4027949056, 4027957247,
+STORE, 4027957248, 4027961343,
+STORE, 4027961344, 4027965439,
+STORE, 4027965440, 4028194815,
+STORE, 4028194816, 4028252159,
+STORE, 4028252160, 4028338175,
+STORE, 4028338176, 4028350463,
+STORE, 4028350464, 4028354559,
+STORE, 4028354560, 4028452863,
+STORE, 4028452864, 4028489727,
+STORE, 4028489728, 4028530687,
+STORE, 4028530688, 4028538879,
+STORE, 4028538880, 4028542975,
+STORE, 4028542976, 4028551167,
+STORE, 4028551168, 4028665855,
+STORE, 4028665856, 4029349887,
+STORE, 4029349888, 4030468095,
+STORE, 4030468096, 4030513151,
+STORE, 4030513152, 4030517247,
+STORE, 4030517248, 4030525439,
+STORE, 4030525440, 4030529535,
+STORE, 4030529536, 4030758911,
+STORE, 4030758912, 4030828543,
+STORE, 4030828544, 4030943231,
+STORE, 4030943232, 4030951423,
+STORE, 4030951424, 4030955519,
+STORE, 4030955520, 4030967807,
+STORE, 4030967808, 4031131647,
+STORE, 4031131648, 4031135743,
+STORE, 4031135744, 4031139839,
+STORE, 4031139840, 4031148031,
+STORE, 4031148032, 4031152127,
+STORE, 4031152128, 4031160319,
+STORE, 4031160320, 4031504383,
+STORE, 4031504384, 4031598591,
+STORE, 4031598592, 4031754239,
+STORE, 4031754240, 4031766527,
+STORE, 4031766528, 4031770623,
+STORE, 4031770624, 4031774719,
+STORE, 4031774720, 4031782911,
+STORE, 4031782912, 4031799295,
+STORE, 4031799296, 4031856639,
+STORE, 4031856640, 4031983615,
+STORE, 4031983616, 4031987711,
+STORE, 4031987712, 4031991807,
+STORE, 4031991808, 4032270335,
+STORE, 4032270336, 4032274431,
+STORE, 4032274432, 4032282623,
+STORE, 4032282624, 4032286719,
+STORE, 4032286720, 4032290815,
+STORE, 4032290816, 4032389119,
+STORE, 4032389120, 4032397311,
+STORE, 4032397312, 4032405503,
+STORE, 4032405504, 4032413695,
+STORE, 4032413696, 4032417791,
+STORE, 4032417792, 4032565247,
+STORE, 4032565248, 4032593919,
+STORE, 4032593920, 4032737279,
+STORE, 4032737280, 4032741375,
+STORE, 4032741376, 4032745471,
+STORE, 4032745472, 4032770047,
+STORE, 4032770048, 4032933887,
+STORE, 4032933888, 4032999423,
+STORE, 4032999424, 4033032191,
+STORE, 4033032192, 4033036287,
+STORE, 4033036288, 4033040383,
+STORE, 4033040384, 4033105919,
+STORE, 4033105920, 4033396735,
+STORE, 4033396736, 4033822719,
+STORE, 4033822720, 4033839103,
+STORE, 4033839104, 4033843199,
+STORE, 4033843200, 4033851391,
+STORE, 4033851392, 4033863679,
+STORE, 4033863680, 4033880063,
+STORE, 4033880064, 4033933311,
+STORE, 4033933312, 4034023423,
+STORE, 4034023424, 4034031615,
+STORE, 4034031616, 4034035711,
+STORE, 4034035712, 4034043903,
+STORE, 4034043904, 4034142207,
+STORE, 4034142208, 4034191359,
+STORE, 4034191360, 4034260991,
+STORE, 4034260992, 4034269183,
+STORE, 4034269184, 4034273279,
+STORE, 4034273280, 4034281471,
+STORE, 4034281472, 4034412543,
+STORE, 4034412544, 4034445311,
+STORE, 4034445312, 4034490367,
+STORE, 4034490368, 4034494463,
+STORE, 4034494464, 4034498559,
+STORE, 4034498560, 4034662399,
+STORE, 4034662400, 4034666495,
+STORE, 4034666496, 4034670591,
+STORE, 4034670592, 4034674687,
+STORE, 4034674688, 4034678783,
+STORE, 4034678784, 4034682879,
+STORE, 4034682880, 4034781183,
+STORE, 4034781184, 4035043327,
+STORE, 4035043328, 4035047423,
+STORE, 4035047424, 4035055615,
+STORE, 4035055616, 4035059711,
+STORE, 4035059712, 4035063807,
+STORE, 4035063808, 4035067903,
+STORE, 4035067904, 4035100671,
+STORE, 4035100672, 4035375103,
+STORE, 4035375104, 4035383295,
+STORE, 4035383296, 4035395583,
+STORE, 4035395584, 4035399679,
+STORE, 4035399680, 4035403775,
+STORE, 4035403776, 4035407871,
+STORE, 4035407872, 4035411967,
+STORE, 4035411968, 4035477503,
+STORE, 4035477504, 4035608575,
+STORE, 4035608576, 4035641343,
+STORE, 4035641344, 4035682303,
+STORE, 4035682304, 4035686399,
+STORE, 4035686400, 4035690495,
+STORE, 4035690496, 4035694591,
+STORE, 4035694592, 4035743743,
+STORE, 4035743744, 4035784703,
+STORE, 4035784704, 4035829759,
+STORE, 4035829760, 4035837951,
+STORE, 4035837952, 4035842047,
+STORE, 4035842048, 4035846143,
+STORE, 4035846144, 4035850239,
+STORE, 4035850240, 4036001791,
+STORE, 4036001792, 4036005887,
+STORE, 4036005888, 4036214783,
+STORE, 4036214784, 4036218879,
+STORE, 4036218880, 4036603903,
+STORE, 4036603904, 4036648959,
+STORE, 4036648960, 4036653055,
+STORE, 4036653056, 4036657151,
+STORE, 4036657152, 4036665343,
+STORE, 4036665344, 4036780031,
+STORE, 4036780032, 4036829183,
+STORE, 4036829184, 4036984831,
+STORE, 4036984832, 4036993023,
+STORE, 4036993024, 4036997119,
+STORE, 4036997120, 4037001215,
+STORE, 4037001216, 4037009407,
+STORE, 4037009408, 4037025791,
+STORE, 4037025792, 4037095423,
+STORE, 4037095424, 4037181439,
+STORE, 4037181440, 4037193727,
+STORE, 4037193728, 4037197823,
+STORE, 4037197824, 4037206015,
+STORE, 4037206016, 4037320703,
+STORE, 4037320704, 4037337087,
+STORE, 4037337088, 4037349375,
+STORE, 4037349376, 4037357567,
+STORE, 4037357568, 4037361663,
+STORE, 4037369856, 4037386239,
+STORE, 4037386240, 4037672959,
+STORE, 4037672960, 4037689343,
+STORE, 4037689344, 4037730303,
+STORE, 4037730304, 4037734399,
+STORE, 4037734400, 4037738495,
+STORE, 4037738496, 4037742591,
+STORE, 4037742592, 4037758975,
+STORE, 4037758976, 4037890047,
+STORE, 4037890048, 4037931007,
+STORE, 4037931008, 4037976063,
+STORE, 4037976064, 4037984255,
+STORE, 4037984256, 4037988351,
+STORE, 4037988352, 4038053887,
+STORE, 4038053888, 4038184959,
+STORE, 4038184960, 4038189055,
+STORE, 4038189056, 4038197247,
+STORE, 4038197248, 4038201343,
+STORE, 4038201344, 4038205439,
+STORE, 4038205440, 4038209535,
+STORE, 4038217728, 4038250495,
+STORE, 4038250496, 4038512639,
+STORE, 4038512640, 4038516735,
+STORE, 4038516736, 4038520831,
+STORE, 4038520832, 4038524927,
+STORE, 4038524928, 4038529023,
+STORE, 4038529024, 4038533119,
+STORE, 4038541312, 4038623231,
+STORE, 4038623232, 4038754303,
+STORE, 4038754304, 4038885375,
+STORE, 4038885376, 4038889471,
+STORE, 4038897664, 4038963199,
+STORE, 4038963200, 4038967295,
+STORE, 4038967296, 4038983679,
+STORE, 4038983680, 4039114751,
+STORE, 4039114752, 4039245823,
+STORE, 4039245824, 4039376895,
+STORE, 4039376896, 4040687615,
+STORE, 4040687616, 4040691711,
+STORE, 4040691712, 4040806399,
+STORE, 4040806400, 4040937471,
+STORE, 4040937472, 4040941567,
+STORE, 4040945664, 4040949759,
+STORE, 4040949760, 4041080831,
+STORE, 4041080832, 4041211903,
+STORE, 4041211904, 4043046911,
+STORE, 4043046912, 4043051007,
+STORE, 4043051008, 4043055103,
+STORE, 4043055104, 4043137023,
+STORE, 4043137024, 4043141119,
+STORE, 4043141120, 4043145215,
+STORE, 4043145216, 4043153407,
+STORE, 4043153408, 4043186175,
+STORE, 4043186176, 4043317247,
+STORE, 4043317248, 4043448319,
+STORE, 4043448320, 4043579391,
+STORE, 4043579392, 4043583487,
+STORE, 4043583488, 4043599871,
+STORE, 4043599872, 4043661311,
+STORE, 4043661312, 4043792383,
+STORE, 4043792384, 4043796479,
+STORE, 4043796480, 4043800575,
+STORE, 4043800576, 4043816959,
+STORE, 4043816960, 4043821055,
+STORE, 4043821056, 4043825151,
+STORE, 4043825152, 4043829247,
+STORE, 4043829248, 4043833343,
+STORE, 4043833344, 4047241215,
+STORE, 4047241216, 4047249407,
+STORE, 4047249408, 4047253503,
+STORE, 4047253504, 4047323135,
+STORE, 4047323136, 4047327231,
+STORE, 4047327232, 4047458303,
+STORE, 4047458304, 4047589375,
+STORE, 4047589376, 4047720447,
+STORE, 4047720448, 4047773695,
+STORE, 4047773696, 4047790079,
+STORE, 4047790080, 4047921151,
+STORE, 4047921152, 4048052223,
+STORE, 4048052224, 4048183295,
+STORE, 4048183296, 4049002495,
+STORE, 4049002496, 4049133567,
+STORE, 4049133568, 4049154047,
+STORE, 4049154048, 4049158143,
+STORE, 4049158144, 4049162239,
+STORE, 4049162240, 4049166335,
+STORE, 4049166336, 4049174527,
+STORE, 4049174528, 4049182719,
+STORE, 4049182720, 4049186815,
+STORE, 4049186816, 4049190911,
+STORE, 4049190912, 4049195007,
+STORE, 4049195008, 4049203199,
+STORE, 4049203200, 4049207295,
+STORE, 4049207296, 4049211391,
+STORE, 4049211392, 4049215487,
+STORE, 4049215488, 4049219583,
+STORE, 4049219584, 4049227775,
+STORE, 4049227776, 4049231871,
+STORE, 4049231872, 4049235967,
+STORE, 4049235968, 4049244159,
+STORE, 4049244160, 4049248255,
+STORE, 4049248256, 4049252351,
+STORE, 4049252352, 4049256447,
+STORE, 4049256448, 4049268735,
+STORE, 4049268736, 4049272831,
+STORE, 4049272832, 4049313791,
+STORE, 4049313792, 4049723391,
+STORE, 4049723392, 4049727487,
+STORE, 4049727488, 4049858559,
+STORE, 4049858560, 4049989631,
+STORE, 4049989632, 4049993727,
+STORE, 4049993728, 4050026495,
+STORE, 4050026496, 4050030591,
+STORE, 4050030592, 4050161663,
+STORE, 4050161664, 4050169855,
+STORE, 4050169856, 4050223103,
+STORE, 4050223104, 4050632703,
+STORE, 4050632704, 4050636799,
+STORE, 4050636800, 4050640895,
+STORE, 4050640896, 4050644991,
+STORE, 4050644992, 4050661375,
+STORE, 4050661376, 4050665471,
+STORE, 4050665472, 4050673663,
+STORE, 4050673664, 4050677759,
+STORE, 4050677760, 4050694143,
+STORE, 4050694144, 4050702335,
+STORE, 4050702336, 4050956287,
+STORE, 4050956288, 4051963903,
+STORE, 4051963904, 4051980287,
+STORE, 4051980288, 4051988479,
+STORE, 4051988480, 4052000767,
+STORE, 4052000768, 4052004863,
+STORE, 4052004864, 4052029439,
+STORE, 4284014592, 4284018687,
+STORE, 4284018688, 4292403199,
+SNULL, 4041080832, 4041211903,
+SNULL, 3795763200, 3795894271,
+STORE, 3629522944, 3696631807,
+SNULL, 3663077375, 3696631807,
+STORE, 3629522944, 3663077375,
+STORE, 3663077376, 3696631807,
+SNULL, 3663077376, 3696631807,
+STORE, 3663077376, 3696631807,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3626471424, 3627524095,
+SNULL, 3626471424, 3626475519,
+STORE, 3626475520, 3627524095,
+STORE, 3626471424, 3626475519,
+SNULL, 3627519999, 3627524095,
+STORE, 3626475520, 3627519999,
+STORE, 3627520000, 3627524095,
+STORE, 3625418752, 3626475519,
+SNULL, 3625418752, 3625422847,
+STORE, 3625422848, 3626475519,
+STORE, 3625418752, 3625422847,
+SNULL, 3626467327, 3626475519,
+STORE, 3625422848, 3626467327,
+STORE, 3626467328, 3626475519,
+STORE, 3624366080, 3625422847,
+SNULL, 3624366080, 3624370175,
+STORE, 3624370176, 3625422847,
+STORE, 3624366080, 3624370175,
+SNULL, 3625414655, 3625422847,
+STORE, 3624370176, 3625414655,
+STORE, 3625414656, 3625422847,
+STORE, 4041191424, 4041211903,
+SNULL, 4041195519, 4041211903,
+STORE, 4041191424, 4041195519,
+STORE, 4041195520, 4041211903,
+STORE, 4041170944, 4041191423,
+SNULL, 4041175039, 4041191423,
+STORE, 4041170944, 4041175039,
+STORE, 4041175040, 4041191423,
+SNULL, 3625426943, 3626467327,
+STORE, 3625422848, 3625426943,
+STORE, 3625426944, 3626467327,
+STORE, 4041162752, 4041170943,
+SNULL, 3626479615, 3627519999,
+STORE, 3626475520, 3626479615,
+STORE, 3626479616, 3627519999,
+STORE, 4041154560, 4041162751,
+STORE, 4041154560, 4041170943,
+STORE, 4041134080, 4041154559,
+SNULL, 4041138175, 4041154559,
+STORE, 4041134080, 4041138175,
+STORE, 4041138176, 4041154559,
+SNULL, 3624374271, 3625414655,
+STORE, 3624370176, 3624374271,
+STORE, 3624374272, 3625414655,
+STORE, 4041125888, 4041134079,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+STORE, 3487174656, 3487584255,
+STORE, 4041121792, 4041125887,
+SNULL, 4041121792, 4041125887,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 3487174656, 3487584255,
+STORE, 3222274048, 3223326719,
+SNULL, 3222274048, 3222278143,
+STORE, 3222278144, 3223326719,
+STORE, 3222274048, 3222278143,
+SNULL, 3223322623, 3223326719,
+STORE, 3222278144, 3223322623,
+STORE, 3223322624, 3223326719,
+STORE, 3221221376, 3222278143,
+SNULL, 3221221376, 3221225471,
+STORE, 3221225472, 3222278143,
+STORE, 3221221376, 3221225471,
+SNULL, 3222269951, 3222278143,
+STORE, 3221225472, 3222269951,
+STORE, 3222269952, 3222278143,
+STORE, 3220168704, 3221225471,
+SNULL, 3220168704, 3220172799,
+STORE, 3220172800, 3221225471,
+STORE, 3220168704, 3220172799,
+SNULL, 3221217279, 3221225471,
+STORE, 3220172800, 3221217279,
+STORE, 3221217280, 3221225471,
+STORE, 4041117696, 4041125887,
+STORE, 4041117696, 4041134079,
+STORE, 3219083264, 3220172799,
+SNULL, 3219083264, 3219087359,
+STORE, 3219087360, 3220172799,
+STORE, 3219083264, 3219087359,
+SNULL, 3220164607, 3220172799,
+STORE, 3219087360, 3220164607,
+STORE, 3220164608, 3220172799,
+STORE, 4041109504, 4041117695,
+STORE, 4041109504, 4041134079,
+STORE, 3217997824, 3219087359,
+SNULL, 3217997824, 3218001919,
+STORE, 3218001920, 3219087359,
+STORE, 3217997824, 3218001919,
+SNULL, 3219079167, 3219087359,
+STORE, 3218001920, 3219079167,
+STORE, 3219079168, 3219087359,
+STORE, 4041101312, 4041109503,
+STORE, 4041101312, 4041134079,
+STORE, 3216912384, 3218001919,
+SNULL, 3216912384, 3216916479,
+STORE, 3216916480, 3218001919,
+STORE, 3216912384, 3216916479,
+SNULL, 3217993727, 3218001919,
+STORE, 3216916480, 3217993727,
+STORE, 3217993728, 3218001919,
+STORE, 4041093120, 4041101311,
+STORE, 4041093120, 4041134079,
+STORE, 3215826944, 3216916479,
+SNULL, 3215826944, 3215831039,
+STORE, 3215831040, 3216916479,
+STORE, 3215826944, 3215831039,
+SNULL, 3216908287, 3216916479,
+STORE, 3215831040, 3216908287,
+STORE, 3216908288, 3216916479,
+STORE, 4016779264, 4016799743,
+SNULL, 4016783359, 4016799743,
+STORE, 4016779264, 4016783359,
+STORE, 4016783360, 4016799743,
+STORE, 4016758784, 4016779263,
+SNULL, 4016762879, 4016779263,
+STORE, 4016758784, 4016762879,
+STORE, 4016762880, 4016779263,
+SNULL, 3222282239, 3223322623,
+STORE, 3222278144, 3222282239,
+STORE, 3222282240, 3223322623,
+STORE, 4041084928, 4041093119,
+STORE, 4041084928, 4041134079,
+SNULL, 3221229567, 3222269951,
+STORE, 3221225472, 3221229567,
+STORE, 3221229568, 3222269951,
+STORE, 4015644672, 4015665151,
+STORE, 4038889472, 4038897663,
+SNULL, 4015648767, 4015665151,
+STORE, 4015644672, 4015648767,
+STORE, 4015648768, 4015665151,
+STORE, 4015624192, 4015644671,
+SNULL, 4015628287, 4015644671,
+STORE, 4015624192, 4015628287,
+STORE, 4015628288, 4015644671,
+SNULL, 3219091455, 3220164607,
+STORE, 3219087360, 3219091455,
+STORE, 3219091456, 3220164607,
+STORE, 4015603712, 4015624191,
+SNULL, 4015607807, 4015624191,
+STORE, 4015603712, 4015607807,
+STORE, 4015607808, 4015624191,
+SNULL, 3218006015, 3219079167,
+STORE, 3218001920, 3218006015,
+STORE, 3218006016, 3219079167,
+STORE, 3949674496, 3949694975,
+SNULL, 3949678591, 3949694975,
+STORE, 3949674496, 3949678591,
+STORE, 3949678592, 3949694975,
+SNULL, 3216920575, 3217993727,
+STORE, 3216916480, 3216920575,
+STORE, 3216920576, 3217993727,
+STORE, 3948924928, 3948945407,
+SNULL, 3948929023, 3948945407,
+STORE, 3948924928, 3948929023,
+STORE, 3948929024, 3948945407,
+SNULL, 3215835135, 3216908287,
+STORE, 3215831040, 3215835135,
+STORE, 3215835136, 3216908287,
+SNULL, 3220176895, 3221217279,
+STORE, 3220172800, 3220176895,
+STORE, 3220176896, 3221217279,
+STORE, 3214786560, 3215826943,
+STORE, 3213733888, 3214786559,
+SNULL, 3213733888, 3213737983,
+STORE, 3213737984, 3214786559,
+STORE, 3213733888, 3213737983,
+SNULL, 3214782463, 3214786559,
+STORE, 3213737984, 3214782463,
+STORE, 3214782464, 3214786559,
+STORE, 4038533120, 4038541311,
+STORE, 3948421120, 3948441599,
+SNULL, 3948425215, 3948441599,
+STORE, 3948421120, 3948425215,
+STORE, 3948425216, 3948441599,
+SNULL, 3213742079, 3214782463,
+STORE, 3213737984, 3213742079,
+STORE, 3213742080, 3214782463,
+STORE, 4038209536, 4038217727,
+STORE, 3212681216, 3213737983,
+SNULL, 3212681216, 3212685311,
+STORE, 3212685312, 3213737983,
+STORE, 3212681216, 3212685311,
+SNULL, 3213729791, 3213737983,
+STORE, 3212685312, 3213729791,
+STORE, 3213729792, 3213737983,
+STORE, 3795763200, 3795894271,
+STORE, 3946872832, 3946893311,
+SNULL, 3946876927, 3946893311,
+STORE, 3946872832, 3946876927,
+STORE, 3946876928, 3946893311,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+STORE, 3487174656, 3487584255,
+SNULL, 3212689407, 3213729791,
+STORE, 3212685312, 3212689407,
+STORE, 3212689408, 3213729791,
+STORE, 4041080832, 4041084927,
+STORE, 4040941568, 4040945663,
+STORE, 4037361664, 4037369855,
+STORE, 4000817152, 4000821247,
+STORE, 3999440896, 3999444991,
+STORE, 3212161024, 3212681215,
+SNULL, 3212161024, 3212439551,
+STORE, 3212439552, 3212681215,
+STORE, 3212161024, 3212439551,
+SNULL, 3212161024, 3212439551,
+SNULL, 3212464127, 3212681215,
+STORE, 3212439552, 3212464127,
+STORE, 3212464128, 3212681215,
+SNULL, 3212464128, 3212681215,
+SNULL, 3212439552, 3212451839,
+STORE, 3212451840, 3212464127,
+STORE, 3212439552, 3212451839,
+SNULL, 3212439552, 3212451839,
+STORE, 3212439552, 3212451839,
+SNULL, 3212451840, 3212455935,
+STORE, 3212455936, 3212464127,
+STORE, 3212451840, 3212455935,
+SNULL, 3212451840, 3212455935,
+STORE, 3212451840, 3212455935,
+SNULL, 3212455936, 3212460031,
+STORE, 3212460032, 3212464127,
+STORE, 3212455936, 3212460031,
+SNULL, 3212455936, 3212460031,
+STORE, 3212455936, 3212460031,
+SNULL, 3212460032, 3212464127,
+STORE, 3212460032, 3212464127,
+STORE, 3997679616, 3997683711,
+SNULL, 4049235968, 4049240063,
+STORE, 4049240064, 4049244159,
+STORE, 4049235968, 4049240063,
+SNULL, 4049240064, 4049244159,
+STORE, 4049240064, 4049244159,
+SNULL, 3997679616, 3997683711,
+SNULL, 3999440896, 3999444991,
+SNULL, 4000817152, 4000821247,
+SNULL, 4040941568, 4040945663,
+SNULL, 4041080832, 4041084927,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 3487174656, 3487584255,
+SNULL, 3212451840, 3212455935,
+STORE, 3212451840, 3212455935,
+STORE, 4041080832, 4041084927,
+STORE, 3623890944, 3624169471,
+SNULL, 4041080832, 4041084927,
+STORE, 4041080832, 4041084927,
+SNULL, 4041080832, 4041084927,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+STORE, 4041080832, 4041084927,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+STORE, 3211386880, 3212439551,
+SNULL, 3211386880, 3211390975,
+STORE, 3211390976, 3212439551,
+STORE, 3211386880, 3211390975,
+SNULL, 3212435455, 3212439551,
+STORE, 3211390976, 3212435455,
+STORE, 3212435456, 3212439551,
+STORE, 4040941568, 4040945663,
+STORE, 3937169408, 3937189887,
+STORE, 3623485440, 3623616511,
+SNULL, 717225983, 1388314623,
+STORE, 314572800, 717225983,
+STORE, 717225984, 1388314623,
+SNULL, 717225984, 1388314623,
+STORE, 3937112064, 3937132543,
+SNULL, 3937116159, 3937132543,
+STORE, 3937112064, 3937116159,
+STORE, 3937116160, 3937132543,
+SNULL, 3211395071, 3212435455,
+STORE, 3211390976, 3211395071,
+STORE, 3211395072, 3212435455,
+STORE, 4000817152, 4000821247,
+STORE, 3974823936, 3974832127,
+STORE, 3595284480, 3595431935,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+STORE, 3487174656, 3487584255,
+STORE, 3999440896, 3999444991,
+STORE, 3997679616, 3997683711,
+STORE, 3996295168, 3996299263,
+STORE, 3996090368, 3996094463,
+STORE, 3210866688, 3211386879,
+SNULL, 3210866688, 3211001855,
+STORE, 3211001856, 3211386879,
+STORE, 3210866688, 3211001855,
+SNULL, 3210866688, 3211001855,
+SNULL, 3211038719, 3211386879,
+STORE, 3211001856, 3211038719,
+STORE, 3211038720, 3211386879,
+SNULL, 3211038720, 3211386879,
+SNULL, 3211001856, 3211022335,
+STORE, 3211022336, 3211038719,
+STORE, 3211001856, 3211022335,
+SNULL, 3211001856, 3211022335,
+STORE, 3211001856, 3211022335,
+SNULL, 3211022336, 3211030527,
+STORE, 3211030528, 3211038719,
+STORE, 3211022336, 3211030527,
+SNULL, 3211022336, 3211030527,
+STORE, 3211022336, 3211030527,
+SNULL, 3211030528, 3211034623,
+STORE, 3211034624, 3211038719,
+STORE, 3211030528, 3211034623,
+SNULL, 3211030528, 3211034623,
+STORE, 3211030528, 3211034623,
+SNULL, 3211034624, 3211038719,
+STORE, 3211034624, 3211038719,
+STORE, 3994906624, 3994910719,
+SNULL, 4049240064, 4049244159,
+STORE, 4049240064, 4049244159,
+SNULL, 3994906624, 3994910719,
+SNULL, 3996090368, 3996094463,
+SNULL, 3996295168, 3996299263,
+SNULL, 3997679616, 3997683711,
+SNULL, 3999440896, 3999444991,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 3487174656, 3487584255,
+SNULL, 3211022336, 3211030527,
+STORE, 3211022336, 3211030527,
+STORE, 3999440896, 3999444991,
+STORE, 3210199040, 3211001855,
+SNULL, 3999440896, 3999444991,
+STORE, 3999440896, 3999444991,
+SNULL, 3999440896, 3999444991,
+STORE, 3594821632, 3594952703,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 1914101759, 1969434623,
+STORE, 1914097664, 1914101759,
+STORE, 1914101760, 1969434623,
+STORE, 3567108096, 3567239167,
+STORE, 3973832704, 3973840895,
+STORE, 3209113600, 3210199039,
+SNULL, 3209113600, 3209117695,
+STORE, 3209117696, 3210199039,
+STORE, 3209113600, 3209117695,
+SNULL, 3210194943, 3210199039,
+STORE, 3209117696, 3210194943,
+STORE, 3210194944, 3210199039,
+STORE, 3935858688, 3935879167,
+SNULL, 3935862783, 3935879167,
+STORE, 3935858688, 3935862783,
+STORE, 3935862784, 3935879167,
+SNULL, 3209121791, 3210194943,
+STORE, 3209117696, 3209121791,
+STORE, 3209121792, 3210194943,
+STORE, 3528749056, 3528880127,
+STORE, 3968200704, 3968208895,
+STORE, 3208028160, 3209117695,
+SNULL, 3208028160, 3208032255,
+STORE, 3208032256, 3209117695,
+STORE, 3208028160, 3208032255,
+SNULL, 3209109503, 3209117695,
+STORE, 3208032256, 3209109503,
+STORE, 3209109504, 3209117695,
+STORE, 3888123904, 3888144383,
+SNULL, 3888127999, 3888144383,
+STORE, 3888123904, 3888127999,
+STORE, 3888128000, 3888144383,
+SNULL, 3208036351, 3209109503,
+STORE, 3208032256, 3208036351,
+STORE, 3208036352, 3209109503,
+SNULL, 3968200704, 3968208895,
+SNULL, 3888123904, 3888144383,
+SNULL, 3209109504, 3209113599,
+STORE, 3209113600, 3209117695,
+STORE, 3209109504, 3209113599,
+SNULL, 3208028160, 3209113599,
+STORE, 3208060928, 3209117695,
+SNULL, 3208060928, 3208065023,
+STORE, 3208065024, 3209117695,
+STORE, 3208060928, 3208065023,
+SNULL, 3209109503, 3209117695,
+STORE, 3208065024, 3209109503,
+STORE, 3209109504, 3209117695,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3888123904, 3888144383,
+SNULL, 3888127999, 3888144383,
+STORE, 3888123904, 3888127999,
+STORE, 3888128000, 3888144383,
+SNULL, 3208069119, 3209109503,
+STORE, 3208065024, 3208069119,
+STORE, 3208069120, 3209109503,
+STORE, 3968200704, 3968208895,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3527778304, 3527909375,
+STORE, 3999440896, 3999444991,
+STORE, 3997679616, 3997683711,
+STORE, 1914097664, 1914105855,
+STORE, 1914105856, 1969434623,
+STORE, 3957583872, 3957592063,
+STORE, 3206975488, 3208065023,
+SNULL, 3206975488, 3206979583,
+STORE, 3206979584, 3208065023,
+STORE, 3206975488, 3206979583,
+SNULL, 3208056831, 3208065023,
+STORE, 3206979584, 3208056831,
+STORE, 3208056832, 3208065023,
+STORE, 3956736000, 3956744191,
+STORE, 3205890048, 3206979583,
+SNULL, 3205890048, 3205894143,
+STORE, 3205894144, 3206979583,
+STORE, 3205890048, 3205894143,
+SNULL, 3206971391, 3206979583,
+STORE, 3205894144, 3206971391,
+STORE, 3206971392, 3206979583,
+STORE, 3806101504, 3806121983,
+SNULL, 3806105599, 3806121983,
+STORE, 3806101504, 3806105599,
+STORE, 3806105600, 3806121983,
+SNULL, 3206983679, 3208056831,
+STORE, 3206979584, 3206983679,
+STORE, 3206983680, 3208056831,
+STORE, 3806081024, 3806101503,
+SNULL, 3806085119, 3806101503,
+STORE, 3806081024, 3806085119,
+STORE, 3806085120, 3806101503,
+SNULL, 3205898239, 3206971391,
+STORE, 3205894144, 3205898239,
+STORE, 3205898240, 3206971391,
+STORE, 3956015104, 3956023295,
+STORE, 3204804608, 3205894143,
+SNULL, 3204804608, 3204808703,
+STORE, 3204808704, 3205894143,
+STORE, 3204804608, 3204808703,
+SNULL, 3205885951, 3205894143,
+STORE, 3204808704, 3205885951,
+STORE, 3205885952, 3205894143,
+STORE, 3803471872, 3803492351,
+STORE, 3803451392, 3803471871,
+STORE, 3803451392, 3803492351,
+SNULL, 3957583872, 3957592063,
+SNULL, 3806101504, 3806121983,
+SNULL, 3206975487, 3206979583,
+STORE, 3206971392, 3206975487,
+STORE, 3206975488, 3206979583,
+SNULL, 3208056832, 3208060927,
+STORE, 3208060928, 3208065023,
+STORE, 3208056832, 3208060927,
+SNULL, 3206975488, 3208060927,
+STORE, 3801845760, 3801878527,
+STORE, 3806101504, 3806121983,
+SNULL, 3806105599, 3806121983,
+STORE, 3806101504, 3806105599,
+STORE, 3806105600, 3806121983,
+SNULL, 3204812799, 3205885951,
+STORE, 3204808704, 3204812799,
+STORE, 3204812800, 3205885951,
+STORE, 1914097664, 1914109951,
+STORE, 1914109952, 1969434623,
+STORE, 3957583872, 3957592063,
+STORE, 3206971392, 3208065023,
+SNULL, 3206971392, 3206979583,
+STORE, 3206979584, 3208065023,
+STORE, 3206971392, 3206979583,
+SNULL, 3208056831, 3208065023,
+STORE, 3206979584, 3208056831,
+STORE, 3208056832, 3208065023,
+STORE, 3801825280, 3801845759,
+SNULL, 3801829375, 3801845759,
+STORE, 3801825280, 3801829375,
+STORE, 3801829376, 3801845759,
+SNULL, 3206983679, 3208056831,
+STORE, 3206979584, 3206983679,
+STORE, 3206983680, 3208056831,
+STORE, 3202707456, 3204804607,
+SNULL, 3202707456, 3204804607,
+STORE, 3202707456, 3204804607,
+STORE, 3200610304, 3202707455,
+SNULL, 3202707456, 3204804607,
+SNULL, 3200610304, 3202707455,
+STORE, 3202707456, 3204804607,
+SNULL, 3202707456, 3204804607,
+STORE, 3202707456, 3204804607,
+SNULL, 3202707456, 3204804607,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3527647232, 3527778303,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+STORE, 3487059968, 3487584255,
+SNULL, 3487059968, 3487301631,
+STORE, 3487301632, 3487584255,
+STORE, 3487059968, 3487301631,
+SNULL, 3487059968, 3487301631,
+SNULL, 3487563775, 3487584255,
+STORE, 3487301632, 3487563775,
+STORE, 3487563776, 3487584255,
+SNULL, 3487563776, 3487584255,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3524046848, 3524177919,
+STORE, 3487170560, 3487301631,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3487039488, 3487170559,
+STORE, 3487039488, 3487301631,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3204280320, 3204804607,
+SNULL, 3204280320, 3204448255,
+STORE, 3204448256, 3204804607,
+STORE, 3204280320, 3204448255,
+SNULL, 3204280320, 3204448255,
+SNULL, 3204710399, 3204804607,
+STORE, 3204448256, 3204710399,
+STORE, 3204710400, 3204804607,
+SNULL, 3204710400, 3204804607,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3996295168, 3996299263,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+SNULL, 3996295168, 3996299263,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3486908416, 3487039487,
+STORE, 3486908416, 3487301631,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+STORE, 3201826816, 3202351103,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+SNULL, 3803471871, 3803492351,
+STORE, 3803451392, 3803471871,
+STORE, 3803471872, 3803492351,
+SNULL, 3803471872, 3803492351,
+SNULL, 3803451392, 3803471871,
+STORE, 3798999040, 3799101439,
+SNULL, 3798999040, 3799101439,
+STORE, 3952644096, 3952652287,
+STORE, 3203362816, 3204448255,
+SNULL, 3203362816, 3203366911,
+STORE, 3203366912, 3204448255,
+STORE, 3203362816, 3203366911,
+SNULL, 3204444159, 3204448255,
+STORE, 3203366912, 3204444159,
+STORE, 3204444160, 3204448255,
+STORE, 3803471872, 3803492351,
+SNULL, 3803475967, 3803492351,
+STORE, 3803471872, 3803475967,
+STORE, 3803475968, 3803492351,
+SNULL, 3203371007, 3204444159,
+STORE, 3203366912, 3203371007,
+STORE, 3203371008, 3204444159,
+STORE, 3199729664, 3201826815,
+SNULL, 3199729664, 3201826815,
+STORE, 3199729664, 3201826815,
+SNULL, 3199729664, 3201826815,
+STORE, 3199729664, 3201826815,
+SNULL, 3199729664, 3201826815,
+STORE, 3199729664, 3201826815,
+SNULL, 3199729664, 3201826815,
+STORE, 3199729664, 3201826815,
+SNULL, 3199729664, 3201826815,
+STORE, 3200774144, 3201826815,
+SNULL, 3200774144, 3200778239,
+STORE, 3200778240, 3201826815,
+STORE, 3200774144, 3200778239,
+SNULL, 3201822719, 3201826815,
+STORE, 3200778240, 3201822719,
+STORE, 3201822720, 3201826815,
+STORE, 3803451392, 3803471871,
+SNULL, 3803455487, 3803471871,
+STORE, 3803451392, 3803455487,
+STORE, 3803455488, 3803471871,
+SNULL, 3200782335, 3201822719,
+STORE, 3200778240, 3200782335,
+STORE, 3200782336, 3201822719,
+STORE, 3949666304, 3949674495,
+STORE, 3949408256, 3949416447,
+STORE, 3199688704, 3200778239,
+SNULL, 3199688704, 3199692799,
+STORE, 3199692800, 3200778239,
+STORE, 3199688704, 3199692799,
+SNULL, 3200770047, 3200778239,
+STORE, 3199692800, 3200770047,
+STORE, 3200770048, 3200778239,
+STORE, 3799306240, 3799326719,
+SNULL, 3799310335, 3799326719,
+STORE, 3799306240, 3799310335,
+STORE, 3799310336, 3799326719,
+SNULL, 3199696895, 3200770047,
+STORE, 3199692800, 3199696895,
+STORE, 3199696896, 3200770047,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+STORE, 3799277568, 3799306239,
+SNULL, 3799277568, 3799306239,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+SNULL, 4041162751, 4041170943,
+STORE, 4041154560, 4041162751,
+STORE, 4041162752, 4041170943,
+SNULL, 4041162752, 4041170943,
+SNULL, 4041154560, 4041162751,
+SNULL, 4041191424, 4041211903,
+SNULL, 4041170944, 4041191423,
+SNULL, 3626471423, 3626475519,
+STORE, 3626467328, 3626471423,
+STORE, 3626471424, 3626475519,
+SNULL, 3626471424, 3627524095,
+SNULL, 3625418751, 3625422847,
+STORE, 3625414656, 3625418751,
+STORE, 3625418752, 3625422847,
+SNULL, 3625418752, 3626471423,
+STORE, 3627393024, 3627524095,
+STORE, 3627261952, 3627393023,
+STORE, 3627261952, 3627524095,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+STORE, 3195494400, 3197591551,
+SNULL, 3197591552, 3199688703,
+SNULL, 3195494400, 3197591551,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+STORE, 3195494400, 3197591551,
+SNULL, 3197591552, 3199688703,
+SNULL, 3195494400, 3197591551,
+STORE, 3798999040, 3799101439,
+SNULL, 3798999040, 3799101439,
+/*
+ * mmap: unmapped_area_topdown: ffff9a9f14ddaa80
+ * Gap was found: mt 4041162752 gap_end 4041183232
+ * mmap: window was 4052029440 - 4096 size 28672
+ * mmap: mas.min 4041154560 max 4041191423 mas.last 4041191423
+ * mmap: mas.index 4041162752 align mask 0 offset 0
+ * mmap: rb_find_vma find on 4041162752 => ffff9a9f03d19678 (ffff9a9f03d19678)
+ */
+ };
+
+ static const unsigned long set43[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140734187720704, 140737488351231,
+SNULL, 140734187724800, 140737488351231,
+STORE, 140734187589632, 140734187724799,
+STORE, 4194304, 6443007,
+STORE, 4337664, 6443007,
+STORE, 4194304, 4337663,
+SNULL, 4337664, 6443007,
+STORE, 6430720, 6443007,
+STORE, 206158430208, 206160674815,
+STORE, 206158569472, 206160674815,
+STORE, 206158430208, 206158569471,
+SNULL, 206158569472, 206160674815,
+STORE, 206160662528, 206160670719,
+STORE, 206160670720, 206160674815,
+STORE, 140734188756992, 140734188765183,
+STORE, 140734188740608, 140734188756991,
+STORE, 140501948112896, 140501948116991,
+ };
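+
+ /*
+ * Each setNN above replays a captured trace of tree operations: STORE
+ * writes an entry over [start, last] and SNULL erases the range by
+ * storing NULL. The embedded mmap debug notes show the live VMA
+ * activity that originally triggered each bug.
+ */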
+
+ int count = 0;
+ void *ptr = NULL;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ mt_set_non_kernel(3);
+ check_erase2_testset(mt, set, ARRAY_SIZE(set));
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set2, ARRAY_SIZE(set2));
+ start = 140735933894656;
+ MT_BUG_ON(mt, !!mt_find(mt, &start, 140735933906943UL));
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(2);
+ mt_init_flags(mt, 0);
+ check_erase2_testset(mt, set3, ARRAY_SIZE(set3));
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, 0);
+ check_erase2_testset(mt, set4, ARRAY_SIZE(set4));
+ rcu_read_lock();
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (xa_is_zero(entry))
+ continue;
+ }
+ rcu_read_unlock();
+ rcu_barrier();
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mt_set_non_kernel(100);
+ check_erase2_testset(mt, set5, ARRAY_SIZE(set5));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set6, ARRAY_SIZE(set6));
+ rcu_barrier();
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set7, ARRAY_SIZE(set7));
+ rcu_barrier();
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set8, ARRAY_SIZE(set8));
+ rcu_barrier();
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set9, ARRAY_SIZE(set9));
+ rcu_barrier();
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set10, ARRAY_SIZE(set10));
+ rcu_barrier();
+ mtree_destroy(mt);
+
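+ /*
+ * set11: mas_empty_area_rev() searches downward from the upper limit
+ * for a gap of the requested size; verify the gap found ends at the
+ * expected address.
+ */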
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set11, ARRAY_SIZE(set11));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 12288, 140014592737280, 0x2000);
+ MT_BUG_ON(mt, mas.last != 140014592573439);
+ mtree_destroy(mt);
+
+ mas_reset(&mas);
+ mas.tree = mt;
+ count = 0;
+ mas.index = 0;
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set12, ARRAY_SIZE(set12));
+ rcu_barrier();
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (xa_is_zero(entry))
+ continue;
+ BUG_ON(count > 12);
+ count++;
+ }
+ mtree_destroy(mt);
+
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set13, ARRAY_SIZE(set13));
+ mtree_erase(mt, 140373516443648);
+ rcu_read_lock();
+ mas_empty_area_rev(&mas, 0, 140373518663680, 4096);
+ rcu_read_unlock();
+ mtree_destroy(mt);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set14, ARRAY_SIZE(set14));
+ rcu_barrier();
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set15, ARRAY_SIZE(set15));
+ rcu_barrier();
+ mtree_destroy(mt);
+
+ /* set16 was used to find a bug in limit updating at slot 0. */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set16, ARRAY_SIZE(set16));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 4096, 139921865637888, 0x6000);
+ MT_BUG_ON(mt, mas.last != 139921865547775);
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ /*
+ * set17 found a bug in walking backwards and not counting nulls at
+ * the end. This could cause a gap to be missed if the null had any
+ * size.
+ */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set17, ARRAY_SIZE(set17));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 4096, 139953197334528, 0x1000);
+ MT_BUG_ON(mt, mas.last != 139953197322239);
+/* MT_BUG_ON(mt, mas.index != 139953197318144); */
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ /*
+ * set18 found a bug in walking backwards and not setting the max from
+ * the node, but using the parent node. This was only an issue if the
+ * next slot in the parent had what we needed.
+ */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set18, ARRAY_SIZE(set18));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 4096, 140222972858368, 2215936);
+ MT_BUG_ON(mt, mas.last != 140222968475647);
+ /*MT_BUG_ON(mt, mas.index != 140222966259712); */
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ /*
+ * set19 found 2 bugs in prev.
+ * 1. If we hit root without finding anything, then there was an
+ * infinite loop.
+ * 2. The first ascending wasn't using the correct slot which may have
+ * caused missed entries.
+ */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set19, ARRAY_SIZE(set19));
+ rcu_barrier();
+ mas.index = 140656779083776;
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != xa_mk_value(140656779083776));
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != xa_mk_value(140656766251008));
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ /*
+ * set20 found a bug in mas_may_move_gap due to the slot being
+ * overwritten during the __mas_add operation and setting it to zero.
+ */
+ mt_set_non_kernel(99);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set20, ARRAY_SIZE(set20));
+ rcu_barrier();
+ check_load(mt, 94849009414144, NULL);
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(99);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set21, ARRAY_SIZE(set21));
+ rcu_barrier();
+ mt_validate(mt);
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(999);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set22, ARRAY_SIZE(set22));
+ rcu_barrier();
+ mt_validate(mt);
+ ptr = mtree_load(mt, 140551363362816);
+ MT_BUG_ON(mt, ptr == mtree_load(mt, 140551363420159));
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(99);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set23, ARRAY_SIZE(set23));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(99);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set24, ARRAY_SIZE(set24));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(99);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set25, ARRAY_SIZE(set25));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* Split on NULL followed by delete - causes gap issues. */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set26, ARRAY_SIZE(set26));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 4096, 140109042671616, 409600);
+ MT_BUG_ON(mt, mas.last != 140109040959487);
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* Split on NULL followed by delete - causes gap issues. */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set27, ARRAY_SIZE(set27));
+ rcu_barrier();
+ MT_BUG_ON(mt, NULL != mtree_load(mt, 140415537422336));
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set28, ARRAY_SIZE(set28));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 4096, 139918413357056, 2097152);
+ /* Search for a gap of the requested size, then align it (offset 0) */
+ mas.index = (mas.last + 1 - 2097152 - 0) & (~2093056);
+ MT_BUG_ON(mt, mas.index != 139918401601536);
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* This test found issues with retrying the move of rebalanced nodes,
+ * which caused the incorrect parent pivot to be updated.
+ */
+ mt_set_non_kernel(999);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set29, ARRAY_SIZE(set29));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* This test found issues with deleting all entries in a node that is
+ * surrounded by entries in adjacent nodes, then deleting the entries
+ * that surrounded the emptied node.
+ */
+ mt_set_non_kernel(999);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set30, ARRAY_SIZE(set30));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* This test found an issue when deleting all entries in the end node:
+ * mas_gap incorrectly set next = curr and curr = prev, then moved next
+ * to the left, losing data.
+ */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set31, ARRAY_SIZE(set31));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set32, ARRAY_SIZE(set32));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+/*
+ * mmap: empty_area_topdown: ffff88821c9cb600 Gap was found:
+ * mt 140582827569152 gap_end 140582869532672
+ * mmap: window was 140583656296448 - 4096 size 134217728
+ * mmap: mas.min 94133881868288 max 140582961786879 mas.last 140582961786879
+ * mmap: mas.index 140582827569152 align mask 0 offset 0
+ * mmap: rb_find_vma find on
+ * 140582827569152 => ffff88821c5bad00 (ffff88821c5bad00)
+ */
+
+ /* move gap failed due to an entirely empty node */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set33, ARRAY_SIZE(set33));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 4096, 140583656296448, 134217728);
+ MT_BUG_ON(mt, mas.last != 140583003750399);
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /*
+ * Incorrect gap in tree caused by mas_prev not setting the limits
+ * correctly while walking down.
+ */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set34, ARRAY_SIZE(set34));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* Empty leaf at the end of a parent caused incorrect gap. */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set35, ARRAY_SIZE(set35));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(99);
+ /* Empty leaf at the end of a parent caused incorrect gap. */
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set36, ARRAY_SIZE(set36));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set37, ARRAY_SIZE(set37));
+ rcu_barrier();
+ MT_BUG_ON(mt, NULL != mtree_load(mt, 94637033459712));
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set38, ARRAY_SIZE(set38));
+ rcu_barrier();
+ MT_BUG_ON(mt, NULL != mtree_load(mt, 94637033459712));
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set39, ARRAY_SIZE(set39));
+ rcu_barrier();
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set40, ARRAY_SIZE(set40));
+ rcu_barrier();
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set41, ARRAY_SIZE(set41));
+ rcu_barrier();
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* move gap failed due to an entirely empty node. */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set42, ARRAY_SIZE(set42));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 4096, 4052029440, 28672);
+ MT_BUG_ON(mt, mas.last != 4041211903);
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* gap calc off by one */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set43, ARRAY_SIZE(set43));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+}
+#endif
+
+/* End of VM testcases */
+
+/* RCU stress testing */
+
+/* RCU reader helper function */
+static void rcu_reader_register(struct rcu_test_struct2 *test)
+{
+ rcu_register_thread();
+ uatomic_inc(&test->thread_count);
+
+ while (!test->start)
+ usleep(test->pause * 100);
+}
+
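+/*
+ * Each reader watches the block of ten ranges starting at its id. The
+ * mod/del/flip/add offsets pick which entries in the block the writer
+ * will modify, delete, toggle, and add; next is the range a reader sees
+ * while the added entry is still absent.
+ */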
+static void rcu_reader_setup(struct rcu_reader_struct *reader,
+ unsigned int id, struct rcu_test_struct2 *test)
+{
+ reader->id = id;
+ reader->test = test;
+ reader->mod = reader->id % 10;
+ reader->del = (reader->mod + 1) % 10;
+ reader->flip = (reader->mod + 2) % 10;
+ reader->add = (reader->mod + 3) % 10;
+ reader->next = (reader->mod + 4) % 10;
+}
+
+/* RCU reader in increasing index */
+static void *rcu_reader_fwd(void *ptr)
+{
+ struct rcu_reader_struct *reader = (struct rcu_reader_struct *)ptr;
+ struct rcu_test_struct2 *test = reader->test;
+ unsigned long index = reader->id;
+ bool toggled, modified, deleted, added;
+ int i;
+ void *entry, *prev = NULL;
+ MA_STATE(mas, test->mt, 0, 0);
+
+ rcu_reader_register(test);
+ toggled = modified = deleted = added = false;
+
+ while (!test->stop) {
+ i = 0;
+ /* mas_for_each? */
+ rcu_read_lock();
+ mas_set(&mas, test->index[index]);
+ mas_for_each(&mas, entry, test->last[index + 9]) {
+ unsigned long r_start, r_end, alt_start;
+ void *expected, *alt;
+
+ r_start = test->index[index + i];
+ r_end = test->last[index + i];
+ expected = xa_mk_value(r_start);
+
+ if (i == reader->del) {
+ if (!deleted) {
+ alt_start = test->index[index + reader->flip];
+ /* delete occurred. */
+ if (mas.index == alt_start) {
+ uatomic_inc(&test->seen_deleted);
+ deleted = true;
+ }
+ }
+ if (deleted) {
+ i = reader->flip;
+ r_start = test->index[index + i];
+ r_end = test->last[index + i];
+ expected = xa_mk_value(r_start);
+ }
+ }
+
+ if (!added && (i == reader->add)) {
+ alt_start = test->index[index + reader->next];
+ if (mas.index == r_start) {
+ uatomic_inc(&test->seen_added);
+ added = true;
+ } else if (mas.index == alt_start) {
+ i = reader->next;
+ r_start = test->index[index + i];
+ r_end = test->last[index + i];
+ expected = xa_mk_value(r_start);
+ }
+ }
+
+ RCU_MT_BUG_ON(test, mas.index != r_start);
+ RCU_MT_BUG_ON(test, mas.last != r_end);
+
+ if (i == reader->flip) {
+ alt = xa_mk_value(index + i + RCU_RANGE_COUNT);
+ if (prev) {
+ if (toggled && entry == expected)
+ uatomic_inc(&test->seen_toggle);
+ else if (!toggled && entry == alt)
+ uatomic_inc(&test->seen_toggle);
+ }
+
+ if (entry == expected)
+ toggled = false;
+ else if (entry == alt)
+ toggled = true;
+ else {
+ printk("!!%lu-%lu -> %p not %p or %p\n", mas.index, mas.last, entry, expected, alt);
+ RCU_MT_BUG_ON(test, 1);
+ }
+
+ prev = entry;
+ } else if (i == reader->mod) {
+ alt = xa_mk_value(index + i * 2 + 1 +
+ RCU_RANGE_COUNT);
+ if (entry != expected) {
+ if (!modified)
+ uatomic_inc(&test->seen_modified);
+ modified = true;
+ } else {
+ if (modified)
+ uatomic_inc(&test->seen_modified);
+ modified = false;
+ }
+
+ if (modified)
+ RCU_MT_BUG_ON(test, entry != alt);
+
+ } else {
+ if (entry != expected)
+ printk("!!%lu-%lu -> %p not %p\n", mas.index, mas.last, entry, expected);
+ RCU_MT_BUG_ON(test, entry != expected);
+ }
+
+ i++;
+ }
+ rcu_read_unlock();
+ usleep(test->pause);
+ }
+
+ rcu_unregister_thread();
+ return NULL;
+}
+
+/* RCU reader in decreasing index */
+static void *rcu_reader_rev(void *ptr)
+{
+ struct rcu_reader_struct *reader = (struct rcu_reader_struct *)ptr;
+ struct rcu_test_struct2 *test = reader->test;
+ unsigned long index = reader->id;
+ bool toggled, modified, deleted, added;
+ int i;
+ void *prev = NULL;
+ MA_STATE(mas, test->mt, 0, 0);
+
+ rcu_reader_register(test);
+ toggled = modified = deleted = added = false;
+
+ while (!test->stop) {
+ void *entry;
+
+ i = 9;
+ mas_set(&mas, test->index[index + i]);
+
+ rcu_read_lock();
+ while (i--) {
+ unsigned long r_start, r_end, alt_start;
+ void *expected, *alt;
+ int line = __LINE__;
+
+ entry = mas_prev(&mas, test->index[index]);
+ r_start = test->index[index + i];
+ r_end = test->last[index + i];
+ expected = xa_mk_value(r_start);
+
+ if (i == reader->del) {
+ alt_start = test->index[index + reader->mod];
+ if (mas.index == alt_start) {
+ line = __LINE__;
+ if (!deleted)
+ uatomic_inc(&test->seen_deleted);
+ deleted = true;
+ }
+ if (deleted) {
+ line = __LINE__;
+ i = reader->mod;
+ r_start = test->index[index + i];
+ r_end = test->last[index + i];
+ expected = xa_mk_value(r_start);
+ }
+ }
+ if (!added && (i == reader->add)) {
+ alt_start = test->index[index + reader->flip];
+ if (mas.index == r_start) {
+ line = __LINE__;
+ uatomic_inc(&test->seen_added);
+ added = true;
+ } else if (mas.index == alt_start) {
+ line = __LINE__;
+ i = reader->flip;
+ r_start = test->index[index + i];
+ r_end = test->last[index + i];
+ expected = xa_mk_value(r_start);
+ }
+ }
+
+ if (i == reader->mod)
+ line = __LINE__;
+ else if (i == reader->flip)
+ line = __LINE__;
+
+ if (mas.index != r_start) {
+ alt = xa_mk_value(index + i * 2 + 1 +
+ RCU_RANGE_COUNT);
+ mt_dump(test->mt, mt_dump_dec);
+ printk("Error: %lu-%lu %p != %lu-%lu %p %p line %d i %d\n",
+ mas.index, mas.last, entry,
+ r_start, r_end, expected, alt,
+ line, i);
+ }
+ RCU_MT_BUG_ON(test, mas.index != r_start);
+ RCU_MT_BUG_ON(test, mas.last != r_end);
+
+ if (i == reader->mod) {
+ alt = xa_mk_value(index + i * 2 + 1 +
+ RCU_RANGE_COUNT);
+
+ if (entry != expected) {
+ if (!modified)
+ uatomic_inc(&test->seen_modified);
+ modified = true;
+ } else {
+ if (modified)
+ uatomic_inc(&test->seen_modified);
+ modified = false;
+ }
+ if (modified)
+ RCU_MT_BUG_ON(test, entry != alt);
+
+ } else if (i == reader->flip) {
+ alt = xa_mk_value(index + i +
+ RCU_RANGE_COUNT);
+ if (prev) {
+ if (toggled && entry == expected)
+ uatomic_inc(&test->seen_toggle);
+ else if (!toggled && entry == alt)
+ uatomic_inc(&test->seen_toggle);
+ }
+
+ if (entry == expected)
+ toggled = false;
+ else if (entry == alt)
+ toggled = true;
+ else {
+ printk("%lu-%lu %p != %p or %p\n",
+ mas.index, mas.last, entry,
+ expected, alt);
+ RCU_MT_BUG_ON(test, 1);
+ }
+
+ prev = entry;
+ } else {
+ if (entry != expected)
+ printk("%lu-%lu %p != %p\n", mas.index,
+ mas.last, entry, expected);
+ RCU_MT_BUG_ON(test, entry != expected);
+ }
+ }
+ rcu_read_unlock();
+ usleep(test->pause);
+ }
+
+ rcu_unregister_thread();
+ return NULL;
+}
+
+static void rcu_stress_rev(struct maple_tree *mt, struct rcu_test_struct2 *test,
+ int count, struct rcu_reader_struct *test_reader)
+{
+ int i, j = 10000;
+ bool toggle = true;
+
+ test->start = true; /* Release the hounds! */
+ usleep(5);
+
+ while (j--) {
+ toggle = !toggle;
+ i = count;
+ while (i--) {
+ unsigned long start, end;
+ struct rcu_reader_struct *this = &test_reader[i];
+
+ /* Mod offset */
+ if (j == 600) {
+ start = test->index[this->id + this->mod];
+ end = test->last[this->id + this->mod];
+ mtree_store_range(mt, start, end,
+ xa_mk_value(this->id + this->mod * 2 +
+ 1 + RCU_RANGE_COUNT),
+ GFP_KERNEL);
+ }
+
+ /* Toggle */
+ if (!(j % 5)) {
+ start = test->index[this->id + this->flip];
+ end = test->last[this->id + this->flip];
+ mtree_store_range(mt, start, end,
+ xa_mk_value((toggle ? start :
+ this->id + this->flip +
+ RCU_RANGE_COUNT)),
+ GFP_KERNEL);
+ }
+
+ /* delete */
+ if (j == 400) {
+ start = test->index[this->id + this->del];
+ end = test->last[this->id + this->del];
+ mtree_store_range(mt, start, end, NULL, GFP_KERNEL);
+ }
+
+ /* add */
+ if (j == 500) {
+ start = test->index[this->id + this->add];
+ end = test->last[this->id + this->add];
+ mtree_store_range(mt, start, end,
+ xa_mk_value(start), GFP_KERNEL);
+ }
+ }
+ usleep(test->pause);
+ /* If a test fails, don't flood the console */
+ if (test->stop)
+ break;
+ }
+}
+
+static void rcu_stress_fwd(struct maple_tree *mt, struct rcu_test_struct2 *test,
+ int count, struct rcu_reader_struct *test_reader)
+{
+ int j, i;
+ bool toggle = true;
+
+ test->start = true; /* Release the hounds! */
+ usleep(5);
+ for (j = 0; j < 10000; j++) {
+ toggle = !toggle;
+ for (i = 0; i < count; i++) {
+ unsigned long start, end;
+ struct rcu_reader_struct *this = &test_reader[i];
+
+ /* Mod offset */
+ if (j == 600) {
+ start = test->index[this->id + this->mod];
+ end = test->last[this->id + this->mod];
+ mtree_store_range(mt, start, end,
+ xa_mk_value(this->id + this->mod * 2 +
+ 1 + RCU_RANGE_COUNT),
+ GFP_KERNEL);
+ }
+
+ /* Toggle */
+ if (!(j % 5)) {
+ start = test->index[this->id + this->flip];
+ end = test->last[this->id + this->flip];
+ mtree_store_range(mt, start, end,
+ xa_mk_value((toggle ? start :
+ this->id + this->flip +
+ RCU_RANGE_COUNT)),
+ GFP_KERNEL);
+ }
+
+ /* delete */
+ if (j == 400) {
+ start = test->index[this->id + this->del];
+ end = test->last[this->id + this->del];
+ mtree_store_range(mt, start, end, NULL, GFP_KERNEL);
+ }
+
+ /* add */
+ if (j == 500) {
+ start = test->index[this->id + this->add];
+ end = test->last[this->id + this->add];
+ mtree_store_range(mt, start, end,
+ xa_mk_value(start), GFP_KERNEL);
+ }
+ }
+ usleep(test->pause);
+ /* If a test fails, don't flood the console */
+ if (test->stop)
+ break;
+ }
+}
+
+/*
+ * This is to check:
+ * 1. A range that is never present.
+ * 2. A range that is always present.
+ * 3. Things being added but not removed.
+ * 4. Things being removed but not added.
+ * 5. Things being added and removed; searches may succeed or fail.
+ *
+ * This sets up two readers for every 10 entries: one reading forward and
+ * one reading in reverse.
+ */
+static void rcu_stress(struct maple_tree *mt, bool forward)
+{
+ unsigned int count, i;
+ unsigned long r, seed;
+ pthread_t readers[RCU_RANGE_COUNT / 5];
+ struct rcu_test_struct2 test;
+ struct rcu_reader_struct test_reader[RCU_RANGE_COUNT / 5];
+ void *(*function)(void *);
+
+ /* Test setup */
+ test.mt = mt;
+ test.pause = 5;
+ test.seen_toggle = 0;
+ test.seen_deleted = 0;
+ test.seen_added = 0;
+ test.seen_modified = 0;
+ test.thread_count = 0;
+ test.start = test.stop = false;
+ seed = time(NULL);
+ srand(seed);
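+ /* Seed the tree with RCU_RANGE_COUNT random, non-overlapping ranges. */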
+ for (i = 0; i < RCU_RANGE_COUNT; i++) {
+ r = seed + rand();
+ mtree_store_range(mt, seed, r,
+ xa_mk_value(seed), GFP_KERNEL);
+
+ /* Record start and end of entry */
+ test.index[i] = seed;
+ test.last[i] = r;
+ seed = 1 + r + rand() % 10;
+ }
+
+ i = count = ARRAY_SIZE(readers);
+ while (i--) {
+ unsigned long id;
+
+ id = i / 2 * 10;
+ if (i % 2)
+ function = rcu_reader_fwd;
+ else
+ function = rcu_reader_rev;
+
+ rcu_reader_setup(&test_reader[i], id, &test);
+ if (pthread_create(&readers[i], NULL, *function,
+ &test_reader[i])) {
+ perror("creating reader thread");
+ exit(1);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(readers); i++) {
+ struct rcu_reader_struct *this = &test_reader[i];
+ int add = this->id + this->add;
+
+ /* Remove add entries from the tree for later addition */
+ mtree_store_range(mt, test.index[add], test.last[add],
+ NULL, GFP_KERNEL);
+ }
+
+ mt_set_in_rcu(mt);
+ do {
+ usleep(5);
+ } while (test.thread_count > ARRAY_SIZE(readers));
+
+ if (forward)
+ rcu_stress_fwd(mt, &test, count, test_reader);
+ else
+ rcu_stress_rev(mt, &test, count, test_reader);
+
+ test.stop = true;
+ while (count--)
+ pthread_join(readers[count], NULL);
+
+ mt_validate(mt);
+}
+
+struct rcu_test_struct {
+ struct maple_tree *mt; /* the maple tree */
+ int count; /* Number of times to check value(s) */
+ unsigned long index; /* The first index to check */
+ void *entry1; /* The first entry value */
+ void *entry2; /* The second entry value */
+ void *entry3; /* The third entry value */
+
+ bool update_2;
+ bool update_3;
+ unsigned long range_start;
+ unsigned long range_end;
+ unsigned int loop_sleep;
+ unsigned int val_sleep;
+
+ unsigned int failed; /* failed detection for other threads */
+ unsigned int seen_entry2; /* Number of threads that have seen the new value */
+ unsigned int seen_entry3; /* Number of threads that have seen the second new value */
+ unsigned int seen_both; /* Number of threads that have seen both new values */
+ unsigned int seen_toggle;
+ unsigned int seen_added;
+ unsigned int seen_removed;
+ unsigned long last; /* The end of the range to write. */
+
+ unsigned long removed; /* The index of the removed entry */
+ unsigned long added; /* The index of the added entry */
+ unsigned long toggle; /* The index of the toggled entry */
+};
+
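+/*
+ * Check a loaded entry against the expected values. Returns 0 when the
+ * entry is one of the three known values (counting the first sighting of
+ * each new one), or 1 when the entry is unrecognized.
+ */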
+static inline
+int eval_rcu_entry(struct rcu_test_struct *test, void *entry, bool *update_2,
+ bool *update_3)
+{
+ if (entry == test->entry1)
+ return 0;
+
+ if (entry == test->entry2) {
+ if (!(*update_2)) {
+ uatomic_inc(&test->seen_entry2);
+ *update_2 = true;
+ if (update_3)
+ uatomic_inc(&test->seen_both);
+ }
+ return 0;
+ }
+
+ if (entry == test->entry3) {
+ if (!(*update_3)) {
+ uatomic_inc(&test->seen_entry3);
+ *update_3 = true;
+ if (update_2)
+ uatomic_inc(&test->seen_both);
+ }
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * rcu_val() - Read a given value in the tree test->count times using the
+ * regular API
+ *
+ * @ptr: The pointer to the rcu_test_struct
+ */
+static void *rcu_val(void *ptr)
+{
+ struct rcu_test_struct *test = (struct rcu_test_struct *)ptr;
+ unsigned long count = test->count;
+ bool update_2 = false;
+ bool update_3 = false;
+ void *entry;
+
+ rcu_register_thread();
+ while (count--) {
+ usleep(test->val_sleep);
+ /*
+ * No locking required, regular API locking is handled in the
+ * maple tree code
+ */
+ entry = mtree_load(test->mt, test->index);
+ MT_BUG_ON(test->mt, eval_rcu_entry(test, entry, &update_2,
+ &update_3));
+ }
+ rcu_unregister_thread();
+ return NULL;
+}
+
+/*
+ * rcu_loop() - Loop over a section of the maple tree, checking for an expected
+ * value using the advanced API
+ *
+ * @ptr: The pointer to the rcu_test_struct
+ */
+static void *rcu_loop(void *ptr)
+{
+ struct rcu_test_struct *test = (struct rcu_test_struct *)ptr;
+ unsigned long count = test->count;
+ void *entry, *expected;
+ bool update_2 = false;
+ bool update_3 = false;
+ MA_STATE(mas, test->mt, test->range_start, test->range_start);
+
+ rcu_register_thread();
+
+ /*
+ * Loop through the test->range_start - test->range_end test->count
+ * times
+ */
+ while (count--) {
+ usleep(test->loop_sleep);
+ rcu_read_lock();
+ mas_for_each(&mas, entry, test->range_end) {
+ /* The expected value is based on the start range. */
+ expected = xa_mk_value(mas.index ? mas.index / 10 : 0);
+
+ /* Out of the interesting range */
+ if (mas.index < test->index || mas.index > test->last) {
+ if (entry != expected) {
+ printk("%lx - %lx = %p not %p\n",
+ mas.index, mas.last, entry, expected);
+ }
+ MT_BUG_ON(test->mt, entry != expected);
+ continue;
+ }
+
+ if (entry == expected)
+ continue; /* Not seen. */
+
+ /* In the interesting range */
+ MT_BUG_ON(test->mt, eval_rcu_entry(test, entry,
+ &update_2,
+ &update_3));
+ }
+ rcu_read_unlock();
+ mas_set(&mas, test->range_start);
+ }
+
+ rcu_unregister_thread();
+ return NULL;
+}
+
+static noinline
+void run_check_rcu(struct maple_tree *mt, struct rcu_test_struct *vals)
+{
+ int i;
+ void *(*function)(void *);
+ pthread_t readers[20];
+
+ mt_set_in_rcu(mt);
+ MT_BUG_ON(mt, !mt_in_rcu(mt));
+
+ for (i = 0; i < ARRAY_SIZE(readers); i++) {
+ if (i % 2)
+ function = rcu_loop;
+ else
+ function = rcu_val;
+
+ if (pthread_create(&readers[i], NULL, *function, vals)) {
+ perror("creating reader thread");
+ exit(1);
+ }
+ }
+
+ usleep(5); /* small yield to ensure all threads are at least started. */
+ mtree_store_range(mt, vals->index, vals->last, vals->entry2,
+ GFP_KERNEL);
+ while (i--)
+ pthread_join(readers[i], NULL);
+
+ /* Make sure the test caught at least one update. */
+ MT_BUG_ON(mt, !vals->seen_entry2);
+}
+
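+/*
+ * Reader that repeatedly walks a fixed range while the writer expands it;
+ * the growth seen on each side of the original range must stay equal.
+ */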
+static void *rcu_slot_store_reader(void *ptr)
+{
+ struct rcu_test_struct3 *test = ptr;
+ MA_STATE(mas, test->mt, test->index, test->index);
+
+ rcu_register_thread();
+
+ rcu_read_lock();
+ while (!test->stop) {
+ mas_walk(&mas);
+ /* The length of growth to both sides must be equal. */
+ RCU_MT_BUG_ON(test, (test->index - mas.index) !=
+ (mas.last - test->last));
+ }
+ rcu_read_unlock();
+
+ rcu_unregister_thread();
+ return NULL;
+}
+
+static noinline void run_check_rcu_slot_store(struct maple_tree *mt)
+{
+ pthread_t readers[20];
+ int range_cnt = 200, i, limit = 10000;
+ unsigned long len = ULONG_MAX / range_cnt, start, end;
+ struct rcu_test_struct3 test = {.stop = false, .mt = mt};
+
+ start = range_cnt / 2 * len;
+ end = start + len - 1;
+ test.index = start;
+ test.last = end;
+
+ for (i = 0; i < range_cnt; i++) {
+ mtree_store_range(mt, i * len, i * len + len - 1,
+ xa_mk_value(i * 100), GFP_KERNEL);
+ }
+
+ mt_set_in_rcu(mt);
+ MT_BUG_ON(mt, !mt_in_rcu(mt));
+
+ for (i = 0; i < ARRAY_SIZE(readers); i++) {
+ if (pthread_create(&readers[i], NULL, rcu_slot_store_reader,
+ &test)) {
+ perror("creating reader thread");
+ exit(1);
+ }
+ }
+
+ usleep(5);
+
+ while (limit--) {
+ /* Step by step, expand the middlemost range to both sides. */
+ mtree_store_range(mt, --start, ++end, xa_mk_value(100),
+ GFP_KERNEL);
+ }
+
+ test.stop = true;
+
+ while (i--)
+ pthread_join(readers[i], NULL);
+
+ mt_validate(mt);
+}
+
+static noinline
+void run_check_rcu_slowread(struct maple_tree *mt, struct rcu_test_struct *vals)
+{
+ int i;
+ void *(*function)(void *);
+ pthread_t readers[30];
+ unsigned int index = vals->index;
+
+ mt_set_in_rcu(mt);
+ MT_BUG_ON(mt, !mt_in_rcu(mt));
+
+ for (i = 0; i < ARRAY_SIZE(readers); i++) {
+ if (i % 2)
+ function = rcu_loop;
+ else
+ function = rcu_val;
+
+ if (pthread_create(&readers[i], NULL, *function, vals)) {
+ perror("creating reader thread");
+ exit(1);
+ }
+ }
+
+ usleep(3); /* small yield to ensure all threads are at least started. */
+
+ while (index <= vals->last) {
+ mtree_store(mt, index,
+ (index % 2 ? vals->entry2 : vals->entry3),
+ GFP_KERNEL);
+ index++;
+ usleep(2);
+ }
+
+ while (i--)
+ pthread_join(readers[i], NULL);
+
+ /* Make sure the test caught at least one update. */
+ MT_BUG_ON(mt, !vals->seen_entry2);
+ MT_BUG_ON(mt, !vals->seen_entry3);
+ MT_BUG_ON(mt, !vals->seen_both);
+}
+
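+/*
+ * Simulated RCU test in a single thread: hold the read lock, perform a
+ * store through the writer state, and check that the reader state sees
+ * the correct old or new value across each overwrite pattern.
+ */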
+static noinline void __init check_rcu_simulated(struct maple_tree *mt)
+{
+ unsigned long i, nr_entries = 1000;
+ unsigned long target = 4320;
+ unsigned long val = 0xDEAD;
+
+ MA_STATE(mas_writer, mt, 0, 0);
+ MA_STATE(mas_reader, mt, target, target);
+
+ rcu_register_thread();
+
+ mt_set_in_rcu(mt);
+ mas_lock(&mas_writer);
+ for (i = 0; i <= nr_entries; i++) {
+ mas_writer.index = i * 10;
+ mas_writer.last = i * 10 + 5;
+ mas_store_gfp(&mas_writer, xa_mk_value(i), GFP_KERNEL);
+ }
+ mas_unlock(&mas_writer);
+
+ /* Overwrite one entry with a new value. */
+ mas_set_range(&mas_writer, target, target + 5);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(val));
+ rcu_read_unlock();
+
+ /* Restore value. */
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ mas_reset(&mas_reader);
+
+ /* Overwrite 1/2 the entry */
+ mas_set_range(&mas_writer, target, target + 2);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(val));
+ rcu_read_unlock();
+
+ /* Restore value. */
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ mas_reset(&mas_reader);
+
+ /* Overwrite last 1/2 the entry */
+ mas_set_range(&mas_writer, target + 2, target + 5);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ rcu_read_unlock();
+
+ /* Restore value. */
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ mas_reset(&mas_reader);
+
+ /* Overwrite more than the entry */
+ mas_set_range(&mas_writer, target - 5, target + 15);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(val));
+ rcu_read_unlock();
+
+ /* Restore value. */
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ mas_reset(&mas_reader);
+
+ /* Overwrite more than the node. */
+ mas_set_range(&mas_writer, target - 400, target + 400);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(val));
+ rcu_read_unlock();
+
+ /* Restore value. */
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ mas_reset(&mas_reader);
+
+ /* Overwrite the tree */
+ mas_set_range(&mas_writer, 0, ULONG_MAX);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(val));
+ rcu_read_unlock();
+
+ /* Clear out tree & recreate it */
+ mas_lock(&mas_writer);
+ mas_set_range(&mas_writer, 0, ULONG_MAX);
+ mas_store_gfp(&mas_writer, NULL, GFP_KERNEL);
+ mas_set_range(&mas_writer, 0, 0);
+ for (i = 0; i <= nr_entries; i++) {
+ mas_writer.index = i * 10;
+ mas_writer.last = i * 10 + 5;
+ mas_store_gfp(&mas_writer, xa_mk_value(i), GFP_KERNEL);
+ }
+ mas_unlock(&mas_writer);
+
+ /* next check */
+ /* Overwrite one entry with a new value. */
+ mas_reset(&mas_reader);
+ mas_set_range(&mas_writer, target, target + 5);
+ mas_set_range(&mas_reader, target, target);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_prev(&mas_reader, 0);
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_next(&mas_reader, ULONG_MAX) != xa_mk_value(val));
+ rcu_read_unlock();
+
+ /* Restore value. */
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+
+ /* prev check */
+ /* Overwrite one entry with a new value. */
+ mas_reset(&mas_reader);
+ mas_set_range(&mas_writer, target, target + 5);
+ mas_set_range(&mas_reader, target, target);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_next(&mas_reader, ULONG_MAX);
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_prev(&mas_reader, 0) != xa_mk_value(val));
+ rcu_read_unlock();
+
+ rcu_unregister_thread();
+}
+
+static noinline void __init check_rcu_threaded(struct maple_tree *mt)
+{
+ unsigned long i, nr_entries = 1000;
+ struct rcu_test_struct vals;
+
+ vals.val_sleep = 200;
+ vals.loop_sleep = 110;
+
+ rcu_register_thread();
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+ /* Store across several slots. */
+ vals.count = 1000;
+ vals.mt = mt;
+ vals.index = 8650;
+ vals.last = 8666;
+ vals.entry1 = xa_mk_value(865);
+ vals.entry2 = xa_mk_value(8650);
+ vals.entry3 = xa_mk_value(8650);
+ vals.range_start = 0;
+ vals.range_end = ULONG_MAX;
+ vals.seen_entry2 = 0;
+ vals.seen_entry3 = 0;
+
+ run_check_rcu(mt, &vals);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ /* 4390-4395: value 439 (0x1b7) [0x36f] */
+ /* Store across several slots. */
+ /* Spanning store. */
+ vals.count = 10000;
+ vals.mt = mt;
+ vals.index = 4390;
+ vals.last = 4398;
+ vals.entry1 = xa_mk_value(4390);
+ vals.entry2 = xa_mk_value(439);
+ vals.entry3 = xa_mk_value(439);
+ vals.seen_entry2 = 0;
+ vals.range_start = 4316;
+ vals.range_end = 5035;
+ run_check_rcu(mt, &vals);
+ mtree_destroy(mt);
+
+ /* Check expanding range in RCU mode */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ run_check_rcu_slot_store(mt);
+ mtree_destroy(mt);
+
+ /* Forward writer for rcu stress */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ rcu_stress(mt, true);
+ mtree_destroy(mt);
+
+ /* Reverse writer for rcu stress */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ rcu_stress(mt, false);
+ mtree_destroy(mt);
+
+ /* Slow reader test with spanning store. */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ /* 4390-4395: value 439 (0x1b7) [0x36f] */
+ /* Store across several slots. */
+ /* Spanning store. */
+ vals.count = 15000;
+ vals.mt = mt;
+ vals.index = 4390;
+ vals.last = 4398;
+ vals.entry1 = xa_mk_value(4390);
+ vals.entry2 = xa_mk_value(439);
+ vals.entry3 = xa_mk_value(4391);
+ vals.seen_toggle = 0;
+ vals.seen_added = 0;
+ vals.seen_removed = 0;
+ vals.range_start = 4316;
+ vals.range_end = 5035;
+ vals.removed = 4360;
+ vals.added = 4396;
+ vals.toggle = 4347;
+ vals.val_sleep = 400;
+ vals.loop_sleep = 200;
+ vals.seen_entry2 = 0;
+ vals.seen_entry3 = 0;
+ vals.seen_both = 0;
+ vals.entry3 = xa_mk_value(438);
+
+ run_check_rcu_slowread(mt, &vals);
+ rcu_unregister_thread();
+}
+/* End of RCU stress testing */
+
+/* Check tree structure by depth first searching */
+static void mas_dfs_preorder(struct ma_state *mas)
+{
+ struct maple_enode *prev;
+ unsigned char end, slot = 0;
+ unsigned long *pivots;
+
+ if (mas->status == ma_start) {
+ mas_start(mas);
+ return;
+ }
+
+ if (mte_is_leaf(mas->node) && mte_is_root(mas->node))
+ goto done;
+
+walk_up:
+ end = mas_data_end(mas);
+ if (mte_is_leaf(mas->node) ||
+ (slot > end)) {
+ if (mte_is_root(mas->node))
+ goto done;
+
+ slot = mte_parent_slot(mas->node) + 1;
+ mas_ascend(mas);
+ goto walk_up;
+ }
+
+ prev = mas->node;
+ mas->node = mas_get_slot(mas, slot);
+ if (!mas->node || slot > end) {
+ if (mte_is_root(prev))
+ goto done;
+
+ mas->node = prev;
+ slot = mte_parent_slot(mas->node) + 1;
+ mas_ascend(mas);
+ goto walk_up;
+ }
+ pivots = ma_pivots(mte_to_node(prev), mte_node_type(prev));
+ mas->max = mas_safe_pivot(mas, pivots, slot, mte_node_type(prev));
+ mas->min = mas_safe_min(mas, pivots, slot);
+
+ return;
+done:
+ mas->status = ma_none;
+}
+
+static void check_dfs_preorder(struct maple_tree *mt)
+{
+ unsigned long e, count = 0, max = 1000;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ if (MAPLE_32BIT)
+ e = 37;
+ else
+ e = 74;
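+ /* 32-bit nodes hold more slots, so the same sequence needs fewer nodes. */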
+
+ check_seq(mt, max, false);
+ do {
+ count++;
+ mas_dfs_preorder(&mas);
+ } while (!mas_is_none(&mas));
+ MT_BUG_ON(mt, count != e);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mas_reset(&mas);
+ count = 0;
+ if (!MAPLE_32BIT)
+ e = 77;
+
+ check_seq(mt, max, false);
+ do {
+ count++;
+ mas_dfs_preorder(&mas);
+ } while (!mas_is_none(&mas));
+ /*printk("count %lu\n", count); */
+ MT_BUG_ON(mt, count != e);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mas_reset(&mas);
+ count = 0;
+ check_rev_seq(mt, max, false);
+ do {
+ count++;
+ mas_dfs_preorder(&mas);
+ } while (!mas_is_none(&mas));
+ /*printk("count %lu\n", count); */
+ MT_BUG_ON(mt, count != e);
+ mtree_destroy(mt);
+
+ rcu_barrier();
+ /*
+ * pr_info(" ->seq test of 0-%lu %luK in %d active (%d total)\n",
+ * max, mt_get_alloc_size()/1024, mt_nr_allocated(),
+ * mt_nr_tallocated());
+ */
+}
+/* End of depth first search tests */
+
+/* get height of the lowest non-leaf node with free space */
+static unsigned char get_vacant_height(struct ma_wr_state *wr_mas, void *entry)
+{
+ struct ma_state *mas = wr_mas->mas;
+ char vacant_height = 0;
+ enum maple_type type;
+ unsigned long *pivots;
+ unsigned long min = 0;
+ unsigned long max = ULONG_MAX;
+ unsigned char offset;
+
+ /* start traversal */
+ mas_reset(mas);
+ mas_start(mas);
+ if (!xa_is_node(mas_root(mas)))
+ return 0;
+
+ type = mte_node_type(mas->node);
+ wr_mas->type = type;
+ while (!ma_is_leaf(type)) {
+ mas_node_walk(mas, mte_to_node(mas->node), type, &min, &max);
+ offset = mas->offset;
+ mas->end = mas_data_end(mas);
+ pivots = ma_pivots(mte_to_node(mas->node), type);
+
+ if (pivots) {
+ if (offset)
+ min = pivots[mas->offset - 1];
+ if (offset < mas->end)
+ max = pivots[mas->offset];
+ }
+ wr_mas->r_max = offset < mas->end ? pivots[offset] : mas->max;
+
+ /* detect spanning write */
+ if (mas_is_span_wr(wr_mas))
+ break;
+
+ if (mas->end < mt_slot_count(mas->node) - 1)
+ vacant_height = mas->depth + 1;
+
+ mas_descend(mas);
+ type = mte_node_type(mas->node);
+ mas->depth++;
+ }
+
+ return vacant_height;
+}
+
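+/* Count the preallocated nodes: one held in mas->alloc plus those in the sheaf. */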
+static int mas_allocated(struct ma_state *mas)
+{
+ int total = 0;
+
+ if (mas->alloc)
+ total++;
+
+ if (mas->sheaf)
+ total += kmem_cache_sheaf_size(mas->sheaf);
+
+ return total;
+}
+/* Preallocation testing */
+static noinline void __init check_prealloc(struct maple_tree *mt)
+{
+ unsigned long i, max = 100;
+ unsigned long allocated;
+ unsigned char height;
+ unsigned char vacant_height;
+ struct maple_node *mn;
+ void *ptr = check_prealloc;
+ MA_STATE(mas, mt, 10, 20);
+ MA_WR_STATE(wr_mas, &mas, ptr);
+
+ mt_set_non_kernel(1000);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ /* Spanning store */
+ mas_set_range(&mas, 470, 500);
+
+ mas_wr_preallocate(&wr_mas, ptr);
+ MT_BUG_ON(mt, mas.store_type != wr_spanning_store);
+ MT_BUG_ON(mt, mas_is_err(&mas));
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ vacant_height = get_vacant_height(&wr_mas, ptr);
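+ /*
+ * A worst-case spanning store is expected to allocate three new nodes
+ * for each level below the lowest node with free space, plus one spare.
+ */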
+ MT_BUG_ON(mt, allocated == 0);
+ MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 3);
+ mas_destroy(&mas);
+ allocated = mas_allocated(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+
+ mas_wr_preallocate(&wr_mas, ptr);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ vacant_height = get_vacant_height(&wr_mas, ptr);
+ MT_BUG_ON(mt, allocated == 0);
+ MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 3);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ mas_destroy(&mas);
+ allocated = mas_allocated(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ vacant_height = get_vacant_height(&wr_mas, ptr);
+ MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 3);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
+ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ mas_destroy(&mas);
+ allocated = mas_allocated(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ vacant_height = get_vacant_height(&wr_mas, ptr);
+ MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 3);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ mas_destroy(&mas);
+ allocated = mas_allocated(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+ mn->parent = ma_parent_ptr(mn);
+ ma_free_rcu(mn);
+
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ vacant_height = get_vacant_height(&wr_mas, ptr);
+ MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 3);
+ mas_store_prealloc(&mas, ptr);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+
+ /* Slot store does not need allocations */
+ mas_set_range(&mas, 6, 9);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+ mas_store_prealloc(&mas, ptr);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+
+ mas_set_range(&mas, 6, 10);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+ mas_store_prealloc(&mas, ptr);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+
+ /* Split */
+ mas_set_range(&mas, 54, 54);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ vacant_height = get_vacant_height(&wr_mas, ptr);
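+ /* A split is expected to allocate two nodes per insufficient level, plus one spare. */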
+ MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 2);
+ mas_store_prealloc(&mas, ptr);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ mt_set_non_kernel(1);
+ /* Spanning store */
+ mas_set_range(&mas, 1, 100);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+ mas_destroy(&mas);
+
+ /* Spanning store */
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ vacant_height = get_vacant_height(&wr_mas, ptr);
+ MT_BUG_ON(mt, allocated == 0);
+ /*
+ * vacant height cannot be used to compute the number of nodes needed
+ * as the root contains two entries which means it is on the verge of
+ * insufficiency. The worst case full height of the tree is needed.
+ */
+ MT_BUG_ON(mt, allocated != height * 3 + 1);
+ mas_store_prealloc(&mas, ptr);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ mas_set_range(&mas, 0, 200);
+ mt_set_non_kernel(1);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+
+ /* Chaining multiple preallocations */
+ mt_set_in_rcu(mt);
+ mas_set_range(&mas, 800, 805); /* Slot store, should be 0 allocations */
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+ mas.last = 809; /* Node store */
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ MT_BUG_ON(mt, allocated != 1);
+ mas_store_prealloc(&mas, ptr);
+}
+/* End of preallocation testing */
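+
+/*
+ * Note on the assertions above (explanatory sketch, not original test logic):
+ * a spanning store may replace up to three nodes (left, right and parent) at
+ * every level that has no vacant slot, plus one node for the new root, which
+ * is where the repeated bound comes from:
+ *
+ *	allocated == 1 + (height - vacant_height) * 3
+ *
+ * e.g. assuming height == 4 with one vacant level, 1 + (4 - 1) * 3 == 10.
+ */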
+
+/* Spanning writes: writes that span nodes and layers of the tree */
+static noinline void __init check_spanning_write(struct maple_tree *mt)
+{
+ unsigned long i, max = 5000;
+ MA_STATE(mas, mt, 1200, 2380);
+
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 1205);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ for (i = 1; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mtree_lock(mt);
+ mas_set_range(&mas, 9, 50006); /* Will expand to 0 - ULONG_MAX */
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 1205);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* Test spanning store that requires a right cousin rebalance */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mas_set_range(&mas, 0, 12900); /* Spans more than 2 levels */
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 1205);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /* Test non-alloc tree spanning store */
+ mt_init_flags(mt, 0);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mas_set_range(&mas, 0, 300);
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 15);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /* Test spanning store that requires a right sibling rebalance */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mas_set_range(&mas, 0, 12865);
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 15);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /* Test spanning store that requires a left sibling rebalance */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mas_set_range(&mas, 90, 13665);
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 95);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /* Test spanning store that requires a left cousin rebalance */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mas_set_range(&mas, 46805, 49995);
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 46815);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /*
+ * Test spanning store that requires a left cousin rebalance all the way
+ * to root
+ */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mas_set_range(&mas, 32395, 49995);
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 46815);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /*
+ * Test spanning store that requires a right cousin rebalance all the
+ * way to root
+ */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+ mas_set_range(&mas, 38875, 43190);
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 38900);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /* Test spanning store ending at full node (depth 2)*/
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+ mtree_lock(mt);
+ mas_set(&mas, 47606);
+ mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+ mas_set(&mas, 47607);
+ mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+ mas_set(&mas, 47608);
+ mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+ mas_set(&mas, 47609);
+ mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+ /* Ensure the parent node is full */
+ mas_ascend(&mas);
+ MT_BUG_ON(mt, (mas_data_end(&mas)) != mt_slot_count(mas.node) - 1);
+ mas_set_range(&mas, 11516, 48940);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /* Test spanning write with many levels of no siblings */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+ mas_set_range(&mas, 43200, 49999);
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 43200);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= 100; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mtree_lock(mt);
+ mas_set_range(&mas, 76, 875);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mtree_unlock(mt);
+}
+/* End of spanning write testing */
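+
+/*
+ * Illustrative sketch (not part of the original tests): a write "spans" when
+ * its range crosses the boundary between nodes. Assuming leaves covering
+ * [0, 99] and [100, 199], a hypothetical caller forces a spanning store,
+ * where the range straddles both leaves, with:
+ *
+ *	MA_STATE(mas, mt, 50, 150);
+ *	mtree_lock(mt);
+ *	mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ *	mtree_unlock(mt);
+ */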
+
+/* Writes to a NULL area that are adjacent to other NULLs */
+static noinline void __init check_null_expand(struct maple_tree *mt)
+{
+ unsigned long i, max = 100;
+ unsigned char data_end;
+ MA_STATE(mas, mt, 959, 959);
+
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+ /* Test expanding null at start. */
+ mas_lock(&mas);
+ mas_walk(&mas);
+ data_end = mas_data_end(&mas);
+ mas_set_range(&mas, 959, 963);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ MT_BUG_ON(mt, mtree_load(mt, 963) != NULL);
+ MT_BUG_ON(mt, data_end != mas_data_end(&mas));
+
+ /* Test expanding null at end. */
+ mas_set(&mas, 880);
+ mas_walk(&mas);
+ data_end = mas_data_end(&mas);
+ mas_set_range(&mas, 884, 887);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ MT_BUG_ON(mt, mtree_load(mt, 884) != NULL);
+ MT_BUG_ON(mt, mtree_load(mt, 889) != NULL);
+#if defined(CONFIG_64BIT)
+ MT_BUG_ON(mt, data_end != mas_data_end(&mas));
+#endif
+
+ /* Test expanding null at start and end. */
+ mas_set(&mas, 890);
+ mas_walk(&mas);
+ data_end = mas_data_end(&mas);
+ mas_set_range(&mas, 900, 905);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ MT_BUG_ON(mt, mtree_load(mt, 899) != NULL);
+ MT_BUG_ON(mt, mtree_load(mt, 900) != NULL);
+ MT_BUG_ON(mt, mtree_load(mt, 905) != NULL);
+ MT_BUG_ON(mt, mtree_load(mt, 906) != NULL);
+#if defined(CONFIG_64BIT)
+ MT_BUG_ON(mt, data_end - 2 != mas_data_end(&mas));
+#endif
+
+ /* Test expanding null across multiple slots. */
+ mas_set(&mas, 800);
+ mas_walk(&mas);
+ data_end = mas_data_end(&mas);
+ mas_set_range(&mas, 810, 825);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ MT_BUG_ON(mt, mtree_load(mt, 809) != NULL);
+ MT_BUG_ON(mt, mtree_load(mt, 810) != NULL);
+ MT_BUG_ON(mt, mtree_load(mt, 825) != NULL);
+ MT_BUG_ON(mt, mtree_load(mt, 826) != NULL);
+#if defined(CONFIG_64BIT)
+ MT_BUG_ON(mt, data_end - 4 != mas_data_end(&mas));
+#endif
+ mas_unlock(&mas);
+}
+/* End of NULL area expansions */
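+
+/*
+ * Sketch of the behaviour checked above (explanatory, not original test
+ * logic): storing NULL next to an existing NULL range merges the two into a
+ * single slot, so mas_data_end() shrinks instead of growing. Assuming
+ * [16, 19] already reads back as NULL:
+ *
+ *	mtree_store_range(mt, 10, 15, ptr, GFP_KERNEL);
+ *	mtree_store_range(mt, 13, 15, NULL, GFP_KERNEL);
+ *
+ * leaves [13, 19] as one contiguous NULL range and [10, 12] holding ptr.
+ */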
+
+/* Checking for no memory is best done outside the kernel */
+static noinline void __init check_nomem(struct maple_tree *mt)
+{
+ MA_STATE(ms, mt, 1, 1);
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ /* Ensure no bypassing of allocation failures */
+ mt_set_non_kernel(0);
+
+ /* Storing something at 1 requires memory allocation */
+ MT_BUG_ON(mt, mtree_insert(mt, 1, &ms, GFP_ATOMIC) != -ENOMEM);
+ /* Storing something at 0 does not */
+ MT_BUG_ON(mt, mtree_insert(mt, 0, &ms, GFP_ATOMIC) != 0);
+
+ /*
+ * Simulate two threads racing; the first one fails to allocate
+ * memory to insert an entry at 1, then the second one succeeds
+ * in allocating memory to insert an entry at 2. The first one
+ * then needs to free the node it allocated. LeakSanitizer will
+ * notice this, as will the 'nr_allocated' debugging aid in the
+ * userspace test suite.
+ */
+ mtree_lock(mt);
+ mas_store(&ms, &ms); /* insert 1 -> &ms, fails. */
+ MT_BUG_ON(mt, ms.node != MA_ERROR(-ENOMEM));
+ mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */
+ MT_BUG_ON(mt, ms.status != ma_start);
+ mtree_unlock(mt);
+ MT_BUG_ON(mt, mtree_insert(mt, 2, mt, GFP_KERNEL) != 0);
+ mtree_lock(mt);
+ mas_store(&ms, &ms); /* insert 1 -> &ms */
+ mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */
+ mtree_unlock(mt);
+ mas_destroy(&ms);
+ mtree_destroy(mt);
+}
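+
+/*
+ * Sketch of the retry pattern exercised above (assumed usage, not part of
+ * the original test): mas_nomem() drops the lock, allocates, and returns
+ * true when the store should be retried:
+ *
+ *	mtree_lock(mt);
+ *	do {
+ *		mas_store(&mas, entry);
+ *	} while (mas_nomem(&mas, GFP_KERNEL));
+ *	mtree_unlock(mt);
+ */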
+
+static noinline void __init check_locky(struct maple_tree *mt)
+{
+ MA_STATE(ms, mt, 2, 2);
+ MA_STATE(reader, mt, 2, 2);
+
+ mt_set_non_kernel(2);
+ mt_set_in_rcu(mt);
+ mas_lock(&ms);
+ mas_store(&ms, &ms);
+ mas_set_range(&ms, 1, 3);
+ mas_store(&ms, &reader);
+ mas_unlock(&ms);
+ mt_clear_in_rcu(mt);
+}
+
+/*
+ * Compares two nodes except for the addresses stored in the nodes.
+ * Returns zero if they are the same, otherwise returns non-zero.
+ */
+static int __init compare_node(struct maple_enode *enode_a,
+ struct maple_enode *enode_b)
+{
+ struct maple_node *node_a, *node_b;
+ struct maple_node a, b;
+ void **slots_a, **slots_b; /* Do not use the rcu tag. */
+ enum maple_type type;
+ int i;
+
+ if (((unsigned long)enode_a & MAPLE_NODE_MASK) !=
+ ((unsigned long)enode_b & MAPLE_NODE_MASK)) {
+ pr_err("The lower 8 bits of enode are different.\n");
+ return -1;
+ }
+
+ type = mte_node_type(enode_a);
+ node_a = mte_to_node(enode_a);
+ node_b = mte_to_node(enode_b);
+ a = *node_a;
+ b = *node_b;
+
+ /* Do not compare addresses. */
+ if (ma_is_root(node_a) || ma_is_root(node_b)) {
+ a.parent = (struct maple_pnode *)((unsigned long)a.parent &
+ MA_ROOT_PARENT);
+ b.parent = (struct maple_pnode *)((unsigned long)b.parent &
+ MA_ROOT_PARENT);
+ } else {
+ a.parent = (struct maple_pnode *)((unsigned long)a.parent &
+ MAPLE_NODE_MASK);
+ b.parent = (struct maple_pnode *)((unsigned long)b.parent &
+ MAPLE_NODE_MASK);
+ }
+
+ if (a.parent != b.parent) {
+ pr_err("The lower 8 bits of parents are different. %p %p\n",
+ a.parent, b.parent);
+ return -1;
+ }
+
+ /*
+ * If it is a leaf node, the slots do not contain the node address, and
+ * no special processing of slots is required.
+ */
+ if (ma_is_leaf(type))
+ goto cmp;
+
+ slots_a = ma_slots(&a, type);
+ slots_b = ma_slots(&b, type);
+
+ for (i = 0; i < mt_slots[type]; i++) {
+ if (!slots_a[i] && !slots_b[i])
+ break;
+
+ if (!slots_a[i] || !slots_b[i]) {
+ pr_err("The number of slots is different.\n");
+ return -1;
+ }
+
+ /* Do not compare addresses in slots. */
+ ((unsigned long *)slots_a)[i] &= MAPLE_NODE_MASK;
+ ((unsigned long *)slots_b)[i] &= MAPLE_NODE_MASK;
+ }
+
+cmp:
+ /*
+ * Compare all contents of two nodes, including parent (except address),
+ * slots (except address), pivots, gaps and metadata.
+ */
+ return memcmp(&a, &b, sizeof(struct maple_node));
+}
+
+/*
+ * Compare two trees and return 0 if they are the same, non-zero otherwise.
+ */
+static int __init compare_tree(struct maple_tree *mt_a, struct maple_tree *mt_b)
+{
+ MA_STATE(mas_a, mt_a, 0, 0);
+ MA_STATE(mas_b, mt_b, 0, 0);
+
+ if (mt_a->ma_flags != mt_b->ma_flags) {
+ pr_err("The flags of the two trees are different.\n");
+ return -1;
+ }
+
+ mas_dfs_preorder(&mas_a);
+ mas_dfs_preorder(&mas_b);
+
+ if (mas_is_ptr(&mas_a) || mas_is_ptr(&mas_b)) {
+ if (!(mas_is_ptr(&mas_a) && mas_is_ptr(&mas_b))) {
+ pr_err("One is ma_root and the other is not.\n");
+ return -1;
+ }
+ return 0;
+ }
+
+ while (!mas_is_none(&mas_a) || !mas_is_none(&mas_b)) {
+ if (mas_is_none(&mas_a) || mas_is_none(&mas_b)) {
+ pr_err("One is ma_none and the other is not.\n");
+ return -1;
+ }
+
+ if (mas_a.min != mas_b.min ||
+ mas_a.max != mas_b.max) {
+ pr_err("mas->min, mas->max do not match.\n");
+ return -1;
+ }
+
+ if (compare_node(mas_a.node, mas_b.node)) {
+ pr_err("The contents of nodes %p and %p are different.\n",
+ mas_a.node, mas_b.node);
+ mt_dump(mt_a, mt_dump_dec);
+ mt_dump(mt_b, mt_dump_dec);
+ return -1;
+ }
+
+ mas_dfs_preorder(&mas_a);
+ mas_dfs_preorder(&mas_b);
+ }
+
+ return 0;
+}
+
+static __init void mas_subtree_max_range(struct ma_state *mas)
+{
+ unsigned long limit = mas->max;
+ MA_STATE(newmas, mas->tree, 0, 0);
+ void *entry;
+
+ mas_for_each(mas, entry, limit) {
+ if (mas->last - mas->index >=
+ newmas.last - newmas.index) {
+ newmas = *mas;
+ }
+ }
+
+ *mas = newmas;
+}
+
+/*
+ * build_full_tree() - Build a full tree.
+ * @mt: The tree to build.
+ * @flags: The maple tree flags to use when building the tree.
+ * @height: The height of the tree to build.
+ *
+ * Build a tree with full leaf nodes and internal nodes. Note that the height
+ * should not exceed 3, otherwise it will take a long time to build.
+ * Return: zero if the build is successful, non-zero if it fails.
+ */
+static __init int build_full_tree(struct maple_tree *mt, unsigned int flags,
+ int height)
+{
+ MA_STATE(mas, mt, 0, 0);
+ unsigned long step;
+ int ret = 0, cnt = 1;
+ enum maple_type type;
+
+ mt_init_flags(mt, flags);
+ mtree_insert_range(mt, 0, ULONG_MAX, xa_mk_value(5), GFP_KERNEL);
+
+ mtree_lock(mt);
+
+ while (1) {
+ mas_set(&mas, 0);
+ if (mt_height(mt) < height) {
+ mas.max = ULONG_MAX;
+ goto store;
+ }
+
+ while (1) {
+ mas_dfs_preorder(&mas);
+ if (mas_is_none(&mas))
+ goto unlock;
+
+ type = mte_node_type(mas.node);
+ if (mas_data_end(&mas) + 1 < mt_slots[type]) {
+ mas_set(&mas, mas.min);
+ goto store;
+ }
+ }
+store:
+ mas_subtree_max_range(&mas);
+ step = mas.last - mas.index;
+ if (step < 1) {
+ ret = -1;
+ goto unlock;
+ }
+
+ step /= 2;
+ mas.last = mas.index + step;
+ mas_store_gfp(&mas, xa_mk_value(5),
+ GFP_KERNEL);
+ ++cnt;
+ }
+unlock:
+ mtree_unlock(mt);
+
+ MT_BUG_ON(mt, mt_height(mt) != height);
+ /* pr_info("height:%u number of elements:%d\n", mt_height(mt), cnt); */
+ return ret;
+}
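+
+/*
+ * Explanatory note (not original code): each pass above locates the widest
+ * remaining range with mas_subtree_max_range() and stores into its lower
+ * half, bisecting ranges until every node is full. A hypothetical caller:
+ *
+ *	MT_BUG_ON(mt, build_full_tree(mt, MT_FLAGS_ALLOC_RANGE, 2));
+ */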
+
+static noinline void __init check_mtree_dup(struct maple_tree *mt)
+{
+ DEFINE_MTREE(new);
+ int i, j, ret, count = 0;
+ unsigned int rand_seed = 17, rand;
+
+ /* store a value at [0, 0] */
+ mt_init_flags(mt, 0);
+ mtree_store_range(mt, 0, 0, xa_mk_value(0), GFP_KERNEL);
+ ret = mtree_dup(mt, &new, GFP_KERNEL);
+ MT_BUG_ON(&new, ret);
+ mt_validate(&new);
+ if (compare_tree(mt, &new))
+ MT_BUG_ON(&new, 1);
+
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+
+ /* The two trees have different attributes. */
+ mt_init_flags(mt, 0);
+ mt_init_flags(&new, MT_FLAGS_ALLOC_RANGE);
+ ret = mtree_dup(mt, &new, GFP_KERNEL);
+ MT_BUG_ON(&new, ret != -EINVAL);
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+
+ /* The new tree is not empty */
+ mt_init_flags(mt, 0);
+ mt_init_flags(&new, 0);
+ mtree_store(&new, 5, xa_mk_value(5), GFP_KERNEL);
+ ret = mtree_dup(mt, &new, GFP_KERNEL);
+ MT_BUG_ON(&new, ret != -EINVAL);
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+
+ /* Test for duplicating full trees. */
+ for (i = 1; i <= 3; i++) {
+ ret = build_full_tree(mt, 0, i);
+ MT_BUG_ON(mt, ret);
+ mt_init_flags(&new, 0);
+
+ ret = mtree_dup(mt, &new, GFP_KERNEL);
+ MT_BUG_ON(&new, ret);
+ mt_validate(&new);
+ if (compare_tree(mt, &new))
+ MT_BUG_ON(&new, 1);
+
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+ }
+
+ for (i = 1; i <= 3; i++) {
+ ret = build_full_tree(mt, MT_FLAGS_ALLOC_RANGE, i);
+ MT_BUG_ON(mt, ret);
+ mt_init_flags(&new, MT_FLAGS_ALLOC_RANGE);
+
+ ret = mtree_dup(mt, &new, GFP_KERNEL);
+ MT_BUG_ON(&new, ret);
+ mt_validate(&new);
+ if (compare_tree(mt, &new))
+ MT_BUG_ON(&new, 1);
+
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+ }
+
+ /* Test for normal duplicating. */
+ for (i = 0; i < 1000; i += 3) {
+ if (i & 1) {
+ mt_init_flags(mt, 0);
+ mt_init_flags(&new, 0);
+ } else {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mt_init_flags(&new, MT_FLAGS_ALLOC_RANGE);
+ }
+
+ for (j = 0; j < i; j++) {
+ mtree_store_range(mt, j * 10, j * 10 + 5,
+ xa_mk_value(j), GFP_KERNEL);
+ }
+
+ ret = mtree_dup(mt, &new, GFP_KERNEL);
+ MT_BUG_ON(&new, ret);
+ mt_validate(&new);
+ if (compare_tree(mt, &new))
+ MT_BUG_ON(&new, 1);
+
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+ }
+
+ /* Test memory allocation failed. */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i < 30; i += 3) {
+ mtree_store_range(mt, i * 10, i * 10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+ }
+
+ /* Failed at the first node. */
+ mt_init_flags(&new, MT_FLAGS_ALLOC_RANGE);
+ mt_set_non_kernel(0);
+ ret = mtree_dup(mt, &new, GFP_NOWAIT);
+ mt_set_non_kernel(0);
+ MT_BUG_ON(&new, ret != -ENOMEM);
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+
+ /* Random maple tree fails at a random node. */
+ for (i = 0; i < 1000; i += 3) {
+ if (i & 1) {
+ mt_init_flags(mt, 0);
+ mt_init_flags(&new, 0);
+ } else {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mt_init_flags(&new, MT_FLAGS_ALLOC_RANGE);
+ }
+
+ for (j = 0; j < i; j++) {
+ mtree_store_range(mt, j * 10, j * 10 + 5,
+ xa_mk_value(j), GFP_KERNEL);
+ }
+ /*
+ * The rand() library function is not used so that the same sequence
+ * of random numbers is generated on any platform.
+ */
+ rand_seed = rand_seed * 1103515245 + 12345;
+ rand = rand_seed / 65536 % 128;
+ mt_set_non_kernel(rand);
+
+ ret = mtree_dup(mt, &new, GFP_NOWAIT);
+ mt_set_non_kernel(0);
+ if (ret != 0) {
+ MT_BUG_ON(&new, ret != -ENOMEM);
+ count++;
+ mtree_destroy(mt);
+ continue;
+ }
+
+ mt_validate(&new);
+ if (compare_tree(mt, &new))
+ MT_BUG_ON(&new, 1);
+
+ mtree_destroy(mt);
+ mtree_destroy(&new);
+ }
+
+ /* pr_info("mtree_dup() fail %d times\n", count); */
+ BUG_ON(!count);
+}
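+
+/*
+ * Note on the deterministic PRNG above (explanatory, not original code):
+ * 1103515245 and 12345 are the classic ANSI C rand() LCG constants, so
+ *
+ *	seed = seed * 1103515245 + 12345;	(wraps modulo 2^32)
+ *	rand = seed / 65536 % 128;		(bits 16-22, range 0-127)
+ *
+ * produces the same failure points on every platform; e.g. assuming a
+ * 32-bit unsigned int, seed 17 yields 43 on the first iteration.
+ */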
+
+extern void test_kmem_cache_bulk(void);
+
+static inline void check_spanning_store_height(struct maple_tree *mt)
+{
+ int index = 0;
+ int last = 140;
+ MA_STATE(mas, mt, 0, 0);
+
+ mas_lock(&mas);
+ while (mt_height(mt) != 3) {
+ mas_store_gfp(&mas, xa_mk_value(index), GFP_KERNEL);
+ mas_set(&mas, ++index);
+ }
+
+ if (MAPLE_32BIT)
+ last = 155; /* 32 bit higher branching factor. */
+
+ mas_set_range(&mas, 90, last);
+ mas_store_gfp(&mas, xa_mk_value(index), GFP_KERNEL);
+ MT_BUG_ON(mt, mas_mt_height(&mas) != 2);
+ mas_unlock(&mas);
+}
+
+/*
+ * Test the path of a spanning rebalance which results in a collapse,
+ * where rebalancing the child node leads to insufficiency in the
+ * parent node.
+ */
+static void check_collapsing_rebalance(struct maple_tree *mt)
+{
+ int i = 0;
+ MA_STATE(mas, mt, ULONG_MAX, ULONG_MAX);
+
+ /* create a height 6 tree */
+ while (mt_height(mt) < 6) {
+ mtree_store_range(mt, i, i + 10, xa_mk_value(i), GFP_KERNEL);
+ i += 9;
+ }
+
+ /* delete all entries one at a time, starting from the right */
+ mtree_lock(mt);
+ do {
+ mas_erase(&mas);
+ } while (mas_prev(&mas, 0) != NULL);
+
+ mtree_unlock(mt);
+}
+
+/* callback function used for check_nomem_writer_race() */
+static void writer2(void *maple_tree)
+{
+ struct maple_tree *mt = (struct maple_tree *)maple_tree;
+ MA_STATE(mas, mt, 6, 10);
+
+ mtree_lock(mas.tree);
+ mas_store(&mas, xa_mk_value(0xC));
+ mas_destroy(&mas);
+ mtree_unlock(mas.tree);
+}
+
+/*
+ * check_nomem_writer_race() - test a possible race in the mas_nomem() path
+ * @mt: The tree to test.
+ *
+ * There is a possible race when mas_nomem() gives up its lock under low
+ * memory conditions. A second writer can change the entry that the primary
+ * writer executing the mas_nomem() path is modifying. This test recreates this
+ * scenario to ensure we are handling it correctly.
+ */
+static void check_nomem_writer_race(struct maple_tree *mt)
+{
+ MA_STATE(mas, mt, 0, 5);
+
+ mt_set_non_kernel(0);
+ /* setup root with 2 values with NULL in between */
+ mtree_store_range(mt, 0, 5, xa_mk_value(0xA), GFP_KERNEL);
+ mtree_store_range(mt, 6, 10, NULL, GFP_KERNEL);
+ mtree_store_range(mt, 11, 15, xa_mk_value(0xB), GFP_KERNEL);
+
+ /* setup writer 2 that will trigger the race condition */
+ mt_set_private(mt);
+ mt_set_callback(writer2);
+
+ mtree_lock(mt);
+ /* erase 0-5 */
+ mas_erase(&mas);
+
+ /* index 6-10 should retain the value from writer 2 */
+ check_load(mt, 6, xa_mk_value(0xC));
+ mtree_unlock(mt);
+
+ mt_set_non_kernel(0);
+ /* test for the same race but with mas_store_gfp() */
+ mtree_store_range(mt, 0, 5, xa_mk_value(0xA), GFP_KERNEL);
+ mtree_store_range(mt, 6, 10, NULL, GFP_KERNEL);
+
+ mas_set_range(&mas, 0, 5);
+
+ /* setup writer 2 that will trigger the race condition */
+ mt_set_private(mt);
+ mt_set_callback(writer2);
+
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+
+ /* ensure write made by writer 2 is retained */
+ check_load(mt, 6, xa_mk_value(0xC));
+
+ mt_set_private(NULL);
+ mt_set_callback(NULL);
+ mtree_unlock(mt);
+}
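+
+/*
+ * Sketch of the race injection used above (an assumption about the userspace
+ * test shims, not original documentation): mt_set_callback() registers a
+ * hook fired while the primary writer has dropped the tree lock in the
+ * mas_nomem() path, so writer2() runs exactly inside the race window:
+ *
+ *	mt_set_private(mt);
+ *	mt_set_callback(writer2);
+ */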
+
+/*
+ * Test simulating expansion of a vma from [0x7fffffffe000, 0x7ffffffff000)
+ * to [0x7ffde4ca1000, 0x7ffffffff000), then shrinking the vma to
+ * [0x7ffde4ca1000, 0x7ffde4ca2000)
+ */
+static inline int check_vma_modification(struct maple_tree *mt)
+{
+#if defined(CONFIG_64BIT)
+ MA_STATE(mas, mt, 0, 0);
+
+ mtree_lock(mt);
+ /* vma with old start and old end */
+ __mas_set_range(&mas, 0x7fffffffe000, 0x7ffffffff000 - 1);
+ mas_preallocate(&mas, xa_mk_value(1), GFP_KERNEL);
+ mas_store_prealloc(&mas, xa_mk_value(1));
+
+ /* next write occurs partly in previous range [0, 0x7fffffffe000)*/
+ mas_prev_range(&mas, 0);
+ /* expand vma to [0x7ffde4ca1000, 0x7ffffffff000) */
+ __mas_set_range(&mas, 0x7ffde4ca1000, 0x7ffffffff000 - 1);
+ mas_preallocate(&mas, xa_mk_value(1), GFP_KERNEL);
+ mas_store_prealloc(&mas, xa_mk_value(1));
+
+ /* shrink vma to [0x7ffde4ca1000, 0x7ffde4ca2000) */
+ __mas_set_range(&mas, 0x7ffde4ca2000, 0x7ffffffff000 - 1);
+ mas_preallocate(&mas, NULL, GFP_KERNEL);
+ mas_store_prealloc(&mas, NULL);
+ mt_dump(mt, mt_dump_hex);
+
+ mas_destroy(&mas);
+ mtree_unlock(mt);
+#endif
+
+ return 0;
+}
+
+void farmer_tests(void)
+{
+ struct maple_node *node;
+ DEFINE_MTREE(tree);
+
+ mt_dump(&tree, mt_dump_dec);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | MT_FLAGS_USE_RCU);
+ check_vma_modification(&tree);
+ mtree_destroy(&tree);
+
+ tree.ma_root = xa_mk_value(0);
+ mt_dump(&tree, mt_dump_dec);
+
+ node = mt_alloc_one(GFP_KERNEL);
+ node->parent = (void *)((unsigned long)(&tree) | 1);
+ node->slot[0] = xa_mk_value(0);
+ node->slot[1] = xa_mk_value(1);
+ node->mr64.pivot[0] = 0;
+ node->mr64.pivot[1] = 1;
+ node->mr64.pivot[2] = 0;
+ tree.ma_root = mt_mk_node(node, maple_leaf_64);
+ mt_dump(&tree, mt_dump_dec);
+
+ node->parent = ma_parent_ptr(node);
+ ma_free_rcu(node);
+
+ /* Check things that will make lockdep angry */
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_locky(&tree);
+ mtree_destroy(&tree);
+ test_kmem_cache_bulk();
+
+ mt_init_flags(&tree, 0);
+ check_dfs_preorder(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_USE_RCU);
+ check_nomem_writer_race(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_prealloc(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_spanning_write(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_spanning_store_height(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_collapsing_rebalance(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_null_expand(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ check_mtree_dup(&tree);
+ mtree_destroy(&tree);
+
+ /* RCU testing */
+ mt_init_flags(&tree, 0);
+ check_erase_testset(&tree);
+ mtree_destroy(&tree);
+
+ if (!MAPLE_32BIT) {
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_rcu_simulated(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_rcu_threaded(&tree);
+ mtree_destroy(&tree);
+ }
+
+#if defined(CONFIG_64BIT)
+ /* Captures from VMs that found previous errors */
+ mt_init_flags(&tree, 0);
+ check_erase2_sets(&tree);
+ mtree_destroy(&tree);
+#endif
+
+ /* No memory handling */
+ check_nomem(&tree);
+}
+
+static unsigned long get_last_index(struct ma_state *mas)
+{
+ struct maple_node *node = mas_mn(mas);
+ enum maple_type mt = mte_node_type(mas->node);
+ unsigned long *pivots = ma_pivots(node, mt);
+ unsigned long last_index = mas_data_end(mas);
+
+ BUG_ON(last_index == 0);
+
+ return pivots[last_index - 1] + 1;
+}
+
+/*
+ * Assert that we handle spanning stores that consume the entirety of the right
+ * leaf node correctly.
+ */
+static void test_spanning_store_regression(void)
+{
+ unsigned long from = 0, to = 0;
+ DEFINE_MTREE(tree);
+ MA_STATE(mas, &tree, 0, 0);
+
+ /*
+ * Build a 3-level tree. We require a parent node below the root node
+ * and 2 leaf nodes under it, so we can span the entirety of the right
+ * hand node.
+ */
+ build_full_tree(&tree, 0, 3);
+
+ /* Descend into position at depth 2. */
+ mas_reset(&mas);
+ mas_start(&mas);
+ mas_descend(&mas);
+ mas_descend(&mas);
+
+ /*
+ * We need to establish a tree like the below.
+ *
+ * Then we can try a store in [from, to] which results in a spanned
+ * store across nodes B and C, with the maple state at the time of the
+ * write being such that only the subtree at A and below is considered.
+ *
+ * Height
+ * 0 Root Node
+ * / \
+ * pivot = to / \ pivot = ULONG_MAX
+ * / \
+ * 1 A [-----] ...
+ * / \
+ * pivot = from / \ pivot = to
+ * / \
+ * 2 (LEAVES) B [-----] [-----] C
+ * ^--- Last pivot to.
+ */
+ while (true) {
+ unsigned long tmp = get_last_index(&mas);
+
+ if (mas_next_sibling(&mas)) {
+ from = tmp;
+ to = mas.max;
+ } else {
+ break;
+ }
+ }
+
+ BUG_ON(from == 0 && to == 0);
+
+ /* Perform the store. */
+ mas_set_range(&mas, from, to);
+ mas_store_gfp(&mas, xa_mk_value(0xdead), GFP_KERNEL);
+
+ /* If the regression occurs, the validation will fail. */
+ mt_validate(&tree);
+
+ /* Cleanup. */
+ __mt_destroy(&tree);
+}
+
+static void regression_tests(void)
+{
+ test_spanning_store_regression();
+}
+
+void maple_tree_tests(void)
+{
+#if !defined(BENCH)
+ regression_tests();
+ farmer_tests();
+#endif
+ maple_tree_seed();
+ maple_tree_harvest();
+}
+
+int __weak main(void)
+{
+ maple_tree_init();
+ maple_tree_tests();
+ rcu_barrier();
+ if (nr_allocated)
+ printf("nr_allocated = %d\n", nr_allocated);
+ return 0;
+}
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index 9eae0fb5a67d..eaff1b036989 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -159,7 +159,7 @@ void multiorder_tagged_iteration(struct xarray *xa)
item_kill_tree(xa);
}
-bool stop_iteration = false;
+bool stop_iteration;
static void *creator_func(void *ptr)
{
@@ -201,6 +201,7 @@ static void multiorder_iteration_race(struct xarray *xa)
pthread_t worker_thread[num_threads];
int i;
+ stop_iteration = false;
pthread_create(&worker_thread[0], NULL, &creator_func, xa);
for (i = 1; i < num_threads; i++)
pthread_create(&worker_thread[i], NULL, &iterator_func, xa);
@@ -211,6 +212,65 @@ static void multiorder_iteration_race(struct xarray *xa)
item_kill_tree(xa);
}
+static void *load_creator(void *ptr)
+{
+ /* 'order' is set up to ensure we have sibling entries */
+ unsigned int order;
+ struct radix_tree_root *tree = ptr;
+ int i;
+
+ rcu_register_thread();
+ item_insert_order(tree, 3 << RADIX_TREE_MAP_SHIFT, 0);
+ item_insert_order(tree, 2 << RADIX_TREE_MAP_SHIFT, 0);
+ for (i = 0; i < 10000; i++) {
+ for (order = 1; order < RADIX_TREE_MAP_SHIFT; order++) {
+ unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) -
+ (1 << order);
+ item_insert_order(tree, index, order);
+ xa_set_mark(tree, index, XA_MARK_1);
+ item_delete_rcu(tree, index);
+ }
+ }
+ rcu_unregister_thread();
+
+ stop_iteration = true;
+ return NULL;
+}
+
+static void *load_worker(void *ptr)
+{
+ unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) - 1;
+
+ rcu_register_thread();
+ while (!stop_iteration) {
+ unsigned long find_index = (2 << RADIX_TREE_MAP_SHIFT) + 1;
+ struct item *item = xa_load(ptr, index);
+ assert(!xa_is_internal(item));
+ item = xa_find(ptr, &find_index, index, XA_MARK_1);
+ assert(!xa_is_internal(item));
+ }
+ rcu_unregister_thread();
+
+ return NULL;
+}
+
+static void load_race(struct xarray *xa)
+{
+ const int num_threads = sysconf(_SC_NPROCESSORS_ONLN) * 4;
+ pthread_t worker_thread[num_threads];
+ int i;
+
+ stop_iteration = false;
+ pthread_create(&worker_thread[0], NULL, &load_creator, xa);
+ for (i = 1; i < num_threads; i++)
+ pthread_create(&worker_thread[i], NULL, &load_worker, xa);
+
+ for (i = 0; i < num_threads; i++)
+ pthread_join(worker_thread[i], NULL);
+
+ item_kill_tree(xa);
+}
+
static DEFINE_XARRAY(array);
void multiorder_checks(void)
@@ -218,13 +278,23 @@ void multiorder_checks(void)
multiorder_iteration(&array);
multiorder_tagged_iteration(&array);
multiorder_iteration_race(&array);
+ load_race(&array);
radix_tree_cpu_dead(0);
}
-int __weak main(void)
+int __weak main(int argc, char **argv)
{
+ int opt;
+
+ while ((opt = getopt(argc, argv, "ls:v")) != -1) {
+ if (opt == 'v')
+ test_verbose++;
+ }
+
+ rcu_register_thread();
radix_tree_init();
multiorder_checks();
+ rcu_unregister_thread();
return 0;
}
diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c
index a61c7bcbc72d..63f468bf8245 100644
--- a/tools/testing/radix-tree/regression1.c
+++ b/tools/testing/radix-tree/regression1.c
@@ -177,7 +177,7 @@ void regression1_test(void)
nr_threads = 2;
pthread_barrier_init(&worker_barrier, NULL, nr_threads);
- threads = malloc(nr_threads * sizeof(pthread_t *));
+ threads = malloc(nr_threads * sizeof(*threads));
for (i = 0; i < nr_threads; i++) {
arg = i;
diff --git a/tools/testing/radix-tree/xarray.c b/tools/testing/radix-tree/xarray.c
index e61e43efe463..253208a8541b 100644
--- a/tools/testing/radix-tree/xarray.c
+++ b/tools/testing/radix-tree/xarray.c
@@ -4,16 +4,9 @@
* Copyright (c) 2018 Matthew Wilcox <willy@infradead.org>
*/
-#define XA_DEBUG
+#include "xarray-shared.h"
#include "test.h"
-#define module_init(x)
-#define module_exit(x)
-#define MODULE_AUTHOR(x)
-#define MODULE_LICENSE(x)
-#define dump_stack() assert(0)
-
-#include "../../../lib/xarray.c"
#undef XA_DEBUG
#include "../../../lib/test_xarray.c"
@@ -25,11 +18,13 @@ void xarray_tests(void)
int __weak main(void)
{
+ rcu_register_thread();
radix_tree_init();
xarray_tests();
radix_tree_cpu_dead(1);
rcu_barrier();
if (nr_allocated)
printf("nr_allocated = %d\n", nr_allocated);
+ rcu_unregister_thread();
return 0;
}
diff --git a/tools/testing/rbtree/Makefile b/tools/testing/rbtree/Makefile
new file mode 100644
index 000000000000..d7bbae2af4c7
--- /dev/null
+++ b/tools/testing/rbtree/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+
+.PHONY: clean
+
+TARGETS = rbtree_test interval_tree_test
+OFILES = $(SHARED_OFILES) rbtree-shim.o interval_tree-shim.o maple-shim.o
+DEPS = ../../../include/linux/rbtree.h \
+ ../../../include/linux/rbtree_types.h \
+ ../../../include/linux/rbtree_augmented.h \
+ ../../../include/linux/interval_tree.h \
+ ../../../include/linux/interval_tree_generic.h \
+ ../../../lib/rbtree.c \
+ ../../../lib/interval_tree.c
+
+targets: $(TARGETS)
+
+include ../shared/shared.mk
+
+ifeq ($(DEBUG), 1)
+ CFLAGS += -g
+endif
+
+$(TARGETS): $(OFILES)
+
+rbtree-shim.o: $(DEPS)
+rbtree_test.o: ../../../lib/rbtree_test.c
+interval_tree-shim.o: $(DEPS)
+interval_tree-shim.o: CFLAGS += -DCONFIG_INTERVAL_TREE_SPAN_ITER
+interval_tree_test.o: ../../../lib/interval_tree_test.c
+interval_tree_test.o: CFLAGS += -DCONFIG_INTERVAL_TREE_SPAN_ITER
+
+clean:
+ $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/*
diff --git a/tools/testing/rbtree/interval_tree_test.c b/tools/testing/rbtree/interval_tree_test.c
new file mode 100644
index 000000000000..49bc5b534330
--- /dev/null
+++ b/tools/testing/rbtree/interval_tree_test.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * interval_tree.c: Userspace Interval Tree test-suite
+ * Copyright (c) 2025 Wei Yang <richard.weiyang@gmail.com>
+ */
+#include <linux/math64.h>
+#include <linux/kern_levels.h>
+#include "shared.h"
+#include "maple-shared.h"
+
+#include "../../../lib/interval_tree_test.c"
+
+int usage(void)
+{
+ fprintf(stderr, "Userland interval tree test cases\n");
+ fprintf(stderr, " -n: Number of nodes in the interval tree\n");
+ fprintf(stderr, " -p: Number of iterations modifying the tree\n");
+ fprintf(stderr, " -q: Number of searches to the interval tree\n");
+ fprintf(stderr, " -s: Number of iterations searching the tree\n");
+ fprintf(stderr, " -a: Searches will iterate all nodes in the tree\n");
+ fprintf(stderr, " -m: Largest value for the interval's endpoint\n");
+ fprintf(stderr, " -r: Random seed\n");
+ exit(-1);
+}
+
+void interval_tree_tests(void)
+{
+ interval_tree_test_init();
+ interval_tree_test_exit();
+}
+
+int main(int argc, char **argv)
+{
+ int opt;
+
+ while ((opt = getopt(argc, argv, "n:p:q:s:am:r:")) != -1) {
+ if (opt == 'n')
+ nnodes = strtoul(optarg, NULL, 0);
+ else if (opt == 'p')
+ perf_loops = strtoul(optarg, NULL, 0);
+ else if (opt == 'q')
+ nsearches = strtoul(optarg, NULL, 0);
+ else if (opt == 's')
+ search_loops = strtoul(optarg, NULL, 0);
+ else if (opt == 'a')
+ search_all = true;
+ else if (opt == 'm')
+ max_endpoint = strtoul(optarg, NULL, 0);
+ else if (opt == 'r')
+ seed = strtoul(optarg, NULL, 0);
+ else
+ usage();
+ }
+
+ maple_tree_init();
+ interval_tree_tests();
+ return 0;
+}
diff --git a/tools/testing/rbtree/rbtree_test.c b/tools/testing/rbtree/rbtree_test.c
new file mode 100644
index 000000000000..585c970f679e
--- /dev/null
+++ b/tools/testing/rbtree/rbtree_test.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rbtree_test.c: Userspace Red Black Tree test-suite
+ * Copyright (c) 2025 Wei Yang <richard.weiyang@gmail.com>
+ */
+#include <linux/init.h>
+#include <linux/math64.h>
+#include <linux/kern_levels.h>
+#include "shared.h"
+
+#include "../../../lib/rbtree_test.c"
+
+int usage(void)
+{
+ fprintf(stderr, "Userland rbtree test cases\n");
+ fprintf(stderr, " -n: Number of nodes in the rb-tree\n");
+ fprintf(stderr, " -p: Number of iterations modifying the rb-tree\n");
+ fprintf(stderr, " -c: Number of iterations modifying and verifying the rb-tree\n");
+ fprintf(stderr, " -r: Random seed\n");
+ exit(-1);
+}
+
+void rbtree_tests(void)
+{
+ rbtree_test_init();
+ rbtree_test_exit();
+}
+
+int main(int argc, char **argv)
+{
+ int opt;
+
+ while ((opt = getopt(argc, argv, "n:p:c:r:")) != -1) {
+ if (opt == 'n')
+ nnodes = strtoul(optarg, NULL, 0);
+ else if (opt == 'p')
+ perf_loops = strtoul(optarg, NULL, 0);
+ else if (opt == 'c')
+ check_loops = strtoul(optarg, NULL, 0);
+ else if (opt == 'r')
+ seed = strtoul(optarg, NULL, 0);
+ else
+ usage();
+ }
+
+ rbtree_tests();
+ return 0;
+}
diff --git a/tools/testing/rbtree/test.h b/tools/testing/rbtree/test.h
new file mode 100644
index 000000000000..f1f1b545b55a
--- /dev/null
+++ b/tools/testing/rbtree/test.h
@@ -0,0 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+void rbtree_tests(void);
+void interval_tree_tests(void);
diff --git a/tools/testing/scatterlist/linux/mm.h b/tools/testing/scatterlist/linux/mm.h
index f9a12005fcea..121ae78d6e88 100644
--- a/tools/testing/scatterlist/linux/mm.h
+++ b/tools/testing/scatterlist/linux/mm.h
@@ -51,7 +51,6 @@ static inline unsigned long page_to_phys(struct page *page)
#define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)
#define pfn_to_page(pfn) (void *)((pfn) * PAGE_SIZE)
-#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
#define __min(t1, t2, min1, min2, x, y) ({ \
t1 min1 = (x); \
@@ -74,7 +73,7 @@ static inline unsigned long page_to_phys(struct page *page)
__UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
x, y)
-#define preemptible() (1)
+#define pagefault_disabled() (0)
static inline void *kmap(struct page *page)
{
@@ -127,7 +126,7 @@ kmalloc_array(unsigned int n, unsigned int size, unsigned int flags)
#define kmemleak_free(a)
#define PageSlab(p) (0)
-#define flush_kernel_dcache_page(p)
+#define flush_dcache_page(p)
#define MAX_ERRNO 4095
diff --git a/tools/testing/scatterlist/main.c b/tools/testing/scatterlist/main.c
index 71c960dcd8a4..08465a701cd5 100644
--- a/tools/testing/scatterlist/main.c
+++ b/tools/testing/scatterlist/main.c
@@ -55,7 +55,6 @@ int main(void)
struct test *test, tests[] = {
{ -EINVAL, 1, pfn(0), NULL, PAGE_SIZE, 0, 1 },
{ 0, 1, pfn(0), NULL, PAGE_SIZE, PAGE_SIZE + 1, 1 },
- { 0, 1, pfn(0), NULL, PAGE_SIZE, sgmax + 1, 1 },
{ 0, 1, pfn(0), NULL, PAGE_SIZE, sgmax, 1 },
{ 0, 1, pfn(0), NULL, 1, sgmax, 1 },
{ 0, 2, pfn(0, 1), NULL, 2 * PAGE_SIZE, sgmax, 1 },
@@ -86,32 +85,46 @@ int main(void)
for (i = 0, test = tests; test->expected_segments; test++, i++) {
int left_pages = test->pfn_app ? test->num_pages : 0;
+ struct sg_append_table append = {};
struct page *pages[MAX_PAGES];
- struct sg_table st;
- struct scatterlist *sg;
+ int ret;
set_pages(pages, test->pfn, test->num_pages);
- sg = __sg_alloc_table_from_pages(&st, pages, test->num_pages, 0,
- test->size, test->max_seg, NULL, left_pages, GFP_KERNEL);
- assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret);
+ if (test->pfn_app)
+ ret = sg_alloc_append_table_from_pages(
+ &append, pages, test->num_pages, 0, test->size,
+ test->max_seg, left_pages, GFP_KERNEL);
+ else
+ ret = sg_alloc_table_from_pages_segment(
+ &append.sgt, pages, test->num_pages, 0,
+ test->size, test->max_seg, GFP_KERNEL);
+
+ assert(ret == test->alloc_ret);
if (test->alloc_ret)
continue;
if (test->pfn_app) {
set_pages(pages, test->pfn_app, test->num_pages);
- sg = __sg_alloc_table_from_pages(&st, pages, test->num_pages, 0,
- test->size, test->max_seg, sg, 0, GFP_KERNEL);
+ ret = sg_alloc_append_table_from_pages(
+ &append, pages, test->num_pages, 0, test->size,
+ test->max_seg, 0, GFP_KERNEL);
- assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret);
+ assert(ret == test->alloc_ret);
}
- VALIDATE(st.nents == test->expected_segments, &st, test);
+ VALIDATE(append.sgt.nents == test->expected_segments,
+ &append.sgt, test);
if (!test->pfn_app)
- VALIDATE(st.orig_nents == test->expected_segments, &st, test);
-
- sg_free_table(&st);
+ VALIDATE(append.sgt.orig_nents ==
+ test->expected_segments,
+ &append.sgt, test);
+
+ if (test->pfn_app)
+ sg_free_append_table(&append);
+ else
+ sg_free_table(&append.sgt);
}
assert(i == (sizeof(tests) / sizeof(tests[0])) - 1);
diff --git a/tools/testing/selftests/.gitignore b/tools/testing/selftests/.gitignore
index 055a5019b13c..674aaa02e396 100644
--- a/tools/testing/selftests/.gitignore
+++ b/tools/testing/selftests/.gitignore
@@ -3,7 +3,7 @@ gpiogpio-event-mon
gpiogpio-hammer
gpioinclude/
gpiolsgpio
-tpm2/SpaceTest.log
+kselftest_install/
# Python bytecode and cache
__pycache__/
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index afbab4aeef3c..56e44a98d6a5 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -1,56 +1,108 @@
# SPDX-License-Identifier: GPL-2.0
-TARGETS = arm64
+TARGETS += acct
+TARGETS += alsa
+TARGETS += amd-pstate
+TARGETS += arm64
TARGETS += bpf
TARGETS += breakpoints
+TARGETS += cachestat
TARGETS += capabilities
TARGETS += cgroup
TARGETS += clone3
+TARGETS += connector
TARGETS += core
TARGETS += cpufreq
TARGETS += cpu-hotplug
+TARGETS += damon
+TARGETS += devices/error_logs
+TARGETS += devices/probe
+TARGETS += dmabuf-heaps
TARGETS += drivers/dma-buf
+TARGETS += drivers/ntsync
+TARGETS += drivers/s390x/uvdevice
+TARGETS += drivers/net
+TARGETS += drivers/net/bonding
+TARGETS += drivers/net/team
+TARGETS += drivers/net/virtio_net
+TARGETS += drivers/platform/x86/intel/ifs
+TARGETS += dt
TARGETS += efivarfs
TARGETS += exec
+TARGETS += fchmodat2
TARGETS += filesystems
TARGETS += filesystems/binderfs
TARGETS += filesystems/epoll
+TARGETS += filesystems/fat
+TARGETS += filesystems/overlayfs
+TARGETS += filesystems/statmount
+TARGETS += filesystems/mount-notify
+TARGETS += filesystems/fuse
TARGETS += firmware
TARGETS += fpu
TARGETS += ftrace
TARGETS += futex
TARGETS += gpio
+TARGETS += hid
TARGETS += intel_pstate
+TARGETS += iommu
TARGETS += ipc
TARGETS += ir
TARGETS += kcmp
TARGETS += kexec
+TARGETS += kselftest_harness
TARGETS += kvm
+TARGETS += landlock
TARGETS += lib
TARGETS += livepatch
+TARGETS += liveupdate
TARGETS += lkdtm
+TARGETS += lsm
TARGETS += membarrier
TARGETS += memfd
TARGETS += memory-hotplug
TARGETS += mincore
TARGETS += mount
+TARGETS += mount_setattr
+TARGETS += move_mount_set_group
TARGETS += mqueue
+TARGETS += mseal_system_mappings
+TARGETS += nci
TARGETS += net
+TARGETS += net/af_unix
+TARGETS += net/can
TARGETS += net/forwarding
+TARGETS += net/hsr
TARGETS += net/mptcp
-TARGETS += netfilter
+TARGETS += net/netfilter
+TARGETS += net/openvswitch
+TARGETS += net/ovpn
+TARGETS += net/packetdrill
+TARGETS += net/rds
+TARGETS += net/tcp_ao
+TARGETS += nolibc
TARGETS += nsfs
+TARGETS += pci_endpoint
+TARGETS += pcie_bwctrl
+TARGETS += perf_events
TARGETS += pidfd
TARGETS += pid_namespace
+TARGETS += power_supply
TARGETS += powerpc
+TARGETS += prctl
TARGETS += proc
TARGETS += pstore
TARGETS += ptrace
TARGETS += openat2
+TARGETS += resctrl
+TARGETS += riscv
+TARGETS += rlimits
TARGETS += rseq
TARGETS += rtc
+TARGETS += rust
+TARGETS += sched_ext
TARGETS += seccomp
TARGETS += sgx
-TARGETS += sigaltstack
+TARGETS += signal
TARGETS += size
TARGETS += sparc64
TARGETS += splice
@@ -59,16 +111,24 @@ TARGETS += sync
TARGETS += syscall_user_dispatch
TARGETS += sysctl
TARGETS += tc-testing
+TARGETS += tdx
+TARGETS += thermal/intel/power_floor
+TARGETS += thermal/intel/workload_hint
TARGETS += timens
ifneq (1, $(quicktest))
TARGETS += timers
endif
TARGETS += tmpfs
TARGETS += tpm2
-TARGETS += user
+TARGETS += tty
+TARGETS += ublk
+TARGETS += uevent
+TARGETS += user_events
TARGETS += vDSO
-TARGETS += vm
+TARGETS += mm
+TARGETS += vfio
TARGETS += x86
+TARGETS += x86/bugs
TARGETS += zram
#Please keep the TARGETS list alphabetically sorted
# Run "make quicktest=1 run_tests" or
@@ -77,8 +137,17 @@ TARGETS += zram
TARGETS_HOTPLUG = cpu-hotplug
TARGETS_HOTPLUG += memory-hotplug
-# User can optionally provide a TARGETS skiplist.
-SKIP_TARGETS ?=
+# Networking tests want the net/lib target, include it automatically
+ifneq ($(filter net drivers/net drivers/net/hw,$(TARGETS)),)
+ifeq ($(filter net/lib,$(TARGETS)),)
+ INSTALL_DEP_TARGETS := net/lib
+endif
+endif
+
+# User can optionally provide a TARGETS skiplist. By default we skip
+# targets using BPF since it has cutting edge build time dependencies
+# which require more effort to install.
+SKIP_TARGETS ?= bpf sched_ext
ifneq ($(SKIP_TARGETS),)
TMP := $(filter-out $(SKIP_TARGETS), $(TARGETS))
override TARGETS := $(TMP)
@@ -105,66 +174,46 @@ ifdef building_out_of_srctree
override LDFLAGS =
endif
-ifneq ($(O),)
- BUILD := $(O)/kselftest
+top_srcdir ?= ../../..
+
+ifeq ("$(origin O)", "command line")
+ KBUILD_OUTPUT := $(O)
+endif
+
+ifneq ($(KBUILD_OUTPUT),)
+ # Make's built-in functions such as $(abspath ...), $(realpath ...) cannot
+ # expand a shell special character '~'. We use a somewhat tedious way here.
+ abs_objtree := $(shell cd $(top_srcdir) && mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) && pwd)
+ $(if $(abs_objtree),, \
+ $(error failed to create output directory "$(KBUILD_OUTPUT)"))
+ # $(realpath ...) resolves symlinks
+ abs_objtree := $(realpath $(abs_objtree))
+ BUILD := $(abs_objtree)/kselftest
+ KHDR_INCLUDES := -isystem ${abs_objtree}/usr/include
else
- ifneq ($(KBUILD_OUTPUT),)
- BUILD := $(KBUILD_OUTPUT)/kselftest
- else
- BUILD := $(shell pwd)
- DEFAULT_INSTALL_HDR_PATH := 1
- endif
+ BUILD := $(CURDIR)
+ abs_srctree := $(shell cd $(top_srcdir) && pwd)
+ KHDR_INCLUDES := -isystem ${abs_srctree}/usr/include
+ DEFAULT_INSTALL_HDR_PATH := 1
endif
# Prepare for headers install
-top_srcdir ?= ../../..
include $(top_srcdir)/scripts/subarch.include
ARCH ?= $(SUBARCH)
-export KSFT_KHDR_INSTALL_DONE := 1
export BUILD
-
-# build and run gpio when output directory is the src dir.
-# gpio has dependency on tools/gpio and builds tools/gpio
-# objects in the src directory in all cases making the src
-# repo dirty even when objects are relocated.
-ifneq (1,$(DEFAULT_INSTALL_HDR_PATH))
- TMP := $(filter-out gpio, $(TARGETS))
- TARGETS := $(TMP)
-endif
+export KHDR_INCLUDES
# set default goal to all, so make without a target runs all, even when
# all isn't the first target in the file.
.DEFAULT_GOAL := all
-# Install headers here once for all tests. KSFT_KHDR_INSTALL_DONE
-# is used to avoid running headers_install from lib.mk.
-# Invoke headers install with --no-builtin-rules to avoid circular
-# dependency in "make kselftest" case. In this case, second level
-# make inherits builtin-rules which will use the rule generate
-# Makefile.o and runs into
-# "Circular Makefile.o <- prepare dependency dropped."
-# and headers_install fails and test compile fails.
-#
-# O= KBUILD_OUTPUT cases don't run into this error, since main Makefile
-# invokes them as sub-makes and --no-builtin-rules is not necessary,
-# but doesn't cause any failures. Keep it simple and use the same
-# flags in both cases.
-# Local build cases: "make kselftest", "make -C" - headers are installed
-# in the default INSTALL_HDR_PATH usr/include.
-khdr:
-ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
- $(MAKE) --no-builtin-rules ARCH=$(ARCH) -C $(top_srcdir) headers_install
-else
- $(MAKE) --no-builtin-rules INSTALL_HDR_PATH=$$BUILD/usr \
- ARCH=$(ARCH) -C $(top_srcdir) headers_install
-endif
-
-all: khdr
+all:
@ret=1; \
- for TARGET in $(TARGETS); do \
+ for TARGET in $(TARGETS) $(INSTALL_DEP_TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
mkdir $$BUILD_TARGET -p; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET \
+ O=$(abs_objtree) \
$(if $(FORCE_TARGETS),|| exit); \
ret=$$((ret * $$?)); \
done; exit $$ret;
@@ -172,7 +221,10 @@ all: khdr
run_tests: all
@for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
- $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests \
+ SRC_PATH=$(shell readlink -e $$(pwd)) \
+ OBJ_PATH=$(BUILD) \
+ O=$(abs_objtree); \
done;
hotplug:
@@ -217,12 +269,18 @@ ifdef INSTALL_PATH
install -m 744 kselftest/module.sh $(INSTALL_PATH)/kselftest/
install -m 744 kselftest/runner.sh $(INSTALL_PATH)/kselftest/
install -m 744 kselftest/prefix.pl $(INSTALL_PATH)/kselftest/
+ install -m 744 kselftest/ktap_helpers.sh $(INSTALL_PATH)/kselftest/
+ install -m 744 kselftest/ksft.py $(INSTALL_PATH)/kselftest/
install -m 744 run_kselftest.sh $(INSTALL_PATH)/
rm -f $(TEST_LIST)
@ret=1; \
- for TARGET in $(TARGETS); do \
+ for TARGET in $(TARGETS) $(INSTALL_DEP_TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
- $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install \
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install \
+ INSTALL_PATH=$(INSTALL_PATH)/$$TARGET \
+ SRC_PATH=$(shell readlink -e $$(pwd)) \
+ OBJ_PATH=$(INSTALL_PATH) \
+ O=$(abs_objtree) \
$(if $(FORCE_TARGETS),|| exit); \
ret=$$((ret * $$?)); \
done; exit $$ret;
@@ -234,11 +292,19 @@ ifdef INSTALL_PATH
@# included in the generated runlist.
for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
- [ ! -d $(INSTALL_PATH)/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \
- echo -n "Emit Tests for $$TARGET\n"; \
+ [ ! -d $(INSTALL_PATH)/$$TARGET ] && printf "Skipping non-existent dir: $$TARGET\n" && continue; \
+ printf "Emit Tests for $$TARGET\n"; \
$(MAKE) -s --no-print-directory OUTPUT=$$BUILD_TARGET COLLECTION=$$TARGET \
-C $$TARGET emit_tests >> $(TEST_LIST); \
done;
+ @VERSION=$$(git describe HEAD 2>/dev/null); \
+ if [ -n "$$VERSION" ]; then \
+ echo "$$VERSION" > $(INSTALL_PATH)/VERSION; \
+ printf "Version saved to $(INSTALL_PATH)/VERSION\n"; \
+ else \
+ printf "Unable to get version from git describe\n"; \
+ fi
+ @echo "**Kselftest Installation is complete: $(INSTALL_PATH)**"
else
$(error Error: set INSTALL_PATH to use install)
endif
@@ -251,9 +317,9 @@ gen_tar: install
@echo "Created ${TAR_PATH}"
clean:
- @for TARGET in $(TARGETS); do \
+ @for TARGET in $(TARGETS) $(INSTALL_DEP_TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\
done;
-.PHONY: khdr all run_tests hotplug run_hotplug clean_hotplug run_pstore_crash install clean gen_tar
+.PHONY: all run_tests hotplug run_hotplug clean_hotplug run_pstore_crash install clean gen_tar
diff --git a/tools/testing/selftests/acct/.gitignore b/tools/testing/selftests/acct/.gitignore
new file mode 100644
index 000000000000..7e78aac19038
--- /dev/null
+++ b/tools/testing/selftests/acct/.gitignore
@@ -0,0 +1,3 @@
+acct_syscall
+config
+process_log
\ No newline at end of file
diff --git a/tools/testing/selftests/acct/Makefile b/tools/testing/selftests/acct/Makefile
new file mode 100644
index 000000000000..7e025099cf65
--- /dev/null
+++ b/tools/testing/selftests/acct/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+TEST_GEN_PROGS := acct_syscall
+CFLAGS += -Wall
+
+include ../lib.mk
\ No newline at end of file
diff --git a/tools/testing/selftests/acct/acct_syscall.c b/tools/testing/selftests/acct/acct_syscall.c
new file mode 100644
index 000000000000..421adbdc299d
--- /dev/null
+++ b/tools/testing/selftests/acct/acct_syscall.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* kselftest for acct() system call
+ * The acct() system call enables or disables process accounting.
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/wait.h>
+
+#include "kselftest.h"
+
+int main(void)
+{
+ char filename[] = "process_log";
+ FILE *fp;
+ pid_t child_pid;
+ int sz;
+
+ // Setting up kselftest framework
+ ksft_print_header();
+ ksft_set_plan(1);
+
+ // Check if the test is run as root
+ if (geteuid()) {
+ ksft_exit_skip("This test needs root to run!\n");
+ return 1;
+ }
+
+ // Create file to log closed processes
+ fp = fopen(filename, "w");
+
+ if (!fp) {
+ ksft_test_result_error("%s.\n", strerror(errno));
+ ksft_finished();
+ return 1;
+ }
+
+ acct(filename);
+
+ // Handle error conditions
+ if (errno) {
+ ksft_test_result_error("%s.\n", strerror(errno));
+ fclose(fp);
+ ksft_finished();
+ return 1;
+ }
+
+ // Create child process and wait for it to terminate.
+
+ child_pid = fork();
+
+ if (child_pid < 0) {
+ ksft_test_result_error("Creating a child process to log failed\n");
+ acct(NULL);
+ return 1;
+ } else if (child_pid > 0) {
+ wait(NULL);
+ fseek(fp, 0L, SEEK_END);
+ sz = ftell(fp);
+
+ acct(NULL);
+
+ if (sz <= 0) {
+ ksft_test_result_fail("Terminated child process not logged\n");
+ ksft_exit_fail();
+ return 1;
+ }
+
+ ksft_test_result_pass("Successfully logged terminated process.\n");
+ fclose(fp);
+ ksft_exit_pass();
+ return 0;
+ }
+
+ return 1;
+}
diff --git a/tools/testing/selftests/alsa/.gitignore b/tools/testing/selftests/alsa/.gitignore
new file mode 100644
index 000000000000..3dd8e1176b89
--- /dev/null
+++ b/tools/testing/selftests/alsa/.gitignore
@@ -0,0 +1,5 @@
+global-timer
+mixer-test
+pcm-test
+test-pcmtest-driver
+utimer-test
diff --git a/tools/testing/selftests/alsa/Makefile b/tools/testing/selftests/alsa/Makefile
new file mode 100644
index 000000000000..8dab90ad22bb
--- /dev/null
+++ b/tools/testing/selftests/alsa/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+ifneq ($(shell pkg-config --exists alsa && echo 0 || echo 1),0)
+$(error Package alsa not found, please install alsa development package or \
+ add directory containing `alsa.pc` in PKG_CONFIG_PATH)
+endif
+
+CFLAGS += $(shell pkg-config --cflags alsa) $(KHDR_INCLUDES)
+LDLIBS += $(shell pkg-config --libs alsa)
+ifeq ($(LDLIBS),)
+LDLIBS += -lasound
+endif
+CFLAGS += -L$(OUTPUT) -Wl,-rpath=./
+
+LDLIBS+=-lpthread
+
+OVERRIDE_TARGETS = 1
+
+TEST_GEN_PROGS := mixer-test pcm-test test-pcmtest-driver utimer-test
+
+TEST_GEN_PROGS_EXTENDED := libatest.so global-timer
+
+TEST_FILES := conf.d pcm-test.conf
+
+include ../lib.mk
+
+$(OUTPUT)/libatest.so: conf.c alsa-local.h
+ $(CC) $(CFLAGS) -shared -fPIC $< $(LDLIBS) -o $@
+
+$(OUTPUT)/%: %.c $(OUTPUT)/libatest.so alsa-local.h
+ $(CC) $(CFLAGS) $< $(LDLIBS) -latest -o $@
diff --git a/tools/testing/selftests/alsa/alsa-local.h b/tools/testing/selftests/alsa/alsa-local.h
new file mode 100644
index 000000000000..29143ef52101
--- /dev/null
+++ b/tools/testing/selftests/alsa/alsa-local.h
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// kselftest configuration helpers for the hw specific configuration
+//
+// Original author: Jaroslav Kysela <perex@perex.cz>
+// Copyright (c) 2022 Red Hat Inc.
+
+#ifndef __ALSA_LOCAL_H
+#define __ALSA_LOCAL_H
+
+#include <alsa/asoundlib.h>
+
+snd_config_t *get_alsalib_config(void);
+
+snd_config_t *conf_load_from_file(const char *filename);
+void conf_load(void);
+void conf_free(void);
+snd_config_t *conf_by_card(int card);
+snd_config_t *conf_get_subtree(snd_config_t *root, const char *key1, const char *key2);
+int conf_get_count(snd_config_t *root, const char *key1, const char *key2);
+const char *conf_get_string(snd_config_t *root, const char *key1, const char *key2, const char *def);
+long conf_get_long(snd_config_t *root, const char *key1, const char *key2, long def);
+int conf_get_bool(snd_config_t *root, const char *key1, const char *key2, int def);
+void conf_get_string_array(snd_config_t *root, const char *key1, const char *key2,
+ const char **array, int array_size, const char *def);
+
+struct card_cfg_data {
+ int card;
+ snd_config_t *config;
+ const char *filename;
+ const char *config_id;
+ struct card_cfg_data *next;
+};
+
+extern struct card_cfg_data *conf_cards;
+
+#endif /* __ALSA_LOCAL_H */
diff --git a/tools/testing/selftests/alsa/conf.c b/tools/testing/selftests/alsa/conf.c
new file mode 100644
index 000000000000..317212078e36
--- /dev/null
+++ b/tools/testing/selftests/alsa/conf.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// kselftest configuration helpers for the hw specific configuration
+//
+// Original author: Jaroslav Kysela <perex@perex.cz>
+// Copyright (c) 2022 Red Hat Inc.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <assert.h>
+#include <dirent.h>
+#include <regex.h>
+#include <sys/stat.h>
+
+#include "kselftest.h"
+#include "alsa-local.h"
+
+#define SYSFS_ROOT "/sys"
+
+struct card_cfg_data *conf_cards;
+
+static const char *alsa_config =
+"ctl.hw {\n"
+" @args [ CARD ]\n"
+" @args.CARD.type string\n"
+" type hw\n"
+" card $CARD\n"
+"}\n"
+"pcm.hw {\n"
+" @args [ CARD DEV SUBDEV ]\n"
+" @args.CARD.type string\n"
+" @args.DEV.type integer\n"
+" @args.SUBDEV.type integer\n"
+" type hw\n"
+" card $CARD\n"
+" device $DEV\n"
+" subdevice $SUBDEV\n"
+"}\n"
+;
+
+#ifdef SND_LIB_VER
+#if SND_LIB_VERSION >= SND_LIB_VER(1, 2, 6)
+#define LIB_HAS_LOAD_STRING
+#endif
+#endif
+
+#ifndef LIB_HAS_LOAD_STRING
+static int snd_config_load_string(snd_config_t **config, const char *s,
+ size_t size)
+{
+ snd_input_t *input;
+ snd_config_t *dst;
+ int err;
+
+ assert(config && s);
+ if (size == 0)
+ size = strlen(s);
+ err = snd_input_buffer_open(&input, s, size);
+ if (err < 0)
+ return err;
+ err = snd_config_top(&dst);
+ if (err < 0) {
+ snd_input_close(input);
+ return err;
+ }
+ err = snd_config_load(dst, input);
+ snd_input_close(input);
+ if (err < 0) {
+ snd_config_delete(dst);
+ return err;
+ }
+ *config = dst;
+ return 0;
+}
+#endif
+
+snd_config_t *get_alsalib_config(void)
+{
+ snd_config_t *config;
+ int err;
+
+ err = snd_config_load_string(&config, alsa_config, strlen(alsa_config));
+ if (err < 0) {
+ ksft_print_msg("Unable to parse custom alsa-lib configuration: %s\n",
+ snd_strerror(err));
+ ksft_exit_fail();
+ }
+ return config;
+}
+
+static struct card_cfg_data *conf_data_by_card(int card, bool msg)
+{
+ struct card_cfg_data *conf;
+
+ for (conf = conf_cards; conf; conf = conf->next) {
+ if (conf->card == card) {
+ if (msg)
+ ksft_print_msg("using hw card config %s for card %d\n",
+ conf->filename, card);
+ return conf;
+ }
+ }
+ return NULL;
+}
+
+static void dump_config_tree(snd_config_t *top)
+{
+ snd_output_t *out;
+ int err;
+
+ err = snd_output_stdio_attach(&out, stdout, 0);
+ if (err < 0)
+ ksft_exit_fail_msg("stdout attach\n");
+ if (snd_config_save(top, out))
+ ksft_exit_fail_msg("config save\n");
+ snd_output_close(out);
+}
+
+snd_config_t *conf_load_from_file(const char *filename)
+{
+ snd_config_t *dst;
+ snd_input_t *input;
+ int err;
+
+ err = snd_input_stdio_open(&input, filename, "r");
+ if (err < 0)
+ ksft_exit_fail_msg("Unable to parse filename %s\n", filename);
+ err = snd_config_top(&dst);
+ if (err < 0)
+ ksft_exit_fail_msg("Out of memory\n");
+ err = snd_config_load(dst, input);
+ snd_input_close(input);
+ if (err < 0)
+ ksft_exit_fail_msg("Unable to parse filename %s\n", filename);
+ return dst;
+}
+
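+/*
+ * Read a sysfs attribute: for a symlink return the last component of
+ * the link target, otherwise return the file contents with trailing
+ * newlines stripped. Returns NULL if the attribute is absent or not
+ * readable.
+ */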
+static char *sysfs_get(const char *sysfs_root, const char *id)
+{
+ char path[PATH_MAX], link[PATH_MAX + 1];
+ struct stat sb;
+ ssize_t len;
+ char *e;
+ int fd;
+
+ if (id[0] == '/')
+ id++;
+ snprintf(path, sizeof(path), "%s/%s", sysfs_root, id);
+ if (lstat(path, &sb) != 0)
+ return NULL;
+ if (S_ISLNK(sb.st_mode)) {
+ len = readlink(path, link, sizeof(link) - 1);
+ if (len <= 0) {
+ ksft_exit_fail_msg("sysfs: cannot read link '%s': %s\n",
+ path, strerror(errno));
+ return NULL;
+ }
+ link[len] = '\0';
+ e = strrchr(link, '/');
+ if (e)
+ return strdup(e + 1);
+ return NULL;
+ }
+ if (S_ISDIR(sb.st_mode))
+ return NULL;
+ if ((sb.st_mode & S_IRUSR) == 0)
+ return NULL;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ return NULL;
+ ksft_exit_fail_msg("sysfs: open failed for '%s': %s\n",
+ path, strerror(errno));
+ }
+ len = read(fd, path, sizeof(path)-1);
+ close(fd);
+ if (len < 0)
+ ksft_exit_fail_msg("sysfs: unable to read value '%s': %s\n",
+ path, strerror(errno));
+ while (len > 0 && path[len-1] == '\n')
+ len--;
+ path[len] = '\0';
+ e = strdup(path);
+ if (e == NULL)
+ ksft_exit_fail_msg("Out of memory\n");
+ return e;
+}
+
+static bool sysfs_match(const char *sysfs_root, snd_config_t *config)
+{
+ snd_config_t *node, *path_config, *regex_config;
+ snd_config_iterator_t i, next;
+ const char *path_string, *regex_string, *v;
+ regex_t re;
+ regmatch_t match[1];
+ int iter = 0, ret;
+
+ snd_config_for_each(i, next, config) {
+ node = snd_config_iterator_entry(i);
+ if (snd_config_search(node, "path", &path_config))
+ ksft_exit_fail_msg("Missing path field in the sysfs block\n");
+ if (snd_config_search(node, "regex", &regex_config))
+ ksft_exit_fail_msg("Missing regex field in the sysfs block\n");
+ if (snd_config_get_string(path_config, &path_string))
+ ksft_exit_fail_msg("Path field in the sysfs block is not a string\n");
+ if (snd_config_get_string(regex_config, &regex_string))
+ ksft_exit_fail_msg("Regex field in the sysfs block is not a string\n");
+ iter++;
+ v = sysfs_get(sysfs_root, path_string);
+ if (!v)
+ return false;
+ if (regcomp(&re, regex_string, REG_EXTENDED))
+ ksft_exit_fail_msg("Wrong regex '%s'\n", regex_string);
+ ret = regexec(&re, v, 1, match, 0);
+ regfree(&re);
+ if (ret)
+ return false;
+ }
+ return iter > 0;
+}
+
+static void assign_card_config(int card, const char *sysfs_card_root)
+{
+ struct card_cfg_data *data;
+ snd_config_t *sysfs_card_config;
+
+ for (data = conf_cards; data; data = data->next) {
+ snd_config_search(data->config, "sysfs", &sysfs_card_config);
+ if (!sysfs_match(sysfs_card_root, sysfs_card_config))
+ continue;
+
+ data->card = card;
+ break;
+ }
+}
+
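+/* The kernel supports at most 32 sound cards, hence the fixed bound below. */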
+static void assign_card_configs(void)
+{
+ char fn[128];
+ int card;
+
+ for (card = 0; card < 32; card++) {
+ snprintf(fn, sizeof(fn), "%s/class/sound/card%d", SYSFS_ROOT, card);
+ if (access(fn, R_OK) == 0)
+ assign_card_config(card, fn);
+ }
+}
+
+static int filename_filter(const struct dirent *dirent)
+{
+ size_t flen;
+
+ if (dirent == NULL)
+ return 0;
+ if (dirent->d_type == DT_DIR)
+ return 0;
+ flen = strlen(dirent->d_name);
+ if (flen <= 5)
+ return 0;
+ if (strncmp(&dirent->d_name[flen-5], ".conf", 5) == 0)
+ return 1;
+ return 0;
+}
+
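+/*
+ * A configuration file must contain a global "sysfs" block used to
+ * match the machine and a "card" block with one entry per card; see
+ * conf.d/Lenovo_ThinkPad_P1_Gen2.conf for the expected layout.
+ */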
+static bool match_config(const char *filename)
+{
+ struct card_cfg_data *data;
+ snd_config_t *config, *sysfs_config, *card_config, *sysfs_card_config, *node;
+ snd_config_iterator_t i, next;
+
+ config = conf_load_from_file(filename);
+ if (snd_config_search(config, "sysfs", &sysfs_config) ||
+ snd_config_get_type(sysfs_config) != SND_CONFIG_TYPE_COMPOUND)
+ ksft_exit_fail_msg("Missing global sysfs block in filename %s\n", filename);
+ if (snd_config_search(config, "card", &card_config) ||
+ snd_config_get_type(card_config) != SND_CONFIG_TYPE_COMPOUND)
+ ksft_exit_fail_msg("Missing global card block in filename %s\n", filename);
+ if (!sysfs_match(SYSFS_ROOT, sysfs_config))
+ return false;
+ snd_config_for_each(i, next, card_config) {
+ node = snd_config_iterator_entry(i);
+ if (snd_config_search(node, "sysfs", &sysfs_card_config) ||
+ snd_config_get_type(sysfs_card_config) != SND_CONFIG_TYPE_COMPOUND)
+ ksft_exit_fail_msg("Missing card sysfs block in filename %s\n", filename);
+
+ data = malloc(sizeof(*data));
+ if (!data)
+ ksft_exit_fail_msg("Out of memory\n");
+ data->filename = filename;
+ data->config = node;
+ data->card = -1;
+ if (snd_config_get_id(node, &data->config_id))
+ ksft_exit_fail_msg("snd_config_get_id failed for card\n");
+ data->next = conf_cards;
+ conf_cards = data;
+ }
+ return true;
+}
+
+void conf_load(void)
+{
+ const char *fn = "conf.d";
+ struct dirent **namelist;
+ int n, j;
+
+ n = scandir(fn, &namelist, filename_filter, alphasort);
+ if (n < 0)
+ ksft_exit_fail_msg("scandir: %s\n", strerror(errno));
+ for (j = 0; j < n; j++) {
+ size_t sl = strlen(fn) + strlen(namelist[j]->d_name) + 2;
+ char *filename = malloc(sl);
+ if (filename == NULL)
+ ksft_exit_fail_msg("Out of memory\n");
+ sprintf(filename, "%s/%s", fn, namelist[j]->d_name);
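+		/* on success, match_config() keeps filename in conf_cards */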
+ if (match_config(filename))
+ filename = NULL;
+ free(filename);
+ free(namelist[j]);
+ }
+ free(namelist);
+
+ assign_card_configs();
+}
+
+void conf_free(void)
+{
+ struct card_cfg_data *conf;
+
+ while (conf_cards) {
+ conf = conf_cards;
+ conf_cards = conf->next;
+ snd_config_delete(conf->config);
+ }
+}
+
+snd_config_t *conf_by_card(int card)
+{
+ struct card_cfg_data *conf;
+
+ conf = conf_data_by_card(card, true);
+ if (conf)
+ return conf->config;
+ return NULL;
+}
+
+static int conf_get_by_keys(snd_config_t *root, const char *key1,
+ const char *key2, snd_config_t **result)
+{
+	int ret = -ENOENT;
+
+ if (key1) {
+ ret = snd_config_search(root, key1, &root);
+ if (ret != -ENOENT && ret < 0)
+ return ret;
+ }
+ if (key2)
+ ret = snd_config_search(root, key2, &root);
+ if (ret >= 0)
+ *result = root;
+ return ret;
+}
+
+snd_config_t *conf_get_subtree(snd_config_t *root, const char *key1, const char *key2)
+{
+ int ret;
+
+ if (!root)
+ return NULL;
+ ret = conf_get_by_keys(root, key1, key2, &root);
+ if (ret == -ENOENT)
+ return NULL;
+ if (ret < 0)
+ ksft_exit_fail_msg("key '%s'.'%s' search error: %s\n", key1, key2, snd_strerror(ret));
+ return root;
+}
+
+int conf_get_count(snd_config_t *root, const char *key1, const char *key2)
+{
+ snd_config_t *cfg;
+ snd_config_iterator_t i, next;
+ int count, ret;
+
+ if (!root)
+ return -1;
+ ret = conf_get_by_keys(root, key1, key2, &cfg);
+ if (ret == -ENOENT)
+ return -1;
+ if (ret < 0)
+ ksft_exit_fail_msg("key '%s'.'%s' search error: %s\n", key1, key2, snd_strerror(ret));
+ if (snd_config_get_type(cfg) != SND_CONFIG_TYPE_COMPOUND)
+ ksft_exit_fail_msg("key '%s'.'%s' is not a compound\n", key1, key2);
+ count = 0;
+ snd_config_for_each(i, next, cfg)
+ count++;
+ return count;
+}
+
+const char *conf_get_string(snd_config_t *root, const char *key1, const char *key2, const char *def)
+{
+ snd_config_t *cfg;
+ const char *s;
+ int ret;
+
+ if (!root)
+ return def;
+ ret = conf_get_by_keys(root, key1, key2, &cfg);
+ if (ret == -ENOENT)
+ return def;
+ if (ret < 0)
+ ksft_exit_fail_msg("key '%s'.'%s' search error: %s\n", key1, key2, snd_strerror(ret));
+ if (snd_config_get_string(cfg, &s))
+ ksft_exit_fail_msg("key '%s'.'%s' is not a string\n", key1, key2);
+ return s;
+}
+
+long conf_get_long(snd_config_t *root, const char *key1, const char *key2, long def)
+{
+ snd_config_t *cfg;
+ long l;
+ int ret;
+
+ if (!root)
+ return def;
+ ret = conf_get_by_keys(root, key1, key2, &cfg);
+ if (ret == -ENOENT)
+ return def;
+ if (ret < 0)
+ ksft_exit_fail_msg("key '%s'.'%s' search error: %s\n", key1, key2, snd_strerror(ret));
+ if (snd_config_get_integer(cfg, &l))
+ ksft_exit_fail_msg("key '%s'.'%s' is not an integer\n", key1, key2);
+ return l;
+}
+
+int conf_get_bool(snd_config_t *root, const char *key1, const char *key2, int def)
+{
+ snd_config_t *cfg;
+ int ret;
+
+ if (!root)
+ return def;
+ ret = conf_get_by_keys(root, key1, key2, &cfg);
+ if (ret == -ENOENT)
+ return def;
+ if (ret < 0)
+ ksft_exit_fail_msg("key '%s'.'%s' search error: %s\n", key1, key2, snd_strerror(ret));
+ ret = snd_config_get_bool(cfg);
+ if (ret < 0)
+ ksft_exit_fail_msg("key '%s'.'%s' is not a bool\n", key1, key2);
+ return !!ret;
+}
+
+void conf_get_string_array(snd_config_t *root, const char *key1, const char *key2,
+ const char **array, int array_size, const char *def)
+{
+ snd_config_t *cfg;
+ char buf[16];
+ int ret, index;
+
+ ret = conf_get_by_keys(root, key1, key2, &cfg);
+ if (ret == -ENOENT)
+ cfg = NULL;
+ else if (ret < 0)
+ ksft_exit_fail_msg("key '%s'.'%s' search error: %s\n", key1, key2, snd_strerror(ret));
+ for (index = 0; index < array_size; index++) {
+ if (cfg == NULL) {
+ array[index] = def;
+ } else {
+ sprintf(buf, "%i", index);
+ array[index] = conf_get_string(cfg, buf, NULL, def);
+ }
+ }
+}
diff --git a/tools/testing/selftests/alsa/conf.d/Lenovo_ThinkPad_P1_Gen2.conf b/tools/testing/selftests/alsa/conf.d/Lenovo_ThinkPad_P1_Gen2.conf
new file mode 100644
index 000000000000..5b40a916295d
--- /dev/null
+++ b/tools/testing/selftests/alsa/conf.d/Lenovo_ThinkPad_P1_Gen2.conf
@@ -0,0 +1,84 @@
+#
+# Example configuration for Lenovo ThinkPad P1 Gen2
+#
+
+#
+# Use regex match for the string read from the given sysfs path
+#
+# The sysfs root directory (/sys) is hardwired in the test code
+# (this could be made configurable if required).
+#
+# All strings must match.
+#
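+# To see the value that will be matched on a particular machine, read
+# the path beneath the sysfs root directly, e.g. (illustrative):
+#
+#   cat /sys/class/dmi/id/product_sku
+#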
+sysfs [
+ {
+ path "class/dmi/id/product_sku"
+ regex "LENOVO_MT_20QU_BU_Think_FM_ThinkPad P1 Gen 2"
+ }
+]
+
+card.hda {
+ #
+ # Use regex match for the /sys/class/sound/card*/ tree (relative)
+ #
+ sysfs [
+ {
+ path "device/subsystem_device"
+ regex "0x229e"
+ }
+ {
+ path "device/subsystem_vendor"
+ regex "0x17aa"
+ }
+ ]
+
+ #
+ # PCM configuration
+ #
+ # pcm.0.0 - device 0 subdevice 0
+ #
+ pcm.0.0 {
+ PLAYBACK {
+ test.time1 {
+ access RW_INTERLEAVED # can be omitted - default
+ format S16_LE # can be omitted - default
+ rate 48000 # can be omitted - default
+ channels 2 # can be omitted - default
+ period_size 512
+ buffer_size 4096
+ }
+ test.time2 {
+ access RW_INTERLEAVED
+ format S16_LE
+ rate 48000
+ channels 2
+ period_size 24000
+ buffer_size 192000
+ }
+ test.time3 {
+ access RW_INTERLEAVED
+ format S16_LE
+ rate 44100
+ channels 2
+ period_size 24000
+ buffer_size 192000
+ }
+ }
+ CAPTURE {
+ # use default tests, check for the presence
+ }
+ }
+ #
+ # uncomment to force the missing device checks
+ #
+ #pcm.0.2 {
+ # PLAYBACK {
+ # # check for the presence
+ # }
+ #}
+ #pcm.0.3 {
+ # CAPTURE {
+ # # check for the presence
+ # }
+ #}
+}
diff --git a/tools/testing/selftests/alsa/global-timer.c b/tools/testing/selftests/alsa/global-timer.c
new file mode 100644
index 000000000000..c15ec0ba851a
--- /dev/null
+++ b/tools/testing/selftests/alsa/global-timer.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This tool is used by the utimer test, and it allows us to
+ * count the ticks of a global timer in a certain time frame
+ * (which is set by `timeout` parameter).
+ *
+ * Author: Ivan Orlov <ivan.orlov0322@gmail.com>
+ */
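+/*
+ * Example invocation (illustrative): count the ticks of global timer
+ * device 0, subdevice 0, over 5 seconds:
+ *
+ *	./global-timer 0 0 5
+ */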
+#include <stdio.h>
+#include <stdlib.h>
+#include <alsa/asoundlib.h>
+#include <time.h>
+
+static int ticked;
+static void async_callback(snd_async_handler_t *ahandler)
+{
+ ticked++;
+}
+
+static char timer_name[64];
+static void bind_to_timer(int device, int subdevice, int timeout)
+{
+ snd_timer_t *handle;
+ snd_timer_params_t *params;
+ snd_async_handler_t *ahandler;
+
+ time_t end;
+
+ sprintf(timer_name, "hw:CLASS=%d,SCLASS=%d,DEV=%d,SUBDEV=%d",
+ SND_TIMER_CLASS_GLOBAL, SND_TIMER_SCLASS_NONE,
+ device, subdevice);
+
+ snd_timer_params_alloca(&params);
+
+ if (snd_timer_open(&handle, timer_name, SND_TIMER_OPEN_NONBLOCK) < 0) {
+ perror("Can't open the timer");
+ exit(EXIT_FAILURE);
+ }
+
+ snd_timer_params_set_auto_start(params, 1);
+ snd_timer_params_set_ticks(params, 1);
+ if (snd_timer_params(handle, params) < 0) {
+ perror("Can't set timer params");
+ exit(EXIT_FAILURE);
+ }
+
+ if (snd_async_add_timer_handler(&ahandler, handle, async_callback, NULL) < 0) {
+ perror("Can't create a handler");
+ exit(EXIT_FAILURE);
+ }
+ end = time(NULL) + timeout;
+ if (snd_timer_start(handle) < 0) {
+ perror("Failed to start the timer");
+ exit(EXIT_FAILURE);
+ }
+ printf("Timer has started\n");
+ while (time(NULL) <= end) {
+ /*
+ * Waiting for the timeout to elapse. Can't use sleep here, as it gets
+ * constantly interrupted by the signal from the timer (SIGIO)
+ */
+ }
+ snd_timer_stop(handle);
+ snd_timer_close(handle);
+}
+
+int main(int argc, char *argv[])
+{
+ int device, subdevice, timeout;
+
+ if (argc < 4) {
+ perror("Usage: %s <device> <subdevice> <timeout>");
+ return EXIT_FAILURE;
+ }
+
+ setlinebuf(stdout);
+
+ device = atoi(argv[1]);
+ subdevice = atoi(argv[2]);
+ timeout = atoi(argv[3]);
+
+ bind_to_timer(device, subdevice, timeout);
+
+ printf("Total ticks count: %d\n", ticked);
+
+ return EXIT_SUCCESS;
+}
diff --git a/tools/testing/selftests/alsa/mixer-test.c b/tools/testing/selftests/alsa/mixer-test.c
new file mode 100644
index 000000000000..d4f845c32804
--- /dev/null
+++ b/tools/testing/selftests/alsa/mixer-test.c
@@ -0,0 +1,1145 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// kselftest for the ALSA mixer API
+//
+// Original author: Mark Brown <broonie@kernel.org>
+// Copyright (c) 2021-2 Arm Limited
+
+// This test will iterate over all cards detected in the system, exercising
+// every mixer control it can find. This may conflict with other system
+// software if there is audio activity, so it is best run on a system
+// with a minimal active userspace.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <limits.h>
+#include <string.h>
+#include <getopt.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <math.h>
+#include <errno.h>
+#include <assert.h>
+#include <alsa/asoundlib.h>
+#include <poll.h>
+#include <stdint.h>
+
+#include "kselftest.h"
+#include "alsa-local.h"
+
+#define TESTS_PER_CONTROL 7
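+
+/*
+ * One result is reported per control for each of: get_value, name,
+ * write_default, write_valid, write_invalid, event_missing and
+ * event_spurious (see main()).
+ */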
+
+struct card_data {
+ snd_ctl_t *handle;
+ int card;
+ snd_ctl_card_info_t *info;
+ const char *card_name;
+ struct pollfd pollfd;
+ int num_ctls;
+ snd_ctl_elem_list_t *ctls;
+ struct card_data *next;
+};
+
+struct ctl_data {
+ const char *name;
+ snd_ctl_elem_id_t *id;
+ snd_ctl_elem_info_t *info;
+ snd_ctl_elem_value_t *def_val;
+ int elem;
+ int event_missing;
+ int event_spurious;
+ struct card_data *card;
+ struct ctl_data *next;
+};
+
+int num_cards;
+int num_controls;
+struct card_data *card_list;
+struct ctl_data *ctl_list;
+
+static void find_controls(void)
+{
+ char name[32];
+ int card, ctl, err;
+ struct card_data *card_data;
+ struct ctl_data *ctl_data;
+ snd_config_t *config;
+ char *card_name, *card_longname;
+
+ card = -1;
+ if (snd_card_next(&card) < 0 || card < 0)
+ return;
+
+ config = get_alsalib_config();
+
+ while (card >= 0) {
+ sprintf(name, "hw:%d", card);
+
+ card_data = malloc(sizeof(*card_data));
+ if (!card_data)
+ ksft_exit_fail_msg("Out of memory\n");
+
+ err = snd_ctl_open_lconf(&card_data->handle, name, 0, config);
+ if (err < 0) {
+ ksft_print_msg("Failed to get hctl for card %d: %s\n",
+ card, snd_strerror(err));
+ goto next_card;
+ }
+
+ err = snd_card_get_name(card, &card_name);
+ if (err != 0)
+ card_name = "Unknown";
+ err = snd_card_get_longname(card, &card_longname);
+ if (err != 0)
+ card_longname = "Unknown";
+
+ err = snd_ctl_card_info_malloc(&card_data->info);
+ if (err != 0)
+ ksft_exit_fail_msg("Failed to allocate card info: %d\n",
+ err);
+
+ err = snd_ctl_card_info(card_data->handle, card_data->info);
+ if (err == 0) {
+ card_data->card_name = snd_ctl_card_info_get_id(card_data->info);
+ if (!card_data->card_name)
+ ksft_print_msg("Failed to get card ID\n");
+ } else {
+ ksft_print_msg("Failed to get card info: %d\n", err);
+ }
+
+ if (!card_data->card_name)
+ card_data->card_name = "Unknown";
+
+ ksft_print_msg("Card %d/%s - %s (%s)\n", card,
+ card_data->card_name, card_name, card_longname);
+
+ /* Count controls */
+ snd_ctl_elem_list_malloc(&card_data->ctls);
+ snd_ctl_elem_list(card_data->handle, card_data->ctls);
+ card_data->num_ctls = snd_ctl_elem_list_get_count(card_data->ctls);
+
+ /* Enumerate control information */
+ snd_ctl_elem_list_alloc_space(card_data->ctls, card_data->num_ctls);
+ snd_ctl_elem_list(card_data->handle, card_data->ctls);
+
+ card_data->card = num_cards++;
+ card_data->next = card_list;
+ card_list = card_data;
+
+ num_controls += card_data->num_ctls;
+
+ for (ctl = 0; ctl < card_data->num_ctls; ctl++) {
+ ctl_data = malloc(sizeof(*ctl_data));
+ if (!ctl_data)
+ ksft_exit_fail_msg("Out of memory\n");
+
+ memset(ctl_data, 0, sizeof(*ctl_data));
+ ctl_data->card = card_data;
+ ctl_data->elem = ctl;
+ ctl_data->name = snd_ctl_elem_list_get_name(card_data->ctls,
+ ctl);
+
+ err = snd_ctl_elem_id_malloc(&ctl_data->id);
+ if (err < 0)
+ ksft_exit_fail_msg("Out of memory\n");
+
+ err = snd_ctl_elem_info_malloc(&ctl_data->info);
+ if (err < 0)
+ ksft_exit_fail_msg("Out of memory\n");
+
+ err = snd_ctl_elem_value_malloc(&ctl_data->def_val);
+ if (err < 0)
+ ksft_exit_fail_msg("Out of memory\n");
+
+ snd_ctl_elem_list_get_id(card_data->ctls, ctl,
+ ctl_data->id);
+ snd_ctl_elem_info_set_id(ctl_data->info, ctl_data->id);
+ err = snd_ctl_elem_info(card_data->handle,
+ ctl_data->info);
+ if (err < 0) {
+ ksft_print_msg("%s getting info for %s\n",
+ snd_strerror(err),
+ ctl_data->name);
+ }
+
+ snd_ctl_elem_value_set_id(ctl_data->def_val,
+ ctl_data->id);
+
+ ctl_data->next = ctl_list;
+ ctl_list = ctl_data;
+ }
+
+ /* Set up for events */
+ err = snd_ctl_subscribe_events(card_data->handle, true);
+ if (err < 0) {
+ ksft_exit_fail_msg("snd_ctl_subscribe_events() failed for card %d: %d\n",
+ card, err);
+ }
+
+ err = snd_ctl_poll_descriptors_count(card_data->handle);
+ if (err != 1) {
+ ksft_exit_fail_msg("Unexpected descriptor count %d for card %d\n",
+ err, card);
+ }
+
+ err = snd_ctl_poll_descriptors(card_data->handle,
+ &card_data->pollfd, 1);
+ if (err != 1) {
+ ksft_exit_fail_msg("snd_ctl_poll_descriptors() failed for card %d: %d\n",
+ card, err);
+ }
+
+ next_card:
+ if (snd_card_next(&card) < 0) {
+ ksft_print_msg("snd_card_next");
+ break;
+ }
+ }
+
+ snd_config_delete(config);
+}
+
+/*
+ * Block for up to timeout ms for an event, returns a negative value
+ * on error, 0 for no event and 1 for an event.
+ */
+static int wait_for_event(struct ctl_data *ctl, int timeout)
+{
+ unsigned short revents;
+ snd_ctl_event_t *event;
+ int err;
+ unsigned int mask = 0;
+ unsigned int ev_id;
+
+ snd_ctl_event_alloca(&event);
+
+ do {
+ err = poll(&(ctl->card->pollfd), 1, timeout);
+ if (err < 0) {
+ ksft_print_msg("poll() failed for %s: %s (%d)\n",
+ ctl->name, strerror(errno), errno);
+ return -1;
+ }
+ /* Timeout */
+ if (err == 0)
+ return 0;
+
+ err = snd_ctl_poll_descriptors_revents(ctl->card->handle,
+ &(ctl->card->pollfd),
+ 1, &revents);
+ if (err < 0) {
+ ksft_print_msg("snd_ctl_poll_descriptors_revents() failed for %s: %d\n",
+ ctl->name, err);
+ return err;
+ }
+ if (revents & POLLERR) {
+ ksft_print_msg("snd_ctl_poll_descriptors_revents() reported POLLERR for %s\n",
+ ctl->name);
+ return -1;
+ }
+ /* No read events */
+ if (!(revents & POLLIN)) {
+ ksft_print_msg("No POLLIN\n");
+ continue;
+ }
+
+ err = snd_ctl_read(ctl->card->handle, event);
+ if (err < 0) {
+ ksft_print_msg("snd_ctl_read() failed for %s: %d\n",
+ ctl->name, err);
+ return err;
+ }
+
+ if (snd_ctl_event_get_type(event) != SND_CTL_EVENT_ELEM)
+ continue;
+
+		/* Check that the event is for the control we are watching */
+ mask = snd_ctl_event_elem_get_mask(event);
+ ev_id = snd_ctl_event_elem_get_numid(event);
+ if (ev_id != snd_ctl_elem_info_get_numid(ctl->info)) {
+ ksft_print_msg("Event for unexpected ctl %s\n",
+ snd_ctl_event_elem_get_name(event));
+ continue;
+ }
+
+ if ((mask & SND_CTL_EVENT_MASK_REMOVE) == SND_CTL_EVENT_MASK_REMOVE) {
+ ksft_print_msg("Removal event for %s\n",
+ ctl->name);
+ return -1;
+ }
+ } while ((mask & SND_CTL_EVENT_MASK_VALUE) != SND_CTL_EVENT_MASK_VALUE);
+
+ return 1;
+}
+
+static bool ctl_value_index_valid(struct ctl_data *ctl,
+ snd_ctl_elem_value_t *val,
+ int index)
+{
+ long int_val;
+ long long int64_val;
+
+ switch (snd_ctl_elem_info_get_type(ctl->info)) {
+ case SND_CTL_ELEM_TYPE_NONE:
+ ksft_print_msg("%s.%d Invalid control type NONE\n",
+ ctl->name, index);
+ return false;
+
+ case SND_CTL_ELEM_TYPE_BOOLEAN:
+ int_val = snd_ctl_elem_value_get_boolean(val, index);
+ switch (int_val) {
+ case 0:
+ case 1:
+ break;
+ default:
+ ksft_print_msg("%s.%d Invalid boolean value %ld\n",
+ ctl->name, index, int_val);
+ return false;
+ }
+ break;
+
+ case SND_CTL_ELEM_TYPE_INTEGER:
+ int_val = snd_ctl_elem_value_get_integer(val, index);
+
+ if (int_val < snd_ctl_elem_info_get_min(ctl->info)) {
+ ksft_print_msg("%s.%d value %ld less than minimum %ld\n",
+ ctl->name, index, int_val,
+ snd_ctl_elem_info_get_min(ctl->info));
+ return false;
+ }
+
+ if (int_val > snd_ctl_elem_info_get_max(ctl->info)) {
+ ksft_print_msg("%s.%d value %ld more than maximum %ld\n",
+ ctl->name, index, int_val,
+ snd_ctl_elem_info_get_max(ctl->info));
+ return false;
+ }
+
+ /* Only check step size if there is one and we're in bounds */
+ if (snd_ctl_elem_info_get_step(ctl->info) &&
+		    (int_val - snd_ctl_elem_info_get_min(ctl->info)) %
+		    snd_ctl_elem_info_get_step(ctl->info)) {
+ ksft_print_msg("%s.%d value %ld invalid for step %ld minimum %ld\n",
+ ctl->name, index, int_val,
+ snd_ctl_elem_info_get_step(ctl->info),
+ snd_ctl_elem_info_get_min(ctl->info));
+ return false;
+ }
+ break;
+
+ case SND_CTL_ELEM_TYPE_INTEGER64:
+ int64_val = snd_ctl_elem_value_get_integer64(val, index);
+
+ if (int64_val < snd_ctl_elem_info_get_min64(ctl->info)) {
+ ksft_print_msg("%s.%d value %lld less than minimum %lld\n",
+ ctl->name, index, int64_val,
+ snd_ctl_elem_info_get_min64(ctl->info));
+ return false;
+ }
+
+ if (int64_val > snd_ctl_elem_info_get_max64(ctl->info)) {
+ ksft_print_msg("%s.%d value %lld more than maximum %ld\n",
+ ctl->name, index, int64_val,
+ snd_ctl_elem_info_get_max(ctl->info));
+ return false;
+ }
+
+ /* Only check step size if there is one and we're in bounds */
+ if (snd_ctl_elem_info_get_step64(ctl->info) &&
+ (int64_val - snd_ctl_elem_info_get_min64(ctl->info)) %
+ snd_ctl_elem_info_get_step64(ctl->info)) {
+ ksft_print_msg("%s.%d value %lld invalid for step %lld minimum %lld\n",
+ ctl->name, index, int64_val,
+ snd_ctl_elem_info_get_step64(ctl->info),
+ snd_ctl_elem_info_get_min64(ctl->info));
+ return false;
+ }
+ break;
+
+ case SND_CTL_ELEM_TYPE_ENUMERATED:
+ int_val = snd_ctl_elem_value_get_enumerated(val, index);
+
+ if (int_val < 0) {
+ ksft_print_msg("%s.%d negative value %ld for enumeration\n",
+ ctl->name, index, int_val);
+ return false;
+ }
+
+ if (int_val >= snd_ctl_elem_info_get_items(ctl->info)) {
+ ksft_print_msg("%s.%d value %ld more than item count %u\n",
+ ctl->name, index, int_val,
+ snd_ctl_elem_info_get_items(ctl->info));
+ return false;
+ }
+ break;
+
+ default:
+ /* No tests for other types */
+ break;
+ }
+
+ return true;
+}
+
+/*
+ * Check that the provided value meets the constraints for the
+ * provided control.
+ */
+static bool ctl_value_valid(struct ctl_data *ctl, snd_ctl_elem_value_t *val)
+{
+ int i;
+ bool valid = true;
+
+ for (i = 0; i < snd_ctl_elem_info_get_count(ctl->info); i++)
+ if (!ctl_value_index_valid(ctl, val, i))
+ valid = false;
+
+ return valid;
+}
+
+/*
+ * Check that we can read the default value and it is valid. Write
+ * tests use the read value to restore the default.
+ */
+static void test_ctl_get_value(struct ctl_data *ctl)
+{
+ int err;
+
+ /* If the control is turned off let's be polite */
+ if (snd_ctl_elem_info_is_inactive(ctl->info)) {
+ ksft_print_msg("%s is inactive\n", ctl->name);
+ ksft_test_result_skip("get_value.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+ return;
+ }
+
+ /* Can't test reading on an unreadable control */
+ if (!snd_ctl_elem_info_is_readable(ctl->info)) {
+ ksft_print_msg("%s is not readable\n", ctl->name);
+ ksft_test_result_skip("get_value.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+ return;
+ }
+
+ err = snd_ctl_elem_read(ctl->card->handle, ctl->def_val);
+ if (err < 0) {
+ ksft_print_msg("snd_ctl_elem_read() failed: %s\n",
+ snd_strerror(err));
+ goto out;
+ }
+
+ if (!ctl_value_valid(ctl, ctl->def_val))
+ err = -EINVAL;
+
+out:
+ ksft_test_result(err >= 0, "get_value.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+}
+
+static bool strend(const char *haystack, const char *needle)
+{
+ size_t haystack_len = strlen(haystack);
+ size_t needle_len = strlen(needle);
+
+ if (needle_len > haystack_len)
+ return false;
+ return strcmp(haystack + haystack_len - needle_len, needle) == 0;
+}
+
+static void test_ctl_name(struct ctl_data *ctl)
+{
+ bool name_ok = true;
+
+ ksft_print_msg("%s.%d %s\n", ctl->card->card_name, ctl->elem,
+ ctl->name);
+
+ /* Only boolean controls should end in Switch */
+ if (strend(ctl->name, " Switch")) {
+ if (snd_ctl_elem_info_get_type(ctl->info) != SND_CTL_ELEM_TYPE_BOOLEAN) {
+ ksft_print_msg("%d.%d %s ends in Switch but is not boolean\n",
+ ctl->card->card, ctl->elem, ctl->name);
+ name_ok = false;
+ }
+ }
+
+ /* Writeable boolean controls should end in Switch */
+ if (snd_ctl_elem_info_get_type(ctl->info) == SND_CTL_ELEM_TYPE_BOOLEAN &&
+ snd_ctl_elem_info_is_writable(ctl->info)) {
+ if (!strend(ctl->name, " Switch")) {
+ ksft_print_msg("%d.%d %s is a writeable boolean but not a Switch\n",
+ ctl->card->card, ctl->elem, ctl->name);
+ name_ok = false;
+ }
+ }
+
+ ksft_test_result(name_ok, "name.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+}
+
+static void show_values(struct ctl_data *ctl, snd_ctl_elem_value_t *orig_val,
+ snd_ctl_elem_value_t *read_val)
+{
+ long long orig_int, read_int;
+ int i;
+
+ for (i = 0; i < snd_ctl_elem_info_get_count(ctl->info); i++) {
+ switch (snd_ctl_elem_info_get_type(ctl->info)) {
+ case SND_CTL_ELEM_TYPE_BOOLEAN:
+ orig_int = snd_ctl_elem_value_get_boolean(orig_val, i);
+ read_int = snd_ctl_elem_value_get_boolean(read_val, i);
+ break;
+
+ case SND_CTL_ELEM_TYPE_INTEGER:
+ orig_int = snd_ctl_elem_value_get_integer(orig_val, i);
+ read_int = snd_ctl_elem_value_get_integer(read_val, i);
+ break;
+
+ case SND_CTL_ELEM_TYPE_INTEGER64:
+ orig_int = snd_ctl_elem_value_get_integer64(orig_val,
+ i);
+ read_int = snd_ctl_elem_value_get_integer64(read_val,
+ i);
+ break;
+
+ case SND_CTL_ELEM_TYPE_ENUMERATED:
+ orig_int = snd_ctl_elem_value_get_enumerated(orig_val,
+ i);
+ read_int = snd_ctl_elem_value_get_enumerated(read_val,
+ i);
+ break;
+
+ default:
+ return;
+ }
+
+ ksft_print_msg("%s.%d orig %lld read %lld, is_volatile %d\n",
+ ctl->name, i, orig_int, read_int,
+ snd_ctl_elem_info_is_volatile(ctl->info));
+ }
+}
+
+static bool show_mismatch(struct ctl_data *ctl, int index,
+ snd_ctl_elem_value_t *read_val,
+ snd_ctl_elem_value_t *expected_val)
+{
+ long long expected_int, read_int;
+
+ /*
+	 * This function compares values representable as integers;
+	 * initialise both to zero so the check below doesn't log
+	 * spuriously for other types.
+ */
+ expected_int = 0;
+ read_int = 0;
+
+ switch (snd_ctl_elem_info_get_type(ctl->info)) {
+ case SND_CTL_ELEM_TYPE_BOOLEAN:
+ expected_int = snd_ctl_elem_value_get_boolean(expected_val,
+ index);
+ read_int = snd_ctl_elem_value_get_boolean(read_val, index);
+ break;
+
+ case SND_CTL_ELEM_TYPE_INTEGER:
+ expected_int = snd_ctl_elem_value_get_integer(expected_val,
+ index);
+ read_int = snd_ctl_elem_value_get_integer(read_val, index);
+ break;
+
+ case SND_CTL_ELEM_TYPE_INTEGER64:
+ expected_int = snd_ctl_elem_value_get_integer64(expected_val,
+ index);
+ read_int = snd_ctl_elem_value_get_integer64(read_val,
+ index);
+ break;
+
+ case SND_CTL_ELEM_TYPE_ENUMERATED:
+ expected_int = snd_ctl_elem_value_get_enumerated(expected_val,
+ index);
+ read_int = snd_ctl_elem_value_get_enumerated(read_val,
+ index);
+ break;
+
+ default:
+ break;
+ }
+
+ if (expected_int != read_int) {
+ /*
+ * NOTE: The volatile attribute means that the hardware
+ * can voluntarily change the state of control element
+ * independent of any operation by software.
+ */
+ bool is_volatile = snd_ctl_elem_info_is_volatile(ctl->info);
+ ksft_print_msg("%s.%d expected %lld but read %lld, is_volatile %d\n",
+ ctl->name, index, expected_int, read_int, is_volatile);
+ return !is_volatile;
+ } else {
+ return false;
+ }
+}
+
+/*
+ * Write a value then, if possible, verify that we get the expected
+ * result. An optional expected value can be provided if we expect
+ * the write to fail, for verifying that invalid writes don't corrupt
+ * anything.
+ */
+static int write_and_verify(struct ctl_data *ctl,
+ snd_ctl_elem_value_t *write_val,
+ snd_ctl_elem_value_t *expected_val)
+{
+ int err, i;
+ bool error_expected, mismatch_shown;
+ snd_ctl_elem_value_t *initial_val, *read_val, *w_val;
+ snd_ctl_elem_value_alloca(&initial_val);
+ snd_ctl_elem_value_alloca(&read_val);
+ snd_ctl_elem_value_alloca(&w_val);
+
+ /*
+ * We need to copy the write value since writing can modify
+	 * the value, which causes surprises, and allocate an expected
+ * value if we expect to read back what we wrote.
+ */
+ snd_ctl_elem_value_copy(w_val, write_val);
+ if (expected_val) {
+ error_expected = true;
+ } else {
+ error_expected = false;
+ snd_ctl_elem_value_alloca(&expected_val);
+ snd_ctl_elem_value_copy(expected_val, write_val);
+ }
+
+ /* Store the value before we write */
+ if (snd_ctl_elem_info_is_readable(ctl->info)) {
+ snd_ctl_elem_value_set_id(initial_val, ctl->id);
+
+ err = snd_ctl_elem_read(ctl->card->handle, initial_val);
+ if (err < 0) {
+ ksft_print_msg("snd_ctl_elem_read() failed: %s\n",
+ snd_strerror(err));
+ return err;
+ }
+ }
+
+ /*
+ * Do the write, if we have an expected value ignore the error
+ * and carry on to validate the expected value.
+ */
+ err = snd_ctl_elem_write(ctl->card->handle, w_val);
+ if (err < 0 && !error_expected) {
+ ksft_print_msg("snd_ctl_elem_write() failed: %s\n",
+ snd_strerror(err));
+ return err;
+ }
+
+ /* Can we do the verification part? */
+ if (!snd_ctl_elem_info_is_readable(ctl->info))
+ return err;
+
+ snd_ctl_elem_value_set_id(read_val, ctl->id);
+
+ err = snd_ctl_elem_read(ctl->card->handle, read_val);
+ if (err < 0) {
+ ksft_print_msg("snd_ctl_elem_read() failed: %s\n",
+ snd_strerror(err));
+ return err;
+ }
+
+ /*
+ * We can't verify any specific value for volatile controls
+ * but we should still check that whatever we read is a valid
+	 * value for the control.
+ */
+ if (snd_ctl_elem_info_is_volatile(ctl->info)) {
+ if (!ctl_value_valid(ctl, read_val)) {
+ ksft_print_msg("Volatile control %s has invalid value\n",
+ ctl->name);
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /*
+ * Check for an event if the value changed, or confirm that
+ * there was none if it didn't. We rely on the kernel
+ * generating the notification before it returns from the
+ * write, this is currently true, should that ever change this
+ * will most likely break and need updating.
+ */
+ err = wait_for_event(ctl, 0);
+ if (snd_ctl_elem_value_compare(initial_val, read_val)) {
+ if (err < 1) {
+ ksft_print_msg("No event generated for %s\n",
+ ctl->name);
+ show_values(ctl, initial_val, read_val);
+ ctl->event_missing++;
+ }
+ } else {
+ if (err != 0) {
+ ksft_print_msg("Spurious event generated for %s\n",
+ ctl->name);
+ show_values(ctl, initial_val, read_val);
+ ctl->event_spurious++;
+ }
+ }
+
+ /*
+	 * Use the library to compare values; if there's a mismatch
+ * carry on and try to provide a more useful diagnostic than
+ * just "mismatch".
+ */
+ if (!snd_ctl_elem_value_compare(expected_val, read_val))
+ return 0;
+
+ mismatch_shown = false;
+ for (i = 0; i < snd_ctl_elem_info_get_count(ctl->info); i++)
+ if (show_mismatch(ctl, i, read_val, expected_val))
+ mismatch_shown = true;
+
+ if (!mismatch_shown)
+ ksft_print_msg("%s read and written values differ\n",
+ ctl->name);
+
+ return -1;
+}
+
+/*
+ * Make sure we can write the default value back to the control, this
+ * should validate that at least some write works.
+ */
+static void test_ctl_write_default(struct ctl_data *ctl)
+{
+ int err;
+
+ /* If the control is turned off let's be polite */
+ if (snd_ctl_elem_info_is_inactive(ctl->info)) {
+ ksft_print_msg("%s is inactive\n", ctl->name);
+ ksft_test_result_skip("write_default.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+ return;
+ }
+
+ if (!snd_ctl_elem_info_is_writable(ctl->info)) {
+ ksft_print_msg("%s is not writeable\n", ctl->name);
+ ksft_test_result_skip("write_default.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+ return;
+ }
+
+ /* No idea what the default was for unreadable controls */
+ if (!snd_ctl_elem_info_is_readable(ctl->info)) {
+ ksft_print_msg("%s couldn't read default\n", ctl->name);
+ ksft_test_result_skip("write_default.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+ return;
+ }
+
+ err = write_and_verify(ctl, ctl->def_val, NULL);
+
+ ksft_test_result(err >= 0, "write_default.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+}
+
+static bool test_ctl_write_valid_boolean(struct ctl_data *ctl)
+{
+ int err, i, j;
+ bool fail = false;
+ snd_ctl_elem_value_t *val;
+ snd_ctl_elem_value_alloca(&val);
+
+ snd_ctl_elem_value_set_id(val, ctl->id);
+
+ for (i = 0; i < snd_ctl_elem_info_get_count(ctl->info); i++) {
+ for (j = 0; j < 2; j++) {
+ snd_ctl_elem_value_set_boolean(val, i, j);
+ err = write_and_verify(ctl, val, NULL);
+ if (err != 0)
+ fail = true;
+ }
+ }
+
+ return !fail;
+}
+
+static bool test_ctl_write_valid_integer(struct ctl_data *ctl)
+{
+ int err;
+ int i;
+ long j, step;
+ bool fail = false;
+ snd_ctl_elem_value_t *val;
+ snd_ctl_elem_value_alloca(&val);
+
+ snd_ctl_elem_value_set_id(val, ctl->id);
+
+ step = snd_ctl_elem_info_get_step(ctl->info);
+ if (!step)
+ step = 1;
+
+ for (i = 0; i < snd_ctl_elem_info_get_count(ctl->info); i++) {
+ for (j = snd_ctl_elem_info_get_min(ctl->info);
+ j <= snd_ctl_elem_info_get_max(ctl->info); j += step) {
+
+ snd_ctl_elem_value_set_integer(val, i, j);
+ err = write_and_verify(ctl, val, NULL);
+ if (err != 0)
+ fail = true;
+ }
+ }
+
+ return !fail;
+}
+
+static bool test_ctl_write_valid_integer64(struct ctl_data *ctl)
+{
+ int err, i;
+ long long j, step;
+ bool fail = false;
+ snd_ctl_elem_value_t *val;
+ snd_ctl_elem_value_alloca(&val);
+
+ snd_ctl_elem_value_set_id(val, ctl->id);
+
+ step = snd_ctl_elem_info_get_step64(ctl->info);
+ if (!step)
+ step = 1;
+
+ for (i = 0; i < snd_ctl_elem_info_get_count(ctl->info); i++) {
+ for (j = snd_ctl_elem_info_get_min64(ctl->info);
+ j <= snd_ctl_elem_info_get_max64(ctl->info); j += step) {
+
+ snd_ctl_elem_value_set_integer64(val, i, j);
+ err = write_and_verify(ctl, val, NULL);
+ if (err != 0)
+ fail = true;
+ }
+ }
+
+ return !fail;
+}
+
+static bool test_ctl_write_valid_enumerated(struct ctl_data *ctl)
+{
+ int err, i, j;
+ bool fail = false;
+ snd_ctl_elem_value_t *val;
+ snd_ctl_elem_value_alloca(&val);
+
+ snd_ctl_elem_value_set_id(val, ctl->id);
+
+ for (i = 0; i < snd_ctl_elem_info_get_count(ctl->info); i++) {
+ for (j = 0; j < snd_ctl_elem_info_get_items(ctl->info); j++) {
+ snd_ctl_elem_value_set_enumerated(val, i, j);
+ err = write_and_verify(ctl, val, NULL);
+ if (err != 0)
+ fail = true;
+ }
+ }
+
+ return !fail;
+}
+
+static void test_ctl_write_valid(struct ctl_data *ctl)
+{
+ bool pass;
+
+ /* If the control is turned off let's be polite */
+ if (snd_ctl_elem_info_is_inactive(ctl->info)) {
+ ksft_print_msg("%s is inactive\n", ctl->name);
+ ksft_test_result_skip("write_valid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+ return;
+ }
+
+ if (!snd_ctl_elem_info_is_writable(ctl->info)) {
+ ksft_print_msg("%s is not writeable\n", ctl->name);
+ ksft_test_result_skip("write_valid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+ return;
+ }
+
+ switch (snd_ctl_elem_info_get_type(ctl->info)) {
+ case SND_CTL_ELEM_TYPE_BOOLEAN:
+ pass = test_ctl_write_valid_boolean(ctl);
+ break;
+
+ case SND_CTL_ELEM_TYPE_INTEGER:
+ pass = test_ctl_write_valid_integer(ctl);
+ break;
+
+ case SND_CTL_ELEM_TYPE_INTEGER64:
+ pass = test_ctl_write_valid_integer64(ctl);
+ break;
+
+ case SND_CTL_ELEM_TYPE_ENUMERATED:
+ pass = test_ctl_write_valid_enumerated(ctl);
+ break;
+
+ default:
+ /* No tests for this yet */
+ ksft_test_result_skip("write_valid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+ return;
+ }
+
+ /* Restore the default value to minimise disruption */
+ write_and_verify(ctl, ctl->def_val, NULL);
+
+ ksft_test_result(pass, "write_valid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+}
+
+static bool test_ctl_write_invalid_value(struct ctl_data *ctl,
+ snd_ctl_elem_value_t *val)
+{
+ int err;
+
+ /* Ideally this will fail... */
+ err = snd_ctl_elem_write(ctl->card->handle, val);
+ if (err < 0)
+ return false;
+
+ /* ...but some devices will clamp to an in range value */
+ err = snd_ctl_elem_read(ctl->card->handle, val);
+ if (err < 0) {
+ ksft_print_msg("%s failed to read: %s\n",
+ ctl->name, snd_strerror(err));
+ return true;
+ }
+
+ return !ctl_value_valid(ctl, val);
+}
+
+static bool test_ctl_write_invalid_boolean(struct ctl_data *ctl)
+{
+ int i;
+ bool fail = false;
+ snd_ctl_elem_value_t *val;
+ snd_ctl_elem_value_alloca(&val);
+
+ for (i = 0; i < snd_ctl_elem_info_get_count(ctl->info); i++) {
+ snd_ctl_elem_value_copy(val, ctl->def_val);
+ snd_ctl_elem_value_set_boolean(val, i, 2);
+
+ if (test_ctl_write_invalid_value(ctl, val))
+ fail = true;
+ }
+
+ return !fail;
+}
+
+static bool test_ctl_write_invalid_integer(struct ctl_data *ctl)
+{
+ int i;
+ bool fail = false;
+ snd_ctl_elem_value_t *val;
+ snd_ctl_elem_value_alloca(&val);
+
+ for (i = 0; i < snd_ctl_elem_info_get_count(ctl->info); i++) {
+ if (snd_ctl_elem_info_get_min(ctl->info) != LONG_MIN) {
+ /* Just under range */
+ snd_ctl_elem_value_copy(val, ctl->def_val);
+ snd_ctl_elem_value_set_integer(val, i,
+ snd_ctl_elem_info_get_min(ctl->info) - 1);
+
+ if (test_ctl_write_invalid_value(ctl, val))
+ fail = true;
+
+ /* Minimum representable value */
+ snd_ctl_elem_value_copy(val, ctl->def_val);
+ snd_ctl_elem_value_set_integer(val, i, LONG_MIN);
+
+ if (test_ctl_write_invalid_value(ctl, val))
+ fail = true;
+ }
+
+ if (snd_ctl_elem_info_get_max(ctl->info) != LONG_MAX) {
+ /* Just over range */
+ snd_ctl_elem_value_copy(val, ctl->def_val);
+ snd_ctl_elem_value_set_integer(val, i,
+ snd_ctl_elem_info_get_max(ctl->info) + 1);
+
+ if (test_ctl_write_invalid_value(ctl, val))
+ fail = true;
+
+ /* Maximum representable value */
+ snd_ctl_elem_value_copy(val, ctl->def_val);
+ snd_ctl_elem_value_set_integer(val, i, LONG_MAX);
+
+ if (test_ctl_write_invalid_value(ctl, val))
+ fail = true;
+ }
+ }
+
+ return !fail;
+}
+
+static bool test_ctl_write_invalid_integer64(struct ctl_data *ctl)
+{
+ int i;
+ bool fail = false;
+ snd_ctl_elem_value_t *val;
+ snd_ctl_elem_value_alloca(&val);
+
+ for (i = 0; i < snd_ctl_elem_info_get_count(ctl->info); i++) {
+ if (snd_ctl_elem_info_get_min64(ctl->info) != LLONG_MIN) {
+ /* Just under range */
+ snd_ctl_elem_value_copy(val, ctl->def_val);
+ snd_ctl_elem_value_set_integer64(val, i,
+ snd_ctl_elem_info_get_min64(ctl->info) - 1);
+
+ if (test_ctl_write_invalid_value(ctl, val))
+ fail = true;
+
+ /* Minimum representable value */
+ snd_ctl_elem_value_copy(val, ctl->def_val);
+ snd_ctl_elem_value_set_integer64(val, i, LLONG_MIN);
+
+ if (test_ctl_write_invalid_value(ctl, val))
+ fail = true;
+ }
+
+ if (snd_ctl_elem_info_get_max64(ctl->info) != LLONG_MAX) {
+ /* Just over range */
+ snd_ctl_elem_value_copy(val, ctl->def_val);
+ snd_ctl_elem_value_set_integer64(val, i,
+ snd_ctl_elem_info_get_max64(ctl->info) + 1);
+
+ if (test_ctl_write_invalid_value(ctl, val))
+ fail = true;
+
+ /* Maximum representable value */
+ snd_ctl_elem_value_copy(val, ctl->def_val);
+ snd_ctl_elem_value_set_integer64(val, i, LLONG_MAX);
+
+ if (test_ctl_write_invalid_value(ctl, val))
+ fail = true;
+ }
+ }
+
+ return !fail;
+}
+
+static bool test_ctl_write_invalid_enumerated(struct ctl_data *ctl)
+{
+ int i;
+ bool fail = false;
+ snd_ctl_elem_value_t *val;
+ snd_ctl_elem_value_alloca(&val);
+
+ snd_ctl_elem_value_set_id(val, ctl->id);
+
+ for (i = 0; i < snd_ctl_elem_info_get_count(ctl->info); i++) {
+ /* One beyond maximum */
+ snd_ctl_elem_value_copy(val, ctl->def_val);
+ snd_ctl_elem_value_set_enumerated(val, i,
+ snd_ctl_elem_info_get_items(ctl->info));
+
+ if (test_ctl_write_invalid_value(ctl, val))
+ fail = true;
+
+ /* Maximum representable value */
+ snd_ctl_elem_value_copy(val, ctl->def_val);
+ snd_ctl_elem_value_set_enumerated(val, i, UINT_MAX);
+
+ if (test_ctl_write_invalid_value(ctl, val))
+ fail = true;
+	}
+
+	return !fail;
+}
+
+static void test_ctl_write_invalid(struct ctl_data *ctl)
+{
+ bool pass;
+
+ /* If the control is turned off let's be polite */
+ if (snd_ctl_elem_info_is_inactive(ctl->info)) {
+ ksft_print_msg("%s is inactive\n", ctl->name);
+ ksft_test_result_skip("write_invalid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+ return;
+ }
+
+ if (!snd_ctl_elem_info_is_writable(ctl->info)) {
+ ksft_print_msg("%s is not writeable\n", ctl->name);
+ ksft_test_result_skip("write_invalid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+ return;
+ }
+
+ switch (snd_ctl_elem_info_get_type(ctl->info)) {
+ case SND_CTL_ELEM_TYPE_BOOLEAN:
+ pass = test_ctl_write_invalid_boolean(ctl);
+ break;
+
+ case SND_CTL_ELEM_TYPE_INTEGER:
+ pass = test_ctl_write_invalid_integer(ctl);
+ break;
+
+ case SND_CTL_ELEM_TYPE_INTEGER64:
+ pass = test_ctl_write_invalid_integer64(ctl);
+ break;
+
+ case SND_CTL_ELEM_TYPE_ENUMERATED:
+ pass = test_ctl_write_invalid_enumerated(ctl);
+ break;
+
+ default:
+ /* No tests for this yet */
+ ksft_test_result_skip("write_invalid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+ return;
+ }
+
+ /* Restore the default value to minimise disruption */
+ write_and_verify(ctl, ctl->def_val, NULL);
+
+ ksft_test_result(pass, "write_invalid.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+}
+
+static void test_ctl_event_missing(struct ctl_data *ctl)
+{
+ ksft_test_result(!ctl->event_missing, "event_missing.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+}
+
+static void test_ctl_event_spurious(struct ctl_data *ctl)
+{
+ ksft_test_result(!ctl->event_spurious, "event_spurious.%s.%d\n",
+ ctl->card->card_name, ctl->elem);
+}
+
+int main(void)
+{
+ struct ctl_data *ctl;
+
+ ksft_print_header();
+
+ find_controls();
+
+ ksft_set_plan(num_controls * TESTS_PER_CONTROL);
+
+ for (ctl = ctl_list; ctl != NULL; ctl = ctl->next) {
+ /*
+ * Must test get_value() before we write anything, the
+ * test stores the default value for later cleanup.
+ */
+ test_ctl_get_value(ctl);
+ test_ctl_name(ctl);
+ test_ctl_write_default(ctl);
+ test_ctl_write_valid(ctl);
+ test_ctl_write_invalid(ctl);
+ test_ctl_event_missing(ctl);
+ test_ctl_event_spurious(ctl);
+ }
+
+ ksft_exit_pass();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/alsa/pcm-test.c b/tools/testing/selftests/alsa/pcm-test.c
new file mode 100644
index 000000000000..ee04ccef7d7c
--- /dev/null
+++ b/tools/testing/selftests/alsa/pcm-test.c
@@ -0,0 +1,671 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// kselftest for the ALSA PCM API
+//
+// Original author: Jaroslav Kysela <perex@perex.cz>
+// Copyright (c) 2022 Red Hat Inc.
+
+// This test will iterate over all cards detected in the system, exercising
+// every PCM device it can find. This may conflict with other system
+// software if there is audio activity, so it is best run on a system
+// with a minimal active userspace.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <assert.h>
+#include <pthread.h>
+
+#include "kselftest.h"
+#include "alsa-local.h"
+
+typedef struct timespec timestamp_t;
+
+struct card_data {
+ int card;
+ snd_ctl_card_info_t *info;
+ const char *name;
+ pthread_t thread;
+ struct card_data *next;
+};
+
+struct card_data *card_list;
+
+struct pcm_data {
+ snd_pcm_t *handle;
+ int card;
+ int device;
+ int subdevice;
+ const char *card_name;
+ snd_pcm_stream_t stream;
+ snd_config_t *pcm_config;
+ struct pcm_data *next;
+};
+
+struct pcm_data *pcm_list;
+
+int num_missing;
+struct pcm_data *pcm_missing;
+
+snd_config_t *default_pcm_config;
+
+/* Lock while reporting results since the kselftest helpers don't */
+pthread_mutex_t results_lock = PTHREAD_MUTEX_INITIALIZER;
+
+enum test_class {
+ TEST_CLASS_DEFAULT,
+ TEST_CLASS_SYSTEM,
+};
+
+void timestamp_now(timestamp_t *tstamp)
+{
+ if (clock_gettime(CLOCK_MONOTONIC_RAW, tstamp))
+ ksft_exit_fail_msg("clock_get_time\n");
+}
+
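+/*
+ * Elapsed wall-clock time since *tstamp, rounded to the nearest
+ * millisecond (e.g. 1.4996 s reports as 1500 ms).
+ */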
+long long timestamp_diff_ms(timestamp_t *tstamp)
+{
+ timestamp_t now, diff;
+ timestamp_now(&now);
+ if (tstamp->tv_nsec > now.tv_nsec) {
+ diff.tv_sec = now.tv_sec - tstamp->tv_sec - 1;
+ diff.tv_nsec = (now.tv_nsec + 1000000000L) - tstamp->tv_nsec;
+ } else {
+ diff.tv_sec = now.tv_sec - tstamp->tv_sec;
+ diff.tv_nsec = now.tv_nsec - tstamp->tv_nsec;
+ }
+ return (diff.tv_sec * 1000) + ((diff.tv_nsec + 500000L) / 1000000L);
+}
+
+static long device_from_id(snd_config_t *node)
+{
+ const char *id;
+ char *end;
+ long v;
+
+ if (snd_config_get_id(node, &id))
+ ksft_exit_fail_msg("snd_config_get_id\n");
+ errno = 0;
+ v = strtol(id, &end, 10);
+ if (errno || *end)
+ return -1;
+ return v;
+}
+
+static void missing_device(int card, int device, int subdevice, snd_pcm_stream_t stream)
+{
+ struct pcm_data *pcm_data;
+
+ for (pcm_data = pcm_list; pcm_data != NULL; pcm_data = pcm_data->next) {
+ if (pcm_data->card != card)
+ continue;
+ if (pcm_data->device != device)
+ continue;
+ if (pcm_data->subdevice != subdevice)
+ continue;
+ if (pcm_data->stream != stream)
+ continue;
+ return;
+ }
+ pcm_data = calloc(1, sizeof(*pcm_data));
+ if (!pcm_data)
+ ksft_exit_fail_msg("Out of memory\n");
+ pcm_data->card = card;
+ pcm_data->device = device;
+ pcm_data->subdevice = subdevice;
+ pcm_data->stream = stream;
+ pcm_data->next = pcm_missing;
+ pcm_missing = pcm_data;
+ num_missing++;
+}
+
+static void missing_devices(int card, snd_config_t *card_config)
+{
+ snd_config_t *pcm_config, *node1, *node2;
+ snd_config_iterator_t i1, i2, next1, next2;
+ int device, subdevice;
+
+ pcm_config = conf_get_subtree(card_config, "pcm", NULL);
+ if (!pcm_config)
+ return;
+ snd_config_for_each(i1, next1, pcm_config) {
+ node1 = snd_config_iterator_entry(i1);
+ device = device_from_id(node1);
+ if (device < 0)
+ continue;
+ if (snd_config_get_type(node1) != SND_CONFIG_TYPE_COMPOUND)
+ continue;
+ snd_config_for_each(i2, next2, node1) {
+ node2 = snd_config_iterator_entry(i2);
+ subdevice = device_from_id(node2);
+ if (subdevice < 0)
+ continue;
+ if (conf_get_subtree(node2, "PLAYBACK", NULL))
+ missing_device(card, device, subdevice, SND_PCM_STREAM_PLAYBACK);
+ if (conf_get_subtree(node2, "CAPTURE", NULL))
+ missing_device(card, device, subdevice, SND_PCM_STREAM_CAPTURE);
+ }
+ }
+}
+
+static void find_pcms(void)
+{
+ char name[32], key[64];
+ char *card_name, *card_longname;
+ int card, dev, subdev, count, direction, err;
+ snd_pcm_stream_t stream;
+ struct pcm_data *pcm_data;
+ snd_ctl_t *handle;
+ snd_pcm_info_t *pcm_info;
+ snd_config_t *config, *card_config, *pcm_config;
+ struct card_data *card_data;
+
+ snd_pcm_info_alloca(&pcm_info);
+
+ card = -1;
+ if (snd_card_next(&card) < 0 || card < 0)
+ return;
+
+ config = get_alsalib_config();
+
+ while (card >= 0) {
+ card_data = calloc(1, sizeof(*card_data));
+ if (!card_data)
+ ksft_exit_fail_msg("Out of memory\n");
+
+ sprintf(name, "hw:%d", card);
+
+ err = snd_ctl_open_lconf(&handle, name, 0, config);
+ if (err < 0) {
+ ksft_print_msg("Failed to get hctl for card %d: %s\n",
+ card, snd_strerror(err));
+ goto next_card;
+ }
+
+ err = snd_card_get_name(card, &card_name);
+ if (err != 0)
+ card_name = "Unknown";
+ err = snd_card_get_longname(card, &card_longname);
+ if (err != 0)
+ card_longname = "Unknown";
+
+ err = snd_ctl_card_info_malloc(&card_data->info);
+ if (err != 0)
+ ksft_exit_fail_msg("Failed to allocate card info: %d\n",
+ err);
+
+ err = snd_ctl_card_info(handle, card_data->info);
+ if (err == 0) {
+ card_data->name = snd_ctl_card_info_get_id(card_data->info);
+ if (!card_data->name)
+ ksft_print_msg("Failed to get card ID\n");
+ } else {
+ ksft_print_msg("Failed to get card info: %d\n", err);
+ }
+
+ if (!card_data->name)
+ card_data->name = "Unknown";
+
+ ksft_print_msg("Card %d/%s - %s (%s)\n", card,
+ card_data->name, card_name, card_longname);
+
+ card_config = conf_by_card(card);
+
+ card_data->card = card;
+ card_data->next = card_list;
+ card_list = card_data;
+
+ dev = -1;
+ while (1) {
+ if (snd_ctl_pcm_next_device(handle, &dev) < 0)
+ ksft_exit_fail_msg("snd_ctl_pcm_next_device\n");
+ if (dev < 0)
+ break;
+
+ for (direction = 0; direction < 2; direction++) {
+ stream = direction ? SND_PCM_STREAM_CAPTURE : SND_PCM_STREAM_PLAYBACK;
+ sprintf(key, "pcm.%d.%s", dev, snd_pcm_stream_name(stream));
+ pcm_config = conf_get_subtree(card_config, key, NULL);
+ if (conf_get_bool(card_config, key, "skip", false)) {
+ ksft_print_msg("skipping pcm %d.%d.%s\n", card, dev, snd_pcm_stream_name(stream));
+ continue;
+ }
+ snd_pcm_info_set_device(pcm_info, dev);
+ snd_pcm_info_set_subdevice(pcm_info, 0);
+ snd_pcm_info_set_stream(pcm_info, stream);
+ err = snd_ctl_pcm_info(handle, pcm_info);
+ if (err == -ENOENT)
+ continue;
+ if (err < 0)
+ ksft_exit_fail_msg("snd_ctl_pcm_info: %d:%d:%d\n",
+ dev, 0, stream);
+
+ ksft_print_msg("%s.0 - %s\n", card_data->name,
+ snd_pcm_info_get_id(pcm_info));
+
+ count = snd_pcm_info_get_subdevices_count(pcm_info);
+ for (subdev = 0; subdev < count; subdev++) {
+ sprintf(key, "pcm.%d.%d.%s", dev, subdev, snd_pcm_stream_name(stream));
+ if (conf_get_bool(card_config, key, "skip", false)) {
+ ksft_print_msg("skipping pcm %d.%d.%d.%s\n", card, dev,
+ subdev, snd_pcm_stream_name(stream));
+ continue;
+ }
+ pcm_data = calloc(1, sizeof(*pcm_data));
+ if (!pcm_data)
+ ksft_exit_fail_msg("Out of memory\n");
+ pcm_data->card = card;
+ pcm_data->device = dev;
+ pcm_data->subdevice = subdev;
+ pcm_data->card_name = card_data->name;
+ pcm_data->stream = stream;
+ pcm_data->pcm_config = conf_get_subtree(card_config, key, NULL);
+ pcm_data->next = pcm_list;
+ pcm_list = pcm_data;
+ }
+ }
+ }
+
+ /* check for missing devices */
+ missing_devices(card, card_config);
+
+ next_card:
+ snd_ctl_close(handle);
+ if (snd_card_next(&card) < 0) {
+ ksft_print_msg("snd_card_next");
+ break;
+ }
+ }
+
+ snd_config_delete(config);
+}
+
+static void test_pcm_time(struct pcm_data *data, enum test_class class,
+ const char *test_name, snd_config_t *pcm_cfg)
+{
+ char name[64], msg[256];
+ const int duration_s = 2, margin_ms = 100;
+ const int duration_ms = duration_s * 1000;
+ const char *cs;
+ int i, err;
+ snd_pcm_t *handle = NULL;
+ snd_pcm_access_t access = SND_PCM_ACCESS_RW_INTERLEAVED;
+ snd_pcm_format_t format, old_format;
+ const char *alt_formats[8];
+ unsigned char *samples = NULL;
+ snd_pcm_sframes_t frames;
+ long long ms;
+ long rate, channels, period_size, buffer_size;
+ unsigned int rrate;
+ snd_pcm_uframes_t rperiod_size, rbuffer_size, start_threshold;
+ timestamp_t tstamp;
+ bool pass = false;
+ snd_pcm_hw_params_t *hw_params;
+ snd_pcm_sw_params_t *sw_params;
+ const char *test_class_name;
+ bool skip = true;
+ const char *desc;
+
+ switch (class) {
+ case TEST_CLASS_DEFAULT:
+ test_class_name = "default";
+ break;
+ case TEST_CLASS_SYSTEM:
+ test_class_name = "system";
+ break;
+ default:
+ ksft_exit_fail_msg("Unknown test class %d\n", class);
+ break;
+ }
+
+ desc = conf_get_string(pcm_cfg, "description", NULL, NULL);
+ if (desc)
+ ksft_print_msg("%s.%s.%s.%d.%d.%s - %s\n",
+ test_class_name, test_name,
+ data->card_name, data->device, data->subdevice,
+ snd_pcm_stream_name(data->stream),
+ desc);
+
+ snd_pcm_hw_params_alloca(&hw_params);
+ snd_pcm_sw_params_alloca(&sw_params);
+
+ cs = conf_get_string(pcm_cfg, "format", NULL, "S16_LE");
+ format = snd_pcm_format_value(cs);
+ if (format == SND_PCM_FORMAT_UNKNOWN)
+ ksft_exit_fail_msg("Wrong format '%s'\n", cs);
+ conf_get_string_array(pcm_cfg, "alt_formats", NULL,
+ alt_formats, ARRAY_SIZE(alt_formats), NULL);
+ rate = conf_get_long(pcm_cfg, "rate", NULL, 48000);
+ channels = conf_get_long(pcm_cfg, "channels", NULL, 2);
+ period_size = conf_get_long(pcm_cfg, "period_size", NULL, 4096);
+ buffer_size = conf_get_long(pcm_cfg, "buffer_size", NULL, 16384);
+
+ samples = malloc((rate * channels * snd_pcm_format_physical_width(format)) / 8);
+ if (!samples)
+ ksft_exit_fail_msg("Out of memory\n");
+ snd_pcm_format_set_silence(format, samples, rate * channels);
+
+ sprintf(name, "hw:%d,%d,%d", data->card, data->device, data->subdevice);
+ err = snd_pcm_open(&handle, name, data->stream, 0);
+ if (err < 0) {
+ snprintf(msg, sizeof(msg), "Failed to get pcm handle: %s", snd_strerror(err));
+ goto __close;
+ }
+
+ err = snd_pcm_hw_params_any(handle, hw_params);
+ if (err < 0) {
+ snprintf(msg, sizeof(msg), "snd_pcm_hw_params_any: %s", snd_strerror(err));
+ goto __close;
+ }
+ err = snd_pcm_hw_params_set_rate_resample(handle, hw_params, 0);
+ if (err < 0) {
+ snprintf(msg, sizeof(msg), "snd_pcm_hw_params_set_rate_resample: %s", snd_strerror(err));
+ goto __close;
+ }
+ err = snd_pcm_hw_params_set_access(handle, hw_params, access);
+ if (err < 0) {
+ snprintf(msg, sizeof(msg), "snd_pcm_hw_params_set_access %s: %s",
+ snd_pcm_access_name(access), snd_strerror(err));
+ goto __close;
+ }
+ i = -1;
+__format:
+ err = snd_pcm_hw_params_set_format(handle, hw_params, format);
+ if (err < 0) {
+ i++;
+ if (i < ARRAY_SIZE(alt_formats) && alt_formats[i]) {
+ old_format = format;
+ format = snd_pcm_format_value(alt_formats[i]);
+ if (format != SND_PCM_FORMAT_UNKNOWN) {
+ ksft_print_msg("%s.%s.%d.%d.%s.%s format %s -> %s\n",
+ test_name,
+ data->card_name, data->device, data->subdevice,
+ snd_pcm_stream_name(data->stream),
+ snd_pcm_access_name(access),
+ snd_pcm_format_name(old_format),
+ snd_pcm_format_name(format));
+ samples = realloc(samples, (rate * channels *
+ snd_pcm_format_physical_width(format)) / 8);
+ if (!samples)
+ ksft_exit_fail_msg("Out of memory\n");
+ snd_pcm_format_set_silence(format, samples, rate * channels);
+ goto __format;
+ }
+ }
+ snprintf(msg, sizeof(msg), "snd_pcm_hw_params_set_format %s: %s",
+ snd_pcm_format_name(format), snd_strerror(err));
+ goto __close;
+ }
+ err = snd_pcm_hw_params_set_channels(handle, hw_params, channels);
+ if (err < 0) {
+ snprintf(msg, sizeof(msg), "snd_pcm_hw_params_set_channels %ld: %s", channels, snd_strerror(err));
+ goto __close;
+ }
+ rrate = rate;
+ err = snd_pcm_hw_params_set_rate_near(handle, hw_params, &rrate, 0);
+ if (err < 0) {
+ snprintf(msg, sizeof(msg), "snd_pcm_hw_params_set_rate %ld: %s", rate, snd_strerror(err));
+ goto __close;
+ }
+ if (rrate != rate) {
+ snprintf(msg, sizeof(msg), "rate mismatch %ld != %u", rate, rrate);
+ goto __close;
+ }
+ rperiod_size = period_size;
+ err = snd_pcm_hw_params_set_period_size_near(handle, hw_params, &rperiod_size, 0);
+ if (err < 0) {
+ snprintf(msg, sizeof(msg), "snd_pcm_hw_params_set_period_size %ld: %s", period_size, snd_strerror(err));
+ goto __close;
+ }
+ rbuffer_size = buffer_size;
+ err = snd_pcm_hw_params_set_buffer_size_near(handle, hw_params, &rbuffer_size);
+ if (err < 0) {
+ snprintf(msg, sizeof(msg), "snd_pcm_hw_params_set_buffer_size %ld: %s", buffer_size, snd_strerror(err));
+ goto __close;
+ }
+ err = snd_pcm_hw_params(handle, hw_params);
+ if (err < 0) {
+ snprintf(msg, sizeof(msg), "snd_pcm_hw_params: %s", snd_strerror(err));
+ goto __close;
+ }
+
+ err = snd_pcm_sw_params_current(handle, sw_params);
+ if (err < 0) {
+ snprintf(msg, sizeof(msg), "snd_pcm_sw_params_current: %s", snd_strerror(err));
+ goto __close;
+ }
+ if (data->stream == SND_PCM_STREAM_PLAYBACK) {
+ start_threshold = (rbuffer_size / rperiod_size) * rperiod_size;
+ } else {
+ start_threshold = rperiod_size;
+ }
+ err = snd_pcm_sw_params_set_start_threshold(handle, sw_params, start_threshold);
+ if (err < 0) {
+ snprintf(msg, sizeof(msg), "snd_pcm_sw_params_set_start_threshold %ld: %s", (long)start_threshold, snd_strerror(err));
+ goto __close;
+ }
+ err = snd_pcm_sw_params_set_avail_min(handle, sw_params, rperiod_size);
+ if (err < 0) {
+ snprintf(msg, sizeof(msg), "snd_pcm_sw_params_set_avail_min %ld: %s", (long)rperiod_size, snd_strerror(err));
+ goto __close;
+ }
+ err = snd_pcm_sw_params(handle, sw_params);
+ if (err < 0) {
+ snprintf(msg, sizeof(msg), "snd_pcm_sw_params: %s", snd_strerror(err));
+ goto __close;
+ }
+
+ ksft_print_msg("%s.%s.%s.%d.%d.%s hw_params.%s.%s.%ld.%ld.%ld.%ld sw_params.%ld\n",
+ test_class_name, test_name,
+ data->card_name, data->device, data->subdevice,
+ snd_pcm_stream_name(data->stream),
+ snd_pcm_access_name(access),
+ snd_pcm_format_name(format),
+ (long)rate, (long)channels,
+ (long)rperiod_size, (long)rbuffer_size,
+ (long)start_threshold);
+
+ /* Set all the params, actually run the test */
+ skip = false;
+
+ timestamp_now(&tstamp);
+ for (i = 0; i < duration_s; i++) {
+ if (data->stream == SND_PCM_STREAM_PLAYBACK) {
+ frames = snd_pcm_writei(handle, samples, rate);
+ if (frames < 0) {
+ snprintf(msg, sizeof(msg),
+ "Write failed: expected %ld, wrote %li", rate, frames);
+ goto __close;
+ }
+ if (frames < rate) {
+ snprintf(msg, sizeof(msg),
+ "expected %ld, wrote %li", rate, frames);
+ goto __close;
+ }
+ } else {
+ frames = snd_pcm_readi(handle, samples, rate);
+ if (frames < 0) {
+ snprintf(msg, sizeof(msg),
+ "expected %ld, wrote %li", rate, frames);
+ goto __close;
+ }
+ if (frames < rate) {
+ snprintf(msg, sizeof(msg),
+ "expected %ld, wrote %li", rate, frames);
+ goto __close;
+ }
+ }
+ }
+
+ snd_pcm_drain(handle);
+ ms = timestamp_diff_ms(&tstamp);
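+ /*
+ * The transfer loop above should take roughly duration_s seconds of
+ * stream time; allow margin_ms of slack for startup and scheduling.
+ */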
+ if (ms < duration_ms - margin_ms || ms > duration_ms + margin_ms) {
+ snprintf(msg, sizeof(msg), "time mismatch: expected %dms got %lld", duration_ms, ms);
+ goto __close;
+ }
+
+ msg[0] = '\0';
+ pass = true;
+__close:
+ pthread_mutex_lock(&results_lock);
+
+ switch (class) {
+ case TEST_CLASS_SYSTEM:
+ test_class_name = "system";
+ /*
+ * Anything specified as specific to this system
+ * should always be supported.
+ */
+ ksft_test_result(!skip, "%s.%s.%s.%d.%d.%s.params\n",
+ test_class_name, test_name,
+ data->card_name, data->device,
+ data->subdevice,
+ snd_pcm_stream_name(data->stream));
+ break;
+ default:
+ break;
+ }
+
+ if (!skip)
+ ksft_test_result(pass, "%s.%s.%s.%d.%d.%s\n",
+ test_class_name, test_name,
+ data->card_name, data->device,
+ data->subdevice,
+ snd_pcm_stream_name(data->stream));
+ else
+ ksft_test_result_skip("%s.%s.%s.%d.%d.%s\n",
+ test_class_name, test_name,
+ data->card_name, data->device,
+ data->subdevice,
+ snd_pcm_stream_name(data->stream));
+
+ if (msg[0])
+ ksft_print_msg("%s\n", msg);
+
+ pthread_mutex_unlock(&results_lock);
+
+ free(samples);
+ if (handle)
+ snd_pcm_close(handle);
+}
+
+void run_time_tests(struct pcm_data *pcm, enum test_class class,
+ snd_config_t *cfg)
+{
+ const char *test_name, *test_type;
+ snd_config_t *pcm_cfg;
+ snd_config_iterator_t i, next;
+
+ if (!cfg)
+ return;
+
+ cfg = conf_get_subtree(cfg, "test", NULL);
+ if (cfg == NULL)
+ return;
+
+ snd_config_for_each(i, next, cfg) {
+ pcm_cfg = snd_config_iterator_entry(i);
+ if (snd_config_get_id(pcm_cfg, &test_name) < 0)
+ ksft_exit_fail_msg("snd_config_get_id\n");
+ test_type = conf_get_string(pcm_cfg, "type", NULL, "time");
+ if (strcmp(test_type, "time") == 0)
+ test_pcm_time(pcm, class, test_name, pcm_cfg);
+ else
+ ksft_exit_fail_msg("unknown test type '%s'\n", test_type);
+ }
+}
+
+void *card_thread(void *data)
+{
+ struct card_data *card = data;
+ struct pcm_data *pcm;
+
+ for (pcm = pcm_list; pcm != NULL; pcm = pcm->next) {
+ if (pcm->card != card->card)
+ continue;
+
+ run_time_tests(pcm, TEST_CLASS_DEFAULT, default_pcm_config);
+ run_time_tests(pcm, TEST_CLASS_SYSTEM, pcm->pcm_config);
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ struct card_data *card;
+ struct card_cfg_data *conf;
+ struct pcm_data *pcm;
+ snd_config_t *global_config, *cfg;
+ int num_pcm_tests = 0, num_tests, num_std_pcm_tests;
+ int ret;
+ void *thread_ret;
+
+ ksft_print_header();
+
+ global_config = conf_load_from_file("pcm-test.conf");
+ default_pcm_config = conf_get_subtree(global_config, "pcm", NULL);
+ if (default_pcm_config == NULL)
+ ksft_exit_fail_msg("default pcm test configuration (pcm compound) is missing\n");
+
+ conf_load();
+
+ find_pcms();
+
+ for (conf = conf_cards; conf; conf = conf->next)
+ if (conf->card < 0)
+ num_missing++;
+
+ num_std_pcm_tests = conf_get_count(default_pcm_config, "test", NULL);
+
+ for (pcm = pcm_list; pcm != NULL; pcm = pcm->next) {
+ num_pcm_tests += num_std_pcm_tests;
+ cfg = pcm->pcm_config;
+ if (cfg == NULL)
+ continue;
+ /* Setting params is reported as a separate test */
+ num_tests = conf_get_count(cfg, "test", NULL) * 2;
+ if (num_tests > 0)
+ num_pcm_tests += num_tests;
+ }
+
+ ksft_set_plan(num_missing + num_pcm_tests);
+
+ for (conf = conf_cards; conf; conf = conf->next)
+ if (conf->card < 0)
+ ksft_test_result_fail("test.missing.%s.%s\n",
+ conf->filename, conf->config_id);
+
+ for (pcm = pcm_missing; pcm != NULL; pcm = pcm->next) {
+ ksft_test_result(false, "test.missing.%s.%d.%d.%s\n",
+ pcm->card_name, pcm->device, pcm->subdevice,
+ snd_pcm_stream_name(pcm->stream));
+ }
+
+ for (card = card_list; card != NULL; card = card->next) {
+ ret = pthread_create(&card->thread, NULL, card_thread, card);
+ if (ret != 0) {
+ ksft_exit_fail_msg("Failed to create card %d thread: %d (%s)\n",
+ card->card, ret,
+ strerror(ret));
+ }
+ }
+
+ for (card = card_list; card != NULL; card = card->next) {
+ ret = pthread_join(card->thread, &thread_ret);
+ if (ret != 0) {
+ ksft_exit_fail_msg("Failed to join card %d thread: %d (%s)\n",
+ card->card, ret,
+ strerror(ret));
+ }
+ }
+
+ snd_config_delete(global_config);
+ conf_free();
+
+ ksft_exit_pass();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/alsa/pcm-test.conf b/tools/testing/selftests/alsa/pcm-test.conf
new file mode 100644
index 000000000000..71bd3f78a6f2
--- /dev/null
+++ b/tools/testing/selftests/alsa/pcm-test.conf
@@ -0,0 +1,63 @@
+pcm.test.time1 {
+ description "8kHz mono large periods"
+ format S16_LE
+ alt_formats [ S32_LE ]
+ rate 8000
+ channels 1
+ period_size 8000
+ buffer_size 32000
+}
+pcm.test.time2 {
+ description "8kHz stereo large periods"
+ format S16_LE
+ alt_formats [ S32_LE ]
+ rate 8000
+ channels 2
+ period_size 8000
+ buffer_size 32000
+}
+pcm.test.time3 {
+ description "44.1kHz stereo large periods"
+ format S16_LE
+ alt_formats [ S32_LE ]
+ rate 44100
+ channels 2
+ period_size 22500
+ buffer_size 192000
+}
+pcm.test.time4 {
+ description "48kHz stereo small periods"
+ format S16_LE
+ alt_formats [ S32_LE ]
+ rate 48000
+ channels 2
+ period_size 512
+ buffer_size 4096
+}
+pcm.test.time5 {
+ description "48kHz stereo large periods"
+ format S16_LE
+ alt_formats [ S32_LE ]
+ rate 48000
+ channels 2
+ period_size 24000
+ buffer_size 192000
+}
+pcm.test.time6 {
+ description "48kHz 6 channel large periods"
+ format S16_LE
+ alt_formats [ S32_LE ]
+ rate 48000
+ channels 6
+ period_size 48000
+ buffer_size 576000
+}
+pcm.test.time7 {
+ description "96kHz stereo large periods"
+ format S16_LE
+ alt_formats [ S32_LE ]
+ rate 96000
+ channels 2
+ period_size 48000
+ buffer_size 192000
+}
diff --git a/tools/testing/selftests/alsa/test-pcmtest-driver.c b/tools/testing/selftests/alsa/test-pcmtest-driver.c
new file mode 100644
index 000000000000..95065ef3b441
--- /dev/null
+++ b/tools/testing/selftests/alsa/test-pcmtest-driver.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This test covers PCM middle-layer data transfers using
+ * the virtual PCM test driver (snd-pcmtest).
+ *
+ * Copyright 2023 Ivan Orlov <ivan.orlov0322@gmail.com>
+ */
+#include <string.h>
+#include <alsa/asoundlib.h>
+#include "kselftest_harness.h"
+
+#define CH_NUM 4
+
+struct pattern_buf {
+ char buf[1024];
+ int len;
+};
+
+struct pattern_buf patterns[CH_NUM];
+
+struct pcmtest_test_params {
+ unsigned long buffer_size;
+ unsigned long period_size;
+ unsigned long channels;
+ unsigned int rate;
+ snd_pcm_access_t access;
+ size_t sec_buf_len;
+ size_t sample_size;
+ int time;
+ snd_pcm_format_t format;
+};
+
+static int read_patterns(void)
+{
+ FILE *fp, *fpl;
+ int i;
+ char pf[64];
+ char plf[64];
+
+ for (i = 0; i < CH_NUM; i++) {
+ sprintf(plf, "/sys/kernel/debug/pcmtest/fill_pattern%d_len", i);
+ fpl = fopen(plf, "r");
+ if (!fpl)
+ return -1;
+ fscanf(fpl, "%u", &patterns[i].len);
+ fclose(fpl);
+
+ sprintf(pf, "/sys/kernel/debug/pcmtest/fill_pattern%d", i);
+ fp = fopen(pf, "r");
+ if (!fp)
+ return -1;
+ fread(patterns[i].buf, 1, patterns[i].len, fp);
+ fclose(fp);
+ }
+
+ return 0;
+}
+
+static int get_test_results(char *debug_name)
+{
+ int result;
+ FILE *f;
+ char fname[128];
+
+ sprintf(fname, "/sys/kernel/debug/pcmtest/%s", debug_name);
+
+ f = fopen(fname, "r");
+ if (!f) {
+ printf("Failed to open file\n");
+ return -1;
+ }
+ fscanf(f, "%d", &result);
+ fclose(f);
+
+ return result;
+}
+
+static size_t get_sec_buf_len(unsigned int rate, unsigned long channels, snd_pcm_format_t format)
+{
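+ /* e.g. 8000 Hz * 4 channels * 2 bytes per S16_LE sample = 64000 bytes per second */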
+ return rate * channels * snd_pcm_format_physical_width(format) / 8;
+}
+
+static int setup_handle(snd_pcm_t **handle, snd_pcm_sw_params_t *swparams,
+ snd_pcm_hw_params_t *hwparams, struct pcmtest_test_params *params,
+ int card, snd_pcm_stream_t stream)
+{
+ char pcm_name[32];
+ int err;
+
+ sprintf(pcm_name, "hw:%d,0,0", card);
+ err = snd_pcm_open(handle, pcm_name, stream, 0);
+ if (err < 0)
+ return err;
+ snd_pcm_hw_params_any(*handle, hwparams);
+ snd_pcm_hw_params_set_rate_resample(*handle, hwparams, 0);
+ snd_pcm_hw_params_set_access(*handle, hwparams, params->access);
+ snd_pcm_hw_params_set_format(*handle, hwparams, params->format);
+ snd_pcm_hw_params_set_channels(*handle, hwparams, params->channels);
+ snd_pcm_hw_params_set_rate_near(*handle, hwparams, &params->rate, 0);
+ snd_pcm_hw_params_set_period_size_near(*handle, hwparams, &params->period_size, 0);
+ snd_pcm_hw_params_set_buffer_size_near(*handle, hwparams, &params->buffer_size);
+ snd_pcm_hw_params(*handle, hwparams);
+ snd_pcm_sw_params_current(*handle, swparams);
+
+ snd_pcm_sw_params_set_avail_min(*handle, swparams, params->period_size);
+ snd_pcm_sw_params(*handle, swparams);
+
+ return 0;
+}
+
+FIXTURE(pcmtest) {
+ int card;
+ snd_pcm_sw_params_t *swparams;
+ snd_pcm_hw_params_t *hwparams;
+ struct pcmtest_test_params params;
+};
+
+FIXTURE_TEARDOWN(pcmtest) {
+}
+
+FIXTURE_SETUP(pcmtest) {
+ char *card_name;
+ int err;
+
+ if (geteuid())
+ SKIP(return, "This test needs root to run!");
+
+ err = read_patterns();
+ if (err)
+ SKIP(return, "Can't read patterns. Probably, module isn't loaded");
+
+ card_name = malloc(127);
+ ASSERT_NE(card_name, NULL);
+ self->params.buffer_size = 16384;
+ self->params.period_size = 4096;
+ self->params.channels = CH_NUM;
+ self->params.rate = 8000;
+ self->params.access = SND_PCM_ACCESS_RW_INTERLEAVED;
+ self->params.format = SND_PCM_FORMAT_S16_LE;
+ self->card = -1;
+ self->params.sample_size = snd_pcm_format_physical_width(self->params.format) / 8;
+
+ self->params.sec_buf_len = get_sec_buf_len(self->params.rate, self->params.channels,
+ self->params.format);
+ self->params.time = 4;
+
+ while (snd_card_next(&self->card) >= 0) {
+ if (self->card == -1)
+ break;
+ snd_card_get_name(self->card, &card_name);
+ if (!strcmp(card_name, "PCM-Test"))
+ break;
+ }
+ free(card_name);
+ ASSERT_NE(self->card, -1);
+}
+
+/*
+ * Try to send a looped, monotonically increasing sequence of bytes to the driver.
+ * If the data isn't corrupted, the driver will set the content of the 'pc_test' debugfs file to '1'.
+ */
+TEST_F(pcmtest, playback) {
+ snd_pcm_t *handle;
+ unsigned char *it;
+ snd_pcm_sframes_t write_res;
+ int test_results;
+ int i, cur_ch, pos_in_ch;
+ void *samples;
+ struct pcmtest_test_params *params = &self->params;
+
+ samples = calloc(self->params.sec_buf_len * self->params.time, 1);
+ ASSERT_NE(samples, NULL);
+
+ snd_pcm_sw_params_alloca(&self->swparams);
+ snd_pcm_hw_params_alloca(&self->hwparams);
+
+ ASSERT_EQ(setup_handle(&handle, self->swparams, self->hwparams, params,
+ self->card, SND_PCM_STREAM_PLAYBACK), 0);
+ snd_pcm_format_set_silence(params->format, samples,
+ params->rate * params->channels * params->time);
+ it = samples;
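+ /*
+ * For interleaved frames, byte i belongs to channel
+ * (i / sample_size) % CH_NUM; pos_in_ch is the byte offset of the
+ * sample within that channel's stream and indexes the channel pattern.
+ */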
+ for (i = 0; i < self->params.sec_buf_len * params->time; i++) {
+ cur_ch = (i / params->sample_size) % CH_NUM;
+ pos_in_ch = i / params->sample_size / CH_NUM * params->sample_size
+ + (i % params->sample_size);
+ it[i] = patterns[cur_ch].buf[pos_in_ch % patterns[cur_ch].len];
+ }
+ write_res = snd_pcm_writei(handle, samples, params->rate * params->time);
+ ASSERT_GE(write_res, 0);
+
+ snd_pcm_close(handle);
+ free(samples);
+ test_results = get_test_results("pc_test");
+ ASSERT_EQ(test_results, 1);
+}
+
+/*
+ * Here we test that the virtual ALSA driver returns a looped, monotonically increasing sequence
+ * of bytes. In the interleaved mode the buffer will contain samples in the following order:
+ * C0, C1, C2, C3, C0, C1, ...
+ */
+TEST_F(pcmtest, capture) {
+ snd_pcm_t *handle;
+ unsigned char *it;
+ snd_pcm_sframes_t read_res;
+ int i, cur_ch, pos_in_ch;
+ void *samples;
+ struct pcmtest_test_params *params = &self->params;
+
+ samples = calloc(self->params.sec_buf_len * self->params.time, 1);
+ ASSERT_NE(samples, NULL);
+
+ snd_pcm_sw_params_alloca(&self->swparams);
+ snd_pcm_hw_params_alloca(&self->hwparams);
+
+ ASSERT_EQ(setup_handle(&handle, self->swparams, self->hwparams,
+ params, self->card, SND_PCM_STREAM_CAPTURE), 0);
+ snd_pcm_format_set_silence(params->format, samples,
+ params->rate * params->channels * params->time);
+ read_res = snd_pcm_readi(handle, samples, params->rate * params->time);
+ ASSERT_GE(read_res, 0);
+ snd_pcm_close(handle);
+ it = (unsigned char *)samples;
+ for (i = 0; i < self->params.sec_buf_len * self->params.time; i++) {
+ cur_ch = (i / params->sample_size) % CH_NUM;
+ pos_in_ch = i / params->sample_size / CH_NUM * params->sample_size
+ + (i % params->sample_size);
+ ASSERT_EQ(it[i], patterns[cur_ch].buf[pos_in_ch % patterns[cur_ch].len]);
+ }
+ free(samples);
+}
+
+// Test capture in the non-interleaved access mode. There is a separate buffer for each recorded channel.
+TEST_F(pcmtest, ni_capture) {
+ snd_pcm_t *handle;
+ struct pcmtest_test_params params = self->params;
+ char **chan_samples;
+ size_t i, j;
+ snd_pcm_sframes_t read_res;
+
+ chan_samples = calloc(CH_NUM, sizeof(*chan_samples));
+ ASSERT_NE(chan_samples, NULL);
+
+ snd_pcm_sw_params_alloca(&self->swparams);
+ snd_pcm_hw_params_alloca(&self->hwparams);
+
+ params.access = SND_PCM_ACCESS_RW_NONINTERLEAVED;
+
+ ASSERT_EQ(setup_handle(&handle, self->swparams, self->hwparams,
+ &params, self->card, SND_PCM_STREAM_CAPTURE), 0);
+
+ for (i = 0; i < CH_NUM; i++)
+ chan_samples[i] = calloc(params.sec_buf_len * params.time, 1);
+
+ for (i = 0; i < 1; i++) {
+ read_res = snd_pcm_readn(handle, (void **)chan_samples, params.rate * params.time);
+ ASSERT_GE(read_res, 0);
+ }
+ snd_pcm_close(handle);
+
+ for (i = 0; i < CH_NUM; i++) {
+ for (j = 0; j < params.rate * params.time; j++)
+ ASSERT_EQ(chan_samples[i][j], patterns[i].buf[j % patterns[i].len]);
+ free(chan_samples[i]);
+ }
+ free(chan_samples);
+}
+
+TEST_F(pcmtest, ni_playback) {
+ snd_pcm_t *handle;
+ struct pcmtest_test_params params = self->params;
+ char **chan_samples;
+ size_t i, j;
+ snd_pcm_sframes_t write_res;
+ int test_res;
+
+ chan_samples = calloc(CH_NUM, sizeof(*chan_samples));
+ ASSERT_NE(chan_samples, NULL);
+
+ snd_pcm_sw_params_alloca(&self->swparams);
+ snd_pcm_hw_params_alloca(&self->hwparams);
+
+ params.access = SND_PCM_ACCESS_RW_NONINTERLEAVED;
+
+ ASSERT_EQ(setup_handle(&handle, self->swparams, self->hwparams,
+ &params, self->card, SND_PCM_STREAM_PLAYBACK), 0);
+
+ for (i = 0; i < CH_NUM; i++) {
+ chan_samples[i] = calloc(params.sec_buf_len * params.time, 1);
+ for (j = 0; j < params.sec_buf_len * params.time; j++)
+ chan_samples[i][j] = patterns[i].buf[j % patterns[i].len];
+ }
+
+ for (i = 0; i < 1; i++) {
+ write_res = snd_pcm_writen(handle, (void **)chan_samples, params.rate * params.time);
+ ASSERT_GE(write_res, 0);
+ }
+
+ snd_pcm_close(handle);
+ test_res = get_test_results("pc_test");
+ ASSERT_EQ(test_res, 1);
+
+ for (i = 0; i < CH_NUM; i++)
+ free(chan_samples[i]);
+ free(chan_samples);
+}
+
+/*
+ * Here we are testing the custom ioctl definition inside the virtual driver. If it triggers
+ * successfully, the driver sets the content of 'ioctl_test' debugfs file to '1'.
+ */
+TEST_F(pcmtest, reset_ioctl) {
+ snd_pcm_t *handle;
+ int test_res;
+ struct pcmtest_test_params *params = &self->params;
+
+ snd_pcm_sw_params_alloca(&self->swparams);
+ snd_pcm_hw_params_alloca(&self->hwparams);
+
+ ASSERT_EQ(setup_handle(&handle, self->swparams, self->hwparams, params,
+ self->card, SND_PCM_STREAM_CAPTURE), 0);
+ snd_pcm_reset(handle);
+ test_res = get_test_results("ioctl_test");
+ ASSERT_EQ(test_res, 1);
+ snd_pcm_close(handle);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/alsa/utimer-test.c b/tools/testing/selftests/alsa/utimer-test.c
new file mode 100644
index 000000000000..c45cb226bd8f
--- /dev/null
+++ b/tools/testing/selftests/alsa/utimer-test.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This test covers the functionality of userspace-driven ALSA timers. Such timers
+ * are purely virtual (they don't depend directly on the hardware) and can be
+ * created and triggered by userspace applications.
+ *
+ * Author: Ivan Orlov <ivan.orlov0322@gmail.com>
+ */
+#include "kselftest_harness.h"
+#include <sound/asound.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <sys/ioctl.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <string.h>
+
+#define FRAME_RATE 8000
+#define PERIOD_SIZE 4410
+#define UTIMER_DEFAULT_ID -1
+#define UTIMER_DEFAULT_FD -1
+#define NANO 1000000000ULL
+#define TICKS_COUNT 10
+#define TICKS_RECORDING_DELTA 5
+#define TIMER_OUTPUT_BUF_LEN 1024
+#define TIMER_FREQ_SEC 1
+#define RESULT_PREFIX_LEN strlen("Total ticks count: ")
+
+enum timer_app_event {
+ TIMER_APP_STARTED,
+ TIMER_APP_RESULT,
+ TIMER_NO_EVENT,
+};
+
+FIXTURE(timer_f) {
+ struct snd_timer_uinfo *utimer_info;
+};
+
+FIXTURE_SETUP(timer_f) {
+ int timer_dev_fd;
+
+ if (geteuid())
+ SKIP(return, "This test needs root to run!");
+
+ self->utimer_info = calloc(1, sizeof(*self->utimer_info));
+ ASSERT_NE(NULL, self->utimer_info);
+
+ /* The resolution is the time one period of frames takes, in nanoseconds */
+ self->utimer_info->resolution = (NANO / FRAME_RATE * PERIOD_SIZE);
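+ /* e.g. 1000000000 / 8000 * 4410 = 551250000 ns, i.e. ~0.55 s per tick */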
+
+ timer_dev_fd = open("/dev/snd/timer", O_RDONLY);
+ ASSERT_GE(timer_dev_fd, 0);
+
+ ASSERT_EQ(ioctl(timer_dev_fd, SNDRV_TIMER_IOCTL_CREATE, self->utimer_info), 0);
+ ASSERT_GE(self->utimer_info->fd, 0);
+
+ close(timer_dev_fd);
+}
+
+FIXTURE_TEARDOWN(timer_f) {
+ close(self->utimer_info->fd);
+ free(self->utimer_info);
+}
+
+static void *ticking_func(void *data)
+{
+ int i;
+ int *fd = (int *)data;
+
+ for (i = 0; i < TICKS_COUNT; i++) {
+ /* Well, trigger the timer! */
+ ioctl(*fd, SNDRV_TIMER_IOCTL_TRIGGER, NULL);
+ sleep(TIMER_FREQ_SEC);
+ }
+
+ return NULL;
+}
+
+static enum timer_app_event parse_timer_output(const char *s)
+{
+ if (strstr(s, "Timer has started"))
+ return TIMER_APP_STARTED;
+ if (strstr(s, "Total ticks count"))
+ return TIMER_APP_RESULT;
+
+ return TIMER_NO_EVENT;
+}
+
+static int parse_timer_result(const char *s)
+{
+ char *end;
+ long d;
+
+ d = strtol(s + RESULT_PREFIX_LEN, &end, 10);
+ if (end == s + RESULT_PREFIX_LEN)
+ return -1;
+
+ return d;
+}
+
+/*
+ * This test triggers the timer and counts ticks at the same time. The number
+ * of timer trigger calls should equal the number of ticks received.
+ */
+TEST_F(timer_f, utimer) {
+ char command[64];
+ pthread_t ticking_thread;
+ int total_ticks = 0;
+ FILE *rfp;
+ char *buf = malloc(TIMER_OUTPUT_BUF_LEN);
+
+ ASSERT_NE(buf, NULL);
+
+ /* The timeout should be the ticks interval * count of ticks + some delta */
+ sprintf(command, "./global-timer %d %d %d", SNDRV_TIMER_GLOBAL_UDRIVEN,
+ self->utimer_info->id, TICKS_COUNT * TIMER_FREQ_SEC + TICKS_RECORDING_DELTA);
+
+ rfp = popen(command, "r");
+ ASSERT_NE(rfp, NULL);
+ while (fgets(buf, TIMER_OUTPUT_BUF_LEN, rfp)) {
+ buf[TIMER_OUTPUT_BUF_LEN - 1] = 0;
+ switch (parse_timer_output(buf)) {
+ case TIMER_APP_STARTED:
+ /* global-timer waits for timer to trigger, so start the ticking thread */
+ pthread_create(&ticking_thread, NULL, ticking_func,
+ &self->utimer_info->fd);
+ break;
+ case TIMER_APP_RESULT:
+ total_ticks = parse_timer_result(buf);
+ break;
+ case TIMER_NO_EVENT:
+ break;
+ }
+ }
+ pthread_join(ticking_thread, NULL);
+ ASSERT_EQ(total_ticks, TICKS_COUNT);
+ pclose(rfp);
+ free(buf);
+}
+
+TEST(wrong_timers_test) {
+ int timer_dev_fd;
+ int utimer_fd;
+ struct snd_timer_uinfo wrong_timer = {
+ .resolution = 0,
+ .id = UTIMER_DEFAULT_ID,
+ .fd = UTIMER_DEFAULT_FD,
+ };
+
+ timer_dev_fd = open("/dev/snd/timer", O_RDONLY);
+ ASSERT_GE(timer_dev_fd, 0);
+
+ utimer_fd = ioctl(timer_dev_fd, SNDRV_TIMER_IOCTL_CREATE, &wrong_timer);
+ ASSERT_LT(utimer_fd, 0);
+ /* Check that id was not updated */
+ ASSERT_EQ(wrong_timer.id, UTIMER_DEFAULT_ID);
+
+ /* Check that a NULL argument is handled correctly */
+ ASSERT_LT(ioctl(timer_dev_fd, SNDRV_TIMER_IOCTL_CREATE, NULL), 0);
+
+ close(timer_dev_fd);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/amd-pstate/Makefile b/tools/testing/selftests/amd-pstate/Makefile
new file mode 100644
index 000000000000..c382f579fe94
--- /dev/null
+++ b/tools/testing/selftests/amd-pstate/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Makefile for amd-pstate/ function selftests
+
+# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
+all:
+
+ARCH ?= $(shell uname -m 2>/dev/null || echo not)
+ARCH := $(shell echo $(ARCH) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+
+ifeq (x86,$(ARCH))
+TEST_FILES += ../../../power/x86/amd_pstate_tracer/amd_pstate_trace.py
+TEST_FILES += ../../../power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+endif
+
+TEST_PROGS += run.sh
+TEST_FILES += basic.sh tbench.sh gitsource.sh
+
+include ../lib.mk
diff --git a/tools/testing/selftests/amd-pstate/basic.sh b/tools/testing/selftests/amd-pstate/basic.sh
new file mode 100755
index 000000000000..e4c43193e4a3
--- /dev/null
+++ b/tools/testing/selftests/amd-pstate/basic.sh
@@ -0,0 +1,38 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+# amd-pstate-ut is a test module for the amd-pstate driver.
+# It can only run on x86 architectures, and the current cpufreq driver
+# must be amd-pstate.
+# (1) It helps users verify that their processor supports the driver
+# (SBIOS/firmware or hardware).
+# (2) It gives the kernel a basic functional test to catch regressions
+# during updates.
+# (3) Further functional or performance tests can be added and compared
+# against these results, benefiting power and performance scaling optimization.
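+# The module runs its checks at load time; detailed results are reported
+# in the kernel log (dmesg).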
+
+# protect against multiple inclusion
+if [ $FILE_BASIC ]; then
+ return 0
+else
+ FILE_BASIC=DONE
+fi
+
+amd_pstate_basic()
+{
+ printf "\n---------------------------------------------\n"
+ printf "*** Running AMD P-state ut ***"
+ printf "\n---------------------------------------------\n"
+
+ if ! /sbin/modprobe -q -n amd-pstate-ut; then
+ echo "amd-pstate-ut: module amd-pstate-ut is not found [SKIP]"
+ exit $ksft_skip
+ fi
+ if /sbin/modprobe -q amd-pstate-ut; then
+ /sbin/modprobe -q -r amd-pstate-ut
+ echo "amd-pstate-basic: ok"
+ else
+ echo "amd-pstate-basic: [FAIL]"
+ exit 1
+ fi
+}
diff --git a/tools/testing/selftests/amd-pstate/config b/tools/testing/selftests/amd-pstate/config
new file mode 100644
index 000000000000..f43103c9adc4
--- /dev/null
+++ b/tools/testing/selftests/amd-pstate/config
@@ -0,0 +1 @@
+CONFIG_X86_AMD_PSTATE_UT=m
diff --git a/tools/testing/selftests/amd-pstate/gitsource.sh b/tools/testing/selftests/amd-pstate/gitsource.sh
new file mode 100755
index 000000000000..4cde62f90468
--- /dev/null
+++ b/tools/testing/selftests/amd-pstate/gitsource.sh
@@ -0,0 +1,359 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test and monitor the CPU's desired performance, frequency, load,
+# power consumption, throughput, etc. when this script triggers the
+# gitsource test.
+# 1) Download and untar the gitsource code.
+# 2) Run the gitsource benchmark on specific governors, ondemand or schedutil.
+# 3) Run a comparative gitsource benchmark on the acpi-cpufreq kernel driver.
+# 4) Get desired performance, frequency and load from perf.
+# 5) Get power consumption and throughput from amd_pstate_trace.py.
+# 6) Get the run time from /usr/bin/time.
+# 7) Analyse the test results and save them in selftest.gitsource.csv.
+# 8) Plot png images of time, energy and performance per watt for each test.
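+# Typically invoked through run.sh, e.g. "./run.sh -c gitsource -l 3"
+# (see the run.sh option list).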
+
+# protect against multiple inclusion
+if [ $FILE_GITSOURCE ]; then
+ return 0
+else
+ FILE_GITSOURCE=DONE
+fi
+
+git_name="git-2.15.1"
+git_tar="$git_name.tar.gz"
+gitsource_url="https://github.com/git/git/archive/refs/tags/v2.15.1.tar.gz"
+gitsource_governors=("ondemand" "schedutil")
+
+# $1: governor, $2: round, $3: des-perf, $4: freq, $5: load, $6: time, $7: energy, $8: PPW
+store_csv_gitsource()
+{
+ echo "$1, $2, $3, $4, $5, $6, $7, $8" | tee -a $OUTFILE_GIT.csv > /dev/null 2>&1
+}
+
+# clear some special lines
+clear_csv_gitsource()
+{
+ if [ -f $OUTFILE_GIT.csv ]; then
+ sed -i '/Comparison(%)/d' $OUTFILE_GIT.csv
+ sed -i "/$(scaling_name)/d" $OUTFILE_GIT.csv
+ fi
+}
+
+# find string $1 in file csv and get the number of lines
+get_lines_csv_gitsource()
+{
+ if [ -f $OUTFILE_GIT.csv ]; then
+ return `grep -c "$1" $OUTFILE_GIT.csv`
+ else
+ return 0
+ fi
+}
+
+pre_clear_gitsource()
+{
+ post_clear_gitsource
+ rm -rf gitsource_*.png
+ clear_csv_gitsource
+}
+
+post_clear_gitsource()
+{
+ rm -rf results/tracer-gitsource*
+ rm -rf $OUTFILE_GIT*.log
+ rm -rf $OUTFILE_GIT*.result
+}
+
+install_gitsource()
+{
+ if [ ! -d $SCRIPTDIR/$git_name ]; then
+ pushd $(pwd) > /dev/null 2>&1
+ cd $SCRIPTDIR
+ printf "Download gitsource, please wait a moment ...\n\n"
+ wget -O $git_tar $gitsource_url > /dev/null 2>&1
+
+ printf "Tar gitsource ...\n\n"
+ tar -xzf $git_tar
+ popd > /dev/null 2>&1
+ fi
+}
+
+# $1: governor, $2: loop
+run_gitsource()
+{
+ echo "Launching amd pstate tracer for $1 #$2 tracer_interval: $TRACER_INTERVAL"
+ $TRACER -n tracer-gitsource-$1-$2 -i $TRACER_INTERVAL > /dev/null 2>&1 &
+
+ printf "Make and test gitsource for $1 #$2 make_cpus: $MAKE_CPUS\n"
+ BACKUP_DIR=$(pwd)
+ pushd $BACKUP_DIR > /dev/null 2>&1
+ cd $SCRIPTDIR/$git_name
+ $PERF stat -a --per-socket -I 1000 -e power/energy-pkg/ /usr/bin/time -o $BACKUP_DIR/$OUTFILE_GIT.time-gitsource-$1-$2.log make test -j$MAKE_CPUS > $BACKUP_DIR/$OUTFILE_GIT-perf-$1-$2.log 2>&1
+ popd > /dev/null 2>&1
+
+ for job in `jobs -p`
+ do
+ echo "Waiting for job id $job"
+ wait $job
+ done
+}
+
+# $1: governor, $2: loop
+parse_gitsource()
+{
+ awk '{print $5}' results/tracer-gitsource-$1-$2/cpu.csv | sed -e '1d' | sed s/,// > $OUTFILE_GIT-des-perf-$1-$2.log
+ avg_des_perf=$(awk 'BEGIN {i=0; sum=0};{i++; sum += $1};END {print sum/i}' $OUTFILE_GIT-des-perf-$1-$2.log)
+ printf "Gitsource-$1-#$2 avg des perf: $avg_des_perf\n" | tee -a $OUTFILE_GIT.result
+
+ awk '{print $7}' results/tracer-gitsource-$1-$2/cpu.csv | sed -e '1d' | sed s/,// > $OUTFILE_GIT-freq-$1-$2.log
+ avg_freq=$(awk 'BEGIN {i=0; sum=0};{i++; sum += $1};END {print sum/i}' $OUTFILE_GIT-freq-$1-$2.log)
+ printf "Gitsource-$1-#$2 avg freq: $avg_freq\n" | tee -a $OUTFILE_GIT.result
+
+ awk '{print $11}' results/tracer-gitsource-$1-$2/cpu.csv | sed -e '1d' | sed s/,// > $OUTFILE_GIT-load-$1-$2.log
+ avg_load=$(awk 'BEGIN {i=0; sum=0};{i++; sum += $1};END {print sum/i}' $OUTFILE_GIT-load-$1-$2.log)
+ printf "Gitsource-$1-#$2 avg load: $avg_load\n" | tee -a $OUTFILE_GIT.result
+
+ grep user $OUTFILE_GIT.time-gitsource-$1-$2.log | awk '{print $1}' | sed -e 's/user//' > $OUTFILE_GIT-time-$1-$2.log
+ time_sum=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum}' $OUTFILE_GIT-time-$1-$2.log)
+ printf "Gitsource-$1-#$2 user time(s): $time_sum\n" | tee -a $OUTFILE_GIT.result
+
+ grep Joules $OUTFILE_GIT-perf-$1-$2.log | awk '{print $4}' > $OUTFILE_GIT-energy-$1-$2.log
+ en_sum=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum}' $OUTFILE_GIT-energy-$1-$2.log)
+ printf "Gitsource-$1-#$2 power consumption(J): $en_sum\n" | tee -a $OUTFILE_GIT.result
+
+ # Performance is the number of gitsource runs per second, denoted 1/t, where one gitsource run takes t
+ # seconds. It is well known that P=E/t, where P is power measured in watts(W), E is energy measured in joules(J),
+ # and t is time measured in seconds(s). This means that performance per watt becomes
+ # 1/t 1/t 1
+ # ----- = ----- = ---
+ # P E/t E
+ # with unit given by 1 per joule.
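+ # For example, en_sum = 2000 J gives ppw = 1/2000 = 0.000500000 (1/J).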
+ ppw=`echo "scale=9;1/$en_sum" | bc | awk '{printf "%.9f", $0}'`
+ printf "Gitsource-$1-#$2 performance per watt(1/J): $ppw\n" | tee -a $OUTFILE_GIT.result
+ printf "\n" | tee -a $OUTFILE_GIT.result
+
+ driver_name=$(scaling_name)
+ store_csv_gitsource "$driver_name-$1" $2 $avg_des_perf $avg_freq $avg_load $time_sum $en_sum $ppw
+}
+
+# $1: governor
+loop_gitsource()
+{
+ printf "\nGitsource total test times is $LOOP_TIMES for $1\n\n"
+ for i in `seq 1 $LOOP_TIMES`
+ do
+ run_gitsource $1 $i
+ parse_gitsource $1 $i
+ done
+}
+
+# $1: governor
+gather_gitsource()
+{
+ printf "Gitsource test result for $1 (loops:$LOOP_TIMES)" | tee -a $OUTFILE_GIT.result
+ printf "\n--------------------------------------------------\n" | tee -a $OUTFILE_GIT.result
+
+ grep "Gitsource-$1-#" $OUTFILE_GIT.result | grep "avg des perf:" | awk '{print $NF}' > $OUTFILE_GIT-des-perf-$1.log
+ avg_des_perf=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum/'$LOOP_TIMES'}' $OUTFILE_GIT-des-perf-$1.log)
+ printf "Gitsource-$1 avg des perf: $avg_des_perf\n" | tee -a $OUTFILE_GIT.result
+
+ grep "Gitsource-$1-#" $OUTFILE_GIT.result | grep "avg freq:" | awk '{print $NF}' > $OUTFILE_GIT-freq-$1.log
+ avg_freq=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum/'$LOOP_TIMES'}' $OUTFILE_GIT-freq-$1.log)
+ printf "Gitsource-$1 avg freq: $avg_freq\n" | tee -a $OUTFILE_GIT.result
+
+ grep "Gitsource-$1-#" $OUTFILE_GIT.result | grep "avg load:" | awk '{print $NF}' > $OUTFILE_GIT-load-$1.log
+ avg_load=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum/'$LOOP_TIMES'}' $OUTFILE_GIT-load-$1.log)
+ printf "Gitsource-$1 avg load: $avg_load\n" | tee -a $OUTFILE_GIT.result
+
+ grep "Gitsource-$1-#" $OUTFILE_GIT.result | grep "user time(s):" | awk '{print $NF}' > $OUTFILE_GIT-time-$1.log
+ time_sum=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum}' $OUTFILE_GIT-time-$1.log)
+ printf "Gitsource-$1 total user time(s): $time_sum\n" | tee -a $OUTFILE_GIT.result
+
+ avg_time=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum/'$LOOP_TIMES'}' $OUTFILE_GIT-time-$1.log)
+ printf "Gitsource-$1 avg user times(s): $avg_time\n" | tee -a $OUTFILE_GIT.result
+
+ grep "Gitsource-$1-#" $OUTFILE_GIT.result | grep "power consumption(J):" | awk '{print $NF}' > $OUTFILE_GIT-energy-$1.log
+ en_sum=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum}' $OUTFILE_GIT-energy-$1.log)
+ printf "Gitsource-$1 total power consumption(J): $en_sum\n" | tee -a $OUTFILE_GIT.result
+
+ avg_en=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum/'$LOOP_TIMES'}' $OUTFILE_GIT-energy-$1.log)
+ printf "Gitsource-$1 avg power consumption(J): $avg_en\n" | tee -a $OUTFILE_GIT.result
+
+ # Performance is the number of gitsource runs per second, denoted 1/t, where one gitsource run takes t
+ # seconds. It is well known that P=E/t, where P is power measured in watts(W), E is energy measured in joules(J),
+ # and t is time measured in seconds(s). This means that performance per watt becomes
+ # 1/t 1/t 1
+ # ----- = ----- = ---
+ # P E/t E
+ # with unit given by 1 per joule.
+ ppw=`echo "scale=9;1/$avg_en" | bc | awk '{printf "%.9f", $0}'`
+ printf "Gitsource-$1 performance per watt(1/J): $ppw\n" | tee -a $OUTFILE_GIT.result
+ printf "\n" | tee -a $OUTFILE_GIT.result
+
+ driver_name=$(scaling_name)
+ store_csv_gitsource "$driver_name-$1" "Average" $avg_des_perf $avg_freq $avg_load $avg_time $avg_en $ppw
+}
+
+# $1: base scaling_driver $2: base governor $3: comparison scaling_driver $4: comparison governor
+__calc_comp_gitsource()
+{
+ base=`grep "$1-$2" $OUTFILE_GIT.csv | grep "Average"`
+ comp=`grep "$3-$4" $OUTFILE_GIT.csv | grep "Average"`
+
+ if [ -n "$base" -a -n "$comp" ]; then
+ printf "\n==================================================\n" | tee -a $OUTFILE_GIT.result
+ printf "Gitsource comparison $1-$2 VS $3-$4" | tee -a $OUTFILE_GIT.result
+ printf "\n==================================================\n" | tee -a $OUTFILE_GIT.result
+
+ # get the base values
+ des_perf_base=`echo "$base" | awk '{print $3}' | sed s/,//`
+ freq_base=`echo "$base" | awk '{print $4}' | sed s/,//`
+ load_base=`echo "$base" | awk '{print $5}' | sed s/,//`
+ time_base=`echo "$base" | awk '{print $6}' | sed s/,//`
+ energy_base=`echo "$base" | awk '{print $7}' | sed s/,//`
+ ppw_base=`echo "$base" | awk '{print $8}' | sed s/,//`
+
+ # get the comparison values
+ des_perf_comp=`echo "$comp" | awk '{print $3}' | sed s/,//`
+ freq_comp=`echo "$comp" | awk '{print $4}' | sed s/,//`
+ load_comp=`echo "$comp" | awk '{print $5}' | sed s/,//`
+ time_comp=`echo "$comp" | awk '{print $6}' | sed s/,//`
+ energy_comp=`echo "$comp" | awk '{print $7}' | sed s/,//`
+ ppw_comp=`echo "$comp" | awk '{print $8}' | sed s/,//`
+
+ # compare the base and comp values
+ des_perf_drop=`echo "scale=4;($des_perf_comp-$des_perf_base)*100/$des_perf_base" | bc | awk '{printf "%.4f", $0}'`
+ printf "Gitsource-$1 des perf base: $des_perf_base comparison: $des_perf_comp percent: $des_perf_drop\n" | tee -a $OUTFILE_GIT.result
+
+ freq_drop=`echo "scale=4;($freq_comp-$freq_base)*100/$freq_base" | bc | awk '{printf "%.4f", $0}'`
+ printf "Gitsource-$1 freq base: $freq_base comparison: $freq_comp percent: $freq_drop\n" | tee -a $OUTFILE_GIT.result
+
+ load_drop=`echo "scale=4;($load_comp-$load_base)*100/$load_base" | bc | awk '{printf "%.4f", $0}'`
+ printf "Gitsource-$1 load base: $load_base comparison: $load_comp percent: $load_drop\n" | tee -a $OUTFILE_GIT.result
+
+ time_drop=`echo "scale=4;($time_comp-$time_base)*100/$time_base" | bc | awk '{printf "%.4f", $0}'`
+ printf "Gitsource-$1 time base: $time_base comparison: $time_comp percent: $time_drop\n" | tee -a $OUTFILE_GIT.result
+
+ energy_drop=`echo "scale=4;($energy_comp-$energy_base)*100/$energy_base" | bc | awk '{printf "%.4f", $0}'`
+ printf "Gitsource-$1 energy base: $energy_base comparison: $energy_comp percent: $energy_drop\n" | tee -a $OUTFILE_GIT.result
+
+ ppw_drop=`echo "scale=4;($ppw_comp-$ppw_base)*100/$ppw_base" | bc | awk '{printf "%.4f", $0}'`
+ printf "Gitsource-$1 performance per watt base: $ppw_base comparison: $ppw_comp percent: $ppw_drop\n" | tee -a $OUTFILE_GIT.result
+ printf "\n" | tee -a $OUTFILE_GIT.result
+
+ store_csv_gitsource "$1-$2 VS $3-$4" "Comparison(%)" "$des_perf_drop" "$freq_drop" "$load_drop" "$time_drop" "$energy_drop" "$ppw_drop"
+ fi
+}
+
+# calculate the comparison(%)
+calc_comp_gitsource()
+{
+ # acpi-cpufreq-ondemand VS acpi-cpufreq-schedutil
+ __calc_comp_gitsource ${all_scaling_names[0]} ${gitsource_governors[0]} ${all_scaling_names[0]} ${gitsource_governors[1]}
+
+ # amd-pstate-ondemand VS amd-pstate-schedutil
+ __calc_comp_gitsource ${all_scaling_names[1]} ${gitsource_governors[0]} ${all_scaling_names[1]} ${gitsource_governors[1]}
+
+ # acpi-cpufreq-ondemand VS amd-pstate-ondemand
+ __calc_comp_gitsource ${all_scaling_names[0]} ${gitsource_governors[0]} ${all_scaling_names[1]} ${gitsource_governors[0]}
+
+ # acpi-cpufreq-schedutil VS amd-pstate-schedutil
+ __calc_comp_gitsource ${all_scaling_names[0]} ${gitsource_governors[1]} ${all_scaling_names[1]} ${gitsource_governors[1]}
+}
+
+# $1: file_name, $2: title, $3: ylabel, $4: column
+plot_png_gitsource()
+{
+ # all_scaling_names[1] all_scaling_names[0] flag
+ # amd-pstate acpi-cpufreq
+ # N N 0
+ # N Y 1
+ # Y N 2
+ # Y Y 3
+ ret=`grep -c "${all_scaling_names[1]}" $OUTFILE_GIT.csv`
+ if [ $ret -eq 0 ]; then
+ ret=`grep -c "${all_scaling_names[0]}" $OUTFILE_GIT.csv`
+ if [ $ret -eq 0 ]; then
+ flag=0
+ else
+ flag=1
+ fi
+ else
+ ret=`grep -c "${all_scaling_names[0]}" $OUTFILE_GIT.csv`
+ if [ $ret -eq 0 ]; then
+ flag=2
+ else
+ flag=3
+ fi
+ fi
+
+ gnuplot << EOF
+ set term png
+ set output "$1"
+
+ set title "$2"
+ set xlabel "Test Cycles (round)"
+ set ylabel "$3"
+
+ set grid
+ set style data histogram
+ set style fill solid 0.5 border
+ set boxwidth 0.8
+
+ if ($flag == 1) {
+ plot \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[0]}-${gitsource_governors[0]}/p' $OUTFILE_GIT.csv)" using $4:xtic(2) title "${all_scaling_names[0]}-${gitsource_governors[0]}", \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[0]}-${gitsource_governors[1]}/p' $OUTFILE_GIT.csv)" using $4:xtic(2) title "${all_scaling_names[0]}-${gitsource_governors[1]}"
+ } else {
+ if ($flag == 2) {
+ plot \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[1]}-${gitsource_governors[0]}/p' $OUTFILE_GIT.csv)" using $4:xtic(2) title "${all_scaling_names[1]}-${gitsource_governors[0]}", \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[1]}-${gitsource_governors[1]}/p' $OUTFILE_GIT.csv)" using $4:xtic(2) title "${all_scaling_names[1]}-${gitsource_governors[1]}"
+ } else {
+ if ($flag == 3 ) {
+ plot \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[0]}-${gitsource_governors[0]}/p' $OUTFILE_GIT.csv)" using $4:xtic(2) title "${all_scaling_names[0]}-${gitsource_governors[0]}", \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[0]}-${gitsource_governors[1]}/p' $OUTFILE_GIT.csv)" using $4:xtic(2) title "${all_scaling_names[0]}-${gitsource_governors[1]}", \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[1]}-${gitsource_governors[0]}/p' $OUTFILE_GIT.csv)" using $4:xtic(2) title "${all_scaling_names[1]}-${gitsource_governors[0]}", \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[1]}-${gitsource_governors[1]}/p' $OUTFILE_GIT.csv)" using $4:xtic(2) title "${all_scaling_names[1]}-${gitsource_governors[1]}"
+ }
+ }
+ }
+ quit
+EOF
+}
+
+amd_pstate_gitsource()
+{
+ printf "\n---------------------------------------------\n"
+ printf "*** Running gitsource ***"
+ printf "\n---------------------------------------------\n"
+
+ pre_clear_gitsource
+
+ install_gitsource
+
+ get_lines_csv_gitsource "Governor"
+ if [ $? -eq 0 ]; then
+ # add titles and unit for csv file
+ store_csv_gitsource "Governor" "Round" "Des-perf" "Freq" "Load" "Time" "Energy" "Performance Per Watt"
+ store_csv_gitsource "Unit" "" "" "GHz" "" "s" "J" "1/J"
+ fi
+
+ backup_governor
+ for governor in ${gitsource_governors[*]} ; do
+ printf "\nSpecified governor is $governor\n\n"
+ switch_governor $governor
+ loop_gitsource $governor
+ gather_gitsource $governor
+ done
+ restore_governor
+
+ plot_png_gitsource "gitsource_time.png" "Gitsource Benchmark Time" "Time (s)" 6
+ plot_png_gitsource "gitsource_energy.png" "Gitsource Benchmark Energy" "Energy (J)" 7
+ plot_png_gitsource "gitsource_ppw.png" "Gitsource Benchmark Performance Per Watt" "Performance Per Watt (1/J)" 8
+
+ calc_comp_gitsource
+
+ post_clear_gitsource
+}
diff --git a/tools/testing/selftests/amd-pstate/run.sh b/tools/testing/selftests/amd-pstate/run.sh
new file mode 100755
index 000000000000..b053eea8bb19
--- /dev/null
+++ b/tools/testing/selftests/amd-pstate/run.sh
@@ -0,0 +1,396 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# protect against multiple inclusion
+if [ $FILE_MAIN ]; then
+ return 0
+else
+ FILE_MAIN=DONE
+fi
+
+SCRIPTDIR=`dirname "$0"`
+TRACER=$SCRIPTDIR/../../../power/x86/amd_pstate_tracer/amd_pstate_trace.py
+
+source $SCRIPTDIR/basic.sh
+source $SCRIPTDIR/tbench.sh
+source $SCRIPTDIR/gitsource.sh
+
+# amd-pstate-ut only runs on x86/x86_64 AMD systems.
+ARCH=$(uname -m 2>/dev/null | sed -e 's/i.86/x86/' -e 's/x86_64/x86/')
+VENDOR=$(grep -m 1 'vendor_id' /proc/cpuinfo | awk '{print $NF}')
+
+msg="Skip all tests:"
+FUNC=all
+OUTFILE=selftest
+OUTFILE_TBENCH="$OUTFILE.tbench"
+OUTFILE_GIT="$OUTFILE.gitsource"
+
+PERF=/usr/bin/perf
+SYSFS=
+CPUROOT=
+CPUFREQROOT=
+MAKE_CPUS=
+
+TIME_LIMIT=100
+PROCESS_NUM=128
+LOOP_TIMES=3
+TRACER_INTERVAL=10
+CURRENT_TEST=amd-pstate
+COMPARATIVE_TEST=
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+all_scaling_names=("acpi-cpufreq" "amd-pstate")
+
+# Get current cpufreq scaling driver name
+scaling_name()
+{
+ if [ "$COMPARATIVE_TEST" = "" ]; then
+ echo "$CURRENT_TEST"
+ else
+ echo "$COMPARATIVE_TEST"
+ fi
+}
+
+# Counts CPUs with cpufreq directories
+count_cpus()
+{
+ count=0;
+
+ for cpu in `ls $CPUROOT | grep "cpu[0-9].*"`; do
+ if [ -d $CPUROOT/$cpu/cpufreq ]; then
+ let count=count+1;
+ fi
+ done
+
+ echo $count;
+}
+
+# $1: policy
+find_current_governor()
+{
+ cat $CPUFREQROOT/$1/scaling_governor
+}
+
+backup_governor()
+{
+ policies=$(ls $CPUFREQROOT| grep "policy[0-9].*")
+ for policy in $policies; do
+ cur_gov=$(find_current_governor $policy)
+ echo "$policy $cur_gov" >> $OUTFILE.backup_governor.log
+ done
+
+ printf "Governor $cur_gov backup done.\n"
+}
+
+restore_governor()
+{
+ i=0;
+
+ policies=$(awk '{print $1}' $OUTFILE.backup_governor.log)
+ for policy in $policies; do
+ let i++;
+ governor=$(sed -n ''$i'p' $OUTFILE.backup_governor.log | awk '{print $2}')
+
+ # switch governor
+ echo $governor > $CPUFREQROOT/$policy/scaling_governor
+ done
+
+ printf "Governor restored to $governor.\n"
+}
+
+# $1: governor
+switch_governor()
+{
+ policies=$(ls $CPUFREQROOT| grep "policy[0-9].*")
+ for policy in $policies; do
+ filepath=$CPUFREQROOT/$policy/scaling_available_governors
+
+ # Exit if cpu isn't managed by cpufreq core
+ if [ ! -f $filepath ]; then
+ return;
+ fi
+
+ echo $1 > $CPUFREQROOT/$policy/scaling_governor
+ done
+
+ printf "Switched governor to $1.\n"
+}
+
+# All amd-pstate tests
+amd_pstate_all()
+{
+ printf "\n=============================================\n"
+ printf "***** Running AMD P-state Sanity Tests *****\n"
+ printf "=============================================\n\n"
+
+ count=$(count_cpus)
+ if [ $count = 0 ]; then
+ printf "No cpu is managed by cpufreq core, exiting\n"
+ exit;
+ else
+ printf "AMD P-state manages: $count CPUs\n"
+ fi
+
+ # unit test for amd-pstate kernel driver
+ amd_pstate_basic
+
+ # tbench
+ amd_pstate_tbench
+
+ # gitsource
+ amd_pstate_gitsource
+}
+
+help()
+{
+ printf "Usage: $0 [OPTION...]
+ [-h <help>]
+ [-o <output-file-for-dump>]
+ [-c <all: All testing,
+ basic: Basic testing,
+ tbench: Tbench testing,
+ gitsource: Gitsource testing.>]
+ [-t <tbench time limit>]
+ [-p <tbench process number>]
+ [-l <loop times for tbench>]
+ [-i <amd tracer interval>]
+ [-b <perf binary>]
+ [-m <comparative test: acpi-cpufreq>]
+ \n"
+ exit 2
+}
+
+parse_arguments()
+{
+ while getopts ho:c:t:p:l:i:b:m: arg
+ do
+ case $arg in
+ h) # --help
+ help
+ ;;
+
+ c) # --func_type (Function to perform: basic, tbench, gitsource (default: all))
+ FUNC=$OPTARG
+ ;;
+
+ o) # --output-file (Output file to store dumps)
+ OUTFILE=$OPTARG
+ ;;
+
+ t) # --tbench-time-limit
+ TIME_LIMIT=$OPTARG
+ ;;
+
+ p) # --tbench-process-number
+ PROCESS_NUM=$OPTARG
+ ;;
+
+ l) # --tbench/gitsource-loop-times
+ LOOP_TIMES=$OPTARG
+ ;;
+
+ i) # --amd-tracer-interval
+ TRACER_INTERVAL=$OPTARG
+ ;;
+
+ b) # --perf-binary
+ PERF=`realpath $OPTARG`
+ ;;
+
+ m) # --comparative-test
+ COMPARATIVE_TEST=$OPTARG
+ ;;
+
+ *)
+ help
+ ;;
+ esac
+ done
+}
+
+command_perf()
+{
+ if ! $PERF -v; then
+ echo $msg please install perf or provide perf binary path as argument >&2
+ exit $ksft_skip
+ fi
+}
+
+command_tbench()
+{
+ if ! command -v tbench > /dev/null; then
+ if apt policy dbench > /dev/null 2>&1; then
+ echo $msg apt install dbench >&2
+ exit $ksft_skip
+ elif yum list available | grep dbench > /dev/null 2>&1; then
+ echo $msg yum install dbench >&2
+ exit $ksft_skip
+ fi
+ fi
+
+ if ! command -v tbench > /dev/null; then
+ echo $msg please install tbench. >&2
+ exit $ksft_skip
+ fi
+}
+
+prerequisite()
+{
+ if ! echo "$ARCH" | grep -q x86; then
+ echo "$0 # Skipped: Test can only run on x86 architectures."
+ exit $ksft_skip
+ fi
+
+ if ! echo "$VENDOR" | grep -iq amd; then
+ echo "$0 # Skipped: Test can only run on AMD CPU."
+ echo "$0 # Current cpu vendor is $VENDOR."
+ exit $ksft_skip
+ fi
+
+ scaling_driver=$(cat /sys/devices/system/cpu/cpufreq/policy0/scaling_driver)
+ if [ "$COMPARATIVE_TEST" = "" ]; then
+ if [ "$scaling_driver" != "$CURRENT_TEST" ]; then
+ echo "$0 # Skipped: Test can only run on $CURRENT_TEST driver or run comparative test."
+ echo "$0 # Please set X86_AMD_PSTATE enabled or run comparative test."
+ echo "$0 # Current cpufreq scaling driver is $scaling_driver."
+ exit $ksft_skip
+ fi
+ else
+ case "$FUNC" in
+ "tbench" | "gitsource")
+ if [ "$scaling_driver" != "$COMPARATIVE_TEST" ]; then
+ echo "$0 # Skipped: Comparison test can only run on $COMPARISON_TEST driver."
+ echo "$0 # Current cpufreq scaling driver is $scaling_driver."
+ exit $ksft_skip
+ fi
+ ;;
+
+ *)
+ echo "$0 # Skipped: Comparison test are only for tbench or gitsource."
+ echo "$0 # Current comparative test is for $FUNC."
+ exit $ksft_skip
+ ;;
+ esac
+ fi
+
+ if [ ! -w /dev ]; then
+ echo $msg please run this as root >&2
+ exit $ksft_skip
+ fi
+
+ case "$FUNC" in
+ "all")
+ command_perf
+ command_tbench
+ ;;
+
+ "tbench")
+ command_perf
+ command_tbench
+ ;;
+
+ "gitsource")
+ command_perf
+ ;;
+ esac
+
+ SYSFS=`mount -t sysfs | head -1 | awk '{ print $3 }'`
+
+ if [ ! -d "$SYSFS" ]; then
+ echo $msg sysfs is not mounted >&2
+ exit 2
+ fi
+
+ CPUROOT=$SYSFS/devices/system/cpu
+ CPUFREQROOT="$CPUROOT/cpufreq"
+
+ if ! ls $CPUROOT/cpu* > /dev/null 2>&1; then
+ echo $msg cpus not available in sysfs >&2
+ exit 2
+ fi
+
+ if ! ls $CPUROOT/cpufreq > /dev/null 2>&1; then
+ echo $msg cpufreq directory not available in sysfs >&2
+ exit 2
+ fi
+}
+
+do_test()
+{
+ # Check if CPUs are managed by cpufreq or not
+ count=$(count_cpus)
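+ # Use twice the number of cpufreq-managed CPUs as parallel make jobs
+ # for the gitsource build.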
+ MAKE_CPUS=$((count*2))
+
+ if [ $count = 0 ]; then
+ echo "No cpu is managed by cpufreq core, exiting"
+ exit 2;
+ fi
+
+ case "$FUNC" in
+ "all")
+ amd_pstate_all
+ ;;
+
+ "basic")
+ amd_pstate_basic
+ ;;
+
+ "tbench")
+ amd_pstate_tbench
+ ;;
+
+ "gitsource")
+ amd_pstate_gitsource
+ ;;
+
+ *)
+ echo "Invalid [-f] function type"
+ help
+ ;;
+ esac
+}
+
+# clear dumps
+pre_clear_dumps()
+{
+ case "$FUNC" in
+ "all")
+ rm -rf $OUTFILE.log
+ rm -rf $OUTFILE.backup_governor.log
+ rm -rf *.png
+ ;;
+
+ "tbench")
+ rm -rf $OUTFILE.log
+ rm -rf $OUTFILE.backup_governor.log
+ rm -rf tbench_*.png
+ ;;
+
+ "gitsource")
+ rm -rf $OUTFILE.log
+ rm -rf $OUTFILE.backup_governor.log
+ rm -rf gitsource_*.png
+ ;;
+
+ *)
+ ;;
+ esac
+}
+
+post_clear_dumps()
+{
+ rm -rf $OUTFILE.log
+ rm -rf $OUTFILE.backup_governor.log
+}
+
+# Parse arguments
+parse_arguments $@
+
+# Make sure all requirements are met
+prerequisite
+
+# Run requested functions
+pre_clear_dumps
+do_test | tee -a $OUTFILE.log
+post_clear_dumps
diff --git a/tools/testing/selftests/amd-pstate/tbench.sh b/tools/testing/selftests/amd-pstate/tbench.sh
new file mode 100755
index 000000000000..2a98d9c9202e
--- /dev/null
+++ b/tools/testing/selftests/amd-pstate/tbench.sh
@@ -0,0 +1,339 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test and monitor the CPU's desired performance, frequency, load,
+# power consumption, throughput, etc. when this script triggers the tbench
+# test cases.
+# 1) Run the tbench benchmark on specific governors, ondemand or schedutil.
+# 2) Run a comparative tbench benchmark on the acpi-cpufreq kernel driver.
+# 3) Get desired performance, frequency and load from perf.
+# 4) Get power consumption and throughput from amd_pstate_trace.py.
+# 5) Analyse the test results and save them in selftest.tbench.csv.
+# 6) Plot png images of performance, energy and performance per watt for each test.
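+# Typically invoked through run.sh, e.g. "./run.sh -c tbench -t 100 -p 128"
+# (see the run.sh option list).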
+
+# protect against multiple inclusion
+if [ $FILE_TBENCH ]; then
+ return 0
+else
+ FILE_TBENCH=DONE
+fi
+
+tbench_governors=("ondemand" "schedutil")
+
+# $1: governor, $2: round, $3: des-perf, $4: freq, $5: load, $6: performance, $7: energy, $8: performance per watt
+store_csv_tbench()
+{
+ echo "$1, $2, $3, $4, $5, $6, $7, $8" | tee -a $OUTFILE_TBENCH.csv > /dev/null 2>&1
+}
+
+# clear some special lines
+clear_csv_tbench()
+{
+ if [ -f $OUTFILE_TBENCH.csv ]; then
+ sed -i '/Comparison(%)/d' $OUTFILE_TBENCH.csv
+ sed -i "/$(scaling_name)/d" $OUTFILE_TBENCH.csv
+ fi
+}
+
+# find string $1 in file csv and get the number of lines
+get_lines_csv_tbench()
+{
+ if [ -f $OUTFILE_TBENCH.csv ]; then
+ return `grep -c "$1" $OUTFILE_TBENCH.csv`
+ else
+ return 0
+ fi
+}
+
+pre_clear_tbench()
+{
+ post_clear_tbench
+ rm -rf tbench_*.png
+ clear_csv_tbench
+}
+
+post_clear_tbench()
+{
+ rm -rf results/tracer-tbench*
+ rm -rf $OUTFILE_TBENCH*.log
+ rm -rf $OUTFILE_TBENCH*.result
+}
+
+# $1: governor, $2: loop
+run_tbench()
+{
+ echo "Launching amd pstate tracer for $1 #$2 tracer_interval: $TRACER_INTERVAL"
+ $TRACER -n tracer-tbench-$1-$2 -i $TRACER_INTERVAL > /dev/null 2>&1 &
+
+ printf "Test tbench for $1 #$2 time_limit: $TIME_LIMIT procs_num: $PROCESS_NUM\n"
+ tbench_srv > /dev/null 2>&1 &
+ $PERF stat -a --per-socket -I 1000 -e power/energy-pkg/ tbench -t $TIME_LIMIT $PROCESS_NUM > $OUTFILE_TBENCH-perf-$1-$2.log 2>&1
+
+ pid=`pidof tbench_srv`
+ kill $pid
+
+ for job in `jobs -p`
+ do
+ echo "Waiting for job id $job"
+ wait $job
+ done
+}
+
+# $1: governor, $2: loop
+parse_tbench()
+{
+ awk '{print $5}' results/tracer-tbench-$1-$2/cpu.csv | sed -e '1d' | sed s/,// > $OUTFILE_TBENCH-des-perf-$1-$2.log
+ avg_des_perf=$(awk 'BEGIN {i=0; sum=0};{i++; sum += $1};END {print sum/i}' $OUTFILE_TBENCH-des-perf-$1-$2.log)
+ printf "Tbench-$1-#$2 avg des perf: $avg_des_perf\n" | tee -a $OUTFILE_TBENCH.result
+
+ awk '{print $7}' results/tracer-tbench-$1-$2/cpu.csv | sed -e '1d' | sed s/,// > $OUTFILE_TBENCH-freq-$1-$2.log
+ avg_freq=$(awk 'BEGIN {i=0; sum=0};{i++; sum += $1};END {print sum/i}' $OUTFILE_TBENCH-freq-$1-$2.log)
+ printf "Tbench-$1-#$2 avg freq: $avg_freq\n" | tee -a $OUTFILE_TBENCH.result
+
+ awk '{print $11}' results/tracer-tbench-$1-$2/cpu.csv | sed -e '1d' | sed s/,// > $OUTFILE_TBENCH-load-$1-$2.log
+ avg_load=$(awk 'BEGIN {i=0; sum=0};{i++; sum += $1};END {print sum/i}' $OUTFILE_TBENCH-load-$1-$2.log)
+ printf "Tbench-$1-#$2 avg load: $avg_load\n" | tee -a $OUTFILE_TBENCH.result
+
+ grep Throughput $OUTFILE_TBENCH-perf-$1-$2.log | awk '{print $2}' > $OUTFILE_TBENCH-throughput-$1-$2.log
+ tp_sum=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum}' $OUTFILE_TBENCH-throughput-$1-$2.log)
+ printf "Tbench-$1-#$2 throughput(MB/s): $tp_sum\n" | tee -a $OUTFILE_TBENCH.result
+
+ grep Joules $OUTFILE_TBENCH-perf-$1-$2.log | awk '{print $4}' > $OUTFILE_TBENCH-energy-$1-$2.log
+ en_sum=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum}' $OUTFILE_TBENCH-energy-$1-$2.log)
+ printf "Tbench-$1-#$2 power consumption(J): $en_sum\n" | tee -a $OUTFILE_TBENCH.result
+
+ # Performance is throughput per second, denoted T/t, where T is the throughput rendered in t seconds.
+ # It is well known that P=E/t, where P is power measured in watts(W), E is energy measured in joules(J),
+ # and t is time measured in seconds(s). This means that performance per watt becomes
+ # T/t T/t T
+ # --- = --- = ---
+ # P E/t E
+ # with unit given by MB per joule.
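+ # For example, TIME_LIMIT=100 s, tp_sum=500 MB/s and en_sum=9900 J give
+ # ppw = 99*500/9900 = 5.0000 MB/J.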
+ ppw=`echo "scale=4;($TIME_LIMIT-1)*$tp_sum/$en_sum" | bc | awk '{printf "%.4f", $0}'`
+ printf "Tbench-$1-#$2 performance per watt(MB/J): $ppw\n" | tee -a $OUTFILE_TBENCH.result
+ printf "\n" | tee -a $OUTFILE_TBENCH.result
+
+ driver_name=$(scaling_name)
+ store_csv_tbench "$driver_name-$1" $2 $avg_des_perf $avg_freq $avg_load $tp_sum $en_sum $ppw
+}
+
+# $1: governor
+loop_tbench()
+{
+ printf "\nTbench total test times is $LOOP_TIMES for $1\n\n"
+ for i in `seq 1 $LOOP_TIMES`
+ do
+ run_tbench $1 $i
+ parse_tbench $1 $i
+ done
+}
+
+# $1: governor
+gather_tbench()
+{
+ printf "Tbench test result for $1 (loops:$LOOP_TIMES)" | tee -a $OUTFILE_TBENCH.result
+ printf "\n--------------------------------------------------\n" | tee -a $OUTFILE_TBENCH.result
+
+ grep "Tbench-$1-#" $OUTFILE_TBENCH.result | grep "avg des perf:" | awk '{print $NF}' > $OUTFILE_TBENCH-des-perf-$1.log
+ avg_des_perf=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum/'$LOOP_TIMES'}' $OUTFILE_TBENCH-des-perf-$1.log)
+ printf "Tbench-$1 avg des perf: $avg_des_perf\n" | tee -a $OUTFILE_TBENCH.result
+
+ grep "Tbench-$1-#" $OUTFILE_TBENCH.result | grep "avg freq:" | awk '{print $NF}' > $OUTFILE_TBENCH-freq-$1.log
+ avg_freq=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum/'$LOOP_TIMES'}' $OUTFILE_TBENCH-freq-$1.log)
+ printf "Tbench-$1 avg freq: $avg_freq\n" | tee -a $OUTFILE_TBENCH.result
+
+ grep "Tbench-$1-#" $OUTFILE_TBENCH.result | grep "avg load:" | awk '{print $NF}' > $OUTFILE_TBENCH-load-$1.log
+ avg_load=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum/'$LOOP_TIMES'}' $OUTFILE_TBENCH-load-$1.log)
+ printf "Tbench-$1 avg load: $avg_load\n" | tee -a $OUTFILE_TBENCH.result
+
+ grep "Tbench-$1-#" $OUTFILE_TBENCH.result | grep "throughput(MB/s):" | awk '{print $NF}' > $OUTFILE_TBENCH-throughput-$1.log
+ tp_sum=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum}' $OUTFILE_TBENCH-throughput-$1.log)
+ printf "Tbench-$1 total throughput(MB/s): $tp_sum\n" | tee -a $OUTFILE_TBENCH.result
+
+ avg_tp=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum/'$LOOP_TIMES'}' $OUTFILE_TBENCH-throughput-$1.log)
+ printf "Tbench-$1 avg throughput(MB/s): $avg_tp\n" | tee -a $OUTFILE_TBENCH.result
+
+ grep "Tbench-$1-#" $OUTFILE_TBENCH.result | grep "power consumption(J):" | awk '{print $NF}' > $OUTFILE_TBENCH-energy-$1.log
+ en_sum=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum}' $OUTFILE_TBENCH-energy-$1.log)
+ printf "Tbench-$1 total power consumption(J): $en_sum\n" | tee -a $OUTFILE_TBENCH.result
+
+ avg_en=$(awk 'BEGIN {sum=0};{sum += $1};END {print sum/'$LOOP_TIMES'}' $OUTFILE_TBENCH-energy-$1.log)
+ printf "Tbench-$1 avg power consumption(J): $avg_en\n" | tee -a $OUTFILE_TBENCH.result
+
+ # Performance is throughput per second, denoted T/t, where T is the throughput rendered in t seconds.
+ # It is well known that P=E/t, where P is power measured in watts(W), E is energy measured in joules(J),
+ # and t is time measured in seconds(s). This means that performance per watt becomes
+ # T/t T/t T
+ # --- = --- = ---
+ # P E/t E
+ # with unit given by MB per joule.
+ ppw=`echo "scale=4;($TIME_LIMIT-1)*$avg_tp/$avg_en" | bc | awk '{printf "%.4f", $0}'`
+ printf "Tbench-$1 performance per watt(MB/J): $ppw\n" | tee -a $OUTFILE_TBENCH.result
+ printf "\n" | tee -a $OUTFILE_TBENCH.result
+
+ driver_name=$(scaling_name)
+ store_csv_tbench "$driver_name-$1" "Average" $avg_des_perf $avg_freq $avg_load $avg_tp $avg_en $ppw
+}
+
+# $1: base scaling_driver $2: base governor $3: comparative scaling_driver $4: comparative governor
+__calc_comp_tbench()
+{
+ base=`grep "$1-$2" $OUTFILE_TBENCH.csv | grep "Average"`
+ comp=`grep "$3-$4" $OUTFILE_TBENCH.csv | grep "Average"`
+
+ if [ -n "$base" -a -n "$comp" ]; then
+ printf "\n==================================================\n" | tee -a $OUTFILE_TBENCH.result
+ printf "Tbench comparison $1-$2 VS $3-$4" | tee -a $OUTFILE_TBENCH.result
+ printf "\n==================================================\n" | tee -a $OUTFILE_TBENCH.result
+
+ # get the base values
+ des_perf_base=`echo "$base" | awk '{print $3}' | sed s/,//`
+ freq_base=`echo "$base" | awk '{print $4}' | sed s/,//`
+ load_base=`echo "$base" | awk '{print $5}' | sed s/,//`
+ perf_base=`echo "$base" | awk '{print $6}' | sed s/,//`
+ energy_base=`echo "$base" | awk '{print $7}' | sed s/,//`
+ ppw_base=`echo "$base" | awk '{print $8}' | sed s/,//`
+
+ # get the comparative values
+ des_perf_comp=`echo "$comp" | awk '{print $3}' | sed s/,//`
+ freq_comp=`echo "$comp" | awk '{print $4}' | sed s/,//`
+ load_comp=`echo "$comp" | awk '{print $5}' | sed s/,//`
+ perf_comp=`echo "$comp" | awk '{print $6}' | sed s/,//`
+ energy_comp=`echo "$comp" | awk '{print $7}' | sed s/,//`
+ ppw_comp=`echo "$comp" | awk '{print $8}' | sed s/,//`
+
+ # compare the base and comp values
+ des_perf_drop=`echo "scale=4;($des_perf_comp-$des_perf_base)*100/$des_perf_base" | bc | awk '{printf "%.4f", $0}'`
+ printf "Tbench-$1 des perf base: $des_perf_base comprison: $des_perf_comp percent: $des_perf_drop\n" | tee -a $OUTFILE_TBENCH.result
+
+ freq_drop=`echo "scale=4;($freq_comp-$freq_base)*100/$freq_base" | bc | awk '{printf "%.4f", $0}'`
+ printf "Tbench-$1 freq base: $freq_base comprison: $freq_comp percent: $freq_drop\n" | tee -a $OUTFILE_TBENCH.result
+
+ load_drop=`echo "scale=4;($load_comp-$load_base)*100/$load_base" | bc | awk '{printf "%.4f", $0}'`
+ printf "Tbench-$1 load base: $load_base comprison: $load_comp percent: $load_drop\n" | tee -a $OUTFILE_TBENCH.result
+
+ perf_drop=`echo "scale=4;($perf_comp-$perf_base)*100/$perf_base" | bc | awk '{printf "%.4f", $0}'`
+ printf "Tbench-$1 perf base: $perf_base comprison: $perf_comp percent: $perf_drop\n" | tee -a $OUTFILE_TBENCH.result
+
+ energy_drop=`echo "scale=4;($energy_comp-$energy_base)*100/$energy_base" | bc | awk '{printf "%.4f", $0}'`
+ printf "Tbench-$1 energy base: $energy_base comprison: $energy_comp percent: $energy_drop\n" | tee -a $OUTFILE_TBENCH.result
+
+ ppw_drop=`echo "scale=4;($ppw_comp-$ppw_base)*100/$ppw_base" | bc | awk '{printf "%.4f", $0}'`
+ printf "Tbench-$1 performance per watt base: $ppw_base comprison: $ppw_comp percent: $ppw_drop\n" | tee -a $OUTFILE_TBENCH.result
+ printf "\n" | tee -a $OUTFILE_TBENCH.result
+
+ store_csv_tbench "$1-$2 VS $3-$4" "Comprison(%)" "$des_perf_drop" "$freq_drop" "$load_drop" "$perf_drop" "$energy_drop" "$ppw_drop"
+ fi
+}
+
+# calculate the comparison(%)
+calc_comp_tbench()
+{
+ # acpi-cpufreq-ondemand VS acpi-cpufreq-schedutil
+ __calc_comp_tbench ${all_scaling_names[0]} ${tbench_governors[0]} ${all_scaling_names[0]} ${tbench_governors[1]}
+
+ # amd-pstate-ondemand VS amd-pstate-schedutil
+ __calc_comp_tbench ${all_scaling_names[1]} ${tbench_governors[0]} ${all_scaling_names[1]} ${tbench_governors[1]}
+
+ # acpi-cpufreq-ondemand VS amd-pstate-ondemand
+ __calc_comp_tbench ${all_scaling_names[0]} ${tbench_governors[0]} ${all_scaling_names[1]} ${tbench_governors[0]}
+
+ # acpi-cpufreq-schedutil VS amd-pstate-schedutil
+ __calc_comp_tbench ${all_scaling_names[0]} ${tbench_governors[1]} ${all_scaling_names[1]} ${tbench_governors[1]}
+}
+
+# $1: file_name, $2: title, $3: ylabel, $4: column
+plot_png_tbench()
+{
+ # all_scaling_names[1] all_scaling_names[0] flag
+ # amd-pstate acpi-cpufreq
+ # N N 0
+ # N Y 1
+ # Y N 2
+ # Y Y 3
+ ret=`grep -c "${all_scaling_names[1]}" $OUTFILE_TBENCH.csv`
+ if [ $ret -eq 0 ]; then
+ ret=`grep -c "${all_scaling_names[0]}" $OUTFILE_TBENCH.csv`
+ if [ $ret -eq 0 ]; then
+ flag=0
+ else
+ flag=1
+ fi
+ else
+ ret=`grep -c "${all_scaling_names[0]}" $OUTFILE_TBENCH.csv`
+ if [ $ret -eq 0 ]; then
+ flag=2
+ else
+ flag=3
+ fi
+ fi
+
+ gnuplot << EOF
+ set term png
+ set output "$1"
+
+ set title "$2"
+ set xlabel "Test Cycles (round)"
+ set ylabel "$3"
+
+ set grid
+ set style data histogram
+ set style fill solid 0.5 border
+ set boxwidth 0.8
+
+ if ($flag == 1) {
+ plot \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[0]}-${tbench_governors[0]}/p' $OUTFILE_TBENCH.csv)" using $4:xtic(2) title "${all_scaling_names[0]}-${tbench_governors[0]}", \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[0]}-${tbench_governors[1]}/p' $OUTFILE_TBENCH.csv)" using $4:xtic(2) title "${all_scaling_names[0]}-${tbench_governors[1]}"
+ } else {
+ if ($flag == 2) {
+ plot \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[1]}-${tbench_governors[0]}/p' $OUTFILE_TBENCH.csv)" using $4:xtic(2) title "${all_scaling_names[1]}-${tbench_governors[0]}", \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[1]}-${tbench_governors[1]}/p' $OUTFILE_TBENCH.csv)" using $4:xtic(2) title "${all_scaling_names[1]}-${tbench_governors[1]}"
+ } else {
+ if ($flag == 3 ) {
+ plot \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[0]}-${tbench_governors[0]}/p' $OUTFILE_TBENCH.csv)" using $4:xtic(2) title "${all_scaling_names[0]}-${tbench_governors[0]}", \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[0]}-${tbench_governors[1]}/p' $OUTFILE_TBENCH.csv)" using $4:xtic(2) title "${all_scaling_names[0]}-${tbench_governors[1]}", \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[1]}-${tbench_governors[0]}/p' $OUTFILE_TBENCH.csv)" using $4:xtic(2) title "${all_scaling_names[1]}-${tbench_governors[0]}", \
+ "<(sed -n -e 's/,//g' -e '/${all_scaling_names[1]}-${tbench_governors[1]}/p' $OUTFILE_TBENCH.csv)" using $4:xtic(2) title "${all_scaling_names[1]}-${tbench_governors[1]}"
+ }
+ }
+ }
+ quit
+EOF
+}
+
+amd_pstate_tbench()
+{
+ printf "\n---------------------------------------------\n"
+ printf "*** Running tbench ***"
+ printf "\n---------------------------------------------\n"
+
+ pre_clear_tbench
+
+ get_lines_csv_tbench "Governor"
+ if [ $? -eq 0 ]; then
+ # add titles and unit for csv file
+ store_csv_tbench "Governor" "Round" "Des-perf" "Freq" "Load" "Performance" "Energy" "Performance Per Watt"
+ store_csv_tbench "Unit" "" "" "GHz" "" "MB/s" "J" "MB/J"
+ fi
+
+ backup_governor
+ for governor in ${tbench_governors[*]} ; do
+ printf "\nSpecified governor is $governor\n\n"
+ switch_governor $governor
+ loop_tbench $governor
+ gather_tbench $governor
+ done
+ restore_governor
+
+ plot_png_tbench "tbench_perfromance.png" "Tbench Benchmark Performance" "Performance" 6
+ plot_png_tbench "tbench_energy.png" "Tbench Benchmark Energy" "Energy (J)" 7
+ plot_png_tbench "tbench_ppw.png" "Tbench Benchmark Performance Per Watt" "Performance Per Watt (MB/J)" 8
+
+ calc_comp_tbench
+
+ post_clear_tbench
+}
diff --git a/tools/testing/selftests/arm64/Makefile b/tools/testing/selftests/arm64/Makefile
index 2c9d012797a7..c4c72ee2ef55 100644
--- a/tools/testing/selftests/arm64/Makefile
+++ b/tools/testing/selftests/arm64/Makefile
@@ -4,7 +4,7 @@
ARCH ?= $(shell uname -m 2>/dev/null || echo not)
ifneq (,$(filter $(ARCH),aarch64 arm64))
-ARM64_SUBTARGETS ?= tags signal pauth fp mte
+ARM64_SUBTARGETS ?= tags signal pauth fp mte bti abi gcs
else
ARM64_SUBTARGETS :=
endif
@@ -17,16 +17,11 @@ top_srcdir = $(realpath ../../../../)
# Additional include paths needed by kselftest.h and local headers
CFLAGS += -I$(top_srcdir)/tools/testing/selftests/
-# Guessing where the Kernel headers could have been installed
-# depending on ENV config
-ifeq ($(KBUILD_OUTPUT),)
-khdr_dir = $(top_srcdir)/usr/include
-else
-# the KSFT preferred location when KBUILD_OUTPUT is set
-khdr_dir = $(KBUILD_OUTPUT)/kselftest/usr/include
-endif
+CFLAGS += $(KHDR_INCLUDES)
+
+CFLAGS += -I$(top_srcdir)/tools/include
-CFLAGS += -I$(khdr_dir)
+OUTPUT ?= $(CURDIR)
export CFLAGS
export top_srcdir
@@ -51,7 +46,7 @@ run_tests: all
done
# Avoid any output on non arm64 on emit_tests
-emit_tests: all
+emit_tests:
@for DIR in $(ARM64_SUBTARGETS); do \
BUILD_TARGET=$(OUTPUT)/$$DIR; \
make OUTPUT=$$BUILD_TARGET -C $$DIR $@; \
diff --git a/tools/testing/selftests/arm64/abi/.gitignore b/tools/testing/selftests/arm64/abi/.gitignore
new file mode 100644
index 000000000000..44f8b80f37e3
--- /dev/null
+++ b/tools/testing/selftests/arm64/abi/.gitignore
@@ -0,0 +1,4 @@
+hwcap
+ptrace
+syscall-abi
+tpidr2
diff --git a/tools/testing/selftests/arm64/abi/Makefile b/tools/testing/selftests/arm64/abi/Makefile
new file mode 100644
index 000000000000..483488f8c2ad
--- /dev/null
+++ b/tools/testing/selftests/arm64/abi/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2021 ARM Limited
+
+TEST_GEN_PROGS := hwcap ptrace syscall-abi tpidr2
+
+include ../../lib.mk
+
+$(OUTPUT)/syscall-abi: syscall-abi.c syscall-abi-asm.S
+
+# Build with nolibc since TPIDR2 is intended to be actively managed by
+# libc and we're trying to test the functionality that it depends on here.
+$(OUTPUT)/tpidr2: tpidr2.c
+ $(CC) -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
+ -static -include ../../../../include/nolibc/nolibc.h \
+ -I../.. -ffreestanding -Wall $^ -o $@ -lgcc
diff --git a/tools/testing/selftests/arm64/abi/hwcap.c b/tools/testing/selftests/arm64/abi/hwcap.c
new file mode 100644
index 000000000000..c41640f18e4e
--- /dev/null
+++ b/tools/testing/selftests/arm64/abi/hwcap.c
@@ -0,0 +1,1295 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 ARM Limited.
+ */
+
+#include <errno.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+#include <asm/hwcap.h>
+#include <asm/sigcontext.h>
+#include <asm/unistd.h>
+
+#include <linux/auxvec.h>
+
+#include "kselftest.h"
+
+#define TESTS_PER_HWCAP 3
+
+#ifndef AT_HWCAP3
+#define AT_HWCAP3 29
+#endif
+
+/*
+ * Function expected to generate an exception when the feature is not
+ * supported, and to return normally when it is. If the expected
+ * exception is generated then the handler must be able to skip over
+ * the instruction safely.
+ *
+ * Note that many architecture extensions add no new architectural
+ * state and therefore have no specific traps, so we may not fault
+ * when running on a kernel which doesn't know to add the hwcap.
+ */
+typedef void (*sig_fn)(void);
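The contract above is exercised by installing a handler that records the signal and steps the PC past the 4-byte faulting instruction. A condensed, standalone sketch of that pattern, arm64-only, using the SVE RDVL probe from further down as its example instruction (the test proper generates these handlers with the DEF_SIGHANDLER_FUNC and DEF_INST_RAISE_SIG macros):

	#include <signal.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>
	#include <ucontext.h>

	static volatile bool seen_sigill;

	static void handle_sigill(int sig, siginfo_t *info, void *context)
	{
		ucontext_t *uc = context;

		seen_sigill = true;
		uc->uc_mcontext.pc += 4;	/* skip the offending instruction */
	}

	int main(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = handle_sigill;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGILL, &sa, NULL);

		/* RDVL x0, #0 - traps as UNDEFINED without SVE */
		asm volatile(".inst 0x04bf5000" : : : "x0");

		printf("SVE %ssupported\n", seen_sigill ? "not " : "");
		return 0;
	}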
+
+static void aes_sigill(void)
+{
+ /* AESE V0.16B, V0.16B */
+ asm volatile(".inst 0x4e284800" : : : );
+}
+
+static void atomics_sigill(void)
+{
+ /* STADD W0, [SP] */
+ asm volatile(".inst 0xb82003ff" : : : );
+}
+
+static void cmpbr_sigill(void)
+{
+ /* Not implemented, too complicated and unreliable anyway */
+}
+
+static void crc32_sigill(void)
+{
+ /* CRC32W W0, W0, W1 */
+ asm volatile(".inst 0x1ac14800" : : : );
+}
+
+static void cssc_sigill(void)
+{
+ /* CNT x0, x0 */
+ asm volatile(".inst 0xdac01c00" : : : "x0");
+}
+
+static void f8cvt_sigill(void)
+{
+ /* FSCALE V0.4H, V0.4H, V0.4H */
+ asm volatile(".inst 0x2ec03c00");
+}
+
+static void f8dp2_sigill(void)
+{
+ /* FDOT V0.4H, V0.4H, V0.5H */
+ asm volatile(".inst 0xe40fc00");
+}
+
+static void f8dp4_sigill(void)
+{
+ /* FDOT V0.2S, V0.2S, V0.2S */
+ asm volatile(".inst 0xe00fc00");
+}
+
+static void f8fma_sigill(void)
+{
+ /* FMLALB V0.8H, V0.16B, V0.16B */
+ asm volatile(".inst 0xec0fc00");
+}
+
+static void f8mm4_sigill(void)
+{
+ /* FMMLA V0.4SH, V0.16B, V0.16B */
+ asm volatile(".inst 0x6e00ec00");
+}
+
+static void f8mm8_sigill(void)
+{
+ /* FMMLA V0.4S, V0.16B, V0.16B */
+ asm volatile(".inst 0x6e80ec00");
+}
+
+static void faminmax_sigill(void)
+{
+ /* FAMIN V0.4H, V0.4H, V0.4H */
+ asm volatile(".inst 0x2ec01c00");
+}
+
+static void fp_sigill(void)
+{
+ asm volatile("fmov s0, #1");
+}
+
+static void fpmr_sigill(void)
+{
+ asm volatile("mrs x0, S3_3_C4_C4_2" : : : "x0");
+}
+
+static void fprcvt_sigill(void)
+{
+ /* FCVTAS S0, H0 */
+ asm volatile(".inst 0x1efa0000");
+}
+
+static void gcs_sigill(void)
+{
+ unsigned long *gcspr;
+
+ asm volatile(
+ "mrs %0, S3_3_C2_C5_1"
+ : "=r" (gcspr)
+ :
+ : "cc");
+}
+
+static void ilrcpc_sigill(void)
+{
+ /* LDAPUR W0, [SP, #8] */
+ asm volatile(".inst 0x994083e0" : : : );
+}
+
+static void jscvt_sigill(void)
+{
+ /* FJCVTZS W0, D0 */
+ asm volatile(".inst 0x1e7e0000" : : : );
+}
+
+static void lrcpc_sigill(void)
+{
+ /* LDAPR W0, [SP, #0] */
+ asm volatile(".inst 0xb8bfc3e0" : : : );
+}
+
+static void lse128_sigill(void)
+{
+ u64 __attribute__ ((aligned (16))) mem[2] = { 10, 20 };
+ register u64 *memp asm ("x0") = mem;
+ register u64 val0 asm ("x1") = 5;
+ register u64 val1 asm ("x2") = 4;
+
+ /* SWPP X1, X2, [X0] */
+ asm volatile(".inst 0x19228001"
+ : "+r" (memp), "+r" (val0), "+r" (val1)
+ :
+ : "cc", "memory");
+}
+
+static void lsfe_sigill(void)
+{
+ float __attribute__ ((aligned (16))) mem;
+ register float *memp asm ("x0") = &mem;
+
+ /* STFADD H0, [X0] */
+ asm volatile(".inst 0x7c20801f"
+ : "+r" (memp)
+ :
+ : "memory");
+}
+
+static void lut_sigill(void)
+{
+ /* LUTI2 V0.16B, { V0.16B }, V[0] */
+ asm volatile(".inst 0x4e801000");
+}
+
+static void mops_sigill(void)
+{
+ char dst[1], src[1];
+ register char *dstp asm ("x0") = dst;
+ register char *srcp asm ("x1") = src;
+ register long size asm ("x2") = 1;
+
+ /* CPYP [x0]!, [x1]!, x2! */
+ asm volatile(".inst 0x1d010440"
+ : "+r" (dstp), "+r" (srcp), "+r" (size)
+ :
+ : "cc", "memory");
+}
+
+static void pmull_sigill(void)
+{
+ /* PMULL V0.1Q, V0.1D, V0.1D */
+ asm volatile(".inst 0x0ee0e000" : : : );
+}
+
+static void poe_sigill(void)
+{
+ /* mrs x0, POR_EL0 */
+ asm volatile("mrs x0, S3_3_C10_C2_4" : : : "x0");
+}
+
+static void rng_sigill(void)
+{
+ asm volatile("mrs x0, S3_3_C2_C4_0" : : : "x0");
+}
+
+static void sha1_sigill(void)
+{
+ /* SHA1H S0, S0 */
+ asm volatile(".inst 0x5e280800" : : : );
+}
+
+static void sha2_sigill(void)
+{
+ /* SHA256H Q0, Q0, V0.4S */
+ asm volatile(".inst 0x5e004000" : : : );
+}
+
+static void sha512_sigill(void)
+{
+ /* SHA512H Q0, Q0, V0.2D */
+ asm volatile(".inst 0xce608000" : : : );
+}
+
+static void sme_sigill(void)
+{
+ /* RDSVL x0, #0 */
+ asm volatile(".inst 0x04bf5800" : : : "x0");
+}
+
+static void sme2_sigill(void)
+{
+ /* SMSTART ZA */
+ asm volatile("msr S0_3_C4_C5_3, xzr" : : : );
+
+ /* ZERO ZT0 */
+ asm volatile(".inst 0xc0480001" : : : );
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void sme2p1_sigill(void)
+{
+ /* SMSTART SM */
+ asm volatile("msr S0_3_C4_C3_3, xzr" : : : );
+
+ /* BFCLAMP { Z0.H - Z1.H }, Z0.H, Z0.H */
+ asm volatile(".inst 0xc120C000" : : : );
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void sme2p2_sigill(void)
+{
+ /* SMSTART SM */
+ asm volatile("msr S0_3_C4_C3_3, xzr" : : : );
+
+ /* UXTB Z0.D, P0/Z, Z0.D */
+ asm volatile(".inst 0x4c1a000" : : : );
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void sme_aes_sigill(void)
+{
+ /* SMSTART SM */
+ asm volatile("msr S0_3_C4_C3_3, xzr" : : : );
+
+ /* AESD z0.b, z0.b, z0.b */
+ asm volatile(".inst 0x4522e400" : : : "z0");
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void sme_sbitperm_sigill(void)
+{
+ /* SMSTART SM */
+ asm volatile("msr S0_3_C4_C3_3, xzr" : : : );
+
+ /* BDEP Z0.B, Z0.B, Z0.B */
+ asm volatile(".inst 0x4500b400" : : : "z0");
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void smei16i32_sigill(void)
+{
+ /* SMSTART */
+ asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
+
+ /* SMOPA ZA0.S, P0/M, P0/M, Z0.B, Z0.B */
+ asm volatile(".inst 0xa0800000" : : : );
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void smebi32i32_sigill(void)
+{
+ /* SMSTART */
+ asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
+
+ /* BMOPA ZA0.S, P0/M, P0/M, Z0.B, Z0.B */
+ asm volatile(".inst 0x80800008" : : : );
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void smeb16b16_sigill(void)
+{
+ /* SMSTART */
+ asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
+
+ /* BFADD ZA.H[W0, 0], {Z0.H-Z1.H} */
+ asm volatile(".inst 0xC1E41C00" : : : );
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void smef16f16_sigill(void)
+{
+ /* SMSTART */
+ asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
+
+ /* FADD ZA.H[W0, 0], { Z0.H-Z1.H } */
+ asm volatile(".inst 0xc1a41C00" : : : );
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void smef8f16_sigill(void)
+{
+ /* SMSTART */
+ asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
+
+ /* FDOT ZA.H[W0, 0], Z0.B-Z1.B, Z0.B-Z1.B */
+ asm volatile(".inst 0xc1a01020" : : : );
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void smef8f32_sigill(void)
+{
+ /* SMSTART */
+ asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
+
+ /* FDOT ZA.S[W0, 0], { Z0.B-Z1.B }, Z0.B[0] */
+ asm volatile(".inst 0xc1500038" : : : );
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void smelutv2_sigill(void)
+{
+ /* SMSTART */
+ asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
+
+ /* LUTI4 { Z0.B-Z3.B }, ZT0, { Z0-Z1 } */
+ asm volatile(".inst 0xc08b0000" : : : );
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void smesf8dp2_sigill(void)
+{
+ /* SMSTART */
+ asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
+
+ /* FDOT Z0.H, Z0.B, Z0.B[0] */
+ asm volatile(".inst 0x64204400" : : : );
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void smesf8dp4_sigill(void)
+{
+ /* SMSTART */
+ asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
+
+ /* FDOT Z0.S, Z0.B, Z0.B[0] */
+ asm volatile(".inst 0xc1a41C00" : : : );
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void smesf8fma_sigill(void)
+{
+ /* SMSTART */
+ asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
+
+ /* FMLALB Z0.8H, Z0.B, Z0.B */
+ asm volatile(".inst 0x64205000");
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void smesfexpa_sigill(void)
+{
+ /* SMSTART */
+ asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
+
+ /* FEXPA Z0.D, Z0.D */
+ asm volatile(".inst 0x04e0b800");
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void smesmop4_sigill(void)
+{
+ /* SMSTART */
+ asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
+
+ /* SMOP4A ZA0.S, Z0.B, { Z0.B - Z1.B } */
+ asm volatile(".inst 0x80108000");
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void smestmop_sigill(void)
+{
+ /* SMSTART */
+ asm volatile("msr S0_3_C4_C7_3, xzr" : : : );
+
+ /* STMOPA ZA0.S, { Z0.H - Z1.H }, Z0.H, Z20[0] */
+ asm volatile(".inst 0x80408008");
+
+ /* SMSTOP */
+ asm volatile("msr S0_3_C4_C6_3, xzr" : : : );
+}
+
+static void sve_sigill(void)
+{
+ /* RDVL x0, #0 */
+ asm volatile(".inst 0x04bf5000" : : : "x0");
+}
+
+static void sve2_sigill(void)
+{
+ /* SQABS Z0.b, P0/M, Z0.B */
+ asm volatile(".inst 0x4408A000" : : : "z0");
+}
+
+static void sve2p1_sigill(void)
+{
+ /* BFADD Z0.H, Z0.H, Z0.H */
+ asm volatile(".inst 0x65000000" : : : "z0");
+}
+
+static void sve2p2_sigill(void)
+{
+ /* NOT Z0.D, P0/Z, Z0.D */
+ asm volatile(".inst 0x4cea000" : : : "z0");
+}
+
+static void sveaes_sigill(void)
+{
+ /* AESD z0.b, z0.b, z0.b */
+ asm volatile(".inst 0x4522e400" : : : "z0");
+}
+
+static void sveaes2_sigill(void)
+{
+ /* AESD {Z0.B - Z1.B }, { Z0.B - Z1.B }, Z0.Q */
+ asm volatile(".inst 0x4522ec00" : : : "z0");
+}
+
+static void sveb16b16_sigill(void)
+{
+ /* BFADD Z0.H, Z0.H, Z0.H */
+ asm volatile(".inst 0x65000000" : : : );
+}
+
+static void svebfscale_sigill(void)
+{
+ /* BFSCALE Z0.H, P0/M, Z0.H, Z0.H */
+ asm volatile(".inst 0x65098000" : : : "z0");
+}
+
+static void svef16mm_sigill(void)
+{
+ /* FMMLA Z0.S, Z0.H, Z0.H */
+ asm volatile(".inst 0x6420e400");
+}
+
+static void svepmull_sigill(void)
+{
+ /* PMULLB Z0.Q, Z0.D, Z0.D */
+ asm volatile(".inst 0x45006800" : : : "z0");
+}
+
+static void svebitperm_sigill(void)
+{
+ /* BDEP Z0.B, Z0.B, Z0.B */
+ asm volatile(".inst 0x4500b400" : : : "z0");
+}
+
+static void svesha3_sigill(void)
+{
+ /* EOR3 Z0.D, Z0.D, Z0.D, Z0.D */
+ asm volatile(".inst 0x4203800" : : : "z0");
+}
+
+static void sveeltperm_sigill(void)
+{
+ /* COMPACT Z0.B, P0, Z0.B */
+ asm volatile(".inst 0x5218000" : : : "x0");
+}
+
+static void svesm4_sigill(void)
+{
+ /* SM4E Z0.S, Z0.S, Z0.S */
+ asm volatile(".inst 0x4523e000" : : : "z0");
+}
+
+static void svei8mm_sigill(void)
+{
+ /* USDOT Z0.S, Z0.B, Z0.B[0] */
+ asm volatile(".inst 0x44a01800" : : : "z0");
+}
+
+static void svef32mm_sigill(void)
+{
+ /* FMMLA Z0.S, Z0.S, Z0.S */
+ asm volatile(".inst 0x64a0e400" : : : "z0");
+}
+
+static void svef64mm_sigill(void)
+{
+ /* FMMLA Z0.D, Z0.D, Z0.D */
+ asm volatile(".inst 0x64e0e400" : : : "z0");
+}
+
+static void svebf16_sigill(void)
+{
+ /* BFCVT Z0.H, P0/M, Z0.S */
+ asm volatile(".inst 0x658aa000" : : : "z0");
+}
+
+static void hbc_sigill(void)
+{
+ /* BC.EQ +4 */
+ asm volatile("cmp xzr, xzr\n"
+ ".inst 0x54000030" : : : "cc");
+}
+
+static void uscat_sigbus(void)
+{
+ /* unaligned atomic access */
+ asm volatile("ADD x1, sp, #2" : : : );
+ /* STADD W0, [X1] */
+ asm volatile(".inst 0xb820003f" : : : );
+}
+
+static void lrcpc3_sigill(void)
+{
+ int data[2] = { 1, 2 };
+
+ register int *src asm ("x0") = data;
+ register int data0 asm ("w2") = 0;
+ register int data1 asm ("w3") = 0;
+
+ /* LDIAPP w2, w3, [x0] */
+ asm volatile(".inst 0x99431802"
+ : "=r" (data0), "=r" (data1) : "r" (src) :);
+}
+
+static const struct hwcap_data {
+ const char *name;
+ unsigned long at_hwcap;
+ unsigned long hwcap_bit;
+ const char *cpuinfo;
+ sig_fn sigill_fn;
+ bool sigill_reliable;
+ sig_fn sigbus_fn;
+ bool sigbus_reliable;
+} hwcaps[] = {
+ {
+ .name = "AES",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_AES,
+ .cpuinfo = "aes",
+ .sigill_fn = aes_sigill,
+ },
+ {
+ .name = "CMPBR",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_CMPBR,
+ .cpuinfo = "cmpbr",
+ .sigill_fn = cmpbr_sigill,
+ },
+ {
+ .name = "CRC32",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_CRC32,
+ .cpuinfo = "crc32",
+ .sigill_fn = crc32_sigill,
+ },
+ {
+ .name = "CSSC",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_CSSC,
+ .cpuinfo = "cssc",
+ .sigill_fn = cssc_sigill,
+ },
+ {
+ .name = "F8CVT",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_F8CVT,
+ .cpuinfo = "f8cvt",
+ .sigill_fn = f8cvt_sigill,
+ },
+ {
+ .name = "F8DP4",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_F8DP4,
+ .cpuinfo = "f8dp4",
+ .sigill_fn = f8dp4_sigill,
+ },
+ {
+ .name = "F8DP2",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_F8DP2,
+ .cpuinfo = "f8dp2",
+ .sigill_fn = f8dp2_sigill,
+ },
+ {
+ .name = "F8E5M2",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_F8E5M2,
+ .cpuinfo = "f8e5m2",
+ },
+ {
+ .name = "F8E4M3",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_F8E4M3,
+ .cpuinfo = "f8e4m3",
+ },
+ {
+ .name = "F8FMA",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_F8FMA,
+ .cpuinfo = "f8fma",
+ .sigill_fn = f8fma_sigill,
+ },
+ {
+ .name = "F8MM8",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_F8MM8,
+ .cpuinfo = "f8mm8",
+ .sigill_fn = f8mm8_sigill,
+ },
+ {
+ .name = "F8MM4",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_F8MM4,
+ .cpuinfo = "f8mm4",
+ .sigill_fn = f8mm4_sigill,
+ },
+ {
+ .name = "FAMINMAX",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_FAMINMAX,
+ .cpuinfo = "faminmax",
+ .sigill_fn = faminmax_sigill,
+ },
+ {
+ .name = "FP",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_FP,
+ .cpuinfo = "fp",
+ .sigill_fn = fp_sigill,
+ },
+ {
+ .name = "FPMR",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_FPMR,
+ .cpuinfo = "fpmr",
+ .sigill_fn = fpmr_sigill,
+ .sigill_reliable = true,
+ },
+ {
+ .name = "FPRCVT",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_FPRCVT,
+ .cpuinfo = "fprcvt",
+ .sigill_fn = fprcvt_sigill,
+ },
+ {
+ .name = "GCS",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_GCS,
+ .cpuinfo = "gcs",
+ .sigill_fn = gcs_sigill,
+ .sigill_reliable = true,
+ },
+ {
+ .name = "JSCVT",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_JSCVT,
+ .cpuinfo = "jscvt",
+ .sigill_fn = jscvt_sigill,
+ },
+ {
+ .name = "LRCPC",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_LRCPC,
+ .cpuinfo = "lrcpc",
+ .sigill_fn = lrcpc_sigill,
+ },
+ {
+ .name = "LRCPC2",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_ILRCPC,
+ .cpuinfo = "ilrcpc",
+ .sigill_fn = ilrcpc_sigill,
+ },
+ {
+ .name = "LRCPC3",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_LRCPC3,
+ .cpuinfo = "lrcpc3",
+ .sigill_fn = lrcpc3_sigill,
+ },
+ {
+ .name = "LSE",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_ATOMICS,
+ .cpuinfo = "atomics",
+ .sigill_fn = atomics_sigill,
+ },
+ {
+ .name = "LSE2",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_USCAT,
+ .cpuinfo = "uscat",
+ .sigill_fn = atomics_sigill,
+ .sigbus_fn = uscat_sigbus,
+ .sigbus_reliable = true,
+ },
+ {
+ .name = "LSE128",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_LSE128,
+ .cpuinfo = "lse128",
+ .sigill_fn = lse128_sigill,
+ },
+ {
+ .name = "LSFE",
+ .at_hwcap = AT_HWCAP3,
+ .hwcap_bit = HWCAP3_LSFE,
+ .cpuinfo = "lsfe",
+ .sigill_fn = lsfe_sigill,
+ },
+ {
+ .name = "LUT",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_LUT,
+ .cpuinfo = "lut",
+ .sigill_fn = lut_sigill,
+ },
+ {
+ .name = "MOPS",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_MOPS,
+ .cpuinfo = "mops",
+ .sigill_fn = mops_sigill,
+ .sigill_reliable = true,
+ },
+ {
+ .name = "PMULL",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_PMULL,
+ .cpuinfo = "pmull",
+ .sigill_fn = pmull_sigill,
+ },
+ {
+ .name = "POE",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_POE,
+ .cpuinfo = "poe",
+ .sigill_fn = poe_sigill,
+ .sigill_reliable = true,
+ },
+ {
+ .name = "RNG",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_RNG,
+ .cpuinfo = "rng",
+ .sigill_fn = rng_sigill,
+ },
+ {
+ .name = "RPRFM",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_RPRFM,
+ .cpuinfo = "rprfm",
+ },
+ {
+ .name = "SHA1",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SHA1,
+ .cpuinfo = "sha1",
+ .sigill_fn = sha1_sigill,
+ },
+ {
+ .name = "SHA2",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SHA2,
+ .cpuinfo = "sha2",
+ .sigill_fn = sha2_sigill,
+ },
+ {
+ .name = "SHA512",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SHA512,
+ .cpuinfo = "sha512",
+ .sigill_fn = sha512_sigill,
+ },
+ {
+ .name = "SME",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SME,
+ .cpuinfo = "sme",
+ .sigill_fn = sme_sigill,
+ .sigill_reliable = true,
+ },
+ {
+ .name = "SME2",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SME2,
+ .cpuinfo = "sme2",
+ .sigill_fn = sme2_sigill,
+ .sigill_reliable = true,
+ },
+ {
+ .name = "SME 2.1",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SME2P1,
+ .cpuinfo = "sme2p1",
+ .sigill_fn = sme2p1_sigill,
+ },
+ {
+ .name = "SME 2.2",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SME2P2,
+ .cpuinfo = "sme2p2",
+ .sigill_fn = sme2p2_sigill,
+ },
+ {
+ .name = "SME AES",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SME_AES,
+ .cpuinfo = "smeaes",
+ .sigill_fn = sme_aes_sigill,
+ },
+ {
+ .name = "SME I16I32",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SME_I16I32,
+ .cpuinfo = "smei16i32",
+ .sigill_fn = smei16i32_sigill,
+ },
+ {
+ .name = "SME BI32I32",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SME_BI32I32,
+ .cpuinfo = "smebi32i32",
+ .sigill_fn = smebi32i32_sigill,
+ },
+ {
+ .name = "SME B16B16",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SME_B16B16,
+ .cpuinfo = "smeb16b16",
+ .sigill_fn = smeb16b16_sigill,
+ },
+ {
+ .name = "SME F16F16",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SME_F16F16,
+ .cpuinfo = "smef16f16",
+ .sigill_fn = smef16f16_sigill,
+ },
+ {
+ .name = "SME F8F16",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SME_F8F16,
+ .cpuinfo = "smef8f16",
+ .sigill_fn = smef8f16_sigill,
+ },
+ {
+ .name = "SME F8F32",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SME_F8F32,
+ .cpuinfo = "smef8f32",
+ .sigill_fn = smef8f32_sigill,
+ },
+ {
+ .name = "SME LUTV2",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SME_LUTV2,
+ .cpuinfo = "smelutv2",
+ .sigill_fn = smelutv2_sigill,
+ },
+ {
+ .name = "SME SBITPERM",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SME_SBITPERM,
+ .cpuinfo = "smesbitperm",
+ .sigill_fn = sme_sbitperm_sigill,
+ },
+ {
+ .name = "SME SF8FMA",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SME_SF8FMA,
+ .cpuinfo = "smesf8fma",
+ .sigill_fn = smesf8fma_sigill,
+ },
+ {
+ .name = "SME SF8DP2",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SME_SF8DP2,
+ .cpuinfo = "smesf8dp2",
+ .sigill_fn = smesf8dp2_sigill,
+ },
+ {
+ .name = "SME SF8DP4",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SME_SF8DP4,
+ .cpuinfo = "smesf8dp4",
+ .sigill_fn = smesf8dp4_sigill,
+ },
+ {
+ .name = "SME SFEXPA",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SME_SFEXPA,
+ .cpuinfo = "smesfexpa",
+ .sigill_fn = smesfexpa_sigill,
+ },
+ {
+ .name = "SME SMOP4",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SME_SMOP4,
+ .cpuinfo = "smesmop4",
+ .sigill_fn = smesmop4_sigill,
+ },
+ {
+ .name = "SME STMOP",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SME_STMOP,
+ .cpuinfo = "smestmop",
+ .sigill_fn = smestmop_sigill,
+ },
+ {
+ .name = "SVE",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SVE,
+ .cpuinfo = "sve",
+ .sigill_fn = sve_sigill,
+ .sigill_reliable = true,
+ },
+ {
+ .name = "SVE 2",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SVE2,
+ .cpuinfo = "sve2",
+ .sigill_fn = sve2_sigill,
+ },
+ {
+ .name = "SVE 2.1",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SVE2P1,
+ .cpuinfo = "sve2p1",
+ .sigill_fn = sve2p1_sigill,
+ },
+ {
+ .name = "SVE 2.2",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SVE2P2,
+ .cpuinfo = "sve2p2",
+ .sigill_fn = sve2p2_sigill,
+ },
+ {
+ .name = "SVE AES",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SVEAES,
+ .cpuinfo = "sveaes",
+ .sigill_fn = sveaes_sigill,
+ },
+ {
+ .name = "SVE AES2",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SVE_AES2,
+ .cpuinfo = "sveaes2",
+ .sigill_fn = sveaes2_sigill,
+ },
+ {
+ .name = "SVE BFSCALE",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SVE_BFSCALE,
+ .cpuinfo = "svebfscale",
+ .sigill_fn = svebfscale_sigill,
+ },
+ {
+ .name = "SVE ELTPERM",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SVE_ELTPERM,
+ .cpuinfo = "sveeltperm",
+ .sigill_fn = sveeltperm_sigill,
+ },
+ {
+ .name = "SVE F16MM",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SVE_F16MM,
+ .cpuinfo = "svef16mm",
+ .sigill_fn = svef16mm_sigill,
+ },
+ {
+ .name = "SVE2 B16B16",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SVE_B16B16,
+ .cpuinfo = "sveb16b16",
+ .sigill_fn = sveb16b16_sigill,
+ },
+ {
+ .name = "SVE2 PMULL",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SVEPMULL,
+ .cpuinfo = "svepmull",
+ .sigill_fn = svepmull_sigill,
+ },
+ {
+ .name = "SVE2 BITPERM",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SVEBITPERM,
+ .cpuinfo = "svebitperm",
+ .sigill_fn = svebitperm_sigill,
+ },
+ {
+ .name = "SVE2 SHA3",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SVESHA3,
+ .cpuinfo = "svesha3",
+ .sigill_fn = svesha3_sigill,
+ },
+ {
+ .name = "SVE2 SM4",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SVESM4,
+ .cpuinfo = "svesm4",
+ .sigill_fn = svesm4_sigill,
+ },
+ {
+ .name = "SVE2 I8MM",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SVEI8MM,
+ .cpuinfo = "svei8mm",
+ .sigill_fn = svei8mm_sigill,
+ },
+ {
+ .name = "SVE2 F32MM",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SVEF32MM,
+ .cpuinfo = "svef32mm",
+ .sigill_fn = svef32mm_sigill,
+ },
+ {
+ .name = "SVE2 F64MM",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SVEF64MM,
+ .cpuinfo = "svef64mm",
+ .sigill_fn = svef64mm_sigill,
+ },
+ {
+ .name = "SVE2 BF16",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SVEBF16,
+ .cpuinfo = "svebf16",
+ .sigill_fn = svebf16_sigill,
+ },
+ {
+ .name = "SVE2 EBF16",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_SVE_EBF16,
+ .cpuinfo = "sveebf16",
+ },
+ {
+ .name = "HBC",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_HBC,
+ .cpuinfo = "hbc",
+ .sigill_fn = hbc_sigill,
+ .sigill_reliable = true,
+ },
+ {
+ .name = "MTE_FAR",
+ .at_hwcap = AT_HWCAP3,
+ .hwcap_bit = HWCAP3_MTE_FAR,
+ .cpuinfo = "mtefar",
+ },
+ {
+ .name = "MTE_STOREONLY",
+ .at_hwcap = AT_HWCAP3,
+ .hwcap_bit = HWCAP3_MTE_STORE_ONLY,
+ .cpuinfo = "mtestoreonly",
+ },
+};
+
+typedef void (*sighandler_fn)(int, siginfo_t *, void *);
+
+#define DEF_SIGHANDLER_FUNC(SIG, NUM) \
+static bool seen_##SIG; \
+static void handle_##SIG(int sig, siginfo_t *info, void *context) \
+{ \
+ ucontext_t *uc = context; \
+ \
+ seen_##SIG = true; \
+ /* Skip over the offending instruction */ \
+ uc->uc_mcontext.pc += 4; \
+}
+
+DEF_SIGHANDLER_FUNC(sigill, SIGILL);
+DEF_SIGHANDLER_FUNC(sigbus, SIGBUS);
+
+bool cpuinfo_present(const char *name)
+{
+ FILE *f;
+ char buf[2048], name_space[30], name_newline[30];
+ char *s;
+
+ /*
+ * The feature should appear with a leading space and either a
+ * trailing space or a newline, so that a name which is a prefix of
+ * another (eg, "sve" and "sve2") is not falsely matched.
+ */
+ snprintf(name_space, sizeof(name_space), " %s ", name);
+ snprintf(name_newline, sizeof(name_newline), " %s\n", name);
+
+ f = fopen("/proc/cpuinfo", "r");
+ if (!f) {
+ ksft_print_msg("Failed to open /proc/cpuinfo\n");
+ return false;
+ }
+
+ while (fgets(buf, sizeof(buf), f)) {
+ /* Features: line? */
+ if (strncmp(buf, "Features\t:", strlen("Features\t:")) != 0)
+ continue;
+
+ /* All CPUs should be symmetric, don't read any more */
+ fclose(f);
+
+ s = strstr(buf, name_space);
+ if (s)
+ return true;
+ s = strstr(buf, name_newline);
+ if (s)
+ return true;
+
+ return false;
+ }
+
+ ksft_print_msg("Failed to find Features in /proc/cpuinfo\n");
+ fclose(f);
+ return false;
+}
+
+static int install_sigaction(int signum, sighandler_fn handler)
+{
+ int ret;
+ struct sigaction sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = handler;
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ sigemptyset(&sa.sa_mask);
+ ret = sigaction(signum, &sa, NULL);
+ if (ret < 0)
+ ksft_exit_fail_msg("Failed to install SIGNAL handler: %s (%d)\n",
+ strerror(errno), errno);
+
+ return ret;
+}
+
+static void uninstall_sigaction(int signum)
+{
+ if (sigaction(signum, NULL, NULL) < 0)
+ ksft_exit_fail_msg("Failed to uninstall SIGNAL handler: %s (%d)\n",
+ strerror(errno), errno);
+}
+
+#define DEF_INST_RAISE_SIG(SIG, NUM) \
+static bool inst_raise_##SIG(const struct hwcap_data *hwcap, \
+ bool have_hwcap) \
+{ \
+ if (!hwcap->SIG##_fn) { \
+ ksft_test_result_skip(#SIG"_%s\n", hwcap->name); \
+ /* assume that it would raise an exception by default */ \
+ return true; \
+ } \
+ \
+ install_sigaction(NUM, handle_##SIG); \
+ \
+ seen_##SIG = false; \
+ hwcap->SIG##_fn(); \
+ \
+ if (have_hwcap) { \
+ /* Should be able to use the extension */ \
+ ksft_test_result(!seen_##SIG, \
+ #SIG"_%s\n", hwcap->name); \
+ } else if (hwcap->SIG##_reliable) { \
+ /* Guaranteed a SIGNAL */ \
+ ksft_test_result(seen_##SIG, \
+ #SIG"_%s\n", hwcap->name); \
+ } else { \
+ /* Missing SIGNAL might be fine */ \
+ ksft_print_msg(#SIG"_%sreported for %s\n", \
+ seen_##SIG ? "" : "not ", \
+ hwcap->name); \
+ ksft_test_result_skip(#SIG"_%s\n", \
+ hwcap->name); \
+ } \
+ \
+ uninstall_sigaction(NUM); \
+ return seen_##SIG; \
+}
+
+DEF_INST_RAISE_SIG(sigill, SIGILL);
+DEF_INST_RAISE_SIG(sigbus, SIGBUS);
+
+int main(void)
+{
+ int i;
+ const struct hwcap_data *hwcap;
+ bool have_cpuinfo, have_hwcap, raise_sigill;
+
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(hwcaps) * TESTS_PER_HWCAP);
+
+ for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
+ hwcap = &hwcaps[i];
+
+ have_hwcap = getauxval(hwcap->at_hwcap) & hwcap->hwcap_bit;
+ have_cpuinfo = cpuinfo_present(hwcap->cpuinfo);
+
+ if (have_hwcap)
+ ksft_print_msg("%s present\n", hwcap->name);
+
+ ksft_test_result(have_hwcap == have_cpuinfo,
+ "cpuinfo_match_%s\n", hwcap->name);
+
+ /*
+ * Testing for SIGBUS only makes sense after making sure
+ * that the instruction does not raise a SIGILL first.
+ */
+ raise_sigill = inst_raise_sigill(hwcap, have_hwcap);
+ if (!raise_sigill)
+ inst_raise_sigbus(hwcap, have_hwcap);
+ else
+ ksft_test_result_skip("sigbus_%s\n", hwcap->name);
+ }
+
+ ksft_print_cnts();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/arm64/abi/ptrace.c b/tools/testing/selftests/arm64/abi/ptrace.c
new file mode 100644
index 000000000000..0e46ac21c81d
--- /dev/null
+++ b/tools/testing/selftests/arm64/abi/ptrace.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 ARM Limited.
+ */
+#include <errno.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+#include <sys/ptrace.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <sys/wait.h>
+#include <asm/sigcontext.h>
+#include <asm/ptrace.h>
+
+#include "kselftest.h"
+
+#define EXPECTED_TESTS 11
+
+#define MAX_TPIDRS 2
+
+static bool have_sme(void)
+{
+ return getauxval(AT_HWCAP2) & HWCAP2_SME;
+}
+
+static void test_tpidr(pid_t child)
+{
+ uint64_t read_val[MAX_TPIDRS];
+ uint64_t write_val[MAX_TPIDRS];
+ struct iovec read_iov, write_iov;
+ bool test_tpidr2 = false;
+ int ret, i;
+
+ read_iov.iov_base = read_val;
+ write_iov.iov_base = write_val;
+
+ /* Should be able to read a single TPIDR... */
+ read_iov.iov_len = sizeof(uint64_t);
+ ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
+ ksft_test_result(ret == 0, "read_tpidr_one\n");
+
+ /* ...write a new value... */
+ write_iov.iov_len = sizeof(uint64_t);
+ write_val[0] = read_val[0] + 1;
+ ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_TLS, &write_iov);
+ ksft_test_result(ret == 0, "write_tpidr_one\n");
+
+ /* ...then read it back */
+ ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
+ ksft_test_result(ret == 0 && write_val[0] == read_val[0],
+ "verify_tpidr_one\n");
+
+ /* If we have TPIDR2 we should be able to read it */
+ read_iov.iov_len = sizeof(read_val);
+ ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
+ if (ret == 0) {
+ /* If we have SME there should be two TPIDRs */
+ if (read_iov.iov_len >= sizeof(read_val))
+ test_tpidr2 = true;
+
+ if (have_sme() && test_tpidr2) {
+ ksft_test_result(test_tpidr2, "count_tpidrs\n");
+ } else {
+ ksft_test_result(read_iov.iov_len % sizeof(uint64_t) == 0,
+ "count_tpidrs\n");
+ }
+ } else {
+ ksft_test_result_fail("count_tpidrs\n");
+ }
+
+ if (test_tpidr2) {
+ /* Try to write new values to all known TPIDRs... */
+ write_iov.iov_len = sizeof(write_val);
+ for (i = 0; i < MAX_TPIDRS; i++)
+ write_val[i] = read_val[i] + 1;
+ ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_TLS, &write_iov);
+
+ ksft_test_result(ret == 0 &&
+ write_iov.iov_len == sizeof(write_val),
+ "tpidr2_write\n");
+
+ /* ...then read them back */
+ read_iov.iov_len = sizeof(read_val);
+ ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
+
+ if (have_sme()) {
+ /* Should read back the written value */
+ ksft_test_result(ret == 0 &&
+ read_iov.iov_len >= sizeof(read_val) &&
+ memcmp(read_val, write_val,
+ sizeof(read_val)) == 0,
+ "tpidr2_read\n");
+ } else {
+ /* TPIDR2 should read as zero */
+ ksft_test_result(ret == 0 &&
+ read_iov.iov_len >= sizeof(read_val) &&
+ read_val[0] == write_val[0] &&
+ read_val[1] == 0,
+ "tpidr2_read\n");
+ }
+
+ /* Writing only TPIDR... */
+ write_iov.iov_len = sizeof(uint64_t);
+ memcpy(write_val, read_val, sizeof(read_val));
+ write_val[0] += 1;
+ ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_TLS, &write_iov);
+
+ if (ret == 0) {
+ /* ...should leave TPIDR2 untouched */
+ read_iov.iov_len = sizeof(read_val);
+ ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS,
+ &read_iov);
+
+ ksft_test_result(ret == 0 &&
+ read_iov.iov_len >= sizeof(read_val) &&
+ memcmp(read_val, write_val,
+ sizeof(read_val)) == 0,
+ "write_tpidr_only\n");
+ } else {
+ ksft_test_result_fail("write_tpidr_only\n");
+ }
+ } else {
+ ksft_test_result_skip("tpidr2_write\n");
+ ksft_test_result_skip("tpidr2_read\n");
+ ksft_test_result_skip("write_tpidr_only\n");
+ }
+}
+
+static void test_hw_debug(pid_t child, int type, const char *type_name)
+{
+ struct user_hwdebug_state state;
+ struct iovec iov;
+ int slots, arch, ret;
+
+ iov.iov_len = sizeof(state);
+ iov.iov_base = &state;
+
+ /* Should be able to read the values */
+ ret = ptrace(PTRACE_GETREGSET, child, type, &iov);
+ ksft_test_result(ret == 0, "read_%s\n", type_name);
+
+ if (ret == 0) {
+ /* Low 8 bits is the number of slots, next 4 bits the arch */
+ slots = state.dbg_info & 0xff;
+ arch = (state.dbg_info >> 8) & 0xf;
+
+ ksft_print_msg("%s version %d with %d slots\n", type_name,
+ arch, slots);
+
+ /* Zero is not currently architecturally valid */
+ ksft_test_result(arch, "%s_arch_set\n", type_name);
+ } else {
+ ksft_test_result_skip("%s_arch_set\n", type_name);
+ }
+}
+
+static int do_child(void)
+{
+ if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
+ ksft_exit_fail_perror("PTRACE_TRACEME");
+
+ if (raise(SIGSTOP))
+ ksft_exit_fail_perror("raise(SIGSTOP)");
+
+ return EXIT_SUCCESS;
+}
+
+static int do_parent(pid_t child)
+{
+ int ret = EXIT_FAILURE;
+ pid_t pid;
+ int status;
+ siginfo_t si;
+
+ /* Attach to the child */
+ while (1) {
+ int sig;
+
+ pid = wait(&status);
+ if (pid == -1) {
+ perror("wait");
+ goto error;
+ }
+
+ /*
+ * This should never happen but it's hard to flag in
+ * the framework.
+ */
+ if (pid != child)
+ continue;
+
+ if (WIFEXITED(status) || WIFSIGNALED(status))
+ ksft_exit_fail_msg("Child died unexpectedly\n");
+
+ if (!WIFSTOPPED(status))
+ goto error;
+
+ sig = WSTOPSIG(status);
+
+ if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
+ if (errno == ESRCH)
+ goto disappeared;
+
+ if (errno == EINVAL) {
+ sig = 0; /* bust group-stop */
+ goto cont;
+ }
+
+ ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n",
+ strerror(errno));
+ goto error;
+ }
+
+ if (sig == SIGSTOP && si.si_code == SI_TKILL &&
+ si.si_pid == pid)
+ break;
+
+ cont:
+ if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
+ if (errno == ESRCH)
+ goto disappeared;
+
+ ksft_test_result_fail("PTRACE_CONT: %s\n",
+ strerror(errno));
+ goto error;
+ }
+ }
+
+ ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);
+
+ test_tpidr(child);
+ test_hw_debug(child, NT_ARM_HW_WATCH, "NT_ARM_HW_WATCH");
+ test_hw_debug(child, NT_ARM_HW_BREAK, "NT_ARM_HW_BREAK");
+
+ ret = EXIT_SUCCESS;
+
+error:
+ kill(child, SIGKILL);
+
+disappeared:
+ return ret;
+}
+
+int main(void)
+{
+ int ret = EXIT_SUCCESS;
+ pid_t child;
+
+ srandom(getpid());
+
+ ksft_print_header();
+
+ ksft_set_plan(EXPECTED_TESTS);
+
+ child = fork();
+ if (!child)
+ return do_child();
+
+ if (do_parent(child))
+ ret = EXIT_FAILURE;
+
+ ksft_print_cnts();
+
+ return ret;
+}
diff --git a/tools/testing/selftests/arm64/abi/syscall-abi-asm.S b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S
new file mode 100644
index 000000000000..66ab2e0bae5f
--- /dev/null
+++ b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2021 ARM Limited.
+//
+// Assembly portion of the syscall ABI test
+
+//
+// Load values from memory into registers, invoke a syscall and save the
+// register values back to memory for later checking. The syscall to be
+// invoked is configured in x8 of the input GPR data.
+//
+// x0: SVE VL, 0 for FP only
+// x1: SME VL
+//
+// GPRs: gpr_in, gpr_out
+// FPRs: fpr_in, fpr_out
+// Zn: z_in, z_out
+// Pn: p_in, p_out
+// FFR: ffr_in, ffr_out
+// ZA: za_in, za_out
+// SVCR: svcr_in, svcr_out
+
+#include "syscall-abi.h"
+
+.arch_extension sve
+
+#define ID_AA64SMFR0_EL1_SMEver_SHIFT 56
+#define ID_AA64SMFR0_EL1_SMEver_WIDTH 4
+
+/*
+ * LDR (vector to ZA array):
+ * LDR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
+ */
+.macro _ldr_za nw, nxbase, offset=0
+ .inst 0xe1000000 \
+ | (((\nw) & 3) << 13) \
+ | ((\nxbase) << 5) \
+ | ((\offset) & 7)
+.endm
+
+/*
+ * STR (vector from ZA array):
+ * STR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
+ */
+.macro _str_za nw, nxbase, offset=0
+ .inst 0xe1200000 \
+ | (((\nw) & 3) << 13) \
+ | ((\nxbase) << 5) \
+ | ((\offset) & 7)
+.endm
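These macros hand-assemble the SME LDR/STR (vector to/from ZA) forms by OR-ing bitfields into a base opcode, so an expansion can be verified with plain integer arithmetic. A quick sketch mirroring the macro itself rather than quoting an external encoding reference:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirror of the _ldr_za macro's bitfield composition */
	static uint32_t ldr_za(uint32_t nw, uint32_t nxbase, uint32_t offset)
	{
		return 0xe1000000 | ((nw & 3) << 13) | (nxbase << 5) | (offset & 7);
	}

	int main(void)
	{
		/* _ldr_za 12, 2: w12 & 3 == 0, base register x2, offset 0 */
		printf("0x%08x\n", ldr_za(12, 2, 0));	/* prints 0xe1000040 */
		return 0;
	}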
+
+/*
+ * LDR (ZT0)
+ *
+ * LDR ZT0, nx
+ */
+.macro _ldr_zt nx
+ .inst 0xe11f8000 \
+ | (((\nx) & 0x1f) << 5)
+.endm
+
+/*
+ * STR (ZT0)
+ *
+ * STR ZT0, nx
+ */
+.macro _str_zt nx
+ .inst 0xe13f8000 \
+ | (((\nx) & 0x1f) << 5)
+.endm
+
+.globl do_syscall
+do_syscall:
+ // Store x29, x30, x0 and x1 plus callee saved registers x19-x28 (80 bytes)
+ stp x29, x30, [sp, #-112]!
+ mov x29, sp
+ stp x0, x1, [sp, #16]
+ stp x19, x20, [sp, #32]
+ stp x21, x22, [sp, #48]
+ stp x23, x24, [sp, #64]
+ stp x25, x26, [sp, #80]
+ stp x27, x28, [sp, #96]
+
+ // Set SVCR if we're doing SME
+ cbz x1, load_gpr
+ adrp x2, svcr_in
+ ldr x2, [x2, :lo12:svcr_in]
+ msr S3_3_C4_C2_2, x2
+
+ // Load ZA and ZT0 if enabled - uses x12 as scratch due to SME LDR
+ tbz x2, #SVCR_ZA_SHIFT, load_gpr
+ mov w12, #0
+ ldr x2, =za_in
+1: _ldr_za 12, 2
+ add x2, x2, x1
+ add x12, x12, #1
+ cmp x1, x12
+ bne 1b
+
+ // ZT0
+ mrs x2, S3_0_C0_C4_5 // ID_AA64SMFR0_EL1
+ ubfx x2, x2, #ID_AA64SMFR0_EL1_SMEver_SHIFT, \
+ #ID_AA64SMFR0_EL1_SMEver_WIDTH
+ cbz x2, load_gpr
+ adrp x2, zt_in
+ add x2, x2, :lo12:zt_in
+ _ldr_zt 2
+
+load_gpr:
+ // Load GPRs x8-x28, and save our SP/FP for later comparison
+ ldr x2, =gpr_in
+ add x2, x2, #64
+ ldp x8, x9, [x2], #16
+ ldp x10, x11, [x2], #16
+ ldp x12, x13, [x2], #16
+ ldp x14, x15, [x2], #16
+ ldp x16, x17, [x2], #16
+ ldp x18, x19, [x2], #16
+ ldp x20, x21, [x2], #16
+ ldp x22, x23, [x2], #16
+ ldp x24, x25, [x2], #16
+ ldp x26, x27, [x2], #16
+ ldr x28, [x2], #8
+ str x29, [x2], #8 // FP
+ str x30, [x2], #8 // LR
+
+ // Load FPRs if we're doing neither SVE nor streaming SVE
+ cbnz x0, check_sve_in
+ ldr x2, =svcr_in
+ tbnz x2, #SVCR_SM_SHIFT, check_sve_in
+
+ ldr x2, =fpr_in
+ ldp q0, q1, [x2]
+ ldp q2, q3, [x2, #16 * 2]
+ ldp q4, q5, [x2, #16 * 4]
+ ldp q6, q7, [x2, #16 * 6]
+ ldp q8, q9, [x2, #16 * 8]
+ ldp q10, q11, [x2, #16 * 10]
+ ldp q12, q13, [x2, #16 * 12]
+ ldp q14, q15, [x2, #16 * 14]
+ ldp q16, q17, [x2, #16 * 16]
+ ldp q18, q19, [x2, #16 * 18]
+ ldp q20, q21, [x2, #16 * 20]
+ ldp q22, q23, [x2, #16 * 22]
+ ldp q24, q25, [x2, #16 * 24]
+ ldp q26, q27, [x2, #16 * 26]
+ ldp q28, q29, [x2, #16 * 28]
+ ldp q30, q31, [x2, #16 * 30]
+
+ b 2f
+
+check_sve_in:
+ // Load the SVE registers if we're doing SVE/SME
+
+ ldr x2, =z_in
+ ldr z0, [x2, #0, MUL VL]
+ ldr z1, [x2, #1, MUL VL]
+ ldr z2, [x2, #2, MUL VL]
+ ldr z3, [x2, #3, MUL VL]
+ ldr z4, [x2, #4, MUL VL]
+ ldr z5, [x2, #5, MUL VL]
+ ldr z6, [x2, #6, MUL VL]
+ ldr z7, [x2, #7, MUL VL]
+ ldr z8, [x2, #8, MUL VL]
+ ldr z9, [x2, #9, MUL VL]
+ ldr z10, [x2, #10, MUL VL]
+ ldr z11, [x2, #11, MUL VL]
+ ldr z12, [x2, #12, MUL VL]
+ ldr z13, [x2, #13, MUL VL]
+ ldr z14, [x2, #14, MUL VL]
+ ldr z15, [x2, #15, MUL VL]
+ ldr z16, [x2, #16, MUL VL]
+ ldr z17, [x2, #17, MUL VL]
+ ldr z18, [x2, #18, MUL VL]
+ ldr z19, [x2, #19, MUL VL]
+ ldr z20, [x2, #20, MUL VL]
+ ldr z21, [x2, #21, MUL VL]
+ ldr z22, [x2, #22, MUL VL]
+ ldr z23, [x2, #23, MUL VL]
+ ldr z24, [x2, #24, MUL VL]
+ ldr z25, [x2, #25, MUL VL]
+ ldr z26, [x2, #26, MUL VL]
+ ldr z27, [x2, #27, MUL VL]
+ ldr z28, [x2, #28, MUL VL]
+ ldr z29, [x2, #29, MUL VL]
+ ldr z30, [x2, #30, MUL VL]
+ ldr z31, [x2, #31, MUL VL]
+
+ // Only set a non-zero FFR; test patterns must be zero since the
+ // syscall should clear it - this lets us handle FA64.
+ ldr x2, =ffr_in
+ ldr p0, [x2]
+ ldr x2, [x2, #0]
+ cbz x2, 1f
+ wrffr p0.b
+1:
+
+ ldr x2, =p_in
+ ldr p0, [x2, #0, MUL VL]
+ ldr p1, [x2, #1, MUL VL]
+ ldr p2, [x2, #2, MUL VL]
+ ldr p3, [x2, #3, MUL VL]
+ ldr p4, [x2, #4, MUL VL]
+ ldr p5, [x2, #5, MUL VL]
+ ldr p6, [x2, #6, MUL VL]
+ ldr p7, [x2, #7, MUL VL]
+ ldr p8, [x2, #8, MUL VL]
+ ldr p9, [x2, #9, MUL VL]
+ ldr p10, [x2, #10, MUL VL]
+ ldr p11, [x2, #11, MUL VL]
+ ldr p12, [x2, #12, MUL VL]
+ ldr p13, [x2, #13, MUL VL]
+ ldr p14, [x2, #14, MUL VL]
+ ldr p15, [x2, #15, MUL VL]
+2:
+
+ // Do the syscall
+ svc #0
+
+ // Save GPRs x8-x30
+ ldr x2, =gpr_out
+ add x2, x2, #64
+ stp x8, x9, [x2], #16
+ stp x10, x11, [x2], #16
+ stp x12, x13, [x2], #16
+ stp x14, x15, [x2], #16
+ stp x16, x17, [x2], #16
+ stp x18, x19, [x2], #16
+ stp x20, x21, [x2], #16
+ stp x22, x23, [x2], #16
+ stp x24, x25, [x2], #16
+ stp x26, x27, [x2], #16
+ stp x28, x29, [x2], #16
+ str x30, [x2]
+
+ // Restore x0 and x1 for feature checks
+ ldp x0, x1, [sp, #16]
+
+ // Save FPSIMD state
+ ldr x2, =fpr_out
+ stp q0, q1, [x2]
+ stp q2, q3, [x2, #16 * 2]
+ stp q4, q5, [x2, #16 * 4]
+ stp q6, q7, [x2, #16 * 6]
+ stp q8, q9, [x2, #16 * 8]
+ stp q10, q11, [x2, #16 * 10]
+ stp q12, q13, [x2, #16 * 12]
+ stp q14, q15, [x2, #16 * 14]
+ stp q16, q17, [x2, #16 * 16]
+ stp q18, q19, [x2, #16 * 18]
+ stp q20, q21, [x2, #16 * 20]
+ stp q22, q23, [x2, #16 * 22]
+ stp q24, q25, [x2, #16 * 24]
+ stp q26, q27, [x2, #16 * 26]
+ stp q28, q29, [x2, #16 * 28]
+ stp q30, q31, [x2, #16 * 30]
+
+ // Save SVCR if we're doing SME
+ cbz x1, check_sve_out
+ mrs x2, S3_3_C4_C2_2
+ adrp x3, svcr_out
+ str x2, [x3, :lo12:svcr_out]
+
+ // Save ZA if it's enabled - uses x12 as scratch due to SME STR
+ tbz x2, #SVCR_ZA_SHIFT, check_sve_out
+ mov w12, #0
+ ldr x2, =za_out
+1: _str_za 12, 2
+ add x2, x2, x1
+ add x12, x12, #1
+ cmp x1, x12
+ bne 1b
+
+ // ZT0
+ mrs x2, S3_0_C0_C4_5 // ID_AA64SMFR0_EL1
+ ubfx x2, x2, #ID_AA64SMFR0_EL1_SMEver_SHIFT, \
+ #ID_AA64SMFR0_EL1_SMEver_WIDTH
+ cbz x2, check_sve_out
+ adrp x2, zt_out
+ add x2, x2, :lo12:zt_out
+ _str_zt 2
+
+check_sve_out:
+ // Save the SVE state if we have some
+ cbz x0, 1f
+
+ ldr x2, =z_out
+ str z0, [x2, #0, MUL VL]
+ str z1, [x2, #1, MUL VL]
+ str z2, [x2, #2, MUL VL]
+ str z3, [x2, #3, MUL VL]
+ str z4, [x2, #4, MUL VL]
+ str z5, [x2, #5, MUL VL]
+ str z6, [x2, #6, MUL VL]
+ str z7, [x2, #7, MUL VL]
+ str z8, [x2, #8, MUL VL]
+ str z9, [x2, #9, MUL VL]
+ str z10, [x2, #10, MUL VL]
+ str z11, [x2, #11, MUL VL]
+ str z12, [x2, #12, MUL VL]
+ str z13, [x2, #13, MUL VL]
+ str z14, [x2, #14, MUL VL]
+ str z15, [x2, #15, MUL VL]
+ str z16, [x2, #16, MUL VL]
+ str z17, [x2, #17, MUL VL]
+ str z18, [x2, #18, MUL VL]
+ str z19, [x2, #19, MUL VL]
+ str z20, [x2, #20, MUL VL]
+ str z21, [x2, #21, MUL VL]
+ str z22, [x2, #22, MUL VL]
+ str z23, [x2, #23, MUL VL]
+ str z24, [x2, #24, MUL VL]
+ str z25, [x2, #25, MUL VL]
+ str z26, [x2, #26, MUL VL]
+ str z27, [x2, #27, MUL VL]
+ str z28, [x2, #28, MUL VL]
+ str z29, [x2, #29, MUL VL]
+ str z30, [x2, #30, MUL VL]
+ str z31, [x2, #31, MUL VL]
+
+ ldr x2, =p_out
+ str p0, [x2, #0, MUL VL]
+ str p1, [x2, #1, MUL VL]
+ str p2, [x2, #2, MUL VL]
+ str p3, [x2, #3, MUL VL]
+ str p4, [x2, #4, MUL VL]
+ str p5, [x2, #5, MUL VL]
+ str p6, [x2, #6, MUL VL]
+ str p7, [x2, #7, MUL VL]
+ str p8, [x2, #8, MUL VL]
+ str p9, [x2, #9, MUL VL]
+ str p10, [x2, #10, MUL VL]
+ str p11, [x2, #11, MUL VL]
+ str p12, [x2, #12, MUL VL]
+ str p13, [x2, #13, MUL VL]
+ str p14, [x2, #14, MUL VL]
+ str p15, [x2, #15, MUL VL]
+
+ // Only save FFR if we wrote a value for SME
+ ldr x2, =ffr_in
+ ldr x2, [x2, #0]
+ cbz x2, 1f
+ ldr x2, =ffr_out
+ rdffr p0.b
+ str p0, [x2]
+1:
+
+ // Restore callee saved registers x19-x30
+ ldp x19, x20, [sp, #32]
+ ldp x21, x22, [sp, #48]
+ ldp x23, x24, [sp, #64]
+ ldp x25, x26, [sp, #80]
+ ldp x27, x28, [sp, #96]
+ ldp x29, x30, [sp], #112
+
+ // Clear SVCR if we were doing SME so future tests don't have ZA
+ cbz x1, 1f
+ msr S3_3_C4_C2_2, xzr
+1:
+
+ ret
diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.c b/tools/testing/selftests/arm64/abi/syscall-abi.c
new file mode 100644
index 000000000000..b67e3e26fa6d
--- /dev/null
+++ b/tools/testing/selftests/arm64/abi/syscall-abi.c
@@ -0,0 +1,565 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 ARM Limited.
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+#include <asm/hwcap.h>
+#include <asm/sigcontext.h>
+#include <asm/unistd.h>
+
+#include "kselftest.h"
+
+#include "syscall-abi.h"
+
+/*
+ * The kernel defines a much larger SVE_VQ_MAX than is expressible in
+ * the architecture; this creates a *lot* of overhead filling the
+ * buffers (especially ZA) on emulated platforms, so use the actual
+ * architectural maximum instead.
+ */
+#define ARCH_SVE_VQ_MAX 16
+
+static int default_sme_vl;
+
+static int sve_vl_count;
+static unsigned int sve_vls[ARCH_SVE_VQ_MAX];
+static int sme_vl_count;
+static unsigned int sme_vls[ARCH_SVE_VQ_MAX];
+
+extern void do_syscall(int sve_vl, int sme_vl);
+
+static void fill_random(void *buf, size_t size)
+{
+ int i;
+ uint32_t *lbuf = buf;
+
+ /* random() returns a 32 bit number regardless of the size of long */
+ for (i = 0; i < size / sizeof(uint32_t); i++)
+ lbuf[i] = random();
+}
+
+/*
+ * We also repeat the test for several syscalls to try to expose different
+ * behaviour.
+ */
+static struct syscall_cfg {
+ int syscall_nr;
+ const char *name;
+} syscalls[] = {
+ { __NR_getpid, "getpid()" },
+ { __NR_sched_yield, "sched_yield()" },
+};
+
+#define NUM_GPR 31
+uint64_t gpr_in[NUM_GPR];
+uint64_t gpr_out[NUM_GPR];
+
+static void setup_gpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
+ fill_random(gpr_in, sizeof(gpr_in));
+ gpr_in[8] = cfg->syscall_nr;
+ memset(gpr_out, 0, sizeof(gpr_out));
+}
+
+static int check_gpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, uint64_t svcr)
+{
+ int errors = 0;
+ int i;
+
+ /*
+ * GPR x0-x8 may be clobbered, and all others should be preserved.
+ */
+ for (i = 9; i < ARRAY_SIZE(gpr_in); i++) {
+ if (gpr_in[i] != gpr_out[i]) {
+ ksft_print_msg("%s SVE VL %d mismatch in GPR %d: %lx != %lx\n",
+ cfg->name, sve_vl, i,
+ gpr_in[i], gpr_out[i]);
+ errors++;
+ }
+ }
+
+ return errors;
+}
+
+#define NUM_FPR 32
+uint64_t fpr_in[NUM_FPR * 2];
+uint64_t fpr_out[NUM_FPR * 2];
+uint64_t fpr_zero[NUM_FPR * 2];
+
+static void setup_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
+ fill_random(fpr_in, sizeof(fpr_in));
+ memset(fpr_out, 0, sizeof(fpr_out));
+}
+
+static int check_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
+ int errors = 0;
+ int i;
+
+ if (!sve_vl && !(svcr & SVCR_SM_MASK)) {
+ for (i = 0; i < ARRAY_SIZE(fpr_in); i++) {
+ if (fpr_in[i] != fpr_out[i]) {
+ ksft_print_msg("%s Q%d/%d mismatch %lx != %lx\n",
+ cfg->name,
+ i / 2, i % 2,
+ fpr_in[i], fpr_out[i]);
+ errors++;
+ }
+ }
+ }
+
+ /*
+ * In streaming mode the whole register set should be cleared
+ * by the transition out of streaming mode.
+ */
+ if (svcr & SVCR_SM_MASK) {
+ if (memcmp(fpr_zero, fpr_out, sizeof(fpr_out)) != 0) {
+ ksft_print_msg("%s FPSIMD registers non-zero exiting SM\n",
+ cfg->name);
+ errors++;
+ }
+ }
+
+ return errors;
+}
+
+#define SVE_Z_SHARED_BYTES (128 / 8)
+
+static uint8_t z_zero[__SVE_ZREG_SIZE(ARCH_SVE_VQ_MAX)];
+uint8_t z_in[SVE_NUM_ZREGS * __SVE_ZREG_SIZE(ARCH_SVE_VQ_MAX)];
+uint8_t z_out[SVE_NUM_ZREGS * __SVE_ZREG_SIZE(ARCH_SVE_VQ_MAX)];
+
+static void setup_z(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
+ fill_random(z_in, sizeof(z_in));
+ fill_random(z_out, sizeof(z_out));
+}
+
+static int check_z(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
+ size_t reg_size = sve_vl;
+ int errors = 0;
+ int i;
+
+ if (!sve_vl)
+ return 0;
+
+ for (i = 0; i < SVE_NUM_ZREGS; i++) {
+ uint8_t *in = &z_in[reg_size * i];
+ uint8_t *out = &z_out[reg_size * i];
+
+ if (svcr & SVCR_SM_MASK) {
+ /*
+ * In streaming mode the whole register should
+ * be cleared by the transition out of
+ * streaming mode.
+ */
+ if (memcmp(z_zero, out, reg_size) != 0) {
+ ksft_print_msg("%s SVE VL %d Z%d non-zero\n",
+ cfg->name, sve_vl, i);
+ errors++;
+ }
+ } else {
+ /*
+ * For standard SVE the low 128 bits should be
+ * preserved and any additional bits cleared.
+ */
+ if (memcmp(in, out, SVE_Z_SHARED_BYTES) != 0) {
+ ksft_print_msg("%s SVE VL %d Z%d low 128 bits changed\n",
+ cfg->name, sve_vl, i);
+ errors++;
+ }
+
+ if (reg_size > SVE_Z_SHARED_BYTES &&
+ (memcmp(z_zero, out + SVE_Z_SHARED_BYTES,
+ reg_size - SVE_Z_SHARED_BYTES) != 0)) {
+ ksft_print_msg("%s SVE VL %d Z%d high bits non-zero\n",
+ cfg->name, sve_vl, i);
+ errors++;
+ }
+ }
+ }
+
+ return errors;
+}
+
+uint8_t p_in[SVE_NUM_PREGS * __SVE_PREG_SIZE(ARCH_SVE_VQ_MAX)];
+uint8_t p_out[SVE_NUM_PREGS * __SVE_PREG_SIZE(ARCH_SVE_VQ_MAX)];
+
+static void setup_p(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
+ fill_random(p_in, sizeof(p_in));
+ fill_random(p_out, sizeof(p_out));
+}
+
+static int check_p(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
+ size_t reg_size = sve_vq_from_vl(sve_vl) * 2; /* 1 bit per VL byte */
+
+ int errors = 0;
+ int i;
+
+ if (!sve_vl)
+ return 0;
+
+ /* After a syscall the P registers should be zeroed */
+ for (i = 0; i < SVE_NUM_PREGS * reg_size; i++)
+ if (p_out[i])
+ errors++;
+ if (errors)
+ ksft_print_msg("%s SVE VL %d predicate registers non-zero\n",
+ cfg->name, sve_vl);
+
+ return errors;
+}
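The `* 2` in the predicate sizing used by check_p() above and check_ffr() below follows from one predicate bit per vector byte. A small sketch verifying the relation for all architectural vector lengths (a hand-written check, not part of the test):

	#include <assert.h>

	int main(void)
	{
		for (int vq = 1; vq <= 16; vq++) {
			int vl = vq * 16;	/* vector length in bytes */
			int preg_bits = vl;	/* one predicate bit per vector byte */
			int preg_bytes = preg_bits / 8;

			assert(preg_bytes == vq * 2);	/* matches sve_vq_from_vl(vl) * 2 */
		}

		return 0;
	}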
+
+uint8_t ffr_in[__SVE_PREG_SIZE(ARCH_SVE_VQ_MAX)];
+uint8_t ffr_out[__SVE_PREG_SIZE(ARCH_SVE_VQ_MAX)];
+
+static void setup_ffr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
+ /*
+ * If we are in streaming mode and do not have FA64 then FFR
+ * is unavailable.
+ */
+ if ((svcr & SVCR_SM_MASK) &&
+ !(getauxval(AT_HWCAP2) & HWCAP2_SME_FA64)) {
+ memset(&ffr_in, 0, sizeof(ffr_in));
+ return;
+ }
+
+ /*
+ * It is only valid to set a contiguous set of bits starting
+ * at 0. For now since we're expecting this to be cleared by
+ * a syscall just set all bits.
+ */
+ memset(ffr_in, 0xff, sizeof(ffr_in));
+ fill_random(ffr_out, sizeof(ffr_out));
+}
+
+static int check_ffr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
+ size_t reg_size = sve_vq_from_vl(sve_vl) * 2; /* 1 bit per VL byte */
+ int errors = 0;
+ int i;
+
+ if (!sve_vl)
+ return 0;
+
+ if ((svcr & SVCR_SM_MASK) &&
+ !(getauxval(AT_HWCAP2) & HWCAP2_SME_FA64))
+ return 0;
+
+ /* After a syscall FFR should be zeroed */
+ for (i = 0; i < reg_size; i++)
+ if (ffr_out[i])
+ errors++;
+ if (errors)
+ ksft_print_msg("%s SVE VL %d FFR non-zero\n",
+ cfg->name, sve_vl);
+
+ return errors;
+}
+
+uint64_t svcr_in, svcr_out;
+
+static void setup_svcr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
+ svcr_in = svcr;
+}
+
+static int check_svcr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
+ int errors = 0;
+
+ if (svcr_out & SVCR_SM_MASK) {
+ ksft_print_msg("%s Still in SM, SVCR %lx\n",
+ cfg->name, svcr_out);
+ errors++;
+ }
+
+ if ((svcr_in & SVCR_ZA_MASK) != (svcr_out & SVCR_ZA_MASK)) {
+ ksft_print_msg("%s PSTATE.ZA changed, SVCR %lx != %lx\n",
+ cfg->name, svcr_in, svcr_out);
+ errors++;
+ }
+
+ return errors;
+}
+
+uint8_t za_in[ZA_SIG_REGS_SIZE(ARCH_SVE_VQ_MAX)];
+uint8_t za_out[ZA_SIG_REGS_SIZE(ARCH_SVE_VQ_MAX)];
+
+static void setup_za(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
+ fill_random(za_in, sizeof(za_in));
+ memset(za_out, 0, sizeof(za_out));
+}
+
+static int check_za(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
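+	/* ZA is an SVL x SVL matrix of bytes */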
+ size_t reg_size = sme_vl * sme_vl;
+ int errors = 0;
+
+ if (!(svcr & SVCR_ZA_MASK))
+ return 0;
+
+ if (memcmp(za_in, za_out, reg_size) != 0) {
+ ksft_print_msg("SME VL %d ZA does not match\n", sme_vl);
+ errors++;
+ }
+
+ return errors;
+}
+
+uint8_t zt_in[ZT_SIG_REG_BYTES] __attribute__((aligned(16)));
+uint8_t zt_out[ZT_SIG_REG_BYTES] __attribute__((aligned(16)));
+
+static void setup_zt(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
+ fill_random(zt_in, sizeof(zt_in));
+ memset(zt_out, 0, sizeof(zt_out));
+}
+
+static int check_zt(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
+ int errors = 0;
+
+ if (!(getauxval(AT_HWCAP2) & HWCAP2_SME2))
+ return 0;
+
+ if (!(svcr & SVCR_ZA_MASK))
+ return 0;
+
+ if (memcmp(zt_in, zt_out, sizeof(zt_in)) != 0) {
+ ksft_print_msg("SME VL %d ZT does not match\n", sme_vl);
+ errors++;
+ }
+
+ return errors;
+}
+
+typedef void (*setup_fn)(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr);
+typedef int (*check_fn)(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr);
+
+/*
+ * Each set of registers has a setup function which is called before
+ * the syscall to fill values in a global variable for loading by the
+ * test code and a check function which validates that the results are
+ * as expected. Vector lengths are passed everywhere; a vector length
+ * of 0 means "do not test".
+ */
+static struct {
+ setup_fn setup;
+ check_fn check;
+} regset[] = {
+ { setup_gpr, check_gpr },
+ { setup_fpr, check_fpr },
+ { setup_z, check_z },
+ { setup_p, check_p },
+ { setup_ffr, check_ffr },
+ { setup_svcr, check_svcr },
+ { setup_za, check_za },
+ { setup_zt, check_zt },
+};
+
+static bool do_test(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
+ uint64_t svcr)
+{
+ int errors = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regset); i++)
+ regset[i].setup(cfg, sve_vl, sme_vl, svcr);
+
+ do_syscall(sve_vl, sme_vl);
+
+ for (i = 0; i < ARRAY_SIZE(regset); i++)
+ errors += regset[i].check(cfg, sve_vl, sme_vl, svcr);
+
+ return errors == 0;
+}
+
+static void test_one_syscall(struct syscall_cfg *cfg)
+{
+ int sve, sme;
+ int ret;
+
+ /* FPSIMD only case */
+ ksft_test_result(do_test(cfg, 0, default_sme_vl, 0),
+ "%s FPSIMD\n", cfg->name);
+
+ for (sve = 0; sve < sve_vl_count; sve++) {
+ ret = prctl(PR_SVE_SET_VL, sve_vls[sve]);
+ if (ret == -1)
+ ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n",
+ strerror(errno), errno);
+
+ ksft_test_result(do_test(cfg, sve_vls[sve], default_sme_vl, 0),
+ "%s SVE VL %d\n", cfg->name, sve_vls[sve]);
+
+ for (sme = 0; sme < sme_vl_count; sme++) {
+ ret = prctl(PR_SME_SET_VL, sme_vls[sme]);
+ if (ret == -1)
+ ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n",
+ strerror(errno), errno);
+
+ ksft_test_result(do_test(cfg, sve_vls[sve],
+ sme_vls[sme],
+ SVCR_ZA_MASK | SVCR_SM_MASK),
+ "%s SVE VL %d/SME VL %d SM+ZA\n",
+ cfg->name, sve_vls[sve],
+ sme_vls[sme]);
+ ksft_test_result(do_test(cfg, sve_vls[sve],
+ sme_vls[sme], SVCR_SM_MASK),
+ "%s SVE VL %d/SME VL %d SM\n",
+ cfg->name, sve_vls[sve],
+ sme_vls[sme]);
+ ksft_test_result(do_test(cfg, sve_vls[sve],
+ sme_vls[sme], SVCR_ZA_MASK),
+ "%s SVE VL %d/SME VL %d ZA\n",
+ cfg->name, sve_vls[sve],
+ sme_vls[sme]);
+ }
+ }
+
+ for (sme = 0; sme < sme_vl_count; sme++) {
+ ret = prctl(PR_SME_SET_VL, sme_vls[sme]);
+ if (ret == -1)
+ ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n",
+ strerror(errno), errno);
+
+ ksft_test_result(do_test(cfg, 0, sme_vls[sme],
+ SVCR_ZA_MASK | SVCR_SM_MASK),
+ "%s SME VL %d SM+ZA\n",
+ cfg->name, sme_vls[sme]);
+ ksft_test_result(do_test(cfg, 0, sme_vls[sme], SVCR_SM_MASK),
+ "%s SME VL %d SM\n",
+ cfg->name, sme_vls[sme]);
+ ksft_test_result(do_test(cfg, 0, sme_vls[sme], SVCR_ZA_MASK),
+ "%s SME VL %d ZA\n",
+ cfg->name, sme_vls[sme]);
+ }
+}
+
+void sve_count_vls(void)
+{
+ unsigned int vq;
+ int vl;
+
+ if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
+ return;
+
+ /*
+ * Enumerate up to ARCH_SVE_VQ_MAX vector lengths
+ */
+ for (vq = ARCH_SVE_VQ_MAX; vq > 0; vq /= 2) {
+ vl = prctl(PR_SVE_SET_VL, vq * 16);
+ if (vl == -1)
+ ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n",
+ strerror(errno), errno);
+
+ vl &= PR_SVE_VL_LEN_MASK;
+
+ if (vq != sve_vq_from_vl(vl))
+ vq = sve_vq_from_vl(vl);
+
+ sve_vls[sve_vl_count++] = vl;
+ }
+}
+
+void sme_count_vls(void)
+{
+ unsigned int vq;
+ int vl;
+
+ if (!(getauxval(AT_HWCAP2) & HWCAP2_SME))
+ return;
+
+ /*
+ * Enumerate up to ARCH_SVE_VQ_MAX vector lengths
+ */
+ for (vq = ARCH_SVE_VQ_MAX; vq > 0; vq /= 2) {
+ vl = prctl(PR_SME_SET_VL, vq * 16);
+ if (vl == -1)
+ ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n",
+ strerror(errno), errno);
+
+ vl &= PR_SME_VL_LEN_MASK;
+
+ /* Found lowest VL */
+ if (sve_vq_from_vl(vl) > vq)
+ break;
+
+ if (vq != sve_vq_from_vl(vl))
+ vq = sve_vq_from_vl(vl);
+
+ sme_vls[sme_vl_count++] = vl;
+ }
+
+	/* Ensure an SME VL is configured; it is passed to tests that leave SVCR clear */
+ default_sme_vl = sme_vls[0];
+}
+
+int main(void)
+{
+ int i;
+ int tests = 1; /* FPSIMD */
+ int sme_ver;
+
+ srandom(getpid());
+
+ ksft_print_header();
+
+ sve_count_vls();
+ sme_count_vls();
+
+ tests += sve_vl_count;
+ tests += sme_vl_count * 3;
+ tests += (sve_vl_count * sme_vl_count) * 3;
+ ksft_set_plan(ARRAY_SIZE(syscalls) * tests);
+
+ if (getauxval(AT_HWCAP2) & HWCAP2_SME2)
+ sme_ver = 2;
+ else
+ sme_ver = 1;
+
+ if (getauxval(AT_HWCAP2) & HWCAP2_SME_FA64)
+ ksft_print_msg("SME%d with FA64\n", sme_ver);
+ else if (getauxval(AT_HWCAP2) & HWCAP2_SME)
+ ksft_print_msg("SME%d without FA64\n", sme_ver);
+
+ for (i = 0; i < ARRAY_SIZE(syscalls); i++)
+ test_one_syscall(&syscalls[i]);
+
+ ksft_print_cnts();
+
+ return 0;
+}
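
The VL enumeration above relies on the kernel rounding a requested vector
length down to the nearest supported value, so halving vq on each pass
visits every supported length. A worked trace on a hypothetical machine
supporting VLs of 16, 32 and 64 bytes (vq 1, 2 and 4) with an
ARCH_SVE_VQ_MAX of 16:

	vq = 16: prctl(PR_SVE_SET_VL, 256) -> VL 64, vq snaps to 4, record 64
	vq = 2:  prctl(PR_SVE_SET_VL, 32)  -> VL 32,                record 32
	vq = 1:  prctl(PR_SVE_SET_VL, 16)  -> VL 16,                record 16
	vq = 0:  loop terminates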
diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.h b/tools/testing/selftests/arm64/abi/syscall-abi.h
new file mode 100644
index 000000000000..bda5a87ad381
--- /dev/null
+++ b/tools/testing/selftests/arm64/abi/syscall-abi.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 ARM Limited.
+ */
+
+#ifndef SYSCALL_ABI_H
+#define SYSCALL_ABI_H
+
+#define SVCR_ZA_MASK 2
+#define SVCR_SM_MASK 1
+
+#define SVCR_ZA_SHIFT 1
+#define SVCR_SM_SHIFT 0
+
+#endif
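
For orientation, SVCR as exercised by these tests is a two-bit value; a
minimal sketch (not part of the patch) of composing and decoding it with
the macros above:

	uint64_t svcr = SVCR_SM_MASK | SVCR_ZA_MASK;	/* streaming mode + ZA */
	int sm = (svcr >> SVCR_SM_SHIFT) & 1;		/* PSTATE.SM */
	int za = (svcr >> SVCR_ZA_SHIFT) & 1;		/* PSTATE.ZA */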
diff --git a/tools/testing/selftests/arm64/abi/tpidr2.c b/tools/testing/selftests/arm64/abi/tpidr2.c
new file mode 100644
index 000000000000..1703543fb7c7
--- /dev/null
+++ b/tools/testing/selftests/arm64/abi/tpidr2.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include "kselftest.h"
+
+#define SYS_TPIDR2 "S3_3_C13_C0_5"
+
+#define EXPECTED_TESTS 5
+
+static void set_tpidr2(uint64_t val)
+{
+ asm volatile (
+ "msr " SYS_TPIDR2 ", %0\n"
+ :
+ : "r"(val)
+ : "cc");
+}
+
+static uint64_t get_tpidr2(void)
+{
+ uint64_t val;
+
+ asm volatile (
+ "mrs %0, " SYS_TPIDR2 "\n"
+ : "=r"(val)
+ :
+ : "cc");
+
+ return val;
+}
+
+/* Processes should start with TPIDR2 == 0 */
+static int default_value(void)
+{
+ return get_tpidr2() == 0;
+}
+
+/* If we set TPIDR2 we should read that value */
+static int write_read(void)
+{
+ set_tpidr2(getpid());
+
+ return getpid() == get_tpidr2();
+}
+
+/* If we set a value we should read the same value after scheduling out */
+static int write_sleep_read(void)
+{
+ set_tpidr2(getpid());
+
+ msleep(100);
+
+ return getpid() == get_tpidr2();
+}
+
+/*
+ * If we fork the value in the parent should be unchanged and the
+ * child should start with the same value and be able to set its own
+ * value.
+ */
+static int write_fork_read(void)
+{
+ pid_t newpid, waiting, oldpid;
+ int status;
+
+ set_tpidr2(getpid());
+
+ oldpid = getpid();
+ newpid = fork();
+ if (newpid == 0) {
+ /* In child */
+ if (get_tpidr2() != oldpid) {
+ ksft_print_msg("TPIDR2 changed in child: %llx\n",
+ get_tpidr2());
+ exit(0);
+ }
+
+ set_tpidr2(getpid());
+ if (get_tpidr2() == getpid()) {
+ exit(1);
+ } else {
+ ksft_print_msg("Failed to set TPIDR2 in child\n");
+ exit(0);
+ }
+ }
+ if (newpid < 0) {
+ ksft_print_msg("fork() failed: %d\n", newpid);
+ return 0;
+ }
+
+ for (;;) {
+ waiting = waitpid(newpid, &status, 0);
+
+ if (waiting < 0) {
+ if (errno == EINTR)
+ continue;
+ ksft_print_msg("waitpid() failed: %d\n", errno);
+ return 0;
+ }
+ if (waiting != newpid) {
+ ksft_print_msg("waitpid() returned wrong PID: %d != %d\n",
+ waiting, newpid);
+ return 0;
+ }
+
+ if (!WIFEXITED(status)) {
+ ksft_print_msg("child did not exit\n");
+ return 0;
+ }
+
+ if (getpid() != get_tpidr2()) {
+ ksft_print_msg("TPIDR2 corrupted in parent\n");
+ return 0;
+ }
+
+ return WEXITSTATUS(status);
+ }
+}
+
+/*
+ * sys_clone() has a lot of per-architecture variation so just define
+ * it here rather than adding it to nolibc, plus the raw API is a
+ * little more convenient for this test.
+ */
+static int sys_clone(unsigned long clone_flags, unsigned long newsp,
+ int *parent_tidptr, unsigned long tls,
+ int *child_tidptr)
+{
+ return my_syscall5(__NR_clone, clone_flags, newsp, parent_tidptr, tls,
+ child_tidptr);
+}
+
+#define __STACK_SIZE (8 * 1024 * 1024)
+
+/*
+ * If we clone with CLONE_VM then the value in the parent should
+ * be unchanged and the child should start with zero and be able to
+ * set its own value.
+ */
+static int write_clone_read(void)
+{
+ int parent_tid, child_tid;
+ pid_t parent, waiting;
+ int ret, status;
+ void *stack;
+
+ parent = getpid();
+ set_tpidr2(parent);
+
+ stack = malloc(__STACK_SIZE);
+ if (!stack) {
+ ksft_print_msg("malloc() failed\n");
+ return 0;
+ }
+
+ ret = sys_clone(CLONE_VM, (unsigned long)stack + __STACK_SIZE,
+ &parent_tid, 0, &child_tid);
+ if (ret == -1) {
+ ksft_print_msg("clone() failed: %d\n", errno);
+ return 0;
+ }
+
+ if (ret == 0) {
+ /* In child */
+ if (get_tpidr2() != 0) {
+ ksft_print_msg("TPIDR2 non-zero in child: %llx\n",
+ get_tpidr2());
+ exit(0);
+ }
+
+ if (gettid() == 0)
+ ksft_print_msg("Child TID==0\n");
+ set_tpidr2(gettid());
+ if (get_tpidr2() == gettid()) {
+ exit(1);
+ } else {
+ ksft_print_msg("Failed to set TPIDR2 in child\n");
+ exit(0);
+ }
+ }
+
+ for (;;) {
+ waiting = waitpid(ret, &status, __WCLONE);
+
+ if (waiting < 0) {
+ if (errno == EINTR)
+ continue;
+ ksft_print_msg("waitpid() failed: %d\n", errno);
+ return 0;
+ }
+ if (waiting != ret) {
+ ksft_print_msg("waitpid() returned wrong PID %d\n",
+ waiting);
+ return 0;
+ }
+
+ if (!WIFEXITED(status)) {
+ ksft_print_msg("child did not exit\n");
+ return 0;
+ }
+
+ if (parent != get_tpidr2()) {
+ ksft_print_msg("TPIDR2 corrupted in parent\n");
+ return 0;
+ }
+
+ return WEXITSTATUS(status);
+ }
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+
+ ksft_print_header();
+	ksft_set_plan(EXPECTED_TESTS);
+
+ ksft_print_msg("PID: %d\n", getpid());
+
+	/*
+	 * This test is run with nolibc, which doesn't support hwcap;
+	 * implementing that would be disproportionate effort, so instead
+	 * check for the default SME vector length configuration in /proc.
+	 */
+ ret = open("/proc/sys/abi/sme_default_vector_length", O_RDONLY, 0);
+ if (ret >= 0) {
+ ksft_test_result(default_value(), "default_value\n");
+ ksft_test_result(write_read(), "write_read\n");
+ ksft_test_result(write_sleep_read(), "write_sleep_read\n");
+ ksft_test_result(write_fork_read(), "write_fork_read\n");
+ ksft_test_result(write_clone_read(), "write_clone_read\n");
+
+ } else {
+ ksft_print_msg("SME support not present\n");
+
+ ksft_test_result_skip("default_value\n");
+ ksft_test_result_skip("write_read\n");
+ ksft_test_result_skip("write_sleep_read\n");
+ ksft_test_result_skip("write_fork_read\n");
+ ksft_test_result_skip("write_clone_read\n");
+ }
+
+ ksft_finished();
+}
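
Because the nolibc build cannot call getauxval(), the test above keys off
the /proc file instead; for comparison, a hedged sketch of the hwcap check
a libc-based build could use (assumes <sys/auxv.h> and HWCAP2_SME from
<asm/hwcap.h>):

	#include <sys/auxv.h>
	#include <asm/hwcap.h>

	static int have_sme(void)
	{
		/* TPIDR2 is part of SME, so gate the tests on HWCAP2_SME */
		return !!(getauxval(AT_HWCAP2) & HWCAP2_SME);
	}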
diff --git a/tools/testing/selftests/arm64/bti/.gitignore b/tools/testing/selftests/arm64/bti/.gitignore
new file mode 100644
index 000000000000..73869fabada4
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/.gitignore
@@ -0,0 +1,2 @@
+btitest
+nobtitest
diff --git a/tools/testing/selftests/arm64/bti/Makefile b/tools/testing/selftests/arm64/bti/Makefile
new file mode 100644
index 000000000000..05e4ee523a53
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/Makefile
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0
+
+TEST_GEN_PROGS := btitest nobtitest
+
+# These tests are built as freestanding binaries since otherwise BTI
+# support in ld.so is required, which is not currently widespread; when
+# it is available it will still be useful to test this separately as the
+# cases for statically linked and dynamically linked binaries are
+# slightly different.
+
+CFLAGS_NOBTI = -mbranch-protection=none -DBTI=0
+CFLAGS_BTI = -mbranch-protection=standard -DBTI=1
+
+CFLAGS_COMMON = -ffreestanding -Wall -Wextra $(CFLAGS)
+
+BTI_CC_COMMAND = $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -c -o $@ $<
+NOBTI_CC_COMMAND = $(CC) $(CFLAGS_NOBTI) $(CFLAGS_COMMON) -c -o $@ $<
+
+$(OUTPUT)/%-bti.o: %.c
+ $(BTI_CC_COMMAND)
+
+$(OUTPUT)/%-bti.o: %.S
+ $(BTI_CC_COMMAND)
+
+$(OUTPUT)/%-nobti.o: %.c
+ $(NOBTI_CC_COMMAND)
+
+$(OUTPUT)/%-nobti.o: %.S
+ $(NOBTI_CC_COMMAND)
+
+BTI_OBJS = \
+ $(OUTPUT)/test-bti.o \
+ $(OUTPUT)/signal-bti.o \
+ $(OUTPUT)/start-bti.o \
+ $(OUTPUT)/syscall-bti.o \
+ $(OUTPUT)/system-bti.o \
+ $(OUTPUT)/teststubs-bti.o \
+ $(OUTPUT)/trampoline-bti.o
+$(OUTPUT)/btitest: $(BTI_OBJS)
+ $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -nostdlib -static -o $@ $^
+
+NOBTI_OBJS = \
+ $(OUTPUT)/test-nobti.o \
+ $(OUTPUT)/signal-nobti.o \
+ $(OUTPUT)/start-nobti.o \
+ $(OUTPUT)/syscall-nobti.o \
+ $(OUTPUT)/system-nobti.o \
+ $(OUTPUT)/teststubs-nobti.o \
+ $(OUTPUT)/trampoline-nobti.o
+$(OUTPUT)/nobtitest: $(NOBTI_OBJS)
+	$(CC) $(CFLAGS_NOBTI) $(CFLAGS_COMMON) -nostdlib -static -o $@ $^
+
+# Including KSFT lib.mk here will also mangle the TEST_GEN_PROGS list
+# to account for any OUTPUT target-dirs optionally provided by
+# the toplevel makefile
+include ../../lib.mk
diff --git a/tools/testing/selftests/arm64/bti/assembler.h b/tools/testing/selftests/arm64/bti/assembler.h
new file mode 100644
index 000000000000..141cdcbf0b8f
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/assembler.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#ifndef ASSEMBLER_H
+#define ASSEMBLER_H
+
+#define NT_GNU_PROPERTY_TYPE_0 5
+#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
+
+/* Bits for GNU_PROPERTY_AARCH64_FEATURE_1_BTI */
+#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI (1U << 0)
+#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC (1U << 1)
+
+.macro startfn name:req
+ .globl \name
+\name:
+ .macro endfn
+ .size \name, . - \name
+ .type \name, @function
+ .purgem endfn
+ .endm
+.endm
+
+.macro emit_aarch64_feature_1_and
+ .pushsection .note.gnu.property, "a"
+ .align 3
+ .long 2f - 1f
+ .long 6f - 3f
+ .long NT_GNU_PROPERTY_TYPE_0
+1: .string "GNU"
+2:
+ .align 3
+3: .long GNU_PROPERTY_AARCH64_FEATURE_1_AND
+ .long 5f - 4f
+4:
+#if BTI
+ .long GNU_PROPERTY_AARCH64_FEATURE_1_PAC | \
+ GNU_PROPERTY_AARCH64_FEATURE_1_BTI
+#else
+ .long 0
+#endif
+5:
+ .align 3
+6:
+ .popsection
+.endm
+
+.macro paciasp
+ hint 0x19
+.endm
+
+.macro autiasp
+ hint 0x1d
+.endm
+
+.macro __bti_
+ hint 0x20
+.endm
+
+.macro __bti_c
+ hint 0x22
+.endm
+
+.macro __bti_j
+ hint 0x24
+.endm
+
+.macro __bti_jc
+ hint 0x26
+.endm
+
+.macro bti what=
+ __bti_\what
+.endm
+
+#endif /* ! ASSEMBLER_H */
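
The emit_aarch64_feature_1_and macro hand-assembles the GNU property note
a BTI-aware toolchain would normally emit; its layout corresponds roughly
to the following C view (illustrative only, with the 8-byte alignment the
macro enforces):

	struct gnu_property_note {
		uint32_t n_namesz;	/* 4: "GNU" plus NUL */
		uint32_t n_descsz;	/* size of the property payload */
		uint32_t n_type;	/* NT_GNU_PROPERTY_TYPE_0 == 5 */
		char	 n_name[4];	/* "GNU\0" */
		/* 8-byte aligned payload: */
		uint32_t pr_type;	/* GNU_PROPERTY_AARCH64_FEATURE_1_AND */
		uint32_t pr_datasz;	/* 4 */
		uint32_t pr_data;	/* BTI | PAC bits, or 0 when !BTI */
		uint32_t pr_pad;	/* pad back to 8-byte alignment */
	};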
diff --git a/tools/testing/selftests/arm64/bti/btitest.h b/tools/testing/selftests/arm64/bti/btitest.h
new file mode 100644
index 000000000000..2aff9b10336e
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/btitest.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#ifndef BTITEST_H
+#define BTITEST_H
+
+/* Trampolines for calling the test stubs: */
+void call_using_br_x0(void (*)(void));
+void call_using_br_x16(void (*)(void));
+void call_using_blr(void (*)(void));
+
+/* Test stubs: */
+void nohint_func(void);
+void bti_none_func(void);
+void bti_c_func(void);
+void bti_j_func(void);
+void bti_jc_func(void);
+void paciasp_func(void);
+
+#endif /* !BTITEST_H */
diff --git a/tools/testing/selftests/arm64/bti/signal.c b/tools/testing/selftests/arm64/bti/signal.c
new file mode 100644
index 000000000000..f3fd29b91141
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/signal.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "system.h"
+#include "signal.h"
+
+int sigemptyset(sigset_t *s)
+{
+ unsigned int i;
+
+ for (i = 0; i < _NSIG_WORDS; ++i)
+ s->sig[i] = 0;
+
+ return 0;
+}
+
+int sigaddset(sigset_t *s, int n)
+{
+ if (n < 1 || n > _NSIG)
+ return -EINVAL;
+
+ s->sig[(n - 1) / _NSIG_BPW] |= 1UL << (n - 1) % _NSIG_BPW;
+ return 0;
+}
+
+int sigaction(int n, struct sigaction *sa, const struct sigaction *old)
+{
+ return syscall(__NR_rt_sigaction, n, sa, old, sizeof(sa->sa_mask));
+}
+
+int sigprocmask(int how, const sigset_t *mask, sigset_t *old)
+{
+ return syscall(__NR_rt_sigprocmask, how, mask, old, sizeof(*mask));
+}
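
These wrappers pass an explicit sigsetsize because the raw rt_sigaction
and rt_sigprocmask syscalls require one (libc normally hides this). A
usage sketch matching how the BTI test installs its SIGILL handler:

	struct sigaction sa;

	sa.sa_handler = (sighandler_t)(void *)handler;	/* caller's handler */
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGILL, &sa, NULL);	/* wraps __NR_rt_sigaction above */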
diff --git a/tools/testing/selftests/arm64/bti/signal.h b/tools/testing/selftests/arm64/bti/signal.h
new file mode 100644
index 000000000000..103457dc880e
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/signal.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#ifndef SIGNAL_H
+#define SIGNAL_H
+
+#include <linux/signal.h>
+
+#include "system.h"
+
+typedef __sighandler_t sighandler_t;
+
+int sigemptyset(sigset_t *s);
+int sigaddset(sigset_t *s, int n);
+int sigaction(int n, struct sigaction *sa, const struct sigaction *old);
+int sigprocmask(int how, const sigset_t *mask, sigset_t *old);
+
+#endif /* ! SIGNAL_H */
diff --git a/tools/testing/selftests/arm64/bti/start.S b/tools/testing/selftests/arm64/bti/start.S
new file mode 100644
index 000000000000..831f952e0572
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/start.S
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "assembler.h"
+
+startfn _start
+ mov x0, sp
+ b start
+endfn
+
+emit_aarch64_feature_1_and
diff --git a/tools/testing/selftests/arm64/bti/syscall.S b/tools/testing/selftests/arm64/bti/syscall.S
new file mode 100644
index 000000000000..8dde8b6f3db1
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/syscall.S
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "assembler.h"
+
+startfn syscall
+ bti c
+ mov w8, w0
+ mov x0, x1
+ mov x1, x2
+ mov x2, x3
+ mov x3, x4
+ mov x4, x5
+ mov x5, x6
+ mov x6, x7
+ svc #0
+ ret
+endfn
+
+emit_aarch64_feature_1_and
diff --git a/tools/testing/selftests/arm64/bti/system.c b/tools/testing/selftests/arm64/bti/system.c
new file mode 100644
index 000000000000..93d772b00bfe
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/system.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "system.h"
+
+#include <asm/unistd.h>
+
+void __noreturn exit(int n)
+{
+ syscall(__NR_exit, n);
+ unreachable();
+}
+
+ssize_t write(int fd, const void *buf, size_t size)
+{
+ return syscall(__NR_write, fd, buf, size);
+}
diff --git a/tools/testing/selftests/arm64/bti/system.h b/tools/testing/selftests/arm64/bti/system.h
new file mode 100644
index 000000000000..2e9ee1284a0c
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/system.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#ifndef SYSTEM_H
+#define SYSTEM_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+
+typedef __kernel_size_t size_t;
+typedef __kernel_ssize_t ssize_t;
+
+#include <linux/errno.h>
+#include <linux/compiler.h>
+
+#include <asm/hwcap.h>
+#include <asm/ptrace.h>
+#include <asm/unistd.h>
+
+long syscall(int nr, ...);
+
+void __noreturn exit(int n);
+ssize_t write(int fd, const void *buf, size_t size);
+
+#endif /* ! SYSTEM_H */
diff --git a/tools/testing/selftests/arm64/bti/test.c b/tools/testing/selftests/arm64/bti/test.c
new file mode 100644
index 000000000000..28a8e8a28a84
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/test.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019,2021 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "system.h"
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <linux/errno.h>
+#include <linux/auxvec.h>
+#include <linux/signal.h>
+#include <asm/sigcontext.h>
+#include <asm/ucontext.h>
+
+typedef struct ucontext ucontext_t;
+
+#include "btitest.h"
+#include "signal.h"
+
+#define EXPECTED_TESTS 18
+
+static volatile unsigned int test_num = 1;
+static unsigned int test_passed;
+static unsigned int test_failed;
+static unsigned int test_skipped;
+
+static void fdputs(int fd, const char *str)
+{
+ size_t len = 0;
+ const char *p = str;
+
+ while (*p++)
+ ++len;
+
+ write(fd, str, len);
+}
+
+static void putstr(const char *str)
+{
+ fdputs(1, str);
+}
+
+static void putnum(unsigned int num)
+{
+ char c;
+
+ if (num / 10)
+ putnum(num / 10);
+
+ c = '0' + (num % 10);
+ write(1, &c, 1);
+}
+
+#define puttestname(test_name, trampoline_name) do { \
+ putstr(test_name); \
+ putstr("/"); \
+ putstr(trampoline_name); \
+} while (0)
+
+void print_summary(void)
+{
+ putstr("# Totals: pass:");
+ putnum(test_passed);
+ putstr(" fail:");
+ putnum(test_failed);
+ putstr(" xfail:0 xpass:0 skip:");
+ putnum(test_skipped);
+ putstr(" error:0\n");
+}
+
+static const char *volatile current_test_name;
+static const char *volatile current_trampoline_name;
+static volatile int sigill_expected, sigill_received;
+
+static void handler(int n, siginfo_t *si __always_unused,
+ void *uc_ __always_unused)
+{
+ ucontext_t *uc = uc_;
+
+ putstr("# \t[SIGILL in ");
+ puttestname(current_test_name, current_trampoline_name);
+ putstr(", BTYPE=");
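+	/* "00011011" pairs up as "00"/"01"/"10"/"11", indexed by 2 * BTYPE */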
+ write(1, &"00011011"[((uc->uc_mcontext.pstate & PSR_BTYPE_MASK)
+ >> PSR_BTYPE_SHIFT) * 2], 2);
+ if (!sigill_expected) {
+ putstr("]\n");
+ putstr("not ok ");
+ putnum(test_num);
+ putstr(" ");
+ puttestname(current_test_name, current_trampoline_name);
+		putstr(" (unexpected SIGILL)\n");
+ print_summary();
+ exit(128 + n);
+ }
+
+ putstr(" (expected)]\n");
+ sigill_received = 1;
+ /* zap BTYPE so that resuming the faulting code will work */
+ uc->uc_mcontext.pstate &= ~PSR_BTYPE_MASK;
+}
+
+/* Does the system have BTI? */
+static bool have_bti;
+
+static void __do_test(void (*trampoline)(void (*)(void)),
+ void (*fn)(void),
+ const char *trampoline_name,
+ const char *name,
+ int expect_sigill)
+{
+ /*
+ * Branch Target exceptions should only happen for BTI
+ * binaries running on a system with BTI:
+ */
+ if (!BTI || !have_bti)
+ expect_sigill = 0;
+
+ sigill_expected = expect_sigill;
+ sigill_received = 0;
+ current_test_name = name;
+ current_trampoline_name = trampoline_name;
+
+ trampoline(fn);
+
+ if (expect_sigill && !sigill_received) {
+ putstr("not ok ");
+ test_failed++;
+ } else {
+ putstr("ok ");
+ test_passed++;
+ }
+ putnum(test_num++);
+ putstr(" ");
+ puttestname(name, trampoline_name);
+ putstr("\n");
+}
+
+#define do_test(expect_sigill_br_x0, \
+ expect_sigill_br_x16, \
+ expect_sigill_blr, \
+ name) \
+do { \
+ __do_test(call_using_br_x0, name, "call_using_br_x0", #name, \
+ expect_sigill_br_x0); \
+ __do_test(call_using_br_x16, name, "call_using_br_x16", #name, \
+ expect_sigill_br_x16); \
+ __do_test(call_using_blr, name, "call_using_blr", #name, \
+ expect_sigill_blr); \
+} while (0)
+
+void start(int *argcp)
+{
+ struct sigaction sa;
+ void *const *p;
+ const struct auxv_entry {
+ unsigned long type;
+ unsigned long val;
+ } *auxv;
+ unsigned long hwcap = 0, hwcap2 = 0;
+
+ putstr("TAP version 13\n");
+ putstr("1..");
+ putnum(EXPECTED_TESTS);
+ putstr("\n");
+
+ /* Gross hack for finding AT_HWCAP2 from the initial process stack: */
+ p = (void *const *)argcp + 1 + *argcp + 1; /* start of environment */
+ /* step over environment */
+ while (*p++)
+ ;
+ for (auxv = (const struct auxv_entry *)p; auxv->type != AT_NULL; ++auxv) {
+ switch (auxv->type) {
+ case AT_HWCAP:
+ hwcap = auxv->val;
+ break;
+ case AT_HWCAP2:
+ hwcap2 = auxv->val;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (hwcap & HWCAP_PACA)
+ putstr("# HWCAP_PACA present\n");
+ else
+ putstr("# HWCAP_PACA not present\n");
+
+ if (hwcap2 & HWCAP2_BTI) {
+ putstr("# HWCAP2_BTI present\n");
+ if (!(hwcap & HWCAP_PACA))
+ putstr("# Bad hardware? Expect problems.\n");
+ have_bti = true;
+ } else {
+ putstr("# HWCAP2_BTI not present\n");
+ have_bti = false;
+ }
+
+ putstr("# Test binary");
+ if (!BTI)
+ putstr(" not");
+ putstr(" built for BTI\n");
+
+ sa.sa_handler = (sighandler_t)(void *)handler;
+ sa.sa_flags = SA_SIGINFO;
+ sigemptyset(&sa.sa_mask);
+ sigaction(SIGILL, &sa, NULL);
+ sigaddset(&sa.sa_mask, SIGILL);
+ sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);
+
+ do_test(1, 1, 1, nohint_func);
+ do_test(1, 1, 1, bti_none_func);
+ do_test(1, 0, 0, bti_c_func);
+ do_test(0, 0, 1, bti_j_func);
+ do_test(0, 0, 0, bti_jc_func);
+ do_test(1, 0, 0, paciasp_func);
+
+ print_summary();
+
+ if (test_num - 1 != EXPECTED_TESTS)
+ putstr("# WARNING - EXPECTED TEST COUNT WRONG\n");
+
+ if (test_failed)
+ exit(1);
+ else
+ exit(0);
+}
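
The expectation triples passed to do_test() encode the architected BTI
landing pad rules: blr and br via x16/x17 are accepted by "bti c", br via
any other register only by "bti j", "bti jc" accepts both, paciasp acts
as an implicit "bti c", and code with no landing pad always faults. As a
memory aid (not part of the patch):

	/* expect SIGILL for (br x0, br x16, blr): */
	/* nohint_func:   1, 1, 1   no landing pad              */
	/* bti_none_func: 1, 1, 1   plain "bti" accepts nothing */
	/* bti_c_func:    1, 0, 0   calls and br x16/x17        */
	/* bti_j_func:    0, 0, 1   jumps only                  */
	/* bti_jc_func:   0, 0, 0   jumps and calls             */
	/* paciasp_func:  1, 0, 0   implicit bti c              */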
diff --git a/tools/testing/selftests/arm64/bti/teststubs.S b/tools/testing/selftests/arm64/bti/teststubs.S
new file mode 100644
index 000000000000..b62c8c35f67e
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/teststubs.S
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "assembler.h"
+
+startfn bti_none_func
+ bti
+ ret
+endfn
+
+startfn bti_c_func
+ bti c
+ ret
+endfn
+
+startfn bti_j_func
+ bti j
+ ret
+endfn
+
+startfn bti_jc_func
+ bti jc
+ ret
+endfn
+
+startfn paciasp_func
+ paciasp
+ autiasp
+ ret
+endfn
+
+startfn nohint_func
+ ret
+endfn
+
+emit_aarch64_feature_1_and
diff --git a/tools/testing/selftests/arm64/bti/trampoline.S b/tools/testing/selftests/arm64/bti/trampoline.S
new file mode 100644
index 000000000000..09beb3f361f1
--- /dev/null
+++ b/tools/testing/selftests/arm64/bti/trampoline.S
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "assembler.h"
+
+startfn call_using_br_x0
+ bti c
+ br x0
+endfn
+
+startfn call_using_br_x16
+ bti c
+ mov x16, x0
+ br x16
+endfn
+
+startfn call_using_blr
+ paciasp
+ stp x29, x30, [sp, #-16]!
+ blr x0
+ ldp x29, x30, [sp], #16
+ autiasp
+ ret
+endfn
+
+emit_aarch64_feature_1_and
diff --git a/tools/testing/selftests/arm64/fp/.gitignore b/tools/testing/selftests/arm64/fp/.gitignore
index d66f76d2a650..8362e7ec35ad 100644
--- a/tools/testing/selftests/arm64/fp/.gitignore
+++ b/tools/testing/selftests/arm64/fp/.gitignore
@@ -1,5 +1,18 @@
+fp-pidbench
+fp-ptrace
+fp-stress
fpsimd-test
+kernel-test
+rdvl-sme
+rdvl-sve
sve-probe-vls
sve-ptrace
sve-test
+ssve-test
+vec-syscfg
vlset
+za-fork
+za-ptrace
+za-test
+zt-ptrace
+zt-test
diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile
index a57009d3a0dc..d171021e4cdd 100644
--- a/tools/testing/selftests/arm64/fp/Makefile
+++ b/tools/testing/selftests/arm64/fp/Makefile
@@ -1,17 +1,54 @@
# SPDX-License-Identifier: GPL-2.0
-CFLAGS += -I../../../../../usr/include/
-TEST_GEN_PROGS := sve-ptrace sve-probe-vls
-TEST_PROGS_EXTENDED := fpsimd-test fpsimd-stress sve-test sve-stress vlset
+# A proper top_srcdir is needed by KSFT(lib.mk)
+top_srcdir = $(realpath ../../../../../)
-all: $(TEST_GEN_PROGS) $(TEST_PROGS_EXTENDED)
+CFLAGS += $(KHDR_INCLUDES)
-fpsimd-test: fpsimd-test.o
+TEST_GEN_PROGS := \
+ fp-ptrace \
+ fp-stress \
+ sve-ptrace sve-probe-vls \
+ vec-syscfg \
+ za-fork za-ptrace
+TEST_GEN_PROGS_EXTENDED := fp-pidbench fpsimd-test \
+ kernel-test \
+ rdvl-sme rdvl-sve \
+ sve-test \
+ ssve-test \
+ za-test \
+ zt-ptrace \
+ zt-test \
+ vlset
+TEST_PROGS_EXTENDED := fpsimd-stress sve-stress ssve-stress za-stress
+
+EXTRA_CLEAN += $(OUTPUT)/asm-utils.o $(OUTPUT)/rdvl.o $(OUTPUT)/za-fork-asm.o
+
+# Build with nolibc to avoid effects due to libc's clone() support
+$(OUTPUT)/fp-pidbench: fp-pidbench.S $(OUTPUT)/asm-utils.o
+ $(CC) -nostdlib $^ -o $@
+$(OUTPUT)/fp-ptrace: fp-ptrace.c fp-ptrace-asm.S
+$(OUTPUT)/fpsimd-test: fpsimd-test.S $(OUTPUT)/asm-utils.o
+ $(CC) -nostdlib $^ -o $@
+$(OUTPUT)/rdvl-sve: rdvl-sve.c $(OUTPUT)/rdvl.o
+$(OUTPUT)/rdvl-sme: rdvl-sme.c $(OUTPUT)/rdvl.o
+$(OUTPUT)/sve-ptrace: sve-ptrace.c
+$(OUTPUT)/sve-probe-vls: sve-probe-vls.c $(OUTPUT)/rdvl.o
+$(OUTPUT)/sve-test: sve-test.S $(OUTPUT)/asm-utils.o
+ $(CC) -nostdlib $^ -o $@
+$(OUTPUT)/ssve-test: sve-test.S $(OUTPUT)/asm-utils.o
+ $(CC) -DSSVE -nostdlib $^ -o $@
+$(OUTPUT)/vec-syscfg: vec-syscfg.c $(OUTPUT)/rdvl.o
+$(OUTPUT)/vlset: vlset.c
+$(OUTPUT)/za-fork: za-fork.c $(OUTPUT)/za-fork-asm.o
+ $(CC) -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
+ -include ../../../../include/nolibc/nolibc.h -I../..\
+ -static -ffreestanding -Wall $^ -o $@
+$(OUTPUT)/za-ptrace: za-ptrace.c
+$(OUTPUT)/za-test: za-test.S $(OUTPUT)/asm-utils.o
$(CC) -nostdlib $^ -o $@
-sve-ptrace: sve-ptrace.o sve-ptrace-asm.o
-sve-probe-vls: sve-probe-vls.o
-sve-test: sve-test.o
+$(OUTPUT)/zt-ptrace: zt-ptrace.c
+$(OUTPUT)/zt-test: zt-test.S $(OUTPUT)/asm-utils.o
$(CC) -nostdlib $^ -o $@
-vlset: vlset.o
include ../../lib.mk
diff --git a/tools/testing/selftests/arm64/fp/TODO b/tools/testing/selftests/arm64/fp/TODO
new file mode 100644
index 000000000000..44004e53da33
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/TODO
@@ -0,0 +1,7 @@
+- Test unsupported values in the ABIs.
+- More coverage for ptrace:
+ - Get/set of FFR.
+ - Ensure ptraced processes actually see the register state visible through
+ the ptrace interface.
+ - Big endian.
+- Test PR_SVE_VL_INHERIT after a double fork.
diff --git a/tools/testing/selftests/arm64/fp/asm-offsets.h b/tools/testing/selftests/arm64/fp/asm-offsets.h
index a180851496ec..757b2fd75dd7 100644
--- a/tools/testing/selftests/arm64/fp/asm-offsets.h
+++ b/tools/testing/selftests/arm64/fp/asm-offsets.h
@@ -3,6 +3,7 @@
#define sa_handler 0
#define sa_mask_sz 8
#define SIGUSR1 10
+#define SIGUSR2 12
#define SIGTERM 15
#define SIGINT 2
#define SIGABRT 6
diff --git a/tools/testing/selftests/arm64/fp/asm-utils.S b/tools/testing/selftests/arm64/fp/asm-utils.S
new file mode 100644
index 000000000000..4b9728efc18d
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/asm-utils.S
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2015-2021 ARM Limited.
+// Original author: Dave Martin <Dave.Martin@arm.com>
+//
+// Utility functions for assembly code.
+
+#include <asm/unistd.h>
+#include "assembler.h"
+
+// Print a single character x0 to stdout
+// Clobbers x0-x2,x8
+function putc
+ str x0, [sp, #-16]!
+
+ mov x0, #1 // STDOUT_FILENO
+ mov x1, sp
+ mov x2, #1
+ mov x8, #__NR_write
+ svc #0
+
+ add sp, sp, #16
+ ret
+endfunction
+.globl putc
+
+// Print a NUL-terminated string starting at address x0 to stdout
+// Clobbers x0-x3,x8
+function puts
+ mov x1, x0
+
+ mov x2, #0
+0: ldrb w3, [x0], #1
+ cbz w3, 1f
+ add x2, x2, #1
+ b 0b
+
+1: mov w0, #1 // STDOUT_FILENO
+ mov x8, #__NR_write
+ svc #0
+
+ ret
+endfunction
+.globl puts
+
+// Print an unsigned decimal number x0 to stdout
+// Clobbers x0-x4,x8
+function putdec
+ mov x1, sp
+ str x30, [sp, #-32]! // Result can't be > 20 digits
+
+ mov x2, #0
+ strb w2, [x1, #-1]! // Write the NUL terminator
+
+ mov x2, #10
+0: udiv x3, x0, x2 // div-mod loop to generate the digits
+ msub x0, x3, x2, x0
+ add w0, w0, #'0'
+ strb w0, [x1, #-1]!
+ mov x0, x3
+ cbnz x3, 0b
+
+ ldrb w0, [x1]
+ cbnz w0, 1f
+ mov w0, #'0' // Print "0" for 0, not ""
+ strb w0, [x1, #-1]!
+
+1: mov x0, x1
+ bl puts
+
+ ldr x30, [sp], #32
+ ret
+endfunction
+.globl putdec
+
+// Print an unsigned decimal number x0 to stdout, followed by a newline
+// Clobbers x0-x5,x8
+function putdecn
+ mov x5, x30
+
+ bl putdec
+ mov x0, #'\n'
+ bl putc
+
+ ret x5
+endfunction
+.globl putdecn
+
+// Clobbers x0-x3,x8
+function puthexb
+ str x30, [sp, #-0x10]!
+
+ mov w3, w0
+ lsr w0, w0, #4
+ bl puthexnibble
+ mov w0, w3
+
+ ldr x30, [sp], #0x10
+ // fall through to puthexnibble
+endfunction
+.globl puthexb
+
+// Clobbers x0-x2,x8
+function puthexnibble
+ and w0, w0, #0xf
+ cmp w0, #10
+ blo 1f
+ add w0, w0, #'a' - ('9' + 1)
+1: add w0, w0, #'0'
+ b putc
+endfunction
+.globl puthexnibble
+
+// x0=data in, x1=size in, clobbers x0-x5,x8
+function dumphex
+ str x30, [sp, #-0x10]!
+
+ mov x4, x0
+ mov x5, x1
+
+0: subs x5, x5, #1
+ b.lo 1f
+ ldrb w0, [x4], #1
+ bl puthexb
+ b 0b
+
+1: ldr x30, [sp], #0x10
+ ret
+endfunction
+.globl dumphex
+
+// Trivial memory copy: copy x2 bytes, starting at address x1, to address x0.
+// Clobbers x0-x3
+function memcpy
+ cmp x2, #0
+ b.eq 1f
+0: ldrb w3, [x1], #1
+ strb w3, [x0], #1
+ subs x2, x2, #1
+ b.ne 0b
+1: ret
+endfunction
+.globl memcpy
+
+// Fill x1 bytes starting at x0 with 0xae (for canary purposes)
+// Clobbers x1, x2.
+function memfill_ae
+ mov w2, #0xae
+ b memfill
+endfunction
+.globl memfill_ae
+
+// Fill x1 bytes starting at x0 with 0.
+// Clobbers x1, x2.
+function memclr
+ mov w2, #0
+endfunction
+.globl memclr
+ // fall through to memfill
+
+// Trivial memory fill: fill x1 bytes starting at address x0 with byte w2
+// Clobbers x1
+function memfill
+ cmp x1, #0
+ b.eq 1f
+
+0: strb w2, [x0], #1
+ subs x1, x1, #1
+ b.ne 0b
+
+1: ret
+endfunction
+.globl memfill
diff --git a/tools/testing/selftests/arm64/fp/assembler.h b/tools/testing/selftests/arm64/fp/assembler.h
index 8944f2189206..1fc46a5642c2 100644
--- a/tools/testing/selftests/arm64/fp/assembler.h
+++ b/tools/testing/selftests/arm64/fp/assembler.h
@@ -54,4 +54,30 @@ endfunction
.purgem \name\()_entry
.endm
+// Utility macro to print a literal string
+// Clobbers x0-x4,x8
+.macro puts string
+ .pushsection .rodata.str1.1, "aMS", @progbits, 1
+.L__puts_literal\@: .string "\string"
+ .popsection
+
+ ldr x0, =.L__puts_literal\@
+ bl puts
+.endm
+
+#define PR_SET_SHADOW_STACK_STATUS 75
+# define PR_SHADOW_STACK_ENABLE (1UL << 0)
+
+.macro enable_gcs
+ // Run with GCS
+ mov x0, PR_SET_SHADOW_STACK_STATUS
+ mov x1, PR_SHADOW_STACK_ENABLE
+ mov x2, xzr
+ mov x3, xzr
+ mov x4, xzr
+ mov x5, xzr
+ mov x8, #__NR_prctl
+ svc #0
+.endm
+
#endif /* ! ASSEMBLER_H */
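
The enable_gcs macro is simply the assembly form of a prctl() call; the
equivalent C (illustrative) would be:

	#include <sys/prctl.h>

	#define PR_SET_SHADOW_STACK_STATUS	75
	#define PR_SHADOW_STACK_ENABLE		(1UL << 0)

	/* Enable the Guarded Control Stack for the calling thread */
	prctl(PR_SET_SHADOW_STACK_STATUS, PR_SHADOW_STACK_ENABLE, 0, 0, 0);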
diff --git a/tools/testing/selftests/arm64/fp/fp-pidbench.S b/tools/testing/selftests/arm64/fp/fp-pidbench.S
new file mode 100644
index 000000000000..73830f6bc99b
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/fp-pidbench.S
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2021 ARM Limited.
+// Original author: Mark Brown <broonie@kernel.org>
+//
+// Trivial syscall overhead benchmark.
+//
+// This is implemented in asm to ensure that we don't have any issues with
+// system libraries using instructions that disrupt the test.
+
+#include <asm/unistd.h>
+#include "assembler.h"
+
+.arch_extension sve
+
+.macro test_loop per_loop
+ mov x10, x20
+ mov x8, #__NR_getpid
+ mrs x11, CNTVCT_EL0
+1:
+ \per_loop
+ svc #0
+ sub x10, x10, #1
+ cbnz x10, 1b
+
+ mrs x12, CNTVCT_EL0
+ sub x0, x12, x11
+ bl putdec
+ puts "\n"
+.endm
+
+// Main program entry point
+.globl _start
+function _start
+ puts "Iterations per test: "
+ mov x20, #10000
+ lsl x20, x20, #8
+ mov x0, x20
+ bl putdec
+ puts "\n"
+
+ // Test having never used SVE
+ puts "No SVE: "
+ test_loop
+
+ // Check for SVE support - should use hwcap but that's hard in asm
+ mrs x0, ID_AA64PFR0_EL1
+ ubfx x0, x0, #32, #4
+ cbnz x0, 1f
+ puts "System does not support SVE\n"
+ b out
+1:
+
+ // Execute a SVE instruction
+ puts "SVE VL: "
+ rdvl x0, #8
+ bl putdec
+ puts "\n"
+
+ puts "SVE used once: "
+ test_loop
+
+ // Use SVE per syscall
+ puts "SVE used per syscall: "
+ test_loop "rdvl x0, #8"
+
+ // And we're done
+out:
+ mov x0, #0
+ mov x8, #__NR_exit
+ svc #0
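
The benchmark prints raw CNTVCT_EL0 deltas rather than wall-clock time;
converting needs the counter frequency from CNTFRQ_EL0. A hedged
userspace sketch (both registers are readable from EL0 on Linux):

	static inline uint64_t read_cntvct(void)
	{
		uint64_t val;

		asm volatile("mrs %0, CNTVCT_EL0" : "=r"(val));
		return val;
	}

	static inline uint64_t read_cntfrq(void)
	{
		uint64_t val;

		asm volatile("mrs %0, CNTFRQ_EL0" : "=r"(val));
		return val;
	}

	/* Convert a CNTVCT delta to nanoseconds */
	static uint64_t delta_ns(uint64_t delta)
	{
		return (delta * 1000000000ULL) / read_cntfrq();
	}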
diff --git a/tools/testing/selftests/arm64/fp/fp-ptrace-asm.S b/tools/testing/selftests/arm64/fp/fp-ptrace-asm.S
new file mode 100644
index 000000000000..82c3ab70e1cf
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/fp-ptrace-asm.S
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2021-3 ARM Limited.
+//
+// Assembly portion of the FP ptrace test
+
+//
+// Load values from memory into registers, stop at a breakpoint so the
+// parent can inspect and modify them, then save the registers back and
+// stop at a second breakpoint
+//
+
+#include "fp-ptrace.h"
+#include "sme-inst.h"
+
+.arch_extension sve
+
+// Load and save register values with pauses for ptrace
+//
+// x0 - HAVE_ flags indicating which features are in use
+
+.globl load_and_save
+load_and_save:
+ stp x11, x12, [sp, #-0x10]!
+
+ // This should be redundant in the SVE case
+ ldr x7, =v_in
+ ldp q0, q1, [x7]
+ ldp q2, q3, [x7, #16 * 2]
+ ldp q4, q5, [x7, #16 * 4]
+ ldp q6, q7, [x7, #16 * 6]
+ ldp q8, q9, [x7, #16 * 8]
+ ldp q10, q11, [x7, #16 * 10]
+ ldp q12, q13, [x7, #16 * 12]
+ ldp q14, q15, [x7, #16 * 14]
+ ldp q16, q17, [x7, #16 * 16]
+ ldp q18, q19, [x7, #16 * 18]
+ ldp q20, q21, [x7, #16 * 20]
+ ldp q22, q23, [x7, #16 * 22]
+ ldp q24, q25, [x7, #16 * 24]
+ ldp q26, q27, [x7, #16 * 26]
+ ldp q28, q29, [x7, #16 * 28]
+ ldp q30, q31, [x7, #16 * 30]
+
+ // SME?
+ tbz x0, #HAVE_SME_SHIFT, check_sve_in
+
+ adrp x7, svcr_in
+ ldr x7, [x7, :lo12:svcr_in]
+ // SVCR is 0 by default, avoid triggering SME if not in use
+ cbz x7, check_sve_in
+ msr S3_3_C4_C2_2, x7
+
+ // ZA?
+ tbz x7, #SVCR_ZA_SHIFT, check_sm_in
+ rdsvl 11, 1
+ mov w12, #0
+ ldr x6, =za_in
+1: _ldr_za 12, 6
+ add x6, x6, x11
+ add x12, x12, #1
+ cmp x11, x12
+ bne 1b
+
+ // ZT?
+ tbz x0, #HAVE_SME2_SHIFT, check_sm_in
+ adrp x6, zt_in
+ add x6, x6, :lo12:zt_in
+ _ldr_zt 6
+
+ // In streaming mode?
+check_sm_in:
+ tbz x7, #SVCR_SM_SHIFT, check_sve_in
+
+ // Load FFR if we have FA64
+ ubfx x4, x0, #HAVE_FA64_SHIFT, #1
+ b load_sve
+
+ // SVE?
+check_sve_in:
+ tbz x0, #HAVE_SVE_SHIFT, check_fpmr_in
+ mov x4, #1
+
+load_sve:
+ ldr x7, =z_in
+ ldr z0, [x7, #0, MUL VL]
+ ldr z1, [x7, #1, MUL VL]
+ ldr z2, [x7, #2, MUL VL]
+ ldr z3, [x7, #3, MUL VL]
+ ldr z4, [x7, #4, MUL VL]
+ ldr z5, [x7, #5, MUL VL]
+ ldr z6, [x7, #6, MUL VL]
+ ldr z7, [x7, #7, MUL VL]
+ ldr z8, [x7, #8, MUL VL]
+ ldr z9, [x7, #9, MUL VL]
+ ldr z10, [x7, #10, MUL VL]
+ ldr z11, [x7, #11, MUL VL]
+ ldr z12, [x7, #12, MUL VL]
+ ldr z13, [x7, #13, MUL VL]
+ ldr z14, [x7, #14, MUL VL]
+ ldr z15, [x7, #15, MUL VL]
+ ldr z16, [x7, #16, MUL VL]
+ ldr z17, [x7, #17, MUL VL]
+ ldr z18, [x7, #18, MUL VL]
+ ldr z19, [x7, #19, MUL VL]
+ ldr z20, [x7, #20, MUL VL]
+ ldr z21, [x7, #21, MUL VL]
+ ldr z22, [x7, #22, MUL VL]
+ ldr z23, [x7, #23, MUL VL]
+ ldr z24, [x7, #24, MUL VL]
+ ldr z25, [x7, #25, MUL VL]
+ ldr z26, [x7, #26, MUL VL]
+ ldr z27, [x7, #27, MUL VL]
+ ldr z28, [x7, #28, MUL VL]
+ ldr z29, [x7, #29, MUL VL]
+ ldr z30, [x7, #30, MUL VL]
+ ldr z31, [x7, #31, MUL VL]
+
+ // FFR is not present in base SME
+ cbz x4, 1f
+ ldr x7, =ffr_in
+ ldr p0, [x7]
+ ldr x7, [x7, #0]
+ cbz x7, 1f
+ wrffr p0.b
+1:
+
+ ldr x7, =p_in
+ ldr p0, [x7, #0, MUL VL]
+ ldr p1, [x7, #1, MUL VL]
+ ldr p2, [x7, #2, MUL VL]
+ ldr p3, [x7, #3, MUL VL]
+ ldr p4, [x7, #4, MUL VL]
+ ldr p5, [x7, #5, MUL VL]
+ ldr p6, [x7, #6, MUL VL]
+ ldr p7, [x7, #7, MUL VL]
+ ldr p8, [x7, #8, MUL VL]
+ ldr p9, [x7, #9, MUL VL]
+ ldr p10, [x7, #10, MUL VL]
+ ldr p11, [x7, #11, MUL VL]
+ ldr p12, [x7, #12, MUL VL]
+ ldr p13, [x7, #13, MUL VL]
+ ldr p14, [x7, #14, MUL VL]
+ ldr p15, [x7, #15, MUL VL]
+
+ // This has to come after we set PSTATE.SM
+check_fpmr_in:
+ tbz x0, #HAVE_FPMR_SHIFT, wait_for_writes
+ adrp x7, fpmr_in
+ ldr x7, [x7, :lo12:fpmr_in]
+ msr REG_FPMR, x7
+
+wait_for_writes:
+ // Wait for the parent
+ brk #0
+
+ // Save values
+ ldr x7, =v_out
+ stp q0, q1, [x7]
+ stp q2, q3, [x7, #16 * 2]
+ stp q4, q5, [x7, #16 * 4]
+ stp q6, q7, [x7, #16 * 6]
+ stp q8, q9, [x7, #16 * 8]
+ stp q10, q11, [x7, #16 * 10]
+ stp q12, q13, [x7, #16 * 12]
+ stp q14, q15, [x7, #16 * 14]
+ stp q16, q17, [x7, #16 * 16]
+ stp q18, q19, [x7, #16 * 18]
+ stp q20, q21, [x7, #16 * 20]
+ stp q22, q23, [x7, #16 * 22]
+ stp q24, q25, [x7, #16 * 24]
+ stp q26, q27, [x7, #16 * 26]
+ stp q28, q29, [x7, #16 * 28]
+ stp q30, q31, [x7, #16 * 30]
+
+ tbz x0, #HAVE_FPMR_SHIFT, check_sme_out
+ mrs x7, REG_FPMR
+ adrp x6, fpmr_out
+ str x7, [x6, :lo12:fpmr_out]
+
+check_sme_out:
+ tbz x0, #HAVE_SME_SHIFT, check_sve_out
+
+ rdsvl 11, 1
+ adrp x6, sme_vl_out
+ str x11, [x6, :lo12:sme_vl_out]
+
+ mrs x7, S3_3_C4_C2_2
+ adrp x6, svcr_out
+ str x7, [x6, :lo12:svcr_out]
+
+ // ZA?
+ tbz x7, #SVCR_ZA_SHIFT, check_sm_out
+ mov w12, #0
+ ldr x6, =za_out
+1: _str_za 12, 6
+ add x6, x6, x11
+ add x12, x12, #1
+ cmp x11, x12
+ bne 1b
+
+ // ZT?
+ tbz x0, #HAVE_SME2_SHIFT, check_sm_out
+ adrp x6, zt_out
+ add x6, x6, :lo12:zt_out
+ _str_zt 6
+
+ // In streaming mode?
+check_sm_out:
+ tbz x7, #SVCR_SM_SHIFT, check_sve_out
+
+ // Do we have FA64 and FFR?
+ ubfx x4, x0, #HAVE_FA64_SHIFT, #1
+ b read_sve
+
+ // SVE?
+check_sve_out:
+ tbz x0, #HAVE_SVE_SHIFT, wait_for_reads
+ mov x4, #1
+
+ rdvl x7, #1
+ adrp x6, sve_vl_out
+ str x7, [x6, :lo12:sve_vl_out]
+
+read_sve:
+ ldr x7, =z_out
+ str z0, [x7, #0, MUL VL]
+ str z1, [x7, #1, MUL VL]
+ str z2, [x7, #2, MUL VL]
+ str z3, [x7, #3, MUL VL]
+ str z4, [x7, #4, MUL VL]
+ str z5, [x7, #5, MUL VL]
+ str z6, [x7, #6, MUL VL]
+ str z7, [x7, #7, MUL VL]
+ str z8, [x7, #8, MUL VL]
+ str z9, [x7, #9, MUL VL]
+ str z10, [x7, #10, MUL VL]
+ str z11, [x7, #11, MUL VL]
+ str z12, [x7, #12, MUL VL]
+ str z13, [x7, #13, MUL VL]
+ str z14, [x7, #14, MUL VL]
+ str z15, [x7, #15, MUL VL]
+ str z16, [x7, #16, MUL VL]
+ str z17, [x7, #17, MUL VL]
+ str z18, [x7, #18, MUL VL]
+ str z19, [x7, #19, MUL VL]
+ str z20, [x7, #20, MUL VL]
+ str z21, [x7, #21, MUL VL]
+ str z22, [x7, #22, MUL VL]
+ str z23, [x7, #23, MUL VL]
+ str z24, [x7, #24, MUL VL]
+ str z25, [x7, #25, MUL VL]
+ str z26, [x7, #26, MUL VL]
+ str z27, [x7, #27, MUL VL]
+ str z28, [x7, #28, MUL VL]
+ str z29, [x7, #29, MUL VL]
+ str z30, [x7, #30, MUL VL]
+ str z31, [x7, #31, MUL VL]
+
+ ldr x7, =p_out
+ str p0, [x7, #0, MUL VL]
+ str p1, [x7, #1, MUL VL]
+ str p2, [x7, #2, MUL VL]
+ str p3, [x7, #3, MUL VL]
+ str p4, [x7, #4, MUL VL]
+ str p5, [x7, #5, MUL VL]
+ str p6, [x7, #6, MUL VL]
+ str p7, [x7, #7, MUL VL]
+ str p8, [x7, #8, MUL VL]
+ str p9, [x7, #9, MUL VL]
+ str p10, [x7, #10, MUL VL]
+ str p11, [x7, #11, MUL VL]
+ str p12, [x7, #12, MUL VL]
+ str p13, [x7, #13, MUL VL]
+ str p14, [x7, #14, MUL VL]
+ str p15, [x7, #15, MUL VL]
+
+ // Only save FFR if it exists
+ cbz x4, wait_for_reads
+ ldr x7, =ffr_out
+ rdffr p0.b
+ str p0, [x7]
+
+wait_for_reads:
+ // Wait for the parent
+ brk #0
+
+ // Ensure we don't leave ourselves in streaming mode
+ tbz x0, #HAVE_SME_SHIFT, out
+ msr S3_3_C4_C2_2, xzr
+
+out:
+	ldp x11, x12, [sp], #0x10
+ ret
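
The ZA loops above walk the array one horizontal slice at a time:
"rdsvl 11, 1" leaves the streaming vector length in bytes in x11, and ZA
is exactly that many slices of that many bytes. The equivalent addressing
in C (illustrative; svl and za_buf are stand-ins):

	/* svl = streaming VL in bytes, as returned by rdsvl */
	for (unsigned int slice = 0; slice < svl; slice++) {
		unsigned char *row = za_buf + (size_t)slice * svl;
		/* load or store row <-> ZA horizontal slice 'slice' */
	}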
diff --git a/tools/testing/selftests/arm64/fp/fp-ptrace.c b/tools/testing/selftests/arm64/fp/fp-ptrace.c
new file mode 100644
index 000000000000..22c584b78be5
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/fp-ptrace.c
@@ -0,0 +1,1692 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 ARM Limited.
+ * Original author: Mark Brown <broonie@kernel.org>
+ */
+
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+#include <sys/ptrace.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <sys/wait.h>
+
+#include <linux/kernel.h>
+
+#include <asm/sigcontext.h>
+#include <asm/sve_context.h>
+#include <asm/ptrace.h>
+
+#include "kselftest.h"
+
+#include "fp-ptrace.h"
+
+#include <linux/bits.h>
+
+#define FPMR_LSCALE2_MASK GENMASK(37, 32)
+#define FPMR_NSCALE_MASK GENMASK(31, 24)
+#define FPMR_LSCALE_MASK GENMASK(22, 16)
+#define FPMR_OSC_MASK GENMASK(15, 15)
+#define FPMR_OSM_MASK GENMASK(14, 14)
+
+/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
+#ifndef NT_ARM_SVE
+#define NT_ARM_SVE 0x405
+#endif
+
+#ifndef NT_ARM_SSVE
+#define NT_ARM_SSVE 0x40b
+#endif
+
+#ifndef NT_ARM_ZA
+#define NT_ARM_ZA 0x40c
+#endif
+
+#ifndef NT_ARM_ZT
+#define NT_ARM_ZT 0x40d
+#endif
+
+#ifndef NT_ARM_FPMR
+#define NT_ARM_FPMR 0x40e
+#endif
+
+#define ARCH_VQ_MAX 256
+
+/* VL 128..2048 in powers of 2 */
+#define MAX_NUM_VLS 5
+
+/*
+ * FPMR bits we can set without doing feature checks to see if values
+ * are valid.
+ */
+#define FPMR_SAFE_BITS (FPMR_LSCALE2_MASK | FPMR_NSCALE_MASK | \
+ FPMR_LSCALE_MASK | FPMR_OSC_MASK | FPMR_OSM_MASK)
+
+#define NUM_FPR 32
+__uint128_t v_in[NUM_FPR];
+__uint128_t v_expected[NUM_FPR];
+__uint128_t v_out[NUM_FPR];
+
+char z_in[__SVE_ZREGS_SIZE(ARCH_VQ_MAX)];
+char z_expected[__SVE_ZREGS_SIZE(ARCH_VQ_MAX)];
+char z_out[__SVE_ZREGS_SIZE(ARCH_VQ_MAX)];
+
+char p_in[__SVE_PREGS_SIZE(ARCH_VQ_MAX)];
+char p_expected[__SVE_PREGS_SIZE(ARCH_VQ_MAX)];
+char p_out[__SVE_PREGS_SIZE(ARCH_VQ_MAX)];
+
+char ffr_in[__SVE_PREG_SIZE(ARCH_VQ_MAX)];
+char ffr_expected[__SVE_PREG_SIZE(ARCH_VQ_MAX)];
+char ffr_out[__SVE_PREG_SIZE(ARCH_VQ_MAX)];
+
+char za_in[ZA_SIG_REGS_SIZE(ARCH_VQ_MAX)];
+char za_expected[ZA_SIG_REGS_SIZE(ARCH_VQ_MAX)];
+char za_out[ZA_SIG_REGS_SIZE(ARCH_VQ_MAX)];
+
+char zt_in[ZT_SIG_REG_BYTES];
+char zt_expected[ZT_SIG_REG_BYTES];
+char zt_out[ZT_SIG_REG_BYTES];
+
+uint64_t fpmr_in, fpmr_expected, fpmr_out;
+
+uint64_t sve_vl_out;
+uint64_t sme_vl_out;
+uint64_t svcr_in, svcr_expected, svcr_out;
+
+void load_and_save(int flags);
+
+static bool got_alarm;
+
+static void handle_alarm(int sig, siginfo_t *info, void *context)
+{
+ got_alarm = true;
+}
+
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+	/* CONFIG_CPU_BIG_ENDIAN is kernel-only; use the compiler's macros */
+	uint64_t a = __builtin_bswap64((uint64_t)x);
+	uint64_t b = __builtin_bswap64((uint64_t)(x >> 64));
+
+	return ((__uint128_t)a << 64) | b;
+}
+#else
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+ return x;
+}
+#endif
+
+#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
+
+static bool sve_supported(void)
+{
+ return getauxval(AT_HWCAP) & HWCAP_SVE;
+}
+
+static bool sme_supported(void)
+{
+ return getauxval(AT_HWCAP2) & HWCAP2_SME;
+}
+
+static bool sme2_supported(void)
+{
+ return getauxval(AT_HWCAP2) & HWCAP2_SME2;
+}
+
+static bool fa64_supported(void)
+{
+ return getauxval(AT_HWCAP2) & HWCAP2_SME_FA64;
+}
+
+static bool fpmr_supported(void)
+{
+ return getauxval(AT_HWCAP2) & HWCAP2_FPMR;
+}
+
+static bool compare_buffer(const char *name, void *out,
+ void *expected, size_t size)
+{
+ void *tmp;
+
+ if (memcmp(out, expected, size) == 0)
+ return true;
+
+ ksft_print_msg("Mismatch in %s\n", name);
+
+ /* Did we just get zeros back? */
+ tmp = malloc(size);
+ if (!tmp) {
+ ksft_print_msg("OOM allocating %lu bytes for %s\n",
+ size, name);
+ ksft_exit_fail();
+ }
+ memset(tmp, 0, size);
+
+ if (memcmp(out, tmp, size) == 0)
+ ksft_print_msg("%s is zero\n", name);
+
+ free(tmp);
+
+ return false;
+}
+
+struct test_config {
+ int sve_vl_in;
+ int sve_vl_expected;
+ int sme_vl_in;
+ int sme_vl_expected;
+ int svcr_in;
+ int svcr_expected;
+};
+
+struct test_definition {
+ const char *name;
+ bool sve_vl_change;
+ bool (*supported)(struct test_config *config);
+ void (*set_expected_values)(struct test_config *config);
+ void (*modify_values)(pid_t child, struct test_config *test_config);
+};
+
+static int vl_in(struct test_config *config)
+{
+ int vl;
+
+ if (config->svcr_in & SVCR_SM)
+ vl = config->sme_vl_in;
+ else
+ vl = config->sve_vl_in;
+
+ return vl;
+}
+
+static int vl_expected(struct test_config *config)
+{
+ int vl;
+
+ if (config->svcr_expected & SVCR_SM)
+ vl = config->sme_vl_expected;
+ else
+ vl = config->sve_vl_expected;
+
+ return vl;
+}
+
+static void run_child(struct test_config *config)
+{
+ int ret, flags;
+
+ /* Let the parent attach to us */
+ ret = ptrace(PTRACE_TRACEME, 0, 0, 0);
+ if (ret < 0)
+ ksft_exit_fail_msg("PTRACE_TRACEME failed: %s (%d)\n",
+ strerror(errno), errno);
+
+ /* VL setup */
+ if (sve_supported()) {
+ ret = prctl(PR_SVE_SET_VL, config->sve_vl_in);
+ if (ret != config->sve_vl_in) {
+ ksft_print_msg("Failed to set SVE VL %d: %d\n",
+ config->sve_vl_in, ret);
+ }
+ }
+
+ if (sme_supported()) {
+ ret = prctl(PR_SME_SET_VL, config->sme_vl_in);
+ if (ret != config->sme_vl_in) {
+ ksft_print_msg("Failed to set SME VL %d: %d\n",
+ config->sme_vl_in, ret);
+ }
+ }
+
+ /* Load values and wait for the parent */
+ flags = 0;
+ if (sve_supported())
+ flags |= HAVE_SVE;
+ if (sme_supported())
+ flags |= HAVE_SME;
+ if (sme2_supported())
+ flags |= HAVE_SME2;
+ if (fa64_supported())
+ flags |= HAVE_FA64;
+ if (fpmr_supported())
+ flags |= HAVE_FPMR;
+
+ load_and_save(flags);
+
+ exit(0);
+}
+
+static void read_one_child_regs(pid_t child, char *name,
+ struct iovec *iov_parent,
+ struct iovec *iov_child)
+{
+ int len = iov_parent->iov_len;
+ int ret;
+
+ ret = process_vm_readv(child, iov_parent, 1, iov_child, 1, 0);
+ if (ret == -1)
+ ksft_print_msg("%s read failed: %s (%d)\n",
+ name, strerror(errno), errno);
+ else if (ret != len)
+ ksft_print_msg("Short read of %s: %d\n", name, ret);
+}
+
+static void read_child_regs(pid_t child)
+{
+ struct iovec iov_parent, iov_child;
+
+ /*
+ * Since the child fork()ed from us the buffer addresses are
+ * the same in parent and child.
+ */
+ iov_parent.iov_base = &v_out;
+ iov_parent.iov_len = sizeof(v_out);
+ iov_child.iov_base = &v_out;
+ iov_child.iov_len = sizeof(v_out);
+ read_one_child_regs(child, "FPSIMD", &iov_parent, &iov_child);
+
+ if (sve_supported() || sme_supported()) {
+ iov_parent.iov_base = &sve_vl_out;
+ iov_parent.iov_len = sizeof(sve_vl_out);
+ iov_child.iov_base = &sve_vl_out;
+ iov_child.iov_len = sizeof(sve_vl_out);
+ read_one_child_regs(child, "SVE VL", &iov_parent, &iov_child);
+
+ iov_parent.iov_base = &z_out;
+ iov_parent.iov_len = sizeof(z_out);
+ iov_child.iov_base = &z_out;
+ iov_child.iov_len = sizeof(z_out);
+ read_one_child_regs(child, "Z", &iov_parent, &iov_child);
+
+ iov_parent.iov_base = &p_out;
+ iov_parent.iov_len = sizeof(p_out);
+ iov_child.iov_base = &p_out;
+ iov_child.iov_len = sizeof(p_out);
+ read_one_child_regs(child, "P", &iov_parent, &iov_child);
+
+ iov_parent.iov_base = &ffr_out;
+ iov_parent.iov_len = sizeof(ffr_out);
+ iov_child.iov_base = &ffr_out;
+ iov_child.iov_len = sizeof(ffr_out);
+ read_one_child_regs(child, "FFR", &iov_parent, &iov_child);
+ }
+
+ if (sme_supported()) {
+ iov_parent.iov_base = &sme_vl_out;
+ iov_parent.iov_len = sizeof(sme_vl_out);
+ iov_child.iov_base = &sme_vl_out;
+ iov_child.iov_len = sizeof(sme_vl_out);
+ read_one_child_regs(child, "SME VL", &iov_parent, &iov_child);
+
+ iov_parent.iov_base = &svcr_out;
+ iov_parent.iov_len = sizeof(svcr_out);
+ iov_child.iov_base = &svcr_out;
+ iov_child.iov_len = sizeof(svcr_out);
+ read_one_child_regs(child, "SVCR", &iov_parent, &iov_child);
+
+ iov_parent.iov_base = &za_out;
+ iov_parent.iov_len = sizeof(za_out);
+ iov_child.iov_base = &za_out;
+ iov_child.iov_len = sizeof(za_out);
+ read_one_child_regs(child, "ZA", &iov_parent, &iov_child);
+ }
+
+ if (sme2_supported()) {
+ iov_parent.iov_base = &zt_out;
+ iov_parent.iov_len = sizeof(zt_out);
+ iov_child.iov_base = &zt_out;
+ iov_child.iov_len = sizeof(zt_out);
+ read_one_child_regs(child, "ZT", &iov_parent, &iov_child);
+ }
+
+ if (fpmr_supported()) {
+ iov_parent.iov_base = &fpmr_out;
+ iov_parent.iov_len = sizeof(fpmr_out);
+ iov_child.iov_base = &fpmr_out;
+ iov_child.iov_len = sizeof(fpmr_out);
+ read_one_child_regs(child, "FPMR", &iov_parent, &iov_child);
+ }
+}
+
+static bool continue_breakpoint(pid_t child,
+ enum __ptrace_request restart_type)
+{
+ struct user_pt_regs pt_regs;
+ struct iovec iov;
+ int ret;
+
+ /* Get PC */
+ iov.iov_base = &pt_regs;
+ iov.iov_len = sizeof(pt_regs);
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PRSTATUS, &iov);
+ if (ret < 0) {
+ ksft_print_msg("Failed to get PC: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ /* Skip over the BRK */
+ pt_regs.pc += 4;
+ ret = ptrace(PTRACE_SETREGSET, child, NT_PRSTATUS, &iov);
+ if (ret < 0) {
+ ksft_print_msg("Failed to skip BRK: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ /* Restart */
+ ret = ptrace(restart_type, child, 0, 0);
+ if (ret < 0) {
+ ksft_print_msg("Failed to restart child: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ return true;
+}
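+
+/*
+ * Callers typically resume the child with continue_breakpoint(child,
+ * PTRACE_CONT) each time it stops on one of the brk #0 instructions in
+ * load_and_save().
+ */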
+
+static bool check_ptrace_values_sve(pid_t child, struct test_config *config)
+{
+ struct user_sve_header *sve;
+ struct user_fpsimd_state *fpsimd;
+ struct iovec iov;
+ int ret, vq;
+ bool pass = true;
+
+ if (!sve_supported())
+ return true;
+
+ vq = __sve_vq_from_vl(config->sve_vl_in);
+
+ iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
+ iov.iov_base = malloc(iov.iov_len);
+ if (!iov.iov_base) {
+ ksft_print_msg("OOM allocating %lu byte SVE buffer\n",
+ iov.iov_len);
+ return false;
+ }
+
+ ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_SVE, &iov);
+ if (ret != 0) {
+ ksft_print_msg("Failed to read initial SVE: %s (%d)\n",
+ strerror(errno), errno);
+ pass = false;
+ goto out;
+ }
+
+ sve = iov.iov_base;
+
+ if (sve->vl != config->sve_vl_in) {
+ ksft_print_msg("Mismatch in initial SVE VL: %d != %d\n",
+ sve->vl, config->sve_vl_in);
+ pass = false;
+ }
+
+ /* If we are in streaming mode we should just read FPSIMD */
+ if ((config->svcr_in & SVCR_SM) && (sve->flags & SVE_PT_REGS_SVE)) {
+ ksft_print_msg("NT_ARM_SVE reports SVE with PSTATE.SM\n");
+ pass = false;
+ }
+
+	if (config->svcr_in & SVCR_SM) {
+		if (sve->size != sizeof(*sve)) {
+ ksft_print_msg("NT_ARM_SVE reports data with PSTATE.SM\n");
+ pass = false;
+ }
+ } else {
+ if (sve->size != SVE_PT_SIZE(vq, sve->flags)) {
+ ksft_print_msg("Mismatch in SVE header size: %d != %lu\n",
+ sve->size, SVE_PT_SIZE(vq, sve->flags));
+ pass = false;
+ }
+ }
+
+ /* The registers might be in completely different formats! */
+ if (sve->flags & SVE_PT_REGS_SVE) {
+ if (!compare_buffer("initial SVE Z",
+ iov.iov_base + SVE_PT_SVE_ZREG_OFFSET(vq, 0),
+ z_in, SVE_PT_SVE_ZREGS_SIZE(vq)))
+ pass = false;
+
+ if (!compare_buffer("initial SVE P",
+ iov.iov_base + SVE_PT_SVE_PREG_OFFSET(vq, 0),
+ p_in, SVE_PT_SVE_PREGS_SIZE(vq)))
+ pass = false;
+
+ if (!compare_buffer("initial SVE FFR",
+ iov.iov_base + SVE_PT_SVE_FFR_OFFSET(vq),
+ ffr_in, SVE_PT_SVE_PREG_SIZE(vq)))
+ pass = false;
+ } else {
+ fpsimd = iov.iov_base + SVE_PT_FPSIMD_OFFSET;
+ if (!compare_buffer("initial V via SVE", &fpsimd->vregs[0],
+ v_in, sizeof(v_in)))
+ pass = false;
+ }
+
+out:
+ free(iov.iov_base);
+ return pass;
+}
+
+static bool check_ptrace_values_ssve(pid_t child, struct test_config *config)
+{
+ struct user_sve_header *sve;
+ struct user_fpsimd_state *fpsimd;
+ struct iovec iov;
+ int ret, vq;
+ bool pass = true;
+
+ if (!sme_supported())
+ return true;
+
+ vq = __sve_vq_from_vl(config->sme_vl_in);
+
+ iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
+ iov.iov_base = malloc(iov.iov_len);
+ if (!iov.iov_base) {
+ ksft_print_msg("OOM allocating %lu byte SSVE buffer\n",
+ iov.iov_len);
+ return false;
+ }
+
+ ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_SSVE, &iov);
+ if (ret != 0) {
+ ksft_print_msg("Failed to read initial SSVE: %s (%d)\n",
+ strerror(errno), errno);
+ pass = false;
+ goto out;
+ }
+
+ sve = iov.iov_base;
+
+ if (sve->vl != config->sme_vl_in) {
+ ksft_print_msg("Mismatch in initial SSVE VL: %d != %d\n",
+ sve->vl, config->sme_vl_in);
+ pass = false;
+ }
+
+ if ((config->svcr_in & SVCR_SM) && !(sve->flags & SVE_PT_REGS_SVE)) {
+ ksft_print_msg("NT_ARM_SSVE reports FPSIMD with PSTATE.SM\n");
+ pass = false;
+ }
+
+	if (!(config->svcr_in & SVCR_SM)) {
+		if (sve->size != sizeof(*sve)) {
+ ksft_print_msg("NT_ARM_SSVE reports data without PSTATE.SM\n");
+ pass = false;
+ }
+ } else {
+ if (sve->size != SVE_PT_SIZE(vq, sve->flags)) {
+ ksft_print_msg("Mismatch in SSVE header size: %d != %lu\n",
+ sve->size, SVE_PT_SIZE(vq, sve->flags));
+ pass = false;
+ }
+ }
+
+ /* The registers might be in completely different formats! */
+ if (sve->flags & SVE_PT_REGS_SVE) {
+ if (!compare_buffer("initial SSVE Z",
+ iov.iov_base + SVE_PT_SVE_ZREG_OFFSET(vq, 0),
+ z_in, SVE_PT_SVE_ZREGS_SIZE(vq)))
+ pass = false;
+
+ if (!compare_buffer("initial SSVE P",
+ iov.iov_base + SVE_PT_SVE_PREG_OFFSET(vq, 0),
+ p_in, SVE_PT_SVE_PREGS_SIZE(vq)))
+ pass = false;
+
+ if (!compare_buffer("initial SSVE FFR",
+ iov.iov_base + SVE_PT_SVE_FFR_OFFSET(vq),
+ ffr_in, SVE_PT_SVE_PREG_SIZE(vq)))
+ pass = false;
+ } else {
+ fpsimd = iov.iov_base + SVE_PT_FPSIMD_OFFSET;
+ if (!compare_buffer("initial V via SSVE",
+ &fpsimd->vregs[0], v_in, sizeof(v_in)))
+ pass = false;
+ }
+
+out:
+ free(iov.iov_base);
+ return pass;
+}
+
+static bool check_ptrace_values_za(pid_t child, struct test_config *config)
+{
+ struct user_za_header *za;
+ struct iovec iov;
+ int ret, vq;
+ bool pass = true;
+
+ if (!sme_supported())
+ return true;
+
+ vq = __sve_vq_from_vl(config->sme_vl_in);
+
+ iov.iov_len = ZA_SIG_CONTEXT_SIZE(vq);
+ iov.iov_base = malloc(iov.iov_len);
+ if (!iov.iov_base) {
+ ksft_print_msg("OOM allocating %lu byte ZA buffer\n",
+ iov.iov_len);
+ return false;
+ }
+
+ ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_ZA, &iov);
+ if (ret != 0) {
+ ksft_print_msg("Failed to read initial ZA: %s (%d)\n",
+ strerror(errno), errno);
+ pass = false;
+ goto out;
+ }
+
+ za = iov.iov_base;
+
+ if (za->vl != config->sme_vl_in) {
+ ksft_print_msg("Mismatch in initial SME VL: %d != %d\n",
+ za->vl, config->sme_vl_in);
+ pass = false;
+ }
+
+ /* If PSTATE.ZA is not set we should just read the header */
+ if (config->svcr_in & SVCR_ZA) {
+ if (za->size != ZA_PT_SIZE(vq)) {
+ ksft_print_msg("Unexpected ZA ptrace read size: %d != %lu\n",
+ za->size, ZA_PT_SIZE(vq));
+ pass = false;
+ }
+
+ if (!compare_buffer("initial ZA",
+ iov.iov_base + ZA_PT_ZA_OFFSET,
+ za_in, ZA_PT_ZA_SIZE(vq)))
+ pass = false;
+ } else {
+ if (za->size != sizeof(*za)) {
+ ksft_print_msg("Unexpected ZA ptrace read size: %d != %lu\n",
+ za->size, sizeof(*za));
+ pass = false;
+ }
+ }
+
+out:
+ free(iov.iov_base);
+ return pass;
+}
+
+static bool check_ptrace_values_zt(pid_t child, struct test_config *config)
+{
+ uint8_t buf[512];
+ struct iovec iov;
+ int ret;
+
+ if (!sme2_supported())
+ return true;
+
+	iov.iov_base = buf;
+ iov.iov_len = ZT_SIG_REG_BYTES;
+ ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_ZT, &iov);
+ if (ret != 0) {
+ ksft_print_msg("Failed to read initial ZT: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ return compare_buffer("initial ZT", buf, zt_in, ZT_SIG_REG_BYTES);
+}
+
+static bool check_ptrace_values_fpmr(pid_t child, struct test_config *config)
+{
+ uint64_t val;
+ struct iovec iov;
+ int ret;
+
+ if (!fpmr_supported())
+ return true;
+
+ iov.iov_base = &val;
+ iov.iov_len = sizeof(val);
+ ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_FPMR, &iov);
+ if (ret != 0) {
+ ksft_print_msg("Failed to read initial FPMR: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ return compare_buffer("initial FPMR", &val, &fpmr_in, sizeof(val));
+}
+
+static bool check_ptrace_values(pid_t child, struct test_config *config)
+{
+ bool pass = true;
+ struct user_fpsimd_state fpsimd;
+ struct iovec iov;
+ int ret;
+
+ iov.iov_base = &fpsimd;
+ iov.iov_len = sizeof(fpsimd);
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PRFPREG, &iov);
+ if (ret == 0) {
+ if (!compare_buffer("initial V", &fpsimd.vregs, v_in,
+ sizeof(v_in))) {
+ pass = false;
+ }
+ } else {
+ ksft_print_msg("Failed to read initial V: %s (%d)\n",
+ strerror(errno), errno);
+ pass = false;
+ }
+
+ if (!check_ptrace_values_sve(child, config))
+ pass = false;
+
+ if (!check_ptrace_values_ssve(child, config))
+ pass = false;
+
+ if (!check_ptrace_values_za(child, config))
+ pass = false;
+
+ if (!check_ptrace_values_zt(child, config))
+ pass = false;
+
+ if (!check_ptrace_values_fpmr(child, config))
+ pass = false;
+
+ return pass;
+}
+
+static bool run_parent(pid_t child, struct test_definition *test,
+ struct test_config *config)
+{
+ int wait_status, ret;
+ pid_t pid;
+ bool pass;
+
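+	/*
+	 * A sketch of the handshake (assuming run_child(), not shown
+	 * here, raises a breakpoint after loading the register values
+	 * and again after saving them back to memory): wait for the
+	 * first stop, check the initial state via ptrace, apply any
+	 * writes, resume, wait for the second stop, read the saved
+	 * state, then detach.
+	 */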
+ /* Initial attach */
+ while (1) {
+ pid = waitpid(child, &wait_status, 0);
+ if (pid < 0) {
+ if (errno == EINTR)
+ continue;
+ ksft_exit_fail_msg("waitpid() failed: %s (%d)\n",
+ strerror(errno), errno);
+ }
+
+ if (pid == child)
+ break;
+ }
+
+ if (WIFEXITED(wait_status)) {
+ ksft_print_msg("Child exited loading values with status %d\n",
+ WEXITSTATUS(wait_status));
+ pass = false;
+ goto out;
+ }
+
+ if (WIFSIGNALED(wait_status)) {
+ ksft_print_msg("Child died from signal %d loading values\n",
+ WTERMSIG(wait_status));
+ pass = false;
+ goto out;
+ }
+
+ /* Read initial values via ptrace */
+ pass = check_ptrace_values(child, config);
+
+ /* Do whatever writes we want to do */
+ if (test->modify_values)
+ test->modify_values(child, config);
+
+ if (!continue_breakpoint(child, PTRACE_CONT))
+ goto cleanup;
+
+ while (1) {
+ pid = waitpid(child, &wait_status, 0);
+ if (pid < 0) {
+ if (errno == EINTR)
+ continue;
+ ksft_exit_fail_msg("waitpid() failed: %s (%d)\n",
+ strerror(errno), errno);
+ }
+
+ if (pid == child)
+ break;
+ }
+
+ if (WIFEXITED(wait_status)) {
+ ksft_print_msg("Child exited saving values with status %d\n",
+ WEXITSTATUS(wait_status));
+ pass = false;
+ goto out;
+ }
+
+ if (WIFSIGNALED(wait_status)) {
+ ksft_print_msg("Child died from signal %d saving values\n",
+ WTERMSIG(wait_status));
+ pass = false;
+ goto out;
+ }
+
+ /* See what happened as a result */
+ read_child_regs(child);
+
+ if (!continue_breakpoint(child, PTRACE_DETACH))
+ goto cleanup;
+
+ /* The child should exit cleanly */
+ got_alarm = false;
+ alarm(1);
+ while (1) {
+ if (got_alarm) {
+ ksft_print_msg("Wait for child timed out\n");
+ goto cleanup;
+ }
+
+ pid = waitpid(child, &wait_status, 0);
+ if (pid < 0) {
+ if (errno == EINTR)
+ continue;
+ ksft_exit_fail_msg("waitpid() failed: %s (%d)\n",
+ strerror(errno), errno);
+ }
+
+ if (pid == child)
+ break;
+ }
+ alarm(0);
+
+ if (got_alarm) {
+ ksft_print_msg("Timed out waiting for child\n");
+ pass = false;
+ goto cleanup;
+ }
+
+ if (pid == child && WIFSIGNALED(wait_status)) {
+ ksft_print_msg("Child died from signal %d cleaning up\n",
+ WTERMSIG(wait_status));
+ pass = false;
+ goto out;
+ }
+
+ if (pid == child && WIFEXITED(wait_status)) {
+ if (WEXITSTATUS(wait_status) != 0) {
+ ksft_print_msg("Child exited with error %d\n",
+ WEXITSTATUS(wait_status));
+ pass = false;
+ }
+ } else {
+ ksft_print_msg("Child did not exit cleanly\n");
+ pass = false;
+ goto cleanup;
+ }
+
+ goto out;
+
+cleanup:
+ ret = kill(child, SIGKILL);
+ if (ret != 0) {
+ ksft_print_msg("kill() failed: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ while (1) {
+ pid = waitpid(child, &wait_status, 0);
+ if (pid < 0) {
+ if (errno == EINTR)
+ continue;
+ ksft_exit_fail_msg("waitpid() failed: %s (%d)\n",
+ strerror(errno), errno);
+ }
+
+ if (pid == child)
+ break;
+ }
+
+out:
+ return pass;
+}
+
+static void fill_random(void *buf, size_t size)
+{
+ int i;
+ uint32_t *lbuf = buf;
+
+	/* random() returns at most 31 bits regardless of the size of long */
+ for (i = 0; i < size / sizeof(uint32_t); i++)
+ lbuf[i] = random();
+}
+
+static void fill_random_ffr(void *buf, size_t vq)
+{
+ uint8_t *lbuf = buf;
+ int bits, i;
+
+	/*
+	 * Only values with a continuous run of low bits set are valid
+	 * for FFR: zero the whole register, then set a random number
+	 * of low bits.
+	 */
+ memset(buf, 0, __SVE_FFR_SIZE(vq));
+
+ bits = random() % (__SVE_FFR_SIZE(vq) * 8);
+ for (i = 0; i < bits / 8; i++)
+ lbuf[i] = 0xff;
+ if (bits / 8 != __SVE_FFR_SIZE(vq))
+ lbuf[i] = (1 << (bits % 8)) - 1;
+}
+
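+/*
+ * The low 128 bits of each Z register architecturally alias the
+ * corresponding V register, so mirror V into the bottom lanes of the
+ * shadow Z buffer; arm64_cpu_to_le128() keeps the lane layout
+ * consistent on big endian.
+ */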
+static void fpsimd_to_sve(__uint128_t *v, char *z, int vl)
+{
+ int vq = __sve_vq_from_vl(vl);
+ int i;
+ __uint128_t *p;
+
+ if (!vl)
+ return;
+
+ for (i = 0; i < __SVE_NUM_ZREGS; i++) {
+ p = (__uint128_t *)&z[__SVE_ZREG_OFFSET(vq, i)];
+ *p = arm64_cpu_to_le128(v[i]);
+ }
+}
+
+static void set_initial_values(struct test_config *config)
+{
+ int vq = __sve_vq_from_vl(vl_in(config));
+ int sme_vq = __sve_vq_from_vl(config->sme_vl_in);
+
+ svcr_in = config->svcr_in;
+ svcr_expected = config->svcr_expected;
+ svcr_out = 0;
+
+ fill_random(&v_in, sizeof(v_in));
+ memcpy(v_expected, v_in, sizeof(v_in));
+ memset(v_out, 0, sizeof(v_out));
+
+ /* Changes will be handled in the test case */
+ if (sve_supported() || (config->svcr_in & SVCR_SM)) {
+ /* The low 128 bits of Z are shared with the V registers */
+ fill_random(&z_in, __SVE_ZREGS_SIZE(vq));
+ fpsimd_to_sve(v_in, z_in, vl_in(config));
+ memcpy(z_expected, z_in, __SVE_ZREGS_SIZE(vq));
+ memset(z_out, 0, sizeof(z_out));
+
+ fill_random(&p_in, __SVE_PREGS_SIZE(vq));
+ memcpy(p_expected, p_in, __SVE_PREGS_SIZE(vq));
+ memset(p_out, 0, sizeof(p_out));
+
+ if ((config->svcr_in & SVCR_SM) && !fa64_supported())
+ memset(ffr_in, 0, __SVE_PREG_SIZE(vq));
+ else
+ fill_random_ffr(&ffr_in, vq);
+ memcpy(ffr_expected, ffr_in, __SVE_PREG_SIZE(vq));
+ memset(ffr_out, 0, __SVE_PREG_SIZE(vq));
+ }
+
+ if (config->svcr_in & SVCR_ZA)
+ fill_random(za_in, ZA_SIG_REGS_SIZE(sme_vq));
+ else
+ memset(za_in, 0, ZA_SIG_REGS_SIZE(sme_vq));
+ if (config->svcr_expected & SVCR_ZA)
+ memcpy(za_expected, za_in, ZA_SIG_REGS_SIZE(sme_vq));
+ else
+ memset(za_expected, 0, ZA_SIG_REGS_SIZE(sme_vq));
+ if (sme_supported())
+ memset(za_out, 0, sizeof(za_out));
+
+ if (sme2_supported()) {
+ if (config->svcr_in & SVCR_ZA)
+ fill_random(zt_in, ZT_SIG_REG_BYTES);
+ else
+ memset(zt_in, 0, ZT_SIG_REG_BYTES);
+ if (config->svcr_expected & SVCR_ZA)
+ memcpy(zt_expected, zt_in, ZT_SIG_REG_BYTES);
+ else
+ memset(zt_expected, 0, ZT_SIG_REG_BYTES);
+ memset(zt_out, 0, sizeof(zt_out));
+ }
+
+ if (fpmr_supported()) {
+ fill_random(&fpmr_in, sizeof(fpmr_in));
+ fpmr_in &= FPMR_SAFE_BITS;
+ fpmr_expected = fpmr_in;
+ } else {
+ fpmr_in = 0;
+ fpmr_expected = 0;
+ fpmr_out = 0;
+ }
+}
+
+static bool check_memory_values(struct test_config *config)
+{
+ bool pass = true;
+ int vq, sme_vq;
+
+ if (!compare_buffer("saved V", v_out, v_expected, sizeof(v_out)))
+ pass = false;
+
+ vq = __sve_vq_from_vl(vl_expected(config));
+ sme_vq = __sve_vq_from_vl(config->sme_vl_expected);
+
+ if (svcr_out != svcr_expected) {
+ ksft_print_msg("Mismatch in saved SVCR %lx != %lx\n",
+ svcr_out, svcr_expected);
+ pass = false;
+ }
+
+ if (sve_vl_out != config->sve_vl_expected) {
+ ksft_print_msg("Mismatch in SVE VL: %ld != %d\n",
+ sve_vl_out, config->sve_vl_expected);
+ pass = false;
+ }
+
+ if (sme_vl_out != config->sme_vl_expected) {
+ ksft_print_msg("Mismatch in SME VL: %ld != %d\n",
+ sme_vl_out, config->sme_vl_expected);
+ pass = false;
+ }
+
+ if (!compare_buffer("saved Z", z_out, z_expected,
+ __SVE_ZREGS_SIZE(vq)))
+ pass = false;
+
+ if (!compare_buffer("saved P", p_out, p_expected,
+ __SVE_PREGS_SIZE(vq)))
+ pass = false;
+
+ if (!compare_buffer("saved FFR", ffr_out, ffr_expected,
+ __SVE_PREG_SIZE(vq)))
+ pass = false;
+
+ if (!compare_buffer("saved ZA", za_out, za_expected,
+ ZA_PT_ZA_SIZE(sme_vq)))
+ pass = false;
+
+ if (!compare_buffer("saved ZT", zt_out, zt_expected, ZT_SIG_REG_BYTES))
+ pass = false;
+
+ if (fpmr_out != fpmr_expected) {
+ ksft_print_msg("Mismatch in saved FPMR: %lx != %lx\n",
+ fpmr_out, fpmr_expected);
+ pass = false;
+ }
+
+ return pass;
+}
+
+static bool sve_sme_same(struct test_config *config)
+{
+ if (config->sve_vl_in != config->sve_vl_expected)
+ return false;
+
+ if (config->sme_vl_in != config->sme_vl_expected)
+ return false;
+
+ if (config->svcr_in != config->svcr_expected)
+ return false;
+
+ return true;
+}
+
+static bool sve_write_supported(struct test_config *config)
+{
+ if (!sve_supported() && !sme_supported())
+ return false;
+
+ if ((config->svcr_in & SVCR_ZA) != (config->svcr_expected & SVCR_ZA))
+ return false;
+
+ if (config->svcr_expected & SVCR_SM) {
+ if (config->sve_vl_in != config->sve_vl_expected) {
+ return false;
+ }
+
+ /* Changing the SME VL disables ZA */
+ if ((config->svcr_expected & SVCR_ZA) &&
+ (config->sme_vl_in != config->sme_vl_expected)) {
+ return false;
+ }
+ } else {
+ if (config->sme_vl_in != config->sme_vl_expected) {
+ return false;
+ }
+
+ if (!sve_supported())
+ return false;
+ }
+
+ return true;
+}
+
+static bool sve_write_fpsimd_supported(struct test_config *config)
+{
+ if (!sve_supported() && !sme_supported())
+ return false;
+
+ if ((config->svcr_in & SVCR_ZA) != (config->svcr_expected & SVCR_ZA))
+ return false;
+
+ if (config->svcr_expected & SVCR_SM)
+ return false;
+
+ if (config->sme_vl_in != config->sme_vl_expected)
+ return false;
+
+ return true;
+}
+
+static void fpsimd_write_expected(struct test_config *config)
+{
+ int vl;
+
+ fill_random(&v_expected, sizeof(v_expected));
+
+	/* The SVE registers are flushed by an FPSIMD write */
+ vl = vl_expected(config);
+
+ memset(z_expected, 0, __SVE_ZREGS_SIZE(__sve_vq_from_vl(vl)));
+ memset(p_expected, 0, __SVE_PREGS_SIZE(__sve_vq_from_vl(vl)));
+ memset(ffr_expected, 0, __SVE_PREG_SIZE(__sve_vq_from_vl(vl)));
+
+ fpsimd_to_sve(v_expected, z_expected, vl);
+}
+
+static void fpsimd_write(pid_t child, struct test_config *test_config)
+{
+ struct user_fpsimd_state fpsimd;
+ struct iovec iov;
+ int ret;
+
+ memset(&fpsimd, 0, sizeof(fpsimd));
+ memcpy(&fpsimd.vregs, v_expected, sizeof(v_expected));
+
+ iov.iov_base = &fpsimd;
+ iov.iov_len = sizeof(fpsimd);
+ ret = ptrace(PTRACE_SETREGSET, child, NT_PRFPREG, &iov);
+	if (ret != 0)
+		ksft_print_msg("Failed to write FPSIMD: %s (%d)\n",
+			       strerror(errno), errno);
+}
+
+static bool fpmr_write_supported(struct test_config *config)
+{
+ if (!fpmr_supported())
+ return false;
+
+ if (!sve_sme_same(config))
+ return false;
+
+ return true;
+}
+
+static void fpmr_write_expected(struct test_config *config)
+{
+ fill_random(&fpmr_expected, sizeof(fpmr_expected));
+ fpmr_expected &= FPMR_SAFE_BITS;
+}
+
+static void fpmr_write(pid_t child, struct test_config *config)
+{
+ struct iovec iov;
+ int ret;
+
+ iov.iov_len = sizeof(fpmr_expected);
+ iov.iov_base = &fpmr_expected;
+ ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_FPMR, &iov);
+ if (ret != 0)
+ ksft_print_msg("Failed to write FPMR: %s (%d)\n",
+ strerror(errno), errno);
+}
+
+static void sve_write_expected(struct test_config *config)
+{
+ int vl = vl_expected(config);
+ int sme_vq = __sve_vq_from_vl(config->sme_vl_expected);
+
+ if (!vl)
+ return;
+
+ fill_random(z_expected, __SVE_ZREGS_SIZE(__sve_vq_from_vl(vl)));
+ fill_random(p_expected, __SVE_PREGS_SIZE(__sve_vq_from_vl(vl)));
+
+ if ((svcr_expected & SVCR_SM) && !fa64_supported())
+ memset(ffr_expected, 0, __SVE_PREG_SIZE(sme_vq));
+ else
+ fill_random_ffr(ffr_expected, __sve_vq_from_vl(vl));
+
+ /* Share the low bits of Z with V */
+ fill_random(&v_expected, sizeof(v_expected));
+ fpsimd_to_sve(v_expected, z_expected, vl);
+
+ if (config->sme_vl_in != config->sme_vl_expected) {
+ memset(za_expected, 0, ZA_PT_ZA_SIZE(sme_vq));
+ memset(zt_expected, 0, sizeof(zt_expected));
+ }
+}
+
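+/*
+ * Write register state in SVE_PT_REGS_SVE format: a user_sve_header
+ * followed by the Z, P and FFR blocks at their architecture-defined
+ * offsets, targeting NT_ARM_SSVE when streaming mode is expected.
+ */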
+static void sve_write_sve(pid_t child, struct test_config *config)
+{
+ struct user_sve_header *sve;
+ struct iovec iov;
+ int ret, vl, vq, regset;
+
+ vl = vl_expected(config);
+ vq = __sve_vq_from_vl(vl);
+
+ if (!vl)
+ return;
+
+ iov.iov_len = SVE_PT_SIZE(vq, SVE_PT_REGS_SVE);
+ iov.iov_base = malloc(iov.iov_len);
+ if (!iov.iov_base) {
+ ksft_print_msg("Failed allocating %lu byte SVE write buffer\n",
+ iov.iov_len);
+ return;
+ }
+ memset(iov.iov_base, 0, iov.iov_len);
+
+ sve = iov.iov_base;
+ sve->size = iov.iov_len;
+ sve->flags = SVE_PT_REGS_SVE;
+ sve->vl = vl;
+
+ memcpy(iov.iov_base + SVE_PT_SVE_ZREG_OFFSET(vq, 0),
+ z_expected, SVE_PT_SVE_ZREGS_SIZE(vq));
+ memcpy(iov.iov_base + SVE_PT_SVE_PREG_OFFSET(vq, 0),
+ p_expected, SVE_PT_SVE_PREGS_SIZE(vq));
+ memcpy(iov.iov_base + SVE_PT_SVE_FFR_OFFSET(vq),
+ ffr_expected, SVE_PT_SVE_PREG_SIZE(vq));
+
+ if (svcr_expected & SVCR_SM)
+ regset = NT_ARM_SSVE;
+ else
+ regset = NT_ARM_SVE;
+
+ ret = ptrace(PTRACE_SETREGSET, child, regset, &iov);
+ if (ret != 0)
+ ksft_print_msg("Failed to write SVE: %s (%d)\n",
+ strerror(errno), errno);
+
+ free(iov.iov_base);
+}
+
+static void sve_write_fpsimd(pid_t child, struct test_config *config)
+{
+ struct user_sve_header *sve;
+ struct user_fpsimd_state *fpsimd;
+ struct iovec iov;
+ int ret, vl, vq;
+
+ vl = vl_expected(config);
+ vq = __sve_vq_from_vl(vl);
+
+ iov.iov_len = SVE_PT_SIZE(vq, SVE_PT_REGS_FPSIMD);
+ iov.iov_base = malloc(iov.iov_len);
+ if (!iov.iov_base) {
+ ksft_print_msg("Failed allocating %lu byte SVE write buffer\n",
+ iov.iov_len);
+ return;
+ }
+ memset(iov.iov_base, 0, iov.iov_len);
+
+ sve = iov.iov_base;
+ sve->size = iov.iov_len;
+ sve->flags = SVE_PT_REGS_FPSIMD;
+ sve->vl = vl;
+
+ fpsimd = iov.iov_base + SVE_PT_REGS_OFFSET;
+ memcpy(&fpsimd->vregs, v_expected, sizeof(v_expected));
+
+ ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_SVE, &iov);
+ if (ret != 0)
+ ksft_print_msg("Failed to write SVE: %s (%d)\n",
+ strerror(errno), errno);
+
+ free(iov.iov_base);
+}
+
+static bool za_write_supported(struct test_config *config)
+{
+ if ((config->svcr_in & SVCR_SM) != (config->svcr_expected & SVCR_SM))
+ return false;
+
+ return true;
+}
+
+static void za_write_expected(struct test_config *config)
+{
+ int sme_vq, sve_vq;
+
+ sme_vq = __sve_vq_from_vl(config->sme_vl_expected);
+
+ if (config->svcr_expected & SVCR_ZA) {
+ fill_random(za_expected, ZA_PT_ZA_SIZE(sme_vq));
+ } else {
+ memset(za_expected, 0, ZA_PT_ZA_SIZE(sme_vq));
+ memset(zt_expected, 0, sizeof(zt_expected));
+ }
+
+ /* Changing the SME VL flushes ZT, SVE state */
+ if (config->sme_vl_in != config->sme_vl_expected) {
+ sve_vq = __sve_vq_from_vl(vl_expected(config));
+ memset(z_expected, 0, __SVE_ZREGS_SIZE(sve_vq));
+ memset(p_expected, 0, __SVE_PREGS_SIZE(sve_vq));
+ memset(ffr_expected, 0, __SVE_PREG_SIZE(sve_vq));
+ memset(zt_expected, 0, sizeof(zt_expected));
+
+ fpsimd_to_sve(v_expected, z_expected, vl_expected(config));
+ }
+}
+
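+/*
+ * A header-only NT_ARM_ZA write disables ZA (clearing PSTATE.ZA),
+ * while supplying ZA_PT_SIZE(vq) bytes loads ZA and sets PSTATE.ZA,
+ * so the buffer size is chosen from the expected SVCR value.
+ */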
+static void za_write(pid_t child, struct test_config *config)
+{
+ struct user_za_header *za;
+ struct iovec iov;
+ int ret, vq;
+
+ vq = __sve_vq_from_vl(config->sme_vl_expected);
+
+ if (config->svcr_expected & SVCR_ZA)
+ iov.iov_len = ZA_PT_SIZE(vq);
+ else
+ iov.iov_len = sizeof(*za);
+ iov.iov_base = malloc(iov.iov_len);
+ if (!iov.iov_base) {
+ ksft_print_msg("Failed allocating %lu byte ZA write buffer\n",
+ iov.iov_len);
+ return;
+ }
+ memset(iov.iov_base, 0, iov.iov_len);
+
+ za = iov.iov_base;
+ za->size = iov.iov_len;
+ za->vl = config->sme_vl_expected;
+ if (config->svcr_expected & SVCR_ZA)
+ memcpy(iov.iov_base + ZA_PT_ZA_OFFSET, za_expected,
+ ZA_PT_ZA_SIZE(vq));
+
+ ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_ZA, &iov);
+ if (ret != 0)
+ ksft_print_msg("Failed to write ZA: %s (%d)\n",
+ strerror(errno), errno);
+
+ free(iov.iov_base);
+}
+
+static bool zt_write_supported(struct test_config *config)
+{
+ if (!sme2_supported())
+ return false;
+ if (config->sme_vl_in != config->sme_vl_expected)
+ return false;
+ if (!(config->svcr_expected & SVCR_ZA))
+ return false;
+ if ((config->svcr_in & SVCR_SM) != (config->svcr_expected & SVCR_SM))
+ return false;
+
+ return true;
+}
+
+static void zt_write_expected(struct test_config *config)
+{
+ int sme_vq;
+
+ sme_vq = __sve_vq_from_vl(config->sme_vl_expected);
+
+ if (config->svcr_expected & SVCR_ZA) {
+ fill_random(zt_expected, sizeof(zt_expected));
+ } else {
+ memset(za_expected, 0, ZA_PT_ZA_SIZE(sme_vq));
+ memset(zt_expected, 0, sizeof(zt_expected));
+ }
+}
+
+static void zt_write(pid_t child, struct test_config *config)
+{
+ struct iovec iov;
+ int ret;
+
+ iov.iov_len = ZT_SIG_REG_BYTES;
+ iov.iov_base = zt_expected;
+ ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_ZT, &iov);
+ if (ret != 0)
+ ksft_print_msg("Failed to write ZT: %s (%d)\n",
+ strerror(errno), errno);
+}
+
+/* Actually run a test */
+static void run_test(struct test_definition *test, struct test_config *config)
+{
+ pid_t child;
+ char name[1024];
+ bool pass;
+
+ if (sve_supported() && sme_supported())
+ snprintf(name, sizeof(name), "%s, SVE %d->%d, SME %d/%x->%d/%x",
+ test->name,
+ config->sve_vl_in, config->sve_vl_expected,
+ config->sme_vl_in, config->svcr_in,
+ config->sme_vl_expected, config->svcr_expected);
+ else if (sve_supported())
+ snprintf(name, sizeof(name), "%s, SVE %d->%d", test->name,
+ config->sve_vl_in, config->sve_vl_expected);
+ else if (sme_supported())
+ snprintf(name, sizeof(name), "%s, SME %d/%x->%d/%x",
+ test->name,
+ config->sme_vl_in, config->svcr_in,
+ config->sme_vl_expected, config->svcr_expected);
+ else
+ snprintf(name, sizeof(name), "%s", test->name);
+
+ if (test->supported && !test->supported(config)) {
+ ksft_test_result_skip("%s\n", name);
+ return;
+ }
+
+ set_initial_values(config);
+
+ if (test->set_expected_values)
+ test->set_expected_values(config);
+
+ child = fork();
+ if (child < 0)
+ ksft_exit_fail_msg("fork() failed: %s (%d)\n",
+ strerror(errno), errno);
+ /* run_child() never returns */
+ if (child == 0)
+ run_child(config);
+
+ pass = run_parent(child, test, config);
+ if (!check_memory_values(config))
+ pass = false;
+
+ ksft_test_result(pass, "%s\n", name);
+}
+
+static void run_tests(struct test_definition defs[], int count,
+ struct test_config *config)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ run_test(&defs[i], config);
+}
+
+static struct test_definition base_test_defs[] = {
+ {
+ .name = "No writes",
+ .supported = sve_sme_same,
+ },
+ {
+ .name = "FPSIMD write",
+ .supported = sve_sme_same,
+ .set_expected_values = fpsimd_write_expected,
+ .modify_values = fpsimd_write,
+ },
+ {
+ .name = "FPMR write",
+ .supported = fpmr_write_supported,
+ .set_expected_values = fpmr_write_expected,
+ .modify_values = fpmr_write,
+ },
+};
+
+static struct test_definition sve_test_defs[] = {
+ {
+ .name = "SVE write",
+ .supported = sve_write_supported,
+ .set_expected_values = sve_write_expected,
+ .modify_values = sve_write_sve,
+ },
+ {
+ .name = "SVE write FPSIMD format",
+ .supported = sve_write_fpsimd_supported,
+ .set_expected_values = fpsimd_write_expected,
+ .modify_values = sve_write_fpsimd,
+ },
+};
+
+static struct test_definition za_test_defs[] = {
+ {
+ .name = "ZA write",
+ .supported = za_write_supported,
+ .set_expected_values = za_write_expected,
+ .modify_values = za_write,
+ },
+};
+
+static struct test_definition zt_test_defs[] = {
+ {
+ .name = "ZT write",
+ .supported = zt_write_supported,
+ .set_expected_values = zt_write_expected,
+ .modify_values = zt_write,
+ },
+};
+
+static int sve_vls[MAX_NUM_VLS], sme_vls[MAX_NUM_VLS];
+static int sve_vl_count, sme_vl_count;
+
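+/*
+ * Enumerate supported VLs by requesting successively halved lengths;
+ * the kernel rounds each request to a supported VL, so stop as soon
+ * as a request produces the same VL as the previous one.
+ */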
+static void probe_vls(const char *name, int vls[], int *vl_count, int set_vl)
+{
+ unsigned int vq;
+ int vl;
+
+ *vl_count = 0;
+
+ for (vq = ARCH_VQ_MAX; vq > 0; vq /= 2) {
+ vl = prctl(set_vl, vq * 16);
+ if (vl == -1)
+ ksft_exit_fail_msg("SET_VL failed: %s (%d)\n",
+ strerror(errno), errno);
+
+ vl &= PR_SVE_VL_LEN_MASK;
+
+ if (*vl_count && (vl == vls[*vl_count - 1]))
+ break;
+
+ vq = sve_vq_from_vl(vl);
+
+ vls[*vl_count] = vl;
+ *vl_count += 1;
+ }
+
+ if (*vl_count > 2) {
+ /* Just use the minimum and maximum */
+ vls[1] = vls[*vl_count - 1];
+ ksft_print_msg("%d %s VLs, using %d and %d\n",
+ *vl_count, name, vls[0], vls[1]);
+ *vl_count = 2;
+ } else {
+ ksft_print_msg("%d %s VLs\n", *vl_count, name);
+ }
+}
+
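+/*
+ * Every reachable SVCR transition: a single ptrace write can flip at
+ * most one of PSTATE.SM and PSTATE.ZA, so 0 -> SM|ZA is omitted.
+ */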
+static struct {
+ int svcr_in, svcr_expected;
+} svcr_combinations[] = {
+ { .svcr_in = 0, .svcr_expected = 0, },
+ { .svcr_in = 0, .svcr_expected = SVCR_SM, },
+ { .svcr_in = 0, .svcr_expected = SVCR_ZA, },
+ /* Can't enable both SM and ZA with a single ptrace write */
+
+ { .svcr_in = SVCR_SM, .svcr_expected = 0, },
+ { .svcr_in = SVCR_SM, .svcr_expected = SVCR_SM, },
+ { .svcr_in = SVCR_SM, .svcr_expected = SVCR_ZA, },
+ { .svcr_in = SVCR_SM, .svcr_expected = SVCR_SM | SVCR_ZA, },
+
+ { .svcr_in = SVCR_ZA, .svcr_expected = 0, },
+ { .svcr_in = SVCR_ZA, .svcr_expected = SVCR_SM, },
+ { .svcr_in = SVCR_ZA, .svcr_expected = SVCR_ZA, },
+ { .svcr_in = SVCR_ZA, .svcr_expected = SVCR_SM | SVCR_ZA, },
+
+ { .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = 0, },
+ { .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = SVCR_SM, },
+ { .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = SVCR_ZA, },
+ { .svcr_in = SVCR_SM | SVCR_ZA, .svcr_expected = SVCR_SM | SVCR_ZA, },
+};
+
+static void run_sve_tests(void)
+{
+ struct test_config test_config;
+ int i, j;
+
+ if (!sve_supported())
+ return;
+
+ test_config.sme_vl_in = sme_vls[0];
+ test_config.sme_vl_expected = sme_vls[0];
+ test_config.svcr_in = 0;
+ test_config.svcr_expected = 0;
+
+ for (i = 0; i < sve_vl_count; i++) {
+ test_config.sve_vl_in = sve_vls[i];
+
+ for (j = 0; j < sve_vl_count; j++) {
+ test_config.sve_vl_expected = sve_vls[j];
+
+ run_tests(base_test_defs,
+ ARRAY_SIZE(base_test_defs),
+ &test_config);
+			run_tests(sve_test_defs,
+				  ARRAY_SIZE(sve_test_defs),
+				  &test_config);
+ }
+ }
+}
+
+static void run_sme_tests(void)
+{
+ struct test_config test_config;
+ int i, j, k;
+
+ if (!sme_supported())
+ return;
+
+ test_config.sve_vl_in = sve_vls[0];
+ test_config.sve_vl_expected = sve_vls[0];
+
+ /*
+ * Every SME VL/SVCR combination
+ */
+ for (i = 0; i < sme_vl_count; i++) {
+ test_config.sme_vl_in = sme_vls[i];
+
+ for (j = 0; j < sme_vl_count; j++) {
+ test_config.sme_vl_expected = sme_vls[j];
+
+ for (k = 0; k < ARRAY_SIZE(svcr_combinations); k++) {
+ test_config.svcr_in = svcr_combinations[k].svcr_in;
+ test_config.svcr_expected = svcr_combinations[k].svcr_expected;
+
+ run_tests(base_test_defs,
+ ARRAY_SIZE(base_test_defs),
+ &test_config);
+ run_tests(sve_test_defs,
+ ARRAY_SIZE(sve_test_defs),
+ &test_config);
+ run_tests(za_test_defs,
+ ARRAY_SIZE(za_test_defs),
+ &test_config);
+
+ if (sme2_supported())
+ run_tests(zt_test_defs,
+ ARRAY_SIZE(zt_test_defs),
+ &test_config);
+ }
+ }
+ }
+}
+
+int main(void)
+{
+ struct test_config test_config;
+ struct sigaction sa;
+ int tests, ret, tmp;
+
+ srandom(getpid());
+
+ ksft_print_header();
+
+ if (sve_supported()) {
+ probe_vls("SVE", sve_vls, &sve_vl_count, PR_SVE_SET_VL);
+
+ tests = ARRAY_SIZE(base_test_defs) +
+ ARRAY_SIZE(sve_test_defs);
+ tests *= sve_vl_count * sve_vl_count;
+ } else {
+ /* Only run the FPSIMD tests */
+ sve_vl_count = 1;
+ tests = ARRAY_SIZE(base_test_defs);
+ }
+
+ if (sme_supported()) {
+ probe_vls("SME", sme_vls, &sme_vl_count, PR_SME_SET_VL);
+
+ tmp = ARRAY_SIZE(base_test_defs) + ARRAY_SIZE(sve_test_defs)
+ + ARRAY_SIZE(za_test_defs);
+
+ if (sme2_supported())
+ tmp += ARRAY_SIZE(zt_test_defs);
+
+ tmp *= sme_vl_count * sme_vl_count;
+ tmp *= ARRAY_SIZE(svcr_combinations);
+ tests += tmp;
+ } else {
+ sme_vl_count = 1;
+ }
+
+ if (sme2_supported())
+ ksft_print_msg("SME2 supported\n");
+
+ if (fa64_supported())
+ ksft_print_msg("FA64 supported\n");
+
+ if (fpmr_supported())
+ ksft_print_msg("FPMR supported\n");
+
+ ksft_set_plan(tests);
+
+	/* Get signal handlers ready before we start any children */
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = handle_alarm;
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ sigemptyset(&sa.sa_mask);
+ ret = sigaction(SIGALRM, &sa, NULL);
+ if (ret < 0)
+ ksft_print_msg("Failed to install SIGALRM handler: %s (%d)\n",
+ strerror(errno), errno);
+
+	/*
+	 * Run the test set directly if there is no SVE or SME; when
+	 * either is present we have to pick a VL for each run instead.
+	 */
+ if (!sve_supported() && !sme_supported()) {
+ test_config.sve_vl_in = 0;
+ test_config.sve_vl_expected = 0;
+ test_config.sme_vl_in = 0;
+ test_config.sme_vl_expected = 0;
+ test_config.svcr_in = 0;
+ test_config.svcr_expected = 0;
+
+ run_tests(base_test_defs, ARRAY_SIZE(base_test_defs),
+ &test_config);
+ }
+
+ run_sve_tests();
+ run_sme_tests();
+
+ ksft_finished();
+}
diff --git a/tools/testing/selftests/arm64/fp/fp-ptrace.h b/tools/testing/selftests/arm64/fp/fp-ptrace.h
new file mode 100644
index 000000000000..c06919aaf1f7
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/fp-ptrace.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2021-3 ARM Limited.
+
+#ifndef FP_PTRACE_H
+#define FP_PTRACE_H
+
+#define SVCR_SM_SHIFT 0
+#define SVCR_ZA_SHIFT 1
+
+#define SVCR_SM (1 << SVCR_SM_SHIFT)
+#define SVCR_ZA (1 << SVCR_ZA_SHIFT)
+
+#define HAVE_SVE_SHIFT 0
+#define HAVE_SME_SHIFT 1
+#define HAVE_SME2_SHIFT 2
+#define HAVE_FA64_SHIFT 3
+#define HAVE_FPMR_SHIFT 4
+
+#define HAVE_SVE (1 << HAVE_SVE_SHIFT)
+#define HAVE_SME (1 << HAVE_SME_SHIFT)
+#define HAVE_SME2 (1 << HAVE_SME2_SHIFT)
+#define HAVE_FA64 (1 << HAVE_FA64_SHIFT)
+#define HAVE_FPMR (1 << HAVE_FPMR_SHIFT)
+
+#endif
diff --git a/tools/testing/selftests/arm64/fp/fp-stress.c b/tools/testing/selftests/arm64/fp/fp-stress.c
new file mode 100644
index 000000000000..65e01aba96ff
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/fp-stress.c
@@ -0,0 +1,660 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 ARM Limited.
+ */
+
+#define _GNU_SOURCE
+#define _POSIX_C_SOURCE 199309L
+
+#include <errno.h>
+#include <getopt.h>
+#include <poll.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/auxv.h>
+#include <sys/epoll.h>
+#include <sys/prctl.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <sys/wait.h>
+#include <asm/hwcap.h>
+
+#include "kselftest.h"
+
+#define MAX_VLS 16
+
+#define SIGNAL_INTERVAL_MS 25
+#define LOG_INTERVALS (1000 / SIGNAL_INTERVAL_MS)
+
+struct child_data {
+ char *name, *output;
+ pid_t pid;
+ int stdout;
+ bool output_seen;
+ bool exited;
+ int exit_status;
+};
+
+static int epoll_fd;
+static struct child_data *children;
+static struct epoll_event *evs;
+static int tests;
+static int num_children;
+static bool terminate;
+
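+/*
+ * Children block reading this pipe before exec()ing their test binary;
+ * once all of them have been spawned the parent closes both ends so
+ * the tests start together.
+ */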
+static int startup_pipe[2];
+
+static int num_processors(void)
+{
+	long nproc = sysconf(_SC_NPROCESSORS_CONF);
+
+	if (nproc < 0) {
+		perror("Unable to read number of processors");
+ exit(EXIT_FAILURE);
+ }
+
+ return nproc;
+}
+
+static void child_start(struct child_data *child, const char *program)
+{
+ int ret, pipefd[2], i;
+ struct epoll_event ev;
+
+ ret = pipe(pipefd);
+ if (ret != 0)
+ ksft_exit_fail_msg("Failed to create stdout pipe: %s (%d)\n",
+ strerror(errno), errno);
+
+ child->pid = fork();
+ if (child->pid == -1)
+ ksft_exit_fail_msg("fork() failed: %s (%d)\n",
+ strerror(errno), errno);
+
+ if (!child->pid) {
+ /*
+ * In child, replace stdout with the pipe, errors to
+ * stderr from here as kselftest prints to stdout.
+ */
+ ret = dup2(pipefd[1], 1);
+ if (ret == -1) {
+ printf("dup2() %d\n", errno);
+ exit(EXIT_FAILURE);
+ }
+
+ /*
+ * Duplicate the read side of the startup pipe to
+ * FD 3 so we can close everything else.
+ */
+ ret = dup2(startup_pipe[0], 3);
+ if (ret == -1) {
+ printf("dup2() %d\n", errno);
+ exit(EXIT_FAILURE);
+ }
+
+ /*
+ * Very dumb mechanism to clean open FDs other than
+ * stdio. We don't want O_CLOEXEC for the pipes...
+ */
+ for (i = 4; i < 8192; i++)
+ close(i);
+
+ /*
+ * Read from the startup pipe, there should be no data
+ * and we should block until it is closed. We just
+ * carry-on on error since this isn't super critical.
+ */
+ ret = read(3, &i, sizeof(i));
+ if (ret < 0)
+ printf("read(startp pipe) failed: %s (%d)\n",
+ strerror(errno), errno);
+ if (ret > 0)
+ printf("%d bytes of data on startup pipe\n", ret);
+ close(3);
+
+ ret = execl(program, program, NULL);
+ printf("execl(%s) failed: %d (%s)\n",
+ program, errno, strerror(errno));
+
+ exit(EXIT_FAILURE);
+ } else {
+ /*
+ * In parent, remember the child and close our copy of the
+ * write side of stdout.
+ */
+ close(pipefd[1]);
+ child->stdout = pipefd[0];
+ child->output = NULL;
+ child->exited = false;
+ child->output_seen = false;
+
+ ev.events = EPOLLIN | EPOLLHUP;
+ ev.data.ptr = child;
+
+ ret = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, child->stdout, &ev);
+ if (ret < 0) {
+ ksft_exit_fail_msg("%s EPOLL_CTL_ADD failed: %s (%d)\n",
+ child->name, strerror(errno), errno);
+ }
+ }
+}
+
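+/*
+ * Read whatever output the child has produced, printing complete lines
+ * prefixed with the child's name; a trailing partial line is stashed
+ * in child->output and prepended to the next read.
+ */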
+static bool child_output_read(struct child_data *child)
+{
+ char read_data[1024];
+ char work[1024];
+ int ret, len, cur_work, cur_read;
+
+ ret = read(child->stdout, read_data, sizeof(read_data));
+ if (ret < 0) {
+ if (errno == EINTR)
+ return true;
+
+ ksft_print_msg("%s: read() failed: %s (%d)\n",
+ child->name, strerror(errno),
+ errno);
+ return false;
+ }
+ len = ret;
+
+ child->output_seen = true;
+
+ /* Pick up any partial read */
+ if (child->output) {
+ strncpy(work, child->output, sizeof(work) - 1);
+ cur_work = strnlen(work, sizeof(work));
+ free(child->output);
+ child->output = NULL;
+ } else {
+ cur_work = 0;
+ }
+
+ cur_read = 0;
+ while (cur_read < len) {
+ work[cur_work] = read_data[cur_read++];
+
+ if (work[cur_work] == '\n') {
+ work[cur_work] = '\0';
+ ksft_print_msg("%s: %s\n", child->name, work);
+ cur_work = 0;
+ } else {
+ cur_work++;
+ }
+ }
+
+ if (cur_work) {
+ work[cur_work] = '\0';
+ ret = asprintf(&child->output, "%s", work);
+ if (ret == -1)
+ ksft_exit_fail_msg("Out of memory\n");
+ }
+
+ return false;
+}
+
+static void child_output(struct child_data *child, uint32_t events,
+ bool flush)
+{
+ bool read_more;
+
+ if (events & EPOLLIN) {
+ do {
+ read_more = child_output_read(child);
+ } while (read_more);
+ }
+
+ if (events & EPOLLHUP) {
+ close(child->stdout);
+ child->stdout = -1;
+ flush = true;
+ }
+
+ if (flush && child->output) {
+ ksft_print_msg("%s: %s<EOF>\n", child->name, child->output);
+ free(child->output);
+ child->output = NULL;
+ }
+}
+
+static void child_tickle(struct child_data *child)
+{
+ if (child->output_seen && !child->exited)
+ kill(child->pid, SIGUSR1);
+}
+
+static void child_stop(struct child_data *child)
+{
+ if (!child->exited)
+ kill(child->pid, SIGTERM);
+}
+
+static void child_cleanup(struct child_data *child)
+{
+ pid_t ret;
+ int status;
+ bool fail = false;
+
+ if (!child->exited) {
+ do {
+ ret = waitpid(child->pid, &status, 0);
+ if (ret == -1 && errno == EINTR)
+ continue;
+
+ if (ret == -1) {
+ ksft_print_msg("waitpid(%d) failed: %s (%d)\n",
+ child->pid, strerror(errno),
+ errno);
+ fail = true;
+ break;
+ }
+ } while (!WIFEXITED(status));
+ child->exit_status = WEXITSTATUS(status);
+ }
+
+ if (!child->output_seen) {
+ ksft_print_msg("%s no output seen\n", child->name);
+ fail = true;
+ }
+
+ if (child->exit_status != 0) {
+ ksft_print_msg("%s exited with error code %d\n",
+ child->name, child->exit_status);
+ fail = true;
+ }
+
+ ksft_test_result(!fail, "%s\n", child->name);
+}
+
+static void handle_child_signal(int sig, siginfo_t *info, void *context)
+{
+ int i;
+ bool found = false;
+
+ for (i = 0; i < num_children; i++) {
+ if (children[i].pid == info->si_pid) {
+ children[i].exited = true;
+ children[i].exit_status = info->si_status;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ ksft_print_msg("SIGCHLD for unknown PID %d with status %d\n",
+ info->si_pid, info->si_status);
+}
+
+static void handle_exit_signal(int sig, siginfo_t *info, void *context)
+{
+ int i;
+
+ /* If we're already exiting then don't signal again */
+ if (terminate)
+ return;
+
+ ksft_print_msg("Got signal, exiting...\n");
+
+ terminate = true;
+
+ /*
+ * This should be redundant, the main loop should clean up
+ * after us, but for safety stop everything we can here.
+ */
+ for (i = 0; i < num_children; i++)
+ child_stop(&children[i]);
+}
+
+static void start_fpsimd(struct child_data *child, int cpu, int copy)
+{
+ int ret;
+
+ ret = asprintf(&child->name, "FPSIMD-%d-%d", cpu, copy);
+ if (ret == -1)
+ ksft_exit_fail_msg("asprintf() failed\n");
+
+ child_start(child, "./fpsimd-test");
+
+ ksft_print_msg("Started %s\n", child->name);
+}
+
+static void start_kernel(struct child_data *child, int cpu, int copy)
+{
+ int ret;
+
+ ret = asprintf(&child->name, "KERNEL-%d-%d", cpu, copy);
+ if (ret == -1)
+ ksft_exit_fail_msg("asprintf() failed\n");
+
+ child_start(child, "./kernel-test");
+
+ ksft_print_msg("Started %s\n", child->name);
+}
+
+static void start_sve(struct child_data *child, int vl, int cpu)
+{
+ int ret;
+
+ ret = prctl(PR_SVE_SET_VL, vl | PR_SVE_VL_INHERIT);
+ if (ret < 0)
+ ksft_exit_fail_msg("Failed to set SVE VL %d\n", vl);
+
+ ret = asprintf(&child->name, "SVE-VL-%d-%d", vl, cpu);
+ if (ret == -1)
+ ksft_exit_fail_msg("asprintf() failed\n");
+
+ child_start(child, "./sve-test");
+
+ ksft_print_msg("Started %s\n", child->name);
+}
+
+static void start_ssve(struct child_data *child, int vl, int cpu)
+{
+ int ret;
+
+ ret = asprintf(&child->name, "SSVE-VL-%d-%d", vl, cpu);
+ if (ret == -1)
+ ksft_exit_fail_msg("asprintf() failed\n");
+
+ ret = prctl(PR_SME_SET_VL, vl | PR_SME_VL_INHERIT);
+ if (ret < 0)
+ ksft_exit_fail_msg("Failed to set SME VL %d\n", ret);
+
+ child_start(child, "./ssve-test");
+
+ ksft_print_msg("Started %s\n", child->name);
+}
+
+static void start_za(struct child_data *child, int vl, int cpu)
+{
+ int ret;
+
+	ret = prctl(PR_SME_SET_VL, vl | PR_SME_VL_INHERIT);
+	if (ret < 0)
+		ksft_exit_fail_msg("Failed to set SME VL %d\n", vl);
+
+ ret = asprintf(&child->name, "ZA-VL-%d-%d", vl, cpu);
+ if (ret == -1)
+ ksft_exit_fail_msg("asprintf() failed\n");
+
+ child_start(child, "./za-test");
+
+ ksft_print_msg("Started %s\n", child->name);
+}
+
+static void start_zt(struct child_data *child, int cpu)
+{
+ int ret;
+
+ ret = asprintf(&child->name, "ZT-%d", cpu);
+ if (ret == -1)
+ ksft_exit_fail_msg("asprintf() failed\n");
+
+ child_start(child, "./zt-test");
+
+ ksft_print_msg("Started %s\n", child->name);
+}
+
+static void probe_vls(int vls[], int *vl_count, int set_vl)
+{
+ unsigned int vq;
+ int vl;
+
+ *vl_count = 0;
+
+ for (vq = SVE_VQ_MAX; vq > 0; vq /= 2) {
+ vl = prctl(set_vl, vq * 16);
+ if (vl == -1)
+ ksft_exit_fail_msg("SET_VL failed: %s (%d)\n",
+ strerror(errno), errno);
+
+ vl &= PR_SVE_VL_LEN_MASK;
+
+ if (*vl_count && (vl == vls[*vl_count - 1]))
+ break;
+
+ vq = sve_vq_from_vl(vl);
+
+ vls[*vl_count] = vl;
+ *vl_count += 1;
+ }
+}
+
+/* Handle any pending output without blocking */
+static void drain_output(bool flush)
+{
+ int ret = 1;
+ int i;
+
+ while (ret > 0) {
+ ret = epoll_wait(epoll_fd, evs, tests, 0);
+ if (ret < 0) {
+ if (errno == EINTR)
+ continue;
+ ksft_print_msg("epoll_wait() failed: %s (%d)\n",
+ strerror(errno), errno);
+ }
+
+ for (i = 0; i < ret; i++)
+ child_output(evs[i].data.ptr, evs[i].events, flush);
+ }
+}
+
+static const struct option options[] = {
+ { "timeout", required_argument, NULL, 't' },
+ { }
+};
+
+int main(int argc, char **argv)
+{
+ int ret;
+ int timeout = 10 * (1000 / SIGNAL_INTERVAL_MS);
+ int poll_interval = 5000;
+ int cpus, i, j, c;
+ int sve_vl_count, sme_vl_count;
+ bool all_children_started = false;
+ int seen_children;
+ int sve_vls[MAX_VLS], sme_vls[MAX_VLS];
+ bool have_sme2;
+ struct sigaction sa;
+
+ while ((c = getopt_long(argc, argv, "t:", options, NULL)) != -1) {
+ switch (c) {
+ case 't':
+ ret = sscanf(optarg, "%d", &timeout);
+ if (ret != 1)
+ ksft_exit_fail_msg("Failed to parse timeout %s\n",
+ optarg);
+ break;
+ default:
+ ksft_exit_fail_msg("Unknown argument\n");
+ }
+ }
+
+ cpus = num_processors();
+ tests = 0;
+
+ if (getauxval(AT_HWCAP) & HWCAP_SVE) {
+ probe_vls(sve_vls, &sve_vl_count, PR_SVE_SET_VL);
+ tests += sve_vl_count * cpus;
+ } else {
+ sve_vl_count = 0;
+ }
+
+ if (getauxval(AT_HWCAP2) & HWCAP2_SME) {
+ probe_vls(sme_vls, &sme_vl_count, PR_SME_SET_VL);
+ tests += sme_vl_count * cpus * 2;
+ } else {
+ sme_vl_count = 0;
+ }
+
+ if (getauxval(AT_HWCAP2) & HWCAP2_SME2) {
+ tests += cpus;
+ have_sme2 = true;
+ } else {
+ have_sme2 = false;
+ }
+
+ tests += cpus * 2;
+
+ ksft_print_header();
+ ksft_set_plan(tests);
+
+ ksft_print_msg("%d CPUs, %d SVE VLs, %d SME VLs, SME2 %s\n",
+ cpus, sve_vl_count, sme_vl_count,
+ have_sme2 ? "present" : "absent");
+
+ if (timeout > 0)
+ ksft_print_msg("Will run for %d\n", timeout);
+ else
+ ksft_print_msg("Will run until terminated\n");
+
+	children = calloc(tests, sizeof(*children));
+ if (!children)
+ ksft_exit_fail_msg("Unable to allocate child data\n");
+
+ ret = epoll_create1(EPOLL_CLOEXEC);
+ if (ret < 0)
+ ksft_exit_fail_msg("epoll_create1() failed: %s (%d)\n",
+				   strerror(errno), errno);
+ epoll_fd = ret;
+
+ /* Create a pipe which children will block on before execing */
+ ret = pipe(startup_pipe);
+ if (ret != 0)
+ ksft_exit_fail_msg("Failed to create startup pipe: %s (%d)\n",
+ strerror(errno), errno);
+
+	/* Get signal handlers ready before we start any children */
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = handle_exit_signal;
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ sigemptyset(&sa.sa_mask);
+ ret = sigaction(SIGINT, &sa, NULL);
+ if (ret < 0)
+ ksft_print_msg("Failed to install SIGINT handler: %s (%d)\n",
+ strerror(errno), errno);
+ ret = sigaction(SIGTERM, &sa, NULL);
+ if (ret < 0)
+ ksft_print_msg("Failed to install SIGTERM handler: %s (%d)\n",
+ strerror(errno), errno);
+ sa.sa_sigaction = handle_child_signal;
+ ret = sigaction(SIGCHLD, &sa, NULL);
+ if (ret < 0)
+ ksft_print_msg("Failed to install SIGCHLD handler: %s (%d)\n",
+ strerror(errno), errno);
+
+ evs = calloc(tests, sizeof(*evs));
+ if (!evs)
+ ksft_exit_fail_msg("Failed to allocate %d epoll events\n",
+ tests);
+
+ for (i = 0; i < cpus; i++) {
+ start_fpsimd(&children[num_children++], i, 0);
+ start_kernel(&children[num_children++], i, 0);
+
+ for (j = 0; j < sve_vl_count; j++)
+ start_sve(&children[num_children++], sve_vls[j], i);
+
+ for (j = 0; j < sme_vl_count; j++) {
+ start_ssve(&children[num_children++], sme_vls[j], i);
+ start_za(&children[num_children++], sme_vls[j], i);
+ }
+
+ if (have_sme2)
+ start_zt(&children[num_children++], i);
+ }
+
+ /*
+ * All children started, close the startup pipe and let them
+ * run.
+ */
+ close(startup_pipe[0]);
+ close(startup_pipe[1]);
+
+ for (;;) {
+ /* Did we get a signal asking us to exit? */
+ if (terminate)
+ break;
+
+		/*
+		 * The timeout is counted in poll intervals with no
+		 * output. The tests print during startup and then go
+		 * silent while running, so this ensures they have all
+		 * run far enough to install their signal handlers.
+		 * That is especially useful in emulation, where we
+		 * will both be slow and likely have a large set of
+		 * VLs.
+		 */
+ ret = epoll_wait(epoll_fd, evs, tests, poll_interval);
+ if (ret < 0) {
+ if (errno == EINTR)
+ continue;
+ ksft_exit_fail_msg("epoll_wait() failed: %s (%d)\n",
+ strerror(errno), errno);
+ }
+
+ /* Output? */
+ if (ret > 0) {
+ for (i = 0; i < ret; i++) {
+ child_output(evs[i].data.ptr, evs[i].events,
+ false);
+ }
+ continue;
+ }
+
+ /* Otherwise epoll_wait() timed out */
+
+ /*
+ * If the child processes have not produced output they
+		 * aren't actually running the tests yet.
+ */
+ if (!all_children_started) {
+ seen_children = 0;
+
+ for (i = 0; i < num_children; i++)
+ if (children[i].output_seen ||
+ children[i].exited)
+ seen_children++;
+
+ if (seen_children != num_children) {
+ ksft_print_msg("Waiting for %d children\n",
+ num_children - seen_children);
+ continue;
+ }
+
+ all_children_started = true;
+ poll_interval = SIGNAL_INTERVAL_MS;
+ }
+
+ if ((timeout % LOG_INTERVALS) == 0)
+ ksft_print_msg("Sending signals, timeout remaining: %d\n",
+ timeout);
+
+ for (i = 0; i < num_children; i++)
+ child_tickle(&children[i]);
+
+ /* Negative timeout means run indefinitely */
+ if (timeout < 0)
+ continue;
+ if (--timeout == 0)
+ break;
+ }
+
+ ksft_print_msg("Finishing up...\n");
+ terminate = true;
+
+ for (i = 0; i < tests; i++)
+ child_stop(&children[i]);
+
+ drain_output(false);
+
+ for (i = 0; i < tests; i++)
+ child_cleanup(&children[i]);
+
+ drain_output(true);
+
+ ksft_finished();
+}
diff --git a/tools/testing/selftests/arm64/fp/fpsimd-test.S b/tools/testing/selftests/arm64/fp/fpsimd-test.S
index 1c5556bdd11d..f89d67894c2e 100644
--- a/tools/testing/selftests/arm64/fp/fpsimd-test.S
+++ b/tools/testing/selftests/arm64/fp/fpsimd-test.S
@@ -33,131 +33,6 @@
define_accessor setv, NVR, _vldr
define_accessor getv, NVR, _vstr
-// Print a single character x0 to stdout
-// Clobbers x0-x2,x8
-function putc
- str x0, [sp, #-16]!
-
- mov x0, #1 // STDOUT_FILENO
- mov x1, sp
- mov x2, #1
- mov x8, #__NR_write
- svc #0
-
- add sp, sp, #16
- ret
-endfunction
-
-// Print a NUL-terminated string starting at address x0 to stdout
-// Clobbers x0-x3,x8
-function puts
- mov x1, x0
-
- mov x2, #0
-0: ldrb w3, [x0], #1
- cbz w3, 1f
- add x2, x2, #1
- b 0b
-
-1: mov w0, #1 // STDOUT_FILENO
- mov x8, #__NR_write
- svc #0
-
- ret
-endfunction
-
-// Utility macro to print a literal string
-// Clobbers x0-x4,x8
-.macro puts string
- .pushsection .rodata.str1.1, "aMS", 1
-.L__puts_literal\@: .string "\string"
- .popsection
-
- ldr x0, =.L__puts_literal\@
- bl puts
-.endm
-
-// Print an unsigned decimal number x0 to stdout
-// Clobbers x0-x4,x8
-function putdec
- mov x1, sp
- str x30, [sp, #-32]! // Result can't be > 20 digits
-
- mov x2, #0
- strb w2, [x1, #-1]! // Write the NUL terminator
-
- mov x2, #10
-0: udiv x3, x0, x2 // div-mod loop to generate the digits
- msub x0, x3, x2, x0
- add w0, w0, #'0'
- strb w0, [x1, #-1]!
- mov x0, x3
- cbnz x3, 0b
-
- ldrb w0, [x1]
- cbnz w0, 1f
- mov w0, #'0' // Print "0" for 0, not ""
- strb w0, [x1, #-1]!
-
-1: mov x0, x1
- bl puts
-
- ldr x30, [sp], #32
- ret
-endfunction
-
-// Print an unsigned decimal number x0 to stdout, followed by a newline
-// Clobbers x0-x5,x8
-function putdecn
- mov x5, x30
-
- bl putdec
- mov x0, #'\n'
- bl putc
-
- ret x5
-endfunction
-
-
-// Clobbers x0-x3,x8
-function puthexb
- str x30, [sp, #-0x10]!
-
- mov w3, w0
- lsr w0, w0, #4
- bl puthexnibble
- mov w0, w3
-
- ldr x30, [sp], #0x10
- // fall through to puthexnibble
-endfunction
-// Clobbers x0-x2,x8
-function puthexnibble
- and w0, w0, #0xf
- cmp w0, #10
- blo 1f
- add w0, w0, #'a' - ('9' + 1)
-1: add w0, w0, #'0'
- b putc
-endfunction
-
-// x0=data in, x1=size in, clobbers x0-x5,x8
-function dumphex
- str x30, [sp, #-0x10]!
-
- mov x4, x0
- mov x5, x1
-
-0: subs x5, x5, #1
- b.lo 1f
- ldrb w0, [x4], #1
- bl puthexb
- b 0b
-
-1: ldr x30, [sp], #0x10
- ret
-endfunction
-
// Declare some storage space to shadow the SVE register contents:
.pushsection .text
.data
@@ -168,18 +43,6 @@ scratch:
.space MAXVL_B
.popsection
-// Trivial memory copy: copy x2 bytes, starting at address x1, to address x0.
-// Clobbers x0-x3
-function memcpy
- cmp x2, #0
- b.eq 1f
-0: ldrb w3, [x1], #1
- strb w3, [x0], #1
- subs x2, x2, #1
- b.ne 0b
-1: ret
-endfunction
-
// Generate a test pattern for storage in SVE registers
// x0: pid (16 bits)
// x1: register number (6 bits)
@@ -227,33 +90,6 @@ function setup_vreg
ret x4
endfunction
-// Fill x1 bytes starting at x0 with 0xae (for canary purposes)
-// Clobbers x1, x2.
-function memfill_ae
- mov w2, #0xae
- b memfill
-endfunction
-
-// Fill x1 bytes starting at x0 with 0.
-// Clobbers x1, x2.
-function memclr
- mov w2, #0
-endfunction
- // fall through to memfill
-
-// Trivial memory fill: fill x1 bytes starting at address x0 with byte w2
-// Clobbers x1
-function memfill
- cmp x1, #0
- b.eq 1f
-
-0: strb w2, [x0], #1
- subs x1, x1, #1
- b.ne 0b
-
-1: ret
-endfunction
-
// Trivial memory compare: compare x2 bytes starting at address x0 with
// bytes starting at address x1.
// Returns only if all bytes match; otherwise, the program is aborted.
@@ -298,8 +134,7 @@ function check_vreg
b memcmp
endfunction
-// Any SVE register modified here can cause corruption in the main
-// thread -- but *only* the registers modified here.
+// Modify live register state, the signal return will undo our changes
function irritator_handler
// Increment the irritation signal count (x23):
ldr x0, [x2, #ucontext_regs + 8 * 23]
@@ -307,7 +142,6 @@ function irritator_handler
str x0, [x2, #ucontext_regs + 8 * 23]
// Corrupt some random V-regs
- adr x0, .text + (irritator_handler - .text) / 16 * 16
movi v0.8b, #7
movi v9.16b, #9
movi v31.8b, #31
@@ -315,6 +149,15 @@ function irritator_handler
ret
endfunction
+function tickle_handler
+ // Increment the signal count (x23):
+ ldr x0, [x2, #ucontext_regs + 8 * 23]
+ add x0, x0, #1
+ str x0, [x2, #ucontext_regs + 8 * 23]
+
+ ret
+endfunction
+
function terminate_handler
mov w21, w0
mov x20, x2
@@ -370,7 +213,32 @@ endfunction
// Main program entry point
.globl _start
function _start
-_start:
+ enable_gcs
+
+ mov x23, #0 // signal count
+
+ mov w0, #SIGINT
+ adr x1, terminate_handler
+ mov w2, #SA_SIGINFO
+ bl setsignal
+
+ mov w0, #SIGTERM
+ adr x1, terminate_handler
+ mov w2, #SA_SIGINFO
+ bl setsignal
+
+ mov w0, #SIGUSR1
+ adr x1, irritator_handler
+ mov w2, #SA_SIGINFO
+ orr w2, w2, #SA_NODEFER
+ bl setsignal
+
+ mov w0, #SIGUSR2
+ adr x1, tickle_handler
+ mov w2, #SA_SIGINFO
+ orr w2, w2, #SA_NODEFER
+ bl setsignal
+
// Sanity-check and report the vector length
mov x19, #128
@@ -401,24 +269,6 @@ _start:
mov x0, x20
bl putdecn
- mov x23, #0 // Irritation signal count
-
- mov w0, #SIGINT
- adr x1, terminate_handler
- mov w2, #SA_SIGINFO
- bl setsignal
-
- mov w0, #SIGTERM
- adr x1, terminate_handler
- mov w2, #SA_SIGINFO
- bl setsignal
-
- mov w0, #SIGUSR1
- adr x1, irritator_handler
- mov w2, #SA_SIGINFO
- orr w2, w2, #SA_NODEFER
- bl setsignal
-
mov x22, #0 // generation number, increments per iteration
.Ltest_loop:
@@ -457,7 +307,7 @@ function barf
mov x11, x1 // actual data
mov x12, x2 // data size
- puts "Mistatch: PID="
+ puts "Mismatch: PID="
mov x0, x20
bl putdec
puts ", iteration="
diff --git a/tools/testing/selftests/arm64/fp/kernel-test.c b/tools/testing/selftests/arm64/fp/kernel-test.c
new file mode 100644
index 000000000000..0c40007d1282
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/kernel-test.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 ARM Limited.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/socket.h>
+
+#include <linux/kernel.h>
+#include <linux/if_alg.h>
+
+#define DATA_SIZE (16 * 4096)
+
+static int base, sock;
+
+static int digest_len;
+static char *ref;
+static char *digest;
+static char *alg_name;
+
+static struct iovec data_iov;
+static int zerocopy[2];
+static int sigs;
+static int iter;
+
+static void handle_exit_signal(int sig, siginfo_t *info, void *context)
+{
+ printf("Terminated by signal %d, iterations=%d, signals=%d\n",
+ sig, iter, sigs);
+ exit(0);
+}
+
+static void handle_kick_signal(int sig, siginfo_t *info, void *context)
+{
+ sigs++;
+}
+
+static char *drivers[] = {
+ "sha1-ce",
+ "sha224-arm64",
+ "sha224-arm64-neon",
+ "sha224-ce",
+ "sha256-arm64",
+ "sha256-arm64-neon",
+ "sha256-ce",
+ "sha384-ce",
+ "sha512-ce",
+ "sha3-224-ce",
+ "sha3-256-ce",
+ "sha3-384-ce",
+ "sha3-512-ce",
+ "sm3-ce",
+ "sm3-neon",
+};
+
+static bool create_socket(void)
+{
+ FILE *proc;
+ struct sockaddr_alg addr;
+ char buf[1024];
+ char *c, *driver_name;
+ bool is_shash, match;
+ int ret, i;
+
+ ret = socket(AF_ALG, SOCK_SEQPACKET, 0);
+ if (ret < 0) {
+ if (errno == EAFNOSUPPORT) {
+ printf("AF_ALG not supported\n");
+ return false;
+ }
+
+ printf("Failed to create AF_ALG socket: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+ base = ret;
+
+ memset(&addr, 0, sizeof(addr));
+ addr.salg_family = AF_ALG;
+ strncpy((char *)addr.salg_type, "hash", sizeof(addr.salg_type));
+
+ proc = fopen("/proc/crypto", "r");
+ if (!proc) {
+ printf("Unable to open /proc/crypto\n");
+ return false;
+ }
+
+ driver_name = NULL;
+ is_shash = false;
+ match = false;
+
+ /* Look through /proc/crypto for a driver with kernel mode FP usage */
+ while (!match) {
+ c = fgets(buf, sizeof(buf), proc);
+ if (!c) {
+ if (feof(proc)) {
+ printf("Nothing found in /proc/crypto\n");
+ return false;
+ }
+ continue;
+ }
+
+ /* Algorithm descriptions are separated by a blank line */
+ if (*c == '\n') {
+ if (is_shash && driver_name) {
+ for (i = 0; i < ARRAY_SIZE(drivers); i++) {
+ if (strcmp(drivers[i],
+ driver_name) == 0) {
+ match = true;
+ }
+ }
+ }
+
+ if (!match) {
+ digest_len = 0;
+
+ free(driver_name);
+ driver_name = NULL;
+
+ free(alg_name);
+ alg_name = NULL;
+
+ is_shash = false;
+ }
+ continue;
+ }
+
+ /* Remove trailing newline */
+ c = strchr(buf, '\n');
+ if (c)
+ *c = '\0';
+
+ /* Find the field/value separator and start of the value */
+ c = strchr(buf, ':');
+ if (!c)
+ continue;
+ c += 2;
+
+ if (strncmp(buf, "digestsize", strlen("digestsize")) == 0)
+ sscanf(c, "%d", &digest_len);
+
+ if (strncmp(buf, "name", strlen("name")) == 0)
+ alg_name = strdup(c);
+
+ if (strncmp(buf, "driver", strlen("driver")) == 0)
+ driver_name = strdup(c);
+
+ if (strncmp(buf, "type", strlen("type")) == 0)
+ if (strncmp(c, "shash", strlen("shash")) == 0)
+ is_shash = true;
+ }
+
+ strncpy((char *)addr.salg_name, alg_name,
+ sizeof(addr.salg_name) - 1);
+
+ ret = bind(base, (struct sockaddr *)&addr, sizeof(addr));
+ if (ret < 0) {
+ printf("Failed to bind %s: %s (%d)\n",
+ addr.salg_name, strerror(errno), errno);
+ return false;
+ }
+
+ ret = accept(base, NULL, 0);
+ if (ret < 0) {
+ printf("Failed to accept %s: %s (%d)\n",
+ addr.salg_name, strerror(errno), errno);
+ return false;
+ }
+
+ sock = ret;
+
+ ret = pipe(zerocopy);
+ if (ret != 0) {
+ printf("Failed to create zerocopy pipe: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ ref = malloc(digest_len);
+ if (!ref) {
+ printf("Failed to allocate %d byte reference\n", digest_len);
+ return false;
+ }
+
+ digest = malloc(digest_len);
+ if (!digest) {
+ printf("Failed to allocate %d byte digest\n", digest_len);
+ return false;
+ }
+
+ return true;
+}
+
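+/*
+ * Hash the data buffer via the AF_ALG socket: vmsplice() gifts the
+ * pages to a pipe and splice() feeds them to the socket so the kernel
+ * mode driver (and its FP/SIMD usage) does the work, then the digest
+ * is read back with recv().
+ */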
+static bool compute_digest(void *buf)
+{
+ struct iovec iov;
+ int ret, wrote;
+
+ iov = data_iov;
+ while (iov.iov_len) {
+ ret = vmsplice(zerocopy[1], &iov, 1, SPLICE_F_GIFT);
+ if (ret < 0) {
+ printf("Failed to send buffer: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ wrote = ret;
+ ret = splice(zerocopy[0], NULL, sock, NULL, wrote, 0);
+ if (ret < 0) {
+ printf("Failed to splice buffer: %s (%d)\n",
+ strerror(errno), errno);
+ } else if (ret != wrote) {
+ printf("Short splice: %d < %d\n", ret, wrote);
+ }
+
+ iov.iov_len -= wrote;
+ iov.iov_base += wrote;
+ }
+
+reread:
+ ret = recv(sock, buf, digest_len, 0);
+ if (ret == 0) {
+ printf("No digest returned\n");
+ return false;
+ }
+ if (ret != digest_len) {
+		if (errno == EAGAIN)
+ goto reread;
+ printf("Failed to get digest: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ return true;
+}
+
+int main(void)
+{
+ char *data;
+ struct sigaction sa;
+ int ret;
+
+	/* Ensure our output is line buffered */
+ setvbuf(stdout, NULL, _IOLBF, 0);
+
+ /* The parent will communicate with us via signals */
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = handle_exit_signal;
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ sigemptyset(&sa.sa_mask);
+ ret = sigaction(SIGTERM, &sa, NULL);
+ if (ret < 0)
+ printf("Failed to install SIGTERM handler: %s (%d)\n",
+ strerror(errno), errno);
+
+ sa.sa_sigaction = handle_kick_signal;
+ ret = sigaction(SIGUSR1, &sa, NULL);
+ if (ret < 0)
+ printf("Failed to install SIGUSR1 handler: %s (%d)\n",
+ strerror(errno), errno);
+ ret = sigaction(SIGUSR2, &sa, NULL);
+ if (ret < 0)
+ printf("Failed to install SIGUSR2 handler: %s (%d)\n",
+ strerror(errno), errno);
+
+ data = malloc(DATA_SIZE);
+ if (!data) {
+ printf("Failed to allocate data buffer\n");
+ return EXIT_FAILURE;
+ }
+ memset(data, 0, DATA_SIZE);
+
+ data_iov.iov_base = data;
+ data_iov.iov_len = DATA_SIZE;
+
+ /*
+ * If we can't create a socket assume it's a lack of system
+ * support and fall back to a basic FPSIMD test for the
+ * benefit of fp-stress.
+ */
+ if (!create_socket()) {
+ execl("./fpsimd-test", "./fpsimd-test", NULL);
+ printf("Failed to fall back to fspimd-test: %d (%s)\n",
+ errno, strerror(errno));
+ return EXIT_FAILURE;
+ }
+
+ /*
+ * Compute a reference digest we hope is repeatable, we do
+ * this at runtime partly to make it easier to play with
+ * parameters.
+ */
+ if (!compute_digest(ref)) {
+ printf("Failed to compute reference digest\n");
+ return EXIT_FAILURE;
+ }
+
+ printf("AF_ALG using %s\n", alg_name);
+
+ while (true) {
+ if (!compute_digest(digest)) {
+ printf("Failed to compute digest, iter=%d\n", iter);
+ return EXIT_FAILURE;
+ }
+
+ if (memcmp(ref, digest, digest_len) != 0) {
+ printf("Digest mismatch, iter=%d\n", iter);
+ return EXIT_FAILURE;
+ }
+
+ iter++;
+ }
+
+ return EXIT_FAILURE;
+}
diff --git a/tools/testing/selftests/arm64/fp/rdvl-sme.c b/tools/testing/selftests/arm64/fp/rdvl-sme.c
new file mode 100644
index 000000000000..49b0b2e08bac
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/rdvl-sme.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <stdio.h>
+
+#include "rdvl.h"
+
+int main(void)
+{
+ int vl = rdvl_sme();
+
+ printf("%d\n", vl);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/arm64/fp/rdvl-sve.c b/tools/testing/selftests/arm64/fp/rdvl-sve.c
new file mode 100644
index 000000000000..7f8a13a18f5d
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/rdvl-sve.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <stdio.h>
+
+#include "rdvl.h"
+
+int main(void)
+{
+ int vl = rdvl_sve();
+
+ printf("%d\n", vl);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/arm64/fp/rdvl.S b/tools/testing/selftests/arm64/fp/rdvl.S
new file mode 100644
index 000000000000..20dc29996dc6
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/rdvl.S
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2021 ARM Limited.
+
+#include "sme-inst.h"
+
+.arch_extension sve
+
+.globl rdvl_sve
+rdvl_sve:
+ hint 34 // BTI C
+ rdvl x0, #1
+ ret
+
+.globl rdvl_sme
+rdvl_sme:
+ hint 34 // BTI C
+
+ rdsvl 0, 1
+
+ ret
diff --git a/tools/testing/selftests/arm64/fp/rdvl.h b/tools/testing/selftests/arm64/fp/rdvl.h
new file mode 100644
index 000000000000..5d323679fbc9
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/rdvl.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef RDVL_H
+#define RDVL_H
+
+int rdvl_sme(void);
+int rdvl_sve(void);
+
+#endif
diff --git a/tools/testing/selftests/arm64/fp/sme-inst.h b/tools/testing/selftests/arm64/fp/sme-inst.h
new file mode 100644
index 000000000000..85b9184e0835
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/sme-inst.h
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2021-2 ARM Limited.
+// Original author: Mark Brown <broonie@kernel.org>
+
+#ifndef SME_INST_H
+#define SME_INST_H
+
+#define REG_FPMR S3_3_C4_C4_2
+
+/*
+ * RDSVL X\nx, #\imm
+ */
+.macro rdsvl nx, imm
+ .inst 0x4bf5800 \
+ | (\imm << 5) \
+ | (\nx)
+.endm
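+
+// For example, "rdvl_sme" below uses "rdsvl 0, 1" to read the
+// streaming vector length in bytes into X0; the immediate scales
+// the result, as for RDVL.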
+
+.macro smstop
+ msr S0_3_C4_C6_3, xzr
+.endm
+
+.macro smstart_za
+ msr S0_3_C4_C5_3, xzr
+.endm
+
+.macro smstart_sm
+ msr S0_3_C4_C3_3, xzr
+.endm
+
+/*
+ * LDR (vector to ZA array):
+ * LDR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
+ */
+.macro _ldr_za nw, nxbase, offset=0
+ .inst 0xe1000000 \
+ | (((\nw) & 3) << 13) \
+ | ((\nxbase) << 5) \
+ | ((\offset) & 7)
+.endm
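+
+// For example, "_ldr_za 12, 0" loads the ZA slice indexed by W12
+// from the address in X0; only W12-W15 are usable as the index,
+// hence the "& 3" when encoding the register number.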
+
+/*
+ * STR (vector from ZA array):
+ * STR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
+ */
+.macro _str_za nw, nxbase, offset=0
+ .inst 0xe1200000 \
+ | (((\nw) & 3) << 13) \
+ | ((\nxbase) << 5) \
+ | ((\offset) & 7)
+.endm
+
+/*
+ * LDR (ZT0)
+ *
+ * LDR ZT0, nx
+ */
+.macro _ldr_zt nx
+ .inst 0xe11f8000 \
+ | (((\nx) & 0x1f) << 5)
+.endm
+
+/*
+ * STR (ZT0)
+ *
+ * STR ZT0, nx
+ */
+.macro _str_zt nx
+ .inst 0xe13f8000 \
+ | (((\nx) & 0x1f) << 5)
+.endm
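+
+// ZT0 is the 512-bit lookup table register added by SME2; these
+// encodings are open coded so the tests build with assemblers
+// that do not know about SME2.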
+
+#endif
diff --git a/tools/testing/selftests/arm64/fp/ssve-stress b/tools/testing/selftests/arm64/fp/ssve-stress
new file mode 100644
index 000000000000..e2bd2cc184ad
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/ssve-stress
@@ -0,0 +1,59 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2015-2019 ARM Limited.
+# Original author: Dave Martin <Dave.Martin@arm.com>
+
+set -ue
+
+NR_CPUS=`nproc`
+
+pids=
+logs=
+
+cleanup () {
+ trap - INT TERM CHLD
+ set +e
+
+ if [ -n "$pids" ]; then
+ kill $pids
+ wait $pids
+ pids=
+ fi
+
+ if [ -n "$logs" ]; then
+ cat $logs
+ rm $logs
+ logs=
+ fi
+}
+
+interrupt () {
+ cleanup
+ exit 0
+}
+
+child_died () {
+ cleanup
+ exit 1
+}
+
+trap interrupt INT TERM EXIT
+
+for x in `seq 0 $((NR_CPUS * 4))`; do
+ log=`mktemp`
+ logs=$logs\ $log
+ ./ssve-test >$log &
+ pids=$pids\ $!
+done
+
+# Wait for all child processes to be created:
+sleep 10
+
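+# Kick the tests continuously with SIGUSR1 so their irritator
+# handlers run, and track this loop's PID so cleanup kills it too: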
+while :; do
+ kill -USR1 $pids
+done &
+pids=$pids\ $!
+
+wait
+
+exit 1
diff --git a/tools/testing/selftests/arm64/fp/sve-probe-vls.c b/tools/testing/selftests/arm64/fp/sve-probe-vls.c
index b29cbc642c57..df0c1b6eb114 100644
--- a/tools/testing/selftests/arm64/fp/sve-probe-vls.c
+++ b/tools/testing/selftests/arm64/fp/sve-probe-vls.c
@@ -12,7 +12,8 @@
#include <sys/prctl.h>
#include <asm/sigcontext.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
+#include "rdvl.h"
int main(int argc, char **argv)
{
@@ -25,7 +26,7 @@ int main(int argc, char **argv)
ksft_set_plan(2);
if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
- ksft_exit_skip("SVE not available");
+ ksft_exit_skip("SVE not available\n");
/*
* Enumerate up to SVE_VQ_MAX vector lengths
@@ -38,6 +39,10 @@ int main(int argc, char **argv)
vl &= PR_SVE_VL_LEN_MASK;
+ if (rdvl_sve() != vl)
+ ksft_exit_fail_msg("PR_SVE_SET_VL reports %d, RDVL %d\n",
+ vl, rdvl_sve());
+
if (!sve_vl_valid(vl))
ksft_exit_fail_msg("VL %d invalid\n", vl);
vq = sve_vq_from_vl(vl);
diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace-asm.S b/tools/testing/selftests/arm64/fp/sve-ptrace-asm.S
deleted file mode 100644
index 3e81f9fab574..000000000000
--- a/tools/testing/selftests/arm64/fp/sve-ptrace-asm.S
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-// Copyright (C) 2015-2019 ARM Limited.
-// Original author: Dave Martin <Dave.Martin@arm.com>
-#include <asm/unistd.h>
-
-.arch_extension sve
-
-.globl sve_store_patterns
-
-sve_store_patterns:
- mov x1, x0
-
- index z0.b, #0, #1
- str q0, [x1]
-
- mov w8, #__NR_getpid
- svc #0
- str q0, [x1, #0x10]
-
- mov z1.d, z0.d
- str q0, [x1, #0x20]
-
- mov w8, #__NR_getpid
- svc #0
- str q0, [x1, #0x30]
-
- mov z1.d, z0.d
- str q0, [x1, #0x40]
-
- ret
-
-.size sve_store_patterns, . - sve_store_patterns
-.type sve_store_patterns, @function
diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c
index b2282be6f938..28f6b996c5e2 100644
--- a/tools/testing/selftests/arm64/fp/sve-ptrace.c
+++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c
@@ -1,15 +1,17 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2015-2020 ARM Limited.
+ * Copyright (C) 2015-2021 ARM Limited.
* Original author: Dave Martin <Dave.Martin@arm.com>
*/
#include <errno.h>
+#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
+#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
@@ -17,61 +19,113 @@
#include <asm/sigcontext.h>
#include <asm/ptrace.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
#ifndef NT_ARM_SVE
#define NT_ARM_SVE 0x405
#endif
-/* Number of registers filled in by sve_store_patterns */
-#define NR_VREGS 5
-
-void sve_store_patterns(__uint128_t v[NR_VREGS]);
-
-static void dump(const void *buf, size_t size)
-{
- size_t i;
- const unsigned char *p = buf;
-
- for (i = 0; i < size; ++i)
- printf(" %.2x", *p++);
-}
+#ifndef NT_ARM_SSVE
+#define NT_ARM_SSVE 0x40b
+#endif
-static int check_vregs(const __uint128_t vregs[NR_VREGS])
+/*
+ * The architecture defines the maximum VQ as 16, but for
+ * extensibility the kernel specifies SVE_VQ_MAX as 512, which would
+ * have us run far more tests than are useful. Until the
+ * architecture is extended, limit our coverage to what is currently
+ * allowed, plus one extra to ensure we cover constraining the VL as
+ * expected.
+ */
+#define TEST_VQ_MAX 17
+
+struct vec_type {
+ const char *name;
+ unsigned long hwcap_type;
+ unsigned long hwcap;
+ int regset;
+ int prctl_set;
+};
+
+static const struct vec_type vec_types[] = {
+ {
+ .name = "SVE",
+ .hwcap_type = AT_HWCAP,
+ .hwcap = HWCAP_SVE,
+ .regset = NT_ARM_SVE,
+ .prctl_set = PR_SVE_SET_VL,
+ },
+ {
+ .name = "Streaming SVE",
+ .hwcap_type = AT_HWCAP2,
+ .hwcap = HWCAP2_SME,
+ .regset = NT_ARM_SSVE,
+ .prctl_set = PR_SME_SET_VL,
+ },
+};
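+
+/*
+ * Streaming SVE state is exposed to ptrace with the same
+ * user_sve_header layout as regular SVE, so the helpers below take
+ * a vec_type and are shared between the two regsets.
+ */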
+
+#define VL_TESTS (((TEST_VQ_MAX - SVE_VQ_MIN) + 1) * 4)
+#define FLAG_TESTS 4
+#define FPSIMD_TESTS 2
+
+#define EXPECTED_TESTS ((VL_TESTS + FLAG_TESTS + FPSIMD_TESTS) * ARRAY_SIZE(vec_types))
+
+static void fill_buf(char *buf, size_t size)
{
int i;
- int ok = 1;
-
- for (i = 0; i < NR_VREGS; ++i) {
- printf("# v[%d]:", i);
- dump(&vregs[i], sizeof vregs[i]);
- putchar('\n');
-
- if (vregs[i] != vregs[0])
- ok = 0;
- }
- return ok;
+ for (i = 0; i < size; i++)
+ buf[i] = random();
}
static int do_child(void)
{
if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
- ksft_exit_fail_msg("PTRACE_TRACEME", strerror(errno));
+ ksft_exit_fail_msg("ptrace(PTRACE_TRACEME) failed: %s (%d)\n",
+ strerror(errno), errno);
if (raise(SIGSTOP))
- ksft_exit_fail_msg("raise(SIGSTOP)", strerror(errno));
+ ksft_exit_fail_msg("raise(SIGSTOP) failed: %s (%d)\n",
+ strerror(errno), errno);
return EXIT_SUCCESS;
}
-static struct user_sve_header *get_sve(pid_t pid, void **buf, size_t *size)
+static int get_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
+{
+ struct iovec iov;
+ int ret;
+
+ iov.iov_base = fpsimd;
+ iov.iov_len = sizeof(*fpsimd);
+ ret = ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
+ if (ret == -1)
+ ksft_perror("ptrace(PTRACE_GETREGSET)");
+ return ret;
+}
+
+static int set_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
+{
+ struct iovec iov;
+ int ret;
+
+ iov.iov_base = fpsimd;
+ iov.iov_len = sizeof(*fpsimd);
+ ret = ptrace(PTRACE_SETREGSET, pid, NT_PRFPREG, &iov);
+ if (ret == -1)
+ ksft_perror("ptrace(PTRACE_SETREGSET)");
+ return ret;
+}
+
+static struct user_sve_header *get_sve(pid_t pid, const struct vec_type *type,
+ void **buf, size_t *size)
{
struct user_sve_header *sve;
void *p;
- size_t sz = sizeof *sve;
+ size_t sz = sizeof(*sve);
struct iovec iov;
+ int ret;
while (1) {
if (*size < sz) {
@@ -87,8 +141,11 @@ static struct user_sve_header *get_sve(pid_t pid, void **buf, size_t *size)
iov.iov_base = *buf;
iov.iov_len = sz;
- if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov))
+ ret = ptrace(PTRACE_GETREGSET, pid, type->regset, &iov);
+ if (ret) {
+ ksft_perror("ptrace(PTRACE_GETREGSET)");
goto error;
+ }
sve = *buf;
if (sve->size <= sz)
@@ -103,49 +160,592 @@ error:
return NULL;
}
-static int set_sve(pid_t pid, const struct user_sve_header *sve)
+static int set_sve(pid_t pid, const struct vec_type *type,
+ const struct user_sve_header *sve)
{
struct iovec iov;
+ int ret;
iov.iov_base = (void *)sve;
iov.iov_len = sve->size;
- return ptrace(PTRACE_SETREGSET, pid, NT_ARM_SVE, &iov);
+ ret = ptrace(PTRACE_SETREGSET, pid, type->regset, &iov);
+ if (ret == -1)
+ ksft_perror("ptrace(PTRACE_SETREGSET)");
+ return ret;
+}
+
+/* A read operation fails */
+static void read_fails(pid_t child, const struct vec_type *type)
+{
+ struct user_sve_header *new_sve = NULL;
+ size_t new_sve_size = 0;
+ void *ret;
+
+ ret = get_sve(child, type, (void **)&new_sve, &new_sve_size);
+
+ ksft_test_result(ret == NULL, "%s unsupported read fails\n",
+ type->name);
+
+ free(new_sve);
+}
+
+/* A write operation fails */
+static void write_fails(pid_t child, const struct vec_type *type)
+{
+ struct user_sve_header sve;
+ int ret;
+
+ /* Just the header, no data */
+ memset(&sve, 0, sizeof(sve));
+ sve.size = sizeof(sve);
+ sve.flags = SVE_PT_REGS_SVE;
+ sve.vl = SVE_VL_MIN;
+ ret = set_sve(child, type, &sve);
+
+ ksft_test_result(ret != 0, "%s unsupported write fails\n",
+ type->name);
+}
+
+/* Validate setting and getting the inherit flag */
+static void ptrace_set_get_inherit(pid_t child, const struct vec_type *type)
+{
+ struct user_sve_header sve;
+ struct user_sve_header *new_sve = NULL;
+ size_t new_sve_size = 0;
+ int ret;
+
+ /* First set the flag */
+ memset(&sve, 0, sizeof(sve));
+ sve.size = sizeof(sve);
+ sve.vl = sve_vl_from_vq(SVE_VQ_MIN);
+ sve.flags = SVE_PT_VL_INHERIT | SVE_PT_REGS_SVE;
+ ret = set_sve(child, type, &sve);
+ if (ret != 0) {
+ ksft_test_result_fail("Failed to set %s SVE_PT_VL_INHERIT\n",
+ type->name);
+ return;
+ }
+
+ /*
+ * Read back the new register state and verify that we have
+ * set the flags we expected.
+ */
+ if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
+ ksft_test_result_fail("Failed to read %s SVE flags\n",
+ type->name);
+ return;
+ }
+
+ ksft_test_result(new_sve->flags & SVE_PT_VL_INHERIT,
+ "%s SVE_PT_VL_INHERIT set\n", type->name);
+
+ /* Now clear */
+ sve.flags &= ~SVE_PT_VL_INHERIT;
+ ret = set_sve(child, type, &sve);
+ if (ret != 0) {
+ ksft_test_result_fail("Failed to clear %s SVE_PT_VL_INHERIT\n",
+ type->name);
+ return;
+ }
+
+ if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
+ ksft_test_result_fail("Failed to read %s SVE flags\n",
+ type->name);
+ return;
+ }
+
+ ksft_test_result(!(new_sve->flags & SVE_PT_VL_INHERIT),
+ "%s SVE_PT_VL_INHERIT cleared\n", type->name);
+
+ free(new_sve);
+}
+
+/* Validate attempting to set the specified VL via ptrace */
+static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
+ unsigned int vl, bool *supported)
+{
+ struct user_sve_header sve;
+ struct user_sve_header *new_sve = NULL;
+ size_t new_sve_size = 0;
+ int ret, prctl_vl;
+
+ *supported = false;
+
+ /* Check if the VL is supported in this process */
+ prctl_vl = prctl(type->prctl_set, vl);
+ if (prctl_vl == -1)
+ ksft_exit_fail_msg("prctl(PR_%s_SET_VL) failed: %s (%d)\n",
+ type->name, strerror(errno), errno);
+
+ /* If the VL is not supported then a supported VL will be returned */
+ *supported = (prctl_vl == vl);
+
+ /* Set the VL by doing a set with no register payload */
+ memset(&sve, 0, sizeof(sve));
+ sve.size = sizeof(sve);
+ sve.flags = SVE_PT_REGS_SVE;
+ sve.vl = vl;
+ ret = set_sve(child, type, &sve);
+ if (ret != 0) {
+ ksft_test_result_fail("Failed to set %s VL %u\n",
+ type->name, vl);
+ return;
+ }
+
+ /*
+ * Read back the new register state and verify that we have the
+ * same VL that we got from prctl() on ourselves.
+ */
+ if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) {
+ ksft_test_result_fail("Failed to read %s VL %u\n",
+ type->name, vl);
+ return;
+ }
+
+ ksft_test_result(new_sve->vl == prctl_vl, "Set %s VL %u\n",
+ type->name, vl);
+
+ free(new_sve);
+}
+
+static void check_u32(unsigned int vl, const char *reg,
+ uint32_t *in, uint32_t *out, int *errors)
+{
+ if (*in != *out) {
+ printf("# VL %d %s wrote %x read %x\n",
+ vl, reg, *in, *out);
+ (*errors)++;
+ }
+}
+
+/* Set out of range VLs */
+static void ptrace_set_vl_ranges(pid_t child, const struct vec_type *type)
+{
+ struct user_sve_header sve;
+ int ret;
+
+ memset(&sve, 0, sizeof(sve));
+ sve.flags = SVE_PT_REGS_SVE;
+ sve.size = sizeof(sve);
+
+ ret = set_sve(child, type, &sve);
+ ksft_test_result(ret != 0, "%s Set invalid VL 0\n", type->name);
+
+ sve.vl = SVE_VL_MAX + SVE_VQ_BYTES;
+ ret = set_sve(child, type, &sve);
+ ksft_test_result(ret != 0, "%s Set invalid VL %d\n", type->name,
+ SVE_VL_MAX + SVE_VQ_BYTES);
+}
+
+/* Access the FPSIMD registers via the SVE regset */
+static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type)
+{
+ void *svebuf;
+ struct user_sve_header *sve;
+ struct user_fpsimd_state *fpsimd, new_fpsimd;
+ unsigned int i, j;
+ unsigned char *p;
+ int ret;
+
+ svebuf = malloc(SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
+ if (!svebuf) {
+ ksft_test_result_fail("Failed to allocate FPSIMD buffer\n");
+ return;
+ }
+
+ memset(svebuf, 0, SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
+ sve = svebuf;
+ sve->flags = SVE_PT_REGS_FPSIMD;
+ sve->size = SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD);
+ sve->vl = 16; /* We don't care what the VL is */
+
+ /* Try to set a known FPSIMD state via PT_REGS_SVE */
+ fpsimd = (struct user_fpsimd_state *)((char *)sve +
+ SVE_PT_FPSIMD_OFFSET);
+ for (i = 0; i < 32; ++i) {
+ p = (unsigned char *)&fpsimd->vregs[i];
+
+ for (j = 0; j < sizeof(fpsimd->vregs[i]); ++j)
+ p[j] = j;
+ }
+
+ /* This should only succeed for SVE */
+ ret = set_sve(child, type, sve);
+ ksft_test_result((type->regset == NT_ARM_SVE) == (ret == 0),
+ "%s FPSIMD set via SVE: %d\n",
+ type->name, ret);
+ if (ret)
+ goto out;
+
+ /* Verify via the FPSIMD regset */
+ if (get_fpsimd(child, &new_fpsimd)) {
+ ksft_test_result_fail("get_fpsimd(): %s\n",
+ strerror(errno));
+ goto out;
+ }
+ if (memcmp(fpsimd, &new_fpsimd, sizeof(*fpsimd)) == 0)
+ ksft_test_result_pass("%s get_fpsimd() gave same state\n",
+ type->name);
+ else
+ ksft_test_result_fail("%s get_fpsimd() gave different state\n",
+ type->name);
+
+out:
+ free(svebuf);
+}
+
+/* Write the FPSIMD registers via the SVE regset when SVE is not supported */
+static void ptrace_sve_fpsimd_no_sve(pid_t child)
+{
+ void *svebuf;
+ struct user_sve_header *sve;
+ struct user_fpsimd_state *fpsimd, new_fpsimd;
+ unsigned int i, j;
+ unsigned char *p;
+ int ret;
+
+ svebuf = malloc(SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
+ if (!svebuf) {
+ ksft_test_result_fail("Failed to allocate FPSIMD buffer\n");
+ return;
+ }
+
+ /* On a system without SVE the VL should be set to 0 */
+ memset(svebuf, 0, SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
+ sve = svebuf;
+ sve->flags = SVE_PT_REGS_FPSIMD;
+ sve->size = SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD);
+ sve->vl = 0;
+
+ /* Try to set a known FPSIMD state via PT_REGS_SVE */
+ fpsimd = (struct user_fpsimd_state *)((char *)sve +
+ SVE_PT_FPSIMD_OFFSET);
+ for (i = 0; i < 32; ++i) {
+ p = (unsigned char *)&fpsimd->vregs[i];
+
+ for (j = 0; j < sizeof(fpsimd->vregs[i]); ++j)
+ p[j] = j;
+ }
+
+ ret = set_sve(child, &vec_types[0], sve);
+ ksft_test_result(ret == 0, "FPSIMD write via SVE\n");
+ if (ret) {
+ ksft_test_result_skip("Verify FPSIMD write via SVE\n");
+ goto out;
+ }
+
+ /* Verify via the FPSIMD regset */
+ if (get_fpsimd(child, &new_fpsimd)) {
+ ksft_test_result_skip("Verify FPSIMD write via SVE\n");
+ goto out;
+ }
+ ksft_test_result(memcmp(fpsimd, &new_fpsimd, sizeof(*fpsimd)) == 0,
+ "Verify FPSIMD write via SVE\n");
+
+out:
+ free(svebuf);
+}
+
+/* Validate attempting to set SVE data and read SVE data */
+static void ptrace_set_sve_get_sve_data(pid_t child,
+ const struct vec_type *type,
+ unsigned int vl)
+{
+ void *write_buf;
+ void *read_buf = NULL;
+ struct user_sve_header *write_sve;
+ struct user_sve_header *read_sve;
+ size_t read_sve_size = 0;
+ unsigned int vq = sve_vq_from_vl(vl);
+ int ret, i;
+ size_t data_size;
+ int errors = 0;
+
+ data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
+ write_buf = malloc(data_size);
+ if (!write_buf) {
+ ksft_test_result_fail("Error allocating %ld byte buffer for %s VL %u\n",
+ data_size, type->name, vl);
+ return;
+ }
+ write_sve = write_buf;
+
+ /* Set up some data and write it out */
+ memset(write_sve, 0, data_size);
+ write_sve->size = data_size;
+ write_sve->vl = vl;
+ write_sve->flags = SVE_PT_REGS_SVE;
+
+ for (i = 0; i < __SVE_NUM_ZREGS; i++)
+ fill_buf(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
+ SVE_PT_SVE_ZREG_SIZE(vq));
+
+ for (i = 0; i < __SVE_NUM_PREGS; i++)
+ fill_buf(write_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
+ SVE_PT_SVE_PREG_SIZE(vq));
+
+ fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE);
+ fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE);
+
+ /* TODO: Generate a valid FFR pattern */
+
+ ret = set_sve(child, type, write_sve);
+ if (ret != 0) {
+ ksft_test_result_fail("Failed to set %s VL %u data\n",
+ type->name, vl);
+ goto out;
+ }
+
+ /* Read the data back */
+ if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) {
+ ksft_test_result_fail("Failed to read %s VL %u data\n",
+ type->name, vl);
+ goto out;
+ }
+ read_sve = read_buf;
+
+ /* We might read more data if there are extensions we don't know about */
+ if (read_sve->size < write_sve->size) {
+ ksft_test_result_fail("%s wrote %d bytes, only read %d\n",
+ type->name, write_sve->size,
+ read_sve->size);
+ goto out_read;
+ }
+
+ for (i = 0; i < __SVE_NUM_ZREGS; i++) {
+ if (memcmp(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
+ read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
+ SVE_PT_SVE_ZREG_SIZE(vq)) != 0) {
+ printf("# Mismatch in %u Z%d\n", vl, i);
+ errors++;
+ }
+ }
+
+ for (i = 0; i < __SVE_NUM_PREGS; i++) {
+ if (memcmp(write_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
+ read_buf + SVE_PT_SVE_PREG_OFFSET(vq, i),
+ SVE_PT_SVE_PREG_SIZE(vq)) != 0) {
+ printf("# Mismatch in %u P%d\n", vl, i);
+ errors++;
+ }
+ }
+
+ check_u32(vl, "FPSR", write_buf + SVE_PT_SVE_FPSR_OFFSET(vq),
+ read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors);
+ check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
+ read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors);
+
+ ksft_test_result(errors == 0, "Set and get %s data for VL %u\n",
+ type->name, vl);
+
+out_read:
+ free(read_buf);
+out:
+ free(write_buf);
}
-static void dump_sve_regs(const struct user_sve_header *sve, unsigned int num,
- unsigned int vlmax)
+/* Validate attempting to set SVE data and read it via the FPSIMD regset */
+static void ptrace_set_sve_get_fpsimd_data(pid_t child,
+ const struct vec_type *type,
+ unsigned int vl)
{
- unsigned int vq;
- unsigned int i;
+ void *write_buf;
+ struct user_sve_header *write_sve;
+ unsigned int vq = sve_vq_from_vl(vl);
+ struct user_fpsimd_state fpsimd_state;
+ int ret, i;
+ size_t data_size;
+ int errors = 0;
+
+ if (__BYTE_ORDER == __BIG_ENDIAN) {
+ ksft_test_result_skip("Big endian not supported\n");
+ return;
+ }
- if ((sve->flags & SVE_PT_REGS_MASK) != SVE_PT_REGS_SVE)
- ksft_exit_fail_msg("Dumping non-SVE register\n");
+ data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
+ write_buf = malloc(data_size);
+ if (!write_buf) {
+ ksft_test_result_fail("Error allocating %ld byte buffer for %s VL %u\n",
+ data_size, type->name, vl);
+ return;
+ }
+ write_sve = write_buf;
+
+ /* Set up some data and write it out */
+ memset(write_sve, 0, data_size);
+ write_sve->size = data_size;
+ write_sve->vl = vl;
+ write_sve->flags = SVE_PT_REGS_SVE;
+
+ for (i = 0; i < __SVE_NUM_ZREGS; i++)
+ fill_buf(write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
+ SVE_PT_SVE_ZREG_SIZE(vq));
+
+ fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE);
+ fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE);
+
+ ret = set_sve(child, type, write_sve);
+ if (ret != 0) {
+ ksft_test_result_fail("Failed to set %s VL %u data\n",
+ type->name, vl);
+ goto out;
+ }
- if (vlmax > sve->vl)
- vlmax = sve->vl;
+ /* Read the data back */
+ if (get_fpsimd(child, &fpsimd_state)) {
+ ksft_test_result_fail("Failed to read %s VL %u FPSIMD data\n",
+ type->name, vl);
+ goto out;
+ }
- vq = sve_vq_from_vl(sve->vl);
- for (i = 0; i < num; ++i) {
- printf("# z%u:", i);
- dump((const char *)sve + SVE_PT_SVE_ZREG_OFFSET(vq, i),
- vlmax);
- printf("%s\n", vlmax == sve->vl ? "" : " ...");
+ for (i = 0; i < __SVE_NUM_ZREGS; i++) {
+ __uint128_t tmp = 0;
+
+ /*
+ * Z regs are stored in an endianness invariant layout, so
+ * this comparison won't work for big endian
+ */
+ memcpy(&tmp, write_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
+ sizeof(tmp));
+
+ if (tmp != fpsimd_state.vregs[i]) {
+ printf("# Mismatch in FPSIMD for %s VL %u Z%d\n",
+ type->name, vl, i);
+ errors++;
+ }
}
+
+ check_u32(vl, "FPSR", write_buf + SVE_PT_SVE_FPSR_OFFSET(vq),
+ &fpsimd_state.fpsr, &errors);
+ check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq),
+ &fpsimd_state.fpcr, &errors);
+
+ ksft_test_result(errors == 0, "Set and get FPSIMD data for %s VL %u\n",
+ type->name, vl);
+
+out:
+ free(write_buf);
+}
+
+/* Validate attempting to set FPSIMD data and read it via the SVE regset */
+static void ptrace_set_fpsimd_get_sve_data(pid_t child,
+ const struct vec_type *type,
+ unsigned int vl)
+{
+ void *read_buf = NULL;
+ unsigned char *p;
+ struct user_sve_header *read_sve;
+ unsigned int vq = sve_vq_from_vl(vl);
+ struct user_fpsimd_state write_fpsimd;
+ int ret, i, j;
+ size_t read_sve_size = 0;
+ size_t expected_size;
+ int errors = 0;
+
+ if (__BYTE_ORDER == __BIG_ENDIAN) {
+ ksft_test_result_skip("Big endian not supported\n");
+ return;
+ }
+
+ for (i = 0; i < 32; ++i) {
+ p = (unsigned char *)&write_fpsimd.vregs[i];
+
+ for (j = 0; j < sizeof(write_fpsimd.vregs[i]); ++j)
+ p[j] = j;
+ }
+
+ ret = set_fpsimd(child, &write_fpsimd);
+ if (ret != 0) {
+ ksft_test_result_fail("Failed to set FPSIMD state: %d\n)",
+ ret);
+ return;
+ }
+
+ if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) {
+ ksft_test_result_fail("Failed to read %s VL %u data\n",
+ type->name, vl);
+ return;
+ }
+ read_sve = read_buf;
+
+ if (read_sve->vl != vl) {
+ ksft_test_result_fail("Child VL != expected VL: %u != %u\n",
+ read_sve->vl, vl);
+ goto out;
+ }
+
+ /* The kernel may return either SVE or FPSIMD format */
+ switch (read_sve->flags & SVE_PT_REGS_MASK) {
+ case SVE_PT_REGS_FPSIMD:
+ expected_size = SVE_PT_FPSIMD_SIZE(vq, SVE_PT_REGS_FPSIMD);
+ if (read_sve_size < expected_size) {
+ ksft_test_result_fail("Read %ld bytes, expected %ld\n",
+ read_sve_size, expected_size);
+ goto out;
+ }
+
+ ret = memcmp(&write_fpsimd, read_buf + SVE_PT_FPSIMD_OFFSET,
+ sizeof(write_fpsimd));
+ if (ret != 0) {
+ ksft_print_msg("Read FPSIMD data mismatch\n");
+ errors++;
+ }
+ break;
+
+ case SVE_PT_REGS_SVE:
+ expected_size = SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
+ if (read_sve_size < expected_size) {
+ ksft_test_result_fail("Read %ld bytes, expected %ld\n",
+ read_sve_size, expected_size);
+ goto out;
+ }
+
+ for (i = 0; i < __SVE_NUM_ZREGS; i++) {
+ __uint128_t tmp = 0;
+
+ /*
+ * Z regs are stored in an endianness invariant layout, so
+ * this comparison won't work for big endian
+ */
+ memcpy(&tmp, read_buf + SVE_PT_SVE_ZREG_OFFSET(vq, i),
+ sizeof(tmp));
+
+ if (tmp != write_fpsimd.vregs[i]) {
+ ksft_print_msg("Mismatch in FPSIMD for %s VL %u Z%d/V%d\n",
+ type->name, vl, i, i);
+ errors++;
+ }
+ }
+
+ check_u32(vl, "FPSR", &write_fpsimd.fpsr,
+ read_buf + SVE_PT_SVE_FPSR_OFFSET(vq), &errors);
+ check_u32(vl, "FPCR", &write_fpsimd.fpcr,
+ read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors);
+ break;
+ default:
+ ksft_print_msg("Unexpected regs type %d\n",
+ read_sve->flags & SVE_PT_REGS_MASK);
+ errors++;
+ break;
+ }
+
+ ksft_test_result(errors == 0, "Set FPSIMD, read via SVE for %s VL %u\n",
+ type->name, vl);
+
+out:
+ free(read_buf);
}
static int do_parent(pid_t child)
{
int ret = EXIT_FAILURE;
pid_t pid;
- int status;
+ int status, i;
siginfo_t si;
- void *svebuf = NULL, *newsvebuf;
- size_t svebufsz = 0, newsvebufsz;
- struct user_sve_header *sve, *new_sve;
- struct user_fpsimd_state *fpsimd;
- unsigned int i, j;
- unsigned char *p;
- unsigned int vq;
+ unsigned int vq, vl;
+ bool vl_supported;
+
+ ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);
/* Attach to the child */
while (1) {
@@ -167,8 +767,6 @@ static int do_parent(pid_t child)
if (WIFEXITED(status) || WIFSIGNALED(status))
ksft_exit_fail_msg("Child died unexpectedly\n");
- ksft_test_result(WIFSTOPPED(status), "WIFSTOPPED(%d)\n",
- status);
if (!WIFSTOPPED(status))
goto error;
@@ -203,98 +801,90 @@ static int do_parent(pid_t child)
}
}
- sve = get_sve(pid, &svebuf, &svebufsz);
- if (!sve) {
- int e = errno;
-
- ksft_test_result_fail("get_sve: %s\n", strerror(errno));
- if (e == ESRCH)
- goto disappeared;
-
- goto error;
- } else {
- ksft_test_result_pass("get_sve\n");
- }
-
- ksft_test_result((sve->flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD,
- "FPSIMD registers\n");
- if ((sve->flags & SVE_PT_REGS_MASK) != SVE_PT_REGS_FPSIMD)
- goto error;
-
- fpsimd = (struct user_fpsimd_state *)((char *)sve +
- SVE_PT_FPSIMD_OFFSET);
- for (i = 0; i < 32; ++i) {
- p = (unsigned char *)&fpsimd->vregs[i];
-
- for (j = 0; j < sizeof fpsimd->vregs[i]; ++j)
- p[j] = j;
- }
-
- if (set_sve(pid, sve)) {
- int e = errno;
-
- ksft_test_result_fail("set_sve(FPSIMD): %s\n",
- strerror(errno));
- if (e == ESRCH)
- goto disappeared;
-
- goto error;
- }
-
- vq = sve_vq_from_vl(sve->vl);
-
- newsvebufsz = SVE_PT_SVE_ZREG_OFFSET(vq, 1);
- new_sve = newsvebuf = malloc(newsvebufsz);
- if (!new_sve) {
- errno = ENOMEM;
- perror(NULL);
- goto error;
- }
-
- *new_sve = *sve;
- new_sve->flags &= ~SVE_PT_REGS_MASK;
- new_sve->flags |= SVE_PT_REGS_SVE;
- memset((char *)new_sve + SVE_PT_SVE_ZREG_OFFSET(vq, 0),
- 0, SVE_PT_SVE_ZREG_SIZE(vq));
- new_sve->size = SVE_PT_SVE_ZREG_OFFSET(vq, 1);
- if (set_sve(pid, new_sve)) {
- int e = errno;
+ for (i = 0; i < ARRAY_SIZE(vec_types); i++) {
+ /*
+ * If the vector type isn't supported reads and writes
+ * should fail.
+ */
+ if (!(getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap)) {
+ read_fails(child, &vec_types[i]);
+ write_fails(child, &vec_types[i]);
+ } else {
+ ksft_test_result_skip("%s unsupported read fails\n",
+ vec_types[i].name);
+ ksft_test_result_skip("%s unsupported write fails\n",
+ vec_types[i].name);
+ }
- ksft_test_result_fail("set_sve(ZREG): %s\n", strerror(errno));
- if (e == ESRCH)
- goto disappeared;
+ /* FPSIMD via SVE regset */
+ if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
+ ptrace_sve_fpsimd(child, &vec_types[i]);
+ } else {
+ ksft_test_result_skip("%s FPSIMD set via SVE\n",
+ vec_types[i].name);
+ ksft_test_result_skip("%s FPSIMD read\n",
+ vec_types[i].name);
+ }
- goto error;
- }
+ /* prctl() flags */
+ if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
+ ptrace_set_get_inherit(child, &vec_types[i]);
+ } else {
+ ksft_test_result_skip("%s SVE_PT_VL_INHERIT set\n",
+ vec_types[i].name);
+ ksft_test_result_skip("%s SVE_PT_VL_INHERIT cleared\n",
+ vec_types[i].name);
+ }
- new_sve = get_sve(pid, &newsvebuf, &newsvebufsz);
- if (!new_sve) {
- int e = errno;
+ /* Setting out of bounds VLs should fail */
+ if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
+ ptrace_set_vl_ranges(child, &vec_types[i]);
+ } else {
+ ksft_test_result_skip("%s Set invalid VL 0\n",
+ vec_types[i].name);
+ ksft_test_result_skip("%s Set invalid VL %d\n",
+ vec_types[i].name,
+ SVE_VL_MAX + SVE_VQ_BYTES);
+ }
- ksft_test_result_fail("get_sve(ZREG): %s\n", strerror(errno));
- if (e == ESRCH)
- goto disappeared;
+ /* Step through every possible VQ */
+ for (vq = SVE_VQ_MIN; vq <= TEST_VQ_MAX; vq++) {
+ vl = sve_vl_from_vq(vq);
+
+ /* First, try to set this vector length */
+ if (getauxval(vec_types[i].hwcap_type) &
+ vec_types[i].hwcap) {
+ ptrace_set_get_vl(child, &vec_types[i], vl,
+ &vl_supported);
+ } else {
+ ksft_test_result_skip("%s get/set VL %d\n",
+ vec_types[i].name, vl);
+ vl_supported = false;
+ }
- goto error;
+ /* If the VL is supported validate data set/get */
+ if (vl_supported) {
+ ptrace_set_sve_get_sve_data(child, &vec_types[i], vl);
+ ptrace_set_sve_get_fpsimd_data(child, &vec_types[i], vl);
+ ptrace_set_fpsimd_get_sve_data(child, &vec_types[i], vl);
+ } else {
+ ksft_test_result_skip("%s set SVE get SVE for VL %d\n",
+ vec_types[i].name, vl);
+ ksft_test_result_skip("%s set SVE get FPSIMD for VL %d\n",
+ vec_types[i].name, vl);
+ ksft_test_result_skip("%s set FPSIMD get SVE for VL %d\n",
+ vec_types[i].name, vl);
+ }
+ }
}
- ksft_test_result((new_sve->flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE,
- "SVE registers\n");
- if ((new_sve->flags & SVE_PT_REGS_MASK) != SVE_PT_REGS_SVE)
- goto error;
-
- dump_sve_regs(new_sve, 3, sizeof fpsimd->vregs[0]);
-
- p = (unsigned char *)new_sve + SVE_PT_SVE_ZREG_OFFSET(vq, 1);
- for (i = 0; i < sizeof fpsimd->vregs[0]; ++i) {
- unsigned char expected = i;
-
- if (__BYTE_ORDER == __BIG_ENDIAN)
- expected = sizeof fpsimd->vregs[0] - 1 - expected;
-
- ksft_test_result(p[i] == expected, "p[%d] == expected\n", i);
- if (p[i] != expected)
- goto error;
+ /* We support SVE writes of FPSIMD format on SME-only systems */
+ if (!(getauxval(AT_HWCAP) & HWCAP_SVE) &&
+ (getauxval(AT_HWCAP2) & HWCAP2_SME)) {
+ ptrace_sve_fpsimd_no_sve(child);
+ } else {
+ ksft_test_result_skip("FPSIMD write via SVE\n");
+ ksft_test_result_skip("Verify FPSIMD write via SVE\n");
}
ret = EXIT_SUCCESS;
@@ -309,19 +899,12 @@ disappeared:
int main(void)
{
int ret = EXIT_SUCCESS;
- __uint128_t v[NR_VREGS];
pid_t child;
- ksft_print_header();
- ksft_set_plan(20);
-
- if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
- ksft_exit_skip("SVE not available\n");
+ srandom(getpid());
- sve_store_patterns(v);
-
- if (!check_vregs(v))
- ksft_exit_fail_msg("Initial check_vregs() failed\n");
+ ksft_print_header();
+ ksft_set_plan(EXPECTED_TESTS);
child = fork();
if (!child)
@@ -332,5 +915,5 @@ int main(void)
ksft_print_cnts();
- return 0;
+ return ret;
}
diff --git a/tools/testing/selftests/arm64/fp/sve-test.S b/tools/testing/selftests/arm64/fp/sve-test.S
index f95074c9b48b..80e072f221cd 100644
--- a/tools/testing/selftests/arm64/fp/sve-test.S
+++ b/tools/testing/selftests/arm64/fp/sve-test.S
@@ -13,6 +13,7 @@
#include <asm/unistd.h>
#include "assembler.h"
#include "asm-offsets.h"
+#include "sme-inst.h"
#define NZR 32
#define NPR 16
@@ -46,130 +47,6 @@ define_accessor getz, NZR, _sve_str_v
define_accessor setp, NPR, _sve_ldr_p
define_accessor getp, NPR, _sve_str_p
-// Print a single character x0 to stdout
-// Clobbers x0-x2,x8
-function putc
- str x0, [sp, #-16]!
-
- mov x0, #1 // STDOUT_FILENO
- mov x1, sp
- mov x2, #1
- mov x8, #__NR_write
- svc #0
-
- add sp, sp, #16
- ret
-endfunction
-
-// Print a NUL-terminated string starting at address x0 to stdout
-// Clobbers x0-x3,x8
-function puts
- mov x1, x0
-
- mov x2, #0
-0: ldrb w3, [x0], #1
- cbz w3, 1f
- add x2, x2, #1
- b 0b
-
-1: mov w0, #1 // STDOUT_FILENO
- mov x8, #__NR_write
- svc #0
-
- ret
-endfunction
-
-// Utility macro to print a literal string
-// Clobbers x0-x4,x8
-.macro puts string
- .pushsection .rodata.str1.1, "aMS", 1
-.L__puts_literal\@: .string "\string"
- .popsection
-
- ldr x0, =.L__puts_literal\@
- bl puts
-.endm
-
-// Print an unsigned decimal number x0 to stdout
-// Clobbers x0-x4,x8
-function putdec
- mov x1, sp
- str x30, [sp, #-32]! // Result can't be > 20 digits
-
- mov x2, #0
- strb w2, [x1, #-1]! // Write the NUL terminator
-
- mov x2, #10
-0: udiv x3, x0, x2 // div-mod loop to generate the digits
- msub x0, x3, x2, x0
- add w0, w0, #'0'
- strb w0, [x1, #-1]!
- mov x0, x3
- cbnz x3, 0b
-
- ldrb w0, [x1]
- cbnz w0, 1f
- mov w0, #'0' // Print "0" for 0, not ""
- strb w0, [x1, #-1]!
-
-1: mov x0, x1
- bl puts
-
- ldr x30, [sp], #32
- ret
-endfunction
-
-// Print an unsigned decimal number x0 to stdout, followed by a newline
-// Clobbers x0-x5,x8
-function putdecn
- mov x5, x30
-
- bl putdec
- mov x0, #'\n'
- bl putc
-
- ret x5
-endfunction
-
-// Clobbers x0-x3,x8
-function puthexb
- str x30, [sp, #-0x10]!
-
- mov w3, w0
- lsr w0, w0, #4
- bl puthexnibble
- mov w0, w3
-
- ldr x30, [sp], #0x10
- // fall through to puthexnibble
-endfunction
-// Clobbers x0-x2,x8
-function puthexnibble
- and w0, w0, #0xf
- cmp w0, #10
- blo 1f
- add w0, w0, #'a' - ('9' + 1)
-1: add w0, w0, #'0'
- b putc
-endfunction
-
-// x0=data in, x1=size in, clobbers x0-x5,x8
-function dumphex
- str x30, [sp, #-0x10]!
-
- mov x4, x0
- mov x5, x1
-
-0: subs x5, x5, #1
- b.lo 1f
- ldrb w0, [x4], #1
- bl puthexb
- b 0b
-
-1: ldr x30, [sp], #0x10
- ret
-endfunction
-
// Declare some storage space to shadow the SVE register contents:
.pushsection .text
.data
@@ -184,18 +61,6 @@ scratch:
.space MAXVL_B
.popsection
-// Trivial memory copy: copy x2 bytes, starting at address x1, to address x0.
-// Clobbers x0-x3
-function memcpy
- cmp x2, #0
- b.eq 1f
-0: ldrb w3, [x1], #1
- strb w3, [x0], #1
- subs x2, x2, #1
- b.ne 0b
-1: ret
-endfunction
-
// Generate a test pattern for storage in SVE registers
// x0: pid (16 bits)
// x1: register number (6 bits)
@@ -284,16 +149,29 @@ endfunction
// Set up test pattern in the FFR
// x0: pid
// x2: generation
+//
+// We need to generate a canonical FFR value, which consists of a number of
+// low "1" bits, followed by a number of zeros. This gives us 17 unique values
+// per 16 bits of FFR, so we create a 4 bit signature out of the PID and
+// generation, and use that as the initial number of ones in the pattern.
+// We fill the upper lanes of FFR with zeros.
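+//
+// For example, PID 5 in generation 2 gives the signature
+// (5 & 3) | (2 << 2) = 0b1001 = 9, i.e. an FFR pattern of nine
+// low one bits (0x1ff).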
// Beware: corrupts P0.
function setup_ffr
+#ifndef SSVE
mov x4, x30
- bl pattern
+ and w0, w0, #0x3
+ bfi w0, w2, #2, #2
+ mov w1, #1
+ lsl w1, w1, w0
+ sub w1, w1, #1
+
ldr x0, =ffrref
- ldr x1, =scratch
- rdvl x2, #1
- lsr x2, x2, #3
- bl memcpy
+ strh w1, [x0], 2
+ rdvl x1, #1
+ lsr x1, x1, #3
+ sub x1, x1, #2
+ bl memclr
mov x0, #0
ldr x1, =ffrref
@@ -302,33 +180,9 @@ function setup_ffr
wrffr p0.b
ret x4
-endfunction
-
-// Fill x1 bytes starting at x0 with 0xae (for canary purposes)
-// Clobbers x1, x2.
-function memfill_ae
- mov w2, #0xae
- b memfill
-endfunction
-
-// Fill x1 bytes starting at x0 with 0.
-// Clobbers x1, x2.
-function memclr
- mov w2, #0
-endfunction
- // fall through to memfill
-
-// Trivial memory fill: fill x1 bytes starting at address x0 with byte w2
-// Clobbers x1
-function memfill
- cmp x1, #0
- b.eq 1f
-
-0: strb w2, [x0], #1
- subs x1, x1, #1
- b.ne 0b
-
-1: ret
+#else
+ ret
+#endif
endfunction
// Trivial memory compare: compare x2 bytes starting at address x0 with
@@ -411,6 +265,7 @@ endfunction
// Beware -- corrupts P0.
// Clobbers x0-x5.
function check_ffr
+#ifndef SSVE
mov x3, x30
ldr x4, =scratch
@@ -431,10 +286,12 @@ function check_ffr
mov x2, x5
mov x30, x3
b memcmp
+#else
+ ret
+#endif
endfunction
-// Any SVE register modified here can cause corruption in the main
-// thread -- but *only* the registers modified here.
+// Modify live register state; the signal return will undo our changes
function irritator_handler
// Increment the irritation signal count (x23):
ldr x0, [x2, #ucontext_regs + 8 * 23]
@@ -442,14 +299,24 @@ function irritator_handler
str x0, [x2, #ucontext_regs + 8 * 23]
// Corrupt some random Z-regs
- adr x0, .text + (irritator_handler - .text) / 16 * 16
movi v0.8b, #1
movi v9.16b, #2
movi v31.8b, #3
// And P0
- rdffr p0.b
+ ptrue p0.d
+#ifndef SSVE
// And FFR
wrffr p15.b
+#endif
+
+ ret
+endfunction
+
+function tickle_handler
+ // Increment the signal count (x23):
+ ldr x0, [x2, #ucontext_regs + 8 * 23]
+ add x0, x0, #1
+ str x0, [x2, #ucontext_regs + 8 * 23]
ret
endfunction
@@ -509,7 +376,37 @@ endfunction
// Main program entry point
.globl _start
function _start
-_start:
+ enable_gcs
+
+ mov x23, #0 // Irritation signal count
+
+ mov w0, #SIGINT
+ adr x1, terminate_handler
+ mov w2, #SA_SIGINFO
+ bl setsignal
+
+ mov w0, #SIGTERM
+ adr x1, terminate_handler
+ mov w2, #SA_SIGINFO
+ bl setsignal
+
+ mov w0, #SIGUSR1
+ adr x1, irritator_handler
+ mov w2, #SA_SIGINFO
+ orr w2, w2, #SA_NODEFER
+ bl setsignal
+
+ mov w0, #SIGUSR2
+ adr x1, tickle_handler
+ mov w2, #SA_SIGINFO
+ orr w2, w2, #SA_NODEFER
+ bl setsignal
+
+#ifdef SSVE
+ puts "Streaming mode "
+ smstart_sm
+#endif
+
// Sanity-check and report the vector length
rdvl x19, #8
@@ -540,23 +437,9 @@ _start:
mov x0, x20
bl putdecn
- mov x23, #0 // Irritation signal count
-
- mov w0, #SIGINT
- adr x1, terminate_handler
- mov w2, #SA_SIGINFO
- bl setsignal
-
- mov w0, #SIGTERM
- adr x1, terminate_handler
- mov w2, #SA_SIGINFO
- bl setsignal
-
- mov w0, #SIGUSR1
- adr x1, irritator_handler
- mov w2, #SA_SIGINFO
- orr w2, w2, #SA_NODEFER
- bl setsignal
+#ifdef SSVE
+ smstart_sm // syscalls will have exited streaming mode
+#endif
mov x22, #0 // generation number, increments per iteration
.Ltest_loop:
@@ -590,6 +473,13 @@ _start:
// mov x8, #__NR_sched_yield // Encourage preemption
// svc #0
+#ifdef SSVE
+ mrs x0, S3_3_C4_C2_2 // SVCR should have ZA=0, SM=1
+ and x1, x0, #3
+ cmp x1, #1
+ b.ne svcr_barf
+#endif
+
mov x21, #0
0: mov x0, x21
bl check_zreg
@@ -625,7 +515,11 @@ function barf
mov x11, x1 // actual data
mov x12, x2 // data size
- puts "Mistatch: PID="
+#ifdef SSVE
+ mrs x13, S3_3_C4_C2_2
+#endif
+
+ puts "Mismatch: PID="
mov x0, x20
bl putdec
puts ", iteration="
@@ -644,6 +538,12 @@ function barf
bl dumphex
puts "]\n"
+#ifdef SSVE
+ puts "\tSVCR: "
+ mov x0, x13
+ bl putdecn
+#endif
+
mov x8, #__NR_getpid
svc #0
// fpsimd.c activity log dump hack
@@ -670,3 +570,15 @@ function vl_barf
mov x1, #1
svc #0
endfunction
+
+function svcr_barf
+ mov x10, x0
+
+ puts "Bad SVCR: "
+ mov x0, x10
+ bl putdecn
+
+ mov x8, #__NR_exit
+ mov x1, #1
+ svc #0
+endfunction
diff --git a/tools/testing/selftests/arm64/fp/vec-syscfg.c b/tools/testing/selftests/arm64/fp/vec-syscfg.c
new file mode 100644
index 000000000000..8dd932fdcdc4
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/vec-syscfg.c
@@ -0,0 +1,796 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 ARM Limited.
+ * Original author: Mark Brown <broonie@kernel.org>
+ */
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <asm/sigcontext.h>
+#include <asm/hwcap.h>
+
+#include "kselftest.h"
+#include "rdvl.h"
+
+#define ARCH_MIN_VL SVE_VL_MIN
+
+struct vec_data {
+ const char *name;
+ unsigned long hwcap_type;
+ unsigned long hwcap;
+ const char *rdvl_binary;
+ int (*rdvl)(void);
+
+ int prctl_get;
+ int prctl_set;
+ const char *default_vl_file;
+
+ int default_vl;
+ int min_vl;
+ int max_vl;
+};
+
+#define VEC_SVE 0
+#define VEC_SME 1
+
+static struct vec_data vec_data[] = {
+ [VEC_SVE] = {
+ .name = "SVE",
+ .hwcap_type = AT_HWCAP,
+ .hwcap = HWCAP_SVE,
+ .rdvl = rdvl_sve,
+ .rdvl_binary = "./rdvl-sve",
+ .prctl_get = PR_SVE_GET_VL,
+ .prctl_set = PR_SVE_SET_VL,
+ .default_vl_file = "/proc/sys/abi/sve_default_vector_length",
+ },
+ [VEC_SME] = {
+ .name = "SME",
+ .hwcap_type = AT_HWCAP2,
+ .hwcap = HWCAP2_SME,
+ .rdvl = rdvl_sme,
+ .rdvl_binary = "./rdvl-sme",
+ .prctl_get = PR_SME_GET_VL,
+ .prctl_set = PR_SME_SET_VL,
+ .default_vl_file = "/proc/sys/abi/sme_default_vector_length",
+ },
+};
+
+static bool vec_type_supported(struct vec_data *data)
+{
+ return getauxval(data->hwcap_type) & data->hwcap;
+}
+
+static int stdio_read_integer(FILE *f, const char *what, int *val)
+{
+ int n = 0;
+ int ret;
+
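+ /*
+ * Match an integer followed by exactly one newline; %n is only
+ * assigned once the newline has been consumed, so n < 1 catches
+ * truncated or malformed input.
+ */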
+ ret = fscanf(f, "%d%*1[\n]%n", val, &n);
+ if (ret < 1 || n < 1) {
+ ksft_print_msg("failed to parse integer from %s\n", what);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Start a new process and return the vector length it sees */
+static int get_child_rdvl(struct vec_data *data)
+{
+ FILE *out;
+ int pipefd[2];
+ pid_t pid, child;
+ int read_vl, ret;
+
+ ret = pipe(pipefd);
+ if (ret == -1) {
+ ksft_print_msg("pipe() failed: %d (%s)\n",
+ errno, strerror(errno));
+ return -1;
+ }
+
+ fflush(stdout);
+
+ child = fork();
+ if (child == -1) {
+ ksft_print_msg("fork() failed: %d (%s)\n",
+ errno, strerror(errno));
+ close(pipefd[0]);
+ close(pipefd[1]);
+ return -1;
+ }
+
+ /* Child: put vector length on the pipe */
+ if (child == 0) {
+ /*
+ * Replace stdout with the pipe; errors go to stderr from
+ * here on, as kselftest prints to stdout.
+ */
+ ret = dup2(pipefd[1], 1);
+ if (ret == -1) {
+ fprintf(stderr, "dup2() %d\n", errno);
+ exit(EXIT_FAILURE);
+ }
+
+ /* exec() a new binary which puts the VL on stdout */
+ ret = execl(data->rdvl_binary, data->rdvl_binary, NULL);
+ fprintf(stderr, "execl(%s) failed: %d (%s)\n",
+ data->rdvl_binary, errno, strerror(errno));
+
+ exit(EXIT_FAILURE);
+ }
+
+ close(pipefd[1]);
+
+ /* Parent; wait for the exit status from the child & verify it */
+ do {
+ pid = wait(&ret);
+ if (pid == -1) {
+ ksft_print_msg("wait() failed: %d (%s)\n",
+ errno, strerror(errno));
+ close(pipefd[0]);
+ return -1;
+ }
+ } while (pid != child);
+
+ assert(pid == child);
+
+ if (!WIFEXITED(ret)) {
+ ksft_print_msg("child exited abnormally\n");
+ close(pipefd[0]);
+ return -1;
+ }
+
+ if (WEXITSTATUS(ret) != 0) {
+ ksft_print_msg("child returned error %d\n",
+ WEXITSTATUS(ret));
+ close(pipefd[0]);
+ return -1;
+ }
+
+ out = fdopen(pipefd[0], "r");
+ if (!out) {
+ ksft_print_msg("failed to open child stdout\n");
+ close(pipefd[0]);
+ return -1;
+ }
+
+ ret = stdio_read_integer(out, "child", &read_vl);
+ fclose(out);
+ if (ret != 0)
+ return ret;
+
+ return read_vl;
+}
+
+static int file_read_integer(const char *name, int *val)
+{
+ FILE *f;
+ int ret;
+
+ f = fopen(name, "r");
+ if (!f) {
+ ksft_test_result_fail("Unable to open %s: %d (%s)\n",
+ name, errno,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = stdio_read_integer(f, name, val);
+ fclose(f);
+
+ return ret;
+}
+
+static int file_write_integer(const char *name, int val)
+{
+ FILE *f;
+
+ f = fopen(name, "w");
+ if (!f) {
+ ksft_test_result_fail("Unable to open %s: %d (%s)\n",
+ name, errno,
+ strerror(errno));
+ return -1;
+ }
+
+ fprintf(f, "%d", val);
+ fclose(f);
+
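+ /*
+ * The write itself is not checked here; callers verify it by
+ * reading the value back or checking its effect on a child.
+ */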
+ return 0;
+}
+
+/*
+ * Verify that we can read the default VL via proc, checking that it
+ * is set in a freshly spawned child.
+ */
+static void proc_read_default(struct vec_data *data)
+{
+ int default_vl, child_vl, ret;
+
+ ret = file_read_integer(data->default_vl_file, &default_vl);
+ if (ret != 0)
+ return;
+
+ /* Is this the actual default seen by new processes? */
+ child_vl = get_child_rdvl(data);
+ if (child_vl != default_vl) {
+ ksft_test_result_fail("%s is %d but child VL is %d\n",
+ data->default_vl_file,
+ default_vl, child_vl);
+ return;
+ }
+
+ ksft_test_result_pass("%s default vector length %d\n", data->name,
+ default_vl);
+ data->default_vl = default_vl;
+}
+
+/* Verify that we can write a minimum value and have it take effect */
+static void proc_write_min(struct vec_data *data)
+{
+ int ret, new_default, child_vl;
+
+ if (geteuid() != 0) {
+ ksft_test_result_skip("Need to be root to write to /proc\n");
+ return;
+ }
+
+ ret = file_write_integer(data->default_vl_file, ARCH_MIN_VL);
+ if (ret != 0)
+ return;
+
+ /* What was the new value? */
+ ret = file_read_integer(data->default_vl_file, &new_default);
+ if (ret != 0)
+ return;
+
+ /* Did it take effect in a new process? */
+ child_vl = get_child_rdvl(data);
+ if (child_vl != new_default) {
+ ksft_test_result_fail("%s is %d but child VL is %d\n",
+ data->default_vl_file,
+ new_default, child_vl);
+ return;
+ }
+
+ ksft_test_result_pass("%s minimum vector length %d\n", data->name,
+ new_default);
+ data->min_vl = new_default;
+
+ file_write_integer(data->default_vl_file, data->default_vl);
+}
+
+/* Verify that we can write a maximum value and have it take effect */
+static void proc_write_max(struct vec_data *data)
+{
+ int ret, new_default, child_vl;
+
+ if (geteuid() != 0) {
+ ksft_test_result_skip("Need to be root to write to /proc\n");
+ return;
+ }
+
+ /* -1 is accepted by the /proc interface as the maximum VL */
+ ret = file_write_integer(data->default_vl_file, -1);
+ if (ret != 0)
+ return;
+
+ /* What was the new value? */
+ ret = file_read_integer(data->default_vl_file, &new_default);
+ if (ret != 0)
+ return;
+
+ /* Did it take effect in a new process? */
+ child_vl = get_child_rdvl(data);
+ if (child_vl != new_default) {
+ ksft_test_result_fail("%s is %d but child VL is %d\n",
+ data->default_vl_file,
+ new_default, child_vl);
+ return;
+ }
+
+ ksft_test_result_pass("%s maximum vector length %d\n", data->name,
+ new_default);
+ data->max_vl = new_default;
+
+ file_write_integer(data->default_vl_file, data->default_vl);
+}
+
+/* Can we read back a VL from prctl? */
+static void prctl_get(struct vec_data *data)
+{
+ int ret;
+
+ ret = prctl(data->prctl_get);
+ if (ret == -1) {
+ ksft_test_result_fail("%s prctl() read failed: %d (%s)\n",
+ data->name, errno, strerror(errno));
+ return;
+ }
+
+ /* Mask out any flags */
+ ret &= PR_SVE_VL_LEN_MASK;
+
+ /* Is that what we can read back directly? */
+ if (ret == data->rdvl())
+ ksft_test_result_pass("%s current VL is %d\n",
+ data->name, ret);
+ else
+ ksft_test_result_fail("%s prctl() VL %d but RDVL is %d\n",
+ data->name, ret, data->rdvl());
+}
+
+/* Does the prctl let us set the VL we already have? */
+static void prctl_set_same(struct vec_data *data)
+{
+ int cur_vl = data->rdvl();
+ int ret;
+
+ ret = prctl(data->prctl_set, cur_vl);
+ if (ret < 0) {
+ ksft_test_result_fail("%s prctl set failed: %d (%s)\n",
+ data->name, errno, strerror(errno));
+ return;
+ }
+
+ ksft_test_result(cur_vl == data->rdvl(),
+ "%s set VL %d and have VL %d\n",
+ data->name, cur_vl, data->rdvl());
+}
+
+/* Can we set a new VL for this process? */
+static void prctl_set(struct vec_data *data)
+{
+ int ret;
+
+ if (data->min_vl == data->max_vl) {
+ ksft_test_result_skip("%s only one VL supported\n",
+ data->name);
+ return;
+ }
+
+ /* Try to set the minimum VL */
+ ret = prctl(data->prctl_set, data->min_vl);
+ if (ret < 0) {
+ ksft_test_result_fail("%s prctl set failed for %d: %d (%s)\n",
+ data->name, data->min_vl,
+ errno, strerror(errno));
+ return;
+ }
+
+ if ((ret & PR_SVE_VL_LEN_MASK) != data->min_vl) {
+ ksft_test_result_fail("%s prctl set %d but return value is %d\n",
+ data->name, data->min_vl, data->rdvl());
+ return;
+ }
+
+ if (data->rdvl() != data->min_vl) {
+ ksft_test_result_fail("%s set %d but RDVL is %d\n",
+ data->name, data->min_vl, data->rdvl());
+ return;
+ }
+
+ /* Try to set the maximum VL */
+ ret = prctl(data->prctl_set, data->max_vl);
+ if (ret < 0) {
+ ksft_test_result_fail("%s prctl set failed for %d: %d (%s)\n",
+ data->name, data->max_vl,
+ errno, strerror(errno));
+ return;
+ }
+
+ if ((ret & PR_SVE_VL_LEN_MASK) != data->max_vl) {
+ ksft_test_result_fail("%s prctl() set %d but return value is %d\n",
+ data->name, data->max_vl, data->rdvl());
+ return;
+ }
+
+ /* The _INHERIT flag should not be present when we read the VL */
+ ret = prctl(data->prctl_get);
+ if (ret == -1) {
+ ksft_test_result_fail("%s prctl() read failed: %d (%s)\n",
+ data->name, errno, strerror(errno));
+ return;
+ }
+
+ if (ret & PR_SVE_VL_INHERIT) {
+ ksft_test_result_fail("%s prctl() reports _INHERIT\n",
+ data->name);
+ return;
+ }
+
+ ksft_test_result_pass("%s prctl() set min/max\n", data->name);
+}
+
+/* If we didn't request it, a new VL shouldn't affect the child */
+static void prctl_set_no_child(struct vec_data *data)
+{
+ int ret, child_vl;
+
+ if (data->min_vl == data->max_vl) {
+ ksft_test_result_skip("%s only one VL supported\n",
+ data->name);
+ return;
+ }
+
+ ret = prctl(data->prctl_set, data->min_vl);
+ if (ret < 0) {
+ ksft_test_result_fail("%s prctl set failed for %d: %d (%s)\n",
+ data->name, data->min_vl,
+ errno, strerror(errno));
+ return;
+ }
+
+ /* Ensure the default VL is different */
+ ret = file_write_integer(data->default_vl_file, data->max_vl);
+ if (ret != 0)
+ return;
+
+ /* Check that the child has the default we just set */
+ child_vl = get_child_rdvl(data);
+ if (child_vl != data->max_vl) {
+ ksft_test_result_fail("%s is %d but child VL is %d\n",
+ data->default_vl_file,
+ data->max_vl, child_vl);
+ return;
+ }
+
+ ksft_test_result_pass("%s vector length used default\n", data->name);
+
+ file_write_integer(data->default_vl_file, data->default_vl);
+}
+
+/* If we requested it, the new VL should be inherited by the child */
+static void prctl_set_for_child(struct vec_data *data)
+{
+ int ret, child_vl;
+
+ if (data->min_vl == data->max_vl) {
+ ksft_test_result_skip("%s only one VL supported\n",
+ data->name);
+ return;
+ }
+
+ ret = prctl(data->prctl_set, data->min_vl | PR_SVE_VL_INHERIT);
+ if (ret < 0) {
+ ksft_test_result_fail("%s prctl set failed for %d: %d (%s)\n",
+ data->name, data->min_vl,
+ errno, strerror(errno));
+ return;
+ }
+
+ /* The _INHERIT flag should be present when we read the VL */
+ ret = prctl(data->prctl_get);
+ if (ret == -1) {
+ ksft_test_result_fail("%s prctl() read failed: %d (%s)\n",
+ data->name, errno, strerror(errno));
+ return;
+ }
+ if (!(ret & PR_SVE_VL_INHERIT)) {
+ ksft_test_result_fail("%s prctl() does not report _INHERIT\n",
+ data->name);
+ return;
+ }
+
+ /* Ensure the default VL is different */
+ ret = file_write_integer(data->default_vl_file, data->max_vl);
+ if (ret != 0)
+ return;
+
+ /* Check that the child inherited our VL */
+ child_vl = get_child_rdvl(data);
+ if (child_vl != data->min_vl) {
+ ksft_test_result_fail("%s is %d but child VL is %d\n",
+ data->default_vl_file,
+ data->min_vl, child_vl);
+ return;
+ }
+
+ ksft_test_result_pass("%s vector length was inherited\n", data->name);
+
+ file_write_integer(data->default_vl_file, data->default_vl);
+}
+
+/* _ONEXEC takes effect only in the child process */
+static void prctl_set_onexec(struct vec_data *data)
+{
+ int ret, child_vl;
+
+ if (data->min_vl == data->max_vl) {
+ ksft_test_result_skip("%s only one VL supported\n",
+ data->name);
+ return;
+ }
+
+ /* Set a known value for the default and our current VL */
+ ret = file_write_integer(data->default_vl_file, data->max_vl);
+ if (ret != 0)
+ return;
+
+ ret = prctl(data->prctl_set, data->max_vl);
+ if (ret < 0) {
+ ksft_test_result_fail("%s prctl set failed for %d: %d (%s)\n",
+ data->name, data->min_vl,
+ errno, strerror(errno));
+ return;
+ }
+
+ /* Set a different value for the child to have on exec */
+ ret = prctl(data->prctl_set, data->min_vl | PR_SVE_SET_VL_ONEXEC);
+ if (ret < 0) {
+ ksft_test_result_fail("%s prctl set failed for %d: %d (%s)\n",
+ data->name, data->min_vl,
+ errno, strerror(errno));
+ return;
+ }
+
+ /* Our current VL should stay the same */
+ if (data->rdvl() != data->max_vl) {
+ ksft_test_result_fail("%s VL changed by _ONEXEC prctl()\n",
+ data->name);
+ return;
+ }
+
+ /* Check that the child picked up the _ONEXEC VL */
+ child_vl = get_child_rdvl(data);
+ if (child_vl != data->min_vl) {
+ ksft_test_result_fail("Set %d _ONEXEC but child VL is %d\n",
+ data->min_vl, child_vl);
+ return;
+ }
+
+ ksft_test_result_pass("%s vector length set on exec\n", data->name);
+
+ file_write_integer(data->default_vl_file, data->default_vl);
+}
+
+/* For each VQ verify that setting via prctl() does the right thing */
+static void prctl_set_all_vqs(struct vec_data *data)
+{
+ int ret, vq, vl, new_vl, i;
+ int orig_vls[ARRAY_SIZE(vec_data)];
+ int errors = 0;
+
+ if (!data->min_vl || !data->max_vl) {
+ ksft_test_result_skip("%s Failed to enumerate VLs, not testing VL setting\n",
+ data->name);
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vec_data); i++) {
+ if (!vec_type_supported(&vec_data[i]))
+ continue;
+ orig_vls[i] = vec_data[i].rdvl();
+ }
+
+ for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; vq++) {
+ vl = sve_vl_from_vq(vq);
+
+ /* Attempt to set the VL */
+ ret = prctl(data->prctl_set, vl);
+ if (ret < 0) {
+ errors++;
+ ksft_print_msg("%s prctl set failed for %d: %d (%s)\n",
+ data->name, vl,
+ errno, strerror(errno));
+ continue;
+ }
+
+ new_vl = ret & PR_SVE_VL_LEN_MASK;
+
+ /* Check that we actually have the reported new VL */
+ if (data->rdvl() != new_vl) {
+ ksft_print_msg("Set %s VL %d but RDVL reports %d\n",
+ data->name, new_vl, data->rdvl());
+ errors++;
+ }
+
+ /* Did any other VLs change? */
+ for (i = 0; i < ARRAY_SIZE(vec_data); i++) {
+ if (&vec_data[i] == data)
+ continue;
+
+ if (!vec_type_supported(&vec_data[i]))
+ continue;
+
+ if (vec_data[i].rdvl() != orig_vls[i]) {
+ ksft_print_msg("%s VL changed from %d to %d\n",
+ vec_data[i].name, orig_vls[i],
+ vec_data[i].rdvl());
+ errors++;
+ }
+ }
+
+ /* Was that the VL we asked for? */
+ if (new_vl == vl)
+ continue;
+
+ /* Should round up to the minimum VL if below it */
+ if (vl < data->min_vl) {
+ if (new_vl != data->min_vl) {
+ ksft_print_msg("%s VL %d returned %d not minimum %d\n",
+ data->name, vl, new_vl,
+ data->min_vl);
+ errors++;
+ }
+
+ continue;
+ }
+
+ /* Should round down to maximum VL if above it */
+ if (vl > data->max_vl) {
+ if (new_vl != data->max_vl) {
+ ksft_print_msg("%s VL %d returned %d not maximum %d\n",
+ data->name, vl, new_vl,
+ data->max_vl);
+ errors++;
+ }
+
+ continue;
+ }
+
+ /* Otherwise we should've rounded down */
+ if (!(new_vl < vl)) {
+ ksft_print_msg("%s VL %d returned %d, did not round down\n",
+ data->name, vl, new_vl);
+ errors++;
+
+ continue;
+ }
+ }
+
+ ksft_test_result(errors == 0, "%s prctl() set all VLs, %d errors\n",
+ data->name, errors);
+}
+
+typedef void (*test_type)(struct vec_data *);
+
+static const test_type tests[] = {
+ /*
+ * The default/min/max tests must be first and in this order
+ * to provide data for other tests.
+ */
+ proc_read_default,
+ proc_write_min,
+ proc_write_max,
+
+ prctl_get,
+ prctl_set_same,
+ prctl_set,
+ prctl_set_no_child,
+ prctl_set_for_child,
+ prctl_set_onexec,
+ prctl_set_all_vqs,
+};
+
+static inline void smstart(void)
+{
+ asm volatile("msr S0_3_C4_C7_3, xzr");
+}
+
+static inline void smstart_sm(void)
+{
+ asm volatile("msr S0_3_C4_C3_3, xzr");
+}
+
+static inline void smstop(void)
+{
+ asm volatile("msr S0_3_C4_C6_3, xzr");
+}
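+
+/*
+ * The raw MSR encodings above are the SMSTART/SMSTOP aliases
+ * (writes to SVCR), open coded so the test builds with toolchains
+ * that do not know about SME.
+ */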
+
+/*
+ * Verify we can change the SVE vector length while SME is active and
+ * continue to use SME afterwards.
+ */
+static void change_sve_with_za(void)
+{
+ struct vec_data *sve_data = &vec_data[VEC_SVE];
+ bool pass = true;
+ int ret, i;
+
+ if (sve_data->min_vl == sve_data->max_vl) {
+ ksft_print_msg("Only one SVE VL supported, can't change\n");
+ ksft_test_result_skip("change_sve_while_sme\n");
+ return;
+ }
+
+ /* Ensure we will trigger a change when we set the maximum */
+ ret = prctl(sve_data->prctl_set, sve_data->min_vl);
+ if (ret != sve_data->min_vl) {
+ ksft_print_msg("Failed to set SVE VL %d: %d\n",
+ sve_data->min_vl, ret);
+ pass = false;
+ }
+
+ /* Enable SM and ZA */
+ smstart();
+
+ /* Trigger another VL change */
+ ret = prctl(sve_data->prctl_set, sve_data->max_vl);
+ if (ret != sve_data->max_vl) {
+ ksft_print_msg("Failed to set SVE VL %d: %d\n",
+ sve_data->max_vl, ret);
+ pass = false;
+ }
+
+ /*
+ * Spin for a bit with SM enabled to try to trigger another
+ * save/restore. We can't use syscalls without exiting
+ * streaming mode.
+ */
+ for (i = 0; i < 100000000; i++)
+ smstart_sm();
+
+ /*
+ * TODO: Verify that ZA was preserved over the VL change and
+ * spin.
+ */
+
+ /* Clean up after ourselves */
+ smstop();
+ ret = prctl(sve_data->prctl_set, sve_data->default_vl);
+ if (ret != sve_data->default_vl) {
+ ksft_print_msg("Failed to restore SVE VL %d: %d\n",
+ sve_data->default_vl, ret);
+ pass = false;
+ }
+
+ ksft_test_result(pass, "change_sve_with_za\n");
+}
+
+typedef void (*test_all_type)(void);
+
+static const struct {
+ const char *name;
+ test_all_type test;
+} all_types_tests[] = {
+ { "change_sve_with_za", change_sve_with_za },
+};
+
+int main(void)
+{
+ bool all_supported = true;
+ int i, j;
+
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests) * ARRAY_SIZE(vec_data) +
+ ARRAY_SIZE(all_types_tests));
+
+ for (i = 0; i < ARRAY_SIZE(vec_data); i++) {
+ struct vec_data *data = &vec_data[i];
+ unsigned long supported;
+
+ supported = vec_type_supported(data);
+ if (!supported)
+ all_supported = false;
+
+ for (j = 0; j < ARRAY_SIZE(tests); j++) {
+ if (supported)
+ tests[j](data);
+ else
+ ksft_test_result_skip("%s not supported\n",
+ data->name);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(all_types_tests); i++) {
+ if (all_supported)
+ all_types_tests[i].test();
+ else
+ ksft_test_result_skip("%s\n", all_types_tests[i].name);
+ }
+
+ ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/arm64/fp/vlset.c b/tools/testing/selftests/arm64/fp/vlset.c
index 308d27a68226..76912a581a95 100644
--- a/tools/testing/selftests/arm64/fp/vlset.c
+++ b/tools/testing/selftests/arm64/fp/vlset.c
@@ -22,12 +22,15 @@ static int inherit = 0;
static int no_inherit = 0;
static int force = 0;
static unsigned long vl;
+static int set_ctl = PR_SVE_SET_VL;
+static int get_ctl = PR_SVE_GET_VL;
static const struct option options[] = {
{ "force", no_argument, NULL, 'f' },
{ "inherit", no_argument, NULL, 'i' },
{ "max", no_argument, NULL, 'M' },
{ "no-inherit", no_argument, &no_inherit, 1 },
+ { "sme", no_argument, NULL, 's' },
{ "help", no_argument, NULL, '?' },
{}
};
@@ -50,6 +53,9 @@ static int parse_options(int argc, char **argv)
case 'M': vl = SVE_VL_MAX; break;
case 'f': force = 1; break;
case 'i': inherit = 1; break;
+ case 's': set_ctl = PR_SME_SET_VL;
+ get_ctl = PR_SME_GET_VL;
+ break;
case 0: break;
default: goto error;
}
@@ -125,14 +131,14 @@ int main(int argc, char **argv)
if (inherit)
flags |= PR_SVE_VL_INHERIT;
- t = prctl(PR_SVE_SET_VL, vl | flags);
+ t = prctl(set_ctl, vl | flags);
if (t < 0) {
fprintf(stderr, "%s: PR_SVE_SET_VL: %s\n",
program_name, strerror(errno));
goto error;
}
- t = prctl(PR_SVE_GET_VL);
+ t = prctl(get_ctl);
if (t == -1) {
fprintf(stderr, "%s: PR_SVE_GET_VL: %s\n",
program_name, strerror(errno));
diff --git a/tools/testing/selftests/arm64/fp/za-fork-asm.S b/tools/testing/selftests/arm64/fp/za-fork-asm.S
new file mode 100644
index 000000000000..2fafadd491c3
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/za-fork-asm.S
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2021 ARM Limited.
+
+#include "sme-inst.h"
+
+.arch_extension sve
+
+#define MAGIC 42
+
+#define MAXVL 2048
+#define MAXVL_B (MAXVL / 8)
+
+.pushsection .text
+.data
+.align 4
+scratch:
+ .space MAXVL_B
+.popsection
+
+.globl fork_test
+fork_test:
+ smstart_za
+
+ // For simplicity just set one word in one vector, other tests
+ // cover general data corruption issues.
+ ldr x0, =scratch
+ mov x1, #MAGIC
+ str x1, [x0]
+ mov w12, wzr
+ _ldr_za 12, 0 // ZA.H[W12] loaded from [X0]
+
+ // Tail call into the C portion that does the fork & verify
+ b fork_test_c
+
+.globl verify_fork
+verify_fork:
+ // SVCR should have ZA=1, SM=0
+ mrs x0, S3_3_C4_C2_2
+ and x1, x0, #3
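+	// SVCR.SM is bit 0 and SVCR.ZA is bit 1, so ZA-only reads back as 2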
+ cmp x1, #2
+ beq 1f
+ mov x0, xzr
+ b 100f
+1:
+
+ // ZA should still have the value we loaded
+ ldr x0, =scratch
+ mov w12, wzr
+ _str_za 12, 0 // ZA.H[W12] stored to [X0]
+ ldr x1, [x0]
+ cmp x1, #MAGIC
+ beq 2f
+ mov x0, xzr
+ b 100f
+
+2:
+ // All tests passed
+ mov x0, #1
+100:
+ ret
+
diff --git a/tools/testing/selftests/arm64/fp/za-fork.c b/tools/testing/selftests/arm64/fp/za-fork.c
new file mode 100644
index 000000000000..587b94648222
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/za-fork.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 ARM Limited.
+ * Original author: Mark Brown <broonie@kernel.org>
+ */
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include "kselftest.h"
+
+#define EXPECTED_TESTS 1
+
+int fork_test(void);
+int verify_fork(void);
+
+/*
+ * If we fork the value in the parent should be unchanged and the
+ * child should start with the same value. This is called from the
+ * fork_test() asm function.
+ */
+int fork_test_c(void)
+{
+ pid_t newpid, waiting;
+ int child_status, parent_result;
+
+ newpid = fork();
+ if (newpid == 0) {
+ /* In child */
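+		/* Note the inverted convention: exit status 1 reports pass, 0 reports fail */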
+ if (!verify_fork()) {
+ ksft_print_msg("ZA state invalid in child\n");
+ exit(0);
+ } else {
+ exit(1);
+ }
+ }
+ if (newpid < 0) {
+ ksft_print_msg("fork() failed: %d\n", newpid);
+
+ return 0;
+ }
+
+ parent_result = verify_fork();
+ if (!parent_result)
+ ksft_print_msg("ZA state invalid in parent\n");
+
+ for (;;) {
+ waiting = waitpid(newpid, &child_status, 0);
+
+ if (waiting < 0) {
+ if (errno == EINTR)
+ continue;
+ ksft_print_msg("waitpid() failed: %d\n", errno);
+ return 0;
+ }
+ if (waiting != newpid) {
+ ksft_print_msg("waitpid() returned wrong PID\n");
+ return 0;
+ }
+
+ if (!WIFEXITED(child_status)) {
+ ksft_print_msg("child did not exit\n");
+ return 0;
+ }
+
+ return WEXITSTATUS(child_status) && parent_result;
+ }
+}
+
+int main(int argc, char **argv)
+{
+ int ret, i;
+
+ ksft_print_header();
+ ksft_set_plan(EXPECTED_TESTS);
+
+ ksft_print_msg("PID: %d\n", getpid());
+
+ /*
+	 * This test is run with nolibc, which doesn't support hwcaps, and
+	 * implementing them here would be disproportionate, so instead
+	 * check for the SME default vector length configuration in /proc.
+ */
+ ret = open("/proc/sys/abi/sme_default_vector_length", O_RDONLY, 0);
+ if (ret >= 0) {
+ ksft_test_result(fork_test(), "fork_test\n");
+
+ } else {
+ ksft_print_msg("SME not supported\n");
+ for (i = 0; i < EXPECTED_TESTS; i++) {
+ ksft_test_result_skip("fork_test\n");
+ }
+ }
+
+ ksft_finished();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/arm64/fp/za-ptrace.c b/tools/testing/selftests/arm64/fp/za-ptrace.c
new file mode 100644
index 000000000000..787eed22d059
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/za-ptrace.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 ARM Limited.
+ */
+#include <errno.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+#include <sys/ptrace.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <sys/wait.h>
+#include <asm/sigcontext.h>
+#include <asm/ptrace.h>
+
+#include "kselftest.h"
+
+/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
+#ifndef NT_ARM_ZA
+#define NT_ARM_ZA 0x40c
+#endif
+
+/*
+ * The architecture defines the maximum VQ as 16 but for extensibility
+ * the kernel defines SVE_VQ_MAX as 512, which would result in running
+ * far more tests than are useful. Until the
+ * architecture is extended let's limit our coverage to what is
+ * currently allowed, plus one extra to ensure we cover constraining
+ * the VL as expected.
+ */
+#define TEST_VQ_MAX 17
+
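+/* Three results per VQ: the VL set/get, the no-data write and the data round trip */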
+#define EXPECTED_TESTS (((TEST_VQ_MAX - SVE_VQ_MIN) + 1) * 3)
+
+static void fill_buf(char *buf, size_t size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ buf[i] = random();
+}
+
+static int do_child(void)
+{
+ if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
+		ksft_exit_fail_msg("ptrace(PTRACE_TRACEME) failed: %s (%d)\n",
+ strerror(errno), errno);
+
+ if (raise(SIGSTOP))
+ ksft_exit_fail_msg("raise(SIGSTOP) failed: %s (%d)\n",
+ strerror(errno), errno);
+
+ return EXIT_SUCCESS;
+}
+
+static struct user_za_header *get_za(pid_t pid, void **buf, size_t *size)
+{
+ struct user_za_header *za;
+ void *p;
+ size_t sz = sizeof(*za);
+ struct iovec iov;
+
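+	/*
+	 * The kernel reports the size it needs in za->size, so keep
+	 * growing the buffer until the whole regset fits.
+	 */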
+ while (1) {
+ if (*size < sz) {
+ p = realloc(*buf, sz);
+ if (!p) {
+ errno = ENOMEM;
+ goto error;
+ }
+
+ *buf = p;
+ *size = sz;
+ }
+
+ iov.iov_base = *buf;
+ iov.iov_len = sz;
+ if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZA, &iov))
+ goto error;
+
+ za = *buf;
+ if (za->size <= sz)
+ break;
+
+ sz = za->size;
+ }
+
+ return za;
+
+error:
+ return NULL;
+}
+
+static int set_za(pid_t pid, const struct user_za_header *za)
+{
+ struct iovec iov;
+
+ iov.iov_base = (void *)za;
+ iov.iov_len = za->size;
+ return ptrace(PTRACE_SETREGSET, pid, NT_ARM_ZA, &iov);
+}
+
+/* Validate attempting to set the specified VL via ptrace */
+static void ptrace_set_get_vl(pid_t child, unsigned int vl, bool *supported)
+{
+ struct user_za_header za;
+ struct user_za_header *new_za = NULL;
+ size_t new_za_size = 0;
+ int ret, prctl_vl;
+
+ *supported = false;
+
+ /* Check if the VL is supported in this process */
+ prctl_vl = prctl(PR_SME_SET_VL, vl);
+ if (prctl_vl == -1)
+ ksft_exit_fail_msg("prctl(PR_SME_SET_VL) failed: %s (%d)\n",
+ strerror(errno), errno);
+
+ /* If the VL is not supported then a supported VL will be returned */
+ *supported = (prctl_vl == vl);
+
+ /* Set the VL by doing a set with no register payload */
+ memset(&za, 0, sizeof(za));
+ za.size = sizeof(za);
+ za.vl = vl;
+ ret = set_za(child, &za);
+ if (ret != 0) {
+ ksft_test_result_fail("Failed to set VL %u\n", vl);
+ return;
+ }
+
+ /*
+ * Read back the new register state and verify that we have the
+ * same VL that we got from prctl() on ourselves.
+ */
+ if (!get_za(child, (void **)&new_za, &new_za_size)) {
+ ksft_test_result_fail("Failed to read VL %u\n", vl);
+ return;
+ }
+
+	ksft_test_result(new_za->vl == prctl_vl, "Set VL %u\n", vl);
+
+ free(new_za);
+}
+
+/* Validate attempting to set no ZA data and read it back */
+static void ptrace_set_no_data(pid_t child, unsigned int vl)
+{
+ void *read_buf = NULL;
+ struct user_za_header write_za;
+ struct user_za_header *read_za;
+ size_t read_za_size = 0;
+ int ret;
+
+ /* Set up some data and write it out */
+ memset(&write_za, 0, sizeof(write_za));
+ write_za.size = ZA_PT_ZA_OFFSET;
+ write_za.vl = vl;
+
+ ret = set_za(child, &write_za);
+ if (ret != 0) {
+ ksft_test_result_fail("Failed to set VL %u no data\n", vl);
+ return;
+ }
+
+ /* Read the data back */
+ if (!get_za(child, (void **)&read_buf, &read_za_size)) {
+ ksft_test_result_fail("Failed to read VL %u no data\n", vl);
+ return;
+ }
+ read_za = read_buf;
+
+ /* We might read more data if there's extensions we don't know */
+ if (read_za->size < write_za.size) {
+ ksft_test_result_fail("VL %u wrote %d bytes, only read %d\n",
+ vl, write_za.size, read_za->size);
+ goto out_read;
+ }
+
+ ksft_test_result(read_za->size == write_za.size,
+ "Disabled ZA for VL %u\n", vl);
+
+out_read:
+ free(read_buf);
+}
+
+/* Validate attempting to set data and read it back */
+static void ptrace_set_get_data(pid_t child, unsigned int vl)
+{
+ void *write_buf;
+ void *read_buf = NULL;
+ struct user_za_header *write_za;
+ struct user_za_header *read_za;
+ size_t read_za_size = 0;
+ unsigned int vq = sve_vq_from_vl(vl);
+ int ret;
+ size_t data_size;
+
+ data_size = ZA_PT_SIZE(vq);
+ write_buf = malloc(data_size);
+ if (!write_buf) {
+ ksft_test_result_fail("Error allocating %ld byte buffer for VL %u\n",
+ data_size, vl);
+ return;
+ }
+ write_za = write_buf;
+
+ /* Set up some data and write it out */
+ memset(write_za, 0, data_size);
+ write_za->size = data_size;
+ write_za->vl = vl;
+
+ fill_buf(write_buf + ZA_PT_ZA_OFFSET, ZA_PT_ZA_SIZE(vq));
+
+ ret = set_za(child, write_za);
+ if (ret != 0) {
+ ksft_test_result_fail("Failed to set VL %u data\n", vl);
+ goto out;
+ }
+
+ /* Read the data back */
+ if (!get_za(child, (void **)&read_buf, &read_za_size)) {
+ ksft_test_result_fail("Failed to read VL %u data\n", vl);
+ goto out;
+ }
+ read_za = read_buf;
+
+ /* We might read more data if there's extensions we don't know */
+ if (read_za->size < write_za->size) {
+ ksft_test_result_fail("VL %u wrote %d bytes, only read %d\n",
+ vl, write_za->size, read_za->size);
+ goto out_read;
+ }
+
+ ksft_test_result(memcmp(write_buf + ZA_PT_ZA_OFFSET,
+ read_buf + ZA_PT_ZA_OFFSET,
+ ZA_PT_ZA_SIZE(vq)) == 0,
+ "Data match for VL %u\n", vl);
+
+out_read:
+ free(read_buf);
+out:
+ free(write_buf);
+}
+
+static int do_parent(pid_t child)
+{
+ int ret = EXIT_FAILURE;
+ pid_t pid;
+ int status;
+ siginfo_t si;
+ unsigned int vq, vl;
+ bool vl_supported;
+
+ /* Attach to the child */
+ while (1) {
+ int sig;
+
+ pid = wait(&status);
+ if (pid == -1) {
+ perror("wait");
+ goto error;
+ }
+
+ /*
+ * This should never happen but it's hard to flag in
+ * the framework.
+ */
+ if (pid != child)
+ continue;
+
+ if (WIFEXITED(status) || WIFSIGNALED(status))
+ ksft_exit_fail_msg("Child died unexpectedly\n");
+
+ if (!WIFSTOPPED(status))
+ goto error;
+
+ sig = WSTOPSIG(status);
+
+ if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
+ if (errno == ESRCH)
+ goto disappeared;
+
+ if (errno == EINVAL) {
+ sig = 0; /* bust group-stop */
+ goto cont;
+ }
+
+ ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n",
+ strerror(errno));
+ goto error;
+ }
+
+ if (sig == SIGSTOP && si.si_code == SI_TKILL &&
+ si.si_pid == pid)
+ break;
+
+ cont:
+ if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
+ if (errno == ESRCH)
+ goto disappeared;
+
+ ksft_test_result_fail("PTRACE_CONT: %s\n",
+ strerror(errno));
+ goto error;
+ }
+ }
+
+ ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);
+
+ /* Step through every possible VQ */
+ for (vq = SVE_VQ_MIN; vq <= TEST_VQ_MAX; vq++) {
+ vl = sve_vl_from_vq(vq);
+
+ /* First, try to set this vector length */
+ ptrace_set_get_vl(child, vl, &vl_supported);
+
+ /* If the VL is supported validate data set/get */
+ if (vl_supported) {
+ ptrace_set_no_data(child, vl);
+ ptrace_set_get_data(child, vl);
+ } else {
+ ksft_test_result_skip("Disabled ZA for VL %u\n", vl);
+ ksft_test_result_skip("Get and set data for VL %u\n",
+ vl);
+ }
+ }
+
+ ret = EXIT_SUCCESS;
+
+error:
+ kill(child, SIGKILL);
+
+disappeared:
+ return ret;
+}
+
+int main(void)
+{
+ int ret = EXIT_SUCCESS;
+ pid_t child;
+
+ srandom(getpid());
+
+ ksft_print_header();
+
+ if (!(getauxval(AT_HWCAP2) & HWCAP2_SME)) {
+ ksft_set_plan(1);
+ ksft_exit_skip("SME not available\n");
+ }
+
+ ksft_set_plan(EXPECTED_TESTS);
+
+ child = fork();
+ if (!child)
+ return do_child();
+
+ if (do_parent(child))
+ ret = EXIT_FAILURE;
+
+ ksft_print_cnts();
+
+ return ret;
+}
diff --git a/tools/testing/selftests/arm64/fp/za-stress b/tools/testing/selftests/arm64/fp/za-stress
new file mode 100644
index 000000000000..5ac386b55b95
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/za-stress
@@ -0,0 +1,59 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2015-2019 ARM Limited.
+# Original author: Dave Martin <Dave.Martin@arm.com>
+
+set -ue
+
+NR_CPUS=`nproc`
+
+pids=
+logs=
+
+cleanup () {
+ trap - INT TERM CHLD
+ set +e
+
+ if [ -n "$pids" ]; then
+ kill $pids
+ wait $pids
+ pids=
+ fi
+
+ if [ -n "$logs" ]; then
+ cat $logs
+ rm $logs
+ logs=
+ fi
+}
+
+interrupt () {
+ cleanup
+ exit 0
+}
+
+child_died () {
+ cleanup
+ exit 1
+}
+
+trap interrupt INT TERM EXIT
+trap child_died CHLD
+
+for x in `seq 0 $((NR_CPUS * 4))`; do
+ log=`mktemp`
+ logs=$logs\ $log
+ ./za-test >$log &
+ pids=$pids\ $!
+done
+
+# Wait for all child processes to be created:
+sleep 10
+
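+# Hammer the children with SIGUSR1 so their irritator handlers force
+# extra vector state save/restore activity: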
+while :; do
+ kill -USR1 $pids
+done &
+pids=$pids\ $!
+
+wait
+
+exit 1
diff --git a/tools/testing/selftests/arm64/fp/za-test.S b/tools/testing/selftests/arm64/fp/za-test.S
new file mode 100644
index 000000000000..9c33e13e9dc4
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/za-test.S
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2021 ARM Limited.
+// Original author: Mark Brown <broonie@kernel.org>
+//
+// Scalable Matrix Extension ZA context switch test
+// Repeatedly writes unique test patterns into each ZA tile
+// and reads them back to verify integrity.
+//
+// for x in `seq 1 NR_CPUS`; do za-test & pids=$pids\ $! ; done
+// (leave it running for as long as you want...)
+// kill $pids
+
+#include <asm/unistd.h>
+#include "assembler.h"
+#include "asm-offsets.h"
+#include "sme-inst.h"
+
+.arch_extension sve
+
+#define MAXVL 2048
+#define MAXVL_B (MAXVL / 8)
+
+// Declare some storage space to shadow ZA register contents and a
+// scratch buffer for a vector.
+.pushsection .text
+.data
+.align 4
+zaref:
+ .space MAXVL_B * MAXVL_B
+scratch:
+ .space MAXVL_B
+.popsection
+
+// Trivial memory copy: copy x2 bytes, starting at address x1, to address x0.
+// Clobbers x0-x3
+function memcpy
+ cmp x2, #0
+ b.eq 1f
+0: ldrb w3, [x1], #1
+ strb w3, [x0], #1
+ subs x2, x2, #1
+ b.ne 0b
+1: ret
+endfunction
+
+// Generate a test pattern for storage in ZA
+// x0: pid
+// x1: row in ZA
+// x2: generation
+
+// These values are used to construct a 32-bit pattern that is repeated in the
+// scratch buffer as many times as will fit:
+// bits 31:28 generation number (increments once per test_loop)
+// bits 27:16 pid
+// bits 15: 8 row number
+// bits 7: 0 32-bit lane index
+
+function pattern
+ mov w3, wzr
+ bfi w3, w0, #16, #12 // PID
+ bfi w3, w1, #8, #8 // Row
+ bfi w3, w2, #28, #4 // Generation
+
+ ldr x0, =scratch
+ mov w1, #MAXVL_B / 4
+
+0: str w3, [x0], #4
+ add w3, w3, #1 // Lane
+ subs w1, w1, #1
+ b.ne 0b
+
+ ret
+endfunction
+
+// Get the address of shadow data for ZA horizontal vector xn
+.macro _adrza xd, xn, nrtmp
+ ldr \xd, =zaref
+ rdsvl \nrtmp, 1
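+	// rdsvl with immediate 1 gives the streaming VL in bytes, ie one ZA row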
+ madd \xd, x\nrtmp, \xn, \xd
+.endm
+
+// Set up test pattern in a ZA horizontal vector
+// x0: pid
+// x1: row number
+// x2: generation
+function setup_za
+ mov x4, x30
+ mov x12, x1 // Use x12 for vector select
+
+ bl pattern // Get pattern in scratch buffer
+ _adrza x0, x12, 2 // Shadow buffer pointer to x0 and x5
+ mov x5, x0
+ ldr x1, =scratch
+ bl memcpy // length set up in x2 by _adrza
+
+ _ldr_za 12, 5 // load vector w12 from pointer x5
+
+ ret x4
+endfunction
+
+// Trivial memory compare: compare x2 bytes starting at address x0 with
+// bytes starting at address x1.
+// Returns only if all bytes match; otherwise, the program is aborted.
+// Clobbers x0-x5.
+function memcmp
+ cbz x2, 2f
+
+ stp x0, x1, [sp, #-0x20]!
+ str x2, [sp, #0x10]
+
+ mov x5, #0
+0: ldrb w3, [x0, x5]
+ ldrb w4, [x1, x5]
+ add x5, x5, #1
+ cmp w3, w4
+ b.ne 1f
+ subs x2, x2, #1
+ b.ne 0b
+
+1: ldr x2, [sp, #0x10]
+ ldp x0, x1, [sp], #0x20
+ b.ne barf
+
+2: ret
+endfunction
+
+// Verify that a ZA vector matches its shadow in memory, else abort
+// x0: row number
+// Clobbers x0-x7 and x12.
+function check_za
+ mov x3, x30
+
+ mov x12, x0
+ _adrza x5, x0, 6 // pointer to expected value in x5
+ mov x4, x0
+ ldr x7, =scratch // x7 is scratch
+
+ mov x0, x7 // Poison scratch
+ mov x1, x6
+ bl memfill_ae
+
+ _str_za 12, 7 // save vector w12 to pointer x7
+
+ mov x0, x5
+ mov x1, x7
+ mov x2, x6
+ mov x30, x3
+ b memcmp
+endfunction
+
+// Modify the live SME register state, signal return will undo our changes
+function irritator_handler
+ // Increment the irritation signal count (x23):
+ ldr x0, [x2, #ucontext_regs + 8 * 23]
+ add x0, x0, #1
+ str x0, [x2, #ucontext_regs + 8 * 23]
+
+ // This will reset ZA to all bits 0
+ smstop
+ smstart_za
+
+ ret
+endfunction
+
+function tickle_handler
+ // Increment the signal count (x23):
+ ldr x0, [x2, #ucontext_regs + 8 * 23]
+ add x0, x0, #1
+ str x0, [x2, #ucontext_regs + 8 * 23]
+
+ ret
+endfunction
+
+function terminate_handler
+ mov w21, w0
+ mov x20, x2
+
+ puts "Terminated by signal "
+ mov w0, w21
+ bl putdec
+ puts ", no error, iterations="
+ ldr x0, [x20, #ucontext_regs + 8 * 22]
+ bl putdec
+ puts ", signals="
+ ldr x0, [x20, #ucontext_regs + 8 * 23]
+ bl putdecn
+
+ mov x0, #0
+ mov x8, #__NR_exit
+ svc #0
+endfunction
+
+// w0: signal number
+// x1: sa_action
+// w2: sa_flags
+// Clobbers x0-x6,x8
+function setsignal
+ str x30, [sp, #-((sa_sz + 15) / 16 * 16 + 16)]!
+
+ mov w4, w0
+ mov x5, x1
+ mov w6, w2
+
+ add x0, sp, #16
+ mov x1, #sa_sz
+ bl memclr
+
+ mov w0, w4
+ add x1, sp, #16
+ str w6, [x1, #sa_flags]
+ str x5, [x1, #sa_handler]
+ mov x2, #0
+ mov x3, #sa_mask_sz
+ mov x8, #__NR_rt_sigaction
+ svc #0
+
+ cbz w0, 1f
+
+ puts "sigaction failure\n"
+ b .Labort
+
+1: ldr x30, [sp], #((sa_sz + 15) / 16 * 16 + 16)
+ ret
+endfunction
+
+// Main program entry point
+.globl _start
+function _start
+ enable_gcs
+
+ mov x23, #0 // signal count
+
+ mov w0, #SIGINT
+ adr x1, terminate_handler
+ mov w2, #SA_SIGINFO
+ bl setsignal
+
+ mov w0, #SIGTERM
+ adr x1, terminate_handler
+ mov w2, #SA_SIGINFO
+ bl setsignal
+
+ mov w0, #SIGUSR1
+ adr x1, irritator_handler
+ mov w2, #SA_SIGINFO
+ orr w2, w2, #SA_NODEFER
+ bl setsignal
+
+ mov w0, #SIGUSR2
+ adr x1, tickle_handler
+ mov w2, #SA_SIGINFO
+ orr w2, w2, #SA_NODEFER
+ bl setsignal
+
+ puts "Streaming mode "
+ smstart_za
+
+ // Sanity-check and report the vector length
+
+ rdsvl 19, 8
+ cmp x19, #128
+ b.lo 1f
+ cmp x19, #2048
+ b.hi 1f
+ tst x19, #(8 - 1)
+ b.eq 2f
+
+1: puts "bad vector length: "
+ mov x0, x19
+ bl putdecn
+ b .Labort
+
+2: puts "vector length:\t"
+ mov x0, x19
+ bl putdec
+ puts " bits\n"
+
+ // Obtain our PID, to ensure test pattern uniqueness between processes
+ mov x8, #__NR_getpid
+ svc #0
+ mov x20, x0
+
+ puts "PID:\t"
+ mov x0, x20
+ bl putdecn
+
+ mov x22, #0 // generation number, increments per iteration
+.Ltest_loop:
+ rdsvl 0, 8
+ cmp x0, x19
+ b.ne vl_barf
+
+ rdsvl 21, 1 // Set up ZA & shadow with test pattern
+0: mov x0, x20
+ sub x1, x21, #1
+ mov x2, x22
+ bl setup_za
+ subs x21, x21, #1
+ b.ne 0b
+
+ mov x8, #__NR_sched_yield // encourage preemption
+1:
+ svc #0
+
+ mrs x0, S3_3_C4_C2_2 // SVCR should have ZA=1,SM=0
+ and x1, x0, #3
+ cmp x1, #2
+ b.ne svcr_barf
+
+	rdsvl	21, 1			// Verify that the data made it through:
+	rdsvl	24, 1			// x21 counts down the rows, x24 holds the total
+0: sub x0, x24, x21
+ bl check_za
+ subs x21, x21, #1
+ bne 0b
+
+ add x22, x22, #1 // Everything still working
+ b .Ltest_loop
+
+.Labort:
+ mov x0, #0
+ mov x1, #SIGABRT
+ mov x8, #__NR_kill
+ svc #0
+endfunction
+
+function barf
+// fpsimd.c activity log dump hack
+// ldr w0, =0xdeadc0de
+// mov w8, #__NR_exit
+// svc #0
+// end hack
+
+ mrs x13, S3_3_C4_C2_2
+
+ smstop
+ mov x10, x0 // expected data
+ mov x11, x1 // actual data
+ mov x12, x2 // data size
+
+ puts "Mismatch: PID="
+ mov x0, x20
+ bl putdec
+ puts ", iteration="
+ mov x0, x22
+ bl putdec
+ puts ", row="
+ mov x0, x21
+ bl putdecn
+ puts "\tExpected ["
+ mov x0, x10
+ mov x1, x12
+ bl dumphex
+ puts "]\n\tGot ["
+ mov x0, x11
+ mov x1, x12
+ bl dumphex
+ puts "]\n"
+ puts "\tSVCR: "
+ mov x0, x13
+ bl putdecn
+
+ mov x8, #__NR_getpid
+ svc #0
+// fpsimd.c activity log dump hack
+// ldr w0, =0xdeadc0de
+// mov w8, #__NR_exit
+// svc #0
+// ^ end of hack
+ mov x1, #SIGABRT
+ mov x8, #__NR_kill
+ svc #0
+// mov x8, #__NR_exit
+// mov x1, #1
+// svc #0
+endfunction
+
+function vl_barf
+ mov x10, x0
+
+ puts "Bad active VL: "
+ mov x0, x10
+ bl putdecn
+
+ mov x8, #__NR_exit
+ mov x1, #1
+ svc #0
+endfunction
+
+function svcr_barf
+ mov x10, x0
+
+ puts "Bad SVCR: "
+ mov x0, x10
+ bl putdecn
+
+ mov x8, #__NR_exit
+ mov x1, #1
+ svc #0
+endfunction
diff --git a/tools/testing/selftests/arm64/fp/zt-ptrace.c b/tools/testing/selftests/arm64/fp/zt-ptrace.c
new file mode 100644
index 000000000000..f3fa49fd0fbd
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/zt-ptrace.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 ARM Limited.
+ */
+#include <errno.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+#include <sys/ptrace.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <sys/wait.h>
+#include <asm/sigcontext.h>
+#include <asm/ptrace.h>
+
+#include "kselftest.h"
+
+/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
+#ifndef NT_ARM_ZA
+#define NT_ARM_ZA 0x40c
+#endif
+#ifndef NT_ARM_ZT
+#define NT_ARM_ZT 0x40d
+#endif
+
+#define EXPECTED_TESTS 3
+
+static int sme_vl;
+
+static void fill_buf(char *buf, size_t size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ buf[i] = random();
+}
+
+static int do_child(void)
+{
+ if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
+ ksft_exit_fail_msg("ptrace(PTRACE_TRACEME) failed: %s (%d)\n",
+ strerror(errno), errno);
+
+ if (raise(SIGSTOP))
+ ksft_exit_fail_msg("raise(SIGSTOP) failed: %s (%d)\n",
+ strerror(errno), errno);
+
+ return EXIT_SUCCESS;
+}
+
+static struct user_za_header *get_za(pid_t pid, void **buf, size_t *size)
+{
+ struct user_za_header *za;
+ void *p;
+ size_t sz = sizeof(*za);
+ struct iovec iov;
+
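+	/*
+	 * The kernel reports the size it needs in za->size, so keep
+	 * growing the buffer until the whole regset fits.
+	 */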
+ while (1) {
+ if (*size < sz) {
+ p = realloc(*buf, sz);
+ if (!p) {
+ errno = ENOMEM;
+ goto error;
+ }
+
+ *buf = p;
+ *size = sz;
+ }
+
+ iov.iov_base = *buf;
+ iov.iov_len = sz;
+ if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZA, &iov))
+ goto error;
+
+ za = *buf;
+ if (za->size <= sz)
+ break;
+
+ sz = za->size;
+ }
+
+ return za;
+
+error:
+ return NULL;
+}
+
+static int set_za(pid_t pid, const struct user_za_header *za)
+{
+ struct iovec iov;
+
+ iov.iov_base = (void *)za;
+ iov.iov_len = za->size;
+ return ptrace(PTRACE_SETREGSET, pid, NT_ARM_ZA, &iov);
+}
+
+static int get_zt(pid_t pid, char zt[ZT_SIG_REG_BYTES])
+{
+ struct iovec iov;
+
+ iov.iov_base = zt;
+ iov.iov_len = ZT_SIG_REG_BYTES;
+ return ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZT, &iov);
+}
+
+static int set_zt(pid_t pid, const char zt[ZT_SIG_REG_BYTES])
+{
+ struct iovec iov;
+
+ iov.iov_base = (void *)zt;
+ iov.iov_len = ZT_SIG_REG_BYTES;
+ return ptrace(PTRACE_SETREGSET, pid, NT_ARM_ZT, &iov);
+}
+
+/* Reading with ZA disabled returns all zeros */
+static void ptrace_za_disabled_read_zt(pid_t child)
+{
+ struct user_za_header za;
+ char zt[ZT_SIG_REG_BYTES];
+ int ret, i;
+ bool fail = false;
+
+ /* Disable PSTATE.ZA using the ZA interface */
+ memset(&za, 0, sizeof(za));
+ za.vl = sme_vl;
+ za.size = sizeof(za);
+
+ ret = set_za(child, &za);
+ if (ret != 0) {
+ ksft_print_msg("Failed to disable ZA\n");
+ fail = true;
+ }
+
+ /* Read back ZT */
+ ret = get_zt(child, zt);
+ if (ret != 0) {
+ ksft_print_msg("Failed to read ZT\n");
+ fail = true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(zt); i++) {
+ if (zt[i]) {
+ ksft_print_msg("zt[%d]: 0x%x != 0\n", i, zt[i]);
+ fail = true;
+ }
+ }
+
+ ksft_test_result(!fail, "ptrace_za_disabled_read_zt\n");
+}
+
+/* Writing then reading ZT should return the data written */
+static void ptrace_set_get_zt(pid_t child)
+{
+ char zt_in[ZT_SIG_REG_BYTES];
+ char zt_out[ZT_SIG_REG_BYTES];
+ int ret, i;
+ bool fail = false;
+
+ fill_buf(zt_in, sizeof(zt_in));
+
+ ret = set_zt(child, zt_in);
+ if (ret != 0) {
+ ksft_print_msg("Failed to set ZT\n");
+ fail = true;
+ }
+
+ ret = get_zt(child, zt_out);
+ if (ret != 0) {
+ ksft_print_msg("Failed to read ZT\n");
+ fail = true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(zt_in); i++) {
+ if (zt_in[i] != zt_out[i]) {
+ ksft_print_msg("zt[%d]: 0x%x != 0x%x\n", i,
+ zt_in[i], zt_out[i]);
+ fail = true;
+ }
+ }
+
+ ksft_test_result(!fail, "ptrace_set_get_zt\n");
+}
+
+/* Writing ZT should set PSTATE.ZA */
+static void ptrace_enable_za_via_zt(pid_t child)
+{
+ struct user_za_header za_in;
+ struct user_za_header *za_out;
+ char zt[ZT_SIG_REG_BYTES];
+ char *za_data;
+ size_t za_out_size;
+ int ret, i, vq;
+ bool fail = false;
+
+ /* Disable PSTATE.ZA using the ZA interface */
+ memset(&za_in, 0, sizeof(za_in));
+ za_in.vl = sme_vl;
+ za_in.size = sizeof(za_in);
+
+ ret = set_za(child, &za_in);
+ if (ret != 0) {
+ ksft_print_msg("Failed to disable ZA\n");
+ fail = true;
+ }
+
+ /* Write ZT */
+ fill_buf(zt, sizeof(zt));
+ ret = set_zt(child, zt);
+ if (ret != 0) {
+ ksft_print_msg("Failed to set ZT\n");
+ fail = true;
+ }
+
+ /* Read back ZA and check for register data */
+ za_out = NULL;
+ za_out_size = 0;
+ if (get_za(child, (void **)&za_out, &za_out_size)) {
+ /* Should have an unchanged VL */
+ if (za_out->vl != sme_vl) {
+ ksft_print_msg("VL changed from %d to %d\n",
+ sme_vl, za_out->vl);
+ fail = true;
+ }
+ vq = __sve_vq_from_vl(za_out->vl);
+ za_data = (char *)za_out + ZA_PT_ZA_OFFSET;
+
+ /* Should have register data */
+ if (za_out->size < ZA_PT_SIZE(vq)) {
+ ksft_print_msg("ZA data less than expected: %u < %u\n",
+ za_out->size, (unsigned int)ZA_PT_SIZE(vq));
+ fail = true;
+ vq = 0;
+ }
+
+		/*
+		 * That register data should read back as zero, since
+		 * ZA is reset when it is enabled via a ZT write.
+		 */
+ for (i = 0; i < ZA_PT_ZA_SIZE(vq); i++) {
+ if (za_data[i]) {
+ ksft_print_msg("ZA byte %d is %x\n",
+ i, za_data[i]);
+ fail = true;
+ }
+ }
+ } else {
+ ksft_print_msg("Failed to read ZA\n");
+ fail = true;
+ }
+
+ ksft_test_result(!fail, "ptrace_enable_za_via_zt\n");
+}
+
+static int do_parent(pid_t child)
+{
+ int ret = EXIT_FAILURE;
+ pid_t pid;
+ int status;
+ siginfo_t si;
+
+ /* Attach to the child */
+ while (1) {
+ int sig;
+
+ pid = wait(&status);
+ if (pid == -1) {
+ perror("wait");
+ goto error;
+ }
+
+ /*
+ * This should never happen but it's hard to flag in
+ * the framework.
+ */
+ if (pid != child)
+ continue;
+
+ if (WIFEXITED(status) || WIFSIGNALED(status))
+ ksft_exit_fail_msg("Child died unexpectedly\n");
+
+ if (!WIFSTOPPED(status))
+ goto error;
+
+ sig = WSTOPSIG(status);
+
+ if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
+ if (errno == ESRCH)
+ goto disappeared;
+
+ if (errno == EINVAL) {
+ sig = 0; /* bust group-stop */
+ goto cont;
+ }
+
+ ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n",
+ strerror(errno));
+ goto error;
+ }
+
+ if (sig == SIGSTOP && si.si_code == SI_TKILL &&
+ si.si_pid == pid)
+ break;
+
+ cont:
+ if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
+ if (errno == ESRCH)
+ goto disappeared;
+
+ ksft_test_result_fail("PTRACE_CONT: %s\n",
+ strerror(errno));
+ goto error;
+ }
+ }
+
+ ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);
+
+ ptrace_za_disabled_read_zt(child);
+ ptrace_set_get_zt(child);
+ ptrace_enable_za_via_zt(child);
+
+ ret = EXIT_SUCCESS;
+
+error:
+ kill(child, SIGKILL);
+
+disappeared:
+ return ret;
+}
+
+int main(void)
+{
+ int ret = EXIT_SUCCESS;
+ pid_t child;
+
+ srandom(getpid());
+
+ ksft_print_header();
+
+ if (!(getauxval(AT_HWCAP2) & HWCAP2_SME2)) {
+ ksft_set_plan(1);
+ ksft_exit_skip("SME2 not available\n");
+ }
+
+ /* We need a valid SME VL to enable/disable ZA */
+ sme_vl = prctl(PR_SME_GET_VL);
+ if (sme_vl == -1) {
+ ksft_set_plan(1);
+ ksft_exit_skip("Failed to read SME VL: %d (%s)\n",
+ errno, strerror(errno));
+ }
+
+ ksft_set_plan(EXPECTED_TESTS);
+
+ child = fork();
+ if (!child)
+ return do_child();
+
+ if (do_parent(child))
+ ret = EXIT_FAILURE;
+
+ ksft_print_cnts();
+
+ return ret;
+}
diff --git a/tools/testing/selftests/arm64/fp/zt-test.S b/tools/testing/selftests/arm64/fp/zt-test.S
new file mode 100644
index 000000000000..a8df05771670
--- /dev/null
+++ b/tools/testing/selftests/arm64/fp/zt-test.S
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2021-2 ARM Limited.
+// Original author: Mark Brown <broonie@kernel.org>
+//
+// Scalable Matrix Extension ZT context switch test
+// Repeatedly writes unique test patterns into ZT0
+// and reads them back to verify integrity.
+
+#include <asm/unistd.h>
+#include "assembler.h"
+#include "asm-offsets.h"
+#include "sme-inst.h"
+
+.arch_extension sve
+
+#define ZT_SZ 512
+#define ZT_B (ZT_SZ / 8)
+
+// Declare some storage space to shadow ZT register contents and a
+// scratch buffer.
+.pushsection .text
+.data
+.align 4
+ztref:
+ .space ZT_B
+scratch:
+ .space ZT_B
+.popsection
+
+
+// Generate a test pattern for storage in ZT
+// x0: pid
+// x1: generation
+
+// These values are used to construct a 32-bit pattern that is repeated in the
+// scratch buffer as many times as will fit:
+// bits 31:24 generation number (increments once per test_loop)
+// bits 23: 8 pid
+// bits 7: 0 32-bit lane index
+
+function pattern
+ mov w3, wzr
+ bfi w3, w0, #8, #16 // PID
+ bfi w3, w1, #24, #8 // Generation
+
+ ldr x0, =scratch
+ mov w1, #ZT_B / 4
+
+0: str w3, [x0], #4
+ add w3, w3, #1 // Lane
+ subs w1, w1, #1
+ b.ne 0b
+
+ ret
+endfunction
+
+// Set up test pattern in a ZT horizontal vector
+// x0: pid
+// x1: generation
+function setup_zt
+ mov x4, x30
+
+ bl pattern // Get pattern in scratch buffer
+ ldr x0, =ztref
+ ldr x1, =scratch
+ mov x2, #ZT_B
+ bl memcpy
+
+ ldr x0, =ztref
+ _ldr_zt 0 // load zt0 from pointer x0
+
+ ret x4
+endfunction
+
+// Trivial memory compare: compare x2 bytes starting at address x0 with
+// bytes starting at address x1.
+// Returns only if all bytes match; otherwise, the program is aborted.
+// Clobbers x0-x5.
+function memcmp
+ cbz x2, 2f
+
+ stp x0, x1, [sp, #-0x20]!
+ str x2, [sp, #0x10]
+
+ mov x5, #0
+0: ldrb w3, [x0, x5]
+ ldrb w4, [x1, x5]
+ add x5, x5, #1
+ cmp w3, w4
+ b.ne 1f
+ subs x2, x2, #1
+ b.ne 0b
+
+1: ldr x2, [sp, #0x10]
+ ldp x0, x1, [sp], #0x20
+ b.ne barf
+
+2: ret
+endfunction
+
+// Verify that a ZT vector matches its shadow in memory, else abort
+// Clobbers x0-x3
+function check_zt
+ mov x3, x30
+
+ ldr x0, =scratch // Poison scratch
+ mov x1, #ZT_B
+ bl memfill_ae
+
+ ldr x0, =scratch
+ _str_zt 0
+
+ ldr x0, =ztref
+ ldr x1, =scratch
+ mov x2, #ZT_B
+ mov x30, x3
+ b memcmp
+endfunction
+
+// Modify the live SME register state, signal return will undo our changes
+function irritator_handler
+ // Increment the irritation signal count (x23):
+ ldr x0, [x2, #ucontext_regs + 8 * 23]
+ add x0, x0, #1
+ str x0, [x2, #ucontext_regs + 8 * 23]
+
+ // This will reset ZT to all bits 0
+ smstop
+ smstart_za
+
+ ret
+endfunction
+
+function tickle_handler
+ // Increment the signal count (x23):
+ ldr x0, [x2, #ucontext_regs + 8 * 23]
+ add x0, x0, #1
+ str x0, [x2, #ucontext_regs + 8 * 23]
+
+ ret
+endfunction
+
+function terminate_handler
+ mov w21, w0
+ mov x20, x2
+
+ puts "Terminated by signal "
+ mov w0, w21
+ bl putdec
+ puts ", no error, iterations="
+ ldr x0, [x20, #ucontext_regs + 8 * 22]
+ bl putdec
+ puts ", signals="
+ ldr x0, [x20, #ucontext_regs + 8 * 23]
+ bl putdecn
+
+ mov x0, #0
+ mov x8, #__NR_exit
+ svc #0
+endfunction
+
+// w0: signal number
+// x1: sa_action
+// w2: sa_flags
+// Clobbers x0-x6,x8
+function setsignal
+ str x30, [sp, #-((sa_sz + 15) / 16 * 16 + 16)]!
+
+ mov w4, w0
+ mov x5, x1
+ mov w6, w2
+
+ add x0, sp, #16
+ mov x1, #sa_sz
+ bl memclr
+
+ mov w0, w4
+ add x1, sp, #16
+ str w6, [x1, #sa_flags]
+ str x5, [x1, #sa_handler]
+ mov x2, #0
+ mov x3, #sa_mask_sz
+ mov x8, #__NR_rt_sigaction
+ svc #0
+
+ cbz w0, 1f
+
+ puts "sigaction failure\n"
+ b .Labort
+
+1: ldr x30, [sp], #((sa_sz + 15) / 16 * 16 + 16)
+ ret
+endfunction
+
+// Main program entry point
+.globl _start
+function _start
+ enable_gcs
+
+ mov x23, #0 // signal count
+
+ mov w0, #SIGINT
+ adr x1, terminate_handler
+ mov w2, #SA_SIGINFO
+ bl setsignal
+
+ mov w0, #SIGTERM
+ adr x1, terminate_handler
+ mov w2, #SA_SIGINFO
+ bl setsignal
+
+ mov w0, #SIGUSR1
+ adr x1, irritator_handler
+ mov w2, #SA_SIGINFO
+ orr w2, w2, #SA_NODEFER
+ bl setsignal
+
+ mov w0, #SIGUSR2
+ adr x1, tickle_handler
+ mov w2, #SA_SIGINFO
+ orr w2, w2, #SA_NODEFER
+ bl setsignal
+
+ smstart_za
+
+ // Obtain our PID, to ensure test pattern uniqueness between processes
+ mov x8, #__NR_getpid
+ svc #0
+ mov x20, x0
+
+ puts "PID:\t"
+ mov x0, x20
+ bl putdecn
+
+ mov x22, #0 // generation number, increments per iteration
+.Ltest_loop:
+ mov x0, x20
+ mov x1, x22
+ bl setup_zt
+
+ mov x8, #__NR_sched_yield // Encourage preemption
+ svc #0
+
+ mrs x0, S3_3_C4_C2_2 // SVCR should have ZA=1,SM=0
+ and x1, x0, #3
+ cmp x1, #2
+ b.ne svcr_barf
+
+ bl check_zt
+
+ add x22, x22, #1 // Everything still working
+ b .Ltest_loop
+
+.Labort:
+ mov x0, #0
+ mov x1, #SIGABRT
+ mov x8, #__NR_kill
+ svc #0
+endfunction
+
+function barf
+// fpsimd.c activity log dump hack
+// ldr w0, =0xdeadc0de
+// mov w8, #__NR_exit
+// svc #0
+// end hack
+
+ mrs x13, S3_3_C4_C2_2
+ smstop
+ mov x10, x0 // expected data
+ mov x11, x1 // actual data
+ mov x12, x2 // data size
+
+ puts "Mismatch: PID="
+ mov x0, x20
+ bl putdec
+ puts ", iteration="
+ mov x0, x22
+ bl putdecn
+ puts "\tExpected ["
+ mov x0, x10
+ mov x1, x12
+ bl dumphex
+ puts "]\n\tGot ["
+ mov x0, x11
+ mov x1, x12
+ bl dumphex
+ puts "]\n"
+ puts "\tSVCR: "
+ mov x0, x13
+ bl putdecn
+
+ mov x8, #__NR_getpid
+ svc #0
+// fpsimd.c activity log dump hack
+// ldr w0, =0xdeadc0de
+// mov w8, #__NR_exit
+// svc #0
+// ^ end of hack
+ mov x1, #SIGABRT
+ mov x8, #__NR_kill
+ svc #0
+// mov x8, #__NR_exit
+// mov x1, #1
+// svc #0
+endfunction
+
+function svcr_barf
+ mov x10, x0
+
+ puts "Bad SVCR: "
+ mov x0, x10
+ bl putdecn
+
+ mov x8, #__NR_exit
+ mov x1, #1
+ svc #0
+endfunction
diff --git a/tools/testing/selftests/arm64/gcs/.gitignore b/tools/testing/selftests/arm64/gcs/.gitignore
new file mode 100644
index 000000000000..bbb8e40a7e52
--- /dev/null
+++ b/tools/testing/selftests/arm64/gcs/.gitignore
@@ -0,0 +1,7 @@
+basic-gcs
+libc-gcs
+gcs-locking
+gcs-stress
+gcs-stress-thread
+gcspushm
+gcsstr
diff --git a/tools/testing/selftests/arm64/gcs/Makefile b/tools/testing/selftests/arm64/gcs/Makefile
new file mode 100644
index 000000000000..1fbbf0ca1f02
--- /dev/null
+++ b/tools/testing/selftests/arm64/gcs/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2023 ARM Limited
+#
+# In order to avoid interaction with the toolchain and dynamic linker the
+# portions of these tests that interact with the GCS are implemented using
+# nolibc.
+#
+
+TEST_GEN_PROGS := basic-gcs libc-gcs gcs-locking gcs-stress gcspushm gcsstr
+TEST_GEN_PROGS_EXTENDED := gcs-stress-thread
+
+LDLIBS+=-lpthread
+
+include ../../lib.mk
+
+$(OUTPUT)/basic-gcs: basic-gcs.c
+ $(CC) $(CFLAGS) -fno-asynchronous-unwind-tables -fno-ident -s -nostdlib -nostdinc \
+ -static -I../../../../include/nolibc -include ../../../../include/nolibc/nolibc.h \
+ -I../../../../../usr/include \
+ -std=gnu99 -I../.. -g \
+ -ffreestanding $^ -o $@ -lgcc
+
+$(OUTPUT)/gcs-stress-thread: gcs-stress-thread.S
+ $(CC) -nostdlib $^ -o $@
+
+$(OUTPUT)/gcspushm: gcspushm.S
+ $(CC) -nostdlib $^ -o $@
+
+$(OUTPUT)/gcsstr: gcsstr.S
+ $(CC) -nostdlib $^ -o $@
diff --git a/tools/testing/radix-tree/linux/compiler_types.h b/tools/testing/selftests/arm64/gcs/asm-offsets.h
index e69de29bb2d1..e69de29bb2d1 100644
--- a/tools/testing/radix-tree/linux/compiler_types.h
+++ b/tools/testing/selftests/arm64/gcs/asm-offsets.h
diff --git a/tools/testing/selftests/arm64/gcs/basic-gcs.c b/tools/testing/selftests/arm64/gcs/basic-gcs.c
new file mode 100644
index 000000000000..250977abc398
--- /dev/null
+++ b/tools/testing/selftests/arm64/gcs/basic-gcs.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 ARM Limited.
+ */
+
+#include <limits.h>
+#include <stdbool.h>
+
+#include <linux/prctl.h>
+
+#include <sys/mman.h>
+#include <asm/mman.h>
+#include <asm/hwcap.h>
+#include <linux/sched.h>
+
+#include "kselftest.h"
+#include "gcs-util.h"
+
+/* nolibc doesn't have sysconf(), just hard code the maximum */
+static size_t page_size = 65536;
+
+static __attribute__((noinline)) void valid_gcs_function(void)
+{
+ /* Do something the compiler can't optimise out */
+ my_syscall1(__NR_prctl, PR_SVE_GET_VL);
+}
+
+static inline int gcs_set_status(unsigned long mode)
+{
+ bool enabling = mode & PR_SHADOW_STACK_ENABLE;
+ int ret;
+ unsigned long new_mode;
+
+ /*
+ * The prctl takes 1 argument but we need to ensure that the
+ * other 3 values passed in registers to the syscall are zero
+ * since the kernel validates them.
+ */
+ ret = my_syscall5(__NR_prctl, PR_SET_SHADOW_STACK_STATUS, mode,
+ 0, 0, 0);
+
+ if (ret == 0) {
+ ret = my_syscall5(__NR_prctl, PR_GET_SHADOW_STACK_STATUS,
+ &new_mode, 0, 0, 0);
+ if (ret == 0) {
+ if (new_mode != mode) {
+ ksft_print_msg("Mode set to %lx not %lx\n",
+ new_mode, mode);
+ ret = -EINVAL;
+ }
+ } else {
+ ksft_print_msg("Failed to validate mode: %d\n", ret);
+ }
+
+ if (enabling != chkfeat_gcs()) {
+ ksft_print_msg("%senabled by prctl but %senabled in CHKFEAT\n",
+ enabling ? "" : "not ",
+ chkfeat_gcs() ? "" : "not ");
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+/* Try to read the status */
+static bool read_status(void)
+{
+ unsigned long state;
+ int ret;
+
+ ret = my_syscall5(__NR_prctl, PR_GET_SHADOW_STACK_STATUS,
+ &state, 0, 0, 0);
+ if (ret != 0) {
+ ksft_print_msg("Failed to read state: %d\n", ret);
+ return false;
+ }
+
+ return state & PR_SHADOW_STACK_ENABLE;
+}
+
+/* Just a straight enable */
+static bool base_enable(void)
+{
+ int ret;
+
+ ret = gcs_set_status(PR_SHADOW_STACK_ENABLE);
+ if (ret) {
+ ksft_print_msg("PR_SHADOW_STACK_ENABLE failed %d\n", ret);
+ return false;
+ }
+
+ return true;
+}
+
+/* Check we can read GCSPR_EL0 when GCS is enabled */
+static bool read_gcspr_el0(void)
+{
+ unsigned long *gcspr_el0;
+
+ ksft_print_msg("GET GCSPR\n");
+ gcspr_el0 = get_gcspr();
+ ksft_print_msg("GCSPR_EL0 is %p\n", gcspr_el0);
+
+ return true;
+}
+
+/* Also allow writes to stack */
+static bool enable_writeable(void)
+{
+ int ret;
+
+ ret = gcs_set_status(PR_SHADOW_STACK_ENABLE | PR_SHADOW_STACK_WRITE);
+ if (ret) {
+ ksft_print_msg("PR_SHADOW_STACK_ENABLE writeable failed: %d\n", ret);
+ return false;
+ }
+
+ ret = gcs_set_status(PR_SHADOW_STACK_ENABLE);
+ if (ret) {
+ ksft_print_msg("failed to restore plain enable %d\n", ret);
+ return false;
+ }
+
+ return true;
+}
+
+/* Also allow GCS push and pop instructions */
+static bool enable_push_pop(void)
+{
+ int ret;
+
+ ret = gcs_set_status(PR_SHADOW_STACK_ENABLE | PR_SHADOW_STACK_PUSH);
+ if (ret) {
+ ksft_print_msg("PR_SHADOW_STACK_ENABLE with push failed: %d\n",
+ ret);
+ return false;
+ }
+
+ ret = gcs_set_status(PR_SHADOW_STACK_ENABLE);
+ if (ret) {
+ ksft_print_msg("failed to restore plain enable %d\n", ret);
+ return false;
+ }
+
+ return true;
+}
+
+/* Enable GCS and allow everything */
+static bool enable_all(void)
+{
+ int ret;
+
+ ret = gcs_set_status(PR_SHADOW_STACK_ENABLE | PR_SHADOW_STACK_PUSH |
+ PR_SHADOW_STACK_WRITE);
+ if (ret) {
+ ksft_print_msg("PR_SHADOW_STACK_ENABLE with everything failed: %d\n",
+ ret);
+ return false;
+ }
+
+ ret = gcs_set_status(PR_SHADOW_STACK_ENABLE);
+ if (ret) {
+ ksft_print_msg("failed to restore plain enable %d\n", ret);
+ return false;
+ }
+
+ return true;
+}
+
+static bool enable_invalid(void)
+{
+ int ret = gcs_set_status(ULONG_MAX);
+ if (ret == 0) {
+ ksft_print_msg("GCS_SET_STATUS %lx succeeded\n", ULONG_MAX);
+ return false;
+ }
+
+ return true;
+}
+
+/* Map a GCS */
+static bool map_guarded_stack(void)
+{
+ int ret;
+ uint64_t *buf;
+ uint64_t expected_cap;
+ int elem;
+ bool pass = true;
+
+ buf = (void *)my_syscall3(__NR_map_shadow_stack, 0, page_size,
+ SHADOW_STACK_SET_MARKER |
+ SHADOW_STACK_SET_TOKEN);
+ if (buf == MAP_FAILED) {
+ ksft_print_msg("Failed to map %lu byte GCS: %d\n",
+ page_size, errno);
+ return false;
+ }
+ ksft_print_msg("Mapped GCS at %p-%p\n", buf,
+ (void *)((uint64_t)buf + page_size));
+
+ /* The top of the newly allocated region should be 0 */
+ elem = (page_size / sizeof(uint64_t)) - 1;
+ if (buf[elem]) {
+ ksft_print_msg("Last entry is 0x%llx not 0x0\n", buf[elem]);
+ pass = false;
+ }
+
+ /* Then a valid cap token */
+ elem--;
+ expected_cap = ((uint64_t)buf + page_size - 16);
+ expected_cap &= GCS_CAP_ADDR_MASK;
+ expected_cap |= GCS_CAP_VALID_TOKEN;
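+	/* ie the token should hold the masked address of its own slot plus a validity marker */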
+ if (buf[elem] != expected_cap) {
+ ksft_print_msg("Cap entry is 0x%llx not 0x%llx\n",
+ buf[elem], expected_cap);
+ pass = false;
+ }
+ ksft_print_msg("cap token is 0x%llx\n", buf[elem]);
+
+ /* The rest should be zeros */
+ for (elem = 0; elem < page_size / sizeof(uint64_t) - 2; elem++) {
+ if (!buf[elem])
+ continue;
+ ksft_print_msg("GCS slot %d is 0x%llx not 0x0\n",
+ elem, buf[elem]);
+ pass = false;
+ }
+
+ ret = munmap(buf, page_size);
+ if (ret != 0) {
+		ksft_print_msg("Failed to unmap %lu byte GCS: %d\n",
+ page_size, errno);
+ pass = false;
+ }
+
+ return pass;
+}
+
+/* A fork()ed process can run */
+static bool test_fork(void)
+{
+ unsigned long child_mode;
+ int ret, status;
+ pid_t pid;
+ bool pass = true;
+
+ pid = fork();
+ if (pid == -1) {
+ ksft_print_msg("fork() failed: %d\n", errno);
+ pass = false;
+ goto out;
+ }
+ if (pid == 0) {
+		/*
+		 * In child, make sure we can call a function, read
+		 * the GCS pointer and status and then exit.
+		 */
+ valid_gcs_function();
+ get_gcspr();
+
+ ret = my_syscall5(__NR_prctl, PR_GET_SHADOW_STACK_STATUS,
+ &child_mode, 0, 0, 0);
+ if (ret == 0 && !(child_mode & PR_SHADOW_STACK_ENABLE)) {
+ ksft_print_msg("GCS not enabled in child\n");
+ ret = -EINVAL;
+ }
+
+ exit(ret);
+ }
+
+ /*
+ * In parent, check we can still do function calls then block
+ * for the child.
+ */
+ valid_gcs_function();
+
+ ksft_print_msg("Waiting for child %d\n", pid);
+
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ ksft_print_msg("Failed to wait for child: %d\n",
+ errno);
+ return false;
+ }
+
+ if (!WIFEXITED(status)) {
+ ksft_print_msg("Child exited due to signal %d\n",
+ WTERMSIG(status));
+ pass = false;
+ } else {
+ if (WEXITSTATUS(status)) {
+ ksft_print_msg("Child exited with status %d\n",
+ WEXITSTATUS(status));
+ pass = false;
+ }
+ }
+
+out:
+
+ return pass;
+}
+
+/* A vfork()ed process can run and exit */
+static bool test_vfork(void)
+{
+ unsigned long child_mode;
+ int ret, status;
+ pid_t pid;
+ bool pass = true;
+
+ pid = vfork();
+ if (pid == -1) {
+ ksft_print_msg("vfork() failed: %d\n", errno);
+ pass = false;
+ goto out;
+ }
+ if (pid == 0) {
+ /*
+ * In child, make sure we can call a function, read
+ * the GCS pointer and status and then exit.
+ */
+ valid_gcs_function();
+ get_gcspr();
+
+ ret = my_syscall5(__NR_prctl, PR_GET_SHADOW_STACK_STATUS,
+ &child_mode, 0, 0, 0);
+ if (ret == 0 && !(child_mode & PR_SHADOW_STACK_ENABLE)) {
+ ksft_print_msg("GCS not enabled in child\n");
+ ret = EXIT_FAILURE;
+ }
+
+ _exit(ret);
+ }
+
+ /*
+ * In parent, check we can still do function calls then check
+ * on the child.
+ */
+ valid_gcs_function();
+
+ ksft_print_msg("Waiting for child %d\n", pid);
+
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ ksft_print_msg("Failed to wait for child: %d\n",
+ errno);
+ return false;
+ }
+
+ if (!WIFEXITED(status)) {
+ ksft_print_msg("Child exited due to signal %d\n",
+ WTERMSIG(status));
+ pass = false;
+ } else if (WEXITSTATUS(status)) {
+ ksft_print_msg("Child exited with status %d\n",
+ WEXITSTATUS(status));
+ pass = false;
+ }
+
+out:
+
+ return pass;
+}
+
+typedef bool (*gcs_test)(void);
+
+static struct {
+ char *name;
+ gcs_test test;
+ bool needs_enable;
+} tests[] = {
+ { "read_status", read_status },
+ { "base_enable", base_enable, true },
+ { "read_gcspr_el0", read_gcspr_el0 },
+ { "enable_writeable", enable_writeable, true },
+ { "enable_push_pop", enable_push_pop, true },
+ { "enable_all", enable_all, true },
+ { "enable_invalid", enable_invalid, true },
+ { "map_guarded_stack", map_guarded_stack },
+ { "fork", test_fork },
+ { "vfork", test_vfork },
+};
+
+int main(void)
+{
+ int i, ret;
+ unsigned long gcs_mode;
+
+ ksft_print_header();
+
+ if (!(getauxval(AT_HWCAP) & HWCAP_GCS))
+ ksft_exit_skip("SKIP GCS not supported\n");
+
+ ret = my_syscall5(__NR_prctl, PR_GET_SHADOW_STACK_STATUS,
+ &gcs_mode, 0, 0, 0);
+ if (ret != 0)
+ ksft_exit_fail_msg("Failed to read GCS state: %d\n", ret);
+
+ if (!(gcs_mode & PR_SHADOW_STACK_ENABLE)) {
+ gcs_mode = PR_SHADOW_STACK_ENABLE;
+ ret = my_syscall5(__NR_prctl, PR_SET_SHADOW_STACK_STATUS,
+ gcs_mode, 0, 0, 0);
+ if (ret != 0)
+ ksft_exit_fail_msg("Failed to enable GCS: %d\n", ret);
+ }
+
+ ksft_set_plan(ARRAY_SIZE(tests));
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ ksft_test_result((*tests[i].test)(), "%s\n", tests[i].name);
+ }
+
+	/* One last test: disable GCS; this can only be done once, so it runs last */
+ ret = my_syscall5(__NR_prctl, PR_SET_SHADOW_STACK_STATUS, 0, 0, 0, 0);
+ if (ret != 0)
+ ksft_print_msg("Failed to disable GCS: %d\n", ret);
+
+ ksft_finished();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/arm64/gcs/gcs-locking.c b/tools/testing/selftests/arm64/gcs/gcs-locking.c
new file mode 100644
index 000000000000..1e6abb136ffd
--- /dev/null
+++ b/tools/testing/selftests/arm64/gcs/gcs-locking.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 ARM Limited.
+ *
+ * Tests for GCS mode locking. These tests rely on both having GCS
+ * unconfigured on entry and on the kselftest harness running each
+ * test in a fork()ed process which will have its own mode.
+ */
+
+#include <limits.h>
+
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+
+#include <asm/hwcap.h>
+
+#include "kselftest_harness.h"
+
+#include "gcs-util.h"
+
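+/*
+ * Presumably a raw syscall is used here so that the mode change does
+ * not have to survive a return through a libc wrapper whose call may
+ * not be recorded on the newly enabled shadow stack.
+ */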
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ register long _num __asm__ ("x8") = (num); \
+ register long _arg1 __asm__ ("x0") = (long)(arg1); \
+ register long _arg2 __asm__ ("x1") = (long)(arg2); \
+ register long _arg3 __asm__ ("x2") = 0; \
+ register long _arg4 __asm__ ("x3") = 0; \
+ register long _arg5 __asm__ ("x4") = 0; \
+ \
+ __asm__ volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_arg1), "r"(_arg2), \
+ "r"(_arg3), "r"(_arg4), \
+ "r"(_arg5), "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+/* No mode bits are rejected for locking */
+TEST(lock_all_modes)
+{
+ int ret;
+
+ ret = prctl(PR_LOCK_SHADOW_STACK_STATUS, ULONG_MAX, 0, 0, 0);
+ ASSERT_EQ(ret, 0);
+}
+
+FIXTURE(valid_modes)
+{
+};
+
+FIXTURE_VARIANT(valid_modes)
+{
+ unsigned long mode;
+};
+
+FIXTURE_VARIANT_ADD(valid_modes, enable)
+{
+ .mode = PR_SHADOW_STACK_ENABLE,
+};
+
+FIXTURE_VARIANT_ADD(valid_modes, enable_write)
+{
+ .mode = PR_SHADOW_STACK_ENABLE | PR_SHADOW_STACK_WRITE,
+};
+
+FIXTURE_VARIANT_ADD(valid_modes, enable_push)
+{
+ .mode = PR_SHADOW_STACK_ENABLE | PR_SHADOW_STACK_PUSH,
+};
+
+FIXTURE_VARIANT_ADD(valid_modes, enable_write_push)
+{
+ .mode = PR_SHADOW_STACK_ENABLE | PR_SHADOW_STACK_WRITE |
+ PR_SHADOW_STACK_PUSH,
+};
+
+FIXTURE_SETUP(valid_modes)
+{
+}
+
+FIXTURE_TEARDOWN(valid_modes)
+{
+}
+
+/* We can set the mode at all */
+TEST_F(valid_modes, set)
+{
+ int ret;
+
+ ret = my_syscall2(__NR_prctl, PR_SET_SHADOW_STACK_STATUS,
+ variant->mode);
+ ASSERT_EQ(ret, 0);
+
+ _exit(0);
+}
+
+/* Enabling, locking then disabling is rejected */
+TEST_F(valid_modes, enable_lock_disable)
+{
+ unsigned long mode;
+ int ret;
+
+ ret = my_syscall2(__NR_prctl, PR_SET_SHADOW_STACK_STATUS,
+ variant->mode);
+ ASSERT_EQ(ret, 0);
+
+ ret = prctl(PR_GET_SHADOW_STACK_STATUS, &mode, 0, 0, 0);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(mode, variant->mode);
+
+ ret = prctl(PR_LOCK_SHADOW_STACK_STATUS, variant->mode, 0, 0, 0);
+ ASSERT_EQ(ret, 0);
+
+ ret = my_syscall2(__NR_prctl, PR_SET_SHADOW_STACK_STATUS, 0);
+ ASSERT_EQ(ret, -EBUSY);
+
+ _exit(0);
+}
+
+/* Locking then enabling is rejected */
+TEST_F(valid_modes, lock_enable)
+{
+ unsigned long mode;
+ int ret;
+
+ ret = prctl(PR_LOCK_SHADOW_STACK_STATUS, variant->mode, 0, 0, 0);
+ ASSERT_EQ(ret, 0);
+
+ ret = my_syscall2(__NR_prctl, PR_SET_SHADOW_STACK_STATUS,
+ variant->mode);
+ ASSERT_EQ(ret, -EBUSY);
+
+ ret = prctl(PR_GET_SHADOW_STACK_STATUS, &mode, 0, 0, 0);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(mode, 0);
+
+ _exit(0);
+}
+
+/* Locking then changing other modes is fine */
+TEST_F(valid_modes, lock_enable_disable_others)
+{
+ unsigned long mode;
+ int ret;
+
+ ret = my_syscall2(__NR_prctl, PR_SET_SHADOW_STACK_STATUS,
+ variant->mode);
+ ASSERT_EQ(ret, 0);
+
+ ret = prctl(PR_GET_SHADOW_STACK_STATUS, &mode, 0, 0, 0);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(mode, variant->mode);
+
+ ret = prctl(PR_LOCK_SHADOW_STACK_STATUS, variant->mode, 0, 0, 0);
+ ASSERT_EQ(ret, 0);
+
+ ret = my_syscall2(__NR_prctl, PR_SET_SHADOW_STACK_STATUS,
+ PR_SHADOW_STACK_ALL_MODES);
+ ASSERT_EQ(ret, 0);
+
+ ret = prctl(PR_GET_SHADOW_STACK_STATUS, &mode, 0, 0, 0);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(mode, PR_SHADOW_STACK_ALL_MODES);
+
+ ret = my_syscall2(__NR_prctl, PR_SET_SHADOW_STACK_STATUS,
+ variant->mode);
+ ASSERT_EQ(ret, 0);
+
+ ret = prctl(PR_GET_SHADOW_STACK_STATUS, &mode, 0, 0, 0);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(mode, variant->mode);
+
+ _exit(0);
+}
+
+int main(int argc, char **argv)
+{
+ unsigned long mode;
+ int ret;
+
+ if (!(getauxval(AT_HWCAP) & HWCAP_GCS))
+ ksft_exit_skip("SKIP GCS not supported\n");
+
+ ret = prctl(PR_GET_SHADOW_STACK_STATUS, &mode, 0, 0, 0);
+ if (ret) {
+ ksft_print_msg("Failed to read GCS state: %d\n", ret);
+ return EXIT_FAILURE;
+ }
+
+ if (mode & PR_SHADOW_STACK_ENABLE) {
+ ksft_print_msg("GCS was enabled, test unsupported\n");
+ return KSFT_SKIP;
+ }
+
+ return test_harness_run(argc, argv);
+}
diff --git a/tools/testing/selftests/arm64/gcs/gcs-stress-thread.S b/tools/testing/selftests/arm64/gcs/gcs-stress-thread.S
new file mode 100644
index 000000000000..b88b25217da5
--- /dev/null
+++ b/tools/testing/selftests/arm64/gcs/gcs-stress-thread.S
@@ -0,0 +1,311 @@
+// Program that loops for ever doing lots of recursions and system calls,
+// intended to be used as part of a stress test for GCS context switching.
+//
+// Copyright 2015-2023 Arm Ltd
+
+#include <asm/unistd.h>
+
+#define sa_sz 32
+#define sa_flags 8
+#define sa_handler 0
+#define sa_mask_sz 8
+
+#define si_code 8
+
+#define SIGINT 2
+#define SIGABRT 6
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGTERM 15
+#define SEGV_CPERR 10
+
+#define SA_NODEFER 1073741824
+#define SA_SIGINFO 4
+#define ucontext_regs 184
+
+#define PR_SET_SHADOW_STACK_STATUS 75
+# define PR_SHADOW_STACK_ENABLE (1UL << 0)
+
+#define GCSPR_EL0 S3_3_C2_C5_1
+
+.macro function name
+ .macro endfunction
+ .type \name, @function
+ .purgem endfunction
+ .endm
+\name:
+.endm
+
+// Print a single character x0 to stdout
+// Clobbers x0-x2,x8
+function putc
+ str x0, [sp, #-16]!
+
+ mov x0, #1 // STDOUT_FILENO
+ mov x1, sp
+ mov x2, #1
+ mov x8, #__NR_write
+ svc #0
+
+ add sp, sp, #16
+ ret
+endfunction
+.globl putc
+
+// Print a NUL-terminated string starting at address x0 to stdout
+// Clobbers x0-x3,x8
+function puts
+ mov x1, x0
+
+ mov x2, #0
+0: ldrb w3, [x0], #1
+ cbz w3, 1f
+ add x2, x2, #1
+ b 0b
+
+1: mov w0, #1 // STDOUT_FILENO
+ mov x8, #__NR_write
+ svc #0
+
+ ret
+endfunction
+.globl puts
+
+// Utility macro to print a literal string
+// Clobbers x0-x4,x8
+.macro puts string
+ .pushsection .rodata.str1.1, "aMS", @progbits, 1
+.L__puts_literal\@: .string "\string"
+ .popsection
+
+ ldr x0, =.L__puts_literal\@
+ bl puts
+.endm
+
+// Print an unsigned decimal number x0 to stdout
+// Clobbers x0-x4,x8
+function putdec
+ mov x1, sp
+ str x30, [sp, #-32]! // Result can't be > 20 digits
+
+ mov x2, #0
+ strb w2, [x1, #-1]! // Write the NUL terminator
+
+ mov x2, #10
+0: udiv x3, x0, x2 // div-mod loop to generate the digits
+ msub x0, x3, x2, x0
+ add w0, w0, #'0'
+ strb w0, [x1, #-1]!
+ mov x0, x3
+ cbnz x3, 0b
+
+ ldrb w0, [x1]
+ cbnz w0, 1f
+ mov w0, #'0' // Print "0" for 0, not ""
+ strb w0, [x1, #-1]!
+
+1: mov x0, x1
+ bl puts
+
+ ldr x30, [sp], #32
+ ret
+endfunction
+.globl putdec
+
+// Print an unsigned decimal number x0 to stdout, followed by a newline
+// Clobbers x0-x5,x8
+function putdecn
+ mov x5, x30
+
+ bl putdec
+ mov x0, #'\n'
+ bl putc
+
+ ret x5
+endfunction
+.globl putdecn
+
+// Fill x1 bytes starting at x0 with 0.
+// Clobbers x1, x2.
+function memclr
+ mov w2, #0
+endfunction
+.globl memclr
+ // fall through to memfill
+
+// Trivial memory fill: fill x1 bytes starting at address x0 with byte w2
+// Clobbers x1
+function memfill
+ cmp x1, #0
+ b.eq 1f
+
+0: strb w2, [x0], #1
+ subs x1, x1, #1
+ b.ne 0b
+
+1: ret
+endfunction
+.globl memfill
+
+// w0: signal number
+// x1: sa_action
+// w2: sa_flags
+// Clobbers x0-x6,x8
+function setsignal
+ str x30, [sp, #-((sa_sz + 15) / 16 * 16 + 16)]!
+
+ mov w4, w0
+ mov x5, x1
+ mov w6, w2
+
+ add x0, sp, #16
+ mov x1, #sa_sz
+ bl memclr
+
+ mov w0, w4
+ add x1, sp, #16
+ str w6, [x1, #sa_flags]
+ str x5, [x1, #sa_handler]
+ mov x2, #0
+ mov x3, #sa_mask_sz
+ mov x8, #__NR_rt_sigaction
+ svc #0
+
+ cbz w0, 1f
+
+ puts "sigaction failure\n"
+ b abort
+
+1: ldr x30, [sp], #((sa_sz + 15) / 16 * 16 + 16)
+ ret
+endfunction
+
+
+function tickle_handler
+ // Perhaps collect GCSPR_EL0 here in future?
+ ret
+endfunction
+
+function terminate_handler
+ mov w21, w0
+ mov x20, x2
+
+ puts "Terminated by signal "
+ mov w0, w21
+ bl putdec
+ puts ", no error\n"
+
+ mov x0, #0
+ mov x8, #__NR_exit
+ svc #0
+endfunction
+
+function segv_handler
+ // stash the siginfo_t *
+ mov x20, x1
+
+	// Disable GCS; we don't want to take further GCS faults while logging
+ mov x0, PR_SET_SHADOW_STACK_STATUS
+ mov x1, xzr
+ mov x2, xzr
+ mov x3, xzr
+ mov x4, xzr
+ mov x5, xzr
+ mov x8, #__NR_prctl
+ svc #0
+
+ puts "Got SIGSEGV code "
+
+ ldr x21, [x20, #si_code]
+ mov x0, x21
+ bl putdec
+
+ // GCS faults should have si_code SEGV_CPERR
+ cmp x21, #SEGV_CPERR
+ bne 1f
+
+ puts " (GCS violation)"
+1:
+ mov x0, '\n'
+ bl putc
+ b abort
+endfunction
+
+// Recurse x20 times
+.macro recurse id
+function recurse\id
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+
+ cmp x20, 0
+ beq 1f
+ sub x20, x20, 1
+ bl recurse\id
+
+1:
+ ldp x29, x30, [sp], #16
+
+ // Do a syscall immediately prior to returning to try to provoke
+ // scheduling and migration at a point where coherency issues
+ // might trigger.
+ mov x8, #__NR_getpid
+ svc #0
+
+ ret
+endfunction
+.endm
+
+// Generate and use two copies so we're changing the GCS contents
+recurse 1
+recurse 2
+
+.globl _start
+function _start
+ // Run with GCS
+ mov x0, PR_SET_SHADOW_STACK_STATUS
+ mov x1, PR_SHADOW_STACK_ENABLE
+ mov x2, xzr
+ mov x3, xzr
+ mov x4, xzr
+ mov x5, xzr
+ mov x8, #__NR_prctl
+ svc #0
+ cbz x0, 1f
+ puts "Failed to enable GCS\n"
+ b abort
+1:
+
+ mov w0, #SIGTERM
+ adr x1, terminate_handler
+ mov w2, #SA_SIGINFO
+ bl setsignal
+
+ mov w0, #SIGUSR1
+ adr x1, tickle_handler
+ mov w2, #SA_SIGINFO
+ orr w2, w2, #SA_NODEFER
+ bl setsignal
+
+ mov w0, #SIGSEGV
+ adr x1, segv_handler
+ mov w2, #SA_SIGINFO
+ orr w2, w2, #SA_NODEFER
+ bl setsignal
+
+ puts "Running\n"
+
+loop:
+ // Small recursion depth so we're frequently flipping between
+ // the two recursors and changing what's on the stack
+ mov x20, #5
+ bl recurse1
+ mov x20, #5
+ bl recurse2
+ b loop
+endfunction
+
+abort:
+ mov x0, #255
+ mov x8, #__NR_exit
+ svc #0
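
The putdec helper above converts a value to decimal with a udiv/msub
div-mod loop, storing digits backwards from the NUL terminator toward
lower addresses. A C sketch of the same algorithm (illustrative only,
not part of the patch):

	/* Sketch of putdec: generate digits in reverse from the end of
	 * the buffer; 21 bytes covers a 64-bit value plus the NUL. */
	static const char *format_dec(unsigned long val, char buf[21])
	{
		char *p = buf + 20;

		*p = '\0';
		do {
			*--p = '0' + (val % 10);	/* msub step */
			val /= 10;			/* udiv step */
		} while (val);

		return p;
	}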
diff --git a/tools/testing/selftests/arm64/gcs/gcs-stress.c b/tools/testing/selftests/arm64/gcs/gcs-stress.c
new file mode 100644
index 000000000000..86d8cd42aee7
--- /dev/null
+++ b/tools/testing/selftests/arm64/gcs/gcs-stress.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022-3 ARM Limited.
+ */
+
+#define _GNU_SOURCE
+#define _POSIX_C_SOURCE 199309L
+
+#include <errno.h>
+#include <getopt.h>
+#include <poll.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/auxv.h>
+#include <sys/epoll.h>
+#include <sys/prctl.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <sys/wait.h>
+#include <asm/hwcap.h>
+
+#include "kselftest.h"
+
+struct child_data {
+ char *name, *output;
+ pid_t pid;
+ int stdout;
+ bool output_seen;
+ bool exited;
+ int exit_status;
+ int exit_signal;
+};
+
+static int epoll_fd;
+static struct child_data *children;
+static struct epoll_event *evs;
+static int tests;
+static int num_children;
+static bool terminate;
+
+static int startup_pipe[2];
+
+static int num_processors(void)
+{
+ long nproc = sysconf(_SC_NPROCESSORS_CONF);
+ if (nproc < 0) {
+ perror("Unable to read number of processors");
+ exit(EXIT_FAILURE);
+ }
+
+ return nproc;
+}
+
+static void start_thread(struct child_data *child, int id)
+{
+ int ret, pipefd[2], i;
+ struct epoll_event ev;
+
+ ret = pipe(pipefd);
+ if (ret != 0)
+ ksft_exit_fail_msg("Failed to create stdout pipe: %s (%d)\n",
+ strerror(errno), errno);
+
+ child->pid = fork();
+ if (child->pid == -1)
+ ksft_exit_fail_msg("fork() failed: %s (%d)\n",
+ strerror(errno), errno);
+
+ if (!child->pid) {
+ /*
+ * In child, replace stdout with the pipe, errors to
+ * stderr from here as kselftest prints to stdout.
+ */
+ ret = dup2(pipefd[1], 1);
+ if (ret == -1) {
+ fprintf(stderr, "dup2() %d\n", errno);
+ exit(EXIT_FAILURE);
+ }
+
+ /*
+ * Duplicate the read side of the startup pipe to
+ * FD 3 so we can close everything else.
+ */
+ ret = dup2(startup_pipe[0], 3);
+ if (ret == -1) {
+ fprintf(stderr, "dup2() %d\n", errno);
+ exit(EXIT_FAILURE);
+ }
+
+ /*
+ * Very dumb mechanism to clean open FDs other than
+ * stdio. We don't want O_CLOEXEC for the pipes...
+ */
+ for (i = 4; i < 8192; i++)
+ close(i);
+
+ /*
+ * Read from the startup pipe; there should be no data
+ * and we should block until it is closed. We just
+ * carry on if this fails since it isn't critical.
+ */
+ ret = read(3, &i, sizeof(i));
+ if (ret < 0)
+ fprintf(stderr, "read(startp pipe) failed: %s (%d)\n",
+ strerror(errno), errno);
+ if (ret > 0)
+ fprintf(stderr, "%d bytes of data on startup pipe\n",
+ ret);
+ close(3);
+
+ ret = execl("gcs-stress-thread", "gcs-stress-thread", NULL);
+ fprintf(stderr, "execl(gcs-stress-thread) failed: %d (%s)\n",
+ errno, strerror(errno));
+
+ exit(EXIT_FAILURE);
+ } else {
+ /*
+ * In parent, remember the child and close our copy of the
+ * write side of stdout.
+ */
+ close(pipefd[1]);
+ child->stdout = pipefd[0];
+ child->output = NULL;
+ child->exited = false;
+ child->output_seen = false;
+
+ ev.events = EPOLLIN | EPOLLHUP;
+ ev.data.ptr = child;
+
+ ret = asprintf(&child->name, "Thread-%d", id);
+ if (ret == -1)
+ ksft_exit_fail_msg("asprintf() failed\n");
+
+ ret = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, child->stdout, &ev);
+ if (ret < 0) {
+ ksft_exit_fail_msg("%s EPOLL_CTL_ADD failed: %s (%d)\n",
+ child->name, strerror(errno), errno);
+ }
+ }
+
+ ksft_print_msg("Started %s\n", child->name);
+ num_children++;
+}
+
+static bool child_output_read(struct child_data *child)
+{
+ char read_data[1024];
+ char work[1024];
+ int ret, len, cur_work, cur_read;
+
+ ret = read(child->stdout, read_data, sizeof(read_data));
+ if (ret < 0) {
+ if (errno == EINTR)
+ return true;
+
+ ksft_print_msg("%s: read() failed: %s (%d)\n",
+ child->name, strerror(errno),
+ errno);
+ return false;
+ }
+ len = ret;
+
+ child->output_seen = true;
+
+ /* Pick up any partial read */
+ if (child->output) {
+ strncpy(work, child->output, sizeof(work) - 1);
+ cur_work = strnlen(work, sizeof(work));
+ free(child->output);
+ child->output = NULL;
+ } else {
+ cur_work = 0;
+ }
+
+ cur_read = 0;
+ while (cur_read < len) {
+ work[cur_work] = read_data[cur_read++];
+
+ if (work[cur_work] == '\n') {
+ work[cur_work] = '\0';
+ ksft_print_msg("%s: %s\n", child->name, work);
+ cur_work = 0;
+ } else {
+ cur_work++;
+ }
+ }
+
+ if (cur_work) {
+ work[cur_work] = '\0';
+ ret = asprintf(&child->output, "%s", work);
+ if (ret == -1)
+ ksft_exit_fail_msg("Out of memory\n");
+ }
+
+ return false;
+}
+
+static void child_output(struct child_data *child, uint32_t events,
+ bool flush)
+{
+ bool read_more;
+
+ if (events & EPOLLIN) {
+ do {
+ read_more = child_output_read(child);
+ } while (read_more);
+ }
+
+ if (events & EPOLLHUP) {
+ close(child->stdout);
+ child->stdout = -1;
+ flush = true;
+ }
+
+ if (flush && child->output) {
+ ksft_print_msg("%s: %s<EOF>\n", child->name, child->output);
+ free(child->output);
+ child->output = NULL;
+ }
+}
+
+static void child_tickle(struct child_data *child)
+{
+ if (child->output_seen && !child->exited)
+ kill(child->pid, SIGUSR1);
+}
+
+static void child_stop(struct child_data *child)
+{
+ if (!child->exited)
+ kill(child->pid, SIGTERM);
+}
+
+static void child_cleanup(struct child_data *child)
+{
+ pid_t ret;
+ int status;
+ bool fail = false;
+
+ if (!child->exited) {
+ do {
+ ret = waitpid(child->pid, &status, 0);
+ if (ret == -1 && errno == EINTR)
+ continue;
+
+ if (ret == -1) {
+ ksft_print_msg("waitpid(%d) failed: %s (%d)\n",
+ child->pid, strerror(errno),
+ errno);
+ fail = true;
+ break;
+ }
+
+ if (WIFEXITED(status)) {
+ child->exit_status = WEXITSTATUS(status);
+ child->exited = true;
+ }
+
+ if (WIFSIGNALED(status)) {
+ child->exit_signal = WTERMSIG(status);
+ ksft_print_msg("%s: Exited due to signal %d\n",
+ child->name, child->exit_signal);
+ fail = true;
+ child->exited = true;
+ }
+ } while (!child->exited);
+ }
+
+ if (!child->output_seen) {
+ ksft_print_msg("%s no output seen\n", child->name);
+ fail = true;
+ }
+
+ if (child->exit_status != 0) {
+ ksft_print_msg("%s exited with error code %d\n",
+ child->name, child->exit_status);
+ fail = true;
+ }
+
+ ksft_test_result(!fail, "%s\n", child->name);
+}
+
+static void handle_child_signal(int sig, siginfo_t *info, void *context)
+{
+ int i;
+ bool found = false;
+
+ for (i = 0; i < num_children; i++) {
+ if (children[i].pid == info->si_pid) {
+ children[i].exited = true;
+ children[i].exit_status = info->si_status;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ ksft_print_msg("SIGCHLD for unknown PID %d with status %d\n",
+ info->si_pid, info->si_status);
+}
+
+static void handle_exit_signal(int sig, siginfo_t *info, void *context)
+{
+ int i;
+
+ /* If we're already exiting then don't signal again */
+ if (terminate)
+ return;
+
+ ksft_print_msg("Got signal, exiting...\n");
+
+ terminate = true;
+
+ /*
+ * This should be redundant, the main loop should clean up
+ * after us, but for safety stop everything we can here.
+ */
+ for (i = 0; i < num_children; i++)
+ child_stop(&children[i]);
+}
+
+/* Handle any pending output without blocking */
+static void drain_output(bool flush)
+{
+ int ret = 1;
+ int i;
+
+ while (ret > 0) {
+ ret = epoll_wait(epoll_fd, evs, tests, 0);
+ if (ret < 0) {
+ if (errno == EINTR)
+ continue;
+ ksft_print_msg("epoll_wait() failed: %s (%d)\n",
+ strerror(errno), errno);
+ }
+
+ for (i = 0; i < ret; i++)
+ child_output(evs[i].data.ptr, evs[i].events, flush);
+ }
+}
+
+static const struct option options[] = {
+ { "timeout", required_argument, NULL, 't' },
+ { }
+};
+
+int main(int argc, char **argv)
+{
+ int seen_children;
+ bool all_children_started = false;
+ int gcs_threads;
+ int timeout = 10;
+ int ret, cpus, i, c;
+ struct sigaction sa;
+
+ while ((c = getopt_long(argc, argv, "t:", options, NULL)) != -1) {
+ switch (c) {
+ case 't':
+ ret = sscanf(optarg, "%d", &timeout);
+ if (ret != 1)
+ ksft_exit_fail_msg("Failed to parse timeout %s\n",
+ optarg);
+ break;
+ default:
+ ksft_exit_fail_msg("Unknown argument\n");
+ }
+ }
+
+ cpus = num_processors();
+ tests = 0;
+
+ if (getauxval(AT_HWCAP) & HWCAP_GCS) {
+ /* One extra thread, trying to trigger migrations */
+ gcs_threads = cpus + 1;
+ tests += gcs_threads;
+ } else {
+ gcs_threads = 0;
+ }
+
+ ksft_print_header();
+ ksft_set_plan(tests);
+
+ ksft_print_msg("%d CPUs, %d GCS threads\n",
+ cpus, gcs_threads);
+
+ if (!tests)
+ ksft_exit_skip("No tests scheduled\n");
+
+ if (timeout > 0)
+ ksft_print_msg("Will run for %ds\n", timeout);
+ else
+ ksft_print_msg("Will run until terminated\n");
+
+ children = calloc(tests, sizeof(*children));
+ if (!children)
+ ksft_exit_fail_msg("Unable to allocate child data\n");
+
+ ret = epoll_create1(EPOLL_CLOEXEC);
+ if (ret < 0)
+ ksft_exit_fail_msg("epoll_create1() failed: %s (%d)\n",
+ strerror(errno), errno);
+ epoll_fd = ret;
+
+ /* Create a pipe which children will block on before execing */
+ ret = pipe(startup_pipe);
+ if (ret != 0)
+ ksft_exit_fail_msg("Failed to create startup pipe: %s (%d)\n",
+ strerror(errno), errno);
+
+ /* Get signal handlers ready before we start any children */
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = handle_exit_signal;
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ sigemptyset(&sa.sa_mask);
+ ret = sigaction(SIGINT, &sa, NULL);
+ if (ret < 0)
+ ksft_print_msg("Failed to install SIGINT handler: %s (%d)\n",
+ strerror(errno), errno);
+ ret = sigaction(SIGTERM, &sa, NULL);
+ if (ret < 0)
+ ksft_print_msg("Failed to install SIGTERM handler: %s (%d)\n",
+ strerror(errno), errno);
+ sa.sa_sigaction = handle_child_signal;
+ ret = sigaction(SIGCHLD, &sa, NULL);
+ if (ret < 0)
+ ksft_print_msg("Failed to install SIGCHLD handler: %s (%d)\n",
+ strerror(errno), errno);
+
+ evs = calloc(tests, sizeof(*evs));
+ if (!evs)
+ ksft_exit_fail_msg("Failed to allocate %d epoll events\n",
+ tests);
+
+ for (i = 0; i < gcs_threads; i++)
+ start_thread(&children[i], i);
+
+ /*
+ * All children started, close the startup pipe and let them
+ * run.
+ */
+ close(startup_pipe[0]);
+ close(startup_pipe[1]);
+
+ timeout *= 10;
+ for (;;) {
+ /* Did we get a signal asking us to exit? */
+ if (terminate)
+ break;
+
+ /*
+ * Timeout is counted in 100ms periods with no output; the
+ * tests print during startup then are silent when
+ * running, so this should ensure they all ran enough
+ * to install the signal handler. This is especially
+ * useful in emulation where we will both be slow and
+ * likely to be running a large number of threads.
+ */
+ ret = epoll_wait(epoll_fd, evs, tests, 100);
+ if (ret < 0) {
+ if (errno == EINTR)
+ continue;
+ ksft_exit_fail_msg("epoll_wait() failed: %s (%d)\n",
+ strerror(errno), errno);
+ }
+
+ /* Output? */
+ if (ret > 0) {
+ for (i = 0; i < ret; i++) {
+ child_output(evs[i].data.ptr, evs[i].events,
+ false);
+ }
+ continue;
+ }
+
+ /* Otherwise epoll_wait() timed out */
+
+ /*
+ * If the child processes have not produced output they
+ * aren't actually running the tests yet.
+ */
+ if (!all_children_started) {
+ seen_children = 0;
+
+ for (i = 0; i < num_children; i++)
+ if (children[i].output_seen ||
+ children[i].exited)
+ seen_children++;
+
+ if (seen_children != num_children) {
+ ksft_print_msg("Waiting for %d children\n",
+ num_children - seen_children);
+ continue;
+ }
+
+ all_children_started = true;
+ }
+
+ ksft_print_msg("Sending signals, timeout remaining: %d00ms\n",
+ timeout);
+
+ for (i = 0; i < num_children; i++)
+ child_tickle(&children[i]);
+
+ /* Negative timeout means run indefinitely */
+ if (timeout < 0)
+ continue;
+ if (--timeout == 0)
+ break;
+ }
+
+ ksft_print_msg("Finishing up...\n");
+ terminate = true;
+
+ for (i = 0; i < tests; i++)
+ child_stop(&children[i]);
+
+ drain_output(false);
+
+ for (i = 0; i < tests; i++)
+ child_cleanup(&children[i]);
+
+ drain_output(true);
+
+ ksft_finished();
+}
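
The startup pipe above acts as a barrier: each child keeps only the read
side (dup'd to fd 3), blocks in read() until the parent closes both ends,
and only then execs the worker, so all threads start roughly together. A
minimal sketch of the same pattern, with hypothetical names and separate
from the patch:

	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <sys/wait.h>

	/* Sketch of the startup-pipe barrier: children block reading the
	 * pipe until the parent closes it, releasing them all at once. */
	int main(void)
	{
		int fds[2];
		char c;

		if (pipe(fds))
			exit(EXIT_FAILURE);

		for (int i = 0; i < 4; i++) {
			if (fork() == 0) {
				close(fds[1]);		/* drop our write side */
				read(fds[0], &c, 1);	/* blocks until EOF */
				printf("child %d released\n", i);
				_exit(0);
			}
		}

		close(fds[0]);	/* parent closes both ends: children see EOF */
		close(fds[1]);
		while (wait(NULL) > 0)
			;
		return 0;
	}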
diff --git a/tools/testing/selftests/arm64/gcs/gcs-util.h b/tools/testing/selftests/arm64/gcs/gcs-util.h
new file mode 100644
index 000000000000..c99a6b39ac14
--- /dev/null
+++ b/tools/testing/selftests/arm64/gcs/gcs-util.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 ARM Limited.
+ */
+
+#ifndef GCS_UTIL_H
+#define GCS_UTIL_H
+
+#include <stdbool.h>
+
+#ifndef __NR_map_shadow_stack
+#define __NR_map_shadow_stack 453
+#endif
+
+#ifndef __NR_prctl
+#define __NR_prctl 167
+#endif
+
+#ifndef NT_ARM_GCS
+#define NT_ARM_GCS 0x410
+
+struct user_gcs {
+ __u64 features_enabled;
+ __u64 features_locked;
+ __u64 gcspr_el0;
+};
+#endif
+
+/* Shadow Stack/Guarded Control Stack interface */
+#define PR_GET_SHADOW_STACK_STATUS 74
+#define PR_SET_SHADOW_STACK_STATUS 75
+#define PR_LOCK_SHADOW_STACK_STATUS 76
+
+# define PR_SHADOW_STACK_ENABLE (1UL << 0)
+# define PR_SHADOW_STACK_WRITE (1UL << 1)
+# define PR_SHADOW_STACK_PUSH (1UL << 2)
+
+#define PR_SHADOW_STACK_ALL_MODES \
+ (PR_SHADOW_STACK_ENABLE | PR_SHADOW_STACK_WRITE | PR_SHADOW_STACK_PUSH)
+
+#define SHADOW_STACK_SET_TOKEN (1ULL << 0) /* Set up a restore token in the shadow stack */
+#define SHADOW_STACK_SET_MARKER (1ULL << 1) /* Set up a top of stack marker in the shadow stack */
+
+#define GCS_CAP_ADDR_MASK (0xfffffffffffff000UL)
+#define GCS_CAP_TOKEN_MASK (0x0000000000000fffUL)
+#define GCS_CAP_VALID_TOKEN 1
+#define GCS_CAP_IN_PROGRESS_TOKEN 5
+
+#define GCS_CAP(x) (((unsigned long)(x) & GCS_CAP_ADDR_MASK) | \
+ GCS_CAP_VALID_TOKEN)
+
+static inline unsigned long *get_gcspr(void)
+{
+ unsigned long *gcspr;
+
+ asm volatile(
+ "mrs %0, S3_3_C2_C5_1"
+ : "=r" (gcspr)
+ :
+ : "cc");
+
+ return gcspr;
+}
+
+static inline void __attribute__((always_inline)) gcsss1(unsigned long *Xt)
+{
+ asm volatile (
+ "sys #3, C7, C7, #2, %0\n"
+ :
+ : "rZ" (Xt)
+ : "memory");
+}
+
+static inline unsigned long __attribute__((always_inline)) *gcsss2(void)
+{
+ unsigned long *Xt;
+
+ asm volatile(
+ "SYSL %0, #3, C7, C7, #3\n"
+ : "=r" (Xt)
+ :
+ : "memory");
+
+ return Xt;
+}
+
+static inline bool chkfeat_gcs(void)
+{
+ register long val __asm__ ("x16") = 1;
+
+ /* CHKFEAT x16 */
+ asm volatile(
+ "hint #0x28\n"
+ : "=r" (val)
+ : "r" (val));
+
+ return val != 1;
+}
+
+#endif
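
Given the definitions above, a valid cap token is the 4KiB-aligned
address of its own slot combined with GCS_CAP_VALID_TOKEN, with the
terminator (when requested) in the slot above it. A sketch of how a
consumer might validate a freshly mapped stack, assuming both the
MARKER and TOKEN flags were used (illustrative, not part of the patch):

	#include <assert.h>
	#include "gcs-util.h"

	/* Sketch: with SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN the
	 * top slot is a zero terminator and the slot below it holds a cap
	 * token encoding its own 4KiB-aligned address plus the valid bit. */
	static void check_new_gcs(unsigned long *stack, size_t size)
	{
		size_t nslots = size / sizeof(unsigned long);
		unsigned long *cap_slot = &stack[nslots - 2];

		assert(stack[nslots - 1] == 0);		/* terminator */
		assert(*cap_slot == GCS_CAP(cap_slot));	/* cap token */
		assert((*cap_slot & GCS_CAP_TOKEN_MASK) ==
		       GCS_CAP_VALID_TOKEN);
	}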
diff --git a/tools/testing/selftests/arm64/gcs/gcspushm.S b/tools/testing/selftests/arm64/gcs/gcspushm.S
new file mode 100644
index 000000000000..bbe17c1325ac
--- /dev/null
+++ b/tools/testing/selftests/arm64/gcs/gcspushm.S
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright 2024 Arm Limited
+//
+// Give ourselves GCS push permissions then use them
+
+#include <asm/unistd.h>
+
+/* Shadow Stack/Guarded Control Stack interface */
+#define PR_GET_SHADOW_STACK_STATUS 74
+#define PR_SET_SHADOW_STACK_STATUS 75
+#define PR_LOCK_SHADOW_STACK_STATUS 76
+
+# define PR_SHADOW_STACK_ENABLE (1UL << 0)
+# define PR_SHADOW_STACK_WRITE (1UL << 1)
+# define PR_SHADOW_STACK_PUSH (1UL << 2)
+
+#define KSFT_SKIP 4
+
+.macro function name
+ .macro endfunction
+ .type \name, @function
+ .purgem endfunction
+ .endm
+\name:
+.endm
+
+// Print a single character x0 to stdout
+// Clobbers x0-x2,x8
+function putc
+ str x0, [sp, #-16]!
+
+ mov x0, #1 // STDOUT_FILENO
+ mov x1, sp
+ mov x2, #1
+ mov x8, #__NR_write
+ svc #0
+
+ add sp, sp, #16
+ ret
+endfunction
+.globl putc
+
+// Print a NUL-terminated string starting at address x0 to stdout
+// Clobbers x0-x3,x8
+function puts
+ mov x1, x0
+
+ mov x2, #0
+0: ldrb w3, [x0], #1
+ cbz w3, 1f
+ add x2, x2, #1
+ b 0b
+
+1: mov w0, #1 // STDOUT_FILENO
+ mov x8, #__NR_write
+ svc #0
+
+ ret
+endfunction
+.globl puts
+
+// Utility macro to print a literal string
+// Clobbers x0-x4,x8
+.macro puts string
+ .pushsection .rodata.str1.1, "aMS", @progbits, 1
+.L__puts_literal\@: .string "\string"
+ .popsection
+
+ ldr x0, =.L__puts_literal\@
+ bl puts
+.endm
+
+.globl _start
+function _start
+ // Run with GCS
+ mov x0, PR_SET_SHADOW_STACK_STATUS
+ mov x1, PR_SHADOW_STACK_ENABLE | PR_SHADOW_STACK_PUSH
+ mov x2, xzr
+ mov x3, xzr
+ mov x4, xzr
+ mov x5, xzr
+ mov x8, #__NR_prctl
+ svc #0
+ cbz x0, 1f
+ puts "Failed to enable GCS with push permission\n"
+ mov x0, #KSFT_SKIP
+ b 2f
+1:
+ sys #3, c7, c7, #0, x0 // GCSPUSHM
+ sysl x0, #3, c7, c7, #1 // GCSPOPM
+
+ mov x0, #0
+2:
+ mov x8, #__NR_exit
+ svc #0
diff --git a/tools/testing/selftests/arm64/gcs/gcsstr.S b/tools/testing/selftests/arm64/gcs/gcsstr.S
new file mode 100644
index 000000000000..a42bba6e30b1
--- /dev/null
+++ b/tools/testing/selftests/arm64/gcs/gcsstr.S
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright 2024 Arm Limited
+//
+// Give ourselves GCS write permissions then use them
+
+#include <asm/unistd.h>
+
+/* Shadow Stack/Guarded Control Stack interface */
+#define PR_GET_SHADOW_STACK_STATUS 74
+#define PR_SET_SHADOW_STACK_STATUS 75
+#define PR_LOCK_SHADOW_STACK_STATUS 76
+
+# define PR_SHADOW_STACK_ENABLE (1UL << 0)
+# define PR_SHADOW_STACK_WRITE (1UL << 1)
+# define PR_SHADOW_STACK_PUSH (1UL << 2)
+
+#define GCSPR_EL0 S3_3_C2_C5_1
+
+#define KSFT_SKIP 4
+
+.macro function name
+ .macro endfunction
+ .type \name, @function
+ .purgem endfunction
+ .endm
+\name:
+.endm
+
+// Print a single character x0 to stdout
+// Clobbers x0-x2,x8
+function putc
+ str x0, [sp, #-16]!
+
+ mov x0, #1 // STDOUT_FILENO
+ mov x1, sp
+ mov x2, #1
+ mov x8, #__NR_write
+ svc #0
+
+ add sp, sp, #16
+ ret
+endfunction
+.globl putc
+
+// Print a NUL-terminated string starting at address x0 to stdout
+// Clobbers x0-x3,x8
+function puts
+ mov x1, x0
+
+ mov x2, #0
+0: ldrb w3, [x0], #1
+ cbz w3, 1f
+ add x2, x2, #1
+ b 0b
+
+1: mov w0, #1 // STDOUT_FILENO
+ mov x8, #__NR_write
+ svc #0
+
+ ret
+endfunction
+.globl puts
+
+// Utility macro to print a literal string
+// Clobbers x0-x4,x8
+.macro puts string
+ .pushsection .rodata.str1.1, "aMS", @progbits, 1
+.L__puts_literal\@: .string "\string"
+ .popsection
+
+ ldr x0, =.L__puts_literal\@
+ bl puts
+.endm
+
+.globl _start
+function _start
+ // Run with GCS
+ mov x0, PR_SET_SHADOW_STACK_STATUS
+ mov x1, PR_SHADOW_STACK_ENABLE | PR_SHADOW_STACK_WRITE
+ mov x2, xzr
+ mov x3, xzr
+ mov x4, xzr
+ mov x5, xzr
+ mov x8, #__NR_prctl
+ svc #0
+ cbz x0, 1f
+ puts "Failed to enable GCS with write permission\n"
+ mov x0, #KSFT_SKIP
+ b 2f
+1:
+ mrs x0, GCSPR_EL0
+ sub x0, x0, #8
+ .inst 0xd91f1c01 // GCSSTR x1, x0
+
+ mov x0, #0
+2:
+ mov x8, #__NR_exit
+ svc #0
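
gcsstr.S stores through GCSPR_EL0 with a hand-encoded .inst word because
assemblers without GCS support reject the GCSSTR mnemonic. From C the
same trick might look like this sketch, pinning the operands to the
registers the 0xd91f1c01 encoding expects (illustrative, not part of the
patch):

	/* Sketch: GCSSTR x1, [x0] via its raw encoding, usable even when
	 * the toolchain does not know the mnemonic. */
	static inline void gcsstr(unsigned long *slot, unsigned long val)
	{
		register unsigned long *addr __asm__("x0") = slot;
		register unsigned long data __asm__("x1") = val;

		__asm__ volatile(".inst 0xd91f1c01"	/* GCSSTR x1, [x0] */
				 :
				 : "r"(addr), "r"(data)
				 : "memory");
	}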
diff --git a/tools/testing/selftests/arm64/gcs/libc-gcs.c b/tools/testing/selftests/arm64/gcs/libc-gcs.c
new file mode 100644
index 000000000000..17b2fabfec38
--- /dev/null
+++ b/tools/testing/selftests/arm64/gcs/libc-gcs.c
@@ -0,0 +1,728 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 ARM Limited.
+ */
+
+#define _GNU_SOURCE
+
+#include <pthread.h>
+#include <stdbool.h>
+
+#include <sys/auxv.h>
+#include <sys/mman.h>
+#include <sys/prctl.h>
+#include <sys/ptrace.h>
+#include <sys/uio.h>
+
+#include <asm/hwcap.h>
+#include <asm/mman.h>
+
+#include <linux/compiler.h>
+
+#include "kselftest_harness.h"
+
+#include "gcs-util.h"
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ register long _num __asm__ ("x8") = (num); \
+ register long _arg1 __asm__ ("x0") = (long)(arg1); \
+ register long _arg2 __asm__ ("x1") = (long)(arg2); \
+ register long _arg3 __asm__ ("x2") = 0; \
+ register long _arg4 __asm__ ("x3") = 0; \
+ register long _arg5 __asm__ ("x4") = 0; \
+ \
+ __asm__ volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_arg1), "r"(_arg2), \
+ "r"(_arg3), "r"(_arg4), \
+ "r"(_arg5), "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+static noinline void gcs_recurse(int depth)
+{
+ if (depth)
+ gcs_recurse(depth - 1);
+
+ /* Prevent tail call optimization so we actually recurse */
+ asm volatile("dsb sy" : : : "memory");
+}
+
+/* Smoke test that a function call and return works */
+TEST(can_call_function)
+{
+ gcs_recurse(0);
+}
+
+static void *gcs_test_thread(void *arg)
+{
+ int ret;
+ unsigned long mode;
+
+ /*
+ * Some libcs don't seem to fill unused arguments with 0 but
+ * the kernel validates this so we supply all 5 arguments.
+ */
+ ret = prctl(PR_GET_SHADOW_STACK_STATUS, &mode, 0, 0, 0);
+ if (ret != 0) {
+ ksft_print_msg("PR_GET_SHADOW_STACK_STATUS failed: %d\n", ret);
+ return NULL;
+ }
+
+ if (!(mode & PR_SHADOW_STACK_ENABLE)) {
+ ksft_print_msg("GCS not enabled in thread, mode is %lu\n",
+ mode);
+ return NULL;
+ }
+
+ /* Just in case... */
+ gcs_recurse(0);
+
+ /* Use a non-NULL value to indicate a pass */
+ return &gcs_test_thread;
+}
+
+/* Verify that if we start a new thread it has GCS enabled */
+TEST(gcs_enabled_thread)
+{
+ pthread_t thread;
+ void *thread_ret;
+ int ret;
+
+ ret = pthread_create(&thread, NULL, gcs_test_thread, NULL);
+ ASSERT_TRUE(ret == 0);
+ if (ret != 0)
+ return;
+
+ ret = pthread_join(thread, &thread_ret);
+ ASSERT_TRUE(ret == 0);
+ if (ret != 0)
+ return;
+
+ ASSERT_TRUE(thread_ret != NULL);
+}
+
+/* Read the GCS until we find the terminator */
+TEST(gcs_find_terminator)
+{
+ unsigned long *gcs, *cur;
+
+ gcs = get_gcspr();
+ cur = gcs;
+ while (*cur)
+ cur++;
+
+ ksft_print_msg("GCS in use from %p-%p\n", gcs, cur);
+
+ /*
+ * We should have at least whatever called into this test so
+ * the two pointer should differ.
+ */
+ ASSERT_TRUE(gcs != cur);
+}
+
+/*
+ * We can access a GCS via ptrace
+ *
+ * This could usefully have a fixture but note that each test is
+ * fork()ed into a new child which causes issues. Might be better to
+ * lift at least some of this out into a separate, non-harness, test
+ * program.
+ */
+TEST(ptrace_read_write)
+{
+ pid_t child, pid;
+ int ret, status;
+ siginfo_t si;
+ uint64_t val, rval, gcspr;
+ struct user_gcs child_gcs;
+ struct iovec iov, local_iov, remote_iov;
+
+ child = fork();
+ if (child == -1) {
+ ksft_print_msg("fork() failed: %d (%s)\n",
+ errno, strerror(errno));
+ ASSERT_NE(child, -1);
+ }
+
+ if (child == 0) {
+ /*
+ * In child, make sure there's something on the stack and
+ * ask to be traced.
+ */
+ gcs_recurse(0);
+ if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
+ ksft_exit_fail_msg("PTRACE_TRACEME %s",
+ strerror(errno));
+
+ if (raise(SIGSTOP))
+ ksft_exit_fail_msg("raise(SIGSTOP) %s",
+ strerror(errno));
+
+ return;
+ }
+
+ ksft_print_msg("Child: %d\n", child);
+
+ /* Attach to the child */
+ while (1) {
+ int sig;
+
+ pid = wait(&status);
+ if (pid == -1) {
+ ksft_print_msg("wait() failed: %s",
+ strerror(errno));
+ goto error;
+ }
+
+ /*
+ * This should never happen but it's hard to flag in
+ * the framework.
+ */
+ if (pid != child)
+ continue;
+
+ if (WIFEXITED(status) || WIFSIGNALED(status))
+ ksft_exit_fail_msg("Child died unexpectedly\n");
+
+ if (!WIFSTOPPED(status))
+ goto error;
+
+ sig = WSTOPSIG(status);
+
+ if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
+ if (errno == ESRCH) {
+ ASSERT_NE(errno, ESRCH);
+ return;
+ }
+
+ if (errno == EINVAL) {
+ sig = 0; /* bust group-stop */
+ goto cont;
+ }
+
+ ksft_print_msg("PTRACE_GETSIGINFO: %s\n",
+ strerror(errno));
+ goto error;
+ }
+
+ if (sig == SIGSTOP && si.si_code == SI_TKILL &&
+ si.si_pid == pid)
+ break;
+
+ cont:
+ if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
+ if (errno == ESRCH) {
+ ASSERT_NE(errno, ESRCH);
+ return;
+ }
+
+ ksft_print_msg("PTRACE_CONT: %s\n", strerror(errno));
+ goto error;
+ }
+ }
+
+ /* Where is the child GCS? */
+ iov.iov_base = &child_gcs;
+ iov.iov_len = sizeof(child_gcs);
+ ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_GCS, &iov);
+ if (ret != 0) {
+ ksft_print_msg("Failed to read child GCS state: %s (%d)\n",
+ strerror(errno), errno);
+ goto error;
+ }
+
+ /* We should have inherited GCS over fork(), confirm */
+ if (!(child_gcs.features_enabled & PR_SHADOW_STACK_ENABLE)) {
+ ASSERT_TRUE(child_gcs.features_enabled &
+ PR_SHADOW_STACK_ENABLE);
+ goto error;
+ }
+
+ gcspr = child_gcs.gcspr_el0;
+ ksft_print_msg("Child GCSPR 0x%lx, flags %llx, locked %llx\n",
+ gcspr, child_gcs.features_enabled,
+ child_gcs.features_locked);
+
+ /* Ideally we'd cross check with the child memory map */
+
+ errno = 0;
+ val = ptrace(PTRACE_PEEKDATA, child, (void *)gcspr, NULL);
+ ret = errno;
+ if (ret != 0)
+ ksft_print_msg("PTRACE_PEEKDATA failed: %s (%d)\n",
+ strerror(ret), ret);
+ EXPECT_EQ(ret, 0);
+
+ /* The child should be in a function, so the GCSPR shouldn't be 0 */
+ EXPECT_NE(val, 0);
+
+ /* Same thing via process_vm_readv() */
+ local_iov.iov_base = &rval;
+ local_iov.iov_len = sizeof(rval);
+ remote_iov.iov_base = (void *)gcspr;
+ remote_iov.iov_len = sizeof(rval);
+ ret = process_vm_readv(child, &local_iov, 1, &remote_iov, 1, 0);
+ if (ret == -1)
+ ksft_print_msg("process_vm_readv() failed: %s (%d)\n",
+ strerror(errno), errno);
+ EXPECT_EQ(ret, sizeof(rval));
+ EXPECT_EQ(val, rval);
+
+ /* Write data via a poke */
+ ret = ptrace(PTRACE_POKEDATA, child, (void *)gcspr, NULL);
+ if (ret == -1)
+ ksft_print_msg("PTRACE_POKEDATA failed: %s (%d)\n",
+ strerror(errno), errno);
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(0, ptrace(PTRACE_PEEKDATA, child, (void *)gcspr, NULL));
+
+ /* Restore what we had before */
+ ret = ptrace(PTRACE_POKEDATA, child, (void *)gcspr, val);
+ if (ret == -1)
+ ksft_print_msg("PTRACE_POKEDATA failed: %s (%d)\n",
+ strerror(errno), errno);
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(val, ptrace(PTRACE_PEEKDATA, child, (void *)gcspr, NULL));
+
+ /* That's all, folks */
+ kill(child, SIGKILL);
+ return;
+
+error:
+ kill(child, SIGKILL);
+ ASSERT_FALSE(true);
+}
+
+FIXTURE(map_gcs)
+{
+ unsigned long *stack;
+};
+
+FIXTURE_VARIANT(map_gcs)
+{
+ size_t stack_size;
+ unsigned long flags;
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s2k_cap_marker)
+{
+ .stack_size = 2 * 1024,
+ .flags = SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s2k_cap)
+{
+ .stack_size = 2 * 1024,
+ .flags = SHADOW_STACK_SET_TOKEN,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s2k_marker)
+{
+ .stack_size = 2 * 1024,
+ .flags = SHADOW_STACK_SET_MARKER,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s2k)
+{
+ .stack_size = 2 * 1024,
+ .flags = 0,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s4k_cap_marker)
+{
+ .stack_size = 4 * 1024,
+ .flags = SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s4k_cap)
+{
+ .stack_size = 4 * 1024,
+ .flags = SHADOW_STACK_SET_TOKEN,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s4k_marker)
+{
+ .stack_size = 4 * 1024,
+ .flags = SHADOW_STACK_SET_MARKER,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s4k)
+{
+ .stack_size = 4 * 1024,
+ .flags = 0,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s16k_cap_marker)
+{
+ .stack_size = 16 * 1024,
+ .flags = SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s16k_cap)
+{
+ .stack_size = 16 * 1024,
+ .flags = SHADOW_STACK_SET_TOKEN,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s16k_marker)
+{
+ .stack_size = 16 * 1024,
+ .flags = SHADOW_STACK_SET_MARKER,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s16k)
+{
+ .stack_size = 16 * 1024,
+ .flags = 0,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s64k_cap_marker)
+{
+ .stack_size = 64 * 1024,
+ .flags = SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s64k_cap)
+{
+ .stack_size = 64 * 1024,
+ .flags = SHADOW_STACK_SET_TOKEN,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s64k_marker)
+{
+ .stack_size = 64 * 1024,
+ .flags = SHADOW_STACK_SET_MARKER,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s64k)
+{
+ .stack_size = 64 * 1024,
+ .flags = 0,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s128k_cap_marker)
+{
+ .stack_size = 128 * 1024,
+ .flags = SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s128k_cap)
+{
+ .stack_size = 128 * 1024,
+ .flags = SHADOW_STACK_SET_TOKEN,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s128k_marker)
+{
+ .stack_size = 128 * 1024,
+ .flags = SHADOW_STACK_SET_MARKER,
+};
+
+FIXTURE_VARIANT_ADD(map_gcs, s128k)
+{
+ .stack_size = 128 * 1024,
+ .flags = 0,
+};
+
+FIXTURE_SETUP(map_gcs)
+{
+ self->stack = (void *)syscall(__NR_map_shadow_stack, 0,
+ variant->stack_size,
+ variant->flags);
+ ASSERT_FALSE(self->stack == MAP_FAILED);
+ ksft_print_msg("Allocated stack from %p-%p\n", self->stack,
+ (char *)self->stack + variant->stack_size);
+}
+
+FIXTURE_TEARDOWN(map_gcs)
+{
+ int ret;
+
+ if (self->stack != MAP_FAILED) {
+ ret = munmap(self->stack, variant->stack_size);
+ ASSERT_EQ(ret, 0);
+ }
+}
+
+/* The stack has a cap token */
+TEST_F(map_gcs, stack_capped)
+{
+ unsigned long *stack = self->stack;
+ size_t cap_index;
+
+ cap_index = (variant->stack_size / sizeof(unsigned long));
+
+ switch (variant->flags & (SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN)) {
+ case SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN:
+ cap_index -= 2;
+ break;
+ case SHADOW_STACK_SET_TOKEN:
+ cap_index -= 1;
+ break;
+ case SHADOW_STACK_SET_MARKER:
+ case 0:
+ /* No cap, no test */
+ return;
+ }
+
+ ASSERT_EQ(stack[cap_index], GCS_CAP(&stack[cap_index]));
+}
+
+/* The top of the stack is 0 */
+TEST_F(map_gcs, stack_terminated)
+{
+ unsigned long *stack = self->stack;
+ size_t term_index;
+
+ if (!(variant->flags & SHADOW_STACK_SET_MARKER))
+ return;
+
+ term_index = (variant->stack_size / sizeof(unsigned long)) - 1;
+
+ ASSERT_EQ(stack[term_index], 0);
+}
+
+/* Writes should fault */
+TEST_F_SIGNAL(map_gcs, not_writeable, SIGSEGV)
+{
+ self->stack[0] = 0;
+}
+
+/* Put it all together, we can safely switch to and from the stack */
+TEST_F(map_gcs, stack_switch)
+{
+ size_t cap_index;
+ unsigned long *orig_gcspr_el0, *pivot_gcspr_el0;
+
+ cap_index = variant->stack_size / sizeof(unsigned long);
+
+ /* Skip over the stack terminator and point at the cap */
+ switch (variant->flags & (SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN)) {
+ case SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN:
+ cap_index -= 2;
+ break;
+ case SHADOW_STACK_SET_TOKEN:
+ cap_index -= 1;
+ break;
+ case SHADOW_STACK_SET_MARKER:
+ case 0:
+ /* No cap, no test */
+ return;
+ }
+ pivot_gcspr_el0 = &self->stack[cap_index];
+
+ /* Pivot to the new GCS */
+ ksft_print_msg("Pivoting to %p from %p, target has value 0x%lx\n",
+ pivot_gcspr_el0, get_gcspr(),
+ *pivot_gcspr_el0);
+ gcsss1(pivot_gcspr_el0);
+ orig_gcspr_el0 = gcsss2();
+ ksft_print_msg("Pivoted to %p from %p, target has value 0x%lx\n",
+ get_gcspr(), orig_gcspr_el0,
+ *pivot_gcspr_el0);
+
+ ksft_print_msg("Pivoted, GCSPR_EL0 now %p\n", get_gcspr());
+
+ /* New GCS must be in the new buffer */
+ ASSERT_TRUE((unsigned long)get_gcspr() > (unsigned long)self->stack);
+ ASSERT_TRUE((unsigned long)get_gcspr() <=
+ (unsigned long)self->stack + variant->stack_size);
+
+ /* We should be able to use all but 2 slots of the new stack */
+ ksft_print_msg("Recursing %zu levels\n", cap_index - 1);
+ gcs_recurse(cap_index - 1);
+
+ /* Pivot back to the original GCS */
+ gcsss1(orig_gcspr_el0);
+ pivot_gcspr_el0 = gcsss2();
+
+ gcs_recurse(0);
+ ksft_print_msg("Pivoted back to GCSPR_EL0 0x%p\n", get_gcspr());
+}
+
+/* We fault if we try to go beyond the end of the stack */
+TEST_F_SIGNAL(map_gcs, stack_overflow, SIGSEGV)
+{
+ size_t cap_index;
+ unsigned long *orig_gcspr_el0, *pivot_gcspr_el0;
+
+ cap_index = variant->stack_size / sizeof(unsigned long);
+
+ /* Skip over the stack terminator and point at the cap */
+ switch (variant->flags & (SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN)) {
+ case SHADOW_STACK_SET_MARKER | SHADOW_STACK_SET_TOKEN:
+ cap_index -= 2;
+ break;
+ case SHADOW_STACK_SET_TOKEN:
+ cap_index -= 1;
+ break;
+ case SHADOW_STACK_SET_MARKER:
+ case 0:
+ /* No cap, no test but we need to SEGV to avoid a false fail */
+ orig_gcspr_el0 = get_gcspr();
+ *orig_gcspr_el0 = 0;
+ return;
+ }
+ pivot_gcspr_el0 = &self->stack[cap_index];
+
+ /* Pivot to the new GCS */
+ ksft_print_msg("Pivoting to %p from %p, target has value 0x%lx\n",
+ pivot_gcspr_el0, get_gcspr(),
+ *pivot_gcspr_el0);
+ gcsss1(pivot_gcspr_el0);
+ orig_gcspr_el0 = gcsss2();
+ ksft_print_msg("Pivoted to %p from %p, target has value 0x%lx\n",
+ pivot_gcspr_el0, orig_gcspr_el0,
+ *pivot_gcspr_el0);
+
+ ksft_print_msg("Pivoted, GCSPR_EL0 now %p\n", get_gcspr());
+
+ /* New GCS must be in the new buffer */
+ ASSERT_TRUE((unsigned long)get_gcspr() > (unsigned long)self->stack);
+ ASSERT_TRUE((unsigned long)get_gcspr() <=
+ (unsigned long)self->stack + variant->stack_size);
+
+ /* Now try to recurse, we should fault doing this. */
+ ksft_print_msg("Recursing %zu levels...\n", cap_index + 1);
+ gcs_recurse(cap_index + 1);
+ ksft_print_msg("...done\n");
+
+ /* Clean up properly to try to guard against spurious passes. */
+ gcsss1(orig_gcspr_el0);
+ pivot_gcspr_el0 = gcsss2();
+ ksft_print_msg("Pivoted back to GCSPR_EL0 0x%p\n", get_gcspr());
+}
+
+FIXTURE(map_invalid_gcs)
+{
+};
+
+FIXTURE_VARIANT(map_invalid_gcs)
+{
+ size_t stack_size;
+};
+
+FIXTURE_SETUP(map_invalid_gcs)
+{
+}
+
+FIXTURE_TEARDOWN(map_invalid_gcs)
+{
+}
+
+/* GCS must be larger than 16 bytes */
+FIXTURE_VARIANT_ADD(map_invalid_gcs, too_small)
+{
+ .stack_size = 8,
+};
+
+/* GCS size must be 16 byte aligned */
+FIXTURE_VARIANT_ADD(map_invalid_gcs, unaligned_1) { .stack_size = 1024 + 1 };
+FIXTURE_VARIANT_ADD(map_invalid_gcs, unaligned_2) { .stack_size = 1024 + 2 };
+FIXTURE_VARIANT_ADD(map_invalid_gcs, unaligned_3) { .stack_size = 1024 + 3 };
+FIXTURE_VARIANT_ADD(map_invalid_gcs, unaligned_4) { .stack_size = 1024 + 4 };
+FIXTURE_VARIANT_ADD(map_invalid_gcs, unaligned_5) { .stack_size = 1024 + 5 };
+FIXTURE_VARIANT_ADD(map_invalid_gcs, unaligned_6) { .stack_size = 1024 + 6 };
+FIXTURE_VARIANT_ADD(map_invalid_gcs, unaligned_7) { .stack_size = 1024 + 7 };
+
+TEST_F(map_invalid_gcs, do_map)
+{
+ void *stack;
+
+ stack = (void *)syscall(__NR_map_shadow_stack, 0,
+ variant->stack_size, 0);
+ ASSERT_TRUE(stack == MAP_FAILED);
+ if (stack != MAP_FAILED)
+ munmap(stack, variant->stack_size);
+}
+
+FIXTURE(invalid_mprotect)
+{
+ unsigned long *stack;
+ size_t stack_size;
+};
+
+FIXTURE_VARIANT(invalid_mprotect)
+{
+ unsigned long flags;
+};
+
+FIXTURE_SETUP(invalid_mprotect)
+{
+ self->stack_size = sysconf(_SC_PAGE_SIZE);
+ self->stack = (void *)syscall(__NR_map_shadow_stack, 0,
+ self->stack_size, 0);
+ ASSERT_FALSE(self->stack == MAP_FAILED);
+ ksft_print_msg("Allocated stack from %p-%p\n", self->stack,
+ (char *)self->stack + self->stack_size);
+}
+
+FIXTURE_TEARDOWN(invalid_mprotect)
+{
+ int ret;
+
+ if (self->stack != MAP_FAILED) {
+ ret = munmap(self->stack, self->stack_size);
+ ASSERT_EQ(ret, 0);
+ }
+}
+
+FIXTURE_VARIANT_ADD(invalid_mprotect, exec)
+{
+ .flags = PROT_EXEC,
+};
+
+TEST_F(invalid_mprotect, do_map)
+{
+ int ret;
+
+ ret = mprotect(self->stack, self->stack_size, variant->flags);
+ ASSERT_EQ(ret, -1);
+}
+
+TEST_F(invalid_mprotect, do_map_read)
+{
+ int ret;
+
+ ret = mprotect(self->stack, self->stack_size,
+ variant->flags | PROT_READ);
+ ASSERT_EQ(ret, -1);
+}
+
+int main(int argc, char **argv)
+{
+ unsigned long gcs_mode;
+ int ret;
+
+ if (!(getauxval(AT_HWCAP) & HWCAP_GCS))
+ ksft_exit_skip("SKIP GCS not supported\n");
+
+ /*
+ * Force shadow stacks on, our tests *should* be fine with or
+ * without libc support and with or without this having ended
+ * up tagged for GCS and enabled by the dynamic linker. We
+ * can't use the libc prctl() function since we can't return
+ * from enabling the stack.
+ */
+ ret = my_syscall2(__NR_prctl, PR_GET_SHADOW_STACK_STATUS, &gcs_mode);
+ if (ret) {
+ ksft_print_msg("Failed to read GCS state: %d\n", ret);
+ return EXIT_FAILURE;
+ }
+
+ if (!(gcs_mode & PR_SHADOW_STACK_ENABLE)) {
+ gcs_mode = PR_SHADOW_STACK_ENABLE;
+ ret = my_syscall2(__NR_prctl, PR_SET_SHADOW_STACK_STATUS,
+ gcs_mode);
+ if (ret) {
+ ksft_print_msg("Failed to configure GCS: %d\n", ret);
+ return EXIT_FAILURE;
+ }
+ }
+
+ /* Avoid returning in case libc doesn't understand GCS */
+ exit(test_harness_run(argc, argv));
+}
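
The map_gcs fixtures above exercise map_shadow_stack() across sizes and
flag combinations. A minimal standalone sketch of the syscall, mirroring
the fixture setup and teardown (hypothetical example, not part of the
patch):

	#define _GNU_SOURCE
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#include "gcs-util.h"	/* __NR_map_shadow_stack and flag bits */

	/* Sketch: map a 4KiB shadow stack with a terminator and cap token,
	 * letting the kernel choose the address, then release it with a
	 * plain munmap() as the fixture teardown does. */
	int main(void)
	{
		size_t size = 4096;
		void *gcs;

		gcs = (void *)syscall(__NR_map_shadow_stack, 0, size,
				      SHADOW_STACK_SET_MARKER |
				      SHADOW_STACK_SET_TOKEN);
		if (gcs == MAP_FAILED)
			return 1;

		return munmap(gcs, size);
	}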
diff --git a/tools/testing/selftests/arm64/mte/.gitignore b/tools/testing/selftests/arm64/mte/.gitignore
index bc3ac63f3314..052d0f9f92b3 100644
--- a/tools/testing/selftests/arm64/mte/.gitignore
+++ b/tools/testing/selftests/arm64/mte/.gitignore
@@ -1,6 +1,8 @@
check_buffer_fill
+check_gcr_el1_cswitch
check_tags_inclusion
check_child_memory
check_mmap_options
+check_prctl
check_ksm_options
check_user_mem
diff --git a/tools/testing/selftests/arm64/mte/Makefile b/tools/testing/selftests/arm64/mte/Makefile
index 0b3af552632a..0d7ac3db8390 100644
--- a/tools/testing/selftests/arm64/mte/Makefile
+++ b/tools/testing/selftests/arm64/mte/Makefile
@@ -1,29 +1,41 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2020 ARM Limited
-CFLAGS += -std=gnu99 -I. -lpthread
+CFLAGS += -std=gnu99 -I. -pthread
+LDFLAGS += -pthread
SRCS := $(filter-out mte_common_util.c,$(wildcard *.c))
PROGS := $(patsubst %.c,%,$(SRCS))
-#Add mte compiler option
-ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep gcc),)
-CFLAGS += -march=armv8.5-a+memtag
+ifeq ($(LLVM),)
+# For GCC check that the toolchain has MTE support.
+
+# preserve CC value from top level Makefile
+ifeq ($(CC),cc)
+CC := $(CROSS_COMPILE)gcc
endif
#check if the compiler works well
-mte_cc_support := $(shell if ($(CC) $(CFLAGS) -E -x c /dev/null -o /dev/null 2>&1) then echo "1"; fi)
+mte_cc_support := $(shell if ($(CC) $(CFLAGS) -march=armv8.5-a+memtag -E -x c /dev/null -o /dev/null 2>&1) then echo "1"; fi)
+
+else
+
+# All supported clang versions also support MTE.
+mte_cc_support := 1
+
+endif
ifeq ($(mte_cc_support),1)
# Generated binaries to be installed by top KSFT script
TEST_GEN_PROGS := $(PROGS)
-# Get Kernel headers installed and use them.
-KSFT_KHDR_INSTALL := 1
+else
+ $(warning compiler "$(CC)" does not support the ARMv8.5 MTE extension.)
+ $(warning test program "mte" will not be created.)
endif
# Include KSFT lib.mk.
include ../../lib.mk
ifeq ($(mte_cc_support),1)
-$(TEST_GEN_PROGS): mte_common_util.c mte_common_util.h mte_helper.S
+$(TEST_GEN_PROGS): mte_common_util.c mte_helper.S
endif
diff --git a/tools/testing/selftests/arm64/mte/check_buffer_fill.c b/tools/testing/selftests/arm64/mte/check_buffer_fill.c
index c9fa141ebdcc..ff4e07503349 100644
--- a/tools/testing/selftests/arm64/mte/check_buffer_fill.c
+++ b/tools/testing/selftests/arm64/mte/check_buffer_fill.c
@@ -31,8 +31,8 @@ static int check_buffer_by_byte(int mem_type, int mode)
int i, j, item;
bool err;
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
- item = sizeof(sizes)/sizeof(int);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
+ item = ARRAY_SIZE(sizes);
for (i = 0; i < item; i++) {
ptr = (char *)mte_allocate_memory(sizes[i], mem_type, 0, true);
@@ -68,8 +68,8 @@ static int check_buffer_underflow_by_byte(int mem_type, int mode,
bool err;
char *und_ptr = NULL;
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
- item = sizeof(sizes)/sizeof(int);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
+ item = ARRAY_SIZE(sizes);
for (i = 0; i < item; i++) {
ptr = (char *)mte_allocate_memory_tag_range(sizes[i], mem_type, 0,
underflow_range, 0);
@@ -81,7 +81,7 @@ static int check_buffer_underflow_by_byte(int mem_type, int mode,
last_index = 0;
/* Set some value in tagged memory and make the buffer underflow */
for (j = sizes[i] - 1; (j >= -underflow_range) &&
- (cur_mte_cxt.fault_valid == false); j--) {
+ (!cur_mte_cxt.fault_valid); j--) {
ptr[j] = '1';
last_index = j;
}
@@ -91,7 +91,7 @@ static int check_buffer_underflow_by_byte(int mem_type, int mode,
for (j = 0; j < sizes[i]; j++) {
if (ptr[j] != '1') {
err = true;
- ksft_print_msg("Buffer is not filled at index:%d of ptr:0x%lx\n",
+ ksft_print_msg("Buffer is not filled at index:%d of ptr:0x%p\n",
j, ptr);
break;
}
@@ -164,8 +164,8 @@ static int check_buffer_overflow_by_byte(int mem_type, int mode,
size_t tagged_size, overflow_size;
char *over_ptr = NULL;
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
- item = sizeof(sizes)/sizeof(int);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
+ item = ARRAY_SIZE(sizes);
for (i = 0; i < item; i++) {
ptr = (char *)mte_allocate_memory_tag_range(sizes[i], mem_type, 0,
0, overflow_range);
@@ -189,7 +189,7 @@ static int check_buffer_overflow_by_byte(int mem_type, int mode,
for (j = 0; j < sizes[i]; j++) {
if (ptr[j] != '1') {
err = true;
- ksft_print_msg("Buffer is not filled at index:%d of ptr:0x%lx\n",
+ ksft_print_msg("Buffer is not filled at index:%d of ptr:0x%p\n",
j, ptr);
break;
}
@@ -337,8 +337,8 @@ static int check_buffer_by_block(int mem_type, int mode)
{
int i, item, result = KSFT_PASS;
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
- item = sizeof(sizes)/sizeof(int);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
+ item = ARRAY_SIZE(sizes);
cur_mte_cxt.fault_valid = false;
for (i = 0; i < item; i++) {
result = check_buffer_by_block_iterate(mem_type, mode, sizes[i]);
@@ -366,9 +366,9 @@ static int check_memory_initial_tags(int mem_type, int mode, int mapping)
{
char *ptr;
int run, fd;
- int total = sizeof(sizes)/sizeof(int);
+ int total = ARRAY_SIZE(sizes);
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
for (run = 0; run < total; run++) {
/* check initial tags for anonymous mmap */
ptr = (char *)mte_allocate_memory(sizes[run], mem_type, mapping, false);
@@ -404,7 +404,7 @@ int main(int argc, char *argv[])
{
int err;
size_t page_size = getpagesize();
- int item = sizeof(sizes)/sizeof(int);
+ int item = ARRAY_SIZE(sizes);
sizes[item - 3] = page_size - 1;
sizes[item - 2] = page_size;
@@ -415,7 +415,7 @@ int main(int argc, char *argv[])
return err;
/* Register SIGSEGV handler */
- mte_register_signal(SIGSEGV, mte_default_handler);
+ mte_register_signal(SIGSEGV, mte_default_handler, false);
/* Set test plan */
ksft_set_plan(20);
diff --git a/tools/testing/selftests/arm64/mte/check_child_memory.c b/tools/testing/selftests/arm64/mte/check_child_memory.c
index 43bd94f853ba..5e97ee792e4d 100644
--- a/tools/testing/selftests/arm64/mte/check_child_memory.c
+++ b/tools/testing/selftests/arm64/mte/check_child_memory.c
@@ -85,10 +85,10 @@ static int check_child_memory_mapping(int mem_type, int mode, int mapping)
{
char *ptr;
int run, result;
- int item = sizeof(sizes)/sizeof(int);
+ int item = ARRAY_SIZE(sizes);
- item = sizeof(sizes)/sizeof(int);
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ item = ARRAY_SIZE(sizes);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
for (run = 0; run < item; run++) {
ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,
UNDERFLOW, OVERFLOW);
@@ -107,9 +107,9 @@ static int check_child_file_mapping(int mem_type, int mode, int mapping)
{
char *ptr, *map_ptr;
int run, fd, map_size, result = KSFT_PASS;
- int total = sizeof(sizes)/sizeof(int);
+ int total = ARRAY_SIZE(sizes);
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
for (run = 0; run < total; run++) {
fd = create_temp_file();
if (fd == -1)
@@ -144,7 +144,7 @@ static int check_child_file_mapping(int mem_type, int mode, int mapping)
int main(int argc, char *argv[])
{
int err;
- int item = sizeof(sizes)/sizeof(int);
+ int item = ARRAY_SIZE(sizes);
page_size = getpagesize();
if (!page_size) {
@@ -160,8 +160,8 @@ int main(int argc, char *argv[])
return err;
/* Register SIGSEGV handler */
- mte_register_signal(SIGSEGV, mte_default_handler);
- mte_register_signal(SIGBUS, mte_default_handler);
+ mte_register_signal(SIGSEGV, mte_default_handler, false);
+ mte_register_signal(SIGBUS, mte_default_handler, false);
/* Set test plan */
ksft_set_plan(12);
diff --git a/tools/testing/selftests/arm64/mte/check_gcr_el1_cswitch.c b/tools/testing/selftests/arm64/mte/check_gcr_el1_cswitch.c
index a876db1f096a..325bca0de0f6 100644
--- a/tools/testing/selftests/arm64/mte/check_gcr_el1_cswitch.c
+++ b/tools/testing/selftests/arm64/mte/check_gcr_el1_cswitch.c
@@ -19,17 +19,6 @@
#include "kselftest.h"
#include "mte_common_util.h"
-#define PR_SET_TAGGED_ADDR_CTRL 55
-#define PR_GET_TAGGED_ADDR_CTRL 56
-# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
-# define PR_MTE_TCF_SHIFT 1
-# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
-# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
-# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
-# define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
-# define PR_MTE_TAG_SHIFT 3
-# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
-
#include "mte_def.h"
#define NUM_ITERATIONS 1024
diff --git a/tools/testing/selftests/arm64/mte/check_hugetlb_options.c b/tools/testing/selftests/arm64/mte/check_hugetlb_options.c
new file mode 100644
index 000000000000..aad1234c7e0f
--- /dev/null
+++ b/tools/testing/selftests/arm64/mte/check_hugetlb_options.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2024 Ampere Computing LLC
+
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ucontext.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include "kselftest.h"
+#include "mte_common_util.h"
+#include "mte_def.h"
+
+#define TAG_CHECK_ON 0
+#define TAG_CHECK_OFF 1
+
+static unsigned long default_huge_page_size(void)
+{
+ unsigned long hps = 0;
+ char *line = NULL;
+ size_t linelen = 0;
+ FILE *f = fopen("/proc/meminfo", "r");
+
+ if (!f)
+ return 0;
+ while (getline(&line, &linelen, f) > 0) {
+ if (sscanf(line, "Hugepagesize: %lu kB", &hps) == 1) {
+ hps <<= 10;
+ break;
+ }
+ }
+
+ free(line);
+ fclose(f);
+ return hps;
+}
+
+static bool is_hugetlb_allocated(void)
+{
+ unsigned long hps = 0;
+ char *line = NULL;
+ size_t linelen = 0;
+ FILE *f = fopen("/proc/meminfo", "r");
+
+ if (!f)
+ return false;
+ while (getline(&line, &linelen, f) > 0) {
+ if (sscanf(line, "Hugetlb: %lu kB", &hps) == 1) {
+ hps <<= 10;
+ break;
+ }
+ }
+
+ free(line);
+ fclose(f);
+
+ if (hps > 0)
+ return true;
+
+ return false;
+}
+
+static void write_sysfs(char *str, unsigned long val)
+{
+ FILE *f;
+
+ f = fopen(str, "w");
+ if (!f) {
+ ksft_print_msg("ERR: missing %s\n", str);
+ return;
+ }
+ fprintf(f, "%lu", val);
+ fclose(f);
+}
+
+static void allocate_hugetlb(void)
+{
+ write_sysfs("/proc/sys/vm/nr_hugepages", 2);
+}
+
+static void free_hugetlb(void)
+{
+ write_sysfs("/proc/sys/vm/nr_hugepages", 0);
+}
+
+static int check_child_tag_inheritance(char *ptr, int size, int mode)
+{
+ int i, parent_tag, child_tag, fault, child_status;
+ pid_t child;
+
+ parent_tag = MT_FETCH_TAG((uintptr_t)ptr);
+ fault = 0;
+
+ child = fork();
+ if (child == -1) {
+ ksft_print_msg("FAIL: child process creation\n");
+ return KSFT_FAIL;
+ } else if (child == 0) {
+ mte_initialize_current_context(mode, (uintptr_t)ptr, size);
+ /* Do copy on write */
+ memset(ptr, '1', size);
+ mte_wait_after_trig();
+ if (cur_mte_cxt.fault_valid) {
+ fault = 1;
+ goto check_child_tag_inheritance_err;
+ }
+ for (i = 0; i < size; i += MT_GRANULE_SIZE) {
+ child_tag = MT_FETCH_TAG((uintptr_t)(mte_get_tag_address(ptr + i)));
+ if (parent_tag != child_tag) {
+ ksft_print_msg("FAIL: child mte tag (%d) mismatch\n", i);
+ fault = 1;
+ goto check_child_tag_inheritance_err;
+ }
+ }
+check_child_tag_inheritance_err:
+ _exit(fault);
+ }
+ /* Wait for child process to terminate */
+ wait(&child_status);
+ if (WIFEXITED(child_status))
+ fault = WEXITSTATUS(child_status);
+ else
+ fault = 1;
+ return (fault) ? KSFT_FAIL : KSFT_PASS;
+}
+
+static int check_mte_memory(char *ptr, int size, int mode, int tag_check)
+{
+ mte_initialize_current_context(mode, (uintptr_t)ptr, size);
+ memset(ptr, '1', size);
+ mte_wait_after_trig();
+ if (cur_mte_cxt.fault_valid)
+ return KSFT_FAIL;
+
+ return KSFT_PASS;
+}
+
+static int check_hugetlb_memory_mapping(int mem_type, int mode, int mapping, int tag_check)
+{
+ char *ptr, *map_ptr;
+ int result;
+ unsigned long map_size;
+
+ map_size = default_huge_page_size();
+
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
+ map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false);
+ if (check_allocated_memory(map_ptr, map_size, mem_type, false) != KSFT_PASS)
+ return KSFT_FAIL;
+
+ mte_initialize_current_context(mode, (uintptr_t)map_ptr, map_size);
+ /* Only mte enabled memory will allow tag insertion */
+ ptr = mte_insert_tags((void *)map_ptr, map_size);
+ if (!ptr || cur_mte_cxt.fault_valid) {
+ ksft_print_msg("FAIL: Insert tags on anonymous mmap memory\n");
+ munmap((void *)map_ptr, map_size);
+ return KSFT_FAIL;
+ }
+ result = check_mte_memory(ptr, map_size, mode, tag_check);
+ mte_clear_tags((void *)ptr, map_size);
+ mte_free_memory((void *)map_ptr, map_size, mem_type, false);
+ if (result == KSFT_FAIL)
+ return KSFT_FAIL;
+
+ return KSFT_PASS;
+}
+
+static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)
+{
+ char *map_ptr;
+ int prot_flag, result;
+ unsigned long map_size;
+
+ prot_flag = PROT_READ | PROT_WRITE;
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
+ map_size = default_huge_page_size();
+ map_ptr = (char *)mte_allocate_memory_tag_range(map_size, mem_type, mapping,
+ 0, 0);
+ if (check_allocated_memory_range(map_ptr, map_size, mem_type,
+ 0, 0) != KSFT_PASS)
+ return KSFT_FAIL;
+ /* Try to clear PROT_MTE property and verify it by tag checking */
+ if (mprotect(map_ptr, map_size, prot_flag)) {
+ mte_free_memory_tag_range((void *)map_ptr, map_size, mem_type,
+ 0, 0);
+ ksft_print_msg("FAIL: mprotect not ignoring clear PROT_MTE property\n");
+ return KSFT_FAIL;
+ }
+ result = check_mte_memory(map_ptr, map_size, mode, TAG_CHECK_ON);
+ mte_free_memory_tag_range((void *)map_ptr, map_size, mem_type, 0, 0);
+ if (result != KSFT_PASS)
+ return KSFT_FAIL;
+
+ return KSFT_PASS;
+}
+
+static int check_child_hugetlb_memory_mapping(int mem_type, int mode, int mapping)
+{
+ char *ptr;
+ int result;
+ unsigned long map_size;
+
+ map_size = default_huge_page_size();
+
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
+ ptr = (char *)mte_allocate_memory_tag_range(map_size, mem_type, mapping,
+ 0, 0);
+ if (check_allocated_memory_range(ptr, map_size, mem_type,
+ 0, 0) != KSFT_PASS)
+ return KSFT_FAIL;
+ result = check_child_tag_inheritance(ptr, map_size, mode);
+ mte_free_memory_tag_range((void *)ptr, map_size, mem_type, 0, 0);
+ if (result == KSFT_FAIL)
+ return result;
+
+ return KSFT_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+ int err;
+ void *map_ptr;
+ unsigned long map_size;
+
+ err = mte_default_setup();
+ if (err)
+ return err;
+
+ /* Register signal handlers */
+ mte_register_signal(SIGBUS, mte_default_handler, false);
+ mte_register_signal(SIGSEGV, mte_default_handler, false);
+
+ allocate_hugetlb();
+
+ if (!is_hugetlb_allocated()) {
+ ksft_print_msg("ERR: Unable allocate hugetlb pages\n");
+ return KSFT_FAIL;
+ }
+
+ /* Check if MTE supports hugetlb mappings */
+ map_size = default_huge_page_size();
+ map_ptr = mmap(NULL, map_size, PROT_READ | PROT_MTE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
+ if (map_ptr == MAP_FAILED)
+ ksft_exit_skip("PROT_MTE not supported with MAP_HUGETLB mappings\n");
+ else
+ munmap(map_ptr, map_size);
+
+ /* Set test plan */
+ ksft_set_plan(12);
+
+ mte_enable_pstate_tco();
+
+ evaluate_test(check_hugetlb_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE | MAP_HUGETLB, TAG_CHECK_OFF),
+ "Check hugetlb memory with private mapping, sync error mode, mmap memory and tag check off\n");
+
+ mte_disable_pstate_tco();
+ evaluate_test(check_hugetlb_memory_mapping(USE_MMAP, MTE_NONE_ERR, MAP_PRIVATE | MAP_HUGETLB, TAG_CHECK_OFF),
+ "Check hugetlb memory with private mapping, no error mode, mmap memory and tag check off\n");
+
+ evaluate_test(check_hugetlb_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE | MAP_HUGETLB, TAG_CHECK_ON),
+ "Check hugetlb memory with private mapping, sync error mode, mmap memory and tag check on\n");
+ evaluate_test(check_hugetlb_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE | MAP_HUGETLB, TAG_CHECK_ON),
+ "Check hugetlb memory with private mapping, sync error mode, mmap/mprotect memory and tag check on\n");
+ evaluate_test(check_hugetlb_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE | MAP_HUGETLB, TAG_CHECK_ON),
+ "Check hugetlb memory with private mapping, async error mode, mmap memory and tag check on\n");
+ evaluate_test(check_hugetlb_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_PRIVATE | MAP_HUGETLB, TAG_CHECK_ON),
+ "Check hugetlb memory with private mapping, async error mode, mmap/mprotect memory and tag check on\n");
+
+ evaluate_test(check_clear_prot_mte_flag(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE | MAP_HUGETLB),
+ "Check clear PROT_MTE flags with private mapping, sync error mode and mmap memory\n");
+ evaluate_test(check_clear_prot_mte_flag(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE | MAP_HUGETLB),
+ "Check clear PROT_MTE flags with private mapping and sync error mode and mmap/mprotect memory\n");
+
+ evaluate_test(check_child_hugetlb_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE | MAP_HUGETLB),
+ "Check child hugetlb memory with private mapping, sync error mode and mmap memory\n");
+ evaluate_test(check_child_hugetlb_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE | MAP_HUGETLB),
+ "Check child hugetlb memory with private mapping, async error mode and mmap memory\n");
+ evaluate_test(check_child_hugetlb_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE | MAP_HUGETLB),
+ "Check child hugetlb memory with private mapping, sync error mode and mmap/mprotect memory\n");
+ evaluate_test(check_child_hugetlb_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_PRIVATE | MAP_HUGETLB),
+ "Check child hugetlb memory with private mapping, async error mode and mmap/mprotect memory\n");
+
+ mte_restore_setup();
+ free_hugetlb();
+ ksft_print_cnts();
+ return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
+}
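
check_child_tag_inheritance() above compares MT_FETCH_TAG() results for
the parent and child mappings; on arm64 the MTE logical tag lives in VA
bits 59:56, so the comparison reduces to a single nibble. A sketch under
that assumption (MT_FETCH_TAG() itself comes from mte_def.h, which is not
shown in this patch):

	/* Sketch: the MTE logical tag occupies address bits 59:56, so two
	 * pointers share a tag iff this nibble matches. */
	static inline unsigned int logical_tag(const void *p)
	{
		return ((unsigned long)p >> 56) & 0xf;
	}

	/* e.g. inherited iff logical_tag(parent) == logical_tag(child) */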
diff --git a/tools/testing/selftests/arm64/mte/check_ksm_options.c b/tools/testing/selftests/arm64/mte/check_ksm_options.c
index 3b23c4d61d38..0cf5faef1724 100644
--- a/tools/testing/selftests/arm64/mte/check_ksm_options.c
+++ b/tools/testing/selftests/arm64/mte/check_ksm_options.c
@@ -33,7 +33,10 @@ static unsigned long read_sysfs(char *str)
ksft_print_msg("ERR: missing %s\n", str);
return 0;
}
- fscanf(f, "%lu", &val);
+ if (fscanf(f, "%lu", &val) != 1) {
+ ksft_print_msg("ERR: parsing %s\n", str);
+ val = 0;
+ }
fclose(f);
return val;
}
@@ -103,7 +106,7 @@ static int check_madvise_options(int mem_type, int mode, int mapping)
return err;
}
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
ptr = mte_allocate_memory(TEST_UNIT * page_sz, mem_type, mapping, true);
if (check_allocated_memory(ptr, TEST_UNIT * page_sz, mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
@@ -138,8 +141,8 @@ int main(int argc, char *argv[])
return KSFT_FAIL;
}
/* Register signal handlers */
- mte_register_signal(SIGBUS, mte_default_handler);
- mte_register_signal(SIGSEGV, mte_default_handler);
+ mte_register_signal(SIGBUS, mte_default_handler, false);
+ mte_register_signal(SIGSEGV, mte_default_handler, false);
/* Set test plan */
ksft_set_plan(4);
diff --git a/tools/testing/selftests/arm64/mte/check_mmap_options.c b/tools/testing/selftests/arm64/mte/check_mmap_options.c
index a04b12c21ac9..c100af3012cb 100644
--- a/tools/testing/selftests/arm64/mte/check_mmap_options.c
+++ b/tools/testing/selftests/arm64/mte/check_mmap_options.c
@@ -3,6 +3,7 @@
#define _GNU_SOURCE
+#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
@@ -23,6 +24,35 @@
#define OVERFLOW MT_GRANULE_SIZE
#define TAG_CHECK_ON 0
#define TAG_CHECK_OFF 1
+#define ATAG_CHECK_ON 1
+#define ATAG_CHECK_OFF 0
+
+#define TEST_NAME_MAX 256
+
+enum mte_mem_check_type {
+ CHECK_ANON_MEM = 0,
+ CHECK_FILE_MEM = 1,
+ CHECK_CLEAR_PROT_MTE = 2,
+};
+
+enum mte_tag_op_type {
+ TAG_OP_ALL = 0,
+ TAG_OP_STONLY = 1,
+};
+
+struct check_mmap_testcase {
+ int check_type;
+ int mem_type;
+ int mte_sync;
+ int mapping;
+ int tag_check;
+ int atag_check;
+ int tag_op;
+ bool enable_tco;
+};
+
static size_t page_size;
static int sizes[] = {
@@ -30,8 +60,17 @@ static int sizes[] = {
/* page size - 1*/ 0, /* page_size */ 0, /* page size + 1 */ 0
};
-static int check_mte_memory(char *ptr, int size, int mode, int tag_check)
+static int check_mte_memory(char *ptr, int size, int mode,
+			    int tag_check, int atag_check, int tag_op)
{
+ char buf[MT_GRANULE_SIZE];
+
+ if (!mtefar_support && atag_check == ATAG_CHECK_ON)
+ return KSFT_SKIP;
+
+ if (atag_check == ATAG_CHECK_ON)
+ ptr = mte_insert_atag(ptr);
+
mte_initialize_current_context(mode, (uintptr_t)ptr, size);
memset(ptr, '1', size);
mte_wait_after_trig();
@@ -54,17 +93,34 @@ static int check_mte_memory(char *ptr, int size, int mode, int tag_check)
if (cur_mte_cxt.fault_valid == true && tag_check == TAG_CHECK_OFF)
return KSFT_FAIL;
+ if (tag_op == TAG_OP_STONLY) {
+ mte_initialize_current_context(mode, (uintptr_t)ptr, -UNDERFLOW);
+ memcpy(buf, ptr - UNDERFLOW, MT_GRANULE_SIZE);
+ mte_wait_after_trig();
+ if (cur_mte_cxt.fault_valid == true)
+ return KSFT_FAIL;
+
+ mte_initialize_current_context(mode, (uintptr_t)ptr, size + OVERFLOW);
+ memcpy(buf, ptr + size, MT_GRANULE_SIZE);
+ mte_wait_after_trig();
+ if (cur_mte_cxt.fault_valid == true)
+ return KSFT_FAIL;
+ }
+
return KSFT_PASS;
}
-static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping, int tag_check)
+static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping,
+ int tag_check, int atag_check, int tag_op)
{
char *ptr, *map_ptr;
int run, result, map_size;
- int item = sizeof(sizes)/sizeof(int);
+ int item = ARRAY_SIZE(sizes);
- item = sizeof(sizes)/sizeof(int);
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ if (tag_op == TAG_OP_STONLY && !mtestonly_support)
+ return KSFT_SKIP;
+
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, tag_op);
for (run = 0; run < item; run++) {
map_size = sizes[run] + OVERFLOW + UNDERFLOW;
map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false);
@@ -80,23 +136,27 @@ static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping, i
munmap((void *)map_ptr, map_size);
return KSFT_FAIL;
}
- result = check_mte_memory(ptr, sizes[run], mode, tag_check);
+ result = check_mte_memory(ptr, sizes[run], mode, tag_check, atag_check, tag_op);
mte_clear_tags((void *)ptr, sizes[run]);
mte_free_memory((void *)map_ptr, map_size, mem_type, false);
- if (result == KSFT_FAIL)
- return KSFT_FAIL;
+ if (result != KSFT_PASS)
+ return result;
}
return KSFT_PASS;
}
-static int check_file_memory_mapping(int mem_type, int mode, int mapping, int tag_check)
+static int check_file_memory_mapping(int mem_type, int mode, int mapping,
+ int tag_check, int atag_check, int tag_op)
{
char *ptr, *map_ptr;
int run, fd, map_size;
- int total = sizeof(sizes)/sizeof(int);
+ int total = ARRAY_SIZE(sizes);
int result = KSFT_PASS;
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ if (tag_op == TAG_OP_STONLY && !mtestonly_support)
+ return KSFT_SKIP;
+
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, tag_op);
for (run = 0; run < total; run++) {
fd = create_temp_file();
if (fd == -1)
@@ -118,24 +178,24 @@ static int check_file_memory_mapping(int mem_type, int mode, int mapping, int ta
close(fd);
return KSFT_FAIL;
}
- result = check_mte_memory(ptr, sizes[run], mode, tag_check);
+ result = check_mte_memory(ptr, sizes[run], mode, tag_check, atag_check, tag_op);
mte_clear_tags((void *)ptr, sizes[run]);
munmap((void *)map_ptr, map_size);
close(fd);
- if (result == KSFT_FAIL)
- break;
+ if (result != KSFT_PASS)
+ return result;
}
- return result;
+ return KSFT_PASS;
}
-static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)
+static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping, int atag_check)
{
char *ptr, *map_ptr;
int run, prot_flag, result, fd, map_size;
- int total = sizeof(sizes)/sizeof(int);
+ int total = ARRAY_SIZE(sizes);
prot_flag = PROT_READ | PROT_WRITE;
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
for (run = 0; run < total; run++) {
map_size = sizes[run] + OVERFLOW + UNDERFLOW;
ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,
@@ -151,10 +211,10 @@ static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)
ksft_print_msg("FAIL: mprotect not ignoring clear PROT_MTE property\n");
return KSFT_FAIL;
}
- result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON);
+ result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON, atag_check, TAG_OP_ALL);
mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type, UNDERFLOW, OVERFLOW);
if (result != KSFT_PASS)
- return KSFT_FAIL;
+ return result;
fd = create_temp_file();
if (fd == -1)
@@ -175,19 +235,715 @@ static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)
close(fd);
return KSFT_FAIL;
}
- result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON);
+ result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON, atag_check, TAG_OP_ALL);
mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type, UNDERFLOW, OVERFLOW);
close(fd);
if (result != KSFT_PASS)
- return KSFT_FAIL;
+ return result;
}
return KSFT_PASS;
}
+const char *format_test_name(struct check_mmap_testcase *tc)
+{
+ static char test_name[TEST_NAME_MAX];
+ const char *check_type_str;
+ const char *mem_type_str;
+ const char *sync_str;
+ const char *mapping_str;
+ const char *tag_check_str;
+ const char *atag_check_str;
+ const char *tag_op_str;
+
+ switch (tc->check_type) {
+ case CHECK_ANON_MEM:
+ check_type_str = "anonymous memory";
+ break;
+ case CHECK_FILE_MEM:
+ check_type_str = "file memory";
+ break;
+ case CHECK_CLEAR_PROT_MTE:
+ check_type_str = "clear PROT_MTE flags";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (tc->mem_type) {
+ case USE_MMAP:
+ mem_type_str = "mmap";
+ break;
+ case USE_MPROTECT:
+ mem_type_str = "mmap/mprotect";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (tc->mte_sync) {
+ case MTE_NONE_ERR:
+ sync_str = "no error";
+ break;
+ case MTE_SYNC_ERR:
+ sync_str = "sync error";
+ break;
+ case MTE_ASYNC_ERR:
+ sync_str = "async error";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (tc->mapping) {
+ case MAP_SHARED:
+ mapping_str = "shared";
+ break;
+ case MAP_PRIVATE:
+ mapping_str = "private";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (tc->tag_check) {
+ case TAG_CHECK_ON:
+ tag_check_str = "tag check on";
+ break;
+ case TAG_CHECK_OFF:
+ tag_check_str = "tag check off";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (tc->atag_check) {
+ case ATAG_CHECK_ON:
+ atag_check_str = "with address tag [63:60]";
+ break;
+ case ATAG_CHECK_OFF:
+ atag_check_str = "without address tag [63:60]";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (tc->tag_op) {
+ case TAG_OP_ALL:
+ tag_op_str = "";
+ break;
+ case TAG_OP_STONLY:
+ tag_op_str = " / store-only";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ snprintf(test_name, TEST_NAME_MAX,
+ "Check %s with %s mapping, %s mode, %s memory and %s (%s%s)\n",
+ check_type_str, mapping_str, sync_str, mem_type_str,
+ tag_check_str, atag_check_str, tag_op_str);
+
+ return test_name;
+}
+
int main(int argc, char *argv[])
{
- int err;
- int item = sizeof(sizes)/sizeof(int);
+ int err, i;
+ int item = ARRAY_SIZE(sizes);
+	struct check_mmap_testcase test_cases[] = {
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_OFF,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = true,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_OFF,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = true,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_NONE_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_OFF,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_NONE_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_OFF,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_CLEAR_PROT_MTE,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_CLEAR_PROT_MTE,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_CLEAR_PROT_MTE,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_CLEAR_PROT_MTE,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ };
err = mte_default_setup();
if (err)
@@ -201,64 +957,51 @@ int main(int argc, char *argv[])
sizes[item - 2] = page_size;
sizes[item - 1] = page_size + 1;
- /* Register signal handlers */
- mte_register_signal(SIGBUS, mte_default_handler);
- mte_register_signal(SIGSEGV, mte_default_handler);
-
/* Set test plan */
- ksft_set_plan(22);
-
- mte_enable_pstate_tco();
-
- evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
- "Check anonymous memory with private mapping, sync error mode, mmap memory and tag check off\n");
- evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
- "Check file memory with private mapping, sync error mode, mmap/mprotect memory and tag check off\n");
-
- mte_disable_pstate_tco();
- evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_NONE_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
- "Check anonymous memory with private mapping, no error mode, mmap memory and tag check off\n");
- evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_NONE_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
- "Check file memory with private mapping, no error mode, mmap/mprotect memory and tag check off\n");
-
- evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check anonymous memory with private mapping, sync error mode, mmap memory and tag check on\n");
- evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check anonymous memory with private mapping, sync error mode, mmap/mprotect memory and tag check on\n");
- evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check anonymous memory with shared mapping, sync error mode, mmap memory and tag check on\n");
- evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check anonymous memory with shared mapping, sync error mode, mmap/mprotect memory and tag check on\n");
- evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check anonymous memory with private mapping, async error mode, mmap memory and tag check on\n");
- evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check anonymous memory with private mapping, async error mode, mmap/mprotect memory and tag check on\n");
- evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check anonymous memory with shared mapping, async error mode, mmap memory and tag check on\n");
- evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check anonymous memory with shared mapping, async error mode, mmap/mprotect memory and tag check on\n");
-
- evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check file memory with private mapping, sync error mode, mmap memory and tag check on\n");
- evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check file memory with private mapping, sync error mode, mmap/mprotect memory and tag check on\n");
- evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check file memory with shared mapping, sync error mode, mmap memory and tag check on\n");
- evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check file memory with shared mapping, sync error mode, mmap/mprotect memory and tag check on\n");
- evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check file memory with private mapping, async error mode, mmap memory and tag check on\n");
- evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check file memory with private mapping, async error mode, mmap/mprotect memory and tag check on\n");
- evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check file memory with shared mapping, async error mode, mmap memory and tag check on\n");
- evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check file memory with shared mapping, async error mode, mmap/mprotect memory and tag check on\n");
-
- evaluate_test(check_clear_prot_mte_flag(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
- "Check clear PROT_MTE flags with private mapping, sync error mode and mmap memory\n");
- evaluate_test(check_clear_prot_mte_flag(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE),
- "Check clear PROT_MTE flags with private mapping and sync error mode and mmap/mprotect memory\n");
+ ksft_set_plan(ARRAY_SIZE(test_cases));
+
+	for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ /* Register signal handlers */
+ mte_register_signal(SIGBUS, mte_default_handler,
+ test_cases[i].atag_check == ATAG_CHECK_ON);
+ mte_register_signal(SIGSEGV, mte_default_handler,
+ test_cases[i].atag_check == ATAG_CHECK_ON);
+
+ if (test_cases[i].enable_tco)
+ mte_enable_pstate_tco();
+ else
+ mte_disable_pstate_tco();
+
+ switch (test_cases[i].check_type) {
+ case CHECK_ANON_MEM:
+ evaluate_test(check_anonymous_memory_mapping(test_cases[i].mem_type,
+ test_cases[i].mte_sync,
+ test_cases[i].mapping,
+ test_cases[i].tag_check,
+ test_cases[i].atag_check,
+ test_cases[i].tag_op),
+ format_test_name(&test_cases[i]));
+ break;
+ case CHECK_FILE_MEM:
+ evaluate_test(check_file_memory_mapping(test_cases[i].mem_type,
+ test_cases[i].mte_sync,
+ test_cases[i].mapping,
+ test_cases[i].tag_check,
+ test_cases[i].atag_check,
+ test_cases[i].tag_op),
+ format_test_name(&test_cases[i]));
+ break;
+ case CHECK_CLEAR_PROT_MTE:
+ evaluate_test(check_clear_prot_mte_flag(test_cases[i].mem_type,
+ test_cases[i].mte_sync,
+ test_cases[i].mapping,
+ test_cases[i].atag_check),
+ format_test_name(&test_cases[i]));
+ break;
+ default:
+ exit(KSFT_FAIL);
+ }
+ }
mte_restore_setup();
ksft_print_cnts();
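
The rewrite above replaces a long list of hand-written evaluate_test() calls
with a table of testcase structs and one dispatch loop, so a new configuration
is a new initializer rather than a new call site. The shape of that pattern,
reduced to a runnable sketch with hypothetical fields and names:

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	struct testcase {
		const char *name;
		int mode;	/* hypothetical knob, e.g. sync vs. async */
	};

	static void run_one(const struct testcase *tc)
	{
		printf("running %s (mode %d)\n", tc->name, tc->mode);
	}

	int main(void)
	{
		static const struct testcase cases[] = {
			{ "anon, sync",  0 },
			{ "file, async", 1 },
		};
		unsigned int i;

		/* one dispatch loop replaces many near-identical call sites */
		for (i = 0; i < ARRAY_SIZE(cases); i++)
			run_one(&cases[i]);
		return 0;
	}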
diff --git a/tools/testing/selftests/arm64/mte/check_prctl.c b/tools/testing/selftests/arm64/mte/check_prctl.c
new file mode 100644
index 000000000000..f7f320defa7b
--- /dev/null
+++ b/tools/testing/selftests/arm64/mte/check_prctl.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2022 ARM Limited
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+
+#include <asm/hwcap.h>
+
+#include "kselftest.h"
+
+#ifndef AT_HWCAP3
+#define AT_HWCAP3 29
+#endif
+
+static int set_tagged_addr_ctrl(int val)
+{
+ int ret;
+
+ ret = prctl(PR_SET_TAGGED_ADDR_CTRL, val, 0, 0, 0);
+ if (ret < 0)
+ ksft_print_msg("PR_SET_TAGGED_ADDR_CTRL: failed %d %d (%s)\n",
+ ret, errno, strerror(errno));
+ return ret;
+}
+
+static int get_tagged_addr_ctrl(void)
+{
+ int ret;
+
+ ret = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
+ if (ret < 0)
+ ksft_print_msg("PR_GET_TAGGED_ADDR_CTRL failed: %d %d (%s)\n",
+ ret, errno, strerror(errno));
+ return ret;
+}
+
+/*
+ * Read the current mode without having done any configuration, should
+ * run first.
+ */
+void check_basic_read(void)
+{
+ int ret;
+
+ ret = get_tagged_addr_ctrl();
+ if (ret < 0) {
+ ksft_test_result_fail("check_basic_read\n");
+ return;
+ }
+
+ if (ret & PR_MTE_TCF_SYNC)
+ ksft_print_msg("SYNC enabled\n");
+ if (ret & PR_MTE_TCF_ASYNC)
+ ksft_print_msg("ASYNC enabled\n");
+
+ /* Any configuration is valid */
+ ksft_test_result_pass("check_basic_read\n");
+}
+
+/*
+ * Attempt to set a specified combination of modes.
+ */
+void set_mode_test(const char *name, int hwcap2, int hwcap3, int mask)
+{
+ int ret;
+
+ if ((getauxval(AT_HWCAP2) & hwcap2) != hwcap2) {
+ ksft_test_result_skip("%s\n", name);
+ return;
+ }
+
+ if ((getauxval(AT_HWCAP3) & hwcap3) != hwcap3) {
+ ksft_test_result_skip("%s\n", name);
+ return;
+ }
+
+ ret = set_tagged_addr_ctrl(mask);
+ if (ret < 0) {
+ ksft_test_result_fail("%s\n", name);
+ return;
+ }
+
+ ret = get_tagged_addr_ctrl();
+ if (ret < 0) {
+ ksft_test_result_fail("%s\n", name);
+ return;
+ }
+
+ if ((ret & (PR_MTE_TCF_MASK | PR_MTE_STORE_ONLY)) == mask) {
+ ksft_test_result_pass("%s\n", name);
+ } else {
+ ksft_print_msg("Got %x, expected %x\n",
+			       (ret & (int)(PR_MTE_TCF_MASK | PR_MTE_STORE_ONLY)), mask);
+ ksft_test_result_fail("%s\n", name);
+ }
+}
+
+struct mte_mode {
+ int mask;
+ int hwcap2;
+ int hwcap3;
+ const char *name;
+} mte_modes[] = {
+ { PR_MTE_TCF_NONE, 0, 0, "NONE" },
+ { PR_MTE_TCF_SYNC, HWCAP2_MTE, 0, "SYNC" },
+ { PR_MTE_TCF_ASYNC, HWCAP2_MTE, 0, "ASYNC" },
+ { PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC, HWCAP2_MTE, 0, "SYNC+ASYNC" },
+ { PR_MTE_TCF_SYNC | PR_MTE_STORE_ONLY, HWCAP2_MTE, HWCAP3_MTE_STORE_ONLY, "SYNC+STONLY" },
+ { PR_MTE_TCF_ASYNC | PR_MTE_STORE_ONLY, HWCAP2_MTE, HWCAP3_MTE_STORE_ONLY, "ASYNC+STONLY" },
+ { PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC | PR_MTE_STORE_ONLY, HWCAP2_MTE, HWCAP3_MTE_STORE_ONLY, "SYNC+ASYNC+STONLY" },
+};
+
+int main(void)
+{
+ int i;
+
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(mte_modes));
+
+ check_basic_read();
+ for (i = 0; i < ARRAY_SIZE(mte_modes); i++)
+ set_mode_test(mte_modes[i].name, mte_modes[i].hwcap2, mte_modes[i].hwcap3,
+ mte_modes[i].mask);
+
+ ksft_print_cnts();
+
+ return 0;
+}
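
check_prctl.c exercises round-trips through PR_SET/GET_TAGGED_ADDR_CTRL. For
reference, enabling synchronous tag checking from a process looks roughly like
the sketch below; the constants come from the kernel UAPI (guarded here in
case older headers lack them) and the 0xfffe inclusion mask, allowing tags
1-15, is only an example:

	#include <sys/prctl.h>

	#ifndef PR_SET_TAGGED_ADDR_CTRL
	#define PR_SET_TAGGED_ADDR_CTRL	55
	#endif
	#ifndef PR_TAGGED_ADDR_ENABLE
	#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
	#endif
	#ifndef PR_MTE_TCF_SYNC
	#define PR_MTE_TCF_SYNC		(1UL << 1)
	#endif
	#ifndef PR_MTE_TAG_SHIFT
	#define PR_MTE_TAG_SHIFT	3
	#endif

	static int enable_sync_mte(void)
	{
		unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
				     (0xfffeUL << PR_MTE_TAG_SHIFT); /* tags 1..15 */

		return prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0);
	}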
diff --git a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c
index deaef1f61076..4b764f2a8185 100644
--- a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c
+++ b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c
@@ -23,10 +23,13 @@ static int verify_mte_pointer_validity(char *ptr, int mode)
{
mte_initialize_current_context(mode, (uintptr_t)ptr, BUFFER_SIZE);
/* Check the validity of the tagged pointer */
- memset((void *)ptr, '1', BUFFER_SIZE);
+ memset(ptr, '1', BUFFER_SIZE);
mte_wait_after_trig();
- if (cur_mte_cxt.fault_valid)
+ if (cur_mte_cxt.fault_valid) {
+ ksft_print_msg("Unexpected fault recorded for %p-%p in mode %x\n",
+ ptr, ptr + BUFFER_SIZE, mode);
return KSFT_FAIL;
+ }
/* Proceed further for nonzero tags */
if (!MT_FETCH_TAG((uintptr_t)ptr))
return KSFT_PASS;
@@ -34,30 +37,35 @@ static int verify_mte_pointer_validity(char *ptr, int mode)
/* Check the validity outside the range */
ptr[BUFFER_SIZE] = '2';
mte_wait_after_trig();
- if (!cur_mte_cxt.fault_valid)
+ if (!cur_mte_cxt.fault_valid) {
+ ksft_print_msg("No valid fault recorded for %p in mode %x\n",
+ ptr, mode);
return KSFT_FAIL;
- else
+ } else {
return KSFT_PASS;
+ }
}
static int check_single_included_tags(int mem_type, int mode)
{
char *ptr;
- int tag, run, result = KSFT_PASS;
+ int tag, run, ret, result = KSFT_PASS;
- ptr = (char *)mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false);
+ ptr = mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false);
if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE,
mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
for (tag = 0; (tag < MT_TAG_COUNT) && (result == KSFT_PASS); tag++) {
- mte_switch_mode(mode, MT_INCLUDE_VALID_TAG(tag));
+ ret = mte_switch_mode(mode, MT_INCLUDE_VALID_TAG(tag), false);
+ if (ret != 0)
+ result = KSFT_FAIL;
 		/* Try to catch an excluded tag by a number of tries. */
for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {
- ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE);
+ ptr = mte_insert_tags(ptr, BUFFER_SIZE);
/* Check tag value */
if (MT_FETCH_TAG((uintptr_t)ptr) == tag) {
- ksft_print_msg("FAIL: wrong tag = 0x%x with include mask=0x%x\n",
+ ksft_print_msg("FAIL: wrong tag = 0x%lx with include mask=0x%x\n",
MT_FETCH_TAG((uintptr_t)ptr),
MT_INCLUDE_VALID_TAG(tag));
result = KSFT_FAIL;
@@ -66,7 +74,7 @@ static int check_single_included_tags(int mem_type, int mode)
result = verify_mte_pointer_validity(ptr, mode);
}
}
- mte_free_memory_tag_range((void *)ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE);
+ mte_free_memory_tag_range(ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE);
return result;
}
@@ -76,20 +84,20 @@ static int check_multiple_included_tags(int mem_type, int mode)
int tag, run, result = KSFT_PASS;
unsigned long excl_mask = 0;
- ptr = (char *)mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false);
+ ptr = mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false);
if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE,
mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
for (tag = 0; (tag < MT_TAG_COUNT - 1) && (result == KSFT_PASS); tag++) {
excl_mask |= 1 << tag;
- mte_switch_mode(mode, MT_INCLUDE_VALID_TAGS(excl_mask));
+ mte_switch_mode(mode, MT_INCLUDE_VALID_TAGS(excl_mask), false);
 		/* Try to catch an excluded tag by a number of tries. */
for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {
- ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE);
+ ptr = mte_insert_tags(ptr, BUFFER_SIZE);
/* Check tag value */
if (MT_FETCH_TAG((uintptr_t)ptr) < tag) {
- ksft_print_msg("FAIL: wrong tag = 0x%x with include mask=0x%x\n",
+ ksft_print_msg("FAIL: wrong tag = 0x%lx with include mask=0x%lx\n",
MT_FETCH_TAG((uintptr_t)ptr),
MT_INCLUDE_VALID_TAGS(excl_mask));
result = KSFT_FAIL;
@@ -98,21 +106,23 @@ static int check_multiple_included_tags(int mem_type, int mode)
result = verify_mte_pointer_validity(ptr, mode);
}
}
- mte_free_memory_tag_range((void *)ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE);
+ mte_free_memory_tag_range(ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE);
return result;
}
static int check_all_included_tags(int mem_type, int mode)
{
char *ptr;
- int run, result = KSFT_PASS;
+ int run, ret, result = KSFT_PASS;
- ptr = (char *)mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false);
+ ptr = mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false);
if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE,
mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
- mte_switch_mode(mode, MT_INCLUDE_TAG_MASK);
+ ret = mte_switch_mode(mode, MT_INCLUDE_TAG_MASK, false);
+ if (ret != 0)
+ return KSFT_FAIL;
 	/* Try to catch an excluded tag by a number of tries. */
for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {
ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE);
@@ -122,20 +132,22 @@ static int check_all_included_tags(int mem_type, int mode)
*/
result = verify_mte_pointer_validity(ptr, mode);
}
- mte_free_memory_tag_range((void *)ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE);
+ mte_free_memory_tag_range(ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE);
return result;
}
static int check_none_included_tags(int mem_type, int mode)
{
char *ptr;
- int run;
+ int run, ret;
- ptr = (char *)mte_allocate_memory(BUFFER_SIZE, mem_type, 0, false);
+ ptr = mte_allocate_memory(BUFFER_SIZE, mem_type, 0, false);
if (check_allocated_memory(ptr, BUFFER_SIZE, mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
- mte_switch_mode(mode, MT_EXCLUDE_TAG_MASK);
+ ret = mte_switch_mode(mode, MT_EXCLUDE_TAG_MASK, false);
+ if (ret != 0)
+ return KSFT_FAIL;
 	/* Try to catch an excluded tag by a number of tries. */
for (run = 0; run < RUNS; run++) {
ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE);
@@ -147,12 +159,12 @@ static int check_none_included_tags(int mem_type, int mode)
}
mte_initialize_current_context(mode, (uintptr_t)ptr, BUFFER_SIZE);
/* Check the write validity of the untagged pointer */
- memset((void *)ptr, '1', BUFFER_SIZE);
+ memset(ptr, '1', BUFFER_SIZE);
mte_wait_after_trig();
if (cur_mte_cxt.fault_valid)
break;
}
- mte_free_memory((void *)ptr, BUFFER_SIZE, mem_type, false);
+ mte_free_memory(ptr, BUFFER_SIZE, mem_type, false);
if (cur_mte_cxt.fault_valid)
return KSFT_FAIL;
else
@@ -168,7 +180,7 @@ int main(int argc, char *argv[])
return err;
/* Register SIGSEGV handler */
- mte_register_signal(SIGSEGV, mte_default_handler);
+ mte_register_signal(SIGSEGV, mte_default_handler, false);
/* Set test plan */
ksft_set_plan(4);
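
The inclusion-mask plumbing above is plain bit arithmetic: including only tag
N means setting bit N of a 16-bit mask, which mte_switch_mode() later shifts
into the prctl control word by PR_MTE_TAG_SHIFT. A worked example (assuming
the selftest convention that bit N of the mask enables tag N):

	#include <stdio.h>

	#define PR_MTE_TAG_SHIFT	3	/* from the kernel UAPI */

	int main(void)
	{
		unsigned long only_tag3 = 1UL << 3;	/* allow tag 3 only */
		unsigned long tags_1_to_3 = (1UL << 1) | (1UL << 2) | (1UL << 3);

		printf("prctl bits (tag 3 only): 0x%lx\n",
		       only_tag3 << PR_MTE_TAG_SHIFT);		/* 0x40 */
		printf("prctl bits (tags 1..3): 0x%lx\n",
		       tags_1_to_3 << PR_MTE_TAG_SHIFT);	/* 0x70 */
		return 0;
	}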
diff --git a/tools/testing/selftests/arm64/mte/check_user_mem.c b/tools/testing/selftests/arm64/mte/check_user_mem.c
index 4bfa80f2a8c3..fb7936c4e097 100644
--- a/tools/testing/selftests/arm64/mte/check_user_mem.c
+++ b/tools/testing/selftests/arm64/mte/check_user_mem.c
@@ -3,6 +3,7 @@
#define _GNU_SOURCE
+#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
@@ -11,6 +12,7 @@
#include <string.h>
#include <ucontext.h>
#include <unistd.h>
+#include <sys/uio.h>
#include <sys/mman.h>
#include "kselftest.h"
@@ -19,21 +21,36 @@
static size_t page_sz;
-static int check_usermem_access_fault(int mem_type, int mode, int mapping)
+#define TEST_NAME_MAX 100
+
+enum test_type {
+ READ_TEST,
+ WRITE_TEST,
+ READV_TEST,
+ WRITEV_TEST,
+ LAST_TEST,
+};
+
+static int check_usermem_access_fault(int mem_type, int mode, int mapping,
+ int tag_offset, int tag_len,
+ enum test_type test_type)
{
int fd, i, err;
char val = 'A';
- size_t len, read_len;
+ ssize_t len, syscall_len;
void *ptr, *ptr_next;
+ int fileoff, ptroff, size;
+ int sizes[] = {1, 2, 3, 8, 16, 32, 4096, page_sz};
- err = KSFT_FAIL;
+ err = KSFT_PASS;
len = 2 * page_sz;
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
fd = create_temp_file();
if (fd == -1)
return KSFT_FAIL;
for (i = 0; i < len; i++)
- write(fd, &val, sizeof(val));
+ if (write(fd, &val, sizeof(val)) != sizeof(val))
+ return KSFT_FAIL;
lseek(fd, 0, 0);
ptr = mte_allocate_memory(len, mem_type, mapping, true);
if (check_allocated_memory(ptr, len, mem_type, true) != KSFT_PASS) {
@@ -42,9 +59,9 @@ static int check_usermem_access_fault(int mem_type, int mode, int mapping)
}
mte_initialize_current_context(mode, (uintptr_t)ptr, len);
/* Copy from file into buffer with valid tag */
- read_len = read(fd, ptr, len);
+ syscall_len = read(fd, ptr, len);
mte_wait_after_trig();
- if (cur_mte_cxt.fault_valid || read_len < len)
+ if (cur_mte_cxt.fault_valid || syscall_len < len)
goto usermem_acc_err;
/* Verify same pattern is read */
for (i = 0; i < len; i++)
@@ -53,36 +70,136 @@ static int check_usermem_access_fault(int mem_type, int mode, int mapping)
if (i < len)
goto usermem_acc_err;
- /* Tag the next half of memory with different value */
- ptr_next = (void *)((unsigned long)ptr + page_sz);
+ if (!tag_len)
+ tag_len = len - tag_offset;
+ /* Tag a part of memory with different value */
+ ptr_next = (void *)((unsigned long)ptr + tag_offset);
ptr_next = mte_insert_new_tag(ptr_next);
- mte_set_tag_address_range(ptr_next, page_sz);
+ mte_set_tag_address_range(ptr_next, tag_len);
- lseek(fd, 0, 0);
- /* Copy from file into buffer with invalid tag */
- read_len = read(fd, ptr, len);
- mte_wait_after_trig();
- /*
- * Accessing user memory in kernel with invalid tag should fail in sync
- * mode without fault but may not fail in async mode as per the
- * implemented MTE userspace support in Arm64 kernel.
- */
- if (mode == MTE_SYNC_ERR &&
- !cur_mte_cxt.fault_valid && read_len < len) {
- err = KSFT_PASS;
- } else if (mode == MTE_ASYNC_ERR &&
- !cur_mte_cxt.fault_valid && read_len == len) {
- err = KSFT_PASS;
+ for (fileoff = 0; fileoff < 16; fileoff++) {
+ for (ptroff = 0; ptroff < 16; ptroff++) {
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ size = sizes[i];
+ lseek(fd, 0, 0);
+
+ /* perform file operation on buffer with invalid tag */
+ switch (test_type) {
+ case READ_TEST:
+ syscall_len = read(fd, ptr + ptroff, size);
+ break;
+ case WRITE_TEST:
+ syscall_len = write(fd, ptr + ptroff, size);
+ break;
+ case READV_TEST: {
+ struct iovec iov[1];
+ iov[0].iov_base = ptr + ptroff;
+ iov[0].iov_len = size;
+ syscall_len = readv(fd, iov, 1);
+ break;
+ }
+ case WRITEV_TEST: {
+ struct iovec iov[1];
+ iov[0].iov_base = ptr + ptroff;
+ iov[0].iov_len = size;
+ syscall_len = writev(fd, iov, 1);
+ break;
+ }
+ case LAST_TEST:
+ goto usermem_acc_err;
+ }
+
+ mte_wait_after_trig();
+ /*
+ * Accessing user memory in kernel with invalid tag should fail in sync
+ * mode without fault but may not fail in async mode as per the
+ * implemented MTE userspace support in Arm64 kernel.
+ */
+ if (cur_mte_cxt.fault_valid) {
+ goto usermem_acc_err;
+ }
+ if (mode == MTE_SYNC_ERR && syscall_len < len) {
+ /* test passed */
+ } else if (mode == MTE_ASYNC_ERR && syscall_len == size) {
+ /* test passed */
+ } else {
+ goto usermem_acc_err;
+ }
+ }
+ }
}
+
+ goto exit;
+
usermem_acc_err:
+ err = KSFT_FAIL;
+exit:
mte_free_memory((void *)ptr, len, mem_type, true);
close(fd);
return err;
}
+void format_test_name(char *name, int name_len, int type, int sync, int map,
+		      int len, int offset)
+{
+	const char *test_type;
+	const char *mte_type;
+	const char *map_type;
+
+ switch (type) {
+ case READ_TEST:
+ test_type = "read";
+ break;
+ case WRITE_TEST:
+ test_type = "write";
+ break;
+ case READV_TEST:
+ test_type = "readv";
+ break;
+ case WRITEV_TEST:
+ test_type = "writev";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (sync) {
+ case MTE_SYNC_ERR:
+ mte_type = "MTE_SYNC_ERR";
+ break;
+ case MTE_ASYNC_ERR:
+ mte_type = "MTE_ASYNC_ERR";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (map) {
+ case MAP_SHARED:
+ map_type = "MAP_SHARED";
+ break;
+ case MAP_PRIVATE:
+ map_type = "MAP_PRIVATE";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ snprintf(name, name_len,
+ "test type: %s, %s, %s, tag len: %d, tag offset: %d\n",
+ test_type, mte_type, map_type, len, offset);
+}
+
int main(int argc, char *argv[])
{
int err;
+ int t, s, m, l, o;
+ int mte_sync[] = {MTE_SYNC_ERR, MTE_ASYNC_ERR};
+ int maps[] = {MAP_SHARED, MAP_PRIVATE};
+ int tag_lens[] = {0, MT_GRANULE_SIZE};
+	int tag_offsets[] = {getpagesize(), MT_GRANULE_SIZE};	/* page_sz is not initialised yet */
+ char test_name[TEST_NAME_MAX];
page_sz = getpagesize();
if (!page_sz) {
@@ -94,20 +211,31 @@ int main(int argc, char *argv[])
return err;
/* Register signal handlers */
- mte_register_signal(SIGSEGV, mte_default_handler);
+ mte_register_signal(SIGSEGV, mte_default_handler, false);
/* Set test plan */
- ksft_set_plan(4);
+ ksft_set_plan(64);
- evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
- "Check memory access from kernel in sync mode, private mapping and mmap memory\n");
- evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
- "Check memory access from kernel in sync mode, shared mapping and mmap memory\n");
-
- evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE),
- "Check memory access from kernel in async mode, private mapping and mmap memory\n");
- evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED),
- "Check memory access from kernel in async mode, shared mapping and mmap memory\n");
+ for (t = 0; t < LAST_TEST; t++) {
+ for (s = 0; s < ARRAY_SIZE(mte_sync); s++) {
+ for (m = 0; m < ARRAY_SIZE(maps); m++) {
+ for (l = 0; l < ARRAY_SIZE(tag_lens); l++) {
+ for (o = 0; o < ARRAY_SIZE(tag_offsets); o++) {
+ int sync = mte_sync[s];
+ int map = maps[m];
+ int offset = tag_offsets[o];
+ int tag_len = tag_lens[l];
+ int res = check_usermem_access_fault(USE_MMAP, sync,
+ map, offset,
+ tag_len, t);
+ format_test_name(test_name, TEST_NAME_MAX,
+ t, sync, map, tag_len, offset);
+ evaluate_test(res, test_name);
+ }
+ }
+ }
+ }
+ }
mte_restore_setup();
ksft_print_cnts();
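
The probe loop above encodes the expected uaccess semantics: in synchronous
mode a read()/write() that hits a mismatched tag stops early (short count or
-1/EFAULT), while in asynchronous mode the copy completes and the fault is
reported out of band. The pass/fail rule applied to each probe, isolated as a
sketch (names are illustrative, not part of the test):

	#include <stdbool.h>
	#include <sys/types.h>

	/* n is the syscall return value, size the requested length */
	static bool uaccess_result_ok(bool sync_mode, ssize_t n, size_t size)
	{
		if (sync_mode)
			return n < (ssize_t)size;  /* short copy or -1 expected */
		return n == (ssize_t)size;         /* async: access completes */
	}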
diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
index 39f8908988ea..397e57dd946a 100644
--- a/tools/testing/selftests/arm64/mte/mte_common_util.c
+++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
@@ -6,6 +6,7 @@
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
+#include <time.h>
#include <unistd.h>
#include <linux/auxvec.h>
@@ -19,35 +20,64 @@
#include "mte_common_util.h"
#include "mte_def.h"
+#ifndef SA_EXPOSE_TAGBITS
+#define SA_EXPOSE_TAGBITS 0x00000800
+#endif
+
#define INIT_BUFFER_SIZE 256
struct mte_fault_cxt cur_mte_cxt;
+bool mtefar_support;
+bool mtestonly_support;
static unsigned int mte_cur_mode;
static unsigned int mte_cur_pstate_tco;
+static bool mte_cur_stonly;
void mte_default_handler(int signum, siginfo_t *si, void *uc)
{
+ struct sigaction sa;
unsigned long addr = (unsigned long)si->si_addr;
+ unsigned char si_tag, si_atag;
+
+ sigaction(signum, NULL, &sa);
+
+ if (sa.sa_flags & SA_EXPOSE_TAGBITS) {
+ si_tag = MT_FETCH_TAG(addr);
+ si_atag = MT_FETCH_ATAG(addr);
+ addr = MT_CLEAR_TAGS(addr);
+ } else {
+ si_tag = 0;
+ si_atag = 0;
+ }
if (signum == SIGSEGV) {
#ifdef DEBUG
- ksft_print_msg("INFO: SIGSEGV signal at pc=%lx, fault addr=%lx, si_code=%lx\n",
- ((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
+ ksft_print_msg("INFO: SIGSEGV signal at pc=%lx, fault addr=%lx, si_code=%lx, si_tag=%x, si_atag=%x\n",
+ ((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code, si_tag, si_atag);
#endif
if (si->si_code == SEGV_MTEAERR) {
if (cur_mte_cxt.trig_si_code == si->si_code)
cur_mte_cxt.fault_valid = true;
+ else
+ ksft_print_msg("Got unexpected SEGV_MTEAERR at pc=%llx, fault addr=%lx\n",
+ ((ucontext_t *)uc)->uc_mcontext.pc,
+ addr);
return;
}
/* Compare the context for precise error */
else if (si->si_code == SEGV_MTESERR) {
+ if ((!mtefar_support && si_atag) || (si_atag != MT_FETCH_ATAG(cur_mte_cxt.trig_addr))) {
+ ksft_print_msg("Invalid MTE synchronous exception caught for address tag! si_tag=%x, si_atag: %x\n", si_tag, si_atag);
+ exit(KSFT_FAIL);
+ }
+
if (cur_mte_cxt.trig_si_code == si->si_code &&
((cur_mte_cxt.trig_range >= 0 &&
- addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
- addr <= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
+ addr >= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
+ addr <= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
(cur_mte_cxt.trig_range < 0 &&
- addr <= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
- addr >= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)))) {
+ addr <= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
+ addr >= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)))) {
cur_mte_cxt.fault_valid = true;
/* Adjust the pc by 4 */
((ucontext_t *)uc)->uc_mcontext.pc += 4;
@@ -60,14 +90,14 @@ void mte_default_handler(int signum, siginfo_t *si, void *uc)
exit(1);
}
} else if (signum == SIGBUS) {
- ksft_print_msg("INFO: SIGBUS signal at pc=%lx, fault addr=%lx, si_code=%lx\n",
+ ksft_print_msg("INFO: SIGBUS signal at pc=%llx, fault addr=%lx, si_code=%x\n",
((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
if ((cur_mte_cxt.trig_range >= 0 &&
- addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
- addr <= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
+ addr >= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
+ addr <= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
(cur_mte_cxt.trig_range < 0 &&
- addr <= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
- addr >= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range))) {
+ addr <= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
+ addr >= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range))) {
cur_mte_cxt.fault_valid = true;
/* Adjust the pc by 4 */
((ucontext_t *)uc)->uc_mcontext.pc += 4;
@@ -75,12 +105,17 @@ void mte_default_handler(int signum, siginfo_t *si, void *uc)
}
}
-void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *))
+void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *),
+ bool export_tags)
{
struct sigaction sa;
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO;
+
+ if (export_tags && signal == SIGSEGV)
+ sa.sa_flags |= SA_EXPOSE_TAGBITS;
+
sigemptyset(&sa.sa_mask);
sigaction(signal, &sa, NULL);
}
@@ -96,7 +131,7 @@ void *mte_insert_tags(void *ptr, size_t size)
int align_size;
if (!ptr || (unsigned long)(ptr) & MT_ALIGN_GRANULE) {
- ksft_print_msg("FAIL: Addr=%lx: invalid\n", ptr);
+ ksft_print_msg("FAIL: Addr=%p: invalid\n", ptr);
return NULL;
}
align_size = MT_ALIGN_UP(size);
@@ -108,7 +143,7 @@ void *mte_insert_tags(void *ptr, size_t size)
void mte_clear_tags(void *ptr, size_t size)
{
if (!ptr || (unsigned long)(ptr) & MT_ALIGN_GRANULE) {
- ksft_print_msg("FAIL: Addr=%lx: invalid\n", ptr);
+ ksft_print_msg("FAIL: Addr=%p: invalid\n", ptr);
return;
}
size = MT_ALIGN_UP(size);
@@ -116,6 +151,19 @@ void mte_clear_tags(void *ptr, size_t size)
mte_clear_tag_address_range(ptr, size);
}
+void *mte_insert_atag(void *ptr)
+{
+ unsigned char atag;
+
+ atag = mtefar_support ? (random() % MT_ATAG_MASK) + 1 : 0;
+ return (void *)MT_SET_ATAG((unsigned long)ptr, atag);
+}
+
+void *mte_clear_atag(void *ptr)
+{
+ return (void *)MT_CLEAR_ATAG((unsigned long)ptr);
+}
+
static void *__mte_allocate_memory_range(size_t size, int mem_type, int mapping,
size_t range_before, size_t range_after,
bool tags, int fd)
@@ -124,13 +172,16 @@ static void *__mte_allocate_memory_range(size_t size, int mem_type, int mapping,
int prot_flag, map_flag;
size_t entire_size = size + range_before + range_after;
- if (mem_type != USE_MALLOC && mem_type != USE_MMAP &&
- mem_type != USE_MPROTECT) {
+ switch (mem_type) {
+	case USE_MALLOC:
+		ptr = malloc(entire_size);
+		return ptr ? ptr + range_before : NULL;
+ case USE_MMAP:
+ case USE_MPROTECT:
+ break;
+ default:
ksft_print_msg("FAIL: Invalid allocate request\n");
return NULL;
}
- if (mem_type == USE_MALLOC)
- return malloc(entire_size) + range_before;
prot_flag = PROT_READ | PROT_WRITE;
if (mem_type == USE_MMAP)
@@ -143,13 +194,13 @@ static void *__mte_allocate_memory_range(size_t size, int mem_type, int mapping,
map_flag |= MAP_PRIVATE;
ptr = mmap(NULL, entire_size, prot_flag, map_flag, fd, 0);
if (ptr == MAP_FAILED) {
- ksft_print_msg("FAIL: mmap allocation\n");
+ ksft_perror("mmap()");
return NULL;
}
if (mem_type == USE_MPROTECT) {
if (mprotect(ptr, entire_size, prot_flag | PROT_MTE)) {
+ ksft_perror("mprotect(PROT_MTE)");
munmap(ptr, size);
- ksft_print_msg("FAIL: mprotect PROT_MTE property\n");
return NULL;
}
}
@@ -181,10 +232,17 @@ void *mte_allocate_file_memory(size_t size, int mem_type, int mapping, bool tags
}
/* Initialize the file for mappable size */
lseek(fd, 0, SEEK_SET);
- for (index = INIT_BUFFER_SIZE; index < size; index += INIT_BUFFER_SIZE)
- write(fd, buffer, INIT_BUFFER_SIZE);
+ for (index = INIT_BUFFER_SIZE; index < size; index += INIT_BUFFER_SIZE) {
+ if (write(fd, buffer, INIT_BUFFER_SIZE) != INIT_BUFFER_SIZE) {
+ ksft_perror("initialising buffer");
+ return NULL;
+ }
+ }
index -= INIT_BUFFER_SIZE;
- write(fd, buffer, size - index);
+ if (write(fd, buffer, size - index) != size - index) {
+ ksft_perror("initialising buffer");
+ return NULL;
+ }
return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, fd);
}
@@ -202,9 +260,15 @@ void *mte_allocate_file_memory_tag_range(size_t size, int mem_type, int mapping,
/* Initialize the file for mappable size */
lseek(fd, 0, SEEK_SET);
for (index = INIT_BUFFER_SIZE; index < map_size; index += INIT_BUFFER_SIZE)
- write(fd, buffer, INIT_BUFFER_SIZE);
+ if (write(fd, buffer, INIT_BUFFER_SIZE) != INIT_BUFFER_SIZE) {
+ ksft_perror("initialising buffer");
+ return NULL;
+ }
index -= INIT_BUFFER_SIZE;
- write(fd, buffer, map_size - index);
+ if (write(fd, buffer, map_size - index) != map_size - index) {
+ ksft_perror("initialising buffer");
+ return NULL;
+ }
return __mte_allocate_memory_range(size, mem_type, mapping, range_before,
range_after, true, fd);
}
@@ -252,51 +316,68 @@ void mte_initialize_current_context(int mode, uintptr_t ptr, ssize_t range)
cur_mte_cxt.trig_si_code = 0;
}
-int mte_switch_mode(int mte_option, unsigned long incl_mask)
+int mte_switch_mode(int mte_option, unsigned long incl_mask, bool stonly)
{
unsigned long en = 0;
- if (!(mte_option == MTE_SYNC_ERR || mte_option == MTE_ASYNC_ERR ||
- mte_option == MTE_NONE_ERR || incl_mask <= MTE_ALLOW_NON_ZERO_TAG)) {
- ksft_print_msg("FAIL: Invalid mte config option\n");
+ switch (mte_option) {
+ case MTE_NONE_ERR:
+ case MTE_SYNC_ERR:
+ case MTE_ASYNC_ERR:
+ break;
+ default:
+ ksft_print_msg("FAIL: Invalid MTE option %x\n", mte_option);
return -EINVAL;
}
+
+ if (incl_mask & ~MT_INCLUDE_TAG_MASK) {
+ ksft_print_msg("FAIL: Invalid incl_mask %lx\n", incl_mask);
+ return -EINVAL;
+ }
+
en = PR_TAGGED_ADDR_ENABLE;
- if (mte_option == MTE_SYNC_ERR)
+ switch (mte_option) {
+ case MTE_SYNC_ERR:
en |= PR_MTE_TCF_SYNC;
- else if (mte_option == MTE_ASYNC_ERR)
+ break;
+ case MTE_ASYNC_ERR:
en |= PR_MTE_TCF_ASYNC;
- else if (mte_option == MTE_NONE_ERR)
+ break;
+ case MTE_NONE_ERR:
en |= PR_MTE_TCF_NONE;
+ break;
+ }
+
+ if (mtestonly_support && stonly)
+ en |= PR_MTE_STORE_ONLY;
en |= (incl_mask << PR_MTE_TAG_SHIFT);
/* Enable address tagging ABI, mte error reporting mode and tag inclusion mask. */
- if (!prctl(PR_SET_TAGGED_ADDR_CTRL, en, 0, 0, 0) == 0) {
+ if (prctl(PR_SET_TAGGED_ADDR_CTRL, en, 0, 0, 0) != 0) {
ksft_print_msg("FAIL:prctl PR_SET_TAGGED_ADDR_CTRL for mte mode\n");
return -EINVAL;
}
return 0;
}
-#define ID_AA64PFR1_MTE_SHIFT 8
-#define ID_AA64PFR1_MTE 2
-
int mte_default_setup(void)
{
- unsigned long hwcaps = getauxval(AT_HWCAP);
+ unsigned long hwcaps2 = getauxval(AT_HWCAP2);
+ unsigned long hwcaps3 = getauxval(AT_HWCAP3);
unsigned long en = 0;
int ret;
- if (!(hwcaps & HWCAP_CPUID)) {
- ksft_print_msg("FAIL: CPUID registers unavailable\n");
- return KSFT_FAIL;
- }
- /* Read ID_AA64PFR1_EL1 register */
- asm volatile("mrs %0, id_aa64pfr1_el1" : "=r"(hwcaps) : : "memory");
- if (((hwcaps >> ID_AA64PFR1_MTE_SHIFT) & MT_TAG_MASK) != ID_AA64PFR1_MTE) {
- ksft_print_msg("FAIL: MTE features unavailable\n");
- return KSFT_SKIP;
- }
+ /* To generate random address tag */
+ srandom(time(NULL));
+
+ if (!(hwcaps2 & HWCAP2_MTE))
+ ksft_exit_skip("MTE features unavailable\n");
+
+ mtefar_support = !!(hwcaps3 & HWCAP3_MTE_FAR);
+
+ if (hwcaps3 & HWCAP3_MTE_STORE_ONLY)
+ mtestonly_support = true;
+
/* Get current mte mode */
ret = prctl(PR_GET_TAGGED_ADDR_CTRL, en, 0, 0, 0);
if (ret < 0) {
@@ -310,6 +391,8 @@ int mte_default_setup(void)
else if (ret & PR_MTE_TCF_NONE)
mte_cur_mode = MTE_NONE_ERR;
+ mte_cur_stonly = (ret & PR_MTE_STORE_ONLY) ? true : false;
+
mte_cur_pstate_tco = mte_get_pstate_tco();
/* Disable PSTATE.TCO */
mte_disable_pstate_tco();
@@ -318,7 +401,7 @@ int mte_default_setup(void)
void mte_restore_setup(void)
{
- mte_switch_mode(mte_cur_mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mte_cur_mode, MTE_ALLOW_NON_ZERO_TAG, mte_cur_stonly);
if (mte_cur_pstate_tco == MT_PSTATE_TCO_EN)
mte_enable_pstate_tco();
else if (mte_cur_pstate_tco == MT_PSTATE_TCO_DIS)
@@ -333,6 +416,7 @@ int create_temp_file(void)
/* Create a file in the tmpfs filesystem */
fd = mkstemp(&filename[0]);
if (fd == -1) {
+ ksft_perror(filename);
ksft_print_msg("FAIL: Unable to open temporary file\n");
return 0;
}
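
The handler changes above depend on SA_EXPOSE_TAGBITS, which asks the kernel
to leave the tag bits of the faulting address visible in siginfo's si_addr
instead of clearing them. A sketch of registering such a handler (the flag
needs a v5.11+ kernel and is guarded because older libc headers may not
define it; the MTE logical tag lives in address bits 59:56):

	#include <signal.h>
	#include <string.h>

	#ifndef SA_EXPOSE_TAGBITS
	#define SA_EXPOSE_TAGBITS 0x00000800
	#endif

	static void segv_handler(int sig, siginfo_t *si, void *uc)
	{
		unsigned long addr = (unsigned long)si->si_addr;
		unsigned char tag = (addr >> 56) & 0xf;	/* MTE logical tag */

		(void)sig; (void)uc; (void)tag;	/* inspect/report as needed */
	}

	static int install_handler(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = segv_handler;
		sa.sa_flags = SA_SIGINFO | SA_EXPOSE_TAGBITS;
		sigemptyset(&sa.sa_mask);
		return sigaction(SIGSEGV, &sa, NULL);
	}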
diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.h b/tools/testing/selftests/arm64/mte/mte_common_util.h
index 195a7d1879e6..250d671329a5 100644
--- a/tools/testing/selftests/arm64/mte/mte_common_util.h
+++ b/tools/testing/selftests/arm64/mte/mte_common_util.h
@@ -37,10 +37,13 @@ struct mte_fault_cxt {
};
extern struct mte_fault_cxt cur_mte_cxt;
+extern bool mtefar_support;
+extern bool mtestonly_support;
/* MTE utility functions */
void mte_default_handler(int signum, siginfo_t *si, void *uc);
-void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *));
+void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *),
+ bool export_tags);
void mte_wait_after_trig(void);
void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags);
void *mte_allocate_memory_tag_range(size_t size, int mem_type, int mapping,
@@ -54,9 +57,11 @@ void mte_free_memory_tag_range(void *ptr, size_t size, int mem_type,
size_t range_before, size_t range_after);
void *mte_insert_tags(void *ptr, size_t size);
void mte_clear_tags(void *ptr, size_t size);
+void *mte_insert_atag(void *ptr);
+void *mte_clear_atag(void *ptr);
int mte_default_setup(void);
void mte_restore_setup(void);
-int mte_switch_mode(int mte_option, unsigned long incl_mask);
+int mte_switch_mode(int mte_option, unsigned long incl_mask, bool stonly);
void mte_initialize_current_context(int mode, uintptr_t ptr, ssize_t range);
/* Common utility functions */
@@ -75,10 +80,21 @@ unsigned int mte_get_pstate_tco(void);
/* Test framework static inline functions/macros */
static inline void evaluate_test(int err, const char *msg)
{
- if (err == KSFT_PASS)
- ksft_test_result_pass(msg);
- else if (err == KSFT_FAIL)
- ksft_test_result_fail(msg);
+ switch (err) {
+ case KSFT_PASS:
+ ksft_test_result_pass("%s", msg);
+ break;
+ case KSFT_FAIL:
+ ksft_test_result_fail("%s", msg);
+ break;
+ case KSFT_SKIP:
+ ksft_test_result_skip("%s", msg);
+ break;
+ default:
+ ksft_test_result_error("Unknown return code %d from %s",
+ err, msg);
+ break;
+ }
}
static inline int check_allocated_memory(void *ptr, size_t size,
diff --git a/tools/testing/selftests/arm64/mte/mte_def.h b/tools/testing/selftests/arm64/mte/mte_def.h
index 9b188254b61a..6ad22f07c9b8 100644
--- a/tools/testing/selftests/arm64/mte/mte_def.h
+++ b/tools/testing/selftests/arm64/mte/mte_def.h
@@ -42,6 +42,8 @@
#define MT_TAG_COUNT 16
#define MT_INCLUDE_TAG_MASK 0xFFFF
#define MT_EXCLUDE_TAG_MASK 0x0
+#define MT_ATAG_SHIFT 60
+#define MT_ATAG_MASK 0xFUL
#define MT_ALIGN_GRANULE (MT_GRANULE_SIZE - 1)
#define MT_CLEAR_TAG(x) ((x) & ~(MT_TAG_MASK << MT_TAG_SHIFT))
@@ -49,6 +51,12 @@
#define MT_FETCH_TAG(x) ((x >> MT_TAG_SHIFT) & (MT_TAG_MASK))
#define MT_ALIGN_UP(x) ((x + MT_ALIGN_GRANULE) & ~(MT_ALIGN_GRANULE))
+#define MT_CLEAR_ATAG(x) ((x) & ~(MT_TAG_MASK << MT_ATAG_SHIFT))
+#define MT_SET_ATAG(x, y) ((x) | (((y) & MT_ATAG_MASK) << MT_ATAG_SHIFT))
+#define MT_FETCH_ATAG(x) ((x >> MT_ATAG_SHIFT) & (MT_ATAG_MASK))
+
+#define MT_CLEAR_TAGS(x) (MT_CLEAR_ATAG(MT_CLEAR_TAG(x)))
+
#define MT_PSTATE_TCO_SHIFT 25
#define MT_PSTATE_TCO_MASK ~(0x1 << MT_PSTATE_TCO_SHIFT)
#define MT_PSTATE_TCO_EN 1
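
The new MT_*ATAG macros manipulate the address tag in bits [63:60], one nibble above the MTE logical tag covered by the existing MT_*TAG macros. A hedged sketch of how the mte_insert_atag() helper declared in mte_common_util.h might use them (the real implementation lives in mte_common_util.c; random() is assumed to have been seeded by mte_default_setup()):

/* Sketch: place a random, non-zero address tag in bits [63:60]. */
void *mte_insert_atag(void *ptr)
{
	unsigned long atag = (random() % MT_ATAG_MASK) + 1;	/* 1..15 */

	return (void *)MT_SET_ATAG(MT_CLEAR_ATAG((unsigned long)ptr), atag);
}
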
diff --git a/tools/testing/selftests/arm64/mte/mte_helper.S b/tools/testing/selftests/arm64/mte/mte_helper.S
index a02c04cd0aac..a55dbbc56ed1 100644
--- a/tools/testing/selftests/arm64/mte/mte_helper.S
+++ b/tools/testing/selftests/arm64/mte/mte_helper.S
@@ -3,6 +3,8 @@
#include "mte_def.h"
+.arch armv8.5-a+memtag
+
#define ENTRY(name) \
.globl name ;\
.p2align 2;\
diff --git a/tools/testing/selftests/arm64/pauth/Makefile b/tools/testing/selftests/arm64/pauth/Makefile
index 72e290b0b10c..b5a1c80e0ead 100644
--- a/tools/testing/selftests/arm64/pauth/Makefile
+++ b/tools/testing/selftests/arm64/pauth/Makefile
@@ -7,8 +7,14 @@ CC := $(CROSS_COMPILE)gcc
endif
CFLAGS += -mbranch-protection=pac-ret
+
+# All supported LLVMs have PAC, test for GCC
+ifeq ($(LLVM),1)
+pauth_cc_support := 1
+else
# check if the compiler supports ARMv8.3 and branch protection with PAuth
pauth_cc_support := $(shell if ($(CC) $(CFLAGS) -march=armv8.3-a -E -x c /dev/null -o /dev/null 2>&1) then echo "1"; fi)
+endif
ifeq ($(pauth_cc_support),1)
TEST_GEN_PROGS := pac
diff --git a/tools/testing/selftests/arm64/pauth/exec_target.c b/tools/testing/selftests/arm64/pauth/exec_target.c
index 4435600ca400..e597861b26d6 100644
--- a/tools/testing/selftests/arm64/pauth/exec_target.c
+++ b/tools/testing/selftests/arm64/pauth/exec_target.c
@@ -13,7 +13,12 @@ int main(void)
unsigned long hwcaps;
size_t val;
- fread(&val, sizeof(size_t), 1, stdin);
+ size_t size = fread(&val, sizeof(size_t), 1, stdin);
+
+ if (size != 1) {
+ fprintf(stderr, "Could not read input from stdin\n");
+ return EXIT_FAILURE;
+ }
/* don't try to execute illegal (unimplemented) instructions; the caller
* should have checked this and kept the worker simple
diff --git a/tools/testing/selftests/arm64/pauth/pac.c b/tools/testing/selftests/arm64/pauth/pac.c
index 592fe538506e..67d138057707 100644
--- a/tools/testing/selftests/arm64/pauth/pac.c
+++ b/tools/testing/selftests/arm64/pauth/pac.c
@@ -10,10 +10,10 @@
#include <setjmp.h>
#include <sched.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "helper.h"
-#define PAC_COLLISION_ATTEMPTS 10
+#define PAC_COLLISION_ATTEMPTS 1000
/*
* The kernel sets TBID by default. So bits 55 and above should remain
* untouched no matter what.
@@ -25,13 +25,15 @@
do { \
unsigned long hwcaps = getauxval(AT_HWCAP); \
/* data key instructions are not in NOP space. This prevents a SIGILL */ \
- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled"); \
+ if (!(hwcaps & HWCAP_PACA)) \
+ SKIP(return, "PAUTH not enabled"); \
} while (0)
#define ASSERT_GENERIC_PAUTH_ENABLED() \
do { \
unsigned long hwcaps = getauxval(AT_HWCAP); \
/* generic key instructions are not in NOP space. This prevents a SIGILL */ \
- ASSERT_NE(0, hwcaps & HWCAP_PACG) TH_LOG("Generic PAUTH not enabled"); \
+ if (!(hwcaps & HWCAP_PACG)) \
+ SKIP(return, "Generic PAUTH not enabled"); \
} while (0)
void sign_specific(struct signatures *sign, size_t val)
@@ -180,6 +182,9 @@ int exec_sign_all(struct signatures *signed_vals, size_t val)
return -1;
}
+ close(new_stdin[1]);
+ close(new_stdout[0]);
+
return 0;
}
@@ -256,7 +261,7 @@ TEST(single_thread_different_keys)
unsigned long hwcaps = getauxval(AT_HWCAP);
/* generic and data key instructions are not in NOP space. This prevents a SIGILL */
- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled");
+ ASSERT_PAUTH_ENABLED();
if (!(hwcaps & HWCAP_PACG)) {
TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
nkeys = NKEYS - 1;
@@ -299,7 +304,7 @@ TEST(exec_changed_keys)
unsigned long hwcaps = getauxval(AT_HWCAP);
/* generic and data key instructions are not in NOP space. This prevents a SIGILL */
- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled");
+ ASSERT_PAUTH_ENABLED();
if (!(hwcaps & HWCAP_PACG)) {
TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
nkeys = NKEYS - 1;
diff --git a/tools/testing/selftests/arm64/signal/.gitignore b/tools/testing/selftests/arm64/signal/.gitignore
index 78c902045ca7..b257db665a35 100644
--- a/tools/testing/selftests/arm64/signal/.gitignore
+++ b/tools/testing/selftests/arm64/signal/.gitignore
@@ -1,4 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
mangle_*
fake_sigreturn_*
+fpmr_*
+poe_*
+gcs_*
+sme_*
+ssve_*
+sve_*
+tpidr2_*
+za_*
+zt_*
!*.[ch]
diff --git a/tools/testing/selftests/arm64/signal/Makefile b/tools/testing/selftests/arm64/signal/Makefile
index ac4ad0005715..1381039fb36f 100644
--- a/tools/testing/selftests/arm64/signal/Makefile
+++ b/tools/testing/selftests/arm64/signal/Makefile
@@ -2,7 +2,7 @@
# Copyright (C) 2019 ARM Limited
# Additional include paths needed by kselftest.h and local headers
-CFLAGS += -D_GNU_SOURCE -std=gnu99 -I.
+CFLAGS += -std=gnu99 -I.
SRCS := $(filter-out testcases/testcases.c,$(wildcard testcases/*.c))
PROGS := $(patsubst %.c,%,$(SRCS))
@@ -11,7 +11,6 @@ PROGS := $(patsubst %.c,%,$(SRCS))
TEST_GEN_PROGS := $(notdir $(PROGS))
# Get Kernel headers installed and use them.
-KSFT_KHDR_INSTALL := 1
# Including KSFT lib.mk here will also mangle the TEST_GEN_PROGS list
# to account for any OUTPUT target-dirs optionally provided by
@@ -23,6 +22,10 @@ $(TEST_GEN_PROGS): $(PROGS)
# Common test-unit targets to build common-layout test-cases executables
# Needs secondary expansion to properly include the testcase c-file in pre-reqs
+COMMON_SOURCES := test_signals.c test_signals_utils.c testcases/testcases.c \
+ signals.S sve_helpers.c
+COMMON_HEADERS := test_signals.h test_signals_utils.h testcases/testcases.h
+
.SECONDEXPANSION:
-$(PROGS): test_signals.c test_signals_utils.c testcases/testcases.c signals.S $$@.c test_signals.h test_signals_utils.h testcases/testcases.h
- $(CC) $(CFLAGS) $^ -o $@
+$(PROGS): $$@.c ${COMMON_SOURCES} ${COMMON_HEADERS}
+ $(CC) $(CFLAGS) ${@}.c ${COMMON_SOURCES} -o $@
diff --git a/tools/testing/selftests/arm64/signal/sve_helpers.c b/tools/testing/selftests/arm64/signal/sve_helpers.c
new file mode 100644
index 000000000000..0acc121af306
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/sve_helpers.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 ARM Limited
+ *
+ * Common helper functions for SVE and SME functionality.
+ */
+
+#include <stdbool.h>
+#include <kselftest.h>
+#include <asm/sigcontext.h>
+#include <sys/prctl.h>
+
+unsigned int vls[SVE_VQ_MAX];
+unsigned int nvls;
+
+int sve_fill_vls(bool use_sme, int min_vls)
+{
+ int vq, vl;
+ int pr_set_vl = use_sme ? PR_SME_SET_VL : PR_SVE_SET_VL;
+ int len_mask = use_sme ? PR_SME_VL_LEN_MASK : PR_SVE_VL_LEN_MASK;
+
+ /*
+ * Enumerate up to SVE_VQ_MAX vector lengths
+ */
+ for (vq = SVE_VQ_MAX; vq > 0; --vq) {
+ vl = prctl(pr_set_vl, vq * 16);
+ if (vl == -1)
+ return KSFT_FAIL;
+
+ vl &= len_mask;
+
+ /*
+ * Unlike SVE, SME does not require the minimum vector length
+ * to be implemented, or the VLs to be consecutive, so any call
+ * to the prctl might return the single implemented VL, which
+ * might be larger than 16. So to avoid this loop never
+ * terminating, bail out here when we find a higher VL than
+ * we asked for.
+ * See the ARM ARM, DDI 0487K.a, B1.4.2: I_QQRNR and I_NWYBP.
+ */
+ if (vq < sve_vq_from_vl(vl))
+ break;
+
+ /* Skip missing VLs */
+ vq = sve_vq_from_vl(vl);
+
+ vls[nvls++] = vl;
+ }
+
+ if (nvls < min_vls) {
+ fprintf(stderr, "Only %d VL supported\n", nvls);
+ return KSFT_SKIP;
+ }
+
+ return KSFT_PASS;
+}
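
The intended caller pattern for sve_fill_vls() shows up repeatedly in the testcases below: an init hook requests a minimum number of vector lengths and maps KSFT_SKIP onto the test result instead of failing. A representative sketch (compare the sve_get_vls()/sme_get_vls() helpers that follow):

/* Sketch of the init-hook idiom built on sve_fill_vls(). */
static bool init_vls(struct tdescr *td)
{
	int res = sve_fill_vls(VLS_USE_SVE, 2);	/* need at least two VLs */

	if (res == KSFT_PASS)
		return true;
	if (res == KSFT_SKIP)
		td->result = KSFT_SKIP;	/* too few VLs: skip, don't fail */
	return false;
}
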
diff --git a/tools/testing/selftests/arm64/signal/sve_helpers.h b/tools/testing/selftests/arm64/signal/sve_helpers.h
new file mode 100644
index 000000000000..ca133b93375f
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/sve_helpers.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 ARM Limited
+ *
+ * Common helper functions for SVE and SME functionality.
+ */
+
+#ifndef __SVE_HELPERS_H__
+#define __SVE_HELPERS_H__
+
+#include <stdbool.h>
+
+#define VLS_USE_SVE false
+#define VLS_USE_SME true
+
+extern unsigned int vls[];
+extern unsigned int nvls;
+
+int sve_fill_vls(bool use_sme, int min_vls);
+
+static inline uint64_t get_svcr(void)
+{
+ uint64_t val;
+
+ asm volatile (
+ "mrs %0, S3_3_C4_C2_2\n"
+ : "=r"(val)
+ :
+ : "cc");
+
+ return val;
+}
+
+#endif
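
get_svcr() reads SVCR by its S3_3_C4_C2_2 encoding so that assemblers without SME support can still build the tests. The architecture defines SVCR.SM as bit 0 and SVCR.ZA as bit 1, so callers can decode the value roughly as below (the mask names are illustrative, not taken from the UAPI headers):

/* Sketch: decode the two architected SVCR bits returned by get_svcr(). */
#define SVCR_SM	(1UL << 0)	/* streaming mode active */
#define SVCR_ZA	(1UL << 1)	/* ZA storage enabled */

static inline bool in_streaming_mode(void)
{
	return get_svcr() & SVCR_SM;
}
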
diff --git a/tools/testing/selftests/arm64/signal/test_signals.c b/tools/testing/selftests/arm64/signal/test_signals.c
index 416b1ff43199..1304c8ec0f2f 100644
--- a/tools/testing/selftests/arm64/signal/test_signals.c
+++ b/tools/testing/selftests/arm64/signal/test_signals.c
@@ -7,16 +7,28 @@
* Each test provides its own tde struct tdescr descriptor to link with
* this wrapper. Framework provides common helpers.
*/
+
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+
#include <kselftest.h>
#include "test_signals.h"
#include "test_signals_utils.h"
-struct tdescr *current;
+struct tdescr *current = &tde;
int main(int argc, char *argv[])
{
- current = &tde;
+ /*
+ * Ensure GCS is at least enabled throughout the tests if
+ * supported; otherwise the inability to return from the
+ * function that enabled GCS makes it very inconvenient to set
+ * up test cases. The prctl() may fail if GCS was locked by
+ * libc setup code.
+ */
+ if (getauxval(AT_HWCAP) & HWCAP_GCS)
+ gcs_set_state(PR_SHADOW_STACK_ENABLE);
ksft_print_msg("%s :: %s\n", current->name, current->descr);
if (test_setup(current) && test_init(current)) {
@@ -25,5 +37,6 @@ int main(int argc, char *argv[])
}
test_result(current);
- return current->result;
+ /* Do not return in case GCS was enabled */
+ exit(current->result);
}
diff --git a/tools/testing/selftests/arm64/signal/test_signals.h b/tools/testing/selftests/arm64/signal/test_signals.h
index f96baf1cef1a..ee75a2c25ce7 100644
--- a/tools/testing/selftests/arm64/signal/test_signals.h
+++ b/tools/testing/selftests/arm64/signal/test_signals.h
@@ -9,9 +9,7 @@
#include <ucontext.h>
/*
- * Using ARCH specific and sanitized Kernel headers installed by KSFT
- * framework since we asked for it by setting flag KSFT_KHDR_INSTALL
- * in our Makefile.
+ * Using ARCH specific and sanitized Kernel headers from the tree.
*/
#include <asm/ptrace.h>
#include <asm/hwcap.h>
@@ -33,10 +31,20 @@
*/
enum {
FSSBS_BIT,
+ FSVE_BIT,
+ FSME_BIT,
+ FSME_FA64_BIT,
+ FSME2_BIT,
+ FGCS_BIT,
FMAX_END
};
#define FEAT_SSBS (1UL << FSSBS_BIT)
+#define FEAT_SVE (1UL << FSVE_BIT)
+#define FEAT_SME (1UL << FSME_BIT)
+#define FEAT_SME_FA64 (1UL << FSME_FA64_BIT)
+#define FEAT_SME2 (1UL << FSME2_BIT)
+#define FEAT_GCS (1UL << FGCS_BIT)
/*
* A descriptor used to describe and configure a test case.
@@ -51,6 +59,7 @@ struct tdescr {
char *name;
char *descr;
unsigned long feats_required;
+ unsigned long feats_incompatible;
/* bitmask of effectively supported feats: populated at run-time */
unsigned long feats_supported;
bool initialized;
@@ -62,6 +71,10 @@ struct tdescr {
* Zero when no signal is expected on success
*/
int sig_ok;
+ /*
+ * expected si_code for sig_ok, or 0 to not check
+ */
+ int sig_ok_code;
/* signum expected on unsupported CPU features. */
int sig_unsupp;
/* a timeout in seconds for test completion */
diff --git a/tools/testing/selftests/arm64/signal/test_signals_utils.c b/tools/testing/selftests/arm64/signal/test_signals_utils.c
index 2de6e5ed5e25..5d3621921cfe 100644
--- a/tools/testing/selftests/arm64/signal/test_signals_utils.c
+++ b/tools/testing/selftests/arm64/signal/test_signals_utils.c
@@ -26,6 +26,11 @@ static int sig_copyctx = SIGTRAP;
static char const *const feats_names[FMAX_END] = {
" SSBS ",
+ " SVE ",
+ " SME ",
+ " FA64 ",
+ " SME2 ",
+ " GCS ",
};
#define MAX_FEATS_SZ 128
@@ -35,6 +40,8 @@ static inline char *feats_to_string(unsigned long feats)
{
size_t flen = MAX_FEATS_SZ - 1;
+ feats_string[0] = '\0';
+
for (int i = 0; i < FMAX_END; i++) {
if (feats & (1UL << i)) {
size_t tlen = strlen(feats_names[i]);
@@ -136,16 +143,25 @@ static bool handle_signal_ok(struct tdescr *td,
"current->token ZEROED...test is probably broken!\n");
abort();
}
- /*
- * Trying to narrow down the SEGV to the ones generated by Kernel itself
- * via arm64_notify_segfault(). This is a best-effort check anyway, and
- * the si_code check may need to change if this aspect of the kernel
- * ABI changes.
- */
- if (td->sig_ok == SIGSEGV && si->si_code != SEGV_ACCERR) {
- fprintf(stdout,
- "si_code != SEGV_ACCERR...test is probably broken!\n");
- abort();
+ if (td->sig_ok_code) {
+ if (si->si_code != td->sig_ok_code) {
+ fprintf(stdout, "si_code is %d not %d\n",
+ si->si_code, td->sig_ok_code);
+ abort();
+ }
+ } else {
+ /*
+ * Trying to narrow down the SEGV to the ones
+ * generated by Kernel itself via
+ * arm64_notify_segfault(). This is a best-effort
+ * check anyway, and the si_code check may need to
+ * change if this aspect of the kernel ABI changes.
+ */
+ if (td->sig_ok == SIGSEGV && si->si_code != SEGV_ACCERR) {
+ fprintf(stdout,
+ "si_code != SEGV_ACCERR...test is probably broken!\n");
+ abort();
+ }
}
td->pass = 1;
/*
@@ -160,15 +176,66 @@ static bool handle_signal_ok(struct tdescr *td,
}
static bool handle_signal_copyctx(struct tdescr *td,
- siginfo_t *si, void *uc)
+ siginfo_t *si, void *uc_in)
{
+ ucontext_t *uc = uc_in;
+ struct _aarch64_ctx *head;
+ struct extra_context *extra, *copied_extra;
+ size_t offset = 0;
+ size_t to_copy;
+
+ ASSERT_GOOD_CONTEXT(uc);
+
/* Mangling PC to avoid loops on original BRK instr */
- ((ucontext_t *)uc)->uc_mcontext.pc += 4;
- memcpy(td->live_uc, uc, td->live_sz);
- ASSERT_GOOD_CONTEXT(td->live_uc);
+ uc->uc_mcontext.pc += 4;
+
+ /*
+ * Check for and preserve any extra data too, with fixups.
+ */
+ head = (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;
+ head = get_header(head, EXTRA_MAGIC, td->live_sz, &offset);
+ if (head) {
+ extra = (struct extra_context *)head;
+
+ /*
+ * The extra buffer must be immediately after the
+ * extra_context and a 16 byte terminator. Include it
+ * in the copy; this was previously validated in
+ * ASSERT_GOOD_CONTEXT().
+ */
+ to_copy = __builtin_offsetof(ucontext_t,
+ uc_mcontext.__reserved);
+ to_copy += offset + sizeof(struct extra_context) + 16;
+ to_copy += extra->size;
+ copied_extra = (struct extra_context *)&(td->live_uc->uc_mcontext.__reserved[offset]);
+ } else {
+ copied_extra = NULL;
+ to_copy = sizeof(ucontext_t);
+ }
+
+ if (to_copy > td->live_sz) {
+ fprintf(stderr,
+ "Not enough space to grab context, %lu/%lu bytes\n",
+ td->live_sz, to_copy);
+ return false;
+ }
+
+ memcpy(td->live_uc, uc, to_copy);
+
+ /*
+ * If there was any EXTRA_CONTEXT, fix up the size to cover the
+ * struct extra_context and the following terminator record;
+ * this means that the rest of the code does not need to have
+ * special handling for the record and we don't need to fix up
+ * datap for the new location.
+ */
+ if (copied_extra)
+ copied_extra->head.size = sizeof(*copied_extra) + 16;
+
td->live_uc_valid = 1;
fprintf(stderr,
- "GOOD CONTEXT grabbed from sig_copyctx handler\n");
+ "%lu byte GOOD CONTEXT grabbed from sig_copyctx handler\n",
+ to_copy);
return true;
}
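
The to_copy arithmetic above leans on the kernel's EXTRA_CONTEXT ABI, in which the out-of-line data sits immediately after the extra_context record and its 16-byte terminator. Roughly, the layout being copied is (a sketch of the assumed ABI, not normative):

/*
 * uc_mcontext.__reserved[]:
 *   [ records... ][ extra_context ][ terminator, 16 bytes ][ extra data ]
 *
 * to_copy = offsetof(ucontext_t, uc_mcontext.__reserved)
 *         + offset of extra_context
 *         + sizeof(struct extra_context) + 16   (terminator)
 *         + extra->size                         (out-of-line data)
 */
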
@@ -192,7 +259,8 @@ static void default_handler(int signum, siginfo_t *si, void *uc)
fprintf(stderr, "-- Timeout !\n");
} else {
fprintf(stderr,
- "-- RX UNEXPECTED SIGNAL: %d\n", signum);
+ "-- RX UNEXPECTED SIGNAL: %d code %d address %p\n",
+ signum, si->si_code, si->si_addr);
}
default_result(current, 1);
}
@@ -255,7 +323,7 @@ int test_init(struct tdescr *td)
td->minsigstksz = MINSIGSTKSZ;
fprintf(stderr, "Detected MINSTKSIGSZ:%d\n", td->minsigstksz);
- if (td->feats_required) {
+ if (td->feats_required || td->feats_incompatible) {
td->feats_supported = 0;
/*
* Checking for CPU required features using both the
@@ -263,16 +331,43 @@ int test_init(struct tdescr *td)
*/
if (getauxval(AT_HWCAP) & HWCAP_SSBS)
td->feats_supported |= FEAT_SSBS;
- if (feats_ok(td))
- fprintf(stderr,
- "Required Features: [%s] supported\n",
- feats_to_string(td->feats_required &
- td->feats_supported));
- else
- fprintf(stderr,
- "Required Features: [%s] NOT supported\n",
- feats_to_string(td->feats_required &
- ~td->feats_supported));
+ if (getauxval(AT_HWCAP) & HWCAP_SVE)
+ td->feats_supported |= FEAT_SVE;
+ if (getauxval(AT_HWCAP2) & HWCAP2_SME)
+ td->feats_supported |= FEAT_SME;
+ if (getauxval(AT_HWCAP2) & HWCAP2_SME_FA64)
+ td->feats_supported |= FEAT_SME_FA64;
+ if (getauxval(AT_HWCAP2) & HWCAP2_SME2)
+ td->feats_supported |= FEAT_SME2;
+ if (getauxval(AT_HWCAP) & HWCAP_GCS)
+ td->feats_supported |= FEAT_GCS;
+ if (feats_ok(td)) {
+ if (td->feats_required & td->feats_supported)
+ fprintf(stderr,
+ "Required Features: [%s] supported\n",
+ feats_to_string(td->feats_required &
+ td->feats_supported));
+ if (!(td->feats_incompatible & td->feats_supported))
+ fprintf(stderr,
+ "Incompatible Features: [%s] absent\n",
+ feats_to_string(td->feats_incompatible));
+ } else {
+ if ((td->feats_required & td->feats_supported) !=
+ td->feats_required)
+ fprintf(stderr,
+ "Required Features: [%s] NOT supported\n",
+ feats_to_string(td->feats_required &
+ ~td->feats_supported));
+ if (td->feats_incompatible & td->feats_supported)
+ fprintf(stderr,
+ "Incompatible Features: [%s] supported\n",
+ feats_to_string(td->feats_incompatible &
+ td->feats_supported));
+
+ td->result = KSFT_SKIP;
+ return 0;
+ }
}
/* Perform test specific additional initialization */
@@ -304,14 +399,12 @@ int test_setup(struct tdescr *td)
int test_run(struct tdescr *td)
{
- if (td->sig_trig) {
- if (td->trigger)
- return td->trigger(td);
- else
- return default_trigger(td);
- } else {
+ if (td->trigger)
+ return td->trigger(td);
+ else if (td->sig_trig)
+ return default_trigger(td);
+ else
return td->run(td, NULL, NULL);
- }
}
void test_result(struct tdescr *td)
diff --git a/tools/testing/selftests/arm64/signal/test_signals_utils.h b/tools/testing/selftests/arm64/signal/test_signals_utils.h
index 6772b5c8d274..36fc12b3cd60 100644
--- a/tools/testing/selftests/arm64/signal/test_signals_utils.h
+++ b/tools/testing/selftests/arm64/signal/test_signals_utils.h
@@ -6,8 +6,11 @@
#include <assert.h>
#include <stdio.h>
+#include <stdint.h>
#include <string.h>
+#include <linux/compiler.h>
+
#include "test_signals.h"
int test_init(struct tdescr *td);
@@ -16,8 +19,48 @@ void test_cleanup(struct tdescr *td);
int test_run(struct tdescr *td);
void test_result(struct tdescr *td);
+#ifndef __NR_prctl
+#define __NR_prctl 167
+#endif
+
+/*
+ * The prctl takes 1 argument but we need to ensure that the other
+ * values passed in registers to the syscall are zero since the kernel
+ * validates them.
+ */
+#define gcs_set_state(state) \
+ ({ \
+ register long _num __asm__ ("x8") = __NR_prctl; \
+ register long _arg1 __asm__ ("x0") = PR_SET_SHADOW_STACK_STATUS; \
+ register long _arg2 __asm__ ("x1") = (long)(state); \
+ register long _arg3 __asm__ ("x2") = 0; \
+ register long _arg4 __asm__ ("x3") = 0; \
+ register long _arg5 __asm__ ("x4") = 0; \
+ \
+ __asm__ volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_arg1), "r"(_arg2), \
+ "r"(_arg3), "r"(_arg4), \
+ "r"(_arg5), "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+ })
+
+static inline __attribute__((always_inline)) uint64_t get_gcspr_el0(void)
+{
+ uint64_t val;
+
+ asm volatile("mrs %0, S3_3_C2_C5_1" : "=r" (val));
+
+ return val;
+}
+
static inline bool feats_ok(struct tdescr *td)
{
+ if (td->feats_incompatible & td->feats_supported)
+ return false;
return (td->feats_required & td->feats_supported) == td->feats_required;
}
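
Because the kernel validates that the unused arguments to PR_SET_SHADOW_STACK_STATUS are zero, and a libc prctl() wrapper makes no guarantee about the other argument registers, gcs_set_state() issues the syscall directly. A hedged usage sketch, assuming raw-syscall return semantics (0 on success, negative errno on failure):

/* Sketch: enable GCS and report failure, mirroring main() above. */
static inline long enable_gcs(void)
{
	long ret = gcs_set_state(PR_SHADOW_STACK_ENABLE);

	if (ret != 0)
		fprintf(stderr, "Failed to enable GCS: %ld\n", ret);
	return ret;
}
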
@@ -54,16 +97,29 @@ static inline bool feats_ok(struct tdescr *td)
* at sizeof(ucontext_t).
*/
static __always_inline bool get_current_context(struct tdescr *td,
- ucontext_t *dest_uc)
+ ucontext_t *dest_uc,
+ size_t dest_sz)
{
static volatile bool seen_already;
+ int i;
+ char *uc = (char *)dest_uc;
assert(td && dest_uc);
/* it's a genuine invocation..reinit */
seen_already = 0;
td->live_uc_valid = 0;
- td->live_sz = sizeof(*dest_uc);
- memset(dest_uc, 0x00, td->live_sz);
+ td->live_sz = dest_sz;
+
+ /*
+ * This is a memset() but we don't want the compiler to
+ * optimise it into either instructions or a library call
+ * which might be incompatible with streaming mode.
+ */
+ for (i = 0; i < td->live_sz; i++) {
+ uc[i] = 0;
+ OPTIMIZER_HIDE_VAR(uc[0]);
+ }
+
td->live_uc = dest_uc;
/*
* Grab ucontext_t triggering a SIGTRAP.
@@ -101,6 +157,17 @@ static __always_inline bool get_current_context(struct tdescr *td,
: "memory");
/*
+ * If we were grabbing a streaming mode context then we may
+ * have entered streaming mode behind the system's back and
+ * libc or compiler generated code might decide to do
+ * something invalid in streaming mode, or potentially even
+ * corrupt the state of ZA. Issue an SMSTOP to exit both now
+ * that we have grabbed the state.
+ */
+ if (td->feats_supported & FEAT_SME)
+ asm volatile("msr S0_3_C4_C6_3, xzr");
+
+ /*
* If we get here with seen_already==1 it implies the td->live_uc
* context has been used to get back here....this probably means
* a test has failed to cause a SEGV...anyway live_uc does not
diff --git a/tools/testing/selftests/arm64/signal/testcases/TODO b/tools/testing/selftests/arm64/signal/testcases/TODO
new file mode 100644
index 000000000000..1f7fba8194fe
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/TODO
@@ -0,0 +1 @@
+- Validate that register contents are saved and restored as expected.
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_magic.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_magic.c
index 8dc600a7d4fd..8c7f00ea9823 100644
--- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_magic.c
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_magic.c
@@ -21,7 +21,7 @@ static int fake_sigreturn_bad_magic_run(struct tdescr *td,
struct _aarch64_ctx *shead = GET_SF_RESV_HEAD(sf), *head;
/* just to fill the ucontext_t with something real */
- if (!get_current_context(td, &sf.uc))
+ if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
/* need at least 2*HDR_SZ space: KSFT_BAD_MAGIC + terminator. */
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size.c
index b3c362100666..1c03f6b638e0 100644
--- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size.c
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size.c
@@ -24,7 +24,7 @@ static int fake_sigreturn_bad_size_run(struct tdescr *td,
struct _aarch64_ctx *shead = GET_SF_RESV_HEAD(sf), *head;
/* just to fill the ucontext_t with something real */
- if (!get_current_context(td, &sf.uc))
+ if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
resv_sz = GET_SF_RESV_SIZE(sf);
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size_for_magic0.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size_for_magic0.c
index a44b88bfc81a..bc22f64b544e 100644
--- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size_for_magic0.c
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size_for_magic0.c
@@ -21,7 +21,7 @@ static int fake_sigreturn_bad_size_for_magic0_run(struct tdescr *td,
struct _aarch64_ctx *shead = GET_SF_RESV_HEAD(sf), *head;
/* just to fill the ucontext_t with something real */
- if (!get_current_context(td, &sf.uc))
+ if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
/* at least HDR_SZ for the badly sized terminator. */
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_duplicated_fpsimd.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_duplicated_fpsimd.c
index afe8915f0998..63e3906b631c 100644
--- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_duplicated_fpsimd.c
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_duplicated_fpsimd.c
@@ -21,7 +21,7 @@ static int fake_sigreturn_duplicated_fpsimd_run(struct tdescr *td,
struct _aarch64_ctx *shead = GET_SF_RESV_HEAD(sf), *head;
/* just to fill the ucontext_t with something real */
- if (!get_current_context(td, &sf.uc))
+ if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
head = get_starting_head(shead, sizeof(struct fpsimd_context) + HDR_SZ,
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_misaligned_sp.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_misaligned_sp.c
index 1e089e66f9f3..d00625ff12c2 100644
--- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_misaligned_sp.c
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_misaligned_sp.c
@@ -19,7 +19,7 @@ static int fake_sigreturn_misaligned_run(struct tdescr *td,
siginfo_t *si, ucontext_t *uc)
{
/* just to fill the ucontext_t with something real */
- if (!get_current_context(td, &sf.uc))
+ if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
/* Forcing sigframe on misaligned SP (16 + 3) */
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_missing_fpsimd.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_missing_fpsimd.c
index 08ecd8073a1a..f805138cb20d 100644
--- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_missing_fpsimd.c
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_missing_fpsimd.c
@@ -23,7 +23,7 @@ static int fake_sigreturn_missing_fpsimd_run(struct tdescr *td,
struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf);
/* just to fill the ucontext_t with something real */
- if (!get_current_context(td, &sf.uc))
+ if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
return 1;
resv_sz = GET_SF_RESV_SIZE(sf);
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c
new file mode 100644
index 000000000000..dfd6a2badf9f
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Limited
+ *
+ * Attempt to change the streaming SVE vector length in a signal
+ * handler; this is not supported and is expected to segfault.
+ */
+
+#include <kselftest.h>
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "sve_helpers.h"
+#include "testcases.h"
+
+struct fake_sigframe sf;
+
+static bool sme_get_vls(struct tdescr *td)
+{
+ int res = sve_fill_vls(VLS_USE_SME, 2);
+
+ if (!res)
+ return true;
+
+ if (res == KSFT_SKIP)
+ td->result = KSFT_SKIP;
+
+ return false;
+}
+
+static int fake_sigreturn_ssve_change_vl(struct tdescr *td,
+ siginfo_t *si, ucontext_t *uc)
+{
+ size_t resv_sz, offset;
+ struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf);
+ struct za_context *za;
+
+ /* Get a signal context with a SME ZA frame in it */
+ if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
+ return 1;
+
+ resv_sz = GET_SF_RESV_SIZE(sf);
+ head = get_header(head, ZA_MAGIC, resv_sz, &offset);
+ if (!head) {
+ fprintf(stderr, "No ZA context\n");
+ return 1;
+ }
+
+ if (head->size != sizeof(struct za_context)) {
+ fprintf(stderr, "Register data present, aborting\n");
+ return 1;
+ }
+
+ za = (struct za_context *)head;
+
+ /* No changes are supported; init left us at minimum VL so go to max */
+ fprintf(stderr, "Attempting to change VL from %d to %d\n",
+ za->vl, vls[0]);
+ za->vl = vls[0];
+
+ fake_sigreturn(&sf, sizeof(sf), 0);
+
+ return 1;
+}
+
+struct tdescr tde = {
+ .name = "FAKE_SIGRETURN_SSVE_CHANGE",
+ .descr = "Attempt to change Streaming SVE VL",
+ .feats_required = FEAT_SME,
+ .sig_ok = SIGSEGV,
+ .timeout = 3,
+ .init = sme_get_vls,
+ .run = fake_sigreturn_ssve_change_vl,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c
new file mode 100644
index 000000000000..e1ccf8f85a70
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Limited
+ *
+ * Attempt to change the SVE vector length in a signal handler; this is not
+ * supported and is expected to segfault.
+ */
+
+#include <kselftest.h>
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "sve_helpers.h"
+#include "testcases.h"
+
+struct fake_sigframe sf;
+
+static bool sve_get_vls(struct tdescr *td)
+{
+ int res = sve_fill_vls(VLS_USE_SVE, 2);
+
+ if (!res)
+ return true;
+
+ if (res == KSFT_SKIP)
+ td->result = KSFT_SKIP;
+
+ return false;
+}
+
+static int fake_sigreturn_sve_change_vl(struct tdescr *td,
+ siginfo_t *si, ucontext_t *uc)
+{
+ size_t resv_sz, offset;
+ struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf);
+ struct sve_context *sve;
+
+ /* Get a signal context with a SVE frame in it */
+ if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
+ return 1;
+
+ resv_sz = GET_SF_RESV_SIZE(sf);
+ head = get_header(head, SVE_MAGIC, resv_sz, &offset);
+ if (!head) {
+ fprintf(stderr, "No SVE context\n");
+ return 1;
+ }
+
+ if (head->size != sizeof(struct sve_context)) {
+ fprintf(stderr, "SVE register state active, skipping\n");
+ return 1;
+ }
+
+ sve = (struct sve_context *)head;
+
+ /* No changes are supported; init left us at minimum VL so go to max */
+ fprintf(stderr, "Attempting to change VL from %d to %d\n",
+ sve->vl, vls[0]);
+ sve->vl = vls[0];
+
+ fake_sigreturn(&sf, sizeof(sf), 0);
+
+ return 1;
+}
+
+struct tdescr tde = {
+ .name = "FAKE_SIGRETURN_SVE_CHANGE",
+ .descr = "Attempt to change SVE VL",
+ .feats_required = FEAT_SVE,
+ .sig_ok = SIGSEGV,
+ .timeout = 3,
+ .init = sve_get_vls,
+ .run = fake_sigreturn_sve_change_vl,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/fpmr_siginfo.c b/tools/testing/selftests/arm64/signal/testcases/fpmr_siginfo.c
new file mode 100644
index 000000000000..e9d24685e741
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/fpmr_siginfo.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 ARM Limited
+ *
+ * Verify that the FPMR register context in signal frames is set up as
+ * expected.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+#include <unistd.h>
+#include <asm/sigcontext.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+static union {
+ ucontext_t uc;
+ char buf[1024 * 128];
+} context;
+
+#define SYS_FPMR "S3_3_C4_C4_2"
+
+static uint64_t get_fpmr(void)
+{
+ uint64_t val;
+
+ asm volatile (
+ "mrs %0, " SYS_FPMR "\n"
+ : "=r"(val)
+ :
+ : "cc");
+
+ return val;
+}
+
+int fpmr_present(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
+ struct fpmr_context *fpmr_ctx;
+ size_t offset;
+ bool in_sigframe;
+ bool have_fpmr;
+ __u64 orig_fpmr;
+
+ have_fpmr = getauxval(AT_HWCAP2) & HWCAP2_FPMR;
+ if (have_fpmr)
+ orig_fpmr = get_fpmr();
+
+ if (!get_current_context(td, &context.uc, sizeof(context)))
+ return 1;
+
+ fpmr_ctx = (struct fpmr_context *)
+ get_header(head, FPMR_MAGIC, td->live_sz, &offset);
+
+ in_sigframe = fpmr_ctx != NULL;
+
+ fprintf(stderr, "FPMR sigframe %s on system %s FPMR\n",
+ in_sigframe ? "present" : "absent",
+ have_fpmr ? "with" : "without");
+
+ td->pass = (in_sigframe == have_fpmr);
+
+ if (have_fpmr && fpmr_ctx) {
+ if (fpmr_ctx->fpmr != orig_fpmr) {
+ fprintf(stderr, "FPMR in frame is %llx, was %llx\n",
+ fpmr_ctx->fpmr, orig_fpmr);
+ td->pass = false;
+ }
+ }
+
+ return 0;
+}
+
+struct tdescr tde = {
+ .name = "FPMR",
+ .descr = "Validate that FPMR is present as expected",
+ .timeout = 3,
+ .run = fpmr_present,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/gcs_exception_fault.c b/tools/testing/selftests/arm64/signal/testcases/gcs_exception_fault.c
new file mode 100644
index 000000000000..6228448b2ae7
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/gcs_exception_fault.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 ARM Limited
+ */
+
+#include <errno.h>
+#include <signal.h>
+#include <unistd.h>
+
+#include <sys/mman.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+/*
+ * We should get this from asm/siginfo.h but the testsuite is being
+ * clever with redefining siginfo_t.
+ */
+#ifndef SEGV_CPERR
+#define SEGV_CPERR 10
+#endif
+
+static inline void gcsss1(uint64_t Xt)
+{
+ asm volatile (
+ "sys #3, C7, C7, #2, %0\n"
+ :
+ : "rZ" (Xt)
+ : "memory");
+}
+
+static int gcs_op_fault_trigger(struct tdescr *td)
+{
+ /*
+ * The slot below our current GCS should be in a valid GCS but
+ * must not have a valid cap in it.
+ */
+ gcsss1(get_gcspr_el0() - 8);
+
+ return 0;
+}
+
+static int gcs_op_fault_signal(struct tdescr *td, siginfo_t *si,
+ ucontext_t *uc)
+{
+ ASSERT_GOOD_CONTEXT(uc);
+
+ return 1;
+}
+
+struct tdescr tde = {
+ .name = "Invalid GCS operation",
+ .descr = "An invalid GCS operation generates the expected signal",
+ .feats_required = FEAT_GCS,
+ .timeout = 3,
+ .sig_ok = SIGSEGV,
+ .sig_ok_code = SEGV_CPERR,
+ .sanity_disabled = true,
+ .trigger = gcs_op_fault_trigger,
+ .run = gcs_op_fault_signal,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/gcs_frame.c b/tools/testing/selftests/arm64/signal/testcases/gcs_frame.c
new file mode 100644
index 000000000000..b405d82321da
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/gcs_frame.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 ARM Limited
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+static union {
+ ucontext_t uc;
+ char buf[1024 * 64];
+} context;
+
+static int gcs_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ size_t offset;
+ struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
+ struct gcs_context *gcs;
+ unsigned long expected, gcspr;
+ uint64_t *u64_val;
+ int ret;
+
+ ret = prctl(PR_GET_SHADOW_STACK_STATUS, &expected, 0, 0, 0);
+ if (ret != 0) {
+ fprintf(stderr, "Unable to query GCS status\n");
+ return 1;
+ }
+
+ /* We expect a cap to be added to the GCS in the signal frame */
+ gcspr = get_gcspr_el0();
+ gcspr -= 8;
+ fprintf(stderr, "Expecting GCSPR_EL0 %lx\n", gcspr);
+
+ if (!get_current_context(td, &context.uc, sizeof(context))) {
+ fprintf(stderr, "Failed getting context\n");
+ return 1;
+ }
+
+ /* Ensure that the signal restore token was consumed */
+ u64_val = (uint64_t *)get_gcspr_el0() + 1;
+ if (*u64_val) {
+ fprintf(stderr, "GCS value at %p is %lx not 0\n",
+ u64_val, *u64_val);
+ return 1;
+ }
+
+ fprintf(stderr, "Got context\n");
+
+ head = get_header(head, GCS_MAGIC, GET_BUF_RESV_SIZE(context),
+ &offset);
+ if (!head) {
+ fprintf(stderr, "No GCS context\n");
+ return 1;
+ }
+
+ gcs = (struct gcs_context *)head;
+
+ /* Basic size validation is done in get_current_context() */
+
+ if (gcs->features_enabled != expected) {
+ fprintf(stderr, "Features enabled %llx but expected %lx\n",
+ gcs->features_enabled, expected);
+ return 1;
+ }
+
+ if (gcs->gcspr != gcspr) {
+ fprintf(stderr, "Got GCSPR %llx but expected %lx\n",
+ gcs->gcspr, gcspr);
+ return 1;
+ }
+
+ fprintf(stderr, "GCS context validated\n");
+ td->pass = 1;
+
+ return 0;
+}
+
+struct tdescr tde = {
+ .name = "GCS basics",
+ .descr = "Validate a GCS signal context",
+ .feats_required = FEAT_GCS,
+ .timeout = 3,
+ .run = gcs_regs,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/gcs_write_fault.c b/tools/testing/selftests/arm64/signal/testcases/gcs_write_fault.c
new file mode 100644
index 000000000000..faeabb18c4b2
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/gcs_write_fault.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 ARM Limited
+ */
+
+#include <errno.h>
+#include <signal.h>
+#include <unistd.h>
+
+#include <sys/mman.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+static uint64_t *gcs_page;
+
+#ifndef __NR_map_shadow_stack
+#define __NR_map_shadow_stack 453
+#endif
+
+static bool alloc_gcs(struct tdescr *td)
+{
+ long page_size = sysconf(_SC_PAGE_SIZE);
+
+ gcs_page = (void *)syscall(__NR_map_shadow_stack, 0,
+ page_size, 0);
+ if (gcs_page == MAP_FAILED) {
+ fprintf(stderr, "Failed to map %ld byte GCS: %d\n",
+ page_size, errno);
+ return false;
+ }
+
+ return true;
+}
+
+static int gcs_write_fault_trigger(struct tdescr *td)
+{
+ /* Verify that the page is readable (i.e., not completely unmapped) */
+ fprintf(stderr, "Read value 0x%lx\n", gcs_page[0]);
+
+ /* A regular write should trigger a fault */
+ gcs_page[0] = EINVAL;
+
+ return 0;
+}
+
+static int gcs_write_fault_signal(struct tdescr *td, siginfo_t *si,
+ ucontext_t *uc)
+{
+ ASSERT_GOOD_CONTEXT(uc);
+
+ return 1;
+}
+
+struct tdescr tde = {
+ .name = "GCS write fault",
+ .descr = "Normal writes to a GCS segfault",
+ .feats_required = FEAT_GCS,
+ .timeout = 3,
+ .sig_ok = SIGSEGV,
+ .sanity_disabled = true,
+ .init = alloc_gcs,
+ .trigger = gcs_write_fault_trigger,
+ .run = gcs_write_fault_signal,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/poe_siginfo.c b/tools/testing/selftests/arm64/signal/testcases/poe_siginfo.c
new file mode 100644
index 000000000000..36bd9940ee05
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/poe_siginfo.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Arm Limited
+ *
+ * Verify that the POR_EL0 register context in signal frames is set up as
+ * expected.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+#include <unistd.h>
+#include <asm/sigcontext.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+static union {
+ ucontext_t uc;
+ char buf[1024 * 128];
+} context;
+
+#define SYS_POR_EL0 "S3_3_C10_C2_4"
+
+static uint64_t get_por_el0(void)
+{
+ uint64_t val;
+
+ asm volatile(
+ "mrs %0, " SYS_POR_EL0 "\n"
+ : "=r"(val)
+ :
+ : );
+
+ return val;
+}
+
+int poe_present(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
+ struct poe_context *poe_ctx;
+ size_t offset;
+ bool in_sigframe;
+ bool have_poe;
+ __u64 orig_poe;
+
+ have_poe = getauxval(AT_HWCAP2) & HWCAP2_POE;
+ if (have_poe)
+ orig_poe = get_por_el0();
+
+ if (!get_current_context(td, &context.uc, sizeof(context)))
+ return 1;
+
+ poe_ctx = (struct poe_context *)
+ get_header(head, POE_MAGIC, td->live_sz, &offset);
+
+ in_sigframe = poe_ctx != NULL;
+
+ fprintf(stderr, "POR_EL0 sigframe %s on system %s POE\n",
+ in_sigframe ? "present" : "absent",
+ have_poe ? "with" : "without");
+
+ td->pass = (in_sigframe == have_poe);
+
+ /*
+ * Check that the value we read back was the one present at
+ * the time that the signal was triggered.
+ */
+ if (have_poe && poe_ctx) {
+ if (poe_ctx->por_el0 != orig_poe) {
+ fprintf(stderr, "POR_EL0 in frame is %llx, was %llx\n",
+ poe_ctx->por_el0, orig_poe);
+ td->pass = false;
+ }
+ }
+
+ return 0;
+}
+
+struct tdescr tde = {
+ .name = "POR_EL0",
+ .descr = "Validate that POR_EL0 is present as expected",
+ .timeout = 3,
+ .run = poe_present,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/sme_trap_no_sm.c b/tools/testing/selftests/arm64/signal/testcases/sme_trap_no_sm.c
new file mode 100644
index 000000000000..f9d76ae32bba
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/sme_trap_no_sm.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Limited
+ *
+ * Verify that using a streaming mode instruction without enabling it
+ * generates a SIGILL.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+int sme_trap_no_sm_trigger(struct tdescr *td)
+{
+ /* SMSTART ZA ; ADDHA ZA0.S, P0/M, P0/M, Z0.S */
+ asm volatile(".inst 0xd503457f ; .inst 0xc0900000");
+
+ return 0;
+}
+
+int sme_trap_no_sm_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ return 1;
+}
+
+struct tdescr tde = {
+ .name = "SME trap without SM",
+ .descr = "Check that we get a SIGILL if we use streaming mode without enabling it",
+ .timeout = 3,
+ .feats_required = FEAT_SME, /* We need a SMSTART ZA */
+ .sanity_disabled = true,
+ .trigger = sme_trap_no_sm_trigger,
+ .run = sme_trap_no_sm_run,
+ .sig_ok = SIGILL,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/sme_trap_non_streaming.c b/tools/testing/selftests/arm64/signal/testcases/sme_trap_non_streaming.c
new file mode 100644
index 000000000000..e469ae5348e3
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/sme_trap_non_streaming.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Limited
+ *
+ * Verify that using an instruction not supported in streaming mode
+ * traps when in streaming mode.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+int sme_trap_non_streaming_trigger(struct tdescr *td)
+{
+ /*
+ * The framework will handle SIGILL so we need to exit SM to
+ * stop any other code triggering a further SIGILL down the
+ * line from using a streaming-illegal instruction.
+ */
+ asm volatile(".inst 0xd503437f; /* SMSTART ZA */ \
+ cnt v0.16b, v0.16b; \
+ .inst 0xd503447f /* SMSTOP ZA */");
+
+ return 0;
+}
+
+int sme_trap_non_streaming_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ return 1;
+}
+
+struct tdescr tde = {
+ .name = "SME SM trap unsupported instruction",
+ .descr = "Check that we get a SIGILL if we use an unsupported instruction in streaming mode",
+ .feats_required = FEAT_SME,
+ .feats_incompatible = FEAT_SME_FA64,
+ .timeout = 3,
+ .sanity_disabled = true,
+ .trigger = sme_trap_non_streaming_trigger,
+ .run = sme_trap_non_streaming_run,
+ .sig_ok = SIGILL,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/sme_trap_za.c b/tools/testing/selftests/arm64/signal/testcases/sme_trap_za.c
new file mode 100644
index 000000000000..3a7747af4715
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/sme_trap_za.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Limited
+ *
+ * Verify that accessing ZA without enabling it generates a SIGILL.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+int sme_trap_za_trigger(struct tdescr *td)
+{
+ /* ZERO ZA */
+ asm volatile(".inst 0xc00800ff");
+
+ return 0;
+}
+
+int sme_trap_za_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ return 1;
+}
+
+struct tdescr tde = {
+ .name = "SME ZA trap",
+ .descr = "Check that we get a SIGILL if we access ZA without enabling",
+ .timeout = 3,
+ .sanity_disabled = true,
+ .trigger = sme_trap_za_trigger,
+ .run = sme_trap_za_run,
+ .sig_ok = SIGILL,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/sme_vl.c b/tools/testing/selftests/arm64/signal/testcases/sme_vl.c
new file mode 100644
index 000000000000..75f387f2db81
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/sme_vl.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Limited
+ *
+ * Check that the SME vector length reported in signal contexts is the
+ * expected one.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+struct fake_sigframe sf;
+unsigned int vl;
+
+static bool get_sme_vl(struct tdescr *td)
+{
+ int ret = prctl(PR_SME_GET_VL);
+ if (ret == -1)
+ return false;
+
+ vl = ret;
+
+ return true;
+}
+
+static int sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ size_t resv_sz, offset;
+ struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf);
+ struct za_context *za;
+
+ /* Get a signal context which should have a ZA frame in it */
+ if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
+ return 1;
+
+ resv_sz = GET_SF_RESV_SIZE(sf);
+ head = get_header(head, ZA_MAGIC, resv_sz, &offset);
+ if (!head) {
+ fprintf(stderr, "No ZA context\n");
+ return 1;
+ }
+ za = (struct za_context *)head;
+
+ if (za->vl != vl) {
+ fprintf(stderr, "ZA sigframe VL %u, expected %u\n",
+ za->vl, vl);
+ return 1;
+ } else {
+ fprintf(stderr, "got expected VL %u\n", vl);
+ }
+
+ td->pass = 1;
+
+ return 0;
+}
+
+struct tdescr tde = {
+ .name = "SME VL",
+ .descr = "Check that we get the right SME VL reported",
+ .feats_required = FEAT_SME,
+ .timeout = 3,
+ .init = get_sme_vl,
+ .run = sme_vl,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c
new file mode 100644
index 000000000000..1dbca9afb13c
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Limited
+ *
+ * Verify that the streaming SVE register context in signal frames is
+ * set up as expected.
+ */
+
+#include <kselftest.h>
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "sve_helpers.h"
+#include "testcases.h"
+
+static union {
+ ucontext_t uc;
+ char buf[1024 * 64];
+} context;
+
+static bool sme_get_vls(struct tdescr *td)
+{
+ int res = sve_fill_vls(VLS_USE_SME, 1);
+
+ if (!res)
+ return true;
+
+ if (res == KSFT_SKIP)
+ td->result = KSFT_SKIP;
+
+ return false;
+}
+
+static void setup_ssve_regs(void)
+{
+ /* smstart sm; real data is TODO */
+ asm volatile(".inst 0xd503437f" : : : );
+}
+
+static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc,
+ unsigned int vl)
+{
+ size_t offset;
+ struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
+ struct sve_context *ssve;
+ int ret;
+
+ fprintf(stderr, "Testing VL %d\n", vl);
+
+ ret = prctl(PR_SME_SET_VL, vl);
+ if (ret != vl) {
+ fprintf(stderr, "Failed to set VL, got %d\n", ret);
+ return 1;
+ }
+
+ /*
+ * Get a signal context which should have a SVE frame and registers
+ * in it.
+ */
+ setup_ssve_regs();
+ if (!get_current_context(td, &context.uc, sizeof(context)))
+ return 1;
+
+ head = get_header(head, SVE_MAGIC, GET_BUF_RESV_SIZE(context),
+ &offset);
+ if (!head) {
+ fprintf(stderr, "No SVE context\n");
+ return 1;
+ }
+
+ ssve = (struct sve_context *)head;
+ if (ssve->vl != vl) {
+ fprintf(stderr, "Got VL %d, expected %d\n", ssve->vl, vl);
+ return 1;
+ }
+
+ if (!(ssve->flags & SVE_SIG_FLAG_SM)) {
+ fprintf(stderr, "SVE_SIG_FLAG_SM not set in SVE record\n");
+ return 1;
+ }
+
+ /* The actual size validation is done in get_current_context() */
+ fprintf(stderr, "Got expected size %u and VL %d\n",
+ head->size, ssve->vl);
+
+ if (get_svcr() != 0) {
+ fprintf(stderr, "Unexpected SVCR %lx\n", get_svcr());
+ return 1;
+ }
+
+ return 0;
+}
+
+static int sme_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ int i;
+
+ for (i = 0; i < nvls; i++) {
+ if (do_one_sme_vl(td, si, uc, vls[i]))
+ return 1;
+ }
+
+ td->pass = 1;
+
+ return 0;
+}
+
+struct tdescr tde = {
+ .name = "Streaming SVE registers",
+ .descr = "Check that we get the right Streaming SVE registers reported",
+ .feats_required = FEAT_SME,
+ .timeout = 3,
+ .init = sme_get_vls,
+ .run = sme_regs,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c
new file mode 100644
index 000000000000..5557e116e973
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Limited
+ *
+ * Verify that both the streaming SVE and ZA register context in
+ * signal frames is set up as expected when enabled simultaneously.
+ */
+
+#include <kselftest.h>
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "sve_helpers.h"
+#include "testcases.h"
+
+static union {
+ ucontext_t uc;
+ char buf[1024 * 128];
+} context;
+
+static bool sme_get_vls(struct tdescr *td)
+{
+ int res = sve_fill_vls(VLS_USE_SME, 1);
+
+ if (!res)
+ return true;
+
+ if (res == KSFT_SKIP)
+ td->result = KSFT_SKIP;
+
+ return false;
+}
+
+static void setup_regs(void)
+{
+ /* smstart sm; real data is TODO */
+ asm volatile(".inst 0xd503437f" : : : );
+
+ /* smstart za; real data is TODO */
+ asm volatile(".inst 0xd503457f" : : : );
+}
+
+static char zeros[ZA_SIG_REGS_SIZE(SVE_VQ_MAX)];
+
+static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc,
+ unsigned int vl)
+{
+ size_t offset;
+ struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
+ struct _aarch64_ctx *regs;
+ struct sve_context *ssve;
+ struct za_context *za;
+ int ret;
+
+ fprintf(stderr, "Testing VL %d\n", vl);
+
+ ret = prctl(PR_SME_SET_VL, vl);
+ if (ret != vl) {
+ fprintf(stderr, "Failed to set VL, got %d\n", ret);
+ return 1;
+ }
+
+ /*
+ * Get a signal context which should have the SVE and ZA
+ * frames in it.
+ */
+ setup_regs();
+ if (!get_current_context(td, &context.uc, sizeof(context)))
+ return 1;
+
+ regs = get_header(head, SVE_MAGIC, GET_BUF_RESV_SIZE(context),
+ &offset);
+ if (!regs) {
+ fprintf(stderr, "No SVE context\n");
+ return 1;
+ }
+
+ ssve = (struct sve_context *)regs;
+ if (ssve->vl != vl) {
+ fprintf(stderr, "Got SSVE VL %d, expected %d\n", ssve->vl, vl);
+ return 1;
+ }
+
+ if (!(ssve->flags & SVE_SIG_FLAG_SM)) {
+ fprintf(stderr, "SVE_SIG_FLAG_SM not set in SVE record\n");
+ return 1;
+ }
+
+ fprintf(stderr, "Got expected SSVE size %u and VL %d\n",
+ regs->size, ssve->vl);
+
+ regs = get_header(head, ZA_MAGIC, GET_BUF_RESV_SIZE(context),
+ &offset);
+ if (!regs) {
+ fprintf(stderr, "No ZA context\n");
+ return 1;
+ }
+
+ za = (struct za_context *)regs;
+ if (za->vl != vl) {
+ fprintf(stderr, "Got ZA VL %d, expected %d\n", za->vl, vl);
+ return 1;
+ }
+
+ fprintf(stderr, "Got expected ZA size %u and VL %d\n",
+ regs->size, za->vl);
+
+ /* We didn't load any data into ZA so it should be all zeros */
+ if (memcmp(zeros, (char *)za + ZA_SIG_REGS_OFFSET,
+ ZA_SIG_REGS_SIZE(sve_vq_from_vl(za->vl))) != 0) {
+ fprintf(stderr, "ZA data invalid\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static int sme_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ int i;
+
+ for (i = 0; i < nvls; i++) {
+ if (do_one_sme_vl(td, si, uc, vls[i]))
+ return 1;
+ }
+
+ td->pass = 1;
+
+ return 0;
+}
+
+struct tdescr tde = {
+ .name = "Streaming SVE registers",
+ .descr = "Check that we get the right Streaming SVE registers reported",
+ .feats_required = FEAT_SME,
+ .timeout = 3,
+ .init = sme_get_vls,
+ .run = sme_regs,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/sve_regs.c b/tools/testing/selftests/arm64/signal/testcases/sve_regs.c
new file mode 100644
index 000000000000..8143eb1c58c1
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/sve_regs.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Limited
+ *
+ * Verify that the SVE register context in signal frames is set up as
+ * expected.
+ */
+
+#include <kselftest.h>
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "sve_helpers.h"
+#include "testcases.h"
+
+static union {
+ ucontext_t uc;
+ char buf[1024 * 64];
+} context;
+
+static bool sve_get_vls(struct tdescr *td)
+{
+ int res = sve_fill_vls(VLS_USE_SVE, 1);
+
+ if (!res)
+ return true;
+
+ if (res == KSFT_SKIP)
+ td->result = KSFT_SKIP;
+
+ return false;
+}
+
+static void setup_sve_regs(void)
+{
+ /* RDVL x16, #1 so we should have SVE regs; real data is TODO */
+ asm volatile(".inst 0x04bf5030" : : : "x16" );
+}
+
+static int do_one_sve_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc,
+ unsigned int vl)
+{
+ size_t offset;
+ struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
+ struct sve_context *sve;
+
+ fprintf(stderr, "Testing VL %d\n", vl);
+
+ if (prctl(PR_SVE_SET_VL, vl) == -1) {
+ fprintf(stderr, "Failed to set VL\n");
+ return 1;
+ }
+
+ /*
+ * Get a signal context which should have a SVE frame and registers
+ * in it.
+ */
+ setup_sve_regs();
+ if (!get_current_context(td, &context.uc, sizeof(context)))
+ return 1;
+
+ head = get_header(head, SVE_MAGIC, GET_BUF_RESV_SIZE(context),
+ &offset);
+ if (!head) {
+ fprintf(stderr, "No SVE context\n");
+ return 1;
+ }
+
+ sve = (struct sve_context *)head;
+ if (sve->vl != vl) {
+ fprintf(stderr, "Got VL %d, expected %d\n", sve->vl, vl);
+ return 1;
+ }
+
+ /* The actual size validation is done in get_current_context() */
+ fprintf(stderr, "Got expected size %u and VL %d\n",
+ head->size, sve->vl);
+
+ return 0;
+}
+
+static int sve_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ int i;
+
+ for (i = 0; i < nvls; i++) {
+ if (do_one_sve_vl(td, si, uc, vls[i]))
+ return 1;
+ }
+
+ td->pass = 1;
+
+ return 0;
+}
+
+struct tdescr tde = {
+ .name = "SVE registers",
+ .descr = "Check that we get the right SVE registers reported",
+ .feats_required = FEAT_SVE,
+ .timeout = 3,
+ .init = sve_get_vls,
+ .run = sve_regs,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/sve_vl.c b/tools/testing/selftests/arm64/signal/testcases/sve_vl.c
new file mode 100644
index 000000000000..aa835acec062
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/sve_vl.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Limited
+ *
+ * Check that the SVE vector length reported in signal contexts is the
+ * expected one.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+struct fake_sigframe sf;
+unsigned int vl;
+
+static bool get_sve_vl(struct tdescr *td)
+{
+ int ret = prctl(PR_SVE_GET_VL);
+ if (ret == -1)
+ return false;
+
+ vl = ret;
+
+ return true;
+}
+
+static int sve_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ size_t resv_sz, offset;
+ struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf);
+ struct sve_context *sve;
+
+ /* Get a signal context which should have a SVE frame in it */
+ if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
+ return 1;
+
+ resv_sz = GET_SF_RESV_SIZE(sf);
+ head = get_header(head, SVE_MAGIC, resv_sz, &offset);
+ if (!head) {
+ fprintf(stderr, "No SVE context\n");
+ return 1;
+ }
+ sve = (struct sve_context *)head;
+
+ if (sve->vl != vl) {
+ fprintf(stderr, "sigframe VL %u, expected %u\n",
+ sve->vl, vl);
+ return 1;
+ } else {
+ fprintf(stderr, "got expected VL %u\n", vl);
+ }
+
+ td->pass = 1;
+
+ return 0;
+}
+
+struct tdescr tde = {
+ .name = "SVE VL",
+ .descr = "Check that we get the right SVE VL reported",
+ .feats_required = FEAT_SVE,
+ .timeout = 3,
+ .init = get_sve_vl,
+ .run = sve_vl,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/testcases.c b/tools/testing/selftests/arm64/signal/testcases/testcases.c
index 61ebcdf63831..0c1a6b26afac 100644
--- a/tools/testing/selftests/arm64/signal/testcases/testcases.c
+++ b/tools/testing/selftests/arm64/signal/testcases/testcases.c
@@ -1,31 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019 ARM Limited */
-#include "testcases.h"
-
-struct _aarch64_ctx *get_header(struct _aarch64_ctx *head, uint32_t magic,
- size_t resv_sz, size_t *offset)
-{
- size_t offs = 0;
- struct _aarch64_ctx *found = NULL;
-
- if (!head || resv_sz < HDR_SZ)
- return found;
- while (offs <= resv_sz - HDR_SZ &&
- head->magic != magic && head->magic) {
- offs += head->size;
- head = GET_RESV_NEXT_HEAD(head);
- }
- if (head->magic == magic) {
- found = head;
- if (offset)
- *offset = offs;
- }
+#include <ctype.h>
+#include <string.h>
- return found;
-}
+#include "testcases.h"
-bool validate_extra_context(struct extra_context *extra, char **err)
+bool validate_extra_context(struct extra_context *extra, char **err,
+ void **extra_data, size_t *extra_size)
{
struct _aarch64_ctx *term;
@@ -33,7 +15,7 @@ bool validate_extra_context(struct extra_context *extra, char **err)
return false;
fprintf(stderr, "Validating EXTRA...\n");
- term = GET_RESV_NEXT_HEAD(extra);
+ term = GET_RESV_NEXT_HEAD(&extra->head);
if (!term || term->magic || term->size) {
*err = "Missing terminator after EXTRA context";
return false;
@@ -42,11 +24,84 @@ bool validate_extra_context(struct extra_context *extra, char **err)
*err = "Extra DATAP misaligned";
else if (extra->size & 0x0fUL)
*err = "Extra SIZE misaligned";
- else if (extra->datap != (uint64_t)term + sizeof(*term))
+ else if (extra->datap != (uint64_t)term + 0x10UL)
*err = "Extra DATAP misplaced (not contiguous)";
if (*err)
return false;
+ *extra_data = (void *)extra->datap;
+ *extra_size = extra->size;
+
+ return true;
+}
+
+bool validate_sve_context(struct sve_context *sve, char **err)
+{
+ /* Size will be rounded up to a multiple of 16 bytes */
+ size_t regs_size
+ = ((SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve->vl)) + 15) / 16) * 16;
+
+ if (!sve || !err)
+ return false;
+
+ /* Either a bare sve_context or a sve_context followed by regs data */
+ if ((sve->head.size != sizeof(struct sve_context)) &&
+ (sve->head.size != regs_size)) {
+ *err = "bad size for SVE context";
+ return false;
+ }
+
+ if (!sve_vl_valid(sve->vl)) {
+ *err = "SVE VL invalid";
+
+ return false;
+ }
+
+ return true;
+}
+
+bool validate_za_context(struct za_context *za, char **err)
+{
+ /* Size will be rounded up to a multiple of 16 bytes */
+ size_t regs_size
+ = ((ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za->vl)) + 15) / 16) * 16;
+
+ if (!za || !err)
+ return false;
+
+ /* Either a bare za_context or a za_context followed by regs data */
+ if ((za->head.size != sizeof(struct za_context)) &&
+ (za->head.size != regs_size)) {
+ *err = "bad size for ZA context";
+ return false;
+ }
+
+ if (!sve_vl_valid(za->vl)) {
+ *err = "SME VL in ZA context invalid";
+
+ return false;
+ }
+
+ return true;
+}
+
+bool validate_zt_context(struct zt_context *zt, char **err)
+{
+ if (!zt || !err)
+ return false;
+
+ /* If the context is present there should be at least one register */
+ if (zt->nregs == 0) {
+ *err = "no registers";
+ return false;
+ }
+
+ /* Size should agree with the number of registers */
+ if (zt->head.size != ZT_SIG_CONTEXT_SIZE(zt->nregs)) {
+ *err = "register count does not match size";
+ return false;
+ }
+
return true;
}
@@ -55,9 +110,16 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err)
bool terminated = false;
size_t offs = 0;
int flags = 0;
+ int new_flags, i;
struct extra_context *extra = NULL;
+ struct sve_context *sve = NULL;
+ struct za_context *za = NULL;
+ struct zt_context *zt = NULL;
struct _aarch64_ctx *head =
(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;
+ void *extra_data = NULL;
+ size_t extra_sz = 0;
+ char magic[4];
if (!err)
return false;
@@ -68,12 +130,24 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err)
return false;
}
+ new_flags = 0;
+
switch (head->magic) {
case 0:
- if (head->size)
+ if (head->size) {
*err = "Bad size for terminator";
- else
+ } else if (extra_data) {
+ /* End of main data, walking the extra data */
+ head = extra_data;
+ resv_sz = extra_sz;
+ offs = 0;
+
+ extra_data = NULL;
+ extra_sz = 0;
+ continue;
+ } else {
terminated = true;
+ }
break;
case FPSIMD_MAGIC:
if (flags & FPSIMD_CTX)
@@ -81,19 +155,55 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err)
else if (head->size !=
sizeof(struct fpsimd_context))
*err = "Bad size for fpsimd_context";
- flags |= FPSIMD_CTX;
+ new_flags |= FPSIMD_CTX;
break;
case ESR_MAGIC:
if (head->size != sizeof(struct esr_context))
*err = "Bad size for esr_context";
break;
+ case POE_MAGIC:
+ if (head->size != sizeof(struct poe_context))
+ *err = "Bad size for poe_context";
+ break;
+ case TPIDR2_MAGIC:
+ if (head->size != sizeof(struct tpidr2_context))
+ *err = "Bad size for tpidr2_context";
+ break;
case SVE_MAGIC:
if (flags & SVE_CTX)
*err = "Multiple SVE_MAGIC";
+ /* Size is validated in validate_sve_context() */
+ sve = (struct sve_context *)head;
+ new_flags |= SVE_CTX;
+ break;
+ case ZA_MAGIC:
+ if (flags & ZA_CTX)
+ *err = "Multiple ZA_MAGIC";
+ /* Size is validated in validate_za_context() */
+ za = (struct za_context *)head;
+ new_flags |= ZA_CTX;
+ break;
+ case ZT_MAGIC:
+ if (flags & ZT_CTX)
+ *err = "Multiple ZT_MAGIC";
+ /* Size is validated in validate_zt_context() */
+ zt = (struct zt_context *)head;
+ new_flags |= ZT_CTX;
+ break;
+ case FPMR_MAGIC:
+ if (flags & FPMR_CTX)
+ *err = "Multiple FPMR_MAGIC";
else if (head->size !=
- sizeof(struct sve_context))
- *err = "Bad size for sve_context";
- flags |= SVE_CTX;
+ sizeof(struct fpmr_context))
+ *err = "Bad size for fpmr_context";
+ new_flags |= FPMR_CTX;
+ break;
+ case GCS_MAGIC:
+ if (flags & GCS_CTX)
+ *err = "Multiple GCS_MAGIC";
+ if (head->size != sizeof(struct gcs_context))
+ *err = "Bad size for gcs_context";
+ new_flags |= GCS_CTX;
break;
case EXTRA_MAGIC:
if (flags & EXTRA_CTX)
@@ -101,7 +211,7 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err)
else if (head->size !=
sizeof(struct extra_context))
*err = "Bad size for extra_context";
- flags |= EXTRA_CTX;
+ new_flags |= EXTRA_CTX;
extra = (struct extra_context *)head;
break;
case KSFT_BAD_MAGIC:
@@ -117,11 +227,19 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err)
/*
* A still unknown Magic: potentially freshly added
* to the Kernel code and still unknown to the
- * tests.
+ * tests. Magic numbers are supposed to be allocated
+ * as somewhat meaningful ASCII strings, so try to
+ * print them as such alongside the raw number.
*/
+ memcpy(magic, &head->magic, sizeof(magic));
+ for (i = 0; i < sizeof(magic); i++)
+ if (!isalnum(magic[i]))
+ magic[i] = '?';
+
fprintf(stdout,
- "SKIP Unknown MAGIC: 0x%X - Is KSFT arm64/signal up to date ?\n",
- head->magic);
+ "SKIP Unknown MAGIC: 0x%X (%c%c%c%c) - Is KSFT arm64/signal up to date ?\n",
+ head->magic,
+ magic[3], magic[2], magic[1], magic[0]);
break;
}
@@ -134,10 +252,22 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err)
return false;
}
- if (flags & EXTRA_CTX)
- if (!validate_extra_context(extra, err))
+ if (new_flags & EXTRA_CTX)
+ if (!validate_extra_context(extra, err,
+ &extra_data, &extra_sz))
+ return false;
+ if (new_flags & SVE_CTX)
+ if (!validate_sve_context(sve, err))
+ return false;
+ if (new_flags & ZA_CTX)
+ if (!validate_za_context(za, err))
+ return false;
+ if (new_flags & ZT_CTX)
+ if (!validate_zt_context(zt, err))
return false;
+ flags |= new_flags;
+
head = GET_RESV_NEXT_HEAD(head);
}
@@ -146,6 +276,11 @@ bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err)
return false;
}
+ if (terminated && (flags & ZT_CTX) && !(flags & ZA_CTX)) {
+ *err = "ZT context but no ZA context";
+ return false;
+ }
+
return true;
}
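
Aside: the terminator case above implements a two-pass walk. When the main __reserved list ends and an EXTRA record was seen, the walk resumes in the out-of-line buffer that extra->datap points at. A self-contained sketch of just that control flow, assuming the uapi asm/sigcontext.h definitions:

#include <stddef.h>
#include <stdint.h>
#include <asm/sigcontext.h>

/* Count records across the main list and any EXTRA continuation. */
static int count_records(void *resv, size_t resv_sz)
{
	char *base = resv;
	size_t offs = 0;
	void *extra_buf = NULL;
	size_t extra_sz = 0;
	int n = 0;

	while (offs + sizeof(struct _aarch64_ctx) <= resv_sz) {
		struct _aarch64_ctx *head =
			(struct _aarch64_ctx *)(base + offs);

		if (!head->magic) {
			if (!extra_buf)
				break;		/* true end of the frame */

			/* End of main data, walk the extra data */
			base = extra_buf;
			resv_sz = extra_sz;
			offs = 0;
			extra_buf = NULL;
			continue;
		}

		if (head->magic == EXTRA_MAGIC) {
			struct extra_context *extra =
				(struct extra_context *)head;

			extra_buf = (void *)(uintptr_t)extra->datap;
			extra_sz = extra->size;
		}

		n++;

		if (!head->size)
			break;			/* malformed record */
		offs += head->size;
	}

	return n;
}
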
diff --git a/tools/testing/selftests/arm64/signal/testcases/testcases.h b/tools/testing/selftests/arm64/signal/testcases/testcases.h
index ad884c135314..98b97efdda23 100644
--- a/tools/testing/selftests/arm64/signal/testcases/testcases.h
+++ b/tools/testing/selftests/arm64/signal/testcases/testcases.h
@@ -16,19 +16,33 @@
#define FPSIMD_CTX (1 << 0)
#define SVE_CTX (1 << 1)
-#define EXTRA_CTX (1 << 2)
+#define ZA_CTX (1 << 2)
+#define EXTRA_CTX (1 << 3)
+#define ZT_CTX (1 << 4)
+#define FPMR_CTX (1 << 5)
+#define GCS_CTX (1 << 6)
#define KSFT_BAD_MAGIC 0xdeadbeef
#define HDR_SZ \
sizeof(struct _aarch64_ctx)
+#define GET_UC_RESV_HEAD(uc) \
+ (struct _aarch64_ctx *)(&(uc->uc_mcontext.__reserved))
+
#define GET_SF_RESV_HEAD(sf) \
(struct _aarch64_ctx *)(&(sf).uc.uc_mcontext.__reserved)
#define GET_SF_RESV_SIZE(sf) \
sizeof((sf).uc.uc_mcontext.__reserved)
+#define GET_BUF_RESV_HEAD(buf) \
+ (struct _aarch64_ctx *)(&(buf).uc.uc_mcontext.__reserved)
+
+#define GET_BUF_RESV_SIZE(buf) \
+ (sizeof(buf) - sizeof(buf.uc) + \
+ sizeof((buf).uc.uc_mcontext.__reserved))
+
#define GET_UCP_RESV_SIZE(ucp) \
sizeof((ucp)->uc_mcontext.__reserved)
@@ -78,10 +92,29 @@ struct fake_sigframe {
bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err);
-bool validate_extra_context(struct extra_context *extra, char **err);
+static inline struct _aarch64_ctx *get_header(struct _aarch64_ctx *head, uint32_t magic,
+ size_t resv_sz, size_t *offset)
+{
+ size_t offs = 0;
+ struct _aarch64_ctx *found = NULL;
+
+ if (!head || resv_sz < HDR_SZ)
+ return found;
+
+ while (offs <= resv_sz - HDR_SZ &&
+ head->magic != magic && head->magic) {
+ offs += head->size;
+ head = GET_RESV_NEXT_HEAD(head);
+ }
+ if (head->magic == magic) {
+ found = head;
+ if (offset)
+ *offset = offs;
+ }
+
+ return found;
+}
-struct _aarch64_ctx *get_header(struct _aarch64_ctx *head, uint32_t magic,
- size_t resv_sz, size_t *offset);
static inline struct _aarch64_ctx *get_terminator(struct _aarch64_ctx *head,
size_t resv_sz,
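
Aside: GET_BUF_RESV_SIZE() exists because the tests capture context into a union much larger than ucontext_t, so records that overflow the fixed __reserved[] array (via EXTRA_MAGIC) still land in the trailing buffer space, which the macro includes in the usable size. A hedged illustration, assuming the glibc arm64 mcontext_t layout that exposes __reserved:

#include <stdio.h>
#include <ucontext.h>

static union {
	ucontext_t uc;
	char buf[1024 * 128];
} context;

int main(void)
{
	size_t resv = sizeof(context.uc.uc_mcontext.__reserved);
	size_t usable = sizeof(context) - sizeof(context.uc) + resv;

	/* On arm64 glibc __reserved[] is 4096 bytes; the usable area is
	 * the 128K buffer minus the non-reserved part of the ucontext. */
	printf("__reserved: %zu bytes, usable: %zu bytes\n", resv, usable);
	return 0;
}
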
diff --git a/tools/testing/selftests/arm64/signal/testcases/tpidr2_restore.c b/tools/testing/selftests/arm64/signal/testcases/tpidr2_restore.c
new file mode 100644
index 000000000000..f9a86c00c28c
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/tpidr2_restore.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 ARM Limited
+ *
+ * Verify that the TPIDR2 register context in signal frames is restored.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+#include <unistd.h>
+#include <asm/sigcontext.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+#define SYS_TPIDR2 "S3_3_C13_C0_5"
+
+static uint64_t get_tpidr2(void)
+{
+ uint64_t val;
+
+ asm volatile (
+ "mrs %0, " SYS_TPIDR2 "\n"
+ : "=r"(val)
+ :
+ : "cc");
+
+ return val;
+}
+
+static void set_tpidr2(uint64_t val)
+{
+ asm volatile (
+ "msr " SYS_TPIDR2 ", %0\n"
+ :
+ : "r"(val)
+ : "cc");
+}
+
+
+static uint64_t initial_tpidr2;
+
+static bool save_tpidr2(struct tdescr *td)
+{
+ initial_tpidr2 = get_tpidr2();
+ fprintf(stderr, "Initial TPIDR2: %lx\n", initial_tpidr2);
+
+ return true;
+}
+
+static int modify_tpidr2(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ uint64_t my_tpidr2 = get_tpidr2();
+
+ my_tpidr2++;
+ fprintf(stderr, "Setting TPIDR2 to %lx\n", my_tpidr2);
+ set_tpidr2(my_tpidr2);
+
+ return 0;
+}
+
+static void check_tpidr2(struct tdescr *td)
+{
+ uint64_t tpidr2 = get_tpidr2();
+
+ td->pass = tpidr2 == initial_tpidr2;
+
+ if (td->pass)
+ fprintf(stderr, "TPIDR2 restored\n");
+ else
+ fprintf(stderr, "TPIDR2 was %lx but is now %lx\n",
+ initial_tpidr2, tpidr2);
+}
+
+struct tdescr tde = {
+ .name = "TPIDR2 restore",
+ .descr = "Validate that TPIDR2 is restored from the sigframe",
+ .feats_required = FEAT_SME,
+ .timeout = 3,
+ .sig_trig = SIGUSR1,
+ .init = save_tpidr2,
+ .run = modify_tpidr2,
+ .check_result = check_tpidr2,
+};
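
Aside: SYS_TPIDR2 spells out the S3_<op1>_<Cn>_<Cm>_<op2> encoding so the test assembles even where the toolchain does not know the TPIDR2_EL0 name. A hedged generalisation of the same get/set pattern (TPIDR_EL0's encoding shown for comparison; this only runs on arm64, and TPIDR2 access additionally requires SME, trapping SIGILL otherwise):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DEFINE_SYSREG_RW(name, enc)				\
static uint64_t get_##name(void)				\
{								\
	uint64_t val;						\
								\
	asm volatile("mrs %0, " enc : "=r"(val));		\
	return val;						\
}								\
static void set_##name(uint64_t val)				\
{								\
	asm volatile("msr " enc ", %0" : : "r"(val));		\
}

DEFINE_SYSREG_RW(tpidr2, "S3_3_C13_C0_5")	/* TPIDR2_EL0 */
DEFINE_SYSREG_RW(tpidr, "S3_3_C13_C0_2")	/* TPIDR_EL0 */

int main(void)
{
	set_tpidr2(0x42);
	printf("TPIDR2 %#" PRIx64 ", TPIDR %#" PRIx64 "\n",
	       get_tpidr2(), get_tpidr());
	return 0;
}
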
diff --git a/tools/testing/selftests/arm64/signal/testcases/tpidr2_siginfo.c b/tools/testing/selftests/arm64/signal/testcases/tpidr2_siginfo.c
new file mode 100644
index 000000000000..6a2c82bf7ead
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/tpidr2_siginfo.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 ARM Limited
+ *
+ * Verify that the TPIDR2 register context in signal frames is set up as
+ * expected.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+#include <unistd.h>
+#include <asm/sigcontext.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+static union {
+ ucontext_t uc;
+ char buf[1024 * 128];
+} context;
+
+#define SYS_TPIDR2 "S3_3_C13_C0_5"
+
+static uint64_t get_tpidr2(void)
+{
+ uint64_t val;
+
+ asm volatile (
+ "mrs %0, " SYS_TPIDR2 "\n"
+ : "=r"(val)
+ :
+ : "cc");
+
+ return val;
+}
+
+int tpidr2_present(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
+ struct tpidr2_context *tpidr2_ctx;
+ size_t offset;
+ bool in_sigframe;
+ bool have_sme;
+ __u64 orig_tpidr2;
+
+ have_sme = getauxval(AT_HWCAP2) & HWCAP2_SME;
+ if (have_sme)
+ orig_tpidr2 = get_tpidr2();
+
+ if (!get_current_context(td, &context.uc, sizeof(context)))
+ return 1;
+
+ tpidr2_ctx = (struct tpidr2_context *)
+ get_header(head, TPIDR2_MAGIC, td->live_sz, &offset);
+
+ in_sigframe = tpidr2_ctx != NULL;
+
+ fprintf(stderr, "TPIDR2 sigframe %s on system %s SME\n",
+ in_sigframe ? "present" : "absent",
+ have_sme ? "with" : "without");
+
+ td->pass = (in_sigframe == have_sme);
+
+ /*
+ * Check that the value we read back was the one present at
+ * the time that the signal was triggered. TPIDR2 is owned by
+ * libc, so we can't safely choose the value; we may need to
+ * revisit this in future if something starts setting a new
+ * TPIDR2 between our read and the signal.
+ */
+ if (have_sme && tpidr2_ctx) {
+ if (tpidr2_ctx->tpidr2 != orig_tpidr2) {
+ fprintf(stderr, "TPIDR2 in frame is %llx, was %llx\n",
+ tpidr2_ctx->tpidr2, orig_tpidr2);
+ td->pass = false;
+ }
+ }
+
+ return 0;
+}
+
+struct tdescr tde = {
+ .name = "TPIDR2",
+ .descr = "Validate that TPIDR2 is present as expected",
+ .timeout = 3,
+ .run = tpidr2_present,
+};
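
Aside: the have_sme check comes from the hwcap bits the kernel publishes in the auxiliary vector. A small standalone probe in the same style (HWCAP2_SME2 is guarded since older uapi headers may not define it):

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("SME:  %s\n", (hwcap2 & HWCAP2_SME) ? "yes" : "no");
#ifdef HWCAP2_SME2
	printf("SME2: %s\n", (hwcap2 & HWCAP2_SME2) ? "yes" : "no");
#endif
	return 0;
}
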
diff --git a/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c b/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c
new file mode 100644
index 000000000000..ce26e9c2fa5e
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Limited
+ *
+ * Verify that the ZA register context in signal frames is set up as
+ * expected.
+ */
+
+#include <kselftest.h>
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "sve_helpers.h"
+#include "testcases.h"
+
+static union {
+ ucontext_t uc;
+ char buf[1024 * 128];
+} context;
+
+static bool sme_get_vls(struct tdescr *td)
+{
+ int res = sve_fill_vls(VLS_USE_SME, 1);
+
+ if (!res)
+ return true;
+
+ if (res == KSFT_SKIP)
+ td->result = KSFT_SKIP;
+
+ return false;
+}
+
+static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc,
+ unsigned int vl)
+{
+ size_t offset;
+ struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
+ struct za_context *za;
+
+ fprintf(stderr, "Testing VL %d\n", vl);
+
+ if (prctl(PR_SME_SET_VL, vl) != vl) {
+ fprintf(stderr, "Failed to set VL\n");
+ return 1;
+ }
+
+ /*
+ * Get a signal context which should have a ZA frame in it; with
+ * ZA disabled there should be no register data.
+ */
+ if (!get_current_context(td, &context.uc, sizeof(context)))
+ return 1;
+
+ head = get_header(head, ZA_MAGIC, GET_BUF_RESV_SIZE(context), &offset);
+ if (!head) {
+ fprintf(stderr, "No ZA context\n");
+ return 1;
+ }
+
+ za = (struct za_context *)head;
+ if (za->vl != vl) {
+ fprintf(stderr, "Got VL %d, expected %d\n", za->vl, vl);
+ return 1;
+ }
+
+ if (head->size != ZA_SIG_REGS_OFFSET) {
+ fprintf(stderr, "Context size %u, expected %lu\n",
+ head->size, ZA_SIG_REGS_OFFSET);
+ return 1;
+ }
+
+ /* The actual size validation is done in get_current_context() */
+ fprintf(stderr, "Got expected size %u and VL %d\n",
+ head->size, za->vl);
+
+ return 0;
+}
+
+static int sme_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ int i;
+
+ for (i = 0; i < nvls; i++) {
+ if (do_one_sme_vl(td, si, uc, vls[i]))
+ return 1;
+ }
+
+ td->pass = 1;
+
+ return 0;
+}
+
+struct tdescr tde = {
+ .name = "ZA registers - ZA disabled",
+ .descr = "Check ZA context with ZA disabled",
+ .feats_required = FEAT_SME,
+ .timeout = 3,
+ .init = sme_get_vls,
+ .run = sme_regs,
+};
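
Aside: the head->size == ZA_SIG_REGS_OFFSET check works because, with ZA disabled, the kernel emits only the za_context header; live ZA adds a VL-by-VL byte array on top, as checked in za_regs.c. A hedged look at the arithmetic, assuming the uapi asm/sigcontext.h macros:

#include <stdio.h>
#include <asm/sigcontext.h>

int main(void)
{
	unsigned int vl = 32;			/* example: 256-bit SME VL */
	unsigned int vq = sve_vq_from_vl(vl);

	/* Header only: what this test expects with ZA disabled */
	printf("ZA_SIG_REGS_OFFSET      = %zu\n",
	       (size_t)ZA_SIG_REGS_OFFSET);
	/* vl * vl bytes of ZA storage when ZA is live */
	printf("ZA_SIG_REGS_SIZE(vq)    = %zu\n",
	       (size_t)ZA_SIG_REGS_SIZE(vq));
	/* Header plus storage */
	printf("ZA_SIG_CONTEXT_SIZE(vq) = %zu\n",
	       (size_t)ZA_SIG_CONTEXT_SIZE(vq));
	return 0;
}
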
diff --git a/tools/testing/selftests/arm64/signal/testcases/za_regs.c b/tools/testing/selftests/arm64/signal/testcases/za_regs.c
new file mode 100644
index 000000000000..badaead5326a
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/za_regs.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Limited
+ *
+ * Verify that the ZA register context in signal frames is set up as
+ * expected.
+ */
+
+#include <kselftest.h>
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "sve_helpers.h"
+#include "testcases.h"
+
+static union {
+ ucontext_t uc;
+ char buf[1024 * 128];
+} context;
+
+static bool sme_get_vls(struct tdescr *td)
+{
+ int res = sve_fill_vls(VLS_USE_SME, 1);
+
+ if (!res)
+ return true;
+
+ if (res == KSFT_SKIP)
+ td->result = KSFT_SKIP;
+
+ return false;
+}
+
+static void setup_za_regs(void)
+{
+ /* smstart za; real data is TODO */
+ asm volatile(".inst 0xd503457f" : : : );
+}
+
+static char zeros[ZA_SIG_REGS_SIZE(SVE_VQ_MAX)];
+
+static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc,
+ unsigned int vl)
+{
+ size_t offset;
+ struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
+ struct za_context *za;
+
+ fprintf(stderr, "Testing VL %d\n", vl);
+
+ if (prctl(PR_SME_SET_VL, vl) != vl) {
+ fprintf(stderr, "Failed to set VL\n");
+ return 1;
+ }
+
+ /*
+ * Get a signal context which should have a ZA frame and registers
+ * in it.
+ */
+ setup_za_regs();
+ if (!get_current_context(td, &context.uc, sizeof(context)))
+ return 1;
+
+ head = get_header(head, ZA_MAGIC, GET_BUF_RESV_SIZE(context), &offset);
+ if (!head) {
+ fprintf(stderr, "No ZA context\n");
+ return 1;
+ }
+
+ za = (struct za_context *)head;
+ if (za->vl != vl) {
+ fprintf(stderr, "Got VL %d, expected %d\n", za->vl, vl);
+ return 1;
+ }
+
+ if (head->size != ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(vl))) {
+ fprintf(stderr, "ZA context size %u, expected %lu\n",
+ head->size, ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(vl)));
+ return 1;
+ }
+
+ fprintf(stderr, "Got expected size %u and VL %d\n",
+ head->size, za->vl);
+
+ /* We didn't load any data into ZA so it should be all zeros */
+ if (memcmp(zeros, (char *)za + ZA_SIG_REGS_OFFSET,
+ ZA_SIG_REGS_SIZE(sve_vq_from_vl(za->vl))) != 0) {
+ fprintf(stderr, "ZA data invalid\n");
+ return 1;
+ }
+
+ if (get_svcr() != 0) {
+ fprintf(stderr, "Unexpected SVCR %lx\n", get_svcr());
+ return 1;
+ }
+
+ return 0;
+}
+
+static int sme_regs(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ int i;
+
+ for (i = 0; i < nvls; i++) {
+ if (do_one_sme_vl(td, si, uc, vls[i]))
+ return 1;
+ }
+
+ td->pass = 1;
+
+ return 0;
+}
+
+struct tdescr tde = {
+ .name = "ZA register",
+ .descr = "Check that we get the right ZA registers reported",
+ .feats_required = FEAT_SME,
+ .timeout = 3,
+ .init = sme_get_vls,
+ .run = sme_regs,
+};
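
Aside: the raw .inst 0xd503457f used in setup_za_regs() encodes SMSTART ZA, enabling ZA storage without entering streaming mode. A hedged sketch of the mnemonic form, assuming an assembler with SME support (the selftest uses the raw encoding so it builds everywhere):

static inline void smstart_za(void)
{
	/* SMSTART ZA: enable ZA storage, stay out of streaming mode */
	asm volatile("smstart za");
}
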
diff --git a/tools/testing/selftests/arm64/signal/testcases/zt_no_regs.c b/tools/testing/selftests/arm64/signal/testcases/zt_no_regs.c
new file mode 100644
index 000000000000..34f69bcf821e
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/zt_no_regs.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Limited
+ *
+ * Verify that no ZT register context is present in signal frames
+ * when ZA is disabled.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+static union {
+ ucontext_t uc;
+ char buf[1024 * 128];
+} context;
+
+int zt_no_regs_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ size_t offset;
+ struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
+
+ /*
+ * Get a signal context which should not have a ZT frame and
+ * registers in it.
+ */
+ if (!get_current_context(td, &context.uc, sizeof(context)))
+ return 1;
+
+ head = get_header(head, ZT_MAGIC, GET_BUF_RESV_SIZE(context), &offset);
+ if (head) {
+ fprintf(stderr, "Got unexpected ZT context\n");
+ return 1;
+ }
+
+ td->pass = 1;
+
+ return 0;
+}
+
+struct tdescr tde = {
+ .name = "ZT register data not present",
+ .descr = "Validate that ZT is not present when ZA is disabled",
+ .feats_required = FEAT_SME2,
+ .timeout = 3,
+ .sanity_disabled = true,
+ .run = zt_no_regs_run,
+};
diff --git a/tools/testing/selftests/arm64/signal/testcases/zt_regs.c b/tools/testing/selftests/arm64/signal/testcases/zt_regs.c
new file mode 100644
index 000000000000..2e384d731618
--- /dev/null
+++ b/tools/testing/selftests/arm64/signal/testcases/zt_regs.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Limited
+ *
+ * Verify that the ZT register context in signal frames is set up
+ * as expected when ZA is enabled.
+ */
+
+#include <signal.h>
+#include <ucontext.h>
+#include <sys/prctl.h>
+
+#include "test_signals_utils.h"
+#include "testcases.h"
+
+static union {
+ ucontext_t uc;
+ char buf[1024 * 128];
+} context;
+
+static void enable_za(void)
+{
+ /* smstart za; real data is TODO */
+ asm volatile(".inst 0xd503457f" : : : );
+}
+
+int zt_regs_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
+{
+ size_t offset;
+ struct _aarch64_ctx *head = GET_BUF_RESV_HEAD(context);
+ struct zt_context *zt;
+ char *zeros;
+
+ /*
+ * Get a signal context which should have a ZT frame and registers
+ * in it.
+ */
+ enable_za();
+ if (!get_current_context(td, &context.uc, sizeof(context)))
+ return 1;
+
+ head = get_header(head, ZT_MAGIC, GET_BUF_RESV_SIZE(context), &offset);
+ if (!head) {
+ fprintf(stderr, "No ZT context\n");
+ return 1;
+ }
+
+ zt = (struct zt_context *)head;
+ if (zt->nregs == 0) {
+ fprintf(stderr, "Got context with no registers\n");
+ return 1;
+ }
+
+ fprintf(stderr, "Got expected size %u for %d registers\n",
+ head->size, zt->nregs);
+
+ /* We didn't load any data into ZT so it should be all zeros */
+ zeros = malloc(ZT_SIG_REGS_SIZE(zt->nregs));
+ if (!zeros) {
+ fprintf(stderr, "Out of memory, nregs=%u\n", zt->nregs);
+ return 1;
+ }
+ memset(zeros, 0, ZT_SIG_REGS_SIZE(zt->nregs));
+
+ if (memcmp(zeros, (char *)zt + ZT_SIG_REGS_OFFSET,
+ ZT_SIG_REGS_SIZE(zt->nregs)) != 0) {
+ fprintf(stderr, "ZT data invalid\n");
+ free(zeros);
+ return 1;
+ }
+
+ free(zeros);
+
+ td->pass = 1;
+
+ return 0;
+}
+
+struct tdescr tde = {
+ .name = "ZT register data",
+ .descr = "Validate that ZT is present and has data when ZA is enabled",
+ .feats_required = FEAT_SME2,
+ .timeout = 3,
+ .sanity_disabled = true,
+ .run = zt_regs_run,
+};
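
Aside: SME2 currently defines a single 512-bit ZT0 register, so nregs is expected to be 1 and the register payload nregs * 64 bytes. A hedged check of the arithmetic, assuming the uapi asm/sigcontext.h macros used by the test:

#include <stdio.h>
#include <asm/sigcontext.h>

int main(void)
{
	unsigned int nregs = 1;		/* SME2 defines one ZT register */

	printf("ZT_SIG_REGS_SIZE(%u)    = %zu\n", nregs,
	       (size_t)ZT_SIG_REGS_SIZE(nregs));
	printf("ZT_SIG_CONTEXT_SIZE(%u) = %zu\n", nregs,
	       (size_t)ZT_SIG_CONTEXT_SIZE(nregs));
	return 0;
}
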
diff --git a/tools/testing/selftests/arm64/tags/Makefile b/tools/testing/selftests/arm64/tags/Makefile
index 41cb75070511..0a77f35295fb 100644
--- a/tools/testing/selftests/arm64/tags/Makefile
+++ b/tools/testing/selftests/arm64/tags/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-CFLAGS += -I../../../../../usr/include/
+CFLAGS += $(KHDR_INCLUDES)
TEST_GEN_PROGS := tags_test
-TEST_PROGS := run_tags_test.sh
include ../../lib.mk
diff --git a/tools/testing/selftests/arm64/tags/run_tags_test.sh b/tools/testing/selftests/arm64/tags/run_tags_test.sh
deleted file mode 100755
index 745f11379930..000000000000
--- a/tools/testing/selftests/arm64/tags/run_tags_test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-echo "--------------------"
-echo "running tags test"
-echo "--------------------"
-./tags_test
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
-else
- echo "[PASS]"
-fi
diff --git a/tools/testing/selftests/arm64/tags/tags_test.c b/tools/testing/selftests/arm64/tags/tags_test.c
index 5701163460ef..375ab47f0edb 100644
--- a/tools/testing/selftests/arm64/tags/tags_test.c
+++ b/tools/testing/selftests/arm64/tags/tags_test.c
@@ -6,6 +6,7 @@
#include <stdint.h>
#include <sys/prctl.h>
#include <sys/utsname.h>
+#include "kselftest.h"
#define SHIFT_TAG(tag) ((uint64_t)(tag) << 56)
#define SET_TAG(ptr, tag) (((uint64_t)(ptr) & ~SHIFT_TAG(0xff)) | \
@@ -16,16 +17,21 @@ int main(void)
static int tbi_enabled = 0;
unsigned long tag = 0;
struct utsname *ptr;
- int err;
+
+ ksft_print_header();
+ ksft_set_plan(1);
if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0)
tbi_enabled = 1;
ptr = (struct utsname *)malloc(sizeof(*ptr));
+ if (!ptr)
+ ksft_exit_fail_perror("Failed to allocate utsname buffer");
+
if (tbi_enabled)
tag = 0x42;
ptr = (struct utsname *)SET_TAG(ptr, tag);
- err = uname(ptr);
+ ksft_test_result(!uname(ptr), "Syscall successful with tagged address\n");
free(ptr);
- return err;
+ ksft_finished();
}
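
Aside: the test relies on arm64's Top Byte Ignore: once PR_TAGGED_ADDR_ENABLE is set, the kernel accepts syscall pointers whose bits 63:56 carry an arbitrary tag. A standalone illustration of the same macros (only the eventual syscall with the tagged pointer needs arm64 with the tagged-address ABI; printing the values is portable):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SHIFT_TAG(tag)		((uint64_t)(tag) << 56)
#define SET_TAG(ptr, tag)	(((uint64_t)(ptr) & ~SHIFT_TAG(0xff)) | \
				 SHIFT_TAG(tag))

int main(void)
{
	int x = 0;
	uint64_t tagged = SET_TAG(&x, 0x42);

	printf("raw:    %#018" PRIx64 "\n", (uint64_t)(uintptr_t)&x);
	printf("tagged: %#018" PRIx64 "\n", tagged);
	/* Passing (int *)tagged to a syscall is only valid on arm64 after
	 * prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) */
	return 0;
}
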
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index f5b7ef93618c..19c1638e312a 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -1,39 +1,47 @@
# SPDX-License-Identifier: GPL-2.0-only
+bpftool
+bpf-helpers*
+bpf-syscall*
test_verifier
test_maps
test_lru_map
-test_lpm_map
test_tag
FEATURE-DUMP.libbpf
+FEATURE-DUMP.selftests
fixdep
-test_dev_cgroup
-/test_progs*
+/test_progs
+/test_progs-no_alu32
+/test_progs-bpf_gcc
+/test_progs-cpuv4
test_verifier_log
feature
-test_sock
-test_sock_addr
urandom_read
test_sockmap
test_lirc_mode2_user
-get_cgroup_id_user
-test_skb_cgroup_id_user
-test_socket_cookie
-test_cgroup_storage
-test_flow_dissector
flow_dissector_load
-test_netcnt
test_tcpnotify_user
test_libbpf
-test_tcp_check_syncookie_user
-test_sysctl
-test_current_pid_tgid_new_ns
xdping
test_cpp
+test_progs_verification_cert
+*.d
+*.subskel.h
*.skel.h
+*.lskel.h
/no_alu32
/bpf_gcc
+/cpuv4
+/host-tools
/tools
-/runqslower
/bench
+/veristat
+/sign-file
+/uprobe_multi
*.ko
-xdpxceiver
+*.tmp
+xskxceiver
+xdp_redirect_multi
+xdp_synproxy
+xdp_hw_metadata
+xdp_features
+verification_cert.h
diff --git a/tools/testing/selftests/bpf/DENYLIST b/tools/testing/selftests/bpf/DENYLIST
new file mode 100644
index 000000000000..f748f2c33b22
--- /dev/null
+++ b/tools/testing/selftests/bpf/DENYLIST
@@ -0,0 +1,7 @@
+# TEMPORARY
+# Alphabetical order
+get_stack_raw_tp # spams with kernel warnings until next bpf -> bpf-next merge
+stacktrace_build_id
+stacktrace_build_id_nmi
+task_fd_query_rawtp
+varlen
diff --git a/tools/testing/selftests/bpf/DENYLIST.riscv64 b/tools/testing/selftests/bpf/DENYLIST.riscv64
new file mode 100644
index 000000000000..4fc4dfdde293
--- /dev/null
+++ b/tools/testing/selftests/bpf/DENYLIST.riscv64
@@ -0,0 +1,3 @@
+# riscv64 deny list for BPF CI and local vmtest
+exceptions # JIT does not support exceptions
+tailcalls/tailcall_bpf2bpf* # JIT does not support mixing bpf2bpf and tailcalls
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
new file mode 100644
index 000000000000..a17baf8c6fd7
--- /dev/null
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -0,0 +1,4 @@
+# TEMPORARY
+# Alphabetical order
+get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
+stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 8c33e999319a..b7030a6e2e76 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-include ../../../../scripts/Kbuild.include
+include ../../../build/Build.include
include ../../../scripts/Makefile.arch
+include ../../../scripts/Makefile.include
CXX ?= $(CROSS_COMPILE)g++
@@ -9,93 +10,133 @@ TOOLSDIR := $(abspath ../../..)
LIBDIR := $(TOOLSDIR)/lib
BPFDIR := $(LIBDIR)/bpf
TOOLSINCDIR := $(TOOLSDIR)/include
+TOOLSARCHINCDIR := $(TOOLSDIR)/arch/$(SRCARCH)/include
BPFTOOLDIR := $(TOOLSDIR)/bpf/bpftool
APIDIR := $(TOOLSINCDIR)/uapi
+ifneq ($(O),)
+GENDIR := $(O)/include/generated
+else
GENDIR := $(abspath ../../../../include/generated)
+endif
GENHDR := $(GENDIR)/autoconf.h
+PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config
ifneq ($(wildcard $(GENHDR)),)
GENFLAGS := -DHAVE_GENHDR
endif
-CLANG ?= clang
-LLVM_OBJCOPY ?= llvm-objcopy
BPF_GCC ?= $(shell command -v bpf-gcc;)
SAN_CFLAGS ?=
-CFLAGS += -g -rdynamic -Wall -O2 $(GENFLAGS) $(SAN_CFLAGS) \
+SAN_LDFLAGS ?= $(SAN_CFLAGS)
+RELEASE ?=
+OPT_FLAGS ?= $(if $(RELEASE),-O2,-O0)
+
+LIBELF_CFLAGS := $(shell $(PKG_CONFIG) libelf --cflags 2>/dev/null)
+LIBELF_LIBS := $(shell $(PKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
+
+SKIP_DOCS ?=
+SKIP_LLVM ?=
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+CFLAGS += -g $(OPT_FLAGS) -rdynamic -std=gnu11 \
+ -Wall -Werror -fno-omit-frame-pointer \
+ -Wno-unused-but-set-variable \
+ $(GENFLAGS) $(SAN_CFLAGS) $(LIBELF_CFLAGS) \
-I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
- -I$(TOOLSINCDIR) -I$(APIDIR) \
- -Dbpf_prog_load=bpf_prog_test_load \
- -Dbpf_load_program=bpf_test_load_program
-LDLIBS += -lcap -lelf -lz -lrt -lpthread
+ -I$(TOOLSINCDIR) -I$(TOOLSARCHINCDIR) -I$(APIDIR) -I$(OUTPUT)
+LDFLAGS += $(SAN_LDFLAGS)
+LDLIBS += $(LIBELF_LIBS) -lz -lrt -lpthread
+
+PCAP_CFLAGS := $(shell $(PKG_CONFIG) --cflags libpcap 2>/dev/null && echo "-DTRAFFIC_MONITOR=1")
+PCAP_LIBS := $(shell $(PKG_CONFIG) --libs libpcap 2>/dev/null)
+LDLIBS += $(PCAP_LIBS)
+CFLAGS += $(PCAP_CFLAGS)
+
+# Some utility functions use LLVM libraries
+jit_disasm_helpers.c-CFLAGS = $(LLVM_CFLAGS)
+
+ifneq ($(LLVM),)
+# Silence some warnings when compiled with clang
+CFLAGS += -Wno-unused-command-line-argument
+endif
+
+# Check whether bpf cpu=v4 is supported or not by clang
+ifneq ($(shell $(CLANG) --target=bpf -mcpu=help 2>&1 | grep 'v4'),)
+CLANG_CPUV4 := 1
+endif
# Order correspond to 'make run_tests' order
-TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
- test_verifier_log test_dev_cgroup \
- test_sock test_sockmap get_cgroup_id_user test_socket_cookie \
- test_cgroup_storage \
- test_netcnt test_tcpnotify_user test_sysctl \
- test_progs-no_alu32 \
- test_current_pid_tgid_new_ns
+TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_progs \
+ test_sockmap \
+ test_tcpnotify_user \
+ test_progs-no_alu32
+TEST_INST_SUBDIRS := no_alu32
# Also test bpf-gcc, if present
ifneq ($(BPF_GCC),)
TEST_GEN_PROGS += test_progs-bpf_gcc
+TEST_INST_SUBDIRS += bpf_gcc
+
+# The following tests contain C code that, although technically legal,
+# triggers GCC warnings that cannot be disabled: declaration of
+# anonymous struct types in function parameter lists.
+progs/btf_dump_test_case_bitfields.c-bpf_gcc-CFLAGS := -Wno-error
+progs/btf_dump_test_case_namespacing.c-bpf_gcc-CFLAGS := -Wno-error
+progs/btf_dump_test_case_packing.c-bpf_gcc-CFLAGS := -Wno-error
+progs/btf_dump_test_case_padding.c-bpf_gcc-CFLAGS := -Wno-error
+progs/btf_dump_test_case_syntax.c-bpf_gcc-CFLAGS := -Wno-error
+
+endif
+
+ifneq ($(CLANG_CPUV4),)
+TEST_GEN_PROGS += test_progs-cpuv4
+TEST_INST_SUBDIRS += cpuv4
endif
-TEST_GEN_FILES =
-TEST_FILES = test_lwt_ip_encap.o \
- test_tc_edt.o \
- xsk_prereqs.sh
+TEST_FILES = xsk_prereqs.sh $(wildcard progs/btf_dump_test_case_*.c)
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
- test_xdp_redirect.sh \
- test_xdp_meta.sh \
- test_xdp_veth.sh \
- test_offload.py \
- test_sock_addr.sh \
- test_tunnel.sh \
- test_lwt_seg6local.sh \
test_lirc_mode2.sh \
- test_skb_cgroup_id.sh \
- test_flow_dissector.sh \
- test_xdp_vlan_mode_generic.sh \
- test_xdp_vlan_mode_native.sh \
- test_lwt_ip_encap.sh \
- test_tcp_check_syncookie.sh \
- test_tc_tunnel.sh \
- test_tc_edt.sh \
test_xdping.sh \
test_bpftool_build.sh \
test_bpftool.sh \
+ test_bpftool_map.sh \
test_bpftool_metadata.sh \
- test_xsk.sh
+ test_doc_build.sh \
+ test_xsk.sh \
+ test_xdp_features.sh
-TEST_PROGS_EXTENDED := with_addr.sh \
- with_tunnels.sh \
- test_xdp_vlan.sh
+TEST_PROGS_EXTENDED := \
+ ima_setup.sh verify_sig_setup.sh \
+ test_bpftool.py
+
+TEST_KMODS := bpf_testmod.ko bpf_test_no_cfi.ko bpf_test_modorder_x.ko \
+ bpf_test_modorder_y.ko bpf_test_rqspinlock.ko
+TEST_KMOD_TARGETS = $(addprefix $(OUTPUT)/,$(TEST_KMODS))
# Compile but not part of 'make run_tests'
-TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
- flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
- test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
- xdpxceiver
-
-TEST_CUSTOM_PROGS = urandom_read
-
-# Emit succinct information message describing current building step
-# $1 - generic step name (e.g., CC, LINK, etc);
-# $2 - optional "flavor" specifier; if provided, will be emitted as [flavor];
-# $3 - target (assumed to be file); only file name will be emitted;
-# $4 - optional extra arg, emitted as-is, if provided.
-ifeq ($(V),1)
-Q =
-msg =
-else
-Q = @
-msg = @printf ' %-8s%s %s%s\n' "$(1)" "$(if $(2), [$(2)])" "$(notdir $(3))" "$(if $(4), $(4))";
-MAKEFLAGS += --no-print-directory
+TEST_GEN_PROGS_EXTENDED = \
+ bench \
+ flow_dissector_load \
+ test_cpp \
+ test_lirc_mode2_user \
+ veristat \
+ xdp_features \
+ xdp_hw_metadata \
+ xdp_synproxy \
+ xdping \
+ xskxceiver
+
+TEST_GEN_FILES += $(TEST_KMODS) liburandom_read.so urandom_read sign-file uprobe_multi
+
+ifneq ($(V),1)
submake_extras := feature_display=0
endif
@@ -103,34 +144,93 @@ endif
OVERRIDE_TARGETS := 1
override define CLEAN
$(call msg,CLEAN)
- $(Q)$(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
- $(Q)$(MAKE) -C bpf_testmod clean
+ $(Q)$(RM) -r $(TEST_GEN_PROGS)
+ $(Q)$(RM) -r $(TEST_GEN_PROGS_EXTENDED)
+ $(Q)$(RM) -r $(TEST_GEN_FILES)
+ $(Q)$(RM) -r $(TEST_KMODS)
+ $(Q)$(RM) -r $(EXTRA_CLEAN)
+ $(Q)$(MAKE) -C test_kmods clean
+ $(Q)$(MAKE) docs-clean
endef
include ../lib.mk
+NON_CHECK_FEAT_TARGETS := clean docs-clean
+CHECK_FEAT := $(filter-out $(NON_CHECK_FEAT_TARGETS),$(or $(MAKECMDGOALS), "none"))
+ifneq ($(CHECK_FEAT),)
+FEATURE_USER := .selftests
+FEATURE_TESTS := llvm
+FEATURE_DISPLAY := $(FEATURE_TESTS)
+
+# Makefile.feature expects OUTPUT to end with a slash
+ifeq ($(shell expr $(MAKE_VERSION) \>= 4.4), 1)
+$(let OUTPUT,$(OUTPUT)/,\
+ $(eval include ../../../build/Makefile.feature))
+else
+override OUTPUT := $(OUTPUT)/
+$(eval include ../../../build/Makefile.feature)
+override OUTPUT := $(patsubst %/,%,$(OUTPUT))
+endif
+endif
+
+ifneq ($(SKIP_LLVM),1)
+ifeq ($(feature-llvm),1)
+ LLVM_CFLAGS += -DHAVE_LLVM_SUPPORT
+ LLVM_CONFIG_LIB_COMPONENTS := mcdisassembler all-targets
+ # both llvm-config and lib.mk add -D_GNU_SOURCE, which ends up as conflict
+ LLVM_CFLAGS += $(filter-out -D_GNU_SOURCE,$(shell $(LLVM_CONFIG) --cflags))
+ # Prefer linking statically if it's available, otherwise fallback to shared
+ ifeq ($(shell $(LLVM_CONFIG) --link-static --libs >/dev/null 2>&1 && echo static),static)
+ LLVM_LDLIBS += $(shell $(LLVM_CONFIG) --link-static --libs $(LLVM_CONFIG_LIB_COMPONENTS))
+ LLVM_LDLIBS += $(filter-out -lxml2,$(shell $(LLVM_CONFIG) --link-static --system-libs $(LLVM_CONFIG_LIB_COMPONENTS)))
+ LLVM_LDLIBS += -lstdc++
+ else
+ LLVM_LDLIBS += $(shell $(LLVM_CONFIG) --link-shared --libs $(LLVM_CONFIG_LIB_COMPONENTS))
+ endif
+ LLVM_LDFLAGS += $(shell $(LLVM_CONFIG) --ldflags)
+endif
+endif
+
SCRATCH_DIR := $(OUTPUT)/tools
BUILD_DIR := $(SCRATCH_DIR)/build
INCLUDE_DIR := $(SCRATCH_DIR)/include
BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a
-RESOLVE_BTFIDS := $(BUILD_DIR)/resolve_btfids/resolve_btfids
-
+ifneq ($(CROSS_COMPILE),)
+HOST_BUILD_DIR := $(BUILD_DIR)/host
+HOST_SCRATCH_DIR := $(OUTPUT)/host-tools
+HOST_INCLUDE_DIR := $(HOST_SCRATCH_DIR)/include
+else
+HOST_BUILD_DIR := $(BUILD_DIR)
+HOST_SCRATCH_DIR := $(SCRATCH_DIR)
+HOST_INCLUDE_DIR := $(INCLUDE_DIR)
+endif
+HOST_BPFOBJ := $(HOST_BUILD_DIR)/libbpf/libbpf.a
+RESOLVE_BTFIDS := $(HOST_BUILD_DIR)/resolve_btfids/resolve_btfids
VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
$(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
../../../../vmlinux \
/sys/kernel/btf/vmlinux \
/boot/vmlinux-$(shell uname -r)
VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+ifeq ($(VMLINUX_BTF),)
+$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
+endif
-# Define simple and short `make test_progs`, `make test_sysctl`, etc targets
+# Define simple and short `make test_progs`, `make test_maps`, etc targets
# to build individual tests.
# NOTE: Semicolon at the end is critical to override lib.mk's default static
# rule for binaries.
-$(notdir $(TEST_GEN_PROGS) \
- $(TEST_PROGS) \
- $(TEST_PROGS_EXTENDED) \
- $(TEST_GEN_PROGS_EXTENDED) \
- $(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ;
+$(notdir $(TEST_GEN_PROGS) $(TEST_KMODS) \
+ $(TEST_GEN_PROGS_EXTENDED)): %: $(OUTPUT)/% ;
+
+# sort removes libbpf duplicates when not cross-building
+MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \
+ $(BUILD_DIR)/bpftool $(HOST_BUILD_DIR)/bpftool \
+ $(HOST_BUILD_DIR)/resolve_btfids \
+ $(INCLUDE_DIR))
+$(MAKE_DIRS):
+ $(call msg,MKDIR,,$@)
+ $(Q)mkdir -p $@
$(OUTPUT)/%.o: %.c
$(call msg,CC,,$@)
@@ -140,75 +240,162 @@ $(OUTPUT)/%:%.c
$(call msg,BINARY,,$@)
$(Q)$(LINK.c) $^ $(LDLIBS) -o $@
-$(OUTPUT)/urandom_read: urandom_read.c
- $(call msg,BINARY,,$@)
- $(Q)$(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id=sha1
+# LLVM's ld.lld doesn't support all the architectures, so use it only on x86
+ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 riscv))
+LLD := lld
+else
+LLD := $(shell command -v $(LD))
+endif
-$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch])
+# Filter out -static for liburandom_read.so and its dependent targets so that static builds
+# do not fail. Static builds leave urandom_read relying on system-wide shared libraries.
+$(OUTPUT)/liburandom_read.so: urandom_read_lib1.c urandom_read_lib2.c liburandom_read.map
+ $(call msg,LIB,,$@)
+ $(Q)$(CLANG) $(CLANG_TARGET_ARCH) \
+ $(filter-out -static,$(CFLAGS) $(LDFLAGS)) \
+ $(filter %.c,$^) $(filter-out -static,$(LDLIBS)) \
+ -Wno-unused-command-line-argument \
+ -fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \
+ -Wl,--version-script=liburandom_read.map \
+ -fPIC -shared -o $@
+
+$(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_read.so
+ $(call msg,BINARY,,$@)
+ $(Q)$(CLANG) $(CLANG_TARGET_ARCH) \
+ $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $(filter %.c,$^) \
+ -Wno-unused-command-line-argument \
+ -lurandom_read $(filter-out -static,$(LDLIBS)) -L$(OUTPUT) \
+ -fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \
+ -Wl,-rpath=. -o $@
+
+$(OUTPUT)/sign-file: ../../../../scripts/sign-file.c
+ $(call msg,SIGN-FILE,,$@)
+ $(Q)$(CC) $(shell $(PKG_CONFIG) --cflags libcrypto 2> /dev/null) \
+ $< -o $@ \
+ $(shell $(PKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto)
+
+# This should really be a grouped target, but make versions before 4.3 don't
+# support that for regular rules. However, pattern matching rules are implicitly
+# treated as grouped even with older versions of make, so as a workaround, the
+# subst() turns the rule into a pattern matching rule.
+$(addprefix test_kmods/,$(subst .ko,%ko,$(TEST_KMODS))): $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard test_kmods/Makefile test_kmods/*.[ch])
+ $(Q)$(RM) test_kmods/*.ko test_kmods/*.mod.o # force re-compilation
+ $(Q)$(MAKE) $(submake_extras) -C test_kmods \
+ RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \
+ EXTRA_CFLAGS='' EXTRA_LDFLAGS=''
+
+$(TEST_KMOD_TARGETS): $(addprefix test_kmods/,$(TEST_KMODS))
$(call msg,MOD,,$@)
- $(Q)$(RM) bpf_testmod/bpf_testmod.ko # force re-compilation
- $(Q)$(MAKE) $(submake_extras) -C bpf_testmod
- $(Q)cp bpf_testmod/bpf_testmod.ko $@
+ $(Q)cp test_kmods/$(@F) $@
-$(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ)
- $(call msg,CC,,$@)
- $(Q)$(CC) -c $(CFLAGS) -o $@ $<
-
-DEFAULT_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool
-
-$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL)
- $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \
- OUTPUT=$(SCRATCH_DIR)/ VMLINUX_BTF=$(VMLINUX_BTF) \
- BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) && \
- cp $(SCRATCH_DIR)/runqslower $@
-
-$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/test_stub.o $(BPFOBJ)
-
-$(OUTPUT)/test_dev_cgroup: cgroup_helpers.c
-$(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c
-$(OUTPUT)/test_sock: cgroup_helpers.c
-$(OUTPUT)/test_sock_addr: cgroup_helpers.c
-$(OUTPUT)/test_socket_cookie: cgroup_helpers.c
-$(OUTPUT)/test_sockmap: cgroup_helpers.c
-$(OUTPUT)/test_tcpnotify_user: cgroup_helpers.c trace_helpers.c
-$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c
-$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c
-$(OUTPUT)/test_netcnt: cgroup_helpers.c
-$(OUTPUT)/test_sock_fields: cgroup_helpers.c
-$(OUTPUT)/test_sysctl: cgroup_helpers.c
+
+DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
+ifneq ($(CROSS_COMPILE),)
+CROSS_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool
+TRUNNER_BPFTOOL := $(CROSS_BPFTOOL)
+USE_BOOTSTRAP := ""
+else
+TRUNNER_BPFTOOL := $(DEFAULT_BPFTOOL)
+USE_BOOTSTRAP := "bootstrap/"
+endif
+
+TEST_GEN_PROGS_EXTENDED += $(TRUNNER_BPFTOOL)
+
+$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ)
+
+TESTING_HELPERS := $(OUTPUT)/testing_helpers.o
+CGROUP_HELPERS := $(OUTPUT)/cgroup_helpers.o
+UNPRIV_HELPERS := $(OUTPUT)/unpriv_helpers.o
+TRACE_HELPERS := $(OUTPUT)/trace_helpers.o
+JSON_WRITER := $(OUTPUT)/json_writer.o
+CAP_HELPERS := $(OUTPUT)/cap_helpers.o
+NETWORK_HELPERS := $(OUTPUT)/network_helpers.o
+
+$(OUTPUT)/test_sockmap: $(CGROUP_HELPERS) $(TESTING_HELPERS)
+$(OUTPUT)/test_tcpnotify_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(TRACE_HELPERS)
+$(OUTPUT)/test_sock_fields: $(CGROUP_HELPERS) $(TESTING_HELPERS)
+$(OUTPUT)/test_tag: $(TESTING_HELPERS)
+$(OUTPUT)/test_lirc_mode2_user: $(TESTING_HELPERS)
+$(OUTPUT)/xdping: $(TESTING_HELPERS)
+$(OUTPUT)/flow_dissector_load: $(TESTING_HELPERS)
+$(OUTPUT)/test_maps: $(TESTING_HELPERS)
+$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS) $(UNPRIV_HELPERS)
+$(OUTPUT)/xsk.o: $(BPFOBJ)
BPFTOOL ?= $(DEFAULT_BPFTOOL)
$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
- $(BPFOBJ) | $(BUILD_DIR)/bpftool
+ $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
- OUTPUT=$(BUILD_DIR)/bpftool/ \
- prefix= DESTDIR=$(SCRATCH_DIR)/ install
- $(Q)mkdir -p $(BUILD_DIR)/bpftool/Documentation
- $(Q)RST2MAN_OPTS="--exit-status=1" $(MAKE) $(submake_extras) \
- -C $(BPFTOOLDIR)/Documentation \
- OUTPUT=$(BUILD_DIR)/bpftool/Documentation/ \
- prefix= DESTDIR=$(SCRATCH_DIR)/ install
+ ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD="$(HOSTLD)" \
+ EXTRA_CFLAGS='-g $(OPT_FLAGS) $(EXTRA_CFLAGS)' \
+ EXTRA_LDFLAGS='$(EXTRA_LDFLAGS)' \
+ OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
+ LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
+ LIBBPF_DESTDIR=$(HOST_SCRATCH_DIR)/ \
+ prefix= DESTDIR=$(HOST_SCRATCH_DIR)/ install-bin
+
+ifneq ($(CROSS_COMPILE),)
+$(CROSS_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
+ $(BPFOBJ) | $(BUILD_DIR)/bpftool
+ $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
+ ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) \
+ EXTRA_CFLAGS='-g $(OPT_FLAGS) $(EXTRA_CFLAGS)' \
+ EXTRA_LDFLAGS='$(EXTRA_LDFLAGS)' \
+ OUTPUT=$(BUILD_DIR)/bpftool/ \
+ LIBBPF_OUTPUT=$(BUILD_DIR)/libbpf/ \
+ LIBBPF_DESTDIR=$(SCRATCH_DIR)/ \
+ prefix= DESTDIR=$(SCRATCH_DIR)/ install-bin
+endif
+
+ifneq ($(SKIP_DOCS),1)
+all: docs
+endif
+
+docs:
+ $(Q)RST2MAN_OPTS="--exit-status=1" $(MAKE) $(submake_extras) \
+ -f Makefile.docs \
+ prefix= OUTPUT=$(OUTPUT)/ DESTDIR=$(OUTPUT)/ $@
+
+docs-clean:
+ $(Q)$(MAKE) $(submake_extras) \
+ -f Makefile.docs \
+ prefix= OUTPUT=$(OUTPUT)/ DESTDIR=$(OUTPUT)/ $@
$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
- ../../../include/uapi/linux/bpf.h \
- | $(INCLUDE_DIR) $(BUILD_DIR)/libbpf
+ $(APIDIR)/linux/bpf.h \
+ | $(BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \
+ EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS) $(EXTRA_CFLAGS)' \
+ EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \
DESTDIR=$(SCRATCH_DIR) prefix= all install_headers
-$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(BUILD_DIR)/resolve_btfids $(INCLUDE_DIR):
- $(call msg,MKDIR,,$@)
- $(Q)mkdir -p $@
+ifneq ($(BPFOBJ),$(HOST_BPFOBJ))
+$(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
+ $(APIDIR)/linux/bpf.h \
+ | $(HOST_BUILD_DIR)/libbpf
+ $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \
+ ARCH= CROSS_COMPILE= \
+ EXTRA_CFLAGS='-g $(OPT_FLAGS) $(EXTRA_CFLAGS)' \
+ EXTRA_LDFLAGS='$(EXTRA_LDFLAGS)' \
+ OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
+ CC="$(HOSTCC)" LD="$(HOSTLD)" \
+ DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
+endif
-$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
+# vmlinux.h is first dumped to a temporary file and then compared to
+# the previous version. This helps to avoid unnecessary re-builds of
+# $(TRUNNER_BPF_OBJS)
+$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
ifeq ($(VMLINUX_H),)
$(call msg,GEN,,$@)
- $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
+ $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $(INCLUDE_DIR)/.vmlinux.h.tmp
+ $(Q)cmp -s $(INCLUDE_DIR)/.vmlinux.h.tmp $@ || mv $(INCLUDE_DIR)/.vmlinux.h.tmp $@
else
$(call msg,CP,,$@)
$(Q)cp "$(VMLINUX_H)" $@
endif
-$(RESOLVE_BTFIDS): $(BPFOBJ) | $(BUILD_DIR)/resolve_btfids \
+$(RESOLVE_BTFIDS): $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/resolve_btfids \
$(TOOLSDIR)/bpf/resolve_btfids/main.c \
$(TOOLSDIR)/lib/rbtree.c \
$(TOOLSDIR)/lib/zalloc.c \
@@ -216,79 +403,145 @@ $(RESOLVE_BTFIDS): $(BPFOBJ) | $(BUILD_DIR)/resolve_btfids \
$(TOOLSDIR)/lib/ctype.c \
$(TOOLSDIR)/lib/str_error_r.c
$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \
- OUTPUT=$(BUILD_DIR)/resolve_btfids/ BPFOBJ=$(BPFOBJ)
+ CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" \
+ LIBBPF_INCLUDE=$(HOST_INCLUDE_DIR) \
+ OUTPUT=$(HOST_BUILD_DIR)/resolve_btfids/ BPFOBJ=$(HOST_BPFOBJ)
# Get Clang's default includes on this system, as opposed to those seen by
-# '-target bpf'. This fixes "missing" files on some architectures/distros,
+# '--target=bpf'. This fixes "missing" files on some architectures/distros,
# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
#
# Use '-idirafter': Don't interfere with include mechanics except where the
# build would have failed anyways.
define get_sys_includes
-$(shell $(1) -v -E - </dev/null 2>&1 \
+$(shell $(1) $(2) -v -E - </dev/null 2>&1 \
| sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
-$(shell $(1) -dM -E - </dev/null | grep '#define __riscv_xlen ' | sed 's/#define /-D/' | sed 's/ /=/')
+$(shell $(1) $(2) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}') \
+$(shell $(1) $(2) -dM -E - </dev/null | grep '__loongarch_grlen ' | awk '{printf("-D__BITS_PER_LONG=%d", $$3)}') \
+$(shell $(1) $(2) -dM -E - </dev/null | grep -E 'MIPS(EL|EB)|_MIPS_SZ(PTR|LONG) |_MIPS_SIM |_ABI(O32|N32|64) ' | awk '{printf("-D%s=%s ", $$2, $$3)}')
endef
# Determine target endianness.
-IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
+IS_LITTLE_ENDIAN := $(shell $(CC) -dM -E - </dev/null | \
grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__')
-MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian)
+MENDIAN:=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian)
+BPF_TARGET_ENDIAN:=$(if $(IS_LITTLE_ENDIAN),--target=bpfel,--target=bpfeb)
-CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG))
-BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
+ifneq ($(CROSS_COMPILE),)
+CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
+endif
+
+CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
+BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
-I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \
- -I$(abspath $(OUTPUT)/../usr/include)
+ -I$(abspath $(OUTPUT)/../usr/include) \
+ -std=gnu11 \
+ -fno-strict-aliasing \
+ -Wno-compare-distinct-pointer-types \
+ -Wno-initializer-overrides \
+ #
+# TODO: enable me -Wsign-compare
-CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \
- -Wno-compare-distinct-pointer-types
+CLANG_CFLAGS = $(CLANG_SYS_INCLUDES)
$(OUTPUT)/test_l4lb_noinline.o: BPF_CFLAGS += -fno-inline
$(OUTPUT)/test_xdp_noinline.o: BPF_CFLAGS += -fno-inline
$(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
+$(OUTPUT)/cgroup_getset_retval_hooks.o: cgroup_getset_retval_hooks.h
# Build BPF object using Clang
# $1 - input .c file
# $2 - output .o file
# $3 - CFLAGS
+# $4 - binary name
define CLANG_BPF_BUILD_RULE
- $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
- $(Q)$(CLANG) $3 -O2 -target bpf -c $1 -o $2 -mcpu=v3
+ $(call msg,CLNG-BPF,$4,$2)
+ $(Q)$(CLANG) $3 -O2 $(BPF_TARGET_ENDIAN) -c $1 -mcpu=v3 -o $2
endef
# Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
define CLANG_NOALU32_BPF_BUILD_RULE
- $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
- $(Q)$(CLANG) $3 -O2 -target bpf -c $1 -o $2 -mcpu=v2
+ $(call msg,CLNG-BPF,$4,$2)
+ $(Q)$(CLANG) $3 -O2 $(BPF_TARGET_ENDIAN) -c $1 -mcpu=v2 -o $2
+endef
+# Similar to CLANG_BPF_BUILD_RULE, but with cpu-v4
+define CLANG_CPUV4_BPF_BUILD_RULE
+ $(call msg,CLNG-BPF,$4,$2)
+ $(Q)$(CLANG) $3 -O2 $(BPF_TARGET_ENDIAN) -c $1 -mcpu=v4 -o $2
endef
# Build BPF object using GCC
define GCC_BPF_BUILD_RULE
- $(call msg,GCC-BPF,$(TRUNNER_BINARY),$2)
- $(Q)$(BPF_GCC) $3 -O2 -c $1 -o $2
+ $(call msg,GCC-BPF,$4,$2)
+ $(Q)$(BPF_GCC) $3 -DBPF_NO_PRESERVE_ACCESS_INDEX -Wno-attributes -O2 -c $1 -o $2
endef
SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
+LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
+ linked_vars.skel.h linked_maps.skel.h \
+ test_subskeleton.skel.h test_subskeleton_lib.skel.h \
+ test_usdt.skel.h
+
+LSKELS := fexit_sleep.c trace_printk.c trace_vprintk.c map_ptr_kern.c \
+ core_kern.c core_kern_overflow.c test_ringbuf.c \
+ test_ringbuf_n.c test_ringbuf_map_key.c test_ringbuf_write.c \
+ test_ringbuf_overwrite.c
+
+LSKELS_SIGNED := fentry_test.c fexit_test.c atomics.c
+
+# Generate both light skeleton and libbpf skeleton for these
+LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test.c \
+ kfunc_call_test_subprog.c
+SKEL_BLACKLIST += $$(LSKELS) $$(LSKELS_SIGNED)
+
+test_static_linked.skel.h-deps := test_static_linked1.bpf.o test_static_linked2.bpf.o
+linked_funcs.skel.h-deps := linked_funcs1.bpf.o linked_funcs2.bpf.o
+linked_vars.skel.h-deps := linked_vars1.bpf.o linked_vars2.bpf.o
+linked_maps.skel.h-deps := linked_maps1.bpf.o linked_maps2.bpf.o
+# In the subskeleton case, we want the test_subskeleton_lib.subskel.h file
+# but that's created as a side-effect of the skel.h generation.
+test_subskeleton.skel.h-deps := test_subskeleton_lib2.bpf.o test_subskeleton_lib.bpf.o test_subskeleton.bpf.o
+test_subskeleton_lib.skel.h-deps := test_subskeleton_lib2.bpf.o test_subskeleton_lib.bpf.o
+test_usdt.skel.h-deps := test_usdt.bpf.o test_usdt_multispec.bpf.o
+xsk_xdp_progs.skel.h-deps := xsk_xdp_progs.bpf.o
+xdp_hw_metadata.skel.h-deps := xdp_hw_metadata.bpf.o
+xdp_features.skel.h-deps := xdp_features.bpf.o
+
+LINKED_BPF_OBJS := $(foreach skel,$(LINKED_SKELS),$($(skel)-deps))
+LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(LINKED_BPF_OBJS))
+
+HEADERS_FOR_BPF_OBJS := $(wildcard $(BPFDIR)/*.bpf.h) \
+ $(addprefix $(BPFDIR)/, bpf_core_read.h \
+ bpf_endian.h \
+ bpf_helpers.h \
+ bpf_tracing.h)
+
# Set up extra TRUNNER_XXX "temporary" variables in the environment (relies on
# $eval()) and pass control to DEFINE_TEST_RUNNER_RULES.
# Parameters:
# $1 - test runner base binary name (e.g., test_progs)
-# $2 - test runner extra "flavor" (e.g., no_alu32, gcc-bpf, etc)
+# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, bpf_gcc, etc)
define DEFINE_TEST_RUNNER
+LSKEL_SIGN := -S -k $(PRIVATE_KEY) -i $(VERIFICATION_CERT)
TRUNNER_OUTPUT := $(OUTPUT)$(if $2,/)$2
TRUNNER_BINARY := $1$(if $2,-)$2
TRUNNER_TEST_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.test.o, \
$$(notdir $$(wildcard $(TRUNNER_TESTS_DIR)/*.c)))
TRUNNER_EXTRA_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, \
$$(filter %.c,$(TRUNNER_EXTRA_SOURCES)))
+TRUNNER_LIB_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, \
+ $$(filter %.c,$(TRUNNER_LIB_SOURCES)))
TRUNNER_EXTRA_HDRS := $$(filter %.h,$(TRUNNER_EXTRA_SOURCES))
TRUNNER_TESTS_HDR := $(TRUNNER_TESTS_DIR)/tests.h
TRUNNER_BPF_SRCS := $$(notdir $$(wildcard $(TRUNNER_BPF_PROGS_DIR)/*.c))
-TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, $$(TRUNNER_BPF_SRCS))
+TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.bpf.o, $$(TRUNNER_BPF_SRCS))
TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h, \
- $$(filter-out $(SKEL_BLACKLIST), \
+ $$(filter-out $(SKEL_BLACKLIST) $(LINKED_BPF_SRCS),\
$$(TRUNNER_BPF_SRCS)))
+TRUNNER_BPF_LSKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.lskel.h, $$(LSKELS) $$(LSKELS_EXTRA))
+TRUNNER_BPF_SKELS_LINKED := $$(addprefix $$(TRUNNER_OUTPUT)/,$(LINKED_SKELS))
+TRUNNER_BPF_LSKELS_SIGNED := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.lskel.h, $$(LSKELS_SIGNED))
TEST_GEN_FILES += $$(TRUNNER_BPF_OBJS)
# Evaluate rules now with extra TRUNNER_XXX variables above already defined
@@ -299,7 +552,7 @@ endef
# Using TRUNNER_XXX variables, provided by callers of DEFINE_TEST_RUNNER and
# set up by DEFINE_TEST_RUNNER itself, create test runner build rules with:
# $1 - test runner base binary name (e.g., test_progs)
-# $2 - test runner extra "flavor" (e.g., no_alu32, gcc-bpf, etc)
+# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, bpf_gcc, etc)
define DEFINE_TEST_RUNNER_RULES
ifeq ($($(TRUNNER_OUTPUT)-dir),)
@@ -313,19 +566,68 @@ endif
# input/output directory combination
ifeq ($($(TRUNNER_BPF_PROGS_DIR)$(if $2,-)$2-bpfobjs),)
$(TRUNNER_BPF_PROGS_DIR)$(if $2,-)$2-bpfobjs := y
-$(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
+$(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.bpf.o: \
$(TRUNNER_BPF_PROGS_DIR)/%.c \
$(TRUNNER_BPF_PROGS_DIR)/*.h \
$$(INCLUDE_DIR)/vmlinux.h \
- $(wildcard $(BPFDIR)/bpf_*.h) | $(TRUNNER_OUTPUT)
+ $(HEADERS_FOR_BPF_OBJS) \
+ | $(TRUNNER_OUTPUT) $$(BPFOBJ)
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
- $(TRUNNER_BPF_CFLAGS))
+ $(TRUNNER_BPF_CFLAGS) \
+ $$($$<-CFLAGS) \
+ $$($$<-$2-CFLAGS),$(TRUNNER_BINARY))
-$(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h: \
- $(TRUNNER_OUTPUT)/%.o \
- | $(BPFTOOL) $(TRUNNER_OUTPUT)
+$(TRUNNER_BPF_SKELS): %.skel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
+ $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked1.o) $$<
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked2.o) $$(<:.o=.linked1.o)
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked3.o) $$(<:.o=.linked2.o)
+ $(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
+ $(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.bpf.o=)) > $$@
+ $(Q)$$(BPFTOOL) gen subskeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.bpf.o=)) > $$(@:.skel.h=.subskel.h)
+ $(Q)rm -f $$(<:.o=.linked1.o) $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
+
+$(TRUNNER_BPF_LSKELS): %.lskel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
+ $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked1.o) $$<
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked2.o) $$(<:.o=.llinked1.o)
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked3.o) $$(<:.o=.llinked2.o)
+ $(Q)diff $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o)
+ $(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.llinked3.o) name $$(notdir $$(<:.bpf.o=_lskel)) > $$@
+ $(Q)rm -f $$(<:.o=.llinked1.o) $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o)
+
+$(TRUNNER_BPF_LSKELS_SIGNED): %.lskel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
+ $$(call msg,GEN-SKEL,$(TRUNNER_BINARY) (signed),$$@)
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked1.o) $$<
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked2.o) $$(<:.o=.llinked1.o)
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked3.o) $$(<:.o=.llinked2.o)
+ $(Q)diff $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o)
+ $(Q)$$(BPFTOOL) gen skeleton $(LSKEL_SIGN) $$(<:.o=.llinked3.o) name $$(notdir $$(<:.bpf.o=_lskel)) > $$@
+ $(Q)rm -f $$(<:.o=.llinked1.o) $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o)
+
+$(LINKED_BPF_OBJS): %: $(TRUNNER_OUTPUT)/%
+
+# .SECONDEXPANSION here allows to correctly expand %-deps variables as prerequisites
+.SECONDEXPANSION:
+$(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_OUTPUT)/%: $$$$(%-deps) $(BPFTOOL) | $(TRUNNER_OUTPUT)
+ $$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.bpf.o))
+ $(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked1.o) $$(addprefix $(TRUNNER_OUTPUT)/,$$($$(@F)-deps))
+ $(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked1.o)
+ $(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked3.o) $$(@:.skel.h=.linked2.o)
+ $(Q)diff $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked3.o)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
- $(Q)$$(BPFTOOL) gen skeleton $$< > $$@
+ $(Q)$$(BPFTOOL) gen skeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$@
+ $(Q)$$(BPFTOOL) gen subskeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$(@:.skel.h=.subskel.h)
+ $(Q)rm -f $$(@:.skel.h=.linked1.o) $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked3.o)
+
+# When the compiler generates a %.d file, only skel basenames (not
+# full paths) are listed as prerequisites of the corresponding %.o
+# file. The vpath directives below instruct make to search for skel
+# files in TRUNNER_OUTPUT if they are not present in the working directory.
+vpath %.skel.h $(TRUNNER_OUTPUT)
+vpath %.lskel.h $(TRUNNER_OUTPUT)
+vpath %.subskel.h $(TRUNNER_OUTPUT)
+
endif
# ensure we set up tests.h header generation rule just once
@@ -333,10 +635,9 @@ ifeq ($($(TRUNNER_TESTS_DIR)-tests-hdr),)
$(TRUNNER_TESTS_DIR)-tests-hdr := y
$(TRUNNER_TESTS_HDR): $(TRUNNER_TESTS_DIR)/*.c
$$(call msg,TEST-HDR,$(TRUNNER_BINARY),$$@)
- $$(shell ( cd $(TRUNNER_TESTS_DIR); \
- echo '/* Generated header, do not edit */'; \
- ls *.c 2> /dev/null | \
- sed -e 's@\([^\.]*\)\.c@DEFINE_TEST(\1)@'; \
+ $$(shell (echo '/* Generated header, do not edit */'; \
+ sed -n -E 's/^void (serial_)?test_([a-zA-Z0-9_]+)\((void)?\).*/DEFINE_TEST(\2)/p' \
+ $(TRUNNER_TESTS_DIR)/*.c | sort ; \
) > $$@)
endif
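For reference, the sed expression above matches the test entry points defined in the prog_tests/ sources. A minimal sketch of such a file (hypothetical test names; real files follow the same shape):

    /* prog_tests/foo.c -- hypothetical example. From these definitions the
     * rule above emits "DEFINE_TEST(foo)" and "DEFINE_TEST(bar)" into the
     * generated tests.h, which the test runner uses to register each test.
     */
    void test_foo(void)
    {
            /* test body */
    }

    /* the serial_ prefix is matched too; such tests run serialized */
    void serial_test_bar(void)
    {
            /* test body */
    }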
@@ -344,59 +645,132 @@ endif
# Note: we cd into output directory to ensure embedded BPF object is found
$(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \
$(TRUNNER_TESTS_DIR)/%.c \
- $(TRUNNER_EXTRA_HDRS) \
- $(TRUNNER_BPF_OBJS) \
- $(TRUNNER_BPF_SKELS) \
- $$(BPFOBJ) | $(TRUNNER_OUTPUT)
+ | $(TRUNNER_OUTPUT)/%.test.d
$$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@)
- $(Q)cd $$(@D) && $$(CC) -I. $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
+ $(Q)cd $$(@D) && $$(CC) -I. $$(CFLAGS) -MMD -MT $$@ -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
+
+$(TRUNNER_TEST_OBJS:.o=.d): $(TRUNNER_OUTPUT)/%.test.d: \
+ $(TRUNNER_TESTS_DIR)/%.c \
+ $(TRUNNER_EXTRA_HDRS) \
+ $(TRUNNER_BPF_SKELS) \
+ $(TRUNNER_BPF_LSKELS) \
+ $(TRUNNER_BPF_LSKELS_SIGNED) \
+ $(TRUNNER_BPF_SKELS_LINKED) \
+ $$(BPFOBJ) | $(TRUNNER_OUTPUT)
+
+ifeq ($(filter clean docs-clean,$(MAKECMDGOALS)),)
+include $(wildcard $(TRUNNER_TEST_OBJS:.o=.d))
+endif
+
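As a sketch of what these included dependency files contain (hypothetical test name and paths; the real contents come from the -MMD -MT flags passed to the compiler above), a generated foo.test.d looks roughly like:

    /path/to/output/foo.test.o: /path/to/src/prog_tests/foo.c \
     foo.skel.h test_progs.h

Because compilation happens after a cd into the output directory with -I., skeleton headers are recorded by basename only, which is why the vpath directives above are needed to locate them in TRUNNER_OUTPUT.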
+# add per-extra-object CFLAGS definitions
+$(foreach N,$(patsubst $(TRUNNER_OUTPUT)/%.o,%,$(TRUNNER_EXTRA_OBJS)), \
+ $(eval $(TRUNNER_OUTPUT)/$(N).o: CFLAGS += $($(N).c-CFLAGS)))
$(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \
%.c \
$(TRUNNER_EXTRA_HDRS) \
+ $(VERIFY_SIG_HDR) \
$(TRUNNER_TESTS_HDR) \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
$$(call msg,EXT-OBJ,$(TRUNNER_BINARY),$$@)
$(Q)$$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@
-# only copy extra resources if in flavored build
+$(TRUNNER_LIB_OBJS): $(TRUNNER_OUTPUT)/%.o:$(TOOLSDIR)/lib/%.c
+ $$(call msg,LIB-OBJ,$(TRUNNER_BINARY),$$@)
+ $(Q)$$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@
+
+# non-flavored in-srctree builds receive special treatment; in particular,
+# extra resources do not need to be copied (see e.g. test_btf_dump_case())
$(TRUNNER_BINARY)-extras: $(TRUNNER_EXTRA_FILES) | $(TRUNNER_OUTPUT)
-ifneq ($2,)
+ifneq ($2:$(OUTPUT),:$(shell pwd))
$$(call msg,EXT-COPY,$(TRUNNER_BINARY),$(TRUNNER_EXTRA_FILES))
- $(Q)cp -a $$^ $(TRUNNER_OUTPUT)/
+ $(Q)rsync -aq $$^ $(TRUNNER_OUTPUT)/
endif
+# some X.test.o files have runtime dependencies on Y.bpf.o files
+$(OUTPUT)/$(TRUNNER_BINARY): | $(TRUNNER_BPF_OBJS)
+
$(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
$(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \
+ $(TRUNNER_LIB_OBJS) \
$(RESOLVE_BTFIDS) \
+ $(TRUNNER_BPFTOOL) \
+ $(OUTPUT)/veristat \
| $(TRUNNER_BINARY)-extras
$$(call msg,BINARY,,$$@)
- $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
- $(Q)$(RESOLVE_BTFIDS) --no-fail --btf $(TRUNNER_OUTPUT)/btf_data.o $$@
+ $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) $$(LLVM_LDLIBS) $$(LDFLAGS) $$(LLVM_LDFLAGS) -o $$@
+ $(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.bpf.o $$@
+ $(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/$(USE_BOOTSTRAP)bpftool \
+ $(OUTPUT)/$(if $2,$2/)bpftool
endef
+VERIFY_SIG_SETUP := $(CURDIR)/verify_sig_setup.sh
+VERIFY_SIG_HDR := verification_cert.h
+VERIFICATION_CERT := $(BUILD_DIR)/signing_key.der
+PRIVATE_KEY := $(BUILD_DIR)/signing_key.pem
+
+$(VERIFICATION_CERT) $(PRIVATE_KEY): $(VERIFY_SIG_SETUP)
+ $(Q)mkdir -p $(BUILD_DIR)
+ $(Q)$(VERIFY_SIG_SETUP) genkey $(BUILD_DIR)
+
+$(VERIFY_SIG_HDR): $(VERIFICATION_CERT)
+ $(Q)ln -fs $< test_progs_verification_cert && \
+ xxd -i test_progs_verification_cert > $@
+
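For orientation, ``xxd -i`` names the emitted C array after its input file, so the generated header has roughly the following shape (the byte values and length below are placeholders, not real certificate contents):

    /* hypothetical contents of verification_cert.h */
    unsigned char test_progs_verification_cert[] = {
            0x30, 0x82, 0x03, 0x8b, /* ... remaining DER-encoded bytes ... */
    };
    unsigned int test_progs_verification_cert_len = 911;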
# Define test_progs test runner.
TRUNNER_TESTS_DIR := prog_tests
TRUNNER_BPF_PROGS_DIR := progs
-TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \
- network_helpers.c testing_helpers.c \
- btf_helpers.c flow_dissector_load.h
-TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
- ima_setup.sh \
- $(wildcard progs/btf_dump_test_case_*.c)
+TRUNNER_EXTRA_SOURCES := test_progs.c \
+ cgroup_helpers.c \
+ trace_helpers.c \
+ network_helpers.c \
+ testing_helpers.c \
+ btf_helpers.c \
+ cap_helpers.c \
+ unpriv_helpers.c \
+ netlink_helpers.c \
+ jit_disasm_helpers.c \
+ io_helpers.c \
+ test_loader.c \
+ xsk.c \
+ disasm.c \
+ disasm_helpers.c \
+ json_writer.c \
+ $(VERIFY_SIG_HDR) \
+ flow_dissector_load.h \
+ ip_check_defrag_frags.h
+TRUNNER_LIB_SOURCES := find_bit.c
+TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read \
+ $(OUTPUT)/liburandom_read.so \
+ $(OUTPUT)/xdp_synproxy \
+ $(OUTPUT)/sign-file \
+ $(OUTPUT)/uprobe_multi \
+ $(TEST_KMOD_TARGETS) \
+ ima_setup.sh \
+ $(VERIFY_SIG_SETUP) \
+ $(wildcard progs/btf_dump_test_case_*.c) \
+ $(wildcard progs/*.bpf.o)
TRUNNER_BPF_BUILD_RULE := CLANG_BPF_BUILD_RULE
-TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS)
+TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS) -DENABLE_ATOMICS_TESTS
$(eval $(call DEFINE_TEST_RUNNER,test_progs))
# Define test_progs-no_alu32 test runner.
TRUNNER_BPF_BUILD_RULE := CLANG_NOALU32_BPF_BUILD_RULE
+TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS)
$(eval $(call DEFINE_TEST_RUNNER,test_progs,no_alu32))
+# Define test_progs-cpuv4 test runner.
+ifneq ($(CLANG_CPUV4),)
+TRUNNER_BPF_BUILD_RULE := CLANG_CPUV4_BPF_BUILD_RULE
+TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS) -DENABLE_ATOMICS_TESTS
+$(eval $(call DEFINE_TEST_RUNNER,test_progs,cpuv4))
+endif
+
# Define test_progs BPF-GCC-flavored test runner.
ifneq ($(BPF_GCC),)
TRUNNER_BPF_BUILD_RULE := GCC_BPF_BUILD_RULE
-TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(call get_sys_includes,gcc)
+TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(call get_sys_includes,gcc,)
$(eval $(call DEFINE_TEST_RUNNER,test_progs,bpf_gcc))
endif
@@ -404,6 +778,7 @@ endif
TRUNNER_TESTS_DIR := map_tests
TRUNNER_BPF_PROGS_DIR := progs
TRUNNER_EXTRA_SOURCES := test_maps.c
+TRUNNER_LIB_SOURCES :=
TRUNNER_EXTRA_FILES :=
TRUNNER_BPF_BUILD_RULE := $$(error no BPF objects should be built)
TRUNNER_BPF_CFLAGS :=
@@ -424,30 +799,110 @@ $(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT)
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
+# Include find_bit.c to compile xskxceiver.
+EXTRA_SRC := $(TOOLSDIR)/lib/find_bit.c prog_tests/test_xsk.c prog_tests/test_xsk.h
+$(OUTPUT)/xskxceiver: $(EXTRA_SRC) xskxceiver.c xskxceiver.h $(OUTPUT)/network_helpers.o $(OUTPUT)/xsk.o $(OUTPUT)/xsk_xdp_progs.skel.h $(BPFOBJ) | $(OUTPUT)
+ $(call msg,BINARY,,$@)
+ $(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
+
+$(OUTPUT)/xdp_hw_metadata: xdp_hw_metadata.c $(OUTPUT)/network_helpers.o $(OUTPUT)/xsk.o $(OUTPUT)/xdp_hw_metadata.skel.h | $(OUTPUT)
+ $(call msg,BINARY,,$@)
+ $(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
+
+$(OUTPUT)/xdp_features: xdp_features.c $(OUTPUT)/network_helpers.o $(OUTPUT)/xdp_features.skel.h | $(OUTPUT)
+ $(call msg,BINARY,,$@)
+ $(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
+
# Make sure we are able to include and link libbpf against c++.
+CXXFLAGS += $(CFLAGS)
+CXXFLAGS := $(subst -D_GNU_SOURCE=,,$(CXXFLAGS))
+CXXFLAGS := $(subst -std=gnu11,-std=gnu++11,$(CXXFLAGS))
$(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
$(call msg,CXX,,$@)
- $(Q)$(CXX) $(CFLAGS) $^ $(LDLIBS) -o $@
+ $(Q)$(CXX) $(CXXFLAGS) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@
# Benchmark runner
-$(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h
+$(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h $(BPFOBJ)
$(call msg,CC,,$@)
- $(Q)$(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@
+ $(Q)$(CC) $(CFLAGS) -O2 -c $(filter %.c,$^) $(LDLIBS) -o $@
$(OUTPUT)/bench_rename.o: $(OUTPUT)/test_overhead.skel.h
$(OUTPUT)/bench_trigger.o: $(OUTPUT)/trigger_bench.skel.h
$(OUTPUT)/bench_ringbufs.o: $(OUTPUT)/ringbuf_bench.skel.h \
$(OUTPUT)/perfbuf_bench.skel.h
-$(OUTPUT)/bench.o: bench.h testing_helpers.h
+$(OUTPUT)/bench_bloom_filter_map.o: $(OUTPUT)/bloom_filter_bench.skel.h
+$(OUTPUT)/bench_bpf_loop.o: $(OUTPUT)/bpf_loop_bench.skel.h
+$(OUTPUT)/bench_strncmp.o: $(OUTPUT)/strncmp_bench.skel.h
+$(OUTPUT)/bench_bpf_hashmap_full_update.o: $(OUTPUT)/bpf_hashmap_full_update_bench.skel.h
+$(OUTPUT)/bench_local_storage.o: $(OUTPUT)/local_storage_bench.skel.h
+$(OUTPUT)/bench_local_storage_rcu_tasks_trace.o: $(OUTPUT)/local_storage_rcu_tasks_trace_bench.skel.h
+$(OUTPUT)/bench_local_storage_create.o: $(OUTPUT)/bench_local_storage_create.skel.h
+$(OUTPUT)/bench_bpf_hashmap_lookup.o: $(OUTPUT)/bpf_hashmap_lookup.skel.h
+$(OUTPUT)/bench_htab_mem.o: $(OUTPUT)/htab_mem_bench.skel.h
+$(OUTPUT)/bench_bpf_crypto.o: $(OUTPUT)/crypto_bench.skel.h
+$(OUTPUT)/bench_sockmap.o: $(OUTPUT)/bench_sockmap_prog.skel.h
+$(OUTPUT)/bench_lpm_trie_map.o: $(OUTPUT)/lpm_trie_bench.skel.h $(OUTPUT)/lpm_trie_map.skel.h
+$(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ)
$(OUTPUT)/bench: LDLIBS += -lm
-$(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \
+$(OUTPUT)/bench: $(OUTPUT)/bench.o \
+ $(TESTING_HELPERS) \
+ $(TRACE_HELPERS) \
+ $(CGROUP_HELPERS) \
$(OUTPUT)/bench_count.o \
$(OUTPUT)/bench_rename.o \
$(OUTPUT)/bench_trigger.o \
- $(OUTPUT)/bench_ringbufs.o
+ $(OUTPUT)/bench_ringbufs.o \
+ $(OUTPUT)/bench_bloom_filter_map.o \
+ $(OUTPUT)/bench_bpf_loop.o \
+ $(OUTPUT)/bench_strncmp.o \
+ $(OUTPUT)/bench_bpf_hashmap_full_update.o \
+ $(OUTPUT)/bench_local_storage.o \
+ $(OUTPUT)/bench_local_storage_rcu_tasks_trace.o \
+ $(OUTPUT)/bench_bpf_hashmap_lookup.o \
+ $(OUTPUT)/bench_local_storage_create.o \
+ $(OUTPUT)/bench_htab_mem.o \
+ $(OUTPUT)/bench_bpf_crypto.o \
+ $(OUTPUT)/bench_sockmap.o \
+ $(OUTPUT)/bench_lpm_trie_map.o \
+ #
+ $(call msg,BINARY,,$@)
+ $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
+
+# This works around a GCC warning about snprintf potentially truncating
+# strings, as in:
+#
+#     char a[PATH_MAX], b[PATH_MAX];
+#     snprintf(a, sizeof(a), "%s/foo", b); // triggers -Wformat-truncation
+$(OUTPUT)/veristat.o: CFLAGS += -Wno-format-truncation
+$(OUTPUT)/veristat.o: $(BPFOBJ)
+$(OUTPUT)/veristat: $(OUTPUT)/veristat.o
$(call msg,BINARY,,$@)
- $(Q)$(CC) $(LDFLAGS) -o $@ $(filter %.a %.o,$^) $(LDLIBS)
+ $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
-EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) \
+# Linking uprobe_multi can fail due to relocation overflows on mips.
+$(OUTPUT)/uprobe_multi: CFLAGS += $(if $(filter mips, $(ARCH)),-mxgot)
+$(OUTPUT)/uprobe_multi: uprobe_multi.c uprobe_multi.ld
+ $(call msg,BINARY,,$@)
+ $(Q)$(CC) $(CFLAGS) -Wl,-T,uprobe_multi.ld -O0 $(LDFLAGS) \
+ $(filter-out %.ld,$^) $(LDLIBS) -o $@
+
+EXTRA_CLEAN := $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
- feature \
- $(addprefix $(OUTPUT)/,*.o *.skel.h no_alu32 bpf_gcc bpf_testmod.ko)
+ feature bpftool $(TEST_KMOD_TARGETS) \
+ $(addprefix $(OUTPUT)/,*.o *.d *.skel.h *.lskel.h *.subskel.h \
+ no_alu32 cpuv4 bpf_gcc \
+ liburandom_read.so) \
+ $(OUTPUT)/FEATURE-DUMP.selftests \
+ test_progs_verification_cert
+
+.PHONY: docs docs-clean
+
+# Delete partially updated (corrupted) files on error
+.DELETE_ON_ERROR:
+
+DEFAULT_INSTALL_RULE := $(INSTALL_RULE)
+override define INSTALL_RULE
+ $(DEFAULT_INSTALL_RULE)
+ @for DIR in $(TEST_INST_SUBDIRS); do \
+ mkdir -p $(INSTALL_PATH)/$$DIR; \
+ rsync -a $(OUTPUT)/$$DIR/*.bpf.o $(INSTALL_PATH)/$$DIR;\
+ done
+endef
diff --git a/tools/testing/selftests/bpf/Makefile.docs b/tools/testing/selftests/bpf/Makefile.docs
new file mode 100644
index 000000000000..f7f9e7088bb3
--- /dev/null
+++ b/tools/testing/selftests/bpf/Makefile.docs
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+include ../../../scripts/Makefile.include
+include ../../../scripts/utilities.mak
+
+INSTALL ?= install
+RM ?= rm -f
+RMDIR ?= rmdir --ignore-fail-on-non-empty
+
+prefix ?= /usr/local
+mandir ?= $(prefix)/man
+man2dir = $(mandir)/man2
+man7dir = $(mandir)/man7
+
+SYSCALL_RST = bpf-syscall.rst
+MAN2_RST = $(SYSCALL_RST)
+
+HELPERS_RST = bpf-helpers.rst
+MAN7_RST = $(HELPERS_RST)
+
+_DOC_MAN2 = $(patsubst %.rst,%.2,$(MAN2_RST))
+DOC_MAN2 = $(addprefix $(OUTPUT),$(_DOC_MAN2))
+
+_DOC_MAN7 = $(patsubst %.rst,%.7,$(MAN7_RST))
+DOC_MAN7 = $(addprefix $(OUTPUT),$(_DOC_MAN7))
+
+DOCTARGETS := helpers syscall
+
+docs: $(DOCTARGETS)
+syscall: man2
+helpers: man7
+man2: $(DOC_MAN2)
+man7: $(DOC_MAN7)
+
+RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
+
+# Configure make rules for the man page bpf-$1.$2.
+# $1 - target for scripts/bpf_doc.py
+# $2 - man page section for which to generate the troff file
+define DOCS_RULES =
+$(OUTPUT)bpf-$1.rst: ../../../../include/uapi/linux/bpf.h
+ $$(QUIET_GEN)../../../../scripts/bpf_doc.py $1 \
+ --filename $$< > $$@
+
+$(OUTPUT)%.$2: $(OUTPUT)%.rst
+ifndef RST2MAN_DEP
+ $$(error "rst2man not found, but required to generate man pages")
+endif
+ $$(QUIET_GEN)rst2man --exit-status=1 $$< > $$@.tmp
+ $$(QUIET_GEN)mv $$@.tmp $$@
+
+docs-clean-$1:
+ $$(call QUIET_CLEAN, eBPF_$1-manpage)
+ $(Q)$(RM) $$(DOC_MAN$2) $(OUTPUT)bpf-$1.rst
+
+docs-install-$1: docs
+ $$(call QUIET_INSTALL, eBPF_$1-manpage)
+ $(Q)$(INSTALL) -d -m 755 $(DESTDIR)$$(man$2dir)
+ $(Q)$(INSTALL) -m 644 $$(DOC_MAN$2) $(DESTDIR)$$(man$2dir)
+
+docs-uninstall-$1:
+ $$(call QUIET_UNINST, eBPF_$1-manpage)
+ $(Q)$(RM) $$(addprefix $(DESTDIR)$$(man$2dir)/,$$(_DOC_MAN$2))
+ $(Q)$(RMDIR) $(DESTDIR)$$(man$2dir)
+
+.PHONY: $1 docs-clean-$1 docs-install-$1 docs-uninstall-$1
+endef
+
+# Create the make targets to generate manual pages by name and section
+$(eval $(call DOCS_RULES,helpers,7))
+$(eval $(call DOCS_RULES,syscall,2))
+
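To make the metaprogramming concrete: with $1=helpers and $2=7 substituted, the first $(eval) above instantiates rules roughly of this shape (the double ``$$`` escapes in the define collapse to single ``$`` on expansion):

    $(OUTPUT)bpf-helpers.rst: ../../../../include/uapi/linux/bpf.h
    	$(QUIET_GEN)../../../../scripts/bpf_doc.py helpers --filename $< > $@

    $(OUTPUT)%.7: $(OUTPUT)%.rst
    	$(QUIET_GEN)rst2man --exit-status=1 $< > $@.tmp
    	$(QUIET_GEN)mv $@.tmp $@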
+docs-clean: $(foreach doctarget,$(DOCTARGETS), docs-clean-$(doctarget))
+docs-install: $(foreach doctarget,$(DOCTARGETS), docs-install-$(doctarget))
+docs-uninstall: $(foreach doctarget,$(DOCTARGETS), docs-uninstall-$(doctarget))
+
+.PHONY: docs docs-clean docs-install docs-uninstall man2 man7
diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst
index ca064180d4d0..776fbe3cb8f9 100644
--- a/tools/testing/selftests/bpf/README.rst
+++ b/tools/testing/selftests/bpf/README.rst
@@ -6,6 +6,116 @@ General instructions on running selftests can be found in
__ /Documentation/bpf/bpf_devel_QA.rst#q-how-to-run-bpf-selftests
+=============
+BPF CI System
+=============
+
+BPF employs a continuous integration (CI) system to check patch submissions in
+an automated fashion. The system runs selftests for each patch in a series.
+Results are propagated to patchwork, where failures are highlighted similarly
+to violations of other checks (such as additional warnings being emitted or a
+deficiency reported by ``scripts/checkpatch.pl``):
+
+ https://patchwork.kernel.org/project/netdevbpf/list/?delegate=121173
+
+The CI system executes tests on multiple architectures. It uses a kernel
+configuration derived from both the generic and architecture-specific config
+file fragments below ``tools/testing/selftests/bpf/`` (e.g., ``config`` and
+``config.x86_64``).
+
+Denylisting Tests
+=================
+
+Some architectures may lack support for certain BPF features, and tests
+relying on those features will fail in CI. An example of such a shortcoming is
+BPF trampoline support on IBM's s390x architecture. For cases like this, an
+in-tree deny list file, located at
+``tools/testing/selftests/bpf/DENYLIST.<arch>``, can be used to prevent the
+test from running on such an architecture.
+
+In addition to that, the generic ``tools/testing/selftests/bpf/DENYLIST`` is
+honored on every architecture running tests.
+
+These files are organized in three columns. The first column lists the test in
+question. This can be the name of a test suite or of an individual test. The
+remaining two columns provide additional metadata that helps identify and
+classify the entry: column two is a copy of the error reported when running
+the test in the setting in question. The third column, if available,
+summarizes the underlying problem. A value of ``trampoline``, for example,
+indicates that lack of trampoline support is causing the test to fail. This
+last entry helps identify tests that can be re-enabled once such support is
+added.
+
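A hypothetical deny list entry, illustrating the three columns (the real files follow the same one-line-per-test layout):

.. code-block:: none

   fexit_sleep    # fexit attach failed: -524    (trampoline)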
+=========================
+Running Selftests in a VM
+=========================
+
+It's now possible to run the selftests using ``tools/testing/selftests/bpf/vmtest.sh``.
+The script tries to ensure that the tests are run in the same environment in
+which they would be run post-submit in the CI used by the Maintainers, with
+the exception that deny lists are not automatically honored.
+
+This script uses the in-tree kernel configuration and downloads a VM userspace
+image from the system used by the CI. It builds the kernel (without overwriting
+your existing Kconfig), recompiles the bpf selftests, runs them (by default
+``tools/testing/selftests/bpf/test_progs``) and saves the resulting output (by
+default in ``~/.bpf_selftests``).
+
+Script dependencies:
+- clang (preferably built from sources, https://github.com/llvm/llvm-project);
+- pahole (preferably built from sources, https://git.kernel.org/pub/scm/devel/pahole/pahole.git/);
+- qemu;
+- docutils (for ``rst2man``);
+- libcap-devel.
+
+For more information about using the script, run:
+
+.. code-block:: console
+
+ $ tools/testing/selftests/bpf/vmtest.sh -h
+
+In case of linker errors when running selftests, try using static linking:
+
+.. code-block:: console
+
+ $ LDLIBS=-static PKG_CONFIG='pkg-config --static' vmtest.sh
+
+.. note:: Some distros may not support static linking.
+
+.. note:: The script uses the pahole and clang binaries found in the host
+   environment. To use different pahole or llvm builds, adjust the `PATH`
+   environment variable at the beginning of the script.
+
+Running vmtest on RV64
+======================
+To speed up testing and avoid various dependency issues, it is recommended to
+run vmtest in a Docker container. Before running vmtest, we need to prepare a
+Docker container and a local rootfs image. The overall steps are as follows:
+
+1. Create a Docker container as shown in link [0].
+
+2. Use the mkrootfs_debian.sh script [1] to build a local rootfs image:
+
+.. code-block:: console
+
+ $ sudo ./mkrootfs_debian.sh --arch riscv64 --distro noble
+
+3. Start the Docker container [0] and run vmtest in the container:
+
+.. code-block:: console
+
+ $ PLATFORM=riscv64 CROSS_COMPILE=riscv64-linux-gnu- \
+ tools/testing/selftests/bpf/vmtest.sh \
+ -l <path of local rootfs image> -- \
+ ./test_progs -d \
+ \"$(cat tools/testing/selftests/bpf/DENYLIST.riscv64 \
+ | cut -d'#' -f1 \
+ | sed -e 's/^[[:space:]]*//' \
+ -e 's/[[:space:]]*$//' \
+ | tr -s '\n' ',' \
+ )\"
+
+Link: https://github.com/pulehui/riscv-bpf-vmtest.git [0]
+Link: https://github.com/libbpf/ci/blob/main/rootfs/mkrootfs_debian.sh [1]
Additional information about selftest failures is
documented here.
@@ -35,7 +145,7 @@ the insn 20 undoes map_value addition. It is currently impossible for the
verifier to understand such speculative pointer arithmetic.
Hence `this patch`__ addresses it on the compiler side. It was committed in llvm 12.
-__ https://reviews.llvm.org/D85570
+__ https://github.com/llvm/llvm-project/commit/ddf1864ace484035e3cde5e83b3a31ac81e059c6
The corresponding C code
@@ -85,7 +195,46 @@ This is due to a llvm BPF backend bug. `The fix`__
has been pushed to llvm 10.x release branch and will be
available in 10.0.1. The patch is available in llvm 11.0.0 trunk.
-__ https://reviews.llvm.org/D78466
+__ https://github.com/llvm/llvm-project/commit/3cb7e7bf959dcd3b8080986c62e10a75c7af43f0
+
+bpf_verif_scale/loop6.bpf.o test failure with Clang 12
+======================================================
+
+With Clang 12, the following bpf_verif_scale test failed:
+ * ``bpf_verif_scale/loop6.bpf.o``
+
+The verifier output looks like
+
+.. code-block:: c
+
+ R1 type=ctx expected=fp
+ The sequence of 8193 jumps is too complex.
+
+The reason is the compiler generating the following code:
+
+.. code-block:: c
+
+ ; for (i = 0; (i < VIRTIO_MAX_SGS) && (i < num); i++) {
+ 14: 16 05 40 00 00 00 00 00 if w5 == 0 goto +64 <LBB0_6>
+ 15: bc 51 00 00 00 00 00 00 w1 = w5
+ 16: 04 01 00 00 ff ff ff ff w1 += -1
+ 17: 67 05 00 00 20 00 00 00 r5 <<= 32
+ 18: 77 05 00 00 20 00 00 00 r5 >>= 32
+ 19: a6 01 01 00 05 00 00 00 if w1 < 5 goto +1 <LBB0_4>
+ 20: b7 05 00 00 06 00 00 00 r5 = 6
+ 00000000000000a8 <LBB0_4>:
+ 21: b7 02 00 00 00 00 00 00 r2 = 0
+ 22: b7 01 00 00 00 00 00 00 r1 = 0
+ ; for (i = 0; (i < VIRTIO_MAX_SGS) && (i < num); i++) {
+ 23: 7b 1a e0 ff 00 00 00 00 *(u64 *)(r10 - 32) = r1
+ 24: 7b 5a c0 ff 00 00 00 00 *(u64 *)(r10 - 64) = r5
+
+Note that insn #15 has w1 = w5, and w1 is refined later, but
+r5 (w5) is eventually saved on the stack at insn #24 for later use.
+This causes the later verifier failure. The bug has been `fixed`__ in
+Clang 13.
+
+__ https://github.com/llvm/llvm-project/commit/1959ead525b8830cc8a345f45e1c3ef9902d3229
BPF CO-RE-based tests and Clang version
=======================================
@@ -102,8 +251,89 @@ failures:
- __builtin_btf_type_id() [0_, 1_, 2_];
- __builtin_preserve_type_info(), __builtin_preserve_enum_value() [3_, 4_].
-.. _0: https://reviews.llvm.org/D74572
-.. _1: https://reviews.llvm.org/D74668
-.. _2: https://reviews.llvm.org/D85174
-.. _3: https://reviews.llvm.org/D83878
-.. _4: https://reviews.llvm.org/D83242
+.. _0: https://github.com/llvm/llvm-project/commit/6b01b465388b204d543da3cf49efd6080db094a9
+.. _1: https://github.com/llvm/llvm-project/commit/072cde03aaa13a2c57acf62d79876bf79aa1919f
+.. _2: https://github.com/llvm/llvm-project/commit/00602ee7ef0bf6c68d690a2bd729c12b95c95c99
+.. _3: https://github.com/llvm/llvm-project/commit/6d218b4adb093ff2e9764febbbc89f429412006c
+.. _4: https://github.com/llvm/llvm-project/commit/6d6750696400e7ce988d66a1a00e1d0cb32815f8
+
+Floating-point tests and Clang version
+======================================
+
+Certain selftests, e.g. core_reloc, require support for floating-point
+types, which was introduced in `Clang 13`__. Older Clang versions will
+either crash when compiling these tests or generate incorrect BTF.
+
+__ https://github.com/llvm/llvm-project/commit/a7137b238a07d9399d3ae96c0b461571bd5aa8b2
+
+Kernel function call test and Clang version
+===========================================
+
+Some selftests (e.g. kfunc_call and bpf_tcp_ca) require LLVM support
+for generating extern functions in BTF. It was introduced in `Clang 13`__.
+
+Without it, the error from compiling bpf selftests looks like:
+
+.. code-block:: console
+
+ libbpf: failed to find BTF for extern 'tcp_slow_start' [25] section: -2
+
+__ https://github.com/llvm/llvm-project/commit/886f9ff53155075bd5f1e994f17b85d1e1b7470c
+
+btf_tag test and Clang version
+==============================
+
+The btf_tag selftest requires LLVM support to recognize the btf_decl_tag and
+btf_type_tag attributes. They were introduced in `Clang 14` [0_, 1_].
+The subtests ``btf_type_tag_user_{mod1, mod2, vmlinux}`` also require
+pahole version ``1.23``.
+
+Without them, the btf_tag selftest will be skipped and you will observe:
+
+.. code-block:: console
+
+ #<test_num> btf_tag:SKIP
+
+.. _0: https://github.com/llvm/llvm-project/commit/a162b67c98066218d0d00aa13b99afb95d9bb5e6
+.. _1: https://github.com/llvm/llvm-project/commit/3466e00716e12e32fdb100e3fcfca5c2b3e8d784
+
+Clang dependencies for static linking tests
+===========================================
+
+The linked_vars, linked_maps, and linked_funcs tests depend on a `Clang fix`__
+to generate valid BTF information for weak variables. Please make sure you use
+a Clang build that contains the fix.
+
+__ https://github.com/llvm/llvm-project/commit/968292cb93198442138128d850fd54dc7edc0035
+
+Clang relocation changes
+========================
+
+The Clang 13 patch `clang reloc patch`_ changed relocation handling such
+that existing relocation types were split into more specific types, with
+each new type corresponding to exactly one way of resolving the relocation.
+See `kernel llvm reloc`_ for more explanation and some examples.
+When clang 13 is used to compile an old libbpf that has static linker
+support, there will be a compilation failure::
+
+ libbpf: ELF relo #0 in section #6 has unexpected type 2 in .../bpf_tcp_nogpl.bpf.o
+
+Here, ``type 2`` refers to the new relocation type ``R_BPF_64_ABS64``.
+To fix this issue, use a newer libbpf.
+
+.. Links
+.. _clang reloc patch: https://github.com/llvm/llvm-project/commit/6a2ea84600ba4bd3b2733bd8f08f5115eb32164b
+.. _kernel llvm reloc: /Documentation/bpf/llvm_reloc.rst
+
+Clang dependencies for the u32 spill test (xdpwall)
+===================================================
+The xdpwall selftest requires a change in `Clang 14`__.
+
+Without it, the xdpwall selftest will fail and the error message
+from running test_progs will look like:
+
+.. code-block:: console
+
+ test_xdpwall:FAIL:Does LLVM have https://github.com/llvm/llvm-project/commit/ea72b0319d7b0f0c2fcf41d121afa5d031b319d5? unexpected error: -4007
+
+__ https://github.com/llvm/llvm-project/commit/ea72b0319d7b0f0c2fcf41d121afa5d031b319d5
diff --git a/tools/testing/selftests/bpf/autoconf_helper.h b/tools/testing/selftests/bpf/autoconf_helper.h
new file mode 100644
index 000000000000..5b243b9cdf8c
--- /dev/null
+++ b/tools/testing/selftests/bpf/autoconf_helper.h
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#ifdef HAVE_GENHDR
+# include "autoconf.h"
+#else
+# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
+# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
+# endif
+#endif
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index 332ed2f7b402..bd29bb2e6cb5 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -8,16 +8,17 @@
#include <fcntl.h>
#include <pthread.h>
#include <sys/sysinfo.h>
-#include <sys/resource.h>
#include <signal.h>
#include "bench.h"
+#include "bpf_util.h"
#include "testing_helpers.h"
struct env env = {
.warmup_sec = 1,
.duration_sec = 5,
.affinity = false,
- .consumer_cnt = 1,
+ .quiet = false,
+ .consumer_cnt = 0,
.producer_cnt = 1,
};
@@ -29,25 +30,39 @@ static int libbpf_print_fn(enum libbpf_print_level level,
return vfprintf(stderr, format, args);
}
-static int bump_memlock_rlimit(void)
+void setup_libbpf(void)
{
- struct rlimit rlim_new = {
- .rlim_cur = RLIM_INFINITY,
- .rlim_max = RLIM_INFINITY,
- };
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+ libbpf_set_print(libbpf_print_fn);
+}
+
+void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns)
+{
+ long total = res->false_hits + res->hits + res->drops;
+
+ printf("Iter %3d (%7.3lfus): ",
+ iter, (delta_ns - 1000000000) / 1000.0);
- return setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+ printf("%ld false hits of %ld total operations. Percentage = %2.2f %%\n",
+ res->false_hits, total, ((float)res->false_hits / total) * 100);
}
-void setup_libbpf()
+void false_hits_report_final(struct bench_res res[], int res_cnt)
{
- int err;
+ long total_hits = 0, total_drops = 0, total_false_hits = 0, total_ops = 0;
+ int i;
- libbpf_set_print(libbpf_print_fn);
+ for (i = 0; i < res_cnt; i++) {
+ total_hits += res[i].hits;
+ total_false_hits += res[i].false_hits;
+ total_drops += res[i].drops;
+ }
+ total_ops = total_hits + total_false_hits + total_drops;
- err = bump_memlock_rlimit();
- if (err)
- fprintf(stderr, "failed to increase RLIMIT_MEMLOCK: %d", err);
+ printf("Summary: %ld false hits of %ld total operations. ",
+ total_false_hits, total_ops);
+ printf("Percentage = %2.2f %%\n",
+ ((float)total_false_hits / total_ops) * 100);
}
void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns)
@@ -62,20 +77,59 @@ void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns)
printf("Iter %3d (%7.3lfus): ",
iter, (delta_ns - 1000000000) / 1000.0);
- printf("hits %8.3lfM/s (%7.3lfM/prod), drops %8.3lfM/s\n",
- hits_per_sec, hits_per_prod, drops_per_sec);
+ printf("hits %8.3lfM/s (%7.3lfM/prod), drops %8.3lfM/s, total operations %8.3lfM/s\n",
+ hits_per_sec, hits_per_prod, drops_per_sec, hits_per_sec + drops_per_sec);
+}
+
+void
+grace_period_latency_basic_stats(struct bench_res res[], int res_cnt, struct basic_stats *gp_stat)
+{
+ int i;
+
+ memset(gp_stat, 0, sizeof(struct basic_stats));
+
+ for (i = 0; i < res_cnt; i++)
+ gp_stat->mean += res[i].gp_ns / 1000.0 / (double)res[i].gp_ct / (0.0 + res_cnt);
+
+#define IT_MEAN_DIFF (res[i].gp_ns / 1000.0 / (double)res[i].gp_ct - gp_stat->mean)
+ if (res_cnt > 1) {
+ for (i = 0; i < res_cnt; i++)
+ gp_stat->stddev += (IT_MEAN_DIFF * IT_MEAN_DIFF) / (res_cnt - 1.0);
+ }
+ gp_stat->stddev = sqrt(gp_stat->stddev);
+#undef IT_MEAN_DIFF
+}
+
+void
+grace_period_ticks_basic_stats(struct bench_res res[], int res_cnt, struct basic_stats *gp_stat)
+{
+ int i;
+
+ memset(gp_stat, 0, sizeof(struct basic_stats));
+ for (i = 0; i < res_cnt; i++)
+ gp_stat->mean += res[i].stime / (double)res[i].gp_ct / (0.0 + res_cnt);
+
+#define IT_MEAN_DIFF (res[i].stime / (double)res[i].gp_ct - gp_stat->mean)
+ if (res_cnt > 1) {
+ for (i = 0; i < res_cnt; i++)
+ gp_stat->stddev += (IT_MEAN_DIFF * IT_MEAN_DIFF) / (res_cnt - 1.0);
+ }
+ gp_stat->stddev = sqrt(gp_stat->stddev);
+#undef IT_MEAN_DIFF
}
void hits_drops_report_final(struct bench_res res[], int res_cnt)
{
int i;
- double hits_mean = 0.0, drops_mean = 0.0;
- double hits_stddev = 0.0, drops_stddev = 0.0;
+ double hits_mean = 0.0, drops_mean = 0.0, total_ops_mean = 0.0;
+ double hits_stddev = 0.0, drops_stddev = 0.0, total_ops_stddev = 0.0;
+ double total_ops;
for (i = 0; i < res_cnt; i++) {
hits_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt);
drops_mean += res[i].drops / 1000000.0 / (0.0 + res_cnt);
}
+ total_ops_mean = hits_mean + drops_mean;
if (res_cnt > 1) {
for (i = 0; i < res_cnt; i++) {
@@ -85,14 +139,101 @@ void hits_drops_report_final(struct bench_res res[], int res_cnt)
drops_stddev += (drops_mean - res[i].drops / 1000000.0) *
(drops_mean - res[i].drops / 1000000.0) /
(res_cnt - 1.0);
+ total_ops = res[i].hits + res[i].drops;
+ total_ops_stddev += (total_ops_mean - total_ops / 1000000.0) *
+ (total_ops_mean - total_ops / 1000000.0) /
+ (res_cnt - 1.0);
}
hits_stddev = sqrt(hits_stddev);
drops_stddev = sqrt(drops_stddev);
+ total_ops_stddev = sqrt(total_ops_stddev);
}
printf("Summary: hits %8.3lf \u00B1 %5.3lfM/s (%7.3lfM/prod), ",
hits_mean, hits_stddev, hits_mean / env.producer_cnt);
- printf("drops %8.3lf \u00B1 %5.3lfM/s\n",
+ printf("drops %8.3lf \u00B1 %5.3lfM/s, ",
drops_mean, drops_stddev);
+ printf("total operations %8.3lf \u00B1 %5.3lfM/s\n",
+ total_ops_mean, total_ops_stddev);
+}
+
+void ops_report_progress(int iter, struct bench_res *res, long delta_ns)
+{
+ double hits_per_sec, hits_per_prod;
+
+ hits_per_sec = res->hits / 1000000.0 / (delta_ns / 1000000000.0);
+ hits_per_prod = hits_per_sec / env.producer_cnt;
+
+ printf("Iter %3d (%7.3lfus): ", iter, (delta_ns - 1000000000) / 1000.0);
+
+ printf("hits %8.3lfM/s (%7.3lfM/prod)\n", hits_per_sec, hits_per_prod);
+}
+
+void ops_report_final(struct bench_res res[], int res_cnt)
+{
+ double hits_mean = 0.0, hits_stddev = 0.0;
+ int i;
+
+ for (i = 0; i < res_cnt; i++)
+ hits_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt);
+
+ if (res_cnt > 1) {
+ for (i = 0; i < res_cnt; i++)
+ hits_stddev += (hits_mean - res[i].hits / 1000000.0) *
+ (hits_mean - res[i].hits / 1000000.0) /
+ (res_cnt - 1.0);
+
+ hits_stddev = sqrt(hits_stddev);
+ }
+ printf("Summary: throughput %8.3lf \u00B1 %5.3lf M ops/s (%7.3lfM ops/prod), ",
+ hits_mean, hits_stddev, hits_mean / env.producer_cnt);
+ printf("latency %8.3lf ns/op\n", 1000.0 / hits_mean * env.producer_cnt);
+}
+
+void local_storage_report_progress(int iter, struct bench_res *res,
+ long delta_ns)
+{
+ double important_hits_per_sec, hits_per_sec;
+ double delta_sec = delta_ns / 1000000000.0;
+
+ hits_per_sec = res->hits / 1000000.0 / delta_sec;
+ important_hits_per_sec = res->important_hits / 1000000.0 / delta_sec;
+
+ printf("Iter %3d (%7.3lfus): ", iter, (delta_ns - 1000000000) / 1000.0);
+
+ printf("hits %8.3lfM/s ", hits_per_sec);
+ printf("important_hits %8.3lfM/s\n", important_hits_per_sec);
+}
+
+void local_storage_report_final(struct bench_res res[], int res_cnt)
+{
+ double important_hits_mean = 0.0, important_hits_stddev = 0.0;
+ double hits_mean = 0.0, hits_stddev = 0.0;
+ int i;
+
+ for (i = 0; i < res_cnt; i++) {
+ hits_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt);
+ important_hits_mean += res[i].important_hits / 1000000.0 / (0.0 + res_cnt);
+ }
+
+ if (res_cnt > 1) {
+ for (i = 0; i < res_cnt; i++) {
+ hits_stddev += (hits_mean - res[i].hits / 1000000.0) *
+ (hits_mean - res[i].hits / 1000000.0) /
+ (res_cnt - 1.0);
+ important_hits_stddev +=
+ (important_hits_mean - res[i].important_hits / 1000000.0) *
+ (important_hits_mean - res[i].important_hits / 1000000.0) /
+ (res_cnt - 1.0);
+ }
+
+ hits_stddev = sqrt(hits_stddev);
+ important_hits_stddev = sqrt(important_hits_stddev);
+ }
+ printf("Summary: hits throughput %8.3lf \u00B1 %5.3lf M ops/s, ",
+ hits_mean, hits_stddev);
+ printf("hits latency %8.3lf ns/op, ", 1000.0 / hits_mean);
+ printf("important_hits throughput %8.3lf \u00B1 %5.3lf M ops/s\n",
+ important_hits_mean, important_hits_stddev);
}
const char *argp_program_version = "benchmark";
@@ -123,6 +264,7 @@ static const struct argp_option opts[] = {
{ "consumers", 'c', "NUM", 0, "Number of consumer threads"},
{ "verbose", 'v', NULL, 0, "Verbose debug output"},
{ "affinity", 'a', NULL, 0, "Set consumer/producer thread affinity"},
+ { "quiet", 'q', NULL, 0, "Be more quiet"},
{ "prod-affinity", ARG_PROD_AFFINITY_SET, "CPUSET", 0,
"Set of CPUs for producer threads; implies --affinity"},
{ "cons-affinity", ARG_CONS_AFFINITY_SET, "CPUSET", 0,
@@ -131,16 +273,42 @@ static const struct argp_option opts[] = {
};
extern struct argp bench_ringbufs_argp;
+extern struct argp bench_bloom_map_argp;
+extern struct argp bench_bpf_loop_argp;
+extern struct argp bench_local_storage_argp;
+extern struct argp bench_local_storage_rcu_tasks_trace_argp;
+extern struct argp bench_strncmp_argp;
+extern struct argp bench_hashmap_lookup_argp;
+extern struct argp bench_local_storage_create_argp;
+extern struct argp bench_htab_mem_argp;
+extern struct argp bench_trigger_batch_argp;
+extern struct argp bench_crypto_argp;
+extern struct argp bench_sockmap_argp;
+extern struct argp bench_lpm_trie_map_argp;
static const struct argp_child bench_parsers[] = {
{ &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 },
+ { &bench_bloom_map_argp, 0, "Bloom filter map benchmark", 0 },
+ { &bench_bpf_loop_argp, 0, "bpf_loop helper benchmark", 0 },
+ { &bench_local_storage_argp, 0, "local_storage benchmark", 0 },
+ { &bench_strncmp_argp, 0, "bpf_strncmp helper benchmark", 0 },
+ { &bench_local_storage_rcu_tasks_trace_argp, 0,
+ "local_storage RCU Tasks Trace slowdown benchmark", 0 },
+ { &bench_hashmap_lookup_argp, 0, "Hashmap lookup benchmark", 0 },
+ { &bench_local_storage_create_argp, 0, "local-storage-create benchmark", 0 },
+ { &bench_htab_mem_argp, 0, "hash map memory benchmark", 0 },
+ { &bench_trigger_batch_argp, 0, "BPF triggering benchmark", 0 },
+ { &bench_crypto_argp, 0, "bpf crypto benchmark", 0 },
+ { &bench_sockmap_argp, 0, "bpf sockmap benchmark", 0 },
+ { &bench_lpm_trie_map_argp, 0, "LPM trie map benchmark", 0 },
{},
};
+/* Make pos_args global, so that we can run argp_parse twice, if necessary */
+static int pos_args;
+
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
- static int pos_args;
-
switch (key) {
case 'v':
env.verbose = true;
@@ -164,14 +332,14 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
break;
case 'p':
env.producer_cnt = strtol(arg, NULL, 10);
- if (env.producer_cnt <= 0) {
+ if (env.producer_cnt < 0) {
fprintf(stderr, "Invalid producer count: %s\n", arg);
argp_usage(state);
}
break;
case 'c':
env.consumer_cnt = strtol(arg, NULL, 10);
- if (env.consumer_cnt <= 0) {
+ if (env.consumer_cnt < 0) {
fprintf(stderr, "Invalid consumer count: %s\n", arg);
argp_usage(state);
}
@@ -179,6 +347,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
case 'a':
env.affinity = true;
break;
+ case 'q':
+ env.quiet = true;
+ break;
case ARG_PROD_AFFINITY_SET:
env.affinity = true;
if (parse_num_list(arg, &env.prod_cpus.cpus,
@@ -209,7 +380,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
return 0;
}
-static void parse_cmdline_args(int argc, char **argv)
+static void parse_cmdline_args_init(int argc, char **argv)
{
static const struct argp argp = {
.options = opts,
@@ -219,9 +390,25 @@ static void parse_cmdline_args(int argc, char **argv)
};
if (argp_parse(&argp, argc, argv, 0, NULL, NULL))
exit(1);
- if (!env.list && !env.bench_name) {
- argp_help(&argp, stderr, ARGP_HELP_DOC, "bench");
- exit(1);
+}
+
+static void parse_cmdline_args_final(int argc, char **argv)
+{
+ struct argp_child bench_parsers[2] = {};
+ const struct argp argp = {
+ .options = opts,
+ .parser = parse_arg,
+ .doc = argp_program_doc,
+ .children = bench_parsers,
+ };
+
+ /* Parse arguments the second time with the correct set of parsers */
+ if (bench->argp) {
+ bench_parsers[0].argp = bench->argp;
+ bench_parsers[0].header = bench->name;
+ pos_args = 0;
+ if (argp_parse(&argp, argc, argv, 0, NULL, NULL))
+ exit(1);
}
}
@@ -265,12 +452,14 @@ static void setup_timer()
static void set_thread_affinity(pthread_t thread, int cpu)
{
cpu_set_t cpuset;
+ int err;
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
- if (pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset)) {
+ err = pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset);
+ if (err) {
fprintf(stderr, "setting affinity to CPU #%d failed: %d\n",
- cpu, errno);
+ cpu, -err);
exit(1);
}
}
@@ -291,7 +480,7 @@ static int next_cpu(struct cpu_set *cpu_set)
exit(1);
}
- return cpu_set->next_cpu++;
+ return cpu_set->next_cpu++ % env.nr_cpus;
}
static struct bench_state {
@@ -311,17 +500,75 @@ extern const struct bench bench_rename_kretprobe;
extern const struct bench bench_rename_rawtp;
extern const struct bench bench_rename_fentry;
extern const struct bench bench_rename_fexit;
-extern const struct bench bench_trig_base;
-extern const struct bench bench_trig_tp;
-extern const struct bench bench_trig_rawtp;
+
+/* pure counting benchmarks to establish theoretical limits */
+extern const struct bench bench_trig_usermode_count;
+extern const struct bench bench_trig_syscall_count;
+extern const struct bench bench_trig_kernel_count;
+
+/* batched, staying mostly in-kernel benchmarks */
extern const struct bench bench_trig_kprobe;
+extern const struct bench bench_trig_kretprobe;
+extern const struct bench bench_trig_kprobe_multi;
+extern const struct bench bench_trig_kretprobe_multi;
extern const struct bench bench_trig_fentry;
-extern const struct bench bench_trig_fentry_sleep;
+extern const struct bench bench_trig_kprobe_multi_all;
+extern const struct bench bench_trig_kretprobe_multi_all;
+extern const struct bench bench_trig_fexit;
extern const struct bench bench_trig_fmodret;
+extern const struct bench bench_trig_tp;
+extern const struct bench bench_trig_rawtp;
+
+/* uprobe/uretprobe benchmarks */
+extern const struct bench bench_trig_uprobe_nop;
+extern const struct bench bench_trig_uretprobe_nop;
+extern const struct bench bench_trig_uprobe_push;
+extern const struct bench bench_trig_uretprobe_push;
+extern const struct bench bench_trig_uprobe_ret;
+extern const struct bench bench_trig_uretprobe_ret;
+extern const struct bench bench_trig_uprobe_multi_nop;
+extern const struct bench bench_trig_uretprobe_multi_nop;
+extern const struct bench bench_trig_uprobe_multi_push;
+extern const struct bench bench_trig_uretprobe_multi_push;
+extern const struct bench bench_trig_uprobe_multi_ret;
+extern const struct bench bench_trig_uretprobe_multi_ret;
+#ifdef __x86_64__
+extern const struct bench bench_trig_uprobe_nop5;
+extern const struct bench bench_trig_uretprobe_nop5;
+extern const struct bench bench_trig_uprobe_multi_nop5;
+extern const struct bench bench_trig_uretprobe_multi_nop5;
+#endif
+
extern const struct bench bench_rb_libbpf;
extern const struct bench bench_rb_custom;
extern const struct bench bench_pb_libbpf;
extern const struct bench bench_pb_custom;
+extern const struct bench bench_bloom_lookup;
+extern const struct bench bench_bloom_update;
+extern const struct bench bench_bloom_false_positive;
+extern const struct bench bench_hashmap_without_bloom;
+extern const struct bench bench_hashmap_with_bloom;
+extern const struct bench bench_bpf_loop;
+extern const struct bench bench_strncmp_no_helper;
+extern const struct bench bench_strncmp_helper;
+extern const struct bench bench_bpf_hashmap_full_update;
+extern const struct bench bench_local_storage_cache_seq_get;
+extern const struct bench bench_local_storage_cache_interleaved_get;
+extern const struct bench bench_local_storage_cache_hashmap_control;
+extern const struct bench bench_local_storage_tasks_trace;
+extern const struct bench bench_bpf_hashmap_lookup;
+extern const struct bench bench_local_storage_create;
+extern const struct bench bench_htab_mem;
+extern const struct bench bench_crypto_encrypt;
+extern const struct bench bench_crypto_decrypt;
+extern const struct bench bench_sockmap;
+extern const struct bench bench_lpm_trie_noop;
+extern const struct bench bench_lpm_trie_baseline;
+extern const struct bench bench_lpm_trie_lookup;
+extern const struct bench bench_lpm_trie_insert;
+extern const struct bench bench_lpm_trie_update;
+extern const struct bench bench_lpm_trie_delete;
+extern const struct bench bench_lpm_trie_free;
static const struct bench *benchs[] = {
&bench_count_global,
@@ -332,28 +579,82 @@ static const struct bench *benchs[] = {
&bench_rename_rawtp,
&bench_rename_fentry,
&bench_rename_fexit,
- &bench_trig_base,
- &bench_trig_tp,
- &bench_trig_rawtp,
+ /* pure counting benchmarks for establishing theoretical limits */
+ &bench_trig_usermode_count,
+ &bench_trig_kernel_count,
+ &bench_trig_syscall_count,
+ /* batched, staying mostly in-kernel triggers */
&bench_trig_kprobe,
+ &bench_trig_kretprobe,
+ &bench_trig_kprobe_multi,
+ &bench_trig_kretprobe_multi,
&bench_trig_fentry,
- &bench_trig_fentry_sleep,
+ &bench_trig_kprobe_multi_all,
+ &bench_trig_kretprobe_multi_all,
+ &bench_trig_fexit,
&bench_trig_fmodret,
+ &bench_trig_tp,
+ &bench_trig_rawtp,
+ /* uprobes */
+ &bench_trig_uprobe_nop,
+ &bench_trig_uretprobe_nop,
+ &bench_trig_uprobe_push,
+ &bench_trig_uretprobe_push,
+ &bench_trig_uprobe_ret,
+ &bench_trig_uretprobe_ret,
+ &bench_trig_uprobe_multi_nop,
+ &bench_trig_uretprobe_multi_nop,
+ &bench_trig_uprobe_multi_push,
+ &bench_trig_uretprobe_multi_push,
+ &bench_trig_uprobe_multi_ret,
+ &bench_trig_uretprobe_multi_ret,
+#ifdef __x86_64__
+ &bench_trig_uprobe_nop5,
+ &bench_trig_uretprobe_nop5,
+ &bench_trig_uprobe_multi_nop5,
+ &bench_trig_uretprobe_multi_nop5,
+#endif
+ /* ringbuf/perfbuf benchmarks */
&bench_rb_libbpf,
&bench_rb_custom,
&bench_pb_libbpf,
&bench_pb_custom,
+ &bench_bloom_lookup,
+ &bench_bloom_update,
+ &bench_bloom_false_positive,
+ &bench_hashmap_without_bloom,
+ &bench_hashmap_with_bloom,
+ &bench_bpf_loop,
+ &bench_strncmp_no_helper,
+ &bench_strncmp_helper,
+ &bench_bpf_hashmap_full_update,
+ &bench_local_storage_cache_seq_get,
+ &bench_local_storage_cache_interleaved_get,
+ &bench_local_storage_cache_hashmap_control,
+ &bench_local_storage_tasks_trace,
+ &bench_bpf_hashmap_lookup,
+ &bench_local_storage_create,
+ &bench_htab_mem,
+ &bench_crypto_encrypt,
+ &bench_crypto_decrypt,
+ &bench_sockmap,
+ &bench_lpm_trie_noop,
+ &bench_lpm_trie_baseline,
+ &bench_lpm_trie_lookup,
+ &bench_lpm_trie_insert,
+ &bench_lpm_trie_update,
+ &bench_lpm_trie_delete,
+ &bench_lpm_trie_free,
};
-static void setup_benchmark()
+static void find_benchmark(void)
{
- int i, err;
+ int i;
if (!env.bench_name) {
fprintf(stderr, "benchmark name is not specified\n");
exit(1);
}
-
for (i = 0; i < ARRAY_SIZE(benchs); i++) {
if (strcmp(benchs[i]->name, env.bench_name) == 0) {
bench = benchs[i];
@@ -364,8 +665,14 @@ static void setup_benchmark()
fprintf(stderr, "benchmark '%s' not found\n", env.bench_name);
exit(1);
}
+}
+
+static void setup_benchmark(void)
+{
+ int i, err;
- printf("Setting up benchmark '%s'...\n", bench->name);
+ if (!env.quiet)
+ printf("Setting up benchmark '%s'...\n", bench->name);
state.producers = calloc(env.producer_cnt, sizeof(*state.producers));
state.consumers = calloc(env.consumer_cnt, sizeof(*state.consumers));
@@ -380,11 +687,15 @@ static void setup_benchmark()
bench->setup();
for (i = 0; i < env.consumer_cnt; i++) {
+ if (!bench->consumer_thread) {
+ fprintf(stderr, "benchmark doesn't support consumers!\n");
+ exit(1);
+ }
err = pthread_create(&state.consumers[i], NULL,
bench->consumer_thread, (void *)(long)i);
if (err) {
fprintf(stderr, "failed to create consumer thread #%d: %d\n",
- i, -errno);
+ i, -err);
exit(1);
}
if (env.affinity)
@@ -399,11 +710,15 @@ static void setup_benchmark()
env.prod_cpus.next_cpu = env.cons_cpus.next_cpu;
for (i = 0; i < env.producer_cnt; i++) {
+ if (!bench->producer_thread) {
+ fprintf(stderr, "benchmark doesn't support producers!\n");
+ exit(1);
+ }
err = pthread_create(&state.producers[i], NULL,
bench->producer_thread, (void *)(long)i);
if (err) {
fprintf(stderr, "failed to create producer thread #%d: %d\n",
- i, -errno);
+ i, -err);
exit(1);
}
if (env.affinity)
@@ -411,7 +726,8 @@ static void setup_benchmark()
next_cpu(&env.prod_cpus));
}
- printf("Benchmark '%s' started.\n", bench->name);
+ if (!env.quiet)
+ printf("Benchmark '%s' started.\n", bench->name);
}
static pthread_mutex_t bench_done_mtx = PTHREAD_MUTEX_INITIALIZER;
@@ -435,7 +751,8 @@ static void collect_measurements(long delta_ns) {
int main(int argc, char **argv)
{
- parse_cmdline_args(argc, argv);
+ env.nr_cpus = get_nprocs();
+ parse_cmdline_args_init(argc, argv);
if (env.list) {
int i;
@@ -447,6 +764,9 @@ int main(int argc, char **argv)
return 0;
}
+ find_benchmark();
+ parse_cmdline_args_final(argc, argv);
+
setup_benchmark();
setup_timer();
diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h
index c1f48a473b02..bea323820ffb 100644
--- a/tools/testing/selftests/bpf/bench.h
+++ b/tools/testing/selftests/bpf/bench.h
@@ -10,6 +10,7 @@
#include <math.h>
#include <time.h>
#include <sys/syscall.h>
+#include <limits.h>
struct cpu_set {
bool *cpus;
@@ -24,21 +25,35 @@ struct env {
bool verbose;
bool list;
bool affinity;
+ bool quiet;
int consumer_cnt;
int producer_cnt;
+ int nr_cpus;
struct cpu_set prod_cpus;
struct cpu_set cons_cpus;
};
+struct basic_stats {
+ double mean;
+ double stddev;
+};
+
struct bench_res {
long hits;
long drops;
+ long false_hits;
+ long important_hits;
+ unsigned long gp_ns;
+ unsigned long gp_ct;
+ unsigned int stime;
+ unsigned long duration_ns;
};
struct bench {
const char *name;
- void (*validate)();
- void (*setup)();
+ const struct argp *argp;
+ void (*validate)(void);
+ void (*setup)(void);
void *(*producer_thread)(void *ctx);
void *(*consumer_thread)(void *ctx);
void (*measure)(struct bench_res* res);
@@ -53,17 +68,20 @@ struct counter {
extern struct env env;
extern const struct bench *bench;
-void setup_libbpf();
+void setup_libbpf(void);
void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns);
void hits_drops_report_final(struct bench_res res[], int res_cnt);
-
-static inline __u64 get_time_ns() {
- struct timespec t;
-
- clock_gettime(CLOCK_MONOTONIC, &t);
-
- return (u64)t.tv_sec * 1000000000 + t.tv_nsec;
-}
+void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns);
+void false_hits_report_final(struct bench_res res[], int res_cnt);
+void ops_report_progress(int iter, struct bench_res *res, long delta_ns);
+void ops_report_final(struct bench_res res[], int res_cnt);
+void local_storage_report_progress(int iter, struct bench_res *res,
+ long delta_ns);
+void local_storage_report_final(struct bench_res res[], int res_cnt);
+void grace_period_latency_basic_stats(struct bench_res res[], int res_cnt,
+ struct basic_stats *gp_stat);
+void grace_period_ticks_basic_stats(struct bench_res res[], int res_cnt,
+ struct basic_stats *gp_stat);
static inline void atomic_inc(long *value)
{
diff --git a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
new file mode 100644
index 000000000000..e289dd1a14ee
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <argp.h>
+#include <linux/log2.h>
+#include <pthread.h>
+#include "bench.h"
+#include "bloom_filter_bench.skel.h"
+#include "bpf_util.h"
+
+static struct ctx {
+ bool use_array_map;
+ bool use_hashmap;
+ bool hashmap_use_bloom;
+ bool count_false_hits;
+
+ struct bloom_filter_bench *skel;
+
+ int bloom_fd;
+ int hashmap_fd;
+ int array_map_fd;
+
+ pthread_mutex_t map_done_mtx;
+ pthread_cond_t map_done_cv;
+ bool map_done;
+ bool map_prepare_err;
+
+ __u32 next_map_idx;
+} ctx = {
+ .map_done_mtx = PTHREAD_MUTEX_INITIALIZER,
+ .map_done_cv = PTHREAD_COND_INITIALIZER,
+};
+
+struct stat {
+ __u32 stats[3];
+};
+
+static struct {
+ __u32 nr_entries;
+ __u8 nr_hash_funcs;
+ __u8 value_size;
+} args = {
+ .nr_entries = 1000,
+ .nr_hash_funcs = 3,
+ .value_size = 8,
+};
+
+enum {
+ ARG_NR_ENTRIES = 3000,
+ ARG_NR_HASH_FUNCS = 3001,
+ ARG_VALUE_SIZE = 3002,
+};
+
+static const struct argp_option opts[] = {
+ { "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0,
+ "Set number of expected unique entries in the bloom filter"},
+ { "nr_hash_funcs", ARG_NR_HASH_FUNCS, "NR_HASH_FUNCS", 0,
+ "Set number of hash functions in the bloom filter"},
+ { "value_size", ARG_VALUE_SIZE, "VALUE_SIZE", 0,
+ "Set value size (in bytes) of bloom filter entries"},
+ {},
+};
+
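As background on these knobs (standard Bloom filter theory, not a claim made by the benchmark itself): for a filter holding $n$ entries in $m$ bits with $k$ hash functions, the expected false-positive probability is approximately

$$p \approx \left(1 - e^{-kn/m}\right)^{k}$$

so the ``bloom-false-positive`` benchmark defined below can be used to check how the measured rate tracks this estimate as ``nr_entries`` and ``nr_hash_funcs`` vary.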
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+ long ret;
+
+ switch (key) {
+ case ARG_NR_ENTRIES:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
+ fprintf(stderr, "Invalid nr_entries count.");
+ argp_usage(state);
+ }
+ args.nr_entries = ret;
+ break;
+ case ARG_NR_HASH_FUNCS:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > 15) {
+ fprintf(stderr,
+ "The bloom filter must use 1 to 15 hash functions.");
+ argp_usage(state);
+ }
+ args.nr_hash_funcs = ret;
+ break;
+ case ARG_VALUE_SIZE:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 2 || ret > 256) {
+ fprintf(stderr,
+ "Invalid value size. Must be between 2 and 256 bytes");
+ argp_usage(state);
+ }
+ args.value_size = ret;
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+/* exported into benchmark runner */
+const struct argp bench_bloom_map_argp = {
+ .options = opts,
+ .parser = parse_arg,
+};
+
+static void validate(void)
+{
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr,
+ "The bloom filter benchmarks do not support consumer\n");
+ exit(1);
+ }
+}
+
+static inline void trigger_bpf_program(void)
+{
+ syscall(__NR_getpgid);
+}
+
+static void *producer(void *input)
+{
+ while (true)
+ trigger_bpf_program();
+
+ return NULL;
+}
+
+static void *map_prepare_thread(void *arg)
+{
+ __u32 val_size, i;
+ void *val = NULL;
+ int err;
+
+ val_size = args.value_size;
+ val = malloc(val_size);
+ if (!val) {
+ ctx.map_prepare_err = true;
+ goto done;
+ }
+
+ while (true) {
+ i = __atomic_add_fetch(&ctx.next_map_idx, 1, __ATOMIC_RELAXED);
+ if (i > args.nr_entries)
+ break;
+
+again:
+ /* Populate hashmap, bloom filter map, and array map with the same
+ * random values
+ */
+ err = syscall(__NR_getrandom, val, val_size, 0);
+ if (err != val_size) {
+ ctx.map_prepare_err = true;
+ fprintf(stderr, "failed to get random value: %d\n", -errno);
+ break;
+ }
+
+ if (ctx.use_hashmap) {
+ err = bpf_map_update_elem(ctx.hashmap_fd, val, val, BPF_NOEXIST);
+ if (err) {
+ if (err != -EEXIST) {
+ ctx.map_prepare_err = true;
+ fprintf(stderr, "failed to add elem to hashmap: %d\n",
+ -errno);
+ break;
+ }
+ goto again;
+ }
+ }
+
+ i--;
+
+ if (ctx.use_array_map) {
+ err = bpf_map_update_elem(ctx.array_map_fd, &i, val, 0);
+ if (err) {
+ ctx.map_prepare_err = true;
+ fprintf(stderr, "failed to add elem to array map: %d\n", -errno);
+ break;
+ }
+ }
+
+ if (ctx.use_hashmap && !ctx.hashmap_use_bloom)
+ continue;
+
+ err = bpf_map_update_elem(ctx.bloom_fd, NULL, val, 0);
+ if (err) {
+ ctx.map_prepare_err = true;
+ fprintf(stderr,
+ "failed to add elem to bloom filter map: %d\n", -errno);
+ break;
+ }
+ }
+done:
+ pthread_mutex_lock(&ctx.map_done_mtx);
+ ctx.map_done = true;
+ pthread_cond_signal(&ctx.map_done_cv);
+ pthread_mutex_unlock(&ctx.map_done_mtx);
+
+ if (val)
+ free(val);
+
+ return NULL;
+}
+
+static void populate_maps(void)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ pthread_t map_thread;
+ int i, err, nr_rand_bytes;
+
+ ctx.bloom_fd = bpf_map__fd(ctx.skel->maps.bloom_map);
+ ctx.hashmap_fd = bpf_map__fd(ctx.skel->maps.hashmap);
+ ctx.array_map_fd = bpf_map__fd(ctx.skel->maps.array_map);
+
+ for (i = 0; i < nr_cpus; i++) {
+ err = pthread_create(&map_thread, NULL, map_prepare_thread,
+ NULL);
+ if (err) {
+ fprintf(stderr, "failed to create pthread: %d\n", -errno);
+ exit(1);
+ }
+ }
+
+ pthread_mutex_lock(&ctx.map_done_mtx);
+ while (!ctx.map_done)
+ pthread_cond_wait(&ctx.map_done_cv, &ctx.map_done_mtx);
+ pthread_mutex_unlock(&ctx.map_done_mtx);
+
+ if (ctx.map_prepare_err)
+ exit(1);
+
+ nr_rand_bytes = syscall(__NR_getrandom, ctx.skel->bss->rand_vals,
+ ctx.skel->rodata->nr_rand_bytes, 0);
+ if (nr_rand_bytes != ctx.skel->rodata->nr_rand_bytes) {
+ fprintf(stderr, "failed to get random bytes\n");
+ exit(1);
+ }
+}
+
+static void check_args(void)
+{
+ if (args.value_size < 8) {
+ __u64 nr_unique_entries = 1ULL << (args.value_size * 8);
+
+ if (args.nr_entries > nr_unique_entries) {
+ fprintf(stderr,
+ "Not enough unique values for the nr_entries requested\n");
+ exit(1);
+ }
+ }
+}
+
+static struct bloom_filter_bench *setup_skeleton(void)
+{
+ struct bloom_filter_bench *skel;
+
+ check_args();
+
+ setup_libbpf();
+
+ skel = bloom_filter_bench__open();
+ if (!skel) {
+ fprintf(stderr, "failed to open skeleton\n");
+ exit(1);
+ }
+
+ skel->rodata->hashmap_use_bloom = ctx.hashmap_use_bloom;
+ skel->rodata->count_false_hits = ctx.count_false_hits;
+
+ /* Resize number of entries */
+ bpf_map__set_max_entries(skel->maps.hashmap, args.nr_entries);
+
+ bpf_map__set_max_entries(skel->maps.array_map, args.nr_entries);
+
+ bpf_map__set_max_entries(skel->maps.bloom_map, args.nr_entries);
+
+ /* Set value size */
+ bpf_map__set_value_size(skel->maps.array_map, args.value_size);
+
+ bpf_map__set_value_size(skel->maps.bloom_map, args.value_size);
+
+ bpf_map__set_value_size(skel->maps.hashmap, args.value_size);
+
+ /* For the hashmap, we use the value as the key as well */
+ bpf_map__set_key_size(skel->maps.hashmap, args.value_size);
+
+ skel->bss->value_size = args.value_size;
+
+ /* Set number of hash functions */
+ bpf_map__set_map_extra(skel->maps.bloom_map, args.nr_hash_funcs);
+
+ if (bloom_filter_bench__load(skel)) {
+ fprintf(stderr, "failed to load skeleton\n");
+ exit(1);
+ }
+
+ return skel;
+}
+
+static void bloom_lookup_setup(void)
+{
+ struct bpf_link *link;
+
+ ctx.use_array_map = true;
+
+ ctx.skel = setup_skeleton();
+
+ populate_maps();
+
+ link = bpf_program__attach(ctx.skel->progs.bloom_lookup);
+ if (!link) {
+ fprintf(stderr, "failed to attach program!\n");
+ exit(1);
+ }
+}
+
+static void bloom_update_setup(void)
+{
+ struct bpf_link *link;
+
+ ctx.use_array_map = true;
+
+ ctx.skel = setup_skeleton();
+
+ populate_maps();
+
+ link = bpf_program__attach(ctx.skel->progs.bloom_update);
+ if (!link) {
+ fprintf(stderr, "failed to attach program!\n");
+ exit(1);
+ }
+}
+
+static void false_positive_setup(void)
+{
+ struct bpf_link *link;
+
+ ctx.use_hashmap = true;
+ ctx.hashmap_use_bloom = true;
+ ctx.count_false_hits = true;
+
+ ctx.skel = setup_skeleton();
+
+ populate_maps();
+
+ link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup);
+ if (!link) {
+ fprintf(stderr, "failed to attach program!\n");
+ exit(1);
+ }
+}
+
+static void hashmap_with_bloom_setup(void)
+{
+ struct bpf_link *link;
+
+ ctx.use_hashmap = true;
+ ctx.hashmap_use_bloom = true;
+
+ ctx.skel = setup_skeleton();
+
+ populate_maps();
+
+ link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup);
+ if (!link) {
+ fprintf(stderr, "failed to attach program!\n");
+ exit(1);
+ }
+}
+
+static void hashmap_no_bloom_setup(void)
+{
+ struct bpf_link *link;
+
+ ctx.use_hashmap = true;
+
+ ctx.skel = setup_skeleton();
+
+ populate_maps();
+
+ link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup);
+ if (!link) {
+ fprintf(stderr, "failed to attach program!\n");
+ exit(1);
+ }
+}
+
+static void measure(struct bench_res *res)
+{
+ unsigned long total_hits = 0, total_drops = 0, total_false_hits = 0;
+ static unsigned long last_hits, last_drops, last_false_hits;
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ int hit_key, drop_key, false_hit_key;
+ int i;
+
+ hit_key = ctx.skel->rodata->hit_key;
+ drop_key = ctx.skel->rodata->drop_key;
+ false_hit_key = ctx.skel->rodata->false_hit_key;
+
+ if (ctx.skel->bss->error != 0) {
+ fprintf(stderr, "error (%d) when searching the bloom filter\n",
+ ctx.skel->bss->error);
+ exit(1);
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ struct stat *s = (void *)&ctx.skel->bss->percpu_stats[i];
+
+ total_hits += s->stats[hit_key];
+ total_drops += s->stats[drop_key];
+ total_false_hits += s->stats[false_hit_key];
+ }
+
+ res->hits = total_hits - last_hits;
+ res->drops = total_drops - last_drops;
+ res->false_hits = total_false_hits - last_false_hits;
+
+ last_hits = total_hits;
+ last_drops = total_drops;
+ last_false_hits = total_false_hits;
+}
+
+const struct bench bench_bloom_lookup = {
+ .name = "bloom-lookup",
+ .argp = &bench_bloom_map_argp,
+ .validate = validate,
+ .setup = bloom_lookup_setup,
+ .producer_thread = producer,
+ .measure = measure,
+ .report_progress = hits_drops_report_progress,
+ .report_final = hits_drops_report_final,
+};
+
+const struct bench bench_bloom_update = {
+ .name = "bloom-update",
+ .argp = &bench_bloom_map_argp,
+ .validate = validate,
+ .setup = bloom_update_setup,
+ .producer_thread = producer,
+ .measure = measure,
+ .report_progress = hits_drops_report_progress,
+ .report_final = hits_drops_report_final,
+};
+
+const struct bench bench_bloom_false_positive = {
+ .name = "bloom-false-positive",
+ .argp = &bench_bloom_map_argp,
+ .validate = validate,
+ .setup = false_positive_setup,
+ .producer_thread = producer,
+ .measure = measure,
+ .report_progress = false_hits_report_progress,
+ .report_final = false_hits_report_final,
+};
+
+const struct bench bench_hashmap_without_bloom = {
+ .name = "hashmap-without-bloom",
+ .argp = &bench_bloom_map_argp,
+ .validate = validate,
+ .setup = hashmap_no_bloom_setup,
+ .producer_thread = producer,
+ .measure = measure,
+ .report_progress = hits_drops_report_progress,
+ .report_final = hits_drops_report_final,
+};
+
+const struct bench bench_hashmap_with_bloom = {
+ .name = "hashmap-with-bloom",
+ .argp = &bench_bloom_map_argp,
+ .validate = validate,
+ .setup = hashmap_with_bloom_setup,
+ .producer_thread = producer,
+ .measure = measure,
+ .report_progress = hits_drops_report_progress,
+ .report_final = hits_drops_report_final,
+};
diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_crypto.c b/tools/testing/selftests/bpf/benchs/bench_bpf_crypto.c
new file mode 100644
index 000000000000..2845edaba8db
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_bpf_crypto.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <argp.h>
+#include "bench.h"
+#include "crypto_bench.skel.h"
+
+#define MAX_CIPHER_LEN 32
+static char *input;
+static struct crypto_ctx {
+ struct crypto_bench *skel;
+ int pfd;
+} ctx;
+
+static struct crypto_args {
+ u32 crypto_len;
+ char *crypto_cipher;
+} args = {
+ .crypto_len = 16,
+ .crypto_cipher = "ecb(aes)",
+};
+
+enum {
+ ARG_CRYPTO_LEN = 5000,
+ ARG_CRYPTO_CIPHER = 5001,
+};
+
+static const struct argp_option opts[] = {
+ { "crypto-len", ARG_CRYPTO_LEN, "CRYPTO_LEN", 0,
+ "Set the length of crypto buffer" },
+ { "crypto-cipher", ARG_CRYPTO_CIPHER, "CRYPTO_CIPHER", 0,
+ "Set the cipher to use (default:ecb(aes))" },
+ {},
+};
+
+static error_t crypto_parse_arg(int key, char *arg, struct argp_state *state)
+{
+ switch (key) {
+ case ARG_CRYPTO_LEN:
+ args.crypto_len = strtoul(arg, NULL, 10);
+ if (!args.crypto_len ||
+ args.crypto_len > sizeof(ctx.skel->bss->dst)) {
+ fprintf(stderr, "Invalid crypto buffer len (limit %zu)\n",
+ sizeof(ctx.skel->bss->dst));
+ argp_usage(state);
+ }
+ break;
+ case ARG_CRYPTO_CIPHER:
+ args.crypto_cipher = strdup(arg);
+ if (!args.crypto_cipher || !strlen(args.crypto_cipher) ||
+ strlen(args.crypto_cipher) > MAX_CIPHER_LEN) {
+ fprintf(stderr, "Invalid crypto cipher len (limit %d)\n",
+ MAX_CIPHER_LEN);
+ argp_usage(state);
+ }
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+const struct argp bench_crypto_argp = {
+ .options = opts,
+ .parser = crypto_parse_arg,
+};
+
+static void crypto_validate(void)
+{
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "bpf crypto benchmark doesn't support consumer!\n");
+ exit(1);
+ }
+}
+
+static void crypto_setup(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+
+ int err, pfd;
+ size_t i, sz;
+
+ sz = args.crypto_len;
+ if (!sz || sz > sizeof(ctx.skel->bss->dst)) {
+ fprintf(stderr, "invalid encrypt buffer size (source %zu, target %zu)\n",
+ sz, sizeof(ctx.skel->bss->dst));
+ exit(1);
+ }
+
+ setup_libbpf();
+
+ ctx.skel = crypto_bench__open();
+ if (!ctx.skel) {
+ fprintf(stderr, "failed to open skeleton\n");
+ exit(1);
+ }
+
+ snprintf(ctx.skel->bss->cipher, 128, "%s", args.crypto_cipher);
+ memcpy(ctx.skel->bss->key, "12345678testtest", 16);
+ ctx.skel->bss->key_len = 16;
+ ctx.skel->bss->authsize = 0;
+
+ srandom(time(NULL));
+ input = malloc(sz);
+ if (!input) {
+ fprintf(stderr, "failed to allocate input buffer\n");
+ exit(1);
+ }
+ for (i = 0; i < sz - 1; i++)
+ input[i] = '1' + random() % 9;
+ input[sz - 1] = '\0';
+
+ ctx.skel->rodata->len = args.crypto_len;
+
+ err = crypto_bench__load(ctx.skel);
+ if (err) {
+ fprintf(stderr, "failed to load skeleton\n");
+ crypto_bench__destroy(ctx.skel);
+ exit(1);
+ }
+
+ pfd = bpf_program__fd(ctx.skel->progs.crypto_setup);
+ if (pfd < 0) {
+ fprintf(stderr, "failed to get fd for setup prog\n");
+ crypto_bench__destroy(ctx.skel);
+ exit(1);
+ }
+
+ err = bpf_prog_test_run_opts(pfd, &opts);
+ if (err || ctx.skel->bss->status) {
+ fprintf(stderr, "failed to run setup prog: err %d, status %d\n",
+ err, ctx.skel->bss->status);
+ crypto_bench__destroy(ctx.skel);
+ exit(1);
+ }
+}
+
+static void crypto_encrypt_setup(void)
+{
+ crypto_setup();
+ ctx.pfd = bpf_program__fd(ctx.skel->progs.crypto_encrypt);
+}
+
+static void crypto_decrypt_setup(void)
+{
+ crypto_setup();
+ ctx.pfd = bpf_program__fd(ctx.skel->progs.crypto_decrypt);
+}
+
+static void crypto_measure(struct bench_res *res)
+{
+ res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
+}
+
+static void *crypto_producer(void *unused)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .repeat = 64,
+ .data_in = input,
+ .data_size_in = args.crypto_len,
+ );
+
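+ /*
+ * Each bpf_prog_test_run_opts() call executes the program
+ * opts.repeat (64) times, amortizing syscall overhead.
+ */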
+ while (true)
+ (void)bpf_prog_test_run_opts(ctx.pfd, &opts);
+ return NULL;
+}
+
+const struct bench bench_crypto_encrypt = {
+ .name = "crypto-encrypt",
+ .argp = &bench_crypto_argp,
+ .validate = crypto_validate,
+ .setup = crypto_encrypt_setup,
+ .producer_thread = crypto_producer,
+ .measure = crypto_measure,
+ .report_progress = hits_drops_report_progress,
+ .report_final = hits_drops_report_final,
+};
+
+const struct bench bench_crypto_decrypt = {
+ .name = "crypto-decrypt",
+ .argp = &bench_crypto_argp,
+ .validate = crypto_validate,
+ .setup = crypto_decrypt_setup,
+ .producer_thread = crypto_producer,
+ .measure = crypto_measure,
+ .report_progress = hits_drops_report_progress,
+ .report_final = hits_drops_report_final,
+};
diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c
new file mode 100644
index 000000000000..ee1dc12c5e5e
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Bytedance */
+
+#include "bench.h"
+#include "bpf_hashmap_full_update_bench.skel.h"
+#include "bpf_util.h"
+
+/* BPF triggering benchmarks */
+static struct ctx {
+ struct bpf_hashmap_full_update_bench *skel;
+} ctx;
+
+#define MAX_LOOP_NUM 10000
+
+static void validate(void)
+{
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer!\n");
+ exit(1);
+ }
+}
+
+static void *producer(void *input)
+{
+ while (true) {
+ /* trigger the bpf program */
+ syscall(__NR_getpgid);
+ }
+
+ return NULL;
+}
+
+static void measure(struct bench_res *res)
+{
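+ /* per-CPU results are read from the skeleton in hashmap_report_final() */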
+}
+
+static void setup(void)
+{
+ struct bpf_link *link;
+ int map_fd, i, max_entries;
+
+ setup_libbpf();
+
+ ctx.skel = bpf_hashmap_full_update_bench__open_and_load();
+ if (!ctx.skel) {
+ fprintf(stderr, "failed to open skeleton\n");
+ exit(1);
+ }
+
+ ctx.skel->bss->nr_loops = MAX_LOOP_NUM;
+
+ link = bpf_program__attach(ctx.skel->progs.benchmark);
+ if (!link) {
+ fprintf(stderr, "failed to attach program!\n");
+ exit(1);
+ }
+
+ /* fill hash_map */
+ map_fd = bpf_map__fd(ctx.skel->maps.hash_map_bench);
+ max_entries = bpf_map__max_entries(ctx.skel->maps.hash_map_bench);
+ for (i = 0; i < max_entries; i++)
+ bpf_map_update_elem(map_fd, &i, &i, BPF_ANY);
+}
+
+static void hashmap_report_final(struct bench_res res[], int res_cnt)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ int i;
+
+ for (i = 0; i < nr_cpus; i++) {
+ u64 time = ctx.skel->bss->percpu_time[i];
+
+ if (!time)
+ continue;
+
+ printf("%d:hash_map_full_perf %lld events per sec\n",
+ i, ctx.skel->bss->nr_loops * 1000000000ll / time);
+ }
+}
+
+const struct bench bench_bpf_hashmap_full_update = {
+ .name = "bpf-hashmap-full-update",
+ .validate = validate,
+ .setup = setup,
+ .producer_thread = producer,
+ .measure = measure,
+ .report_progress = NULL,
+ .report_final = hashmap_report_final,
+};
diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c
new file mode 100644
index 000000000000..279ff1b8b5b2
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+
+#include <sys/random.h>
+#include <argp.h>
+#include "bench.h"
+#include "bpf_hashmap_lookup.skel.h"
+#include "bpf_util.h"
+
+/* BPF triggering benchmarks */
+static struct ctx {
+ struct bpf_hashmap_lookup *skel;
+} ctx;
+
+/* only available to kernel, so define it here */
+#define BPF_MAX_LOOPS (1<<23)
+
+#define MAX_KEY_SIZE 1024 /* the size of the key map */
+
+static struct {
+ __u32 key_size;
+ __u32 map_flags;
+ __u32 max_entries;
+ __u32 nr_entries;
+ __u32 nr_loops;
+} args = {
+ .key_size = 4,
+ .map_flags = 0,
+ .max_entries = 1000,
+ .nr_entries = 500,
+ .nr_loops = 1000000,
+};
+
+enum {
+ ARG_KEY_SIZE = 8001,
+ ARG_MAP_FLAGS,
+ ARG_MAX_ENTRIES,
+ ARG_NR_ENTRIES,
+ ARG_NR_LOOPS,
+};
+
+static const struct argp_option opts[] = {
+ { "key_size", ARG_KEY_SIZE, "KEY_SIZE", 0,
+ "The hashmap key size (max 1024)"},
+ { "map_flags", ARG_MAP_FLAGS, "MAP_FLAGS", 0,
+ "The hashmap flags passed to BPF_MAP_CREATE"},
+ { "max_entries", ARG_MAX_ENTRIES, "MAX_ENTRIES", 0,
+ "The hashmap max entries"},
+ { "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0,
+ "The number of entries to insert/lookup"},
+ { "nr_loops", ARG_NR_LOOPS, "NR_LOOPS", 0,
+ "The number of loops for the benchmark"},
+ {},
+};
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+ long ret;
+
+ switch (key) {
+ case ARG_KEY_SIZE:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > MAX_KEY_SIZE) {
+ fprintf(stderr, "invalid key_size");
+ argp_usage(state);
+ }
+ args.key_size = ret;
+ break;
+ case ARG_MAP_FLAGS:
+ ret = strtol(arg, NULL, 0);
+ if (ret < 0 || ret > UINT_MAX) {
+ fprintf(stderr, "invalid map_flags");
+ argp_usage(state);
+ }
+ args.map_flags = ret;
+ break;
+ case ARG_MAX_ENTRIES:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
+ fprintf(stderr, "invalid max_entries");
+ argp_usage(state);
+ }
+ args.max_entries = ret;
+ break;
+ case ARG_NR_ENTRIES:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
+ fprintf(stderr, "invalid nr_entries");
+ argp_usage(state);
+ }
+ args.nr_entries = ret;
+ break;
+ case ARG_NR_LOOPS:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > BPF_MAX_LOOPS) {
+ fprintf(stderr, "invalid nr_loops: %ld (min=1 max=%u)\n",
+ ret, BPF_MAX_LOOPS);
+ argp_usage(state);
+ }
+ args.nr_loops = ret;
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+const struct argp bench_hashmap_lookup_argp = {
+ .options = opts,
+ .parser = parse_arg,
+};
+
+static void validate(void)
+{
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer!\n");
+ exit(1);
+ }
+
+ if (args.nr_entries > args.max_entries) {
+ fprintf(stderr, "args.nr_entries is too big! (max %u, got %u)\n",
+ args.max_entries, args.nr_entries);
+ exit(1);
+ }
+}
+
+static void *producer(void *input)
+{
+ while (true) {
+ /* trigger the bpf program */
+ syscall(__NR_getpgid);
+ }
+ return NULL;
+}
+
+static void measure(struct bench_res *res)
+{
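+ /* per-CPU timings are aggregated from percpu_times in hashmap_report_final() */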
+}
+
+static inline void patch_key(u32 i, u32 *key)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ *key = i + 1;
+#else
+ *key = __builtin_bswap32(i + 1);
+#endif
+ /* the rest of the key keeps the fixed pattern written in setup() */
+}
+
+static void setup(void)
+{
+ struct bpf_link *link;
+ int map_fd;
+ int ret;
+ int i;
+
+ setup_libbpf();
+
+ ctx.skel = bpf_hashmap_lookup__open();
+ if (!ctx.skel) {
+ fprintf(stderr, "failed to open skeleton\n");
+ exit(1);
+ }
+
+ bpf_map__set_max_entries(ctx.skel->maps.hash_map_bench, args.max_entries);
+ bpf_map__set_key_size(ctx.skel->maps.hash_map_bench, args.key_size);
+ bpf_map__set_value_size(ctx.skel->maps.hash_map_bench, 8);
+ bpf_map__set_map_flags(ctx.skel->maps.hash_map_bench, args.map_flags);
+
+ ctx.skel->bss->nr_entries = args.nr_entries;
+ ctx.skel->bss->nr_loops = args.nr_loops / args.nr_entries;
+
+ if (args.key_size > 4) {
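+ /* fill the key tail with a fixed pattern; 2654435761 is Knuth's multiplicative hash constant */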
+ for (i = 1; i < args.key_size/4; i++)
+ ctx.skel->bss->key[i] = 2654435761 * i;
+ }
+
+ ret = bpf_hashmap_lookup__load(ctx.skel);
+ if (ret) {
+ bpf_hashmap_lookup__destroy(ctx.skel);
+ fprintf(stderr, "failed to load map: %s", strerror(-ret));
+ exit(1);
+ }
+
+ /* fill in the hash_map */
+ map_fd = bpf_map__fd(ctx.skel->maps.hash_map_bench);
+ for (u64 i = 0; i < args.nr_entries; i++) {
+ patch_key(i, ctx.skel->bss->key);
+ bpf_map_update_elem(map_fd, ctx.skel->bss->key, &i, BPF_ANY);
+ }
+
+ link = bpf_program__attach(ctx.skel->progs.benchmark);
+ if (!link) {
+ fprintf(stderr, "failed to attach program!\n");
+ exit(1);
+ }
+}
+
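+/*
+ * e.g. args.nr_loops = 1000000 measured over time = 500000000 ns gives
+ * 1e6 * 1e9 / 5e8 / 1e6 = 2.0, reported as 2.0M events/sec
+ */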
+static inline double events_from_time(u64 time)
+{
+ if (time)
+ return args.nr_loops * 1000000000llu / time / 1000000.0L;
+
+ return 0;
+}
+
+static int compute_events(u64 *times, double *events_mean, double *events_stddev, u64 *mean_time)
+{
+ int i, n = 0;
+
+ *events_mean = 0;
+ *events_stddev = 0;
+ *mean_time = 0;
+
+ for (i = 0; i < 32; i++) {
+ if (!times[i])
+ break;
+ *mean_time += times[i];
+ *events_mean += events_from_time(times[i]);
+ n += 1;
+ }
+ if (!n)
+ return 0;
+
+ *mean_time /= n;
+ *events_mean /= n;
+
+ if (n > 1) {
+ for (i = 0; i < n; i++) {
+ double events_i = *events_mean - events_from_time(times[i]);
+ *events_stddev += events_i * events_i / (n - 1);
+ }
+ *events_stddev = sqrt(*events_stddev);
+ }
+
+ return n;
+}
+
+static void hashmap_report_final(struct bench_res res[], int res_cnt)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ double events_mean, events_stddev;
+ u64 mean_time;
+ int i, n;
+
+ for (i = 0; i < nr_cpus; i++) {
+ n = compute_events(ctx.skel->bss->percpu_times[i], &events_mean,
+ &events_stddev, &mean_time);
+ if (n == 0)
+ continue;
+
+ if (env.quiet) {
+ /* we expect only one cpu to be present */
+ if (env.affinity)
+ printf("%.3lf\n", events_mean);
+ else
+ printf("cpu%02d %.3lf\n", i, events_mean);
+ } else {
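+ /* mean ± 2 * stddev covers ~95% of samples under a normal approximation */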
+ printf("cpu%02d: lookup %.3lfM ± %.3lfM events/sec"
+ " (approximated from %d samples of ~%lums)\n",
+ i, events_mean, 2*events_stddev,
+ n, mean_time / 1000000);
+ }
+ }
+}
+
+const struct bench bench_bpf_hashmap_lookup = {
+ .name = "bpf-hashmap-lookup",
+ .argp = &bench_hashmap_lookup_argp,
+ .validate = validate,
+ .setup = setup,
+ .producer_thread = producer,
+ .measure = measure,
+ .report_progress = NULL,
+ .report_final = hashmap_report_final,
+};
diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c b/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c
new file mode 100644
index 000000000000..a705cfb2bccc
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <argp.h>
+#include "bench.h"
+#include "bpf_loop_bench.skel.h"
+
+/* BPF triggering benchmarks */
+static struct ctx {
+ struct bpf_loop_bench *skel;
+} ctx;
+
+static struct {
+ __u32 nr_loops;
+} args = {
+ .nr_loops = 10,
+};
+
+enum {
+ ARG_NR_LOOPS = 4000,
+};
+
+static const struct argp_option opts[] = {
+ { "nr_loops", ARG_NR_LOOPS, "nr_loops", 0,
+ "Set number of loops for the bpf_loop helper"},
+ {},
+};
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+ switch (key) {
+ case ARG_NR_LOOPS:
+ args.nr_loops = strtol(arg, NULL, 10);
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+/* exported into benchmark runner */
+const struct argp bench_bpf_loop_argp = {
+ .options = opts,
+ .parser = parse_arg,
+};
+
+static void validate(void)
+{
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer!\n");
+ exit(1);
+ }
+}
+
+static void *producer(void *input)
+{
+ while (true)
+ /* trigger the bpf program */
+ syscall(__NR_getpgid);
+
+ return NULL;
+}
+
+static void measure(struct bench_res *res)
+{
+ res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
+}
+
+static void setup(void)
+{
+ struct bpf_link *link;
+
+ setup_libbpf();
+
+ ctx.skel = bpf_loop_bench__open_and_load();
+ if (!ctx.skel) {
+ fprintf(stderr, "failed to open skeleton\n");
+ exit(1);
+ }
+
+ link = bpf_program__attach(ctx.skel->progs.benchmark);
+ if (!link) {
+ fprintf(stderr, "failed to attach program!\n");
+ exit(1);
+ }
+
+ ctx.skel->bss->nr_loops = args.nr_loops;
+}
+
+const struct bench bench_bpf_loop = {
+ .name = "bpf-loop",
+ .argp = &bench_bpf_loop_argp,
+ .validate = validate,
+ .setup = setup,
+ .producer_thread = producer,
+ .measure = measure,
+ .report_progress = ops_report_progress,
+ .report_final = ops_report_final,
+};
diff --git a/tools/testing/selftests/bpf/benchs/bench_count.c b/tools/testing/selftests/bpf/benchs/bench_count.c
index befba7a82643..ba89ed3936b7 100644
--- a/tools/testing/selftests/bpf/benchs/bench_count.c
+++ b/tools/testing/selftests/bpf/benchs/bench_count.c
@@ -18,11 +18,6 @@ static void *count_global_producer(void *input)
return NULL;
}
-static void *count_global_consumer(void *input)
-{
- return NULL;
-}
-
static void count_global_measure(struct bench_res *res)
{
struct count_global_ctx *ctx = &count_global_ctx;
@@ -36,11 +31,11 @@ static struct count_local_ctx {
struct counter *hits;
} count_local_ctx;
-static void count_local_setup()
+static void count_local_setup(void)
{
struct count_local_ctx *ctx = &count_local_ctx;
- ctx->hits = calloc(env.consumer_cnt, sizeof(*ctx->hits));
+ ctx->hits = calloc(env.producer_cnt, sizeof(*ctx->hits));
if (!ctx->hits)
exit(1);
}
@@ -56,11 +51,6 @@ static void *count_local_producer(void *input)
return NULL;
}
-static void *count_local_consumer(void *input)
-{
- return NULL;
-}
-
static void count_local_measure(struct bench_res *res)
{
struct count_local_ctx *ctx = &count_local_ctx;
@@ -74,7 +64,6 @@ static void count_local_measure(struct bench_res *res)
const struct bench bench_count_global = {
.name = "count-global",
.producer_thread = count_global_producer,
- .consumer_thread = count_global_consumer,
.measure = count_global_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -84,7 +73,6 @@ const struct bench bench_count_local = {
.name = "count-local",
.setup = count_local_setup,
.producer_thread = count_local_producer,
- .consumer_thread = count_local_consumer,
.measure = count_local_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
diff --git a/tools/testing/selftests/bpf/benchs/bench_htab_mem.c b/tools/testing/selftests/bpf/benchs/bench_htab_mem.c
new file mode 100644
index 000000000000..297e32390cd1
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_htab_mem.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <argp.h>
+#include <stdbool.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/param.h>
+#include <fcntl.h>
+
+#include "bench.h"
+#include "bpf_util.h"
+#include "cgroup_helpers.h"
+#include "htab_mem_bench.skel.h"
+
+struct htab_mem_use_case {
+ const char *name;
+ const char **progs;
+ /* Do synchronization between addition thread and deletion thread */
+ bool need_sync;
+};
+
+static struct htab_mem_ctx {
+ const struct htab_mem_use_case *uc;
+ struct htab_mem_bench *skel;
+ pthread_barrier_t *notify;
+ int fd;
+} ctx;
+
+const char *ow_progs[] = {"overwrite", NULL};
+const char *batch_progs[] = {"batch_add_batch_del", NULL};
+const char *add_del_progs[] = {"add_only", "del_only", NULL};
+static const struct htab_mem_use_case use_cases[] = {
+ { .name = "overwrite", .progs = ow_progs },
+ { .name = "batch_add_batch_del", .progs = batch_progs },
+ { .name = "add_del_on_diff_cpu", .progs = add_del_progs, .need_sync = true },
+};
+
+static struct htab_mem_args {
+ u32 value_size;
+ const char *use_case;
+ bool preallocated;
+} args = {
+ .value_size = 8,
+ .use_case = "overwrite",
+ .preallocated = false,
+};
+
+enum {
+ ARG_VALUE_SIZE = 10000,
+ ARG_USE_CASE = 10001,
+ ARG_PREALLOCATED = 10002,
+};
+
+static const struct argp_option opts[] = {
+ { "value-size", ARG_VALUE_SIZE, "VALUE_SIZE", 0,
+ "Set the value size of hash map (default 8)" },
+ { "use-case", ARG_USE_CASE, "USE_CASE", 0,
+ "Set the use case of hash map: overwrite|batch_add_batch_del|add_del_on_diff_cpu" },
+ { "preallocated", ARG_PREALLOCATED, NULL, 0, "use preallocated hash map" },
+ {},
+};
+
+static error_t htab_mem_parse_arg(int key, char *arg, struct argp_state *state)
+{
+ switch (key) {
+ case ARG_VALUE_SIZE:
+ args.value_size = strtoul(arg, NULL, 10);
+ if (args.value_size > 4096) {
+ fprintf(stderr, "too big value size %u\n", args.value_size);
+ argp_usage(state);
+ }
+ break;
+ case ARG_USE_CASE:
+ args.use_case = strdup(arg);
+ if (!args.use_case) {
+ fprintf(stderr, "no mem for use-case\n");
+ argp_usage(state);
+ }
+ break;
+ case ARG_PREALLOCATED:
+ args.preallocated = true;
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+const struct argp bench_htab_mem_argp = {
+ .options = opts,
+ .parser = htab_mem_parse_arg,
+};
+
+static void htab_mem_validate(void)
+{
+ if (!strcmp(use_cases[2].name, args.use_case) && env.producer_cnt % 2) {
+ fprintf(stderr, "%s needs an even number of producers\n", args.use_case);
+ exit(1);
+ }
+}
+
+static int htab_mem_bench_init_barriers(void)
+{
+ pthread_barrier_t *barriers;
+ unsigned int i, nr;
+
+ if (!ctx.uc->need_sync)
+ return 0;
+
+ nr = (env.producer_cnt + 1) / 2;
+ barriers = calloc(nr, sizeof(*barriers));
+ if (!barriers)
+ return -1;
+
+ /* Used for synchronization between two threads */
+ for (i = 0; i < nr; i++)
+ pthread_barrier_init(&barriers[i], NULL, 2);
+
+ ctx.notify = barriers;
+ return 0;
+}
+
+static void htab_mem_bench_exit_barriers(void)
+{
+ unsigned int i, nr;
+
+ if (!ctx.notify)
+ return;
+
+ nr = (env.producer_cnt + 1) / 2;
+ for (i = 0; i < nr; i++)
+ pthread_barrier_destroy(&ctx.notify[i]);
+ free(ctx.notify);
+}
+
+static const struct htab_mem_use_case *htab_mem_find_use_case_or_exit(const char *name)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(use_cases); i++) {
+ if (!strcmp(name, use_cases[i].name))
+ return &use_cases[i];
+ }
+
+ fprintf(stderr, "no such use-case: %s\n", name);
+ fprintf(stderr, "available use case:");
+ for (i = 0; i < ARRAY_SIZE(use_cases); i++)
+ fprintf(stderr, " %s", use_cases[i].name);
+ fprintf(stderr, "\n");
+ exit(1);
+}
+
+static void htab_mem_setup(void)
+{
+ struct bpf_map *map;
+ const char **names;
+ int err;
+
+ setup_libbpf();
+
+ ctx.uc = htab_mem_find_use_case_or_exit(args.use_case);
+ err = htab_mem_bench_init_barriers();
+ if (err) {
+ fprintf(stderr, "failed to init barrier\n");
+ exit(1);
+ }
+
+ ctx.fd = cgroup_setup_and_join("/htab_mem");
+ if (ctx.fd < 0)
+ goto cleanup;
+
+ ctx.skel = htab_mem_bench__open();
+ if (!ctx.skel) {
+ fprintf(stderr, "failed to open skeleton\n");
+ goto cleanup;
+ }
+
+ map = ctx.skel->maps.htab;
+ bpf_map__set_value_size(map, args.value_size);
+ /* Ensure that different CPUs can operate on different subsets */
+ bpf_map__set_max_entries(map, MAX(8192, 64 * env.nr_cpus));
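+ /*
+ * The htab is presumably declared with BPF_F_NO_PREALLOC in the BPF
+ * object; clearing that flag switches it to a fully preallocated map.
+ */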
+ if (args.preallocated)
+ bpf_map__set_map_flags(map, bpf_map__map_flags(map) & ~BPF_F_NO_PREALLOC);
+
+ names = ctx.uc->progs;
+ while (*names) {
+ struct bpf_program *prog;
+
+ prog = bpf_object__find_program_by_name(ctx.skel->obj, *names);
+ if (!prog) {
+ fprintf(stderr, "no such program %s\n", *names);
+ goto cleanup;
+ }
+ bpf_program__set_autoload(prog, true);
+ names++;
+ }
+ ctx.skel->bss->nr_thread = env.producer_cnt;
+
+ err = htab_mem_bench__load(ctx.skel);
+ if (err) {
+ fprintf(stderr, "failed to load skeleton\n");
+ goto cleanup;
+ }
+ err = htab_mem_bench__attach(ctx.skel);
+ if (err) {
+ fprintf(stderr, "failed to attach skeleton\n");
+ goto cleanup;
+ }
+ return;
+
+cleanup:
+ htab_mem_bench__destroy(ctx.skel);
+ htab_mem_bench_exit_barriers();
+ if (ctx.fd >= 0) {
+ close(ctx.fd);
+ cleanup_cgroup_environment();
+ }
+ exit(1);
+}
+
+static void htab_mem_add_fn(pthread_barrier_t *notify)
+{
+ while (true) {
+ /* Do addition */
+ (void)syscall(__NR_getpgid, 0);
+ /* Notify deletion thread to do deletion */
+ pthread_barrier_wait(notify);
+ /* Wait for deletion to complete */
+ pthread_barrier_wait(notify);
+ }
+}
+
+static void htab_mem_delete_fn(pthread_barrier_t *notify)
+{
+ while (true) {
+ /* Wait for addition to complete */
+ pthread_barrier_wait(notify);
+ /* Do deletion */
+ (void)syscall(__NR_getppid);
+ /* Notify addition thread to do addition */
+ pthread_barrier_wait(notify);
+ }
+}
+
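+/*
+ * Producers are paired for add_del_on_diff_cpu: even-numbered threads add
+ * entries, odd-numbered threads delete them, and each pair synchronizes
+ * through the barrier at notify[seq / 2].
+ */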
+static void *htab_mem_producer(void *arg)
+{
+ pthread_barrier_t *notify;
+ int seq;
+
+ if (!ctx.uc->need_sync) {
+ while (true)
+ (void)syscall(__NR_getpgid, 0);
+ return NULL;
+ }
+
+ seq = (long)arg;
+ notify = &ctx.notify[seq / 2];
+ if (seq & 1)
+ htab_mem_delete_fn(notify);
+ else
+ htab_mem_add_fn(notify);
+ return NULL;
+}
+
+static void htab_mem_read_mem_cgrp_file(const char *name, unsigned long *value)
+{
+ char buf[32];
+ ssize_t got;
+ int fd;
+
+ fd = openat(ctx.fd, name, O_RDONLY);
+ if (fd < 0) {
+ /* cgroup v1 ? */
+ fprintf(stderr, "no %s\n", name);
+ *value = 0;
+ return;
+ }
+
+ got = read(fd, buf, sizeof(buf) - 1);
+ close(fd);
+ if (got <= 0) {
+ *value = 0;
+ return;
+ }
+ buf[got] = 0;
+
+ *value = strtoull(buf, NULL, 0);
+}
+
+static void htab_mem_measure(struct bench_res *res)
+{
+ res->hits = atomic_swap(&ctx.skel->bss->op_cnt, 0) / env.producer_cnt;
+ htab_mem_read_mem_cgrp_file("memory.current", &res->gp_ct);
+}
+
+static void htab_mem_report_progress(int iter, struct bench_res *res, long delta_ns)
+{
+ double loop, mem;
+
+ loop = res->hits / 1000.0 / (delta_ns / 1000000000.0);
+ mem = res->gp_ct / 1048576.0;
+ printf("Iter %3d (%7.3lfus): ", iter, (delta_ns - 1000000000) / 1000.0);
+ printf("per-prod-op %7.2lfk/s, memory usage %7.2lfMiB\n", loop, mem);
+}
+
+static void htab_mem_report_final(struct bench_res res[], int res_cnt)
+{
+ double mem_mean = 0.0, mem_stddev = 0.0;
+ double loop_mean = 0.0, loop_stddev = 0.0;
+ unsigned long peak_mem;
+ int i;
+
+ for (i = 0; i < res_cnt; i++) {
+ loop_mean += res[i].hits / 1000.0 / (0.0 + res_cnt);
+ mem_mean += res[i].gp_ct / 1048576.0 / (0.0 + res_cnt);
+ }
+ if (res_cnt > 1) {
+ for (i = 0; i < res_cnt; i++) {
+ loop_stddev += (loop_mean - res[i].hits / 1000.0) *
+ (loop_mean - res[i].hits / 1000.0) /
+ (res_cnt - 1.0);
+ mem_stddev += (mem_mean - res[i].gp_ct / 1048576.0) *
+ (mem_mean - res[i].gp_ct / 1048576.0) /
+ (res_cnt - 1.0);
+ }
+ loop_stddev = sqrt(loop_stddev);
+ mem_stddev = sqrt(mem_stddev);
+ }
+
+ htab_mem_read_mem_cgrp_file("memory.peak", &peak_mem);
+ printf("Summary: per-prod-op %7.2lf \u00B1 %7.2lfk/s, memory usage %7.2lf \u00B1 %7.2lfMiB,"
+ " peak memory usage %7.2lfMiB\n",
+ loop_mean, loop_stddev, mem_mean, mem_stddev, peak_mem / 1048576.0);
+
+ close(ctx.fd);
+ cleanup_cgroup_environment();
+}
+
+const struct bench bench_htab_mem = {
+ .name = "htab-mem",
+ .argp = &bench_htab_mem_argp,
+ .validate = htab_mem_validate,
+ .setup = htab_mem_setup,
+ .producer_thread = htab_mem_producer,
+ .measure = htab_mem_measure,
+ .report_progress = htab_mem_report_progress,
+ .report_final = htab_mem_report_final,
+};
diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage.c b/tools/testing/selftests/bpf/benchs/bench_local_storage.c
new file mode 100644
index 000000000000..452499428ceb
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_local_storage.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <argp.h>
+#include <linux/btf.h>
+
+#include "local_storage_bench.skel.h"
+#include "bench.h"
+
+#include <test_btf.h>
+
+static struct {
+ __u32 nr_maps;
+ __u32 hashmap_nr_keys_used;
+} args = {
+ .nr_maps = 1000,
+ .hashmap_nr_keys_used = 1000,
+};
+
+enum {
+ ARG_NR_MAPS = 6000,
+ ARG_HASHMAP_NR_KEYS_USED = 6001,
+};
+
+static const struct argp_option opts[] = {
+ { "nr_maps", ARG_NR_MAPS, "NR_MAPS", 0,
+ "Set number of local_storage maps"},
+ { "hashmap_nr_keys_used", ARG_HASHMAP_NR_KEYS_USED, "NR_KEYS",
+ 0, "When doing hashmap test, set number of hashmap keys test uses"},
+ {},
+};
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+ long ret;
+
+ switch (key) {
+ case ARG_NR_MAPS:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
+ fprintf(stderr, "invalid nr_maps");
+ argp_usage(state);
+ }
+ args.nr_maps = ret;
+ break;
+ case ARG_HASHMAP_NR_KEYS_USED:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
+ fprintf(stderr, "invalid hashmap_nr_keys_used");
+ argp_usage(state);
+ }
+ args.hashmap_nr_keys_used = ret;
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+const struct argp bench_local_storage_argp = {
+ .options = opts,
+ .parser = parse_arg,
+};
+
+/* Keep in sync w/ array of maps in bpf */
+#define MAX_NR_MAPS 1000
+/* keep in sync w/ same define in bpf */
+#define HASHMAP_SZ 4194304
+
+static void validate(void)
+{
+ if (env.producer_cnt != 1) {
+ fprintf(stderr, "benchmark doesn't support multi-producer!\n");
+ exit(1);
+ }
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer!\n");
+ exit(1);
+ }
+
+ if (args.nr_maps > MAX_NR_MAPS) {
+ fprintf(stderr, "nr_maps must be <= 1000\n");
+ exit(1);
+ }
+
+ if (args.hashmap_nr_keys_used > HASHMAP_SZ) {
+ fprintf(stderr, "hashmap_nr_keys_used must be <= %u\n", HASHMAP_SZ);
+ exit(1);
+ }
+}
+
+static struct {
+ struct local_storage_bench *skel;
+ void *bpf_obj;
+ struct bpf_map *array_of_maps;
+} ctx;
+
+static void prepopulate_hashmap(int fd)
+{
+ int i, key, val;
+
+ /* local_storage gets will have BPF_LOCAL_STORAGE_GET_F_CREATE flag set, so
+ * populate the hashmap for a similar comparison
+ */
+ for (i = 0; i < HASHMAP_SZ; i++) {
+ key = val = i;
+ if (bpf_map_update_elem(fd, &key, &val, 0)) {
+ fprintf(stderr, "Error prepopulating hashmap (key %d)\n", key);
+ exit(1);
+ }
+ }
+}
+
+static void __setup(struct bpf_program *prog, bool hashmap)
+{
+ struct bpf_map *inner_map;
+ int i, fd, mim_fd, err;
+
+ LIBBPF_OPTS(bpf_map_create_opts, create_opts);
+
+ if (!hashmap)
+ create_opts.map_flags = BPF_F_NO_PREALLOC;
+
+ ctx.skel->rodata->num_maps = args.nr_maps;
+ ctx.skel->rodata->hashmap_num_keys = args.hashmap_nr_keys_used;
+ inner_map = bpf_map__inner_map(ctx.array_of_maps);
+ create_opts.btf_key_type_id = bpf_map__btf_key_type_id(inner_map);
+ create_opts.btf_value_type_id = bpf_map__btf_value_type_id(inner_map);
+
+ err = local_storage_bench__load(ctx.skel);
+ if (err) {
+ fprintf(stderr, "Error loading skeleton\n");
+ goto err_out;
+ }
+
+ create_opts.btf_fd = bpf_object__btf_fd(ctx.skel->obj);
+
+ mim_fd = bpf_map__fd(ctx.array_of_maps);
+ if (mim_fd < 0) {
+ fprintf(stderr, "Error getting map_in_map fd\n");
+ goto err_out;
+ }
+
+ for (i = 0; i < args.nr_maps; i++) {
+ if (hashmap)
+ fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(int),
+ sizeof(int), HASHMAP_SZ, &create_opts);
+ else
+ fd = bpf_map_create(BPF_MAP_TYPE_TASK_STORAGE, NULL, sizeof(int),
+ sizeof(int), 0, &create_opts);
+ if (fd < 0) {
+ fprintf(stderr, "Error creating map %d: %d\n", i, fd);
+ goto err_out;
+ }
+
+ if (hashmap)
+ prepopulate_hashmap(fd);
+
+ err = bpf_map_update_elem(mim_fd, &i, &fd, 0);
+ if (err) {
+ fprintf(stderr, "Error updating array-of-maps w/ map %d\n", i);
+ goto err_out;
+ }
+ }
+
+ if (!bpf_program__attach(prog)) {
+ fprintf(stderr, "Error attaching bpf program\n");
+ goto err_out;
+ }
+
+ return;
+err_out:
+ exit(1);
+}
+
+static void hashmap_setup(void)
+{
+ struct local_storage_bench *skel;
+
+ setup_libbpf();
+
+ skel = local_storage_bench__open();
+ ctx.skel = skel;
+ ctx.array_of_maps = skel->maps.array_of_hash_maps;
+ skel->rodata->use_hashmap = 1;
+ skel->rodata->interleave = 0;
+
+ __setup(skel->progs.get_local, true);
+}
+
+static void local_storage_cache_get_setup(void)
+{
+ struct local_storage_bench *skel;
+
+ setup_libbpf();
+
+ skel = local_storage_bench__open();
+ ctx.skel = skel;
+ ctx.array_of_maps = skel->maps.array_of_local_storage_maps;
+ skel->rodata->use_hashmap = 0;
+ skel->rodata->interleave = 0;
+
+ __setup(skel->progs.get_local, false);
+}
+
+static void local_storage_cache_get_interleaved_setup(void)
+{
+ struct local_storage_bench *skel;
+
+ setup_libbpf();
+
+ skel = local_storage_bench__open();
+ ctx.skel = skel;
+ ctx.array_of_maps = skel->maps.array_of_local_storage_maps;
+ skel->rodata->use_hashmap = 0;
+ skel->rodata->interleave = 1;
+
+ __setup(skel->progs.get_local, false);
+}
+
+static void measure(struct bench_res *res)
+{
+ res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
+ res->important_hits = atomic_swap(&ctx.skel->bss->important_hits, 0);
+}
+
+static inline void trigger_bpf_program(void)
+{
+ syscall(__NR_getpgid);
+}
+
+static void *producer(void *input)
+{
+ while (true)
+ trigger_bpf_program();
+
+ return NULL;
+}
+
+/* cache sequential and interleaved get benchs test local_storage get
+ * performance, specifically they demonstrate performance cliff of
+ * current list-plus-cache local_storage model.
+ *
+ * cache sequential get: call bpf_task_storage_get on n maps in order
+ * cache interleaved get: like "sequential get", but interleave 4 calls to the
+ * 'important' map (idx 0 in array_of_maps) for every 10 calls. Goal
+ * is to mimic environment where many progs are accessing their local_storage
+ * maps, with 'our' prog needing to access its map more often than others
+ */
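+/* A typical invocation, assuming the standard selftests bench runner:
+ *   ./bench local-storage-cache-seq-get --nr_maps 500
+ */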
+const struct bench bench_local_storage_cache_seq_get = {
+ .name = "local-storage-cache-seq-get",
+ .argp = &bench_local_storage_argp,
+ .validate = validate,
+ .setup = local_storage_cache_get_setup,
+ .producer_thread = producer,
+ .measure = measure,
+ .report_progress = local_storage_report_progress,
+ .report_final = local_storage_report_final,
+};
+
+const struct bench bench_local_storage_cache_interleaved_get = {
+ .name = "local-storage-cache-int-get",
+ .argp = &bench_local_storage_argp,
+ .validate = validate,
+ .setup = local_storage_cache_get_interleaved_setup,
+ .producer_thread = producer,
+ .measure = measure,
+ .report_progress = local_storage_report_progress,
+ .report_final = local_storage_report_final,
+};
+
+const struct bench bench_local_storage_cache_hashmap_control = {
+ .name = "local-storage-cache-hashmap-control",
+ .argp = &bench_local_storage_argp,
+ .validate = validate,
+ .setup = hashmap_setup,
+ .producer_thread = producer,
+ .measure = measure,
+ .report_progress = local_storage_report_progress,
+ .report_final = local_storage_report_final,
+};
diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
new file mode 100644
index 000000000000..e2ff8ea1cb79
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <pthread.h>
+#include <argp.h>
+
+#include "bench.h"
+#include "bench_local_storage_create.skel.h"
+
+struct thread {
+ int *fds;
+ pthread_t *pthds;
+ int *pthd_results;
+};
+
+static struct bench_local_storage_create *skel;
+static struct thread *threads;
+static long create_owner_errs;
+static int storage_type = BPF_MAP_TYPE_SK_STORAGE;
+static int batch_sz = 32;
+
+enum {
+ ARG_BATCH_SZ = 9000,
+ ARG_STORAGE_TYPE = 9001,
+};
+
+static const struct argp_option opts[] = {
+ { "batch-size", ARG_BATCH_SZ, "BATCH_SIZE", 0,
+ "The number of storage creations in each batch" },
+ { "storage-type", ARG_STORAGE_TYPE, "STORAGE_TYPE", 0,
+ "The type of local storage to test (socket or task)" },
+ {},
+};
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+ int ret;
+
+ switch (key) {
+ case ARG_BATCH_SZ:
+ ret = atoi(arg);
+ if (ret < 1) {
+ fprintf(stderr, "invalid batch-size\n");
+ argp_usage(state);
+ }
+ batch_sz = ret;
+ break;
+ case ARG_STORAGE_TYPE:
+ if (!strcmp(arg, "task")) {
+ storage_type = BPF_MAP_TYPE_TASK_STORAGE;
+ } else if (!strcmp(arg, "socket")) {
+ storage_type = BPF_MAP_TYPE_SK_STORAGE;
+ } else {
+ fprintf(stderr, "invalid storage-type (socket or task)\n");
+ argp_usage(state);
+ }
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+const struct argp bench_local_storage_create_argp = {
+ .options = opts,
+ .parser = parse_arg,
+};
+
+static void validate(void)
+{
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr,
+ "local-storage-create benchmark does not need consumer\n");
+ exit(1);
+ }
+}
+
+static void setup(void)
+{
+ int i;
+
+ skel = bench_local_storage_create__open_and_load();
+ if (!skel) {
+ fprintf(stderr, "error loading skel\n");
+ exit(1);
+ }
+
+ skel->bss->bench_pid = getpid();
+ if (storage_type == BPF_MAP_TYPE_SK_STORAGE) {
+ if (!bpf_program__attach(skel->progs.socket_post_create)) {
+ fprintf(stderr, "Error attaching bpf program\n");
+ exit(1);
+ }
+ } else {
+ if (!bpf_program__attach(skel->progs.sched_process_fork)) {
+ fprintf(stderr, "Error attaching bpf program\n");
+ exit(1);
+ }
+ }
+
+ if (!bpf_program__attach(skel->progs.kmalloc)) {
+ fprintf(stderr, "Error attaching bpf program\n");
+ exit(1);
+ }
+
+ threads = calloc(env.producer_cnt, sizeof(*threads));
+
+ if (!threads) {
+ fprintf(stderr, "cannot alloc thread_res\n");
+ exit(1);
+ }
+
+ for (i = 0; i < env.producer_cnt; i++) {
+ struct thread *t = &threads[i];
+
+ if (storage_type == BPF_MAP_TYPE_SK_STORAGE) {
+ t->fds = malloc(batch_sz * sizeof(*t->fds));
+ if (!t->fds) {
+ fprintf(stderr, "cannot alloc t->fds\n");
+ exit(1);
+ }
+ } else {
+ t->pthds = malloc(batch_sz * sizeof(*t->pthds));
+ if (!t->pthds) {
+ fprintf(stderr, "cannot alloc t->pthds\n");
+ exit(1);
+ }
+ t->pthd_results = malloc(batch_sz * sizeof(*t->pthd_results));
+ if (!t->pthd_results) {
+ fprintf(stderr, "cannot alloc t->pthd_results\n");
+ exit(1);
+ }
+ }
+ }
+}
+
+static void measure(struct bench_res *res)
+{
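+ /*
+ * hits counts storage creations and drops counts kmallocs observed
+ * while creating them; report_progress() derives kmallocs/create
+ * from the two.
+ */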
+ res->hits = atomic_swap(&skel->bss->create_cnts, 0);
+ res->drops = atomic_swap(&skel->bss->kmalloc_cnts, 0);
+}
+
+static void *sk_producer(void *input)
+{
+ struct thread *t = &threads[(long)(input)];
+ int *fds = t->fds;
+ int i;
+
+ while (true) {
+ for (i = 0; i < batch_sz; i++) {
+ fds[i] = socket(AF_INET6, SOCK_DGRAM, 0);
+ if (fds[i] == -1)
+ atomic_inc(&create_owner_errs);
+ }
+
+ for (i = 0; i < batch_sz; i++) {
+ if (fds[i] != -1)
+ close(fds[i]);
+ }
+ }
+
+ return NULL;
+}
+
+static void *thread_func(void *arg)
+{
+ return NULL;
+}
+
+static void *task_producer(void *input)
+{
+ struct thread *t = &threads[(long)(input)];
+ pthread_t *pthds = t->pthds;
+ int *pthd_results = t->pthd_results;
+ int i;
+
+ while (true) {
+ for (i = 0; i < batch_sz; i++) {
+ pthd_results[i] = pthread_create(&pthds[i], NULL, thread_func, NULL);
+ if (pthd_results[i])
+ atomic_inc(&create_owner_errs);
+ }
+
+ for (i = 0; i < batch_sz; i++) {
+ if (!pthd_results[i])
+ pthread_join(pthds[i], NULL);
+ }
+ }
+
+ return NULL;
+}
+
+static void *producer(void *input)
+{
+ if (storage_type == BPF_MAP_TYPE_SK_STORAGE)
+ return sk_producer(input);
+ else
+ return task_producer(input);
+}
+
+static void report_progress(int iter, struct bench_res *res, long delta_ns)
+{
+ double creates_per_sec, kmallocs_per_create;
+
+ creates_per_sec = res->hits / 1000.0 / (delta_ns / 1000000000.0);
+ kmallocs_per_create = (double)res->drops / res->hits;
+
+ printf("Iter %3d (%7.3lfus): ",
+ iter, (delta_ns - 1000000000) / 1000.0);
+ printf("creates %8.3lfk/s (%7.3lfk/prod), ",
+ creates_per_sec, creates_per_sec / env.producer_cnt);
+ printf("%3.2lf kmallocs/create\n", kmallocs_per_create);
+}
+
+static void report_final(struct bench_res res[], int res_cnt)
+{
+ double creates_mean = 0.0, creates_stddev = 0.0;
+ long total_creates = 0, total_kmallocs = 0;
+ int i;
+
+ for (i = 0; i < res_cnt; i++) {
+ creates_mean += res[i].hits / 1000.0 / (0.0 + res_cnt);
+ total_creates += res[i].hits;
+ total_kmallocs += res[i].drops;
+ }
+
+ if (res_cnt > 1) {
+ for (i = 0; i < res_cnt; i++)
+ creates_stddev += (creates_mean - res[i].hits / 1000.0) *
+ (creates_mean - res[i].hits / 1000.0) /
+ (res_cnt - 1.0);
+ creates_stddev = sqrt(creates_stddev);
+ }
+ printf("Summary: creates %8.3lf \u00B1 %5.3lfk/s (%7.3lfk/prod), ",
+ creates_mean, creates_stddev, creates_mean / env.producer_cnt);
+ printf("%4.2lf kmallocs/create\n", (double)total_kmallocs / total_creates);
+ if (create_owner_errs || skel->bss->create_errs)
+ printf("%s() errors %ld create_errs %ld\n",
+ storage_type == BPF_MAP_TYPE_SK_STORAGE ?
+ "socket" : "pthread_create",
+ create_owner_errs,
+ skel->bss->create_errs);
+}
+
+/* Benchmark performance of creating bpf local storage */
+const struct bench bench_local_storage_create = {
+ .name = "local-storage-create",
+ .argp = &bench_local_storage_create_argp,
+ .validate = validate,
+ .setup = setup,
+ .producer_thread = producer,
+ .measure = measure,
+ .report_progress = report_progress,
+ .report_final = report_final,
+};
diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c b/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c
new file mode 100644
index 000000000000..edf0b00418c1
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <argp.h>
+
+#include <sys/prctl.h>
+#include "local_storage_rcu_tasks_trace_bench.skel.h"
+#include "bench.h"
+
+#include <signal.h>
+
+static struct {
+ __u32 nr_procs;
+ __u32 kthread_pid;
+} args = {
+ .nr_procs = 1000,
+ .kthread_pid = 0,
+};
+
+enum {
+ ARG_NR_PROCS = 7000,
+ ARG_KTHREAD_PID = 7001,
+};
+
+static const struct argp_option opts[] = {
+ { "nr_procs", ARG_NR_PROCS, "NR_PROCS", 0,
+ "Set number of user processes to spin up"},
+ { "kthread_pid", ARG_KTHREAD_PID, "PID", 0,
+ "Pid of rcu_tasks_trace kthread for ticks tracking"},
+ {},
+};
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+ long ret;
+
+ switch (key) {
+ case ARG_NR_PROCS:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
+ fprintf(stderr, "invalid nr_procs\n");
+ argp_usage(state);
+ }
+ args.nr_procs = ret;
+ break;
+ case ARG_KTHREAD_PID:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1) {
+ fprintf(stderr, "invalid kthread_pid\n");
+ argp_usage(state);
+ }
+ args.kthread_pid = ret;
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+const struct argp bench_local_storage_rcu_tasks_trace_argp = {
+ .options = opts,
+ .parser = parse_arg,
+};
+
+#define MAX_SLEEP_PROCS 150000
+
+static void validate(void)
+{
+ if (env.producer_cnt != 1) {
+ fprintf(stderr, "benchmark doesn't support multi-producer!\n");
+ exit(1);
+ }
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer!\n");
+ exit(1);
+ }
+
+ if (args.nr_procs > MAX_SLEEP_PROCS) {
+ fprintf(stderr, "benchmark supports up to %u sleeper procs!\n",
+ MAX_SLEEP_PROCS);
+ exit(1);
+ }
+}
+
+static long kthread_pid_ticks(void)
+{
+ char procfs_path[100];
+ long stime;
+ FILE *f;
+
+ if (!args.kthread_pid)
+ return -1;
+
+ sprintf(procfs_path, "/proc/%u/stat", args.kthread_pid);
+ f = fopen(procfs_path, "r");
+ if (!f) {
+ fprintf(stderr, "couldn't open %s, exiting\n", procfs_path);
+ goto err_out;
+ }
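+ /* the 15th whitespace-separated field of /proc/PID/stat is stime: kernel-mode CPU time in clock ticks */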
+ if (fscanf(f, "%*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %ld", &stime) != 1) {
+ fprintf(stderr, "fscanf of %s failed, exiting\n", procfs_path);
+ goto err_out;
+ }
+ fclose(f);
+ return stime;
+
+err_out:
+ if (f)
+ fclose(f);
+ exit(1);
+ return 0;
+}
+
+static struct {
+ struct local_storage_rcu_tasks_trace_bench *skel;
+ long prev_kthread_stime;
+} ctx;
+
+static void sleep_and_loop(void)
+{
+ while (true) {
+ sleep(rand() % 4);
+ syscall(__NR_getpgid);
+ }
+}
+
+static void local_storage_tasks_trace_setup(void)
+{
+ int i, err, forkret, runner_pid;
+
+ runner_pid = getpid();
+
+ for (i = 0; i < args.nr_procs; i++) {
+ forkret = fork();
+ if (forkret < 0) {
+ fprintf(stderr, "Error forking sleeper proc %u of %u, exiting\n", i,
+ args.nr_procs);
+ goto err_out;
+ }
+
+ if (!forkret) {
+ err = prctl(PR_SET_PDEATHSIG, SIGKILL);
+ if (err < 0) {
+ fprintf(stderr, "prctl failed with err %d, exiting\n", errno);
+ goto err_out;
+ }
+
+ if (getppid() != runner_pid) {
+ fprintf(stderr, "Runner died while spinning up procs, exiting\n");
+ goto err_out;
+ }
+ sleep_and_loop();
+ }
+ }
+ printf("Spun up %u procs (our pid %d)\n", args.nr_procs, runner_pid);
+
+ setup_libbpf();
+
+ ctx.skel = local_storage_rcu_tasks_trace_bench__open_and_load();
+ if (!ctx.skel) {
+ fprintf(stderr, "Error doing open_and_load, exiting\n");
+ goto err_out;
+ }
+
+ ctx.prev_kthread_stime = kthread_pid_ticks();
+
+ if (!bpf_program__attach(ctx.skel->progs.get_local)) {
+ fprintf(stderr, "Error attaching bpf program\n");
+ goto err_out;
+ }
+
+ if (!bpf_program__attach(ctx.skel->progs.pregp_step)) {
+ fprintf(stderr, "Error attaching bpf program\n");
+ goto err_out;
+ }
+
+ if (!bpf_program__attach(ctx.skel->progs.postgp)) {
+ fprintf(stderr, "Error attaching bpf program\n");
+ goto err_out;
+ }
+
+ return;
+err_out:
+ exit(1);
+}
+
+static void measure(struct bench_res *res)
+{
+ long ticks;
+
+ res->gp_ct = atomic_swap(&ctx.skel->bss->gp_hits, 0);
+ res->gp_ns = atomic_swap(&ctx.skel->bss->gp_times, 0);
+ ticks = kthread_pid_ticks();
+ res->stime = ticks - ctx.prev_kthread_stime;
+ ctx.prev_kthread_stime = ticks;
+}
+
+static void *producer(void *input)
+{
+ while (true)
+ syscall(__NR_getpgid);
+ return NULL;
+}
+
+static void report_progress(int iter, struct bench_res *res, long delta_ns)
+{
+ if (ctx.skel->bss->unexpected) {
+ fprintf(stderr, "Error: Unexpected order of bpf prog calls (postgp after pregp).");
+ fprintf(stderr, "Data can't be trusted, exiting\n");
+ exit(1);
+ }
+
+ if (env.quiet)
+ return;
+
+ printf("Iter %d\t avg tasks_trace grace period latency\t%lf ns\n",
+ iter, res->gp_ns / (double)res->gp_ct);
+ printf("Iter %d\t avg ticks per tasks_trace grace period\t%lf\n",
+ iter, res->stime / (double)res->gp_ct);
+}
+
+static void report_final(struct bench_res res[], int res_cnt)
+{
+ struct basic_stats gp_stat;
+
+ grace_period_latency_basic_stats(res, res_cnt, &gp_stat);
+ printf("SUMMARY tasks_trace grace period latency");
+ printf("\tavg %.3lf us\tstddev %.3lf us\n", gp_stat.mean, gp_stat.stddev);
+ grace_period_ticks_basic_stats(res, res_cnt, &gp_stat);
+ printf("SUMMARY ticks per tasks_trace grace period");
+ printf("\tavg %.3lf\tstddev %.3lf\n", gp_stat.mean, gp_stat.stddev);
+}
+
+/* local-storage-tasks-trace: Benchmark performance of BPF local_storage's use
+ * of RCU Tasks-Trace.
+ *
+ * Stress RCU Tasks Trace by forking many tasks, all of which do no work aside
+ * from a sleep() loop, and creating/destroying BPF task-local storage on wakeup.
+ * The number of forked tasks is configurable.
+ *
+ * Exercising code paths which call call_rcu_tasks_trace while there are many
+ * thousands of tasks on the system should result in RCU Tasks-Trace having to
+ * do a noticeable amount of work.
+ *
+ * This should be observable by measuring rcu_tasks_trace_kthread CPU usage
+ * after the grace period has ended, or by measuring grace period latency.
+ *
+ * This benchmark uses both approaches, attaching to rcu_tasks_trace_pregp_step
+ * and rcu_tasks_trace_postgp functions to measure grace period latency and
+ * using /proc/PID/stat to measure rcu_tasks_trace_kthread kernel ticks
+ */
+const struct bench bench_local_storage_tasks_trace = {
+ .name = "local-storage-tasks-trace",
+ .argp = &bench_local_storage_rcu_tasks_trace_argp,
+ .validate = validate,
+ .setup = local_storage_tasks_trace_setup,
+ .producer_thread = producer,
+ .measure = measure,
+ .report_progress = report_progress,
+ .report_final = report_final,
+};
diff --git a/tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c b/tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c
new file mode 100644
index 000000000000..246f6cb3387d
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Cloudflare */
+
+/*
+ * All of these benchmarks operate on tries with keys in the range
+ * [0, args.nr_entries), i.e. there are no gaps or partially filled
+ * branches of the trie for any key < args.nr_entries.
+ *
+ * This gives an idea of worst-case behaviour.
+ */
+
+#include <argp.h>
+#include <linux/time64.h>
+#include <linux/if_ether.h>
+#include "lpm_trie_bench.skel.h"
+#include "lpm_trie_map.skel.h"
+#include "bench.h"
+#include "testing_helpers.h"
+#include "progs/lpm_trie.h"
+
+static struct ctx {
+ struct lpm_trie_bench *bench;
+} ctx;
+
+static struct {
+ __u32 nr_entries;
+ __u32 prefixlen;
+ bool random;
+} args = {
+ .nr_entries = 0,
+ .prefixlen = 32,
+ .random = false,
+};
+
+enum {
+ ARG_NR_ENTRIES = 9000,
+ ARG_PREFIX_LEN,
+ ARG_RANDOM,
+};
+
+static const struct argp_option opts[] = {
+ { "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0,
+ "Number of unique entries in the LPM trie" },
+ { "prefix_len", ARG_PREFIX_LEN, "PREFIX_LEN", 0,
+ "Number of prefix bits to use in the LPM trie" },
+ { "random", ARG_RANDOM, NULL, 0, "Access random keys during op" },
+ {},
+};
+
+static error_t lpm_parse_arg(int key, char *arg, struct argp_state *state)
+{
+ long ret;
+
+ switch (key) {
+ case ARG_NR_ENTRIES:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
+ fprintf(stderr, "Invalid nr_entries count.");
+ argp_usage(state);
+ }
+ args.nr_entries = ret;
+ break;
+ case ARG_PREFIX_LEN:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
+ fprintf(stderr, "Invalid prefix_len value.");
+ argp_usage(state);
+ }
+ args.prefixlen = ret;
+ break;
+ case ARG_RANDOM:
+ args.random = true;
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+ return 0;
+}
+
+const struct argp bench_lpm_trie_map_argp = {
+ .options = opts,
+ .parser = lpm_parse_arg,
+};
+
+static void validate_common(void)
+{
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer\n");
+ exit(1);
+ }
+
+ if (args.nr_entries == 0) {
+ fprintf(stderr, "Missing --nr_entries parameter\n");
+ exit(1);
+ }
+
+ if ((1UL << args.prefixlen) < args.nr_entries) {
+ fprintf(stderr, "prefix_len value too small for nr_entries\n");
+ exit(1);
+ }
+}
+
+static void lpm_insert_validate(void)
+{
+ validate_common();
+
+ if (env.producer_cnt != 1) {
+ fprintf(stderr, "lpm-trie-insert requires a single producer\n");
+ exit(1);
+ }
+
+ if (args.random) {
+ fprintf(stderr, "lpm-trie-insert does not support --random\n");
+ exit(1);
+ }
+}
+
+static void lpm_delete_validate(void)
+{
+ validate_common();
+
+ if (env.producer_cnt != 1) {
+ fprintf(stderr, "lpm-trie-delete requires a single producer\n");
+ exit(1);
+ }
+
+ if (args.random) {
+ fprintf(stderr, "lpm-trie-delete does not support --random\n");
+ exit(1);
+ }
+}
+
+static void lpm_free_validate(void)
+{
+ validate_common();
+
+ if (env.producer_cnt != 1) {
+ fprintf(stderr, "lpm-trie-free requires a single producer\n");
+ exit(1);
+ }
+
+ if (args.random) {
+ fprintf(stderr, "lpm-trie-free does not support --random\n");
+ exit(1);
+ }
+}
+
+static struct trie_key *keys;
+static __u32 *vals;
+
+static void fill_map(int map_fd)
+{
+ int err;
+
+ DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
+ .elem_flags = 0,
+ .flags = 0,
+ );
+
+ err = bpf_map_update_batch(map_fd, keys, vals, &args.nr_entries, &opts);
+ if (err) {
+ fprintf(stderr, "failed to batch update keys to map: %d\n",
+ -err);
+ exit(1);
+ }
+}
+
+static void empty_map(int map_fd)
+{
+ int err;
+
+ DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
+ .elem_flags = 0,
+ .flags = 0,
+ );
+
+ err = bpf_map_delete_batch(map_fd, keys, &args.nr_entries, &opts);
+ if (err) {
+ fprintf(stderr, "failed to batch delete keys for map: %d\n",
+ -err);
+ exit(1);
+ }
+}
+
+static void attach_prog(void)
+{
+ int i;
+
+ ctx.bench = lpm_trie_bench__open_and_load();
+ if (!ctx.bench) {
+ fprintf(stderr, "failed to open skeleton\n");
+ exit(1);
+ }
+
+ ctx.bench->bss->nr_entries = args.nr_entries;
+ ctx.bench->bss->prefixlen = args.prefixlen;
+ ctx.bench->bss->random = args.random;
+
+ if (lpm_trie_bench__attach(ctx.bench)) {
+ fprintf(stderr, "failed to attach skeleton\n");
+ exit(1);
+ }
+
+ keys = calloc(args.nr_entries, sizeof(*keys));
+ vals = calloc(args.nr_entries, sizeof(*vals));
+
+ for (i = 0; i < args.nr_entries; i++) {
+ struct trie_key *k = &keys[i];
+ __u32 *v = &vals[i];
+
+ k->prefixlen = args.prefixlen;
+ k->data = i;
+ *v = 1;
+ }
+}
+
+static void attach_prog_and_fill_map(void)
+{
+ int fd;
+
+ attach_prog();
+
+ fd = bpf_map__fd(ctx.bench->maps.trie_map);
+ fill_map(fd);
+}
+
+static void lpm_noop_setup(void)
+{
+ attach_prog();
+ ctx.bench->bss->op = LPM_OP_NOOP;
+}
+
+static void lpm_baseline_setup(void)
+{
+ attach_prog();
+ ctx.bench->bss->op = LPM_OP_BASELINE;
+}
+
+static void lpm_lookup_setup(void)
+{
+ attach_prog_and_fill_map();
+ ctx.bench->bss->op = LPM_OP_LOOKUP;
+}
+
+static void lpm_insert_setup(void)
+{
+ attach_prog();
+ ctx.bench->bss->op = LPM_OP_INSERT;
+}
+
+static void lpm_update_setup(void)
+{
+ attach_prog_and_fill_map();
+ ctx.bench->bss->op = LPM_OP_UPDATE;
+}
+
+static void lpm_delete_setup(void)
+{
+ attach_prog_and_fill_map();
+ ctx.bench->bss->op = LPM_OP_DELETE;
+}
+
+static void lpm_free_setup(void)
+{
+ attach_prog();
+ ctx.bench->bss->op = LPM_OP_FREE;
+}
+
+static void lpm_measure(struct bench_res *res)
+{
+ res->hits = atomic_swap(&ctx.bench->bss->hits, 0);
+ res->duration_ns = atomic_swap(&ctx.bench->bss->duration_ns, 0);
+}
+
+static void bench_reinit_map(void)
+{
+ int fd = bpf_map__fd(ctx.bench->maps.trie_map);
+
+ switch (ctx.bench->bss->op) {
+ case LPM_OP_INSERT:
+ /* trie_map needs to be emptied */
+ empty_map(fd);
+ break;
+ case LPM_OP_DELETE:
+ /* trie_map needs to be refilled */
+ fill_map(fd);
+ break;
+ default:
+ fprintf(stderr, "Unexpected REINIT return code for op %d\n",
+ ctx.bench->bss->op);
+ exit(1);
+ }
+}
+
+/* For NOOP, BASELINE, LOOKUP, INSERT, UPDATE, and DELETE */
+static void *lpm_producer(void *unused __always_unused)
+{
+ int err;
+ char in[ETH_HLEN]; /* unused */
+
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = in,
+ .data_size_in = sizeof(in),
+ .repeat = 1,
+ );
+
+ while (true) {
+ int fd = bpf_program__fd(ctx.bench->progs.run_bench);
+ err = bpf_prog_test_run_opts(fd, &opts);
+ if (err) {
+ fprintf(stderr, "failed to run BPF prog: %d\n", err);
+ exit(1);
+ }
+
+ /* Check for kernel error code */
+ if ((int)opts.retval < 0) {
+ fprintf(stderr, "BPF prog returned error: %d\n",
+ opts.retval);
+ exit(1);
+ }
+
+ switch (opts.retval) {
+ case LPM_BENCH_SUCCESS:
+ break;
+ case LPM_BENCH_REINIT_MAP:
+ bench_reinit_map();
+ break;
+ default:
+ fprintf(stderr, "Unexpected BPF prog return code %d for op %d\n",
+ opts.retval, ctx.bench->bss->op);
+ exit(1);
+ }
+ }
+
+ return NULL;
+}
+
+static void *lpm_free_producer(void *unused __always_unused)
+{
+ while (true) {
+ struct lpm_trie_map *skel;
+
+ skel = lpm_trie_map__open_and_load();
+ if (!skel) {
+ fprintf(stderr, "failed to open skeleton\n");
+ exit(1);
+ }
+
+ fill_map(bpf_map__fd(skel->maps.trie_free_map));
+ lpm_trie_map__destroy(skel);
+ }
+
+ return NULL;
+}
+
+/*
+ * The standard bench op_report_*() functions assume measurements are
+ * taken over a 1-second interval but operations that modify the map
+ * (INSERT, DELETE, and FREE) cannot run indefinitely without
+ * "resetting" the map to the initial state. Depending on the size of
+ * the map, this likely needs to happen before the 1-second timer fires.
+ *
+ * Calculate the fraction of a second over which the op measurement was
+ * taken (to ignore any time spent doing the reset) and report the
+ * throughput results per second.
+ */
+static void frac_second_report_progress(int iter, struct bench_res *res,
+ long delta_ns, double rate_divisor,
+ char rate)
+{
+ double hits_per_sec, hits_per_prod;
+
+ hits_per_sec = res->hits / rate_divisor /
+ (res->duration_ns / (double)NSEC_PER_SEC);
+ hits_per_prod = hits_per_sec / env.producer_cnt;
+
+ printf("Iter %3d (%7.3lfus): ", iter,
+ (delta_ns - NSEC_PER_SEC) / 1000.0);
+ printf("hits %8.3lf%c/s (%7.3lf%c/prod)\n", hits_per_sec, rate,
+ hits_per_prod, rate);
+}
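+
+/* The normalization above reduces to this standalone sketch (purely
+ * illustrative, not used by the bench): hits counted over a partial
+ * interval are scaled up to a per-second rate.
+ */
+static inline double hits_per_sec_sketch(long hits, long duration_ns)
+{
+ /* e.g. 500000 hits over 0.25s of measured time -> 2M hits/s */
+ return hits / (duration_ns / (double)NSEC_PER_SEC);
+}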
+
+static void frac_second_report_final(struct bench_res res[], int res_cnt,
+ double lat_divisor, double rate_divisor,
+ char rate, const char *unit)
+{
+ double hits_mean = 0.0, hits_stddev = 0.0;
+ double latency = 0.0;
+ int i;
+
+ for (i = 0; i < res_cnt; i++) {
+ double val = res[i].hits / rate_divisor /
+ (res[i].duration_ns / (double)NSEC_PER_SEC);
+ hits_mean += val / (0.0 + res_cnt);
+ latency += res[i].duration_ns / (double)res[i].hits / res_cnt;
+ }
+
+ if (res_cnt > 1) {
+ for (i = 0; i < res_cnt; i++) {
+ double val =
+ res[i].hits / rate_divisor /
+ (res[i].duration_ns / (double)NSEC_PER_SEC);
+ hits_stddev += (hits_mean - val) * (hits_mean - val) /
+ (res_cnt - 1.0);
+ }
+
+ hits_stddev = sqrt(hits_stddev);
+ }
+ printf("Summary: throughput %8.3lf \u00B1 %5.3lf %c ops/s (%7.3lf%c ops/prod), ",
+ hits_mean, hits_stddev, rate, hits_mean / env.producer_cnt,
+ rate);
+ printf("latency %8.3lf %s/op\n",
+ latency / lat_divisor / env.producer_cnt, unit);
+}
+
+static void insert_ops_report_progress(int iter, struct bench_res *res,
+ long delta_ns)
+{
+ double rate_divisor = 1000000.0;
+ char rate = 'M';
+
+ frac_second_report_progress(iter, res, delta_ns, rate_divisor, rate);
+}
+
+static void delete_ops_report_progress(int iter, struct bench_res *res,
+ long delta_ns)
+{
+ double rate_divisor = 1000000.0;
+ char rate = 'M';
+
+ frac_second_report_progress(iter, res, delta_ns, rate_divisor, rate);
+}
+
+static void free_ops_report_progress(int iter, struct bench_res *res,
+ long delta_ns)
+{
+ double rate_divisor = 1000.0;
+ char rate = 'K';
+
+ frac_second_report_progress(iter, res, delta_ns, rate_divisor, rate);
+}
+
+static void insert_ops_report_final(struct bench_res res[], int res_cnt)
+{
+ double lat_divisor = 1.0;
+ double rate_divisor = 1000000.0;
+ const char *unit = "ns";
+ char rate = 'M';
+
+ frac_second_report_final(res, res_cnt, lat_divisor, rate_divisor, rate,
+ unit);
+}
+
+static void delete_ops_report_final(struct bench_res res[], int res_cnt)
+{
+ double lat_divisor = 1.0;
+ double rate_divisor = 1000000.0;
+ const char *unit = "ns";
+ char rate = 'M';
+
+ frac_second_report_final(res, res_cnt, lat_divisor, rate_divisor, rate,
+ unit);
+}
+
+static void free_ops_report_final(struct bench_res res[], int res_cnt)
+{
+ double lat_divisor = 1000000.0;
+ double rate_divisor = 1000.0;
+ const char *unit = "ms";
+ char rate = 'K';
+
+ frac_second_report_final(res, res_cnt, lat_divisor, rate_divisor, rate,
+ unit);
+}
+
+/* noop bench measures harness-overhead */
+const struct bench bench_lpm_trie_noop = {
+ .name = "lpm-trie-noop",
+ .argp = &bench_lpm_trie_map_argp,
+ .validate = validate_common,
+ .setup = lpm_noop_setup,
+ .producer_thread = lpm_producer,
+ .measure = lpm_measure,
+ .report_progress = ops_report_progress,
+ .report_final = ops_report_final,
+};
+
+/* baseline overhead for lookup and update */
+const struct bench bench_lpm_trie_baseline = {
+ .name = "lpm-trie-baseline",
+ .argp = &bench_lpm_trie_map_argp,
+ .validate = validate_common,
+ .setup = lpm_baseline_setup,
+ .producer_thread = lpm_producer,
+ .measure = lpm_measure,
+ .report_progress = ops_report_progress,
+ .report_final = ops_report_final,
+};
+
+/* measure cost of doing a lookup on existing entries in a full trie */
+const struct bench bench_lpm_trie_lookup = {
+ .name = "lpm-trie-lookup",
+ .argp = &bench_lpm_trie_map_argp,
+ .validate = validate_common,
+ .setup = lpm_lookup_setup,
+ .producer_thread = lpm_producer,
+ .measure = lpm_measure,
+ .report_progress = ops_report_progress,
+ .report_final = ops_report_final,
+};
+
+/* measure cost of inserting new entries into an empty trie */
+const struct bench bench_lpm_trie_insert = {
+ .name = "lpm-trie-insert",
+ .argp = &bench_lpm_trie_map_argp,
+ .validate = lpm_insert_validate,
+ .setup = lpm_insert_setup,
+ .producer_thread = lpm_producer,
+ .measure = lpm_measure,
+ .report_progress = insert_ops_report_progress,
+ .report_final = insert_ops_report_final,
+};
+
+/* measure cost of updating existing entries in a full trie */
+const struct bench bench_lpm_trie_update = {
+ .name = "lpm-trie-update",
+ .argp = &bench_lpm_trie_map_argp,
+ .validate = validate_common,
+ .setup = lpm_update_setup,
+ .producer_thread = lpm_producer,
+ .measure = lpm_measure,
+ .report_progress = ops_report_progress,
+ .report_final = ops_report_final,
+};
+
+/* measure cost of deleting existing entries from a full trie */
+const struct bench bench_lpm_trie_delete = {
+ .name = "lpm-trie-delete",
+ .argp = &bench_lpm_trie_map_argp,
+ .validate = lpm_delete_validate,
+ .setup = lpm_delete_setup,
+ .producer_thread = lpm_producer,
+ .measure = lpm_measure,
+ .report_progress = delete_ops_report_progress,
+ .report_final = delete_ops_report_final,
+};
+
+/* measure cost of freeing a full trie */
+const struct bench bench_lpm_trie_free = {
+ .name = "lpm-trie-free",
+ .argp = &bench_lpm_trie_map_argp,
+ .validate = lpm_free_validate,
+ .setup = lpm_free_setup,
+ .producer_thread = lpm_free_producer,
+ .measure = lpm_measure,
+ .report_progress = free_ops_report_progress,
+ .report_final = free_ops_report_final,
+};
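+
+/* Hypothetical invocations via the common bench runner (flag names
+ * assumed from the shared lpm-trie argp, not verified here):
+ *
+ *   ./bench lpm-trie-lookup --nr_entries 10000 --prefixlen 32 -p 4
+ *   ./bench lpm-trie-insert --nr_entries 10000 -p 1
+ */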
diff --git a/tools/testing/selftests/bpf/benchs/bench_rename.c b/tools/testing/selftests/bpf/benchs/bench_rename.c
index a967674098ad..bf66893c7a33 100644
--- a/tools/testing/selftests/bpf/benchs/bench_rename.c
+++ b/tools/testing/selftests/bpf/benchs/bench_rename.c
@@ -11,14 +11,14 @@ static struct ctx {
int fd;
} ctx;
-static void validate()
+static void validate(void)
{
if (env.producer_cnt != 1) {
fprintf(stderr, "benchmark doesn't support multi-producer!\n");
exit(1);
}
- if (env.consumer_cnt != 1) {
- fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
}
@@ -43,7 +43,7 @@ static void measure(struct bench_res *res)
res->hits = atomic_swap(&ctx.hits.value, 0);
}
-static void setup_ctx()
+static void setup_ctx(void)
{
setup_libbpf();
@@ -65,58 +65,52 @@ static void attach_bpf(struct bpf_program *prog)
struct bpf_link *link;
link = bpf_program__attach(prog);
- if (IS_ERR(link)) {
+ if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
}
-static void setup_base()
+static void setup_base(void)
{
setup_ctx();
}
-static void setup_kprobe()
+static void setup_kprobe(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.prog1);
}
-static void setup_kretprobe()
+static void setup_kretprobe(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.prog2);
}
-static void setup_rawtp()
+static void setup_rawtp(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.prog3);
}
-static void setup_fentry()
+static void setup_fentry(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.prog4);
}
-static void setup_fexit()
+static void setup_fexit(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.prog5);
}
-static void *consumer(void *input)
-{
- return NULL;
-}
-
const struct bench bench_rename_base = {
.name = "rename-base",
.validate = validate,
.setup = setup_base,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -127,7 +121,6 @@ const struct bench bench_rename_kprobe = {
.validate = validate,
.setup = setup_kprobe,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -138,7 +131,6 @@ const struct bench bench_rename_kretprobe = {
.validate = validate,
.setup = setup_kretprobe,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -149,7 +141,6 @@ const struct bench bench_rename_rawtp = {
.validate = validate,
.setup = setup_rawtp,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -160,7 +151,6 @@ const struct bench bench_rename_fentry = {
.validate = validate,
.setup = setup_fentry,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
@@ -171,7 +161,6 @@ const struct bench bench_rename_fexit = {
.validate = validate,
.setup = setup_fexit,
.producer_thread = producer,
- .consumer_thread = consumer,
.measure = measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
diff --git a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
index da87c7f31891..01bdce692799 100644
--- a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
+++ b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
@@ -19,6 +19,8 @@ static struct {
int ringbuf_sz; /* per-ringbuf, in bytes */
bool ringbuf_use_output; /* use slower output API */
int perfbuf_sz; /* per-CPU size, in pages */
+ bool overwrite;
+ bool bench_producer;
} args = {
.back2back = false,
.batch_cnt = 500,
@@ -27,6 +29,8 @@ static struct {
.ringbuf_sz = 512 * 1024,
.ringbuf_use_output = false,
.perfbuf_sz = 128,
+ .overwrite = false,
+ .bench_producer = false,
};
enum {
@@ -35,6 +39,8 @@ enum {
ARG_RB_BATCH_CNT = 2002,
ARG_RB_SAMPLED = 2003,
ARG_RB_SAMPLE_RATE = 2004,
+ ARG_RB_OVERWRITE = 2005,
+ ARG_RB_BENCH_PRODUCER = 2006,
};
static const struct argp_option opts[] = {
@@ -43,6 +49,8 @@ static const struct argp_option opts[] = {
{ "rb-batch-cnt", ARG_RB_BATCH_CNT, "CNT", 0, "Set BPF-side record batch count"},
{ "rb-sampled", ARG_RB_SAMPLED, NULL, 0, "Notification sampling"},
{ "rb-sample-rate", ARG_RB_SAMPLE_RATE, "RATE", 0, "Notification sample rate"},
+ { "rb-overwrite", ARG_RB_OVERWRITE, NULL, 0, "Overwrite mode"},
+ { "rb-bench-producer", ARG_RB_BENCH_PRODUCER, NULL, 0, "Benchmark producer"},
{},
};
@@ -72,6 +80,12 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
argp_usage(state);
}
break;
+ case ARG_RB_OVERWRITE:
+ args.overwrite = true;
+ break;
+ case ARG_RB_BENCH_PRODUCER:
+ args.bench_producer = true;
+ break;
default:
return ARGP_ERR_UNKNOWN;
}
@@ -88,15 +102,40 @@ const struct argp bench_ringbufs_argp = {
static struct counter buf_hits;
-static inline void bufs_trigger_batch()
+static inline void bufs_trigger_batch(void)
{
(void)syscall(__NR_getpgid);
}
-static void bufs_validate()
+static void bufs_validate(void)
{
- if (env.consumer_cnt != 1) {
- fprintf(stderr, "rb-libbpf benchmark doesn't support multi-consumer!\n");
+ if (args.bench_producer && strcmp(env.bench_name, "rb-libbpf")) {
+ fprintf(stderr, "--rb-bench-producer only works with rb-libbpf!\n");
+ exit(1);
+ }
+
+ if (args.overwrite && !args.bench_producer) {
+ fprintf(stderr, "overwrite mode only works with --rb-bench-producer for now!\n");
+ exit(1);
+ }
+
+ if (args.bench_producer && env.consumer_cnt != 0) {
+ fprintf(stderr, "no consumer is needed for --rb-bench-producer!\n");
+ exit(1);
+ }
+
+ if (args.bench_producer && args.back2back) {
+ fprintf(stderr, "back-to-back mode makes no sense for --rb-bench-producer!\n");
+ exit(1);
+ }
+
+ if (args.bench_producer && args.sampled) {
+ fprintf(stderr, "sampling mode makes no sense for --rb-bench-producer!\n");
+ exit(1);
+ }
+
+ if (!args.bench_producer && env.consumer_cnt != 1) {
+ fprintf(stderr, "benchmarks without --rb-bench-producer require exactly one consumer!\n");
exit(1);
}
@@ -128,12 +167,17 @@ static void ringbuf_libbpf_measure(struct bench_res *res)
{
struct ringbuf_libbpf_ctx *ctx = &ringbuf_libbpf_ctx;
- res->hits = atomic_swap(&buf_hits.value, 0);
+ if (args.bench_producer)
+ res->hits = atomic_swap(&ctx->skel->bss->hits, 0);
+ else
+ res->hits = atomic_swap(&buf_hits.value, 0);
res->drops = atomic_swap(&ctx->skel->bss->dropped, 0);
}
-static struct ringbuf_bench *ringbuf_setup_skeleton()
+static struct ringbuf_bench *ringbuf_setup_skeleton(void)
{
+ __u32 flags;
+ struct bpf_map *ringbuf;
struct ringbuf_bench *skel;
setup_libbpf();
@@ -146,12 +190,19 @@ static struct ringbuf_bench *ringbuf_setup_skeleton()
skel->rodata->batch_cnt = args.batch_cnt;
skel->rodata->use_output = args.ringbuf_use_output ? 1 : 0;
+ skel->rodata->bench_producer = args.bench_producer;
if (args.sampled)
/* record data + header take 16 bytes */
skel->rodata->wakeup_data_size = args.sample_rate * 16;
- bpf_map__resize(skel->maps.ringbuf, args.ringbuf_sz);
+ ringbuf = skel->maps.ringbuf;
+ if (args.overwrite) {
+ flags = bpf_map__map_flags(ringbuf) | BPF_F_RB_OVERWRITE;
+ bpf_map__set_map_flags(ringbuf, flags);
+ }
+
+ bpf_map__set_max_entries(ringbuf, args.ringbuf_sz);
if (ringbuf_bench__load(skel)) {
fprintf(stderr, "failed to load skeleton\n");
@@ -167,21 +218,23 @@ static int buf_process_sample(void *ctx, void *data, size_t len)
return 0;
}
-static void ringbuf_libbpf_setup()
+static void ringbuf_libbpf_setup(void)
{
struct ringbuf_libbpf_ctx *ctx = &ringbuf_libbpf_ctx;
struct bpf_link *link;
+ int map_fd;
ctx->skel = ringbuf_setup_skeleton();
- ctx->ringbuf = ring_buffer__new(bpf_map__fd(ctx->skel->maps.ringbuf),
- buf_process_sample, NULL, NULL);
+
+ map_fd = bpf_map__fd(ctx->skel->maps.ringbuf);
+ ctx->ringbuf = ring_buffer__new(map_fd, buf_process_sample, NULL, NULL);
if (!ctx->ringbuf) {
fprintf(stderr, "failed to create ringbuf\n");
exit(1);
}
link = bpf_program__attach(ctx->skel->progs.bench_ringbuf);
- if (IS_ERR(link)) {
+ if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
@@ -223,7 +276,7 @@ static void ringbuf_custom_measure(struct bench_res *res)
res->drops = atomic_swap(&ctx->skel->bss->dropped, 0);
}
-static void ringbuf_custom_setup()
+static void ringbuf_custom_setup(void)
{
struct ringbuf_custom_ctx *ctx = &ringbuf_custom_ctx;
const size_t page_size = getpagesize();
@@ -271,7 +324,7 @@ static void ringbuf_custom_setup()
}
link = bpf_program__attach(ctx->skel->progs.bench_ringbuf);
- if (IS_ERR(link)) {
+ if (!link) {
fprintf(stderr, "failed to attach program\n");
exit(1);
}
@@ -319,7 +372,7 @@ static void ringbuf_custom_process_ring(struct ringbuf_custom *r)
smp_store_release(r->consumer_pos, cons_pos);
else
break;
- };
+ }
}
static void *ringbuf_custom_consumer(void *input)
@@ -352,7 +405,7 @@ static void perfbuf_measure(struct bench_res *res)
res->drops = atomic_swap(&ctx->skel->bss->dropped, 0);
}
-static struct perfbuf_bench *perfbuf_setup_skeleton()
+static struct perfbuf_bench *perfbuf_setup_skeleton(void)
{
struct perfbuf_bench *skel;
@@ -390,21 +443,16 @@ perfbuf_process_sample_raw(void *input_ctx, int cpu,
return LIBBPF_PERF_EVENT_CONT;
}
-static void perfbuf_libbpf_setup()
+static void perfbuf_libbpf_setup(void)
{
struct perfbuf_libbpf_ctx *ctx = &perfbuf_libbpf_ctx;
struct perf_event_attr attr;
- struct perf_buffer_raw_opts pb_opts = {
- .event_cb = perfbuf_process_sample_raw,
- .ctx = (void *)(long)0,
- .attr = &attr,
- };
struct bpf_link *link;
ctx->skel = perfbuf_setup_skeleton();
memset(&attr, 0, sizeof(attr));
- attr.config = PERF_COUNT_SW_BPF_OUTPUT,
+ attr.config = PERF_COUNT_SW_BPF_OUTPUT;
attr.type = PERF_TYPE_SOFTWARE;
attr.sample_type = PERF_SAMPLE_RAW;
/* notify only every Nth sample */
@@ -423,14 +471,15 @@ static void perfbuf_libbpf_setup()
}
ctx->perfbuf = perf_buffer__new_raw(bpf_map__fd(ctx->skel->maps.perfbuf),
- args.perfbuf_sz, &pb_opts);
+ args.perfbuf_sz, &attr,
+ perfbuf_process_sample_raw, NULL, NULL);
if (!ctx->perfbuf) {
fprintf(stderr, "failed to create perfbuf\n");
exit(1);
}
link = bpf_program__attach(ctx->skel->progs.bench_perfbuf);
- if (IS_ERR(link)) {
+ if (!link) {
fprintf(stderr, "failed to attach program\n");
exit(1);
}
@@ -522,6 +571,7 @@ static void *perfbuf_custom_consumer(void *input)
const struct bench bench_rb_libbpf = {
.name = "rb-libbpf",
+ .argp = &bench_ringbufs_argp,
.validate = bufs_validate,
.setup = ringbuf_libbpf_setup,
.producer_thread = bufs_sample_producer,
@@ -533,6 +583,7 @@ const struct bench bench_rb_libbpf = {
const struct bench bench_rb_custom = {
.name = "rb-custom",
+ .argp = &bench_ringbufs_argp,
.validate = bufs_validate,
.setup = ringbuf_custom_setup,
.producer_thread = bufs_sample_producer,
@@ -544,6 +595,7 @@ const struct bench bench_rb_custom = {
const struct bench bench_pb_libbpf = {
.name = "pb-libbpf",
+ .argp = &bench_ringbufs_argp,
.validate = bufs_validate,
.setup = perfbuf_libbpf_setup,
.producer_thread = bufs_sample_producer,
@@ -555,6 +607,7 @@ const struct bench bench_pb_libbpf = {
const struct bench bench_pb_custom = {
.name = "pb-custom",
+ .argp = &bench_ringbufs_argp,
.validate = bufs_validate,
.setup = perfbuf_libbpf_setup,
.producer_thread = bufs_sample_producer,
diff --git a/tools/testing/selftests/bpf/benchs/bench_sockmap.c b/tools/testing/selftests/bpf/benchs/bench_sockmap.c
new file mode 100644
index 000000000000..cfc072aa7fff
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_sockmap.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <error.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <sys/sendfile.h>
+#include <arpa/inet.h>
+#include <fcntl.h>
+#include <argp.h>
+#include "bench.h"
+#include "bench_sockmap_prog.skel.h"
+#include "bpf_util.h"
+
+#define FILE_SIZE (128 * 1024)
+#define DATA_REPEAT_SIZE 10
+
+static const char snd_data[DATA_REPEAT_SIZE] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+/* c1 <-> [p1, p2] <-> c2
+ * RX bench(BPF_SK_SKB_STREAM_VERDICT):
+ * ARG_FW_RX_PASS:
+ * send(p2) -> bpf skb passthrough -> recv(c2)
+ * ARG_FW_RX_VERDICT_EGRESS:
+ * send(c1) -> verdict skb to tx queue of p2 -> recv(c2)
+ * ARG_FW_RX_VERDICT_INGRESS:
+ * send(c1) -> verdict skb to rx queue of c2 -> recv(c2)
+ *
+ * TX bench(BPF_SK_MSG_VERDICT):
+ * ARG_FW_TX_PASS:
+ * send(p2) -> bpf msg passthrough -> send(p2) -> recv(c2)
+ * ARG_FW_TX_VERDICT_INGRESS:
+ * send(p2) -> verdict msg to rx queue of c2 -> recv(c2)
+ * ARG_FW_TX_VERDICT_EGRESS:
+ * send(p1) -> verdict msg to tx queue of p2 -> recv(c2)
+ */
+enum SOCKMAP_ARG_FLAG {
+ ARG_FW_RX_NORMAL = 11000,
+ ARG_FW_RX_PASS,
+ ARG_FW_RX_VERDICT_EGRESS,
+ ARG_FW_RX_VERDICT_INGRESS,
+ ARG_FW_TX_NORMAL,
+ ARG_FW_TX_PASS,
+ ARG_FW_TX_VERDICT_INGRESS,
+ ARG_FW_TX_VERDICT_EGRESS,
+ ARG_CTL_RX_STRP,
+ ARG_CONSUMER_DELAY_TIME,
+ ARG_PRODUCER_DURATION,
+};
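+
+/* A minimal sketch of the plain ARG_FW_RX_NORMAL data path described
+ * above (illustrative only; the real loop lives in consumer()): bytes
+ * sent into c1 arrive on p1, get copied to p2 in user space, and are
+ * read back on c2. The BPF modes replace this user-space copy with an
+ * skb/msg verdict redirect.
+ */
+static inline void proxy_once_sketch(int p1_fd, int p2_fd)
+{
+ char buf[4096];
+ ssize_t n = read(p1_fd, buf, sizeof(buf));
+
+ if (n > 0)
+ (void)send(p2_fd, buf, n, 0);
+}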
+
+#define TXMODE_NORMAL() \
+ ((ctx.mode) == ARG_FW_TX_NORMAL)
+
+#define TXMODE_BPF_INGRESS() \
+ ((ctx.mode) == ARG_FW_TX_VERDICT_INGRESS)
+
+#define TXMODE_BPF_EGRESS() \
+ ((ctx.mode) == ARG_FW_TX_VERDICT_EGRESS)
+
+#define TXMODE_BPF_PASS() \
+ ((ctx.mode) == ARG_FW_TX_PASS)
+
+#define TXMODE_BPF() ( \
+ TXMODE_BPF_PASS() || \
+ TXMODE_BPF_INGRESS() || \
+ TXMODE_BPF_EGRESS())
+
+#define TXMODE() ( \
+ TXMODE_NORMAL() || \
+ TXMODE_BPF())
+
+#define RXMODE_NORMAL() \
+ ((ctx.mode) == ARG_FW_RX_NORMAL)
+
+#define RXMODE_BPF_PASS() \
+ ((ctx.mode) == ARG_FW_RX_PASS)
+
+#define RXMODE_BPF_VERDICT_EGRESS() \
+ ((ctx.mode) == ARG_FW_RX_VERDICT_EGRESS)
+
+#define RXMODE_BPF_VERDICT_INGRESS() \
+ ((ctx.mode) == ARG_FW_RX_VERDICT_INGRESS)
+
+#define RXMODE_BPF_VERDICT() ( \
+ RXMODE_BPF_VERDICT_INGRESS() || \
+ RXMODE_BPF_VERDICT_EGRESS())
+
+#define RXMODE_BPF() ( \
+ RXMODE_BPF_PASS() || \
+ RXMODE_BPF_VERDICT())
+
+#define RXMODE() ( \
+ RXMODE_NORMAL() || \
+ RXMODE_BPF())
+
+static struct sockmap_ctx {
+ struct bench_sockmap_prog *skel;
+ enum SOCKMAP_ARG_FLAG mode;
+ #define c1 fds[0]
+ #define p1 fds[1]
+ #define c2 fds[2]
+ #define p2 fds[3]
+ #define sfd fds[4]
+ int fds[5];
+ long send_calls;
+ long read_calls;
+ long prod_send;
+ long user_read;
+ int file_size;
+ int delay_consumer;
+ int prod_run_time;
+ int strp_size;
+} ctx = {
+ .prod_send = 0,
+ .user_read = 0,
+ .file_size = FILE_SIZE,
+ .mode = ARG_FW_RX_VERDICT_EGRESS,
+ .fds = {0},
+ .delay_consumer = 0,
+ .prod_run_time = 0,
+ .strp_size = 0,
+};
+
+static void bench_sockmap_prog_destroy(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx.fds); i++) {
+ if (ctx.fds[i] > 0)
+ close(ctx.fds[i]);
+ }
+
+ bench_sockmap_prog__destroy(ctx.skel);
+}
+
+static void init_addr(struct sockaddr_storage *ss,
+ socklen_t *len)
+{
+ struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss));
+
+ addr4->sin_family = AF_INET;
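+ /* port 0: let the kernel pick an ephemeral port for the listener */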
+ addr4->sin_port = 0;
+ addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ *len = sizeof(*addr4);
+}
+
+static bool set_non_block(int fd, bool nonblocking)
+{
+ int flags = fcntl(fd, F_GETFL, 0);
+
+ if (flags == -1)
+ return false;
+ flags = nonblocking ? (flags | O_NONBLOCK) : (flags & ~O_NONBLOCK);
+ return (fcntl(fd, F_SETFL, flags) == 0);
+}
+
+static int create_pair(int *c, int *p, int type)
+{
+ struct sockaddr_storage addr;
+ int err, cfd, pfd;
+ socklen_t addr_len = sizeof(struct sockaddr_storage);
+
+ err = getsockname(ctx.sfd, (struct sockaddr *)&addr, &addr_len);
+ if (err) {
+ fprintf(stderr, "getsockname error %d\n", errno);
+ return err;
+ }
+ cfd = socket(AF_INET, type, 0);
+ if (cfd < 0) {
+ fprintf(stderr, "socket error %d\n", errno);
+ return cfd;
+ }
+
+ err = connect(cfd, (struct sockaddr *)&addr, addr_len);
+ if (err && errno != EINPROGRESS) {
+ fprintf(stderr, "connect error %d\n", errno);
+ return err;
+ }
+
+ pfd = accept(ctx.sfd, NULL, NULL);
+ if (pfd < 0) {
+ fprintf(stderr, "accept error %d\n", errno);
+ return pfd;
+ }
+ *c = cfd;
+ *p = pfd;
+ return 0;
+}
+
+static int create_sockets(void)
+{
+ struct sockaddr_storage addr;
+ int err, one = 1;
+ socklen_t addr_len;
+
+ init_addr(&addr, &addr_len);
+ ctx.sfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (ctx.sfd < 0) {
+ fprintf(stderr, "socket error:%d\n", errno);
+ return ctx.sfd;
+ }
+ err = setsockopt(ctx.sfd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
+ if (err) {
+ fprintf(stderr, "setsockopt error:%d\n", errno);
+ return err;
+ }
+
+ err = bind(ctx.sfd, (struct sockaddr *)&addr, addr_len);
+ if (err) {
+ fprintf(stderr, "bind error:%d\n", errno);
+ return err;
+ }
+
+ err = listen(ctx.sfd, SOMAXCONN);
+ if (err) {
+ fprintf(stderr, "listen error:%d\n", errno);
+ return err;
+ }
+
+ err = create_pair(&ctx.c1, &ctx.p1, SOCK_STREAM);
+ if (err) {
+ fprintf(stderr, "create_pair 1 error\n");
+ return err;
+ }
+
+ err = create_pair(&ctx.c2, &ctx.p2, SOCK_STREAM);
+ if (err) {
+ fprintf(stderr, "create_pair 2 error\n");
+ return err;
+ }
+ printf("create socket fd c1:%d p1:%d c2:%d p2:%d\n",
+ ctx.c1, ctx.p1, ctx.c2, ctx.p2);
+ return 0;
+}
+
+static void validate(void)
+{
+ if (env.consumer_cnt != 2 || env.producer_cnt != 1 ||
+ !env.affinity)
+ goto err;
+ return;
+err:
+ fprintf(stderr, "argument '-c 2 -p 1 -a' is necessary");
+ exit(1);
+}
+
+static int setup_rx_sockmap(void)
+{
+ int verdict, pass, parser, map;
+ int zero = 0, one = 1;
+ int err;
+
+ parser = bpf_program__fd(ctx.skel->progs.prog_skb_parser);
+ verdict = bpf_program__fd(ctx.skel->progs.prog_skb_verdict);
+ pass = bpf_program__fd(ctx.skel->progs.prog_skb_pass);
+ map = bpf_map__fd(ctx.skel->maps.sock_map_rx);
+
+ if (ctx.strp_size != 0) {
+ ctx.skel->bss->pkt_size = ctx.strp_size;
+ err = bpf_prog_attach(parser, map, BPF_SK_SKB_STREAM_PARSER, 0);
+ if (err)
+ return err;
+ }
+
+ if (RXMODE_BPF_VERDICT())
+ err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ else if (RXMODE_BPF_PASS())
+ err = bpf_prog_attach(pass, map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (err)
+ return err;
+
+ if (RXMODE_BPF_PASS())
+ return bpf_map_update_elem(map, &zero, &ctx.c2, BPF_NOEXIST);
+
+ err = bpf_map_update_elem(map, &zero, &ctx.p1, BPF_NOEXIST);
+ if (err < 0)
+ return err;
+
+ if (RXMODE_BPF_VERDICT_INGRESS()) {
+ ctx.skel->bss->verdict_dir = BPF_F_INGRESS;
+ err = bpf_map_update_elem(map, &one, &ctx.c2, BPF_NOEXIST);
+ } else {
+ err = bpf_map_update_elem(map, &one, &ctx.p2, BPF_NOEXIST);
+ }
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int setup_tx_sockmap(void)
+{
+ int zero = 0, one = 1;
+ int prog, map;
+ int err;
+
+ map = bpf_map__fd(ctx.skel->maps.sock_map_tx);
+ prog = TXMODE_BPF_PASS() ?
+ bpf_program__fd(ctx.skel->progs.prog_skmsg_pass) :
+ bpf_program__fd(ctx.skel->progs.prog_skmsg_verdict);
+
+ err = bpf_prog_attach(prog, map, BPF_SK_MSG_VERDICT, 0);
+ if (err)
+ return err;
+
+ if (TXMODE_BPF_EGRESS()) {
+ err = bpf_map_update_elem(map, &zero, &ctx.p1, BPF_NOEXIST);
+ err |= bpf_map_update_elem(map, &one, &ctx.p2, BPF_NOEXIST);
+ } else {
+ ctx.skel->bss->verdict_dir = BPF_F_INGRESS;
+ err = bpf_map_update_elem(map, &zero, &ctx.p2, BPF_NOEXIST);
+ err |= bpf_map_update_elem(map, &one, &ctx.c2, BPF_NOEXIST);
+ }
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void setup(void)
+{
+ int err;
+
+ ctx.skel = bench_sockmap_prog__open_and_load();
+ if (!ctx.skel) {
+ fprintf(stderr, "error loading skel\n");
+ exit(1);
+ }
+
+ if (create_sockets()) {
+ fprintf(stderr, "create_net_mode error\n");
+ goto err;
+ }
+
+ if (RXMODE_BPF()) {
+ err = setup_rx_sockmap();
+ if (err) {
+ fprintf(stderr, "setup_rx_sockmap error:%d\n", err);
+ goto err;
+ }
+ } else if (TXMODE_BPF()) {
+ err = setup_tx_sockmap();
+ if (err) {
+ fprintf(stderr, "setup_tx_sockmap error:%d\n", err);
+ goto err;
+ }
+ } else {
+ fprintf(stderr, "unknown sockmap bench mode: %d\n", ctx.mode);
+ goto err;
+ }
+
+ return;
+
+err:
+ bench_sockmap_prog_destroy();
+ exit(1);
+}
+
+static void measure(struct bench_res *res)
+{
+ res->drops = atomic_swap(&ctx.prod_send, 0);
+ res->hits = atomic_swap(&ctx.skel->bss->process_byte, 0);
+ res->false_hits = atomic_swap(&ctx.user_read, 0);
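+ /* send() and read() call counts are packed into one 64-bit field:
+ * low 32 bits carry send() calls, high 32 bits carry read() calls
+ * (unpacked again in report_progress())
+ */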
+ res->important_hits = atomic_swap(&ctx.send_calls, 0);
+ res->important_hits |= atomic_swap(&ctx.read_calls, 0) << 32;
+}
+
+static void verify_data(int *check_pos, char *buf, int rcv)
+{
+ for (int i = 0 ; i < rcv; i++) {
+ if (buf[i] != snd_data[(*check_pos) % DATA_REPEAT_SIZE]) {
+ fprintf(stderr, "verify data fail");
+ exit(1);
+ }
+ (*check_pos)++;
+ if (*check_pos >= FILE_SIZE)
+ *check_pos = 0;
+ }
+}
+
+static void *consumer(void *input)
+{
+ int rcv, sent;
+ int check_pos = 0;
+ int tid = (long)input;
+ int recv_buf_size = FILE_SIZE;
+ char *buf = malloc(recv_buf_size);
+ int delay_read = ctx.delay_consumer;
+
+ if (!buf) {
+ fprintf(stderr, "fail to init read buffer");
+ return NULL;
+ }
+
+ while (true) {
+ if (tid == 1) {
+ /* consumer 1 is unused for tx test and stream verdict test */
+ if (RXMODE_BPF() || TXMODE())
+ return NULL;
+ /* it's only for RX_NORMAL, which serves as reverse-proxy mode */
+ rcv = read(ctx.p1, buf, recv_buf_size);
+ if (rcv < 0) {
+ fprintf(stderr, "fail to read p1");
+ return NULL;
+ }
+
+ sent = send(ctx.p2, buf, recv_buf_size, 0);
+ if (sent < 0) {
+ fprintf(stderr, "fail to send p2");
+ return NULL;
+ }
+ } else {
+ if (delay_read != 0) {
+ if (delay_read < 0)
+ return NULL;
+ sleep(delay_read);
+ delay_read = 0;
+ }
+ /* read real endpoint by consumer 0 */
+ atomic_inc(&ctx.read_calls);
+ rcv = read(ctx.c2, buf, recv_buf_size);
+ if (rcv < 0 && errno != EAGAIN) {
+ fprintf(stderr, "%s fail to read c2 %d\n", __func__, errno);
+ return NULL;
+ }
+ verify_data(&check_pos, buf, rcv);
+ atomic_add(&ctx.user_read, rcv);
+ }
+ }
+
+ return NULL;
+}
+
+static void *producer(void *input)
+{
+ int off = 0, fp, need_sent, sent;
+ int file_size = ctx.file_size;
+ struct timespec ts1, ts2;
+ int target;
+ FILE *file;
+
+ file = tmpfile();
+ if (!file) {
+ fprintf(stderr, "create file for sendfile");
+ return NULL;
+ }
+
+ /* fill the file with a repeating pattern so the consumer can verify it */
+ for (int i = 0; i < file_size; i++) {
+ if (fwrite(&snd_data[off], sizeof(char), 1, file) != 1) {
+ fprintf(stderr, "init tmpfile error");
+ return NULL;
+ }
+ if (++off >= sizeof(snd_data))
+ off = 0;
+ }
+ fflush(file);
+ fseek(file, 0, SEEK_SET);
+
+ fp = fileno(file);
+ need_sent = file_size;
+ clock_gettime(CLOCK_MONOTONIC, &ts1);
+
+ if (RXMODE_BPF_VERDICT())
+ target = ctx.c1;
+ else if (TXMODE_BPF_EGRESS())
+ target = ctx.p1;
+ else
+ target = ctx.p2;
+ set_non_block(target, true);
+ while (true) {
+ if (ctx.prod_run_time) {
+ clock_gettime(CLOCK_MONOTONIC, &ts2);
+ if (ts2.tv_sec - ts1.tv_sec > ctx.prod_run_time)
+ return NULL;
+ }
+
+ errno = 0;
+ atomic_inc(&ctx.send_calls);
+ sent = sendfile(target, fp, NULL, need_sent);
+ if (sent < 0) {
+ if (errno != EAGAIN && errno != ENOMEM && errno != ENOBUFS) {
+ fprintf(stderr, "sendfile return %d, errorno %d:%s\n",
+ sent, errno, strerror(errno));
+ return NULL;
+ }
+ continue;
+ } else if (sent < need_sent) {
+ need_sent -= sent;
+ atomic_add(&ctx.prod_send, sent);
+ continue;
+ }
+ atomic_add(&ctx.prod_send, need_sent);
+ need_sent = file_size;
+ lseek(fp, 0, SEEK_SET);
+ }
+
+ return NULL;
+}
+
+static void report_progress(int iter, struct bench_res *res, long delta_ns)
+{
+ double speed_mbs, prod_mbs, bpf_mbs, send_hz, read_hz;
+
+ prod_mbs = res->drops / 1000000.0 / (delta_ns / 1000000000.0);
+ speed_mbs = res->false_hits / 1000000.0 / (delta_ns / 1000000000.0);
+ bpf_mbs = res->hits / 1000000.0 / (delta_ns / 1000000000.0);
+ send_hz = (res->important_hits & 0xFFFFFFFF) / (delta_ns / 1000000000.0);
+ read_hz = (res->important_hits >> 32) / (delta_ns / 1000000000.0);
+
+ printf("Iter %3d (%7.3lfus): ",
+ iter, (delta_ns - 1000000000) / 1000.0);
+ printf("Send Speed %8.3lf MB/s (%8.3lf calls/s), BPF Speed %8.3lf MB/s, "
+ "Rcv Speed %8.3lf MB/s (%8.3lf calls/s)\n",
+ prod_mbs, send_hz, bpf_mbs, speed_mbs, read_hz);
+}
+
+static void report_final(struct bench_res res[], int res_cnt)
+{
+ double verdict_mbs_mean = 0.0;
+ long verdict_total = 0;
+ int i;
+
+ for (i = 0; i < res_cnt; i++) {
+ verdict_mbs_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt);
+ verdict_total += res[i].hits / 1000000.0;
+ }
+
+ printf("Summary: total trans %8.3lu MB \u00B1 %5.3lf MB/s\n",
+ verdict_total, verdict_mbs_mean);
+}
+
+static const struct argp_option opts[] = {
+ { "rx-normal", ARG_FW_RX_NORMAL, NULL, 0,
+ "simple reserve-proxy mode, no bfp enabled"},
+ { "rx-pass", ARG_FW_RX_PASS, NULL, 0,
+ "run bpf prog but no redir applied"},
+ { "rx-strp", ARG_CTL_RX_STRP, "Byte", 0,
+ "enable strparser and set the encapsulation size"},
+ { "rx-verdict-egress", ARG_FW_RX_VERDICT_EGRESS, NULL, 0,
+ "forward data with bpf(stream verdict)"},
+ { "rx-verdict-ingress", ARG_FW_RX_VERDICT_INGRESS, NULL, 0,
+ "forward data with bpf(stream verdict)"},
+ { "tx-normal", ARG_FW_TX_NORMAL, NULL, 0,
+ "simple c-s mode, no bfp enabled"},
+ { "tx-pass", ARG_FW_TX_PASS, NULL, 0,
+ "run bpf prog but no redir applied"},
+ { "tx-verdict-ingress", ARG_FW_TX_VERDICT_INGRESS, NULL, 0,
+ "forward msg to ingress queue of another socket"},
+ { "tx-verdict-egress", ARG_FW_TX_VERDICT_EGRESS, NULL, 0,
+ "forward msg to egress queue of another socket"},
+ { "delay-consumer", ARG_CONSUMER_DELAY_TIME, "SEC", 0,
+ "delay consumer start"},
+ { "producer-duration", ARG_PRODUCER_DURATION, "SEC", 0,
+ "producer duration"},
+ {},
+};
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+ switch (key) {
+ case ARG_FW_RX_NORMAL...ARG_FW_TX_VERDICT_EGRESS:
+ ctx.mode = key;
+ break;
+ case ARG_CONSUMER_DELAY_TIME:
+ ctx.delay_consumer = strtol(arg, NULL, 10);
+ break;
+ case ARG_PRODUCER_DURATION:
+ ctx.prod_run_time = strtol(arg, NULL, 10);
+ break;
+ case ARG_CTL_RX_STRP:
+ ctx.strp_size = strtol(arg, NULL, 10);
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+/* exported into benchmark runner */
+const struct argp bench_sockmap_argp = {
+ .options = opts,
+ .parser = parse_arg,
+};
+
+/* Benchmark performance of creating bpf local storage */
+const struct bench bench_sockmap = {
+ .name = "sockmap",
+ .argp = &bench_sockmap_argp,
+ .validate = validate,
+ .setup = setup,
+ .producer_thread = producer,
+ .consumer_thread = consumer,
+ .measure = measure,
+ .report_progress = report_progress,
+ .report_final = report_final,
+};
diff --git a/tools/testing/selftests/bpf/benchs/bench_strncmp.c b/tools/testing/selftests/bpf/benchs/bench_strncmp.c
new file mode 100644
index 000000000000..a5e1428fd7a0
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_strncmp.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
+#include <argp.h>
+#include "bench.h"
+#include "strncmp_bench.skel.h"
+
+static struct strncmp_ctx {
+ struct strncmp_bench *skel;
+} ctx;
+
+static struct strncmp_args {
+ u32 cmp_str_len;
+} args = {
+ .cmp_str_len = 32,
+};
+
+enum {
+ ARG_CMP_STR_LEN = 5000,
+};
+
+static const struct argp_option opts[] = {
+ { "cmp-str-len", ARG_CMP_STR_LEN, "CMP_STR_LEN", 0,
+ "Set the length of compared string" },
+ {},
+};
+
+static error_t strncmp_parse_arg(int key, char *arg, struct argp_state *state)
+{
+ switch (key) {
+ case ARG_CMP_STR_LEN:
+ args.cmp_str_len = strtoul(arg, NULL, 10);
+ if (!args.cmp_str_len ||
+ args.cmp_str_len >= sizeof(ctx.skel->bss->str)) {
+ fprintf(stderr, "Invalid cmp str len (limit %zu)\n",
+ sizeof(ctx.skel->bss->str));
+ argp_usage(state);
+ }
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+const struct argp bench_strncmp_argp = {
+ .options = opts,
+ .parser = strncmp_parse_arg,
+};
+
+static void strncmp_validate(void)
+{
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "strncmp benchmark doesn't support consumer!\n");
+ exit(1);
+ }
+}
+
+static void strncmp_setup(void)
+{
+ int err;
+ char *target;
+ size_t i, sz;
+
+ sz = sizeof(ctx.skel->rodata->target);
+ if (!sz || sz < sizeof(ctx.skel->bss->str)) {
+ fprintf(stderr, "invalid string size (target %zu, src %zu)\n",
+ sz, sizeof(ctx.skel->bss->str));
+ exit(1);
+ }
+
+ setup_libbpf();
+
+ ctx.skel = strncmp_bench__open();
+ if (!ctx.skel) {
+ fprintf(stderr, "failed to open skeleton\n");
+ exit(1);
+ }
+
+ srandom(time(NULL));
+ target = ctx.skel->rodata->target;
+ for (i = 0; i < sz - 1; i++)
+ target[i] = '1' + random() % 9;
+ target[sz - 1] = '\0';
+
+ ctx.skel->rodata->cmp_str_len = args.cmp_str_len;
+
+ memcpy(ctx.skel->bss->str, target, args.cmp_str_len);
+ ctx.skel->bss->str[args.cmp_str_len] = '\0';
+ /* Make bss->str < rodata->target */
+ ctx.skel->bss->str[args.cmp_str_len - 1] -= 1;
+
+ err = strncmp_bench__load(ctx.skel);
+ if (err) {
+ fprintf(stderr, "failed to load skeleton\n");
+ strncmp_bench__destroy(ctx.skel);
+ exit(1);
+ }
+}
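+
+/* Illustration of the ordering trick above (not part of the bench,
+ * and assuming <string.h> is pulled in via bench.h): str matches
+ * target in every byte except the last compared one, which is
+ * smaller, so strncmp() must walk all cmp_str_len bytes before it can
+ * return a negative result -- the worst case for the comparison loop.
+ */
+static inline int strncmp_worst_case_sketch(void)
+{
+ char a[] = "abb", b[] = "abc"; /* equal except the last byte */
+
+ return strncmp(a, b, 3); /* scans all 3 bytes, returns < 0 */
+}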
+
+static void strncmp_attach_prog(struct bpf_program *prog)
+{
+ struct bpf_link *link;
+
+ link = bpf_program__attach(prog);
+ if (!link) {
+ fprintf(stderr, "failed to attach program!\n");
+ exit(1);
+ }
+}
+
+static void strncmp_no_helper_setup(void)
+{
+ strncmp_setup();
+ strncmp_attach_prog(ctx.skel->progs.strncmp_no_helper);
+}
+
+static void strncmp_helper_setup(void)
+{
+ strncmp_setup();
+ strncmp_attach_prog(ctx.skel->progs.strncmp_helper);
+}
+
+static void *strncmp_producer(void *ctx)
+{
+ while (true)
+ (void)syscall(__NR_getpgid);
+ return NULL;
+}
+
+static void strncmp_measure(struct bench_res *res)
+{
+ res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
+}
+
+const struct bench bench_strncmp_no_helper = {
+ .name = "strncmp-no-helper",
+ .argp = &bench_strncmp_argp,
+ .validate = strncmp_validate,
+ .setup = strncmp_no_helper_setup,
+ .producer_thread = strncmp_producer,
+ .measure = strncmp_measure,
+ .report_progress = hits_drops_report_progress,
+ .report_final = hits_drops_report_final,
+};
+
+const struct bench bench_strncmp_helper = {
+ .name = "strncmp-helper",
+ .argp = &bench_strncmp_argp,
+ .validate = strncmp_validate,
+ .setup = strncmp_helper_setup,
+ .producer_thread = strncmp_producer,
+ .measure = strncmp_measure,
+ .report_progress = hits_drops_report_progress,
+ .report_final = hits_drops_report_final,
+};
diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c
index 2a0b6c9885a4..34018fc3927f 100644
--- a/tools/testing/selftests/bpf/benchs/bench_trigger.c
+++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c
@@ -1,58 +1,162 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
+#define _GNU_SOURCE
+#include <argp.h>
+#include <unistd.h>
+#include <stdint.h>
+#include "bpf_util.h"
#include "bench.h"
#include "trigger_bench.skel.h"
+#include "trace_helpers.h"
+
+#define MAX_TRIG_BATCH_ITERS 1000
+
+static struct {
+ __u32 batch_iters;
+} args = {
+ .batch_iters = 100,
+};
+
+enum {
+ ARG_TRIG_BATCH_ITERS = 7000,
+};
+
+static const struct argp_option opts[] = {
+ { "trig-batch-iters", ARG_TRIG_BATCH_ITERS, "BATCH_ITER_CNT", 0,
+ "Number of in-kernel iterations per one driver test run"},
+ {},
+};
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+ long ret;
+
+ switch (key) {
+ case ARG_TRIG_BATCH_ITERS:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > MAX_TRIG_BATCH_ITERS) {
+ fprintf(stderr, "invalid --trig-batch-iters value (should be between %d and %d)\n",
+ 1, MAX_TRIG_BATCH_ITERS);
+ argp_usage(state);
+ }
+ args.batch_iters = ret;
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+const struct argp bench_trigger_batch_argp = {
+ .options = opts,
+ .parser = parse_arg,
+};
+
+/* adjust slot shift in inc_counter() if changing */
+#define MAX_BUCKETS 256
+
+#pragma GCC diagnostic ignored "-Wattributes"
/* BPF triggering benchmarks */
static struct trigger_ctx {
struct trigger_bench *skel;
+ bool usermode_counters;
+ int driver_prog_fd;
} ctx;
-static struct counter base_hits;
+static struct counter base_hits[MAX_BUCKETS];
+
+static __always_inline void inc_counter(struct counter *counters)
+{
+ static __thread int tid = 0;
+ unsigned slot;
+
+ if (unlikely(tid == 0))
+ tid = sys_gettid();
+
+ /* multiplicative hashing, it's fast */
+ slot = 2654435769U * tid;
+ slot >>= 24;
+
+ atomic_inc(&base_hits[slot].value); /* use highest byte as an index */
+}
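+
+/* The slot choice above is Fibonacci (multiplicative) hashing:
+ * 2654435769 is roughly 2^32 divided by the golden ratio, so the top
+ * byte of tid * const spreads thread IDs near-uniformly over the
+ * MAX_BUCKETS = 256 slots. Standalone sketch (illustrative only):
+ */
+static inline unsigned int tid_to_slot_sketch(unsigned int tid)
+{
+ return (2654435769U * tid) >> 24; /* top 8 bits -> 0..255 */
+}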
-static void trigger_validate()
+static long sum_and_reset_counters(struct counter *counters)
{
- if (env.consumer_cnt != 1) {
- fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
+ int i;
+ long sum = 0;
+
+ for (i = 0; i < MAX_BUCKETS; i++)
+ sum += atomic_swap(&counters[i].value, 0);
+ return sum;
+}
+
+static void trigger_validate(void)
+{
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer!\n");
exit(1);
}
}
-static void *trigger_base_producer(void *input)
+static void *trigger_producer(void *input)
{
- while (true) {
- (void)syscall(__NR_getpgid);
- atomic_inc(&base_hits.value);
+ if (ctx.usermode_counters) {
+ while (true) {
+ (void)syscall(__NR_getpgid);
+ inc_counter(base_hits);
+ }
+ } else {
+ while (true)
+ (void)syscall(__NR_getpgid);
}
return NULL;
}
-static void trigger_base_measure(struct bench_res *res)
+static void *trigger_producer_batch(void *input)
{
- res->hits = atomic_swap(&base_hits.value, 0);
-}
+ int fd = ctx.driver_prog_fd ?: bpf_program__fd(ctx.skel->progs.trigger_driver);
-static void *trigger_producer(void *input)
-{
while (true)
- (void)syscall(__NR_getpgid);
+ bpf_prog_test_run_opts(fd, NULL);
+
return NULL;
}
static void trigger_measure(struct bench_res *res)
{
- res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
+ if (ctx.usermode_counters)
+ res->hits = sum_and_reset_counters(base_hits);
+ else
+ res->hits = sum_and_reset_counters(ctx.skel->bss->hits);
}
-static void setup_ctx()
+static void setup_ctx(void)
{
setup_libbpf();
- ctx.skel = trigger_bench__open_and_load();
+ ctx.skel = trigger_bench__open();
if (!ctx.skel) {
fprintf(stderr, "failed to open skeleton\n");
exit(1);
}
+
+ /* default "driver" BPF program */
+ bpf_program__set_autoload(ctx.skel->progs.trigger_driver, true);
+
+ ctx.skel->rodata->batch_iters = args.batch_iters;
+}
+
+static void load_ctx(void)
+{
+ int err;
+
+ err = trigger_bench__load(ctx.skel);
+ if (err) {
+ fprintf(stderr, "failed to open skeleton\n");
+ exit(1);
+ }
}
static void attach_bpf(struct bpf_program *prog)
@@ -60,125 +164,448 @@ static void attach_bpf(struct bpf_program *prog)
struct bpf_link *link;
link = bpf_program__attach(prog);
- if (IS_ERR(link)) {
+ if (!link) {
fprintf(stderr, "failed to attach program!\n");
exit(1);
}
}
-static void trigger_tp_setup()
+static void trigger_syscall_count_setup(void)
{
- setup_ctx();
- attach_bpf(ctx.skel->progs.bench_trigger_tp);
+ ctx.usermode_counters = true;
}
-static void trigger_rawtp_setup()
+/* Batched, staying mostly in-kernel triggering setups */
+static void trigger_kernel_count_setup(void)
{
setup_ctx();
- attach_bpf(ctx.skel->progs.bench_trigger_raw_tp);
+ bpf_program__set_autoload(ctx.skel->progs.trigger_driver, false);
+ bpf_program__set_autoload(ctx.skel->progs.trigger_kernel_count, true);
+ load_ctx();
+ /* override driver program */
+ ctx.driver_prog_fd = bpf_program__fd(ctx.skel->progs.trigger_kernel_count);
}
-static void trigger_kprobe_setup()
+static void trigger_kprobe_setup(void)
{
setup_ctx();
+ bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kprobe, true);
+ load_ctx();
attach_bpf(ctx.skel->progs.bench_trigger_kprobe);
}
-static void trigger_fentry_setup()
+static void trigger_kretprobe_setup(void)
+{
+ setup_ctx();
+ bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kretprobe, true);
+ load_ctx();
+ attach_bpf(ctx.skel->progs.bench_trigger_kretprobe);
+}
+
+static void trigger_kprobe_multi_setup(void)
+{
+ setup_ctx();
+ bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kprobe_multi, true);
+ load_ctx();
+ attach_bpf(ctx.skel->progs.bench_trigger_kprobe_multi);
+}
+
+static void trigger_kretprobe_multi_setup(void)
{
setup_ctx();
+ bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kretprobe_multi, true);
+ load_ctx();
+ attach_bpf(ctx.skel->progs.bench_trigger_kretprobe_multi);
+}
+
+static void trigger_fentry_setup(void)
+{
+ setup_ctx();
+ bpf_program__set_autoload(ctx.skel->progs.bench_trigger_fentry, true);
+ load_ctx();
attach_bpf(ctx.skel->progs.bench_trigger_fentry);
}
-static void trigger_fentry_sleep_setup()
+static void attach_ksyms_all(struct bpf_program *empty, bool kretprobe)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ char **syms = NULL;
+ size_t cnt = 0;
+
+ /* Some recursive functions will be skipped in
+ * bpf_get_ksyms -> skip_entry, as they can introduce significant
+ * overhead. However, it's difficult to skip all the recursive
+ * functions for a debug kernel.
+ *
+ * So, don't run the kprobe-multi-all and kretprobe-multi-all on
+ * a debug kernel.
+ */
+ if (bpf_get_ksyms(&syms, &cnt, true)) {
+ fprintf(stderr, "failed to get ksyms\n");
+ exit(1);
+ }
+
+ opts.syms = (const char **) syms;
+ opts.cnt = cnt;
+ opts.retprobe = kretprobe;
+ /* attach empty to all the kernel functions except bpf_get_numa_node_id. */
+ if (!bpf_program__attach_kprobe_multi_opts(empty, NULL, &opts)) {
+ fprintf(stderr, "failed to attach bpf_program__attach_kprobe_multi_opts to all\n");
+ exit(1);
+ }
+}
+
+static void trigger_kprobe_multi_all_setup(void)
{
+ struct bpf_program *prog, *empty;
+
setup_ctx();
- attach_bpf(ctx.skel->progs.bench_trigger_fentry_sleep);
+ empty = ctx.skel->progs.bench_kprobe_multi_empty;
+ prog = ctx.skel->progs.bench_trigger_kprobe_multi;
+ bpf_program__set_autoload(empty, true);
+ bpf_program__set_autoload(prog, true);
+ load_ctx();
+
+ attach_ksyms_all(empty, false);
+ attach_bpf(prog);
}
-static void trigger_fmodret_setup()
+static void trigger_kretprobe_multi_all_setup(void)
{
+ struct bpf_program *prog, *empty;
+
setup_ctx();
+ empty = ctx.skel->progs.bench_kretprobe_multi_empty;
+ prog = ctx.skel->progs.bench_trigger_kretprobe_multi;
+ bpf_program__set_autoload(empty, true);
+ bpf_program__set_autoload(prog, true);
+ load_ctx();
+
+ attach_ksyms_all(empty, true);
+ attach_bpf(prog);
+}
+
+static void trigger_fexit_setup(void)
+{
+ setup_ctx();
+ bpf_program__set_autoload(ctx.skel->progs.bench_trigger_fexit, true);
+ load_ctx();
+ attach_bpf(ctx.skel->progs.bench_trigger_fexit);
+}
+
+static void trigger_fmodret_setup(void)
+{
+ setup_ctx();
+ bpf_program__set_autoload(ctx.skel->progs.trigger_driver, false);
+ bpf_program__set_autoload(ctx.skel->progs.trigger_driver_kfunc, true);
+ bpf_program__set_autoload(ctx.skel->progs.bench_trigger_fmodret, true);
+ load_ctx();
+ /* override driver program */
+ ctx.driver_prog_fd = bpf_program__fd(ctx.skel->progs.trigger_driver_kfunc);
attach_bpf(ctx.skel->progs.bench_trigger_fmodret);
}
-static void *trigger_consumer(void *input)
+static void trigger_tp_setup(void)
+{
+ setup_ctx();
+ bpf_program__set_autoload(ctx.skel->progs.trigger_driver, false);
+ bpf_program__set_autoload(ctx.skel->progs.trigger_driver_kfunc, true);
+ bpf_program__set_autoload(ctx.skel->progs.bench_trigger_tp, true);
+ load_ctx();
+ /* override driver program */
+ ctx.driver_prog_fd = bpf_program__fd(ctx.skel->progs.trigger_driver_kfunc);
+ attach_bpf(ctx.skel->progs.bench_trigger_tp);
+}
+
+static void trigger_rawtp_setup(void)
+{
+ setup_ctx();
+ bpf_program__set_autoload(ctx.skel->progs.trigger_driver, false);
+ bpf_program__set_autoload(ctx.skel->progs.trigger_driver_kfunc, true);
+ bpf_program__set_autoload(ctx.skel->progs.bench_trigger_rawtp, true);
+ load_ctx();
+ /* override driver program */
+ ctx.driver_prog_fd = bpf_program__fd(ctx.skel->progs.trigger_driver_kfunc);
+ attach_bpf(ctx.skel->progs.bench_trigger_rawtp);
+}
+
+/* make sure the call is not inlined or optimized out by the compiler,
+ * hence the __weak attribute and the inline asm volatile in the body
+ *
+ * There is a performance difference between uprobing at nop location vs other
+ * instructions. So use two different targets, one of which starts with nop
+ * and another doesn't.
+ *
+ * GCC doesn't generate stack setup preamble for these functions due to them
+ * having no input arguments and doing nothing in the body.
+ */
+__nocf_check __weak void uprobe_target_nop(void)
{
+ asm volatile ("nop");
+}
+
+__weak void opaque_noop_func(void)
+{
+}
+
+__nocf_check __weak int uprobe_target_push(void)
+{
+ /* overhead of function call is negligible compared to uprobe
+ * triggering, so this shouldn't affect benchmark results much
+ */
+ opaque_noop_func();
+ return 1;
+}
+
+__nocf_check __weak void uprobe_target_ret(void)
+{
+ asm volatile ("");
+}
+
+static void *uprobe_producer_count(void *input)
+{
+ while (true) {
+ uprobe_target_nop();
+ inc_counter(base_hits);
+ }
return NULL;
}
-const struct bench bench_trig_base = {
- .name = "trig-base",
- .validate = trigger_validate,
- .producer_thread = trigger_base_producer,
- .consumer_thread = trigger_consumer,
- .measure = trigger_base_measure,
- .report_progress = hits_drops_report_progress,
- .report_final = hits_drops_report_final,
-};
+static void *uprobe_producer_nop(void *input)
+{
+ while (true)
+ uprobe_target_nop();
+ return NULL;
+}
-const struct bench bench_trig_tp = {
- .name = "trig-tp",
- .validate = trigger_validate,
- .setup = trigger_tp_setup,
- .producer_thread = trigger_producer,
- .consumer_thread = trigger_consumer,
- .measure = trigger_measure,
- .report_progress = hits_drops_report_progress,
- .report_final = hits_drops_report_final,
-};
+static void *uprobe_producer_push(void *input)
+{
+ while (true)
+ uprobe_target_push();
+ return NULL;
+}
-const struct bench bench_trig_rawtp = {
- .name = "trig-rawtp",
- .validate = trigger_validate,
- .setup = trigger_rawtp_setup,
- .producer_thread = trigger_producer,
- .consumer_thread = trigger_consumer,
- .measure = trigger_measure,
- .report_progress = hits_drops_report_progress,
- .report_final = hits_drops_report_final,
-};
+static void *uprobe_producer_ret(void *input)
+{
+ while (true)
+ uprobe_target_ret();
+ return NULL;
+}
-const struct bench bench_trig_kprobe = {
- .name = "trig-kprobe",
- .validate = trigger_validate,
- .setup = trigger_kprobe_setup,
- .producer_thread = trigger_producer,
- .consumer_thread = trigger_consumer,
- .measure = trigger_measure,
- .report_progress = hits_drops_report_progress,
- .report_final = hits_drops_report_final,
-};
+#ifdef __x86_64__
+__nocf_check __weak void uprobe_target_nop5(void)
+{
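+ /* 0f 1f 44 00 00 is the canonical 5-byte x86-64 nop,
+ * nopl 0x0(%rax,%rax,1)
+ */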
+ asm volatile (".byte 0x0f, 0x1f, 0x44, 0x00, 0x00");
+}
-const struct bench bench_trig_fentry = {
- .name = "trig-fentry",
- .validate = trigger_validate,
- .setup = trigger_fentry_setup,
- .producer_thread = trigger_producer,
- .consumer_thread = trigger_consumer,
- .measure = trigger_measure,
- .report_progress = hits_drops_report_progress,
- .report_final = hits_drops_report_final,
-};
+static void *uprobe_producer_nop5(void *input)
+{
+ while (true)
+ uprobe_target_nop5();
+ return NULL;
+}
+#endif
-const struct bench bench_trig_fentry_sleep = {
- .name = "trig-fentry-sleep",
- .validate = trigger_validate,
- .setup = trigger_fentry_sleep_setup,
- .producer_thread = trigger_producer,
- .consumer_thread = trigger_consumer,
- .measure = trigger_measure,
- .report_progress = hits_drops_report_progress,
- .report_final = hits_drops_report_final,
-};
+static void usetup(bool use_retprobe, bool use_multi, void *target_addr)
+{
+ size_t uprobe_offset;
+ struct bpf_link *link;
+ int err;
+
+ setup_libbpf();
+
+ ctx.skel = trigger_bench__open();
+ if (!ctx.skel) {
+ fprintf(stderr, "failed to open skeleton\n");
+ exit(1);
+ }
+
+ if (use_multi)
+ bpf_program__set_autoload(ctx.skel->progs.bench_trigger_uprobe_multi, true);
+ else
+ bpf_program__set_autoload(ctx.skel->progs.bench_trigger_uprobe, true);
+
+ err = trigger_bench__load(ctx.skel);
+ if (err) {
+ fprintf(stderr, "failed to load skeleton\n");
+ exit(1);
+ }
-const struct bench bench_trig_fmodret = {
- .name = "trig-fmodret",
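+ /* translate the in-process address of the target function into a
+ * file offset within /proc/self/exe, which is what uprobe
+ * attachment expects
+ */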
+ uprobe_offset = get_uprobe_offset(target_addr);
+ if (use_multi) {
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
+ .retprobe = use_retprobe,
+ .cnt = 1,
+ .offsets = &uprobe_offset,
+ );
+ link = bpf_program__attach_uprobe_multi(
+ ctx.skel->progs.bench_trigger_uprobe_multi,
+ -1 /* all PIDs */, "/proc/self/exe", NULL, &opts);
+ ctx.skel->links.bench_trigger_uprobe_multi = link;
+ } else {
+ link = bpf_program__attach_uprobe(ctx.skel->progs.bench_trigger_uprobe,
+ use_retprobe,
+ -1 /* all PIDs */,
+ "/proc/self/exe",
+ uprobe_offset);
+ ctx.skel->links.bench_trigger_uprobe = link;
+ }
+ if (!link) {
+ fprintf(stderr, "failed to attach %s!\n", use_multi ? "multi-uprobe" : "uprobe");
+ exit(1);
+ }
+}
+
+static void usermode_count_setup(void)
+{
+ ctx.usermode_counters = true;
+}
+
+static void uprobe_nop_setup(void)
+{
+ usetup(false, false /* !use_multi */, &uprobe_target_nop);
+}
+
+static void uretprobe_nop_setup(void)
+{
+ usetup(true, false /* !use_multi */, &uprobe_target_nop);
+}
+
+static void uprobe_push_setup(void)
+{
+ usetup(false, false /* !use_multi */, &uprobe_target_push);
+}
+
+static void uretprobe_push_setup(void)
+{
+ usetup(true, false /* !use_multi */, &uprobe_target_push);
+}
+
+static void uprobe_ret_setup(void)
+{
+ usetup(false, false /* !use_multi */, &uprobe_target_ret);
+}
+
+static void uretprobe_ret_setup(void)
+{
+ usetup(true, false /* !use_multi */, &uprobe_target_ret);
+}
+
+static void uprobe_multi_nop_setup(void)
+{
+ usetup(false, true /* use_multi */, &uprobe_target_nop);
+}
+
+static void uretprobe_multi_nop_setup(void)
+{
+ usetup(true, true /* use_multi */, &uprobe_target_nop);
+}
+
+static void uprobe_multi_push_setup(void)
+{
+ usetup(false, true /* use_multi */, &uprobe_target_push);
+}
+
+static void uretprobe_multi_push_setup(void)
+{
+ usetup(true, true /* use_multi */, &uprobe_target_push);
+}
+
+static void uprobe_multi_ret_setup(void)
+{
+ usetup(false, true /* use_multi */, &uprobe_target_ret);
+}
+
+static void uretprobe_multi_ret_setup(void)
+{
+ usetup(true, true /* use_multi */, &uprobe_target_ret);
+}
+
+#ifdef __x86_64__
+static void uprobe_nop5_setup(void)
+{
+ usetup(false, false /* !use_multi */, &uprobe_target_nop5);
+}
+
+static void uretprobe_nop5_setup(void)
+{
+ usetup(true, false /* !use_multi */, &uprobe_target_nop5);
+}
+
+static void uprobe_multi_nop5_setup(void)
+{
+ usetup(false, true /* use_multi */, &uprobe_target_nop5);
+}
+
+static void uretprobe_multi_nop5_setup(void)
+{
+ usetup(true, true /* use_multi */, &uprobe_target_nop5);
+}
+#endif
+
+const struct bench bench_trig_syscall_count = {
+ .name = "trig-syscall-count",
.validate = trigger_validate,
- .setup = trigger_fmodret_setup,
+ .setup = trigger_syscall_count_setup,
.producer_thread = trigger_producer,
- .consumer_thread = trigger_consumer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
+
+/* batched (staying mostly in kernel) kprobe/fentry benchmarks */
+#define BENCH_TRIG_KERNEL(KIND, NAME) \
+const struct bench bench_trig_##KIND = { \
+ .name = "trig-" NAME, \
+ .setup = trigger_##KIND##_setup, \
+ .producer_thread = trigger_producer_batch, \
+ .measure = trigger_measure, \
+ .report_progress = hits_drops_report_progress, \
+ .report_final = hits_drops_report_final, \
+ .argp = &bench_trigger_batch_argp, \
+}
+
+BENCH_TRIG_KERNEL(kernel_count, "kernel-count");
+BENCH_TRIG_KERNEL(kprobe, "kprobe");
+BENCH_TRIG_KERNEL(kretprobe, "kretprobe");
+BENCH_TRIG_KERNEL(kprobe_multi, "kprobe-multi");
+BENCH_TRIG_KERNEL(kretprobe_multi, "kretprobe-multi");
+BENCH_TRIG_KERNEL(fentry, "fentry");
+BENCH_TRIG_KERNEL(kprobe_multi_all, "kprobe-multi-all");
+BENCH_TRIG_KERNEL(kretprobe_multi_all, "kretprobe-multi-all");
+BENCH_TRIG_KERNEL(fexit, "fexit");
+BENCH_TRIG_KERNEL(fmodret, "fmodret");
+BENCH_TRIG_KERNEL(tp, "tp");
+BENCH_TRIG_KERNEL(rawtp, "rawtp");
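+
+/* For reference, BENCH_TRIG_KERNEL(kprobe, "kprobe") above expands to:
+ *
+ *	const struct bench bench_trig_kprobe = {
+ *		.name = "trig-kprobe",
+ *		.setup = trigger_kprobe_setup,
+ *		.producer_thread = trigger_producer_batch,
+ *		.measure = trigger_measure,
+ *		.report_progress = hits_drops_report_progress,
+ *		.report_final = hits_drops_report_final,
+ *		.argp = &bench_trigger_batch_argp,
+ *	};
+ */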
+
+/* uprobe benchmarks */
+#define BENCH_TRIG_USERMODE(KIND, PRODUCER, NAME) \
+const struct bench bench_trig_##KIND = { \
+ .name = "trig-" NAME, \
+ .validate = trigger_validate, \
+ .setup = KIND##_setup, \
+ .producer_thread = uprobe_producer_##PRODUCER, \
+ .measure = trigger_measure, \
+ .report_progress = hits_drops_report_progress, \
+ .report_final = hits_drops_report_final, \
+}
+
+BENCH_TRIG_USERMODE(usermode_count, count, "usermode-count");
+BENCH_TRIG_USERMODE(uprobe_nop, nop, "uprobe-nop");
+BENCH_TRIG_USERMODE(uprobe_push, push, "uprobe-push");
+BENCH_TRIG_USERMODE(uprobe_ret, ret, "uprobe-ret");
+BENCH_TRIG_USERMODE(uretprobe_nop, nop, "uretprobe-nop");
+BENCH_TRIG_USERMODE(uretprobe_push, push, "uretprobe-push");
+BENCH_TRIG_USERMODE(uretprobe_ret, ret, "uretprobe-ret");
+BENCH_TRIG_USERMODE(uprobe_multi_nop, nop, "uprobe-multi-nop");
+BENCH_TRIG_USERMODE(uprobe_multi_push, push, "uprobe-multi-push");
+BENCH_TRIG_USERMODE(uprobe_multi_ret, ret, "uprobe-multi-ret");
+BENCH_TRIG_USERMODE(uretprobe_multi_nop, nop, "uretprobe-multi-nop");
+BENCH_TRIG_USERMODE(uretprobe_multi_push, push, "uretprobe-multi-push");
+BENCH_TRIG_USERMODE(uretprobe_multi_ret, ret, "uretprobe-multi-ret");
+#ifdef __x86_64__
+BENCH_TRIG_USERMODE(uprobe_nop5, nop5, "uprobe-nop5");
+BENCH_TRIG_USERMODE(uretprobe_nop5, nop5, "uretprobe-nop5");
+BENCH_TRIG_USERMODE(uprobe_multi_nop5, nop5, "uprobe-multi-nop5");
+BENCH_TRIG_USERMODE(uretprobe_multi_nop5, nop5, "uretprobe-multi-nop5");
+#endif
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh b/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh
new file mode 100755
index 000000000000..8ffd385ab2f4
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source ./benchs/run_common.sh
+
+set -eufo pipefail
+
+header "Bloom filter map"
+for v in 2 4 8 16 40; do
+for t in 1 4 8 12 16; do
+for h in {1..10}; do
+subtitle "value_size: $v bytes, # threads: $t, # hashes: $h"
+ for e in 10000 50000 75000 100000 250000 500000 750000 1000000 2500000 5000000; do
+ printf "%'d entries -\n" $e
+ printf "\t"
+ summarize "Lookups, total operations: " \
+ "$($RUN_BENCH -p $t --nr_hash_funcs $h --nr_entries $e --value_size $v bloom-lookup)"
+ printf "\t"
+ summarize "Updates, total operations: " \
+ "$($RUN_BENCH -p $t --nr_hash_funcs $h --nr_entries $e --value_size $v bloom-update)"
+ printf "\t"
+ summarize_percentage "False positive rate: " \
+ "$($RUN_BENCH -p $t --nr_hash_funcs $h --nr_entries $e --value_size $v bloom-false-positive)"
+ done
+ printf "\n"
+done
+done
+done
+
+header "Hashmap without bloom filter vs. hashmap with bloom filter (throughput, 8 threads)"
+for v in 2 4 8 16 40; do
+for h in {1..10}; do
+subtitle "value_size: $v, # hashes: $h"
+ for e in 10000 50000 75000 100000 250000 500000 750000 1000000 2500000 5000000; do
+ printf "%'d entries -\n" $e
+ printf "\t"
+ summarize_total "Hashmap without bloom filter: " \
+ "$($RUN_BENCH --nr_hash_funcs $h --nr_entries $e --value_size $v -p 8 hashmap-without-bloom)"
+ printf "\t"
+ summarize_total "Hashmap with bloom filter: " \
+ "$($RUN_BENCH --nr_hash_funcs $h --nr_entries $e --value_size $v -p 8 hashmap-with-bloom)"
+ done
+ printf "\n"
+done
+done
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh b/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh
new file mode 100755
index 000000000000..cd2efd3fdef3
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source ./benchs/run_common.sh
+
+set -eufo pipefail
+
+nr_threads=$(($(grep -c processor /proc/cpuinfo) - 1))
+summary=$($RUN_BENCH -p $nr_threads bpf-hashmap-full-update)
+printf '%s\n' "$summary"
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_bpf_loop.sh b/tools/testing/selftests/bpf/benchs/run_bench_bpf_loop.sh
new file mode 100755
index 000000000000..d4f5f73b356b
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/run_bench_bpf_loop.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source ./benchs/run_common.sh
+
+set -eufo pipefail
+
+for t in 1 4 8 12 16; do
+for i in 10 100 500 1000 5000 10000 50000 100000 500000 1000000; do
+subtitle "nr_loops: $i, nr_threads: $t"
+ summarize_ops "bpf_loop: " \
+ "$($RUN_BENCH -p $t --nr_loops $i bpf-loop)"
+ printf "\n"
+done
+done
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_htab_mem.sh b/tools/testing/selftests/bpf/benchs/run_bench_htab_mem.sh
new file mode 100755
index 000000000000..9ff5832463a2
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/run_bench_htab_mem.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source ./benchs/run_common.sh
+
+set -eufo pipefail
+
+htab_mem()
+{
+ echo -n "per-prod-op: "
+ echo -n "$*" | sed -E "s/.* per-prod-op\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+k\/s).*/\1/"
+ echo -n -e ", avg mem: "
+ echo -n "$*" | sed -E "s/.* memory usage\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+MiB).*/\1/"
+ echo -n ", peak mem: "
+ echo "$*" | sed -E "s/.* peak memory usage\s+([0-9]+\.[0-9]+MiB).*/\1/"
+}
+
+summarize_htab_mem()
+{
+ local bench="$1"
+ local summary=$(echo $2 | tail -n1)
+
+ printf "%-20s %s\n" "$bench" "$(htab_mem $summary)"
+}
+
+htab_mem_bench()
+{
+ local name
+
+ for name in overwrite batch_add_batch_del add_del_on_diff_cpu
+ do
+ summarize_htab_mem "$name" "$($RUN_BENCH htab-mem --use-case $name -p8 "$@")"
+ done
+}
+
+header "preallocated"
+htab_mem_bench "--preallocated"
+
+header "normal bpf ma"
+htab_mem_bench
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_local_storage.sh b/tools/testing/selftests/bpf/benchs/run_bench_local_storage.sh
new file mode 100755
index 000000000000..2eb2b513a173
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/run_bench_local_storage.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source ./benchs/run_common.sh
+
+set -eufo pipefail
+
+header "Hashmap Control"
+for i in 10 1000 10000 100000 4194304; do
+subtitle "num keys: $i"
+ summarize_local_storage "hashmap (control) sequential get: "\
+ "$(./bench --nr_maps 1 --hashmap_nr_keys_used=$i local-storage-cache-hashmap-control)"
+ printf "\n"
+done
+
+header "Local Storage"
+for i in 1 10 16 17 24 32 100 1000; do
+subtitle "num_maps: $i"
+ summarize_local_storage "local_storage cache sequential get: "\
+ "$(./bench --nr_maps $i local-storage-cache-seq-get)"
+ summarize_local_storage "local_storage cache interleaved get: "\
+ "$(./bench --nr_maps $i local-storage-cache-int-get)"
+ printf "\n"
+done
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh b/tools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh
new file mode 100755
index 000000000000..3e8a969f2096
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+kthread_pid=`pgrep rcu_tasks_trace_kthread`
+
+if [ -z "$kthread_pid" ]; then
+ echo "error: Couldn't find rcu_tasks_trace_kthread"
+ exit 1
+fi
+
+./bench --nr_procs 15000 --kthread_pid $kthread_pid -d 600 --quiet local-storage-tasks-trace
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_rename.sh b/tools/testing/selftests/bpf/benchs/run_bench_rename.sh
index 16f774b1cdbe..7b281dbe4165 100755
--- a/tools/testing/selftests/bpf/benchs/run_bench_rename.sh
+++ b/tools/testing/selftests/bpf/benchs/run_bench_rename.sh
@@ -2,7 +2,7 @@
set -eufo pipefail
-for i in base kprobe kretprobe rawtp fentry fexit fmodret
+for i in base kprobe kretprobe rawtp fentry fexit
do
summary=$(sudo ./bench -w2 -d5 -a rename-$i | tail -n1 | cut -d'(' -f1 | cut -d' ' -f3-)
printf "%-10s: %s\n" $i "$summary"
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh b/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh
index af4aa04caba6..83e05e837871 100755
--- a/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh
+++ b/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh
@@ -1,75 +1,55 @@
#!/bin/bash
-set -eufo pipefail
-
-RUN_BENCH="sudo ./bench -w3 -d10 -a"
-
-function hits()
-{
- echo "$*" | sed -E "s/.*hits\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/"
-}
-
-function drops()
-{
- echo "$*" | sed -E "s/.*drops\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/"
-}
+source ./benchs/run_common.sh
-function header()
-{
- local len=${#1}
-
- printf "\n%s\n" "$1"
- for i in $(seq 1 $len); do printf '='; done
- printf '\n'
-}
+set -eufo pipefail
-function summarize()
-{
- bench="$1"
- summary=$(echo $2 | tail -n1)
- printf "%-20s %s (drops %s)\n" "$bench" "$(hits $summary)" "$(drops $summary)"
-}
+RUN_RB_BENCH="$RUN_BENCH -c1"
header "Single-producer, parallel producer"
for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
- summarize $b "$($RUN_BENCH $b)"
+ summarize $b "$($RUN_RB_BENCH $b)"
done
header "Single-producer, parallel producer, sampled notification"
for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
- summarize $b "$($RUN_BENCH --rb-sampled $b)"
+ summarize $b "$($RUN_RB_BENCH --rb-sampled $b)"
done
header "Single-producer, back-to-back mode"
for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
- summarize $b "$($RUN_BENCH --rb-b2b $b)"
- summarize $b-sampled "$($RUN_BENCH --rb-sampled --rb-b2b $b)"
+ summarize $b "$($RUN_RB_BENCH --rb-b2b $b)"
+ summarize $b-sampled "$($RUN_RB_BENCH --rb-sampled --rb-b2b $b)"
done
header "Ringbuf back-to-back, effect of sample rate"
for b in 1 5 10 25 50 100 250 500 1000 2000 3000; do
- summarize "rb-sampled-$b" "$($RUN_BENCH --rb-b2b --rb-batch-cnt $b --rb-sampled --rb-sample-rate $b rb-custom)"
+ summarize "rb-sampled-$b" "$($RUN_RB_BENCH --rb-b2b --rb-batch-cnt $b --rb-sampled --rb-sample-rate $b rb-custom)"
done
header "Perfbuf back-to-back, effect of sample rate"
for b in 1 5 10 25 50 100 250 500 1000 2000 3000; do
- summarize "pb-sampled-$b" "$($RUN_BENCH --rb-b2b --rb-batch-cnt $b --rb-sampled --rb-sample-rate $b pb-custom)"
+ summarize "pb-sampled-$b" "$($RUN_RB_BENCH --rb-b2b --rb-batch-cnt $b --rb-sampled --rb-sample-rate $b pb-custom)"
done
header "Ringbuf back-to-back, reserve+commit vs output"
-summarize "reserve" "$($RUN_BENCH --rb-b2b rb-custom)"
-summarize "output" "$($RUN_BENCH --rb-b2b --rb-use-output rb-custom)"
+summarize "reserve" "$($RUN_RB_BENCH --rb-b2b rb-custom)"
+summarize "output" "$($RUN_RB_BENCH --rb-b2b --rb-use-output rb-custom)"
header "Ringbuf sampled, reserve+commit vs output"
-summarize "reserve-sampled" "$($RUN_BENCH --rb-sampled rb-custom)"
-summarize "output-sampled" "$($RUN_BENCH --rb-sampled --rb-use-output rb-custom)"
+summarize "reserve-sampled" "$($RUN_RB_BENCH --rb-sampled rb-custom)"
+summarize "output-sampled" "$($RUN_RB_BENCH --rb-sampled --rb-use-output rb-custom)"
header "Single-producer, consumer/producer competing on the same CPU, low batch count"
for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
- summarize $b "$($RUN_BENCH --rb-batch-cnt 1 --rb-sample-rate 1 --prod-affinity 0 --cons-affinity 0 $b)"
+ summarize $b "$($RUN_RB_BENCH --rb-batch-cnt 1 --rb-sample-rate 1 --prod-affinity 0 --cons-affinity 0 $b)"
done
header "Ringbuf, multi-producer contention"
for b in 1 2 3 4 8 12 16 20 24 28 32 36 40 44 48 52; do
- summarize "rb-libbpf nr_prod $b" "$($RUN_BENCH -p$b --rb-batch-cnt 50 rb-libbpf)"
+ summarize "rb-libbpf nr_prod $b" "$($RUN_RB_BENCH -p$b --rb-batch-cnt 50 rb-libbpf)"
done
+header "Ringbuf, multi-producer contention in overwrite mode, no consumer"
+for b in 1 2 3 4 8 12 16 20 24 28 32 36 40 44 48 52; do
+ summarize "rb-prod nr_prod $b" "$($RUN_BENCH -p$b --rb-batch-cnt 50 --rb-overwrite --rb-bench-producer rb-libbpf)"
+done
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_strncmp.sh b/tools/testing/selftests/bpf/benchs/run_bench_strncmp.sh
new file mode 100755
index 000000000000..142697284b45
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/run_bench_strncmp.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source ./benchs/run_common.sh
+
+set -eufo pipefail
+
+for s in 1 8 64 512 2048 4095; do
+ for b in no-helper helper; do
+ summarize ${b}-${s} "$($RUN_BENCH --cmp-str-len=$s strncmp-${b})"
+ done
+done
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_trigger.sh b/tools/testing/selftests/bpf/benchs/run_bench_trigger.sh
index 78e83f243294..f7573708a0c3 100755
--- a/tools/testing/selftests/bpf/benchs/run_bench_trigger.sh
+++ b/tools/testing/selftests/bpf/benchs/run_bench_trigger.sh
@@ -2,8 +2,22 @@
set -eufo pipefail
-for i in base tp rawtp kprobe fentry fmodret
-do
- summary=$(sudo ./bench -w2 -d5 -a trig-$i | tail -n1 | cut -d'(' -f1 | cut -d' ' -f3-)
- printf "%-10s: %s\n" $i "$summary"
+def_tests=( \
+ usermode-count kernel-count syscall-count \
+ fentry fexit fmodret \
+ rawtp tp \
+ kprobe kprobe-multi kprobe-multi-all \
+ kretprobe kretprobe-multi kretprobe-multi-all \
+)
+
+tests=("$@")
+if [ ${#tests[@]} -eq 0 ]; then
+ tests=("${def_tests[@]}")
+fi
+
+p=${PROD_CNT:-1}
+
+for t in "${tests[@]}"; do
+ summary=$(sudo ./bench -w2 -d5 -a -p$p trig-$t | tail -n1 | cut -d'(' -f1 | cut -d' ' -f3-)
+ printf "%-15s: %s\n" $t "$summary"
done
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh b/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh
new file mode 100755
index 000000000000..03f55405484b
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -eufo pipefail
+
+for i in usermode-count syscall-count {uprobe,uretprobe}-{nop,push,ret,nop5}
+do
+ summary=$(sudo ./bench -w2 -d5 -a trig-$i | tail -n1 | cut -d'(' -f1 | cut -d' ' -f3-)
+ printf "%-15s: %s\n" $i "$summary"
+done
diff --git a/tools/testing/selftests/bpf/benchs/run_common.sh b/tools/testing/selftests/bpf/benchs/run_common.sh
new file mode 100644
index 000000000000..d9f40af82006
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/run_common.sh
@@ -0,0 +1,92 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+RUN_BENCH="sudo ./bench -w3 -d10 -a"
+
+function header()
+{
+ local len=${#1}
+
+ printf "\n%s\n" "$1"
+ for i in $(seq 1 $len); do printf '='; done
+ printf '\n'
+}
+
+function subtitle()
+{
+ local len=${#1}
+ printf "\t%s\n" "$1"
+}
+
+function hits()
+{
+ echo "$*" | sed -E "s/.*hits\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/"
+}
+
+function drops()
+{
+ echo "$*" | sed -E "s/.*drops\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/"
+}
+
+function percentage()
+{
+ echo "$*" | sed -E "s/.*Percentage\s=\s+([0-9]+\.[0-9]+).*/\1/"
+}
+
+function ops()
+{
+ echo -n "throughput: "
+ echo -n "$*" | sed -E "s/.*throughput\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+\sM\sops\/s).*/\1/"
+ echo -n -e ", latency: "
+ echo "$*" | sed -E "s/.*latency\s+([0-9]+\.[0-9]+\sns\/op).*/\1/"
+}
+
+function local_storage()
+{
+ echo -n "hits throughput: "
+ echo -n "$*" | sed -E "s/.* hits throughput\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+\sM\sops\/s).*/\1/"
+ echo -n -e ", hits latency: "
+ echo -n "$*" | sed -E "s/.* hits latency\s+([0-9]+\.[0-9]+\sns\/op).*/\1/"
+ echo -n ", important_hits throughput: "
+ echo "$*" | sed -E "s/.*important_hits throughput\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+\sM\sops\/s).*/\1/"
+}
+
+function total()
+{
+ echo "$*" | sed -E "s/.*total operations\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/"
+}
+
+function summarize()
+{
+ bench="$1"
+ summary=$(echo $2 | tail -n1)
+ printf "%-20s %s (drops %s)\n" "$bench" "$(hits $summary)" "$(drops $summary)"
+}
+
+function summarize_percentage()
+{
+ bench="$1"
+ summary=$(echo $2 | tail -n1)
+ printf "%-20s %s%%\n" "$bench" "$(percentage $summary)"
+}
+
+function summarize_ops()
+{
+ bench="$1"
+ summary=$(echo $2 | tail -n1)
+ printf "%-20s %s\n" "$bench" "$(ops $summary)"
+}
+
+function summarize_local_storage()
+{
+ bench="$1"
+ summary=$(echo $2 | tail -n1)
+ printf "%-20s %s\n" "$bench" "$(local_storage $summary)"
+}
+
+function summarize_total()
+{
+ bench="$1"
+ summary=$(echo $2 | tail -n1)
+ printf "%-20s %s\n" "$bench" "$(total $summary)"
+}
diff --git a/tools/testing/selftests/bpf/bpf_arena_alloc.h b/tools/testing/selftests/bpf/bpf_arena_alloc.h
new file mode 100644
index 000000000000..c27678299e0c
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_arena_alloc.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#pragma once
+#include "bpf_arena_common.h"
+
+#ifndef __round_mask
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#endif
+#ifndef round_up
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#endif
+
+#ifdef __BPF__
+#define NR_CPUS (sizeof(struct cpumask) * 8)
+
+static void __arena * __arena page_frag_cur_page[NR_CPUS];
+static int __arena page_frag_cur_offset[NR_CPUS];
+
+/* Simple page_frag allocator */
+static inline void __arena* bpf_alloc(unsigned int size)
+{
+ __u64 __arena *obj_cnt;
+ __u32 cpu = bpf_get_smp_processor_id();
+ void __arena *page = page_frag_cur_page[cpu];
+ int __arena *cur_offset = &page_frag_cur_offset[cpu];
+ int offset;
+
+ size = round_up(size, 8);
+ if (size >= PAGE_SIZE - 8)
+ return NULL;
+ if (!page) {
+refill:
+ page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!page)
+ return NULL;
+ cast_kern(page);
+ page_frag_cur_page[cpu] = page;
+ *cur_offset = PAGE_SIZE - 8;
+ obj_cnt = page + PAGE_SIZE - 8;
+ *obj_cnt = 0;
+ } else {
+ cast_kern(page);
+ obj_cnt = page + PAGE_SIZE - 8;
+ }
+
+ offset = *cur_offset - size;
+ if (offset < 0)
+ goto refill;
+
+ (*obj_cnt)++;
+ *cur_offset = offset;
+ return page + offset;
+}
+
+static inline void bpf_free(void __arena *addr)
+{
+ __u64 __arena *obj_cnt;
+
+ addr = (void __arena *)(((long)addr) & ~(PAGE_SIZE - 1));
+ obj_cnt = addr + PAGE_SIZE - 8;
+ if (--(*obj_cnt) == 0)
+ bpf_arena_free_pages(&arena, addr, 1);
+}
+#else
+static inline void __arena* bpf_alloc(unsigned int size) { return NULL; }
+static inline void bpf_free(void __arena *addr) {}
+#endif
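
A minimal usage sketch for the page_frag allocator above, assuming a BPF program with the usual `arena` map in scope (the SEC name and struct are illustrative, not part of this patch):

/* Hypothetical BPF-side caller of bpf_alloc()/bpf_free(). */
struct pair {
	int key;
	int value;
};

SEC("syscall")
int arena_alloc_example(void *ctx)
{
	struct pair __arena *p;

	p = bpf_alloc(sizeof(*p));	/* size is rounded up to 8 bytes */
	if (!p)
		return 1;		/* arena page refill failed */
	p->key = 1;
	p->value = 2;
	bpf_free(p);	/* page is released once its obj_cnt hits zero */
	return 0;
}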
diff --git a/tools/testing/selftests/bpf/bpf_arena_common.h b/tools/testing/selftests/bpf/bpf_arena_common.h
new file mode 100644
index 000000000000..16f8ce832004
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_arena_common.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#pragma once
+
+#ifndef WRITE_ONCE
+#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
+#endif
+
+#ifndef NUMA_NO_NODE
+#define NUMA_NO_NODE (-1)
+#endif
+
+#ifndef arena_container_of
+#define arena_container_of(ptr, type, member) \
+ ({ \
+ void __arena *__mptr = (void __arena *)(ptr); \
+ ((type *)(__mptr - offsetof(type, member))); \
+ })
+#endif
+
+#ifdef __BPF__ /* when compiled as bpf program */
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE __PAGE_SIZE
+/*
+ * for older kernels try sizeof(struct genradix_node)
+ * or flexible:
+ * static inline long __bpf_page_size(void) {
+ * return bpf_core_enum_value(enum page_size_enum___l, __PAGE_SIZE___l) ?: sizeof(struct genradix_node);
+ * }
+ * but generated code is not great.
+ */
+#endif
+
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) && !defined(BPF_ARENA_FORCE_ASM)
+#define __arena __attribute__((address_space(1)))
+#define __arena_global __attribute__((address_space(1)))
+#define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */
+#define cast_user(ptr) /* nop for bpf prog. emitted by LLVM */
+#else
+#define __arena
+#define __arena_global SEC(".addr_space.1")
+#define cast_kern(ptr) bpf_addr_space_cast(ptr, 0, 1)
+#define cast_user(ptr) bpf_addr_space_cast(ptr, 1, 0)
+#endif
+
+void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt,
+ int node_id, __u64 flags) __ksym __weak;
+int bpf_arena_reserve_pages(void *map, void __arena *addr, __u32 page_cnt) __ksym __weak;
+void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;
+
+#define arena_base(map) ((void __arena *)((struct bpf_arena *)(map))->user_vm_start)
+
+#else /* when compiled as user space code */
+
+#define __arena
+#define __arg_arena
+#define cast_kern(ptr) /* nop for user space */
+#define cast_user(ptr) /* nop for user space */
+__weak char arena[1];
+
+#ifndef offsetof
+#define offsetof(type, member) ((unsigned long)&((type *)0)->member)
+#endif
+
+static inline void __arena* bpf_arena_alloc_pages(void *map, void *addr, __u32 page_cnt,
+ int node_id, __u64 flags)
+{
+ return NULL;
+}
+static inline void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt)
+{
+}
+
+#endif
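
To illustrate the dual-build pattern this header supports, here is a hedged sketch of code that compiles both as a BPF program (where __arena is address_space(1)) and as plain user-space code (where the annotations vanish); the struct and function are illustrative:

/* Shared helper: identical source for the BPF and user-space builds. */
struct counter {
	long hits;
};

static inline void counter_bump(struct counter __arena *c)
{
	/* On BPF builds without the addr-space-cast feature this emits a
	 * cast instruction; otherwise (and in user space) it is a no-op.
	 */
	cast_kern(c);
	WRITE_ONCE(c->hits, c->hits + 1);
}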
diff --git a/tools/testing/selftests/bpf/bpf_arena_htab.h b/tools/testing/selftests/bpf/bpf_arena_htab.h
new file mode 100644
index 000000000000..acc01a876668
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_arena_htab.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#pragma once
+#include <errno.h>
+#include "bpf_arena_alloc.h"
+#include "bpf_arena_list.h"
+
+struct htab_bucket {
+ struct arena_list_head head;
+};
+typedef struct htab_bucket __arena htab_bucket_t;
+
+struct htab {
+ htab_bucket_t *buckets;
+ int n_buckets;
+};
+typedef struct htab __arena htab_t;
+
+static inline htab_bucket_t *__select_bucket(htab_t *htab, __u32 hash)
+{
+ htab_bucket_t *b = htab->buckets;
+
+ cast_kern(b);
+ return &b[hash & (htab->n_buckets - 1)];
+}
+
+static inline arena_list_head_t *select_bucket(htab_t *htab, __u32 hash)
+{
+ return &__select_bucket(htab, hash)->head;
+}
+
+struct hashtab_elem {
+ int hash;
+ int key;
+ int value;
+ struct arena_list_node hash_node;
+};
+typedef struct hashtab_elem __arena hashtab_elem_t;
+
+static hashtab_elem_t *lookup_elem_raw(arena_list_head_t *head, __u32 hash, int key)
+{
+ hashtab_elem_t *l;
+
+ list_for_each_entry(l, head, hash_node)
+ if (l->hash == hash && l->key == key)
+ return l;
+
+ return NULL;
+}
+
+static int htab_hash(int key)
+{
+ return key;
+}
+
+__weak int htab_lookup_elem(htab_t *htab __arg_arena, int key)
+{
+ hashtab_elem_t *l_old;
+ arena_list_head_t *head;
+
+ cast_kern(htab);
+ head = select_bucket(htab, key);
+ l_old = lookup_elem_raw(head, htab_hash(key), key);
+ if (l_old)
+ return l_old->value;
+ return 0;
+}
+
+__weak int htab_update_elem(htab_t *htab __arg_arena, int key, int value)
+{
+ hashtab_elem_t *l_new = NULL, *l_old;
+ arena_list_head_t *head;
+
+ cast_kern(htab);
+ head = select_bucket(htab, key);
+ l_old = lookup_elem_raw(head, htab_hash(key), key);
+
+ l_new = bpf_alloc(sizeof(*l_new));
+ if (!l_new)
+ return -ENOMEM;
+ l_new->key = key;
+ l_new->hash = htab_hash(key);
+ l_new->value = value;
+
+ list_add_head(&l_new->hash_node, head);
+ if (l_old) {
+ list_del(&l_old->hash_node);
+ bpf_free(l_old);
+ }
+ return 0;
+}
+
+void htab_init(htab_t *htab)
+{
+ void __arena *buckets = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0);
+
+ cast_user(buckets);
+ htab->buckets = buckets;
+ htab->n_buckets = 2 * PAGE_SIZE / sizeof(struct htab_bucket);
+}
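
A short, hypothetical driver for the table above (BPF side); it leans on bpf_alloc() from bpf_arena_alloc.h and mirrors how a selftest might exercise the API:

SEC("syscall")
int arena_htab_example(void *ctx)
{
	htab_t *htab;

	htab = bpf_alloc(sizeof(*htab));
	if (!htab)
		return 1;
	cast_kern(htab);
	htab_init(htab);	/* two arena pages worth of buckets */
	if (htab_update_elem(htab, 1, 42))
		return 1;
	return htab_lookup_elem(htab, 1) == 42 ? 0 : 1;
}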
diff --git a/tools/testing/selftests/bpf/bpf_arena_list.h b/tools/testing/selftests/bpf/bpf_arena_list.h
new file mode 100644
index 000000000000..e16fa7d95fcf
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_arena_list.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#pragma once
+#include "bpf_arena_common.h"
+
+struct arena_list_node;
+
+typedef struct arena_list_node __arena arena_list_node_t;
+
+struct arena_list_node {
+ arena_list_node_t *next;
+ arena_list_node_t * __arena *pprev;
+};
+
+struct arena_list_head {
+ struct arena_list_node __arena *first;
+};
+typedef struct arena_list_head __arena arena_list_head_t;
+
+#define list_entry(ptr, type, member) arena_container_of(ptr, type, member)
+
+#define list_entry_safe(ptr, type, member) \
+ ({ typeof(*ptr) * ___ptr = (ptr); \
+ ___ptr ? ({ cast_kern(___ptr); list_entry(___ptr, type, member); }) : NULL; \
+ })
+
+#ifndef __BPF__
+static inline void *bpf_iter_num_new(struct bpf_iter_num *it, int i, int j) { return NULL; }
+static inline void bpf_iter_num_destroy(struct bpf_iter_num *it) {}
+static inline bool bpf_iter_num_next(struct bpf_iter_num *it) { return true; }
+#define cond_break ({})
+#define can_loop true
+#endif
+
+/* Safely walk linked list elements. Deletion of elements is allowed. */
+#define list_for_each_entry(pos, head, member) \
+ for (void * ___tmp = (pos = list_entry_safe((head)->first, \
+ typeof(*(pos)), member), \
+ (void *)0); \
+ pos && ({ ___tmp = (void *)pos->member.next; 1; }) && can_loop; \
+ pos = list_entry_safe((void __arena *)___tmp, typeof(*(pos)), member))
+
+static inline void list_add_head(arena_list_node_t *n, arena_list_head_t *h)
+{
+ arena_list_node_t *first = h->first, * __arena *tmp;
+
+ cast_user(first);
+ cast_kern(n);
+ WRITE_ONCE(n->next, first);
+ cast_kern(first);
+ if (first) {
+ tmp = &n->next;
+ cast_user(tmp);
+ WRITE_ONCE(first->pprev, tmp);
+ }
+ cast_user(n);
+ WRITE_ONCE(h->first, n);
+
+ tmp = &h->first;
+ cast_user(tmp);
+ cast_kern(n);
+ WRITE_ONCE(n->pprev, tmp);
+}
+
+static inline void __list_del(arena_list_node_t *n)
+{
+ arena_list_node_t *next = n->next;
+ arena_list_node_t * __arena *pprev = n->pprev;
+
+ cast_user(next);
+ cast_kern(pprev);
+ WRITE_ONCE(*pprev, next);
+ if (next) {
+ cast_user(pprev);
+ cast_kern(next);
+ WRITE_ONCE(next->pprev, pprev);
+ }
+}
+
+#define POISON_POINTER_DELTA 0
+
+#define LIST_POISON1 ((void __arena *) 0x100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void __arena *) 0x122 + POISON_POINTER_DELTA)
+
+static inline void list_del(arena_list_node_t *n)
+{
+ __list_del(n);
+ n->next = LIST_POISON1;
+ n->pprev = LIST_POISON2;
+}
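
A hedged sketch of how these list primitives compose (the element type and function are illustrative; allocation comes from bpf_arena_alloc.h):

struct elem {
	int val;
	struct arena_list_node node;
};

/* Sum all values, emptying the list; deletion during the walk is safe
 * because list_for_each_entry() caches the next pointer up front.
 */
static inline int sum_and_drain(arena_list_head_t *head)
{
	struct elem __arena *e;
	int sum = 0;

	list_for_each_entry(e, head, node) {
		sum += e->val;
		list_del(&e->node);
		bpf_free(e);
	}
	return sum;
}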
diff --git a/tools/testing/selftests/bpf/bpf_arena_strsearch.h b/tools/testing/selftests/bpf/bpf_arena_strsearch.h
new file mode 100644
index 000000000000..c1b6eaa905bb
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_arena_strsearch.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#pragma once
+#include "bpf_arena_common.h"
+
+__noinline int bpf_arena_strlen(const char __arena *s __arg_arena)
+{
+ const char __arena *sc;
+
+ for (sc = s; *sc != '\0'; ++sc)
+ cond_break;
+ return sc - s;
+}
+
+/**
+ * glob_match - Shell-style pattern matching, like !fnmatch(pat, str, 0)
+ * @pat: Shell-style pattern to match, e.g. "*.[ch]".
+ * @str: String to match. The pattern must match the entire string.
+ *
+ * Perform shell-style glob matching, returning true (1) if the match
+ * succeeds, or false (0) if it fails. Equivalent to !fnmatch(@pat, @str, 0).
+ *
+ * Pattern metacharacters are ?, *, [ and \.
+ * (And, inside character classes, !, - and ].)
+ *
+ * This is a small and simple implementation intended for device blacklists
+ * where a string is matched against a number of patterns. Thus, it
+ * does not preprocess the patterns. It is non-recursive, and run-time
+ * is at most quadratic: strlen(@str)*strlen(@pat).
+ *
+ * An example of the worst case is glob_match("*aaaaa", "aaaaaaaaaa");
+ * it takes 6 passes over the pattern before matching the string.
+ *
+ * Like !fnmatch(@pat, @str, 0) and unlike the shell, this does NOT
+ * treat / or leading . specially; it isn't actually used for pathnames.
+ *
+ * Note that according to glob(7) (and unlike bash), character classes
+ * are complemented by a leading !; this does not support the regex-style
+ * [^a-z] syntax.
+ *
+ * An opening bracket without a matching close is matched literally.
+ */
+__noinline bool glob_match(char const __arena *pat __arg_arena, char const __arena *str __arg_arena)
+{
+ /*
+ * Backtrack to previous * on mismatch and retry starting one
+ * character later in the string. Because * matches all characters
+ * (no exception for /), it can be easily proved that there's
+ * never a need to backtrack multiple levels.
+ */
+ char const __arena *back_pat = NULL, *back_str;
+
+ /*
+ * Loop over each token (character or class) in pat, matching
+ * it against the remaining unmatched tail of str. Return false
+ * on mismatch, or true after matching the trailing nul bytes.
+ */
+ for (;;) {
+ unsigned char c = *str++;
+ unsigned char d = *pat++;
+
+ switch (d) {
+ case '?': /* Wildcard: anything but nul */
+ if (c == '\0')
+ return false;
+ break;
+ case '*': /* Any-length wildcard */
+ if (*pat == '\0') /* Optimize trailing * case */
+ return true;
+ back_pat = pat;
+ back_str = --str; /* Allow zero-length match */
+ break;
+ case '[': { /* Character class */
+ bool match = false, inverted = (*pat == '!');
+ char const __arena *class = pat + inverted;
+ unsigned char a = *class++;
+
+ /*
+ * Iterate over each span in the character class.
+ * A span is either a single character a, or a
+ * range a-b. The first span may begin with ']'.
+ */
+ do {
+ unsigned char b = a;
+
+ if (a == '\0') /* Malformed */
+ goto literal;
+
+ if (class[0] == '-' && class[1] != ']') {
+ b = class[1];
+
+ if (b == '\0')
+ goto literal;
+
+ class += 2;
+ /* Any special action if a > b? */
+ }
+ match |= (a <= c && c <= b);
+ cond_break;
+ } while ((a = *class++) != ']');
+
+ if (match == inverted)
+ goto backtrack;
+ pat = class;
+ }
+ break;
+ case '\\':
+ d = *pat++;
+ __attribute__((__fallthrough__));
+ default: /* Literal character */
+literal:
+ if (c == d) {
+ if (d == '\0')
+ return true;
+ break;
+ }
+backtrack:
+ if (c == '\0' || !back_pat)
+ return false; /* No point continuing */
+ /* Try again from last *, one character later in str. */
+ pat = back_pat;
+ str = ++back_str;
+ break;
+ }
+ cond_break;
+ }
+ return false;
+}
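
Behavioral examples for glob_match() above, written as a comment table since real callers would pass arena-resident strings rather than literals:

/* glob_match("*.[ch]", "list.c") -> true   ('*' eats "list", class has 'c')
 * glob_match("*.[ch]", "list.o") -> false  ('o' is outside [ch])
 * glob_match("a[!b]c", "axc")    -> true   (leading '!' inverts the class)
 * glob_match("a\\*c",  "a*c")    -> true   ('\' escapes the metacharacter)
 */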
diff --git a/tools/testing/selftests/bpf/bpf_atomic.h b/tools/testing/selftests/bpf/bpf_atomic.h
new file mode 100644
index 000000000000..c550e5711967
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_atomic.h
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#ifndef BPF_ATOMIC_H
+#define BPF_ATOMIC_H
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_experimental.h"
+
+extern bool CONFIG_X86_64 __kconfig __weak;
+
+/*
+ * __unqual_typeof(x) - Declare an unqualified scalar type, leaving
+ * non-scalar types unchanged.
+ *
+ * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
+ * is not type-compatible with 'signed char', and we define a separate case.
+ *
+ * This is copied verbatim from kernel's include/linux/compiler_types.h, but
+ * with default expression (for pointers) changed from (x) to (typeof(x)0).
+ *
+ * This is because LLVM has a bug where for lvalue (x), it does not get rid of
+ * an extra address_space qualifier, but does in case of rvalue (typeof(x)0).
+ * Hence, for pointers, we need to create an rvalue expression to get the
+ * desired type. See https://github.com/llvm/llvm-project/issues/53400.
+ */
+#define __scalar_type_to_expr_cases(type) \
+ unsigned type : (unsigned type)0, signed type : (signed type)0
+
+#define __unqual_typeof(x) \
+ typeof(_Generic((x), \
+ char: (char)0, \
+ __scalar_type_to_expr_cases(char), \
+ __scalar_type_to_expr_cases(short), \
+ __scalar_type_to_expr_cases(int), \
+ __scalar_type_to_expr_cases(long), \
+ __scalar_type_to_expr_cases(long long), \
+ default: (typeof(x))0))
+
+/* No-op for BPF */
+#define cpu_relax() ({})
+
+#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))
+
+#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *)&(x)) = (val))
+
+#define cmpxchg(p, old, new) __sync_val_compare_and_swap((p), old, new)
+
+#define try_cmpxchg(p, pold, new) \
+ ({ \
+ __unqual_typeof(*(pold)) __o = *(pold); \
+ __unqual_typeof(*(p)) __r = cmpxchg(p, __o, new); \
+ if (__r != __o) \
+ *(pold) = __r; \
+ __r == __o; \
+ })
+
+#define try_cmpxchg_relaxed(p, pold, new) try_cmpxchg(p, pold, new)
+
+#define try_cmpxchg_acquire(p, pold, new) try_cmpxchg(p, pold, new)
+
+#define smp_mb() \
+ ({ \
+ volatile unsigned long __val; \
+ __sync_fetch_and_add(&__val, 0); \
+ })
+
+#define smp_rmb() \
+ ({ \
+ if (!CONFIG_X86_64) \
+ smp_mb(); \
+ else \
+ barrier(); \
+ })
+
+#define smp_wmb() \
+ ({ \
+ if (!CONFIG_X86_64) \
+ smp_mb(); \
+ else \
+ barrier(); \
+ })
+
+/* Control dependency provides LOAD->STORE, provide LOAD->LOAD */
+#define smp_acquire__after_ctrl_dep() ({ smp_rmb(); })
+
+#define smp_load_acquire(p) \
+ ({ \
+ __unqual_typeof(*(p)) __v = READ_ONCE(*(p)); \
+ if (!CONFIG_X86_64) \
+ smp_mb(); \
+ barrier(); \
+ __v; \
+ })
+
+#define smp_store_release(p, val) \
+ ({ \
+ if (!CONFIG_X86_64) \
+ smp_mb(); \
+ barrier(); \
+ WRITE_ONCE(*(p), val); \
+ })
+
+#define smp_cond_load_relaxed_label(p, cond_expr, label) \
+ ({ \
+ typeof(p) __ptr = (p); \
+ __unqual_typeof(*(p)) VAL; \
+ for (;;) { \
+ VAL = (__unqual_typeof(*(p)))READ_ONCE(*__ptr); \
+ if (cond_expr) \
+ break; \
+ cond_break_label(label); \
+ cpu_relax(); \
+ } \
+ (typeof(*(p)))VAL; \
+ })
+
+#define smp_cond_load_acquire_label(p, cond_expr, label) \
+ ({ \
+ __unqual_typeof(*p) __val = \
+ smp_cond_load_relaxed_label(p, cond_expr, label); \
+ smp_acquire__after_ctrl_dep(); \
+ (typeof(*(p)))__val; \
+ })
+
+#define atomic_read(p) READ_ONCE((p)->counter)
+
+#define atomic_cond_read_relaxed_label(p, cond_expr, label) \
+ smp_cond_load_relaxed_label(&(p)->counter, cond_expr, label)
+
+#define atomic_cond_read_acquire_label(p, cond_expr, label) \
+ smp_cond_load_acquire_label(&(p)->counter, cond_expr, label)
+
+#define atomic_try_cmpxchg_relaxed(p, pold, new) \
+ try_cmpxchg_relaxed(&(p)->counter, pold, new)
+
+#define atomic_try_cmpxchg_acquire(p, pold, new) \
+ try_cmpxchg_acquire(&(p)->counter, pold, new)
+
+#endif /* BPF_ATOMIC_H */
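
A minimal sketch of how these primitives compose into a test-and-set lock; the struct, function, and single-retry policy are assumptions for illustration, and the `out` label is the verifier-bounded bail-out that the *_label() variants require:

struct tas_lock {
	int locked;
};

static inline int tas_lock_acquire(struct tas_lock *l)
{
	int old = 0;

	/* Fast path: 0 -> 1 transition with acquire ordering. */
	if (try_cmpxchg_acquire(&l->locked, &old, 1))
		return 0;
	/* Spin until the lock reads 0 (or the verifier-bounded loop
	 * gives up and jumps to 'out'), then retry once.
	 */
	smp_cond_load_acquire_label(&l->locked, VAL == 0, out);
	old = 0;
	if (try_cmpxchg_acquire(&l->locked, &old, 1))
		return 0;
out:
	return -1;	/* contended */
}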
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
new file mode 100644
index 000000000000..2cd9165c7348
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -0,0 +1,656 @@
+#ifndef __BPF_EXPERIMENTAL__
+#define __BPF_EXPERIMENTAL__
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
+
+/* Description
+ * Allocates an object of the type represented by 'local_type_id' in
+ * program BTF. User may use the bpf_core_type_id_local macro to pass the
+ * type ID of a struct in program BTF.
+ *
+ * The 'local_type_id' parameter must be a known constant.
+ * The 'meta' parameter is rewritten by the verifier, no need for BPF
+ * program to set it.
+ * Returns
+ * A pointer to an object of the type corresponding to the passed in
+ * 'local_type_id', or NULL on failure.
+ */
+extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
+
+/* Convenience macro to wrap over bpf_obj_new_impl */
+#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))
+
+/* Description
+ * Free an allocated object. All fields of the object that require
+ * destruction will be destructed before the storage is freed.
+ *
+ * The 'meta' parameter is rewritten by the verifier, no need for BPF
+ * program to set it.
+ * Returns
+ * Void.
+ */
+extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;
+
+/* Convenience macro to wrap over bpf_obj_drop_impl */
+#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)
+
+/* Description
+ * Increment the refcount on a refcounted local kptr, turning the
+ * non-owning reference input into an owning reference in the process.
+ *
+ * The 'meta' parameter is rewritten by the verifier, no need for BPF
+ * program to set it.
+ * Returns
+ * An owning reference to the object pointed to by 'kptr'
+ */
+extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
+
+/* Convenience macro to wrap over bpf_refcount_acquire_impl */
+#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)
+
+/* Description
+ * Add a new entry to the beginning of the BPF linked list.
+ *
+ * The 'meta' and 'off' parameters are rewritten by the verifier, no need
+ * for BPF programs to set them
+ * Returns
+ * 0 if the node was successfully added
+ * -EINVAL if the node wasn't added because it's already in a list
+ */
+extern int bpf_list_push_front_impl(struct bpf_list_head *head,
+ struct bpf_list_node *node,
+ void *meta, __u64 off) __ksym;
+
+/* Convenience macro to wrap over bpf_list_push_front_impl */
+#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)
+
+/* Description
+ * Add a new entry to the end of the BPF linked list.
+ *
+ * The 'meta' and 'off' parameters are rewritten by the verifier, no need
+ * for BPF programs to set them
+ * Returns
+ * 0 if the node was successfully added
+ * -EINVAL if the node wasn't added because it's already in a list
+ */
+extern int bpf_list_push_back_impl(struct bpf_list_head *head,
+ struct bpf_list_node *node,
+ void *meta, __u64 off) __ksym;
+
+/* Convenience macro to wrap over bpf_list_push_back_impl */
+#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)
+
+/* Description
+ * Remove the entry at the beginning of the BPF linked list.
+ * Returns
+ * Pointer to bpf_list_node of deleted entry, or NULL if list is empty.
+ */
+extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;
+
+/* Description
+ * Remove the entry at the end of the BPF linked list.
+ * Returns
+ * Pointer to bpf_list_node of deleted entry, or NULL if list is empty.
+ */
+extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
+
+/* Description
+ * Remove 'node' from rbtree with root 'root'
+ * Returns
+ * Pointer to the removed node, or NULL if 'root' didn't contain 'node'
+ */
+extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
+ struct bpf_rb_node *node) __ksym;
+
+/* Description
+ * Add 'node' to rbtree with root 'root' using comparator 'less'
+ *
+ * The 'meta' and 'off' parameters are rewritten by the verifier, no need
+ * for BPF programs to set them
+ * Returns
+ * 0 if the node was successfully added
+ * -EINVAL if the node wasn't added because it's already in a tree
+ */
+extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
+ bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
+ void *meta, __u64 off) __ksym;
+
+/* Convenience macro to wrap over bpf_rbtree_add_impl */
+#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)
+
+/* Description
+ * Return the first (leftmost) node in input tree
+ * Returns
+ * Pointer to the node, which is _not_ removed from the tree. If the tree
+ * contains no nodes, returns NULL.
+ */
+extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;
+
+/* Description
+ * Allocates a percpu object of the type represented by 'local_type_id' in
+ * program BTF. User may use the bpf_core_type_id_local macro to pass the
+ * type ID of a struct in program BTF.
+ *
+ * The 'local_type_id' parameter must be a known constant.
+ * The 'meta' parameter is rewritten by the verifier, no need for BPF
+ * program to set it.
+ * Returns
+ * A pointer to a percpu object of the type corresponding to the passed in
+ * 'local_type_id', or NULL on failure.
+ */
+extern void *bpf_percpu_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
+
+/* Convenience macro to wrap over bpf_percpu_obj_new_impl */
+#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new_impl(bpf_core_type_id_local(type), NULL))
+
+/* Description
+ * Free an allocated percpu object. All fields of the object that require
+ * destruction will be destructed before the storage is freed.
+ *
+ * The 'meta' parameter is rewritten by the verifier, no need for BPF
+ * program to set it.
+ * Returns
+ * Void.
+ */
+extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym;
+
+struct bpf_iter_task_vma;
+
+extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
+ struct task_struct *task,
+ __u64 addr) __ksym;
+extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym;
+extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym;
+
+/* Convenience macro to wrap over bpf_obj_drop_impl */
+#define bpf_percpu_obj_drop(kptr) bpf_percpu_obj_drop_impl(kptr, NULL)
+
+/* Description
+ * Throw a BPF exception from the program, immediately terminating its
+ * execution and unwinding the stack. The supplied 'cookie' parameter
+ * will be the return value of the program when an exception is thrown,
+ * and the default exception callback is used. Otherwise, if an exception
+ * callback is set using the '__exception_cb(callback)' declaration tag
+ * on the main program, the 'cookie' parameter will be the callback's only
+ * input argument.
+ *
+ * Thus, in case of default exception callback, 'cookie' is subjected to
+ * constraints on the program's return value (as with R0 on exit).
+ * Otherwise, the return value of the marked exception callback will be
+ * subjected to the same checks.
+ *
+ * Note that throwing an exception with lingering resources (locks,
+ * references, etc.) will lead to a verification error.
+ *
+ * Note that callbacks *cannot* call this helper.
+ * Returns
+ * Never.
+ * Throws
+ * An exception with the specified 'cookie' value.
+ */
+extern void bpf_throw(u64 cookie) __ksym;
+
+/* Description
+ * Acquire a reference on the exe_file member field belonging to the
+ * mm_struct that is nested within the supplied task_struct. The supplied
+ * task_struct must be trusted/referenced.
+ * Returns
+ * A referenced file pointer pointing to the exe_file member field of the
+ * mm_struct nested in the supplied task_struct, or NULL.
+ */
+extern struct file *bpf_get_task_exe_file(struct task_struct *task) __ksym;
+
+/* Description
+ * Release a reference on the supplied file. The supplied file must be
+ * acquired.
+ */
+extern void bpf_put_file(struct file *file) __ksym;
+
+/* Description
+ * Resolve a pathname for the supplied path and store it in the supplied
+ * buffer. The supplied path must be trusted/referenced.
+ * Returns
+ * A positive integer corresponding to the length of the resolved pathname,
+ * including the NULL termination character, stored in the supplied
+ * buffer. On error, a negative integer is returned.
+ */
+extern int bpf_path_d_path(const struct path *path, char *buf, size_t buf__sz) __ksym;
+
+/* This macro must be used to mark the exception callback corresponding to the
+ * main program. For example:
+ *
+ * int exception_cb(u64 cookie) {
+ * return cookie;
+ * }
+ *
+ * SEC("tc")
+ * __exception_cb(exception_cb)
+ * int main_prog(struct __sk_buff *ctx) {
+ * ...
+ * return TC_ACT_OK;
+ * }
+ *
+ * Here, exception callback for the main program will be 'exception_cb'. Note
+ * that this attribute can only be used once, and multiple exception callbacks
+ * specified for the main program will lead to verification error.
+ */
+#define __exception_cb(name) __attribute__((btf_decl_tag("exception_callback:" #name)))
+
+#define __bpf_assert_signed(x) _Generic((x), \
+ unsigned long: 0, \
+ unsigned long long: 0, \
+ signed long: 1, \
+ signed long long: 1 \
+)
+
+#define __bpf_assert_check(LHS, op, RHS) \
+ _Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression"); \
+ _Static_assert(sizeof(LHS) == 8, "Only 8-byte integers are supported\n"); \
+ _Static_assert(__builtin_constant_p(__bpf_assert_signed(LHS)), "internal static assert"); \
+ _Static_assert(__builtin_constant_p((RHS)), "2nd argument must be a constant expression")
+
+#define __bpf_assert(LHS, op, cons, RHS, VAL) \
+ ({ \
+ (void)bpf_throw; \
+ asm volatile ("if %[lhs] " op " %[rhs] goto +2; r1 = %[value]; call bpf_throw" \
+ : : [lhs] "r"(LHS), [rhs] cons(RHS), [value] "ri"(VAL) : ); \
+ })
+
+#define __bpf_assert_op_sign(LHS, op, cons, RHS, VAL, supp_sign) \
+ ({ \
+ __bpf_assert_check(LHS, op, RHS); \
+ if (__bpf_assert_signed(LHS) && !(supp_sign)) \
+ __bpf_assert(LHS, "s" #op, cons, RHS, VAL); \
+ else \
+ __bpf_assert(LHS, #op, cons, RHS, VAL); \
+ })
+
+#define __bpf_assert_op(LHS, op, RHS, VAL, supp_sign) \
+ ({ \
+ if (sizeof(typeof(RHS)) == 8) { \
+ const typeof(RHS) rhs_var = (RHS); \
+ __bpf_assert_op_sign(LHS, op, "r", rhs_var, VAL, supp_sign); \
+ } else { \
+ __bpf_assert_op_sign(LHS, op, "i", RHS, VAL, supp_sign); \
+ } \
+ })
+
+#define __cmp_cannot_be_signed(x) \
+ __builtin_strcmp(#x, "==") == 0 || __builtin_strcmp(#x, "!=") == 0 || \
+ __builtin_strcmp(#x, "&") == 0
+
+#define __is_signed_type(type) (((type)(-1)) < (type)1)
+
+#define __bpf_cmp(LHS, OP, PRED, RHS, DEFAULT) \
+ ({ \
+ __label__ l_true; \
+ bool ret = DEFAULT; \
+ asm volatile goto("if %[lhs] " OP " %[rhs] goto %l[l_true]" \
+ :: [lhs] "r"((short)LHS), [rhs] PRED (RHS) :: l_true); \
+ ret = !DEFAULT; \
+l_true: \
+ ret; \
+ })
+
+/* C type conversions coupled with comparison operators are tricky.
+ * Make sure the BPF program is compiled with -Wsign-compare; then
+ * __lhs OP __rhs below will catch the mistake.
+ * Be aware that we check only __lhs to figure out the sign of the compare.
+ */
+#define _bpf_cmp(LHS, OP, RHS, UNLIKELY) \
+ ({ \
+ typeof(LHS) __lhs = (LHS); \
+ typeof(RHS) __rhs = (RHS); \
+ bool ret; \
+ _Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression"); \
+ (void)(__lhs OP __rhs); \
+ if (__cmp_cannot_be_signed(OP) || !__is_signed_type(typeof(__lhs))) { \
+ if (sizeof(__rhs) == 8) \
+ /* "i" will truncate 64-bit constant into s32, \
+ * so we have to use extra register via "r". \
+ */ \
+ ret = __bpf_cmp(__lhs, #OP, "r", __rhs, UNLIKELY); \
+ else \
+ ret = __bpf_cmp(__lhs, #OP, "ri", __rhs, UNLIKELY); \
+ } else { \
+ if (sizeof(__rhs) == 8) \
+ ret = __bpf_cmp(__lhs, "s"#OP, "r", __rhs, UNLIKELY); \
+ else \
+ ret = __bpf_cmp(__lhs, "s"#OP, "ri", __rhs, UNLIKELY); \
+ } \
+ ret; \
+ })
+
+#ifndef bpf_cmp_unlikely
+#define bpf_cmp_unlikely(LHS, OP, RHS) _bpf_cmp(LHS, OP, RHS, true)
+#endif
+
+#ifndef bpf_cmp_likely
+#define bpf_cmp_likely(LHS, OP, RHS) \
+ ({ \
+ bool ret = 0; \
+ if (__builtin_strcmp(#OP, "==") == 0) \
+ ret = _bpf_cmp(LHS, !=, RHS, false); \
+ else if (__builtin_strcmp(#OP, "!=") == 0) \
+ ret = _bpf_cmp(LHS, ==, RHS, false); \
+ else if (__builtin_strcmp(#OP, "<=") == 0) \
+ ret = _bpf_cmp(LHS, >, RHS, false); \
+ else if (__builtin_strcmp(#OP, "<") == 0) \
+ ret = _bpf_cmp(LHS, >=, RHS, false); \
+ else if (__builtin_strcmp(#OP, ">") == 0) \
+ ret = _bpf_cmp(LHS, <=, RHS, false); \
+ else if (__builtin_strcmp(#OP, ">=") == 0) \
+ ret = _bpf_cmp(LHS, <, RHS, false); \
+ else \
+ asm volatile("r0 " #OP " invalid compare"); \
+ ret; \
+ })
+#endif
+
+/*
+ * Note that cond_break can only be portably used in the body of a breakable
+ * construct, whereas can_loop can be used anywhere.
+ */
+#ifdef __BPF_FEATURE_MAY_GOTO
+#define can_loop \
+ ({ __label__ l_break, l_continue; \
+ bool ret = true; \
+ asm volatile goto("may_goto %l[l_break]" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: ret = false; \
+ l_continue:; \
+ ret; \
+ })
+
+#define __cond_break(expr) \
+ ({ __label__ l_break, l_continue; \
+ asm volatile goto("may_goto %l[l_break]" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: expr; \
+ l_continue:; \
+ })
+#else
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define can_loop \
+ ({ __label__ l_break, l_continue; \
+ bool ret = true; \
+ asm volatile goto("1:.byte 0xe5; \
+ .byte 0; \
+ .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \
+ .short 0" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: ret = false; \
+ l_continue:; \
+ ret; \
+ })
+
+#define __cond_break(expr) \
+ ({ __label__ l_break, l_continue; \
+ asm volatile goto("1:.byte 0xe5; \
+ .byte 0; \
+ .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \
+ .short 0" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: expr; \
+ l_continue:; \
+ })
+#else
+#define can_loop \
+ ({ __label__ l_break, l_continue; \
+ bool ret = true; \
+ asm volatile goto("1:.byte 0xe5; \
+ .byte 0; \
+ .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \
+ .short 0" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: ret = false; \
+ l_continue:; \
+ ret; \
+ })
+
+#define __cond_break(expr) \
+ ({ __label__ l_break, l_continue; \
+ asm volatile goto("1:.byte 0xe5; \
+ .byte 0; \
+ .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \
+ .short 0" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: expr; \
+ l_continue:; \
+ })
+#endif
+#endif
+
+#define cond_break __cond_break(break)
+#define cond_break_label(label) __cond_break(goto label)
+
+#ifndef bpf_nop_mov
+#define bpf_nop_mov(var) \
+ asm volatile("%[reg]=%[reg]"::[reg]"r"((short)var))
+#endif
+
+/* emit instruction:
+ * rX = rX .off = BPF_ADDR_SPACE_CAST .imm32 = (dst_as << 16) | src_as
+ */
+#ifndef bpf_addr_space_cast
+#define bpf_addr_space_cast(var, dst_as, src_as)\
+ asm volatile(".byte 0xBF; \
+ .ifc %[reg], r0; \
+ .byte 0x00; \
+ .endif; \
+ .ifc %[reg], r1; \
+ .byte 0x11; \
+ .endif; \
+ .ifc %[reg], r2; \
+ .byte 0x22; \
+ .endif; \
+ .ifc %[reg], r3; \
+ .byte 0x33; \
+ .endif; \
+ .ifc %[reg], r4; \
+ .byte 0x44; \
+ .endif; \
+ .ifc %[reg], r5; \
+ .byte 0x55; \
+ .endif; \
+ .ifc %[reg], r6; \
+ .byte 0x66; \
+ .endif; \
+ .ifc %[reg], r7; \
+ .byte 0x77; \
+ .endif; \
+ .ifc %[reg], r8; \
+ .byte 0x88; \
+ .endif; \
+ .ifc %[reg], r9; \
+ .byte 0x99; \
+ .endif; \
+ .short %[off]; \
+ .long %[as]" \
+ : [reg]"+r"(var) \
+ : [off]"i"(BPF_ADDR_SPACE_CAST) \
+ , [as]"i"((dst_as << 16) | src_as));
+#endif
+
+void bpf_preempt_disable(void) __weak __ksym;
+void bpf_preempt_enable(void) __weak __ksym;
+
+typedef struct {
+} __bpf_preempt_t;
+
+static inline __bpf_preempt_t __bpf_preempt_constructor(void)
+{
+ __bpf_preempt_t ret = {};
+
+ bpf_preempt_disable();
+ return ret;
+}
+static inline void __bpf_preempt_destructor(__bpf_preempt_t *t)
+{
+ bpf_preempt_enable();
+}
+#define bpf_guard_preempt() \
+ __bpf_preempt_t ___bpf_apply(preempt, __COUNTER__) \
+ __attribute__((__unused__, __cleanup__(__bpf_preempt_destructor))) = \
+ __bpf_preempt_constructor()
+
+/* Description
+ * Assert that a conditional expression is true.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert(cond) if (!(cond)) bpf_throw(0);
+
+/* Description
+ * Assert that a conditional expression is true.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_with(cond, value) if (!(cond)) bpf_throw(value);
+
+/* Description
+ * Assert that LHS is in the range [BEG, END] (inclusive of both). This
+ * statement updates the known bounds of LHS during verification. Note
+ * that both BEG and END must be constant values, and must fit within the
+ * data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert_range(LHS, BEG, END) \
+ ({ \
+ _Static_assert(BEG <= END, "BEG must be <= END"); \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, >=, BEG, 0, false); \
+ __bpf_assert_op(LHS, <=, END, 0, false); \
+ })
+
+/* Description
+ * Assert that LHS is in the range [BEG, END] (inclusive of both). This
+ * statement updates the known bounds of LHS during verification. Note
+ * that both BEG and END must be constant values, and must fit within the
+ * data type of LHS.
+ * Returns
+ * Void.
+ * Throws
+ * An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_range_with(LHS, BEG, END, value) \
+ ({ \
+ _Static_assert(BEG <= END, "BEG must be <= END"); \
+ barrier_var(LHS); \
+ __bpf_assert_op(LHS, >=, BEG, value, false); \
+ __bpf_assert_op(LHS, <=, END, value, false); \
+ })
+
+struct bpf_iter_css_task;
+struct cgroup_subsys_state;
+extern int bpf_iter_css_task_new(struct bpf_iter_css_task *it,
+ struct cgroup_subsys_state *css, unsigned int flags) __weak __ksym;
+extern struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it) __weak __ksym;
+extern void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) __weak __ksym;
+
+struct bpf_iter_task;
+extern int bpf_iter_task_new(struct bpf_iter_task *it,
+ struct task_struct *task, unsigned int flags) __weak __ksym;
+extern struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it) __weak __ksym;
+extern void bpf_iter_task_destroy(struct bpf_iter_task *it) __weak __ksym;
+
+struct bpf_iter_css;
+extern int bpf_iter_css_new(struct bpf_iter_css *it,
+ struct cgroup_subsys_state *start, unsigned int flags) __weak __ksym;
+extern struct cgroup_subsys_state *bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
+extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;
+
+extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
+extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
+extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
+ int (callback_fn)(void *map, int *key, void *value),
+ unsigned int flags__k, void *aux__ign) __ksym;
+#define bpf_wq_set_callback(timer, cb, flags) \
+ bpf_wq_set_callback_impl(timer, cb, flags, NULL)
+
+struct bpf_iter_kmem_cache;
+extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym;
+extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym;
+extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym;
+
+struct bpf_iter_dmabuf;
+extern int bpf_iter_dmabuf_new(struct bpf_iter_dmabuf *it) __weak __ksym;
+extern struct dma_buf *bpf_iter_dmabuf_next(struct bpf_iter_dmabuf *it) __weak __ksym;
+extern void bpf_iter_dmabuf_destroy(struct bpf_iter_dmabuf *it) __weak __ksym;
+
+extern int bpf_cgroup_read_xattr(struct cgroup *cgroup, const char *name__str,
+ struct bpf_dynptr *value_p) __weak __ksym;
+
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+#define HARDIRQ_BITS 4
+#define NMI_BITS 4
+
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
+
+#define __IRQ_MASK(x) ((1UL << (x))-1)
+
+#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
+
+extern bool CONFIG_PREEMPT_RT __kconfig __weak;
+#ifdef bpf_target_x86
+extern const int __preempt_count __ksym;
+#endif
+
+struct task_struct___preempt_rt {
+ int softirq_disable_cnt;
+} __attribute__((preserve_access_index));
+
+static inline int get_preempt_count(void)
+{
+#if defined(bpf_target_x86)
+ return *(int *) bpf_this_cpu_ptr(&__preempt_count);
+#elif defined(bpf_target_arm64)
+ return bpf_get_current_task_btf()->thread_info.preempt.count;
+#endif
+ return 0;
+}
+
+/* Description
+ * Report whether the program is running in interrupt context. Only works on the following archs:
+ * * x86
+ * * arm64
+ */
+static inline int bpf_in_interrupt(void)
+{
+ struct task_struct___preempt_rt *tsk;
+ int pcnt;
+
+ pcnt = get_preempt_count();
+ if (!CONFIG_PREEMPT_RT)
+ return pcnt & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK);
+
+ tsk = (void *) bpf_get_current_task_btf();
+ return (pcnt & (NMI_MASK | HARDIRQ_MASK)) |
+ (tsk->softirq_disable_cnt & SOFTIRQ_MASK);
+}
+
+#endif
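
As a usage sketch for the allocation and linked-list kfuncs declared above (the lock/list-head placement, the private() helper, and the SEC name are assumptions borrowed from common selftest patterns):

struct node_data {
	long key;
	struct bpf_list_node node;
};

#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_list_head ghead __contains(node_data, node);

SEC("tc")
int list_push_example(void *ctx)
{
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;
	n->key = 42;

	bpf_spin_lock(&glock);
	/* Ownership of 'n' transfers to the list on success. */
	bpf_list_push_front(&ghead, &n->node);
	bpf_spin_unlock(&glock);
	return 0;
}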
diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h
new file mode 100644
index 000000000000..e0189254bb6e
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_kfuncs.h
@@ -0,0 +1,98 @@
+#ifndef __BPF_KFUNCS__
+#define __BPF_KFUNCS__
+
+struct bpf_sock_addr_kern;
+
+/* Description
+ * Initializes an skb-type dynptr
+ * Returns
+ * Error code
+ */
+extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
+ struct bpf_dynptr *ptr__uninit) __ksym __weak;
+
+/* Description
+ * Initializes an xdp-type dynptr
+ * Returns
+ * Error code
+ */
+extern int bpf_dynptr_from_xdp(struct xdp_md *xdp, __u64 flags,
+ struct bpf_dynptr *ptr__uninit) __ksym __weak;
+
+extern int bpf_dynptr_from_skb_meta(struct __sk_buff *skb, __u64 flags,
+ struct bpf_dynptr *ptr__uninit) __ksym __weak;
+
+/* Description
+ * Obtain a read-only pointer to the dynptr's data
+ * Returns
+ * Either a direct pointer to the dynptr data or a pointer to the user-provided
+ * buffer if unable to obtain a direct pointer
+ */
+extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u64 offset,
+ void *buffer, __u64 buffer__szk) __ksym __weak;
+
+/* Description
+ * Obtain a read-write pointer to the dynptr's data
+ * Returns
+ * Either a direct pointer to the dynptr data or a pointer to the user-provided
+ * buffer if unable to obtain a direct pointer
+ */
+extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u64 offset, void *buffer,
+ __u64 buffer__szk) __ksym __weak;
+
+extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u64 start, __u64 end) __ksym __weak;
+extern bool bpf_dynptr_is_null(const struct bpf_dynptr *ptr) __ksym __weak;
+extern bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *ptr) __ksym __weak;
+extern __u64 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym __weak;
+extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clone__init) __ksym __weak;
+
+/* Description
+ * Modify the address of an AF_UNIX sockaddr.
+ * Returns
+ * -EINVAL if the address size is too big, or 0 if the sockaddr was successfully modified.
+ */
+extern int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern,
+ const __u8 *sun_path, __u32 sun_path__sz) __ksym;
+
+/* Description
+ * Allocate and configure a reqsk and link it with a listener and skb.
+ * Returns
+ * Error code
+ */
+struct sock;
+struct bpf_tcp_req_attrs;
+extern int bpf_sk_assign_tcp_reqsk(struct __sk_buff *skb, struct sock *sk,
+ struct bpf_tcp_req_attrs *attrs, int attrs__sz) __ksym;
+
+void *bpf_cast_to_kern_ctx(void *) __ksym;
+
+extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak;
+
+extern int bpf_get_file_xattr(struct file *file, const char *name,
+ struct bpf_dynptr *value_ptr) __ksym;
+extern int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr *digest_ptr) __ksym;
+
+extern struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym;
+extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
+extern void bpf_key_put(struct bpf_key *key) __ksym;
+extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr,
+ struct bpf_dynptr *sig_ptr,
+ struct bpf_key *trusted_keyring) __ksym;
+
+extern bool bpf_session_is_return(void) __ksym __weak;
+extern __u64 *bpf_session_cookie(void) __ksym __weak;
+
+struct dentry;
+/* Description
+ * Returns xattr of a dentry
+ * Returns
+ * Error code
+ */
+extern int bpf_get_dentry_xattr(struct dentry *dentry, const char *name,
+ struct bpf_dynptr *value_ptr) __ksym __weak;
+
+extern int bpf_set_dentry_xattr(struct dentry *dentry, const char *name__str,
+ const struct bpf_dynptr *value_p, int flags) __ksym __weak;
+extern int bpf_remove_dentry_xattr(struct dentry *dentry, const char *name__str) __ksym __weak;
+
+#endif
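For orientation, a minimal sketch of the dynptr kfuncs declared above, assuming vmlinux.h and bpf/bpf_helpers.h are included; the section name and program body are illustrative.

/* Read the first 4 bytes of an skb via a dynptr slice; if the data is not
 * linear, bpf_dynptr_slice() copies it into the stack buffer instead.
 */
SEC("tc")
int peek_first_bytes(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	__u8 buf[4];
	__u8 *data;

	if (bpf_dynptr_from_skb(skb, 0, &ptr))
		return 0;	/* TC_ACT_OK */
	data = bpf_dynptr_slice(&ptr, 0, buf, sizeof(buf));
	if (!data)
		return 0;	/* packet shorter than 4 bytes */
	/* ... inspect data[0..3] ... */
	return 0;
}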
diff --git a/tools/testing/selftests/bpf/bpf_legacy.h b/tools/testing/selftests/bpf/bpf_legacy.h
index 719ab56cdb5d..bc4555a003a7 100644
--- a/tools/testing/selftests/bpf/bpf_legacy.h
+++ b/tools/testing/selftests/bpf/bpf_legacy.h
@@ -2,24 +2,22 @@
#ifndef __BPF_LEGACY__
#define __BPF_LEGACY__
-#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
- struct ____btf_map_##name { \
- type_key key; \
- type_val value; \
- }; \
- struct ____btf_map_##name \
- __attribute__ ((section(".maps." #name), used)) \
- ____btf_map_##name = { }
-
+#if __GNUC__ && !__clang__
+/* Functions to emit BPF_LD_ABS and BPF_LD_IND instructions. We
+ * provide the "standard" names as synonyms of the corresponding GCC
+ * builtins. Note how the SKB argument is ignored.
+ */
+#define load_byte(skb, off) __builtin_bpf_load_byte(off)
+#define load_half(skb, off) __builtin_bpf_load_half(off)
+#define load_word(skb, off) __builtin_bpf_load_word(off)
+#else
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
*/
-unsigned long long load_byte(void *skb,
- unsigned long long off) asm("llvm.bpf.load.byte");
-unsigned long long load_half(void *skb,
- unsigned long long off) asm("llvm.bpf.load.half");
-unsigned long long load_word(void *skb,
- unsigned long long off) asm("llvm.bpf.load.word");
+unsigned long long load_byte(void *skb, unsigned long long off) asm("llvm.bpf.load.byte");
+unsigned long long load_half(void *skb, unsigned long long off) asm("llvm.bpf.load.half");
+unsigned long long load_word(void *skb, unsigned long long off) asm("llvm.bpf.load.word");
+#endif
#endif
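Either branch above yields the same calling convention, so a socket filter can use the legacy absolute loads portably across clang and GCC. A hedged sketch, assuming bpf/bpf_helpers.h and the __sk_buff definition are available; the offset and section name are illustrative.

SEC("socket")
int keep_only_ipv4(struct __sk_buff *skb)
{
	/* BPF_LD_ABS half-word load of the EtherType field at offset 12 */
	if (load_half(skb, 12) == 0x0800 /* ETH_P_IP */)
		return skb->len;	/* keep the whole packet */
	return 0;			/* truncate to zero, i.e. drop */
}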
diff --git a/tools/testing/selftests/bpf/bpf_rlimit.h b/tools/testing/selftests/bpf/bpf_rlimit.h
deleted file mode 100644
index 9dac9b30f8ef..000000000000
--- a/tools/testing/selftests/bpf/bpf_rlimit.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#include <sys/resource.h>
-#include <stdio.h>
-
-static __attribute__((constructor)) void bpf_rlimit_ctor(void)
-{
- struct rlimit rlim_old, rlim_new = {
- .rlim_cur = RLIM_INFINITY,
- .rlim_max = RLIM_INFINITY,
- };
-
- getrlimit(RLIMIT_MEMLOCK, &rlim_old);
- /* For the sake of running the test cases, we temporarily
- * set rlimit to infinity in order for kernel to focus on
- * errors from actual test cases and not getting noise
- * from hitting memlock limits. The limit is on per-process
- * basis and not a global one, hence destructor not really
- * needed here.
- */
- if (setrlimit(RLIMIT_MEMLOCK, &rlim_new) < 0) {
- perror("Unable to lift memlock rlimit");
- /* Trying out lower limit, but expect potential test
- * case failures from this!
- */
- rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
- rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
- setrlimit(RLIMIT_MEMLOCK, &rlim_new);
- }
-}
diff --git a/tools/testing/selftests/bpf/bpf_sockopt_helpers.h b/tools/testing/selftests/bpf/bpf_sockopt_helpers.h
new file mode 100644
index 000000000000..11f3a0976174
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_sockopt_helpers.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <sys/socket.h>
+#include <bpf/bpf_helpers.h>
+
+int get_set_sk_priority(void *ctx)
+{
+ int prio;
+
+ /* Verify that context allows calling bpf_getsockopt and
+ * bpf_setsockopt by reading and writing back socket
+ * priority.
+ */
+
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
+ return 0;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
+ return 0;
+
+ return 1;
+}
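A hedged sketch of how this helper is meant to be called from a cgroup hook; the section name and return-value policy below are illustrative, not taken from a specific test.

SEC("cgroup/sock_create")
int sock_create_prog(struct bpf_sock *ctx)
{
	/* Deny socket creation when the sockopt round-trip is not allowed */
	if (!get_set_sk_priority(ctx))
		return 0;
	return 1;
}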
diff --git a/tools/testing/selftests/bpf/bpf_tcp_helpers.h b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
deleted file mode 100644
index 6a9053162cf2..000000000000
--- a/tools/testing/selftests/bpf/bpf_tcp_helpers.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __BPF_TCP_HELPERS_H
-#define __BPF_TCP_HELPERS_H
-
-#include <stdbool.h>
-#include <linux/types.h>
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_core_read.h>
-#include <bpf/bpf_tracing.h>
-
-#define BPF_STRUCT_OPS(name, args...) \
-SEC("struct_ops/"#name) \
-BPF_PROG(name, args)
-
-#define tcp_jiffies32 ((__u32)bpf_jiffies64())
-
-struct sock_common {
- unsigned char skc_state;
- __u16 skc_num;
-} __attribute__((preserve_access_index));
-
-enum sk_pacing {
- SK_PACING_NONE = 0,
- SK_PACING_NEEDED = 1,
- SK_PACING_FQ = 2,
-};
-
-struct sock {
- struct sock_common __sk_common;
- unsigned long sk_pacing_rate;
- __u32 sk_pacing_status; /* see enum sk_pacing */
-} __attribute__((preserve_access_index));
-
-struct inet_sock {
- struct sock sk;
-} __attribute__((preserve_access_index));
-
-struct inet_connection_sock {
- struct inet_sock icsk_inet;
- __u8 icsk_ca_state:6,
- icsk_ca_setsockopt:1,
- icsk_ca_dst_locked:1;
- struct {
- __u8 pending;
- } icsk_ack;
- __u64 icsk_ca_priv[104 / sizeof(__u64)];
-} __attribute__((preserve_access_index));
-
-struct request_sock {
- struct sock_common __req_common;
-} __attribute__((preserve_access_index));
-
-struct tcp_sock {
- struct inet_connection_sock inet_conn;
-
- __u32 rcv_nxt;
- __u32 snd_nxt;
- __u32 snd_una;
- __u32 window_clamp;
- __u8 ecn_flags;
- __u32 delivered;
- __u32 delivered_ce;
- __u32 snd_cwnd;
- __u32 snd_cwnd_cnt;
- __u32 snd_cwnd_clamp;
- __u32 snd_ssthresh;
- __u8 syn_data:1, /* SYN includes data */
- syn_fastopen:1, /* SYN includes Fast Open option */
- syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
- syn_fastopen_ch:1, /* Active TFO re-enabling probe */
- syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
- save_syn:1, /* Save headers of SYN packet */
- is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */
- syn_smc:1; /* SYN includes SMC */
- __u32 max_packets_out;
- __u32 lsndtime;
- __u32 prior_cwnd;
- __u64 tcp_mstamp; /* most recent packet received/sent */
-} __attribute__((preserve_access_index));
-
-static __always_inline struct inet_connection_sock *inet_csk(const struct sock *sk)
-{
- return (struct inet_connection_sock *)sk;
-}
-
-static __always_inline void *inet_csk_ca(const struct sock *sk)
-{
- return (void *)inet_csk(sk)->icsk_ca_priv;
-}
-
-static __always_inline struct tcp_sock *tcp_sk(const struct sock *sk)
-{
- return (struct tcp_sock *)sk;
-}
-
-static __always_inline bool before(__u32 seq1, __u32 seq2)
-{
- return (__s32)(seq1-seq2) < 0;
-}
-#define after(seq2, seq1) before(seq1, seq2)
-
-#define TCP_ECN_OK 1
-#define TCP_ECN_QUEUE_CWR 2
-#define TCP_ECN_DEMAND_CWR 4
-#define TCP_ECN_SEEN 8
-
-enum inet_csk_ack_state_t {
- ICSK_ACK_SCHED = 1,
- ICSK_ACK_TIMER = 2,
- ICSK_ACK_PUSHED = 4,
- ICSK_ACK_PUSHED2 = 8,
- ICSK_ACK_NOW = 16 /* Send the next ACK immediately (once) */
-};
-
-enum tcp_ca_event {
- CA_EVENT_TX_START = 0,
- CA_EVENT_CWND_RESTART = 1,
- CA_EVENT_COMPLETE_CWR = 2,
- CA_EVENT_LOSS = 3,
- CA_EVENT_ECN_NO_CE = 4,
- CA_EVENT_ECN_IS_CE = 5,
-};
-
-struct ack_sample {
- __u32 pkts_acked;
- __s32 rtt_us;
- __u32 in_flight;
-} __attribute__((preserve_access_index));
-
-struct rate_sample {
- __u64 prior_mstamp; /* starting timestamp for interval */
- __u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
- __s32 delivered; /* number of packets delivered over interval */
- long interval_us; /* time for tp->delivered to incr "delivered" */
- __u32 snd_interval_us; /* snd interval for delivered packets */
- __u32 rcv_interval_us; /* rcv interval for delivered packets */
- long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
- int losses; /* number of packets marked lost upon ACK */
- __u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
- __u32 prior_in_flight; /* in flight before this ACK */
- bool is_app_limited; /* is sample from packet with bubble in pipe? */
- bool is_retrans; /* is sample from retransmission? */
- bool is_ack_delayed; /* is this (likely) a delayed ACK? */
-} __attribute__((preserve_access_index));
-
-#define TCP_CA_NAME_MAX 16
-#define TCP_CONG_NEEDS_ECN 0x2
-
-struct tcp_congestion_ops {
- char name[TCP_CA_NAME_MAX];
- __u32 flags;
-
- /* initialize private data (optional) */
- void (*init)(struct sock *sk);
- /* cleanup private data (optional) */
- void (*release)(struct sock *sk);
-
- /* return slow start threshold (required) */
- __u32 (*ssthresh)(struct sock *sk);
- /* do new cwnd calculation (required) */
- void (*cong_avoid)(struct sock *sk, __u32 ack, __u32 acked);
- /* call before changing ca_state (optional) */
- void (*set_state)(struct sock *sk, __u8 new_state);
- /* call when cwnd event occurs (optional) */
- void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
- /* call when ack arrives (optional) */
- void (*in_ack_event)(struct sock *sk, __u32 flags);
- /* new value of cwnd after loss (required) */
- __u32 (*undo_cwnd)(struct sock *sk);
- /* hook for packet ack accounting (optional) */
- void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
- /* override sysctl_tcp_min_tso_segs */
- __u32 (*min_tso_segs)(struct sock *sk);
- /* returns the multiplier used in tcp_sndbuf_expand (optional) */
- __u32 (*sndbuf_expand)(struct sock *sk);
- /* call when packets are delivered to update cwnd and pacing rate,
- * after all the ca_state processing. (optional)
- */
- void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
-};
-
-#define min(a, b) ((a) < (b) ? (a) : (b))
-#define max(a, b) ((a) > (b) ? (a) : (b))
-#define min_not_zero(x, y) ({ \
- typeof(x) __x = (x); \
- typeof(y) __y = (y); \
- __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
-
-static __always_inline __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked)
-{
- __u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);
-
- acked -= cwnd - tp->snd_cwnd;
- tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
-
- return acked;
-}
-
-static __always_inline bool tcp_in_slow_start(const struct tcp_sock *tp)
-{
- return tp->snd_cwnd < tp->snd_ssthresh;
-}
-
-static __always_inline bool tcp_is_cwnd_limited(const struct sock *sk)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
-
- /* If in slow start, ensure cwnd grows to twice what was ACKed. */
- if (tcp_in_slow_start(tp))
- return tp->snd_cwnd < 2 * tp->max_packets_out;
-
- return !!BPF_CORE_READ_BITFIELD(tp, is_cwnd_limited);
-}
-
-static __always_inline void tcp_cong_avoid_ai(struct tcp_sock *tp, __u32 w, __u32 acked)
-{
- /* If credits accumulated at a higher w, apply them gently now. */
- if (tp->snd_cwnd_cnt >= w) {
- tp->snd_cwnd_cnt = 0;
- tp->snd_cwnd++;
- }
-
- tp->snd_cwnd_cnt += acked;
- if (tp->snd_cwnd_cnt >= w) {
- __u32 delta = tp->snd_cwnd_cnt / w;
-
- tp->snd_cwnd_cnt -= delta * w;
- tp->snd_cwnd += delta;
- }
- tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
-}
-
-#endif
diff --git a/tools/testing/selftests/bpf/bpf_testmod/Makefile b/tools/testing/selftests/bpf/bpf_testmod/Makefile
deleted file mode 100644
index 15cb36c4483a..000000000000
--- a/tools/testing/selftests/bpf/bpf_testmod/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
-KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..)
-
-ifeq ($(V),1)
-Q =
-else
-Q = @
-endif
-
-MODULES = bpf_testmod.ko
-
-obj-m += bpf_testmod.o
-CFLAGS_bpf_testmod.o = -I$(src)
-
-all:
- +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules
-
-clean:
- +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean
-
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h
deleted file mode 100644
index b83ea448bc79..000000000000
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2020 Facebook */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM bpf_testmod
-
-#if !defined(_BPF_TESTMOD_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _BPF_TESTMOD_EVENTS_H
-
-#include <linux/tracepoint.h>
-#include "bpf_testmod.h"
-
-TRACE_EVENT(bpf_testmod_test_read,
- TP_PROTO(struct task_struct *task, struct bpf_testmod_test_read_ctx *ctx),
- TP_ARGS(task, ctx),
- TP_STRUCT__entry(
- __field(pid_t, pid)
- __array(char, comm, TASK_COMM_LEN)
- __field(loff_t, off)
- __field(size_t, len)
- ),
- TP_fast_assign(
- __entry->pid = task->pid;
- memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
- __entry->off = ctx->off;
- __entry->len = ctx->len;
- ),
- TP_printk("pid=%d comm=%s off=%llu len=%zu",
- __entry->pid, __entry->comm, __entry->off, __entry->len)
-);
-
-#endif /* _BPF_TESTMOD_EVENTS_H */
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE bpf_testmod-events
-#include <trace/define_trace.h>
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
deleted file mode 100644
index 2df19d73ca49..000000000000
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ /dev/null
@@ -1,52 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2020 Facebook */
-#include <linux/error-injection.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/sysfs.h>
-#include <linux/tracepoint.h>
-#include "bpf_testmod.h"
-
-#define CREATE_TRACE_POINTS
-#include "bpf_testmod-events.h"
-
-noinline ssize_t
-bpf_testmod_test_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t len)
-{
- struct bpf_testmod_test_read_ctx ctx = {
- .buf = buf,
- .off = off,
- .len = len,
- };
-
- trace_bpf_testmod_test_read(current, &ctx);
-
- return -EIO; /* always fail */
-}
-EXPORT_SYMBOL(bpf_testmod_test_read);
-ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);
-
-static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
- .attr = { .name = "bpf_testmod", .mode = 0444, },
- .read = bpf_testmod_test_read,
-};
-
-static int bpf_testmod_init(void)
-{
- return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
-}
-
-static void bpf_testmod_exit(void)
-{
- return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
-}
-
-module_init(bpf_testmod_init);
-module_exit(bpf_testmod_exit);
-
-MODULE_AUTHOR("Andrii Nakryiko");
-MODULE_DESCRIPTION("BPF selftests module");
-MODULE_LICENSE("Dual BSD/GPL");
-
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
deleted file mode 100644
index b81adfedb4f6..000000000000
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2020 Facebook */
-#ifndef _BPF_TESTMOD_H
-#define _BPF_TESTMOD_H
-
-#include <linux/types.h>
-
-struct bpf_testmod_test_read_ctx {
- char *buf;
- loff_t off;
- size_t len;
-};
-
-#endif /* _BPF_TESTMOD_H */
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index a3352a64c067..4bc2d25f33e1 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -6,6 +6,7 @@
#include <stdlib.h>
#include <string.h>
#include <errno.h>
+#include <syscall.h>
#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
static inline unsigned int bpf_num_possible_cpus(void)
@@ -20,6 +21,25 @@ static inline unsigned int bpf_num_possible_cpus(void)
return possible_cpus;
}
+/* Copy up to sz - 1 bytes from the zero-terminated src string and ensure that
+ * dst is a zero-terminated string no matter what (unless sz == 0, in which
+ * case it's a no-op). It's conceptually close to FreeBSD's strlcpy(), but
+ * differs in what is returned. Given this is an internal helper, it's trivial
+ * to extend when necessary. Use this instead of strncpy inside libbpf source code.
+ */
+static inline void bpf_strlcpy(char *dst, const char *src, size_t sz)
+{
+ size_t i;
+
+ if (sz == 0)
+ return;
+
+ sz--;
+ for (i = 0; i < sz && src[i]; i++)
+ dst[i] = src[i];
+ dst[i] = '\0';
+}
+
#define __bpf_percpu_val_align __attribute__((__aligned__(8)))
#define BPF_DECLARE_PERCPU(type, name) \
@@ -40,4 +60,18 @@ static inline unsigned int bpf_num_possible_cpus(void)
(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
#endif
+/* Availability of gettid across glibc versions is hit-and-miss, therefore
+ * fall back to the syscall in this macro and use it everywhere.
+ */
+#ifndef sys_gettid
+#define sys_gettid() syscall(SYS_gettid)
+#endif
+
+/* and poison usage to ensure it does not creep back in. */
+#pragma GCC poison gettid
+
+#ifndef ENOTSUPP
+#define ENOTSUPP 524
+#endif
+
#endif /* __BPF_UTIL__ */
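A short userspace sketch of the two helpers added above; the string and output format are illustrative.

#include <stdio.h>
#include "bpf_util.h"

static void util_demo(void)
{
	char name[16];

	/* Truncates to 15 bytes and always NUL-terminates */
	bpf_strlcpy(name, "very-long-test-object-name", sizeof(name));
	printf("tid=%ld obj=%s\n", sys_gettid(), name);
}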
diff --git a/tools/testing/selftests/bpf/btf_helpers.c b/tools/testing/selftests/bpf/btf_helpers.c
index 48f90490f922..1c1c2c26690a 100644
--- a/tools/testing/selftests/bpf/btf_helpers.c
+++ b/tools/testing/selftests/bpf/btf_helpers.c
@@ -23,11 +23,15 @@ static const char * const btf_kind_str_mapping[] = {
[BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
[BTF_KIND_VAR] = "VAR",
[BTF_KIND_DATASEC] = "DATASEC",
+ [BTF_KIND_FLOAT] = "FLOAT",
+ [BTF_KIND_DECL_TAG] = "DECL_TAG",
+ [BTF_KIND_TYPE_TAG] = "TYPE_TAG",
+ [BTF_KIND_ENUM64] = "ENUM64",
};
static const char *btf_kind_str(__u16 kind)
{
- if (kind > BTF_KIND_DATASEC)
+ if (kind > BTF_KIND_ENUM64)
return "UNKNOWN";
return btf_kind_str_mapping[kind];
}
@@ -107,6 +111,7 @@ int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id)
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_TYPEDEF:
+ case BTF_KIND_TYPE_TAG:
fprintf(out, " type_id=%u", t->type);
break;
case BTF_KIND_ARRAY: {
@@ -135,14 +140,32 @@ int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id)
}
case BTF_KIND_ENUM: {
const struct btf_enum *v = btf_enum(t);
+ const char *fmt_str;
- fprintf(out, " size=%u vlen=%u", t->size, vlen);
+ fmt_str = btf_kflag(t) ? "\n\t'%s' val=%d" : "\n\t'%s' val=%u";
+ fprintf(out, " encoding=%s size=%u vlen=%u",
+ btf_kflag(t) ? "SIGNED" : "UNSIGNED", t->size, vlen);
for (i = 0; i < vlen; i++, v++) {
- fprintf(out, "\n\t'%s' val=%u",
+ fprintf(out, fmt_str,
btf_str(btf, v->name_off), v->val);
}
break;
}
+ case BTF_KIND_ENUM64: {
+ const struct btf_enum64 *v = btf_enum64(t);
+ const char *fmt_str;
+
+ fmt_str = btf_kflag(t) ? "\n\t'%s' val=%lld" : "\n\t'%s' val=%llu";
+
+ fprintf(out, " encoding=%s size=%u vlen=%u",
+ btf_kflag(t) ? "SIGNED" : "UNSIGNED", t->size, vlen);
+ for (i = 0; i < vlen; i++, v++) {
+ fprintf(out, fmt_str,
+ btf_str(btf, v->name_off),
+ ((__u64)v->val_hi32 << 32) | v->val_lo32);
+ }
+ break;
+ }
case BTF_KIND_FWD:
fprintf(out, " fwd_kind=%s", btf_kflag(t) ? "union" : "struct");
break;
@@ -173,6 +196,13 @@ int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id)
}
break;
}
+ case BTF_KIND_FLOAT:
+ fprintf(out, " size=%u", t->size);
+ break;
+ case BTF_KIND_DECL_TAG:
+ fprintf(out, " type_id=%u component_idx=%d",
+ t->type, btf_decl_tag(t)->component_idx);
+ break;
default:
break;
}
@@ -206,7 +236,7 @@ int btf_validate_raw(struct btf *btf, int nr_types, const char *exp_types[])
int i;
bool ok = true;
- ASSERT_EQ(btf__get_nr_types(btf), nr_types, "btf_nr_types");
+ ASSERT_EQ(btf__type_cnt(btf) - 1, nr_types, "btf_nr_types");
for (i = 1; i <= nr_types; i++) {
if (!ASSERT_STREQ(btf_type_raw_dump(btf, i), exp_types[i - 1], "raw_dump"))
@@ -229,7 +259,6 @@ const char *btf_type_c_dump(const struct btf *btf)
static char buf[16 * 1024];
FILE *buf_file;
struct btf_dump *d = NULL;
- struct btf_dump_opts opts = {};
int err, i;
buf_file = fmemopen(buf, sizeof(buf) - 1, "w");
@@ -238,22 +267,26 @@ const char *btf_type_c_dump(const struct btf *btf)
return NULL;
}
- opts.ctx = buf_file;
- d = btf_dump__new(btf, NULL, &opts, btf_dump_printf);
+ d = btf_dump__new(btf, btf_dump_printf, buf_file, NULL);
if (libbpf_get_error(d)) {
fprintf(stderr, "Failed to create btf_dump instance: %ld\n", libbpf_get_error(d));
- return NULL;
+ goto err_out;
}
- for (i = 1; i <= btf__get_nr_types(btf); i++) {
+ for (i = 1; i < btf__type_cnt(btf); i++) {
err = btf_dump__dump_type(d, i);
if (err) {
fprintf(stderr, "Failed to dump type [%d]: %d\n", i, err);
- return NULL;
+ goto err_out;
}
}
+ btf_dump__free(d);
fflush(buf_file);
fclose(buf_file);
return buf;
+err_out:
+ btf_dump__free(d);
+ fclose(buf_file);
+ return NULL;
}
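For reference, the ENUM64 hunk above recombines the split 64-bit enumerator value; a standalone sketch of the same arithmetic (struct btf_enum64 comes from the UAPI BTF header):

#include <linux/btf.h>

static inline __u64 enum64_value(const struct btf_enum64 *v)
{
	/* BTF stores 64-bit enumerators as two 32-bit halves */
	return ((__u64)v->val_hi32 << 32) | v->val_lo32;
}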
diff --git a/tools/testing/selftests/bpf/cap_helpers.c b/tools/testing/selftests/bpf/cap_helpers.c
new file mode 100644
index 000000000000..98f840c3a38f
--- /dev/null
+++ b/tools/testing/selftests/bpf/cap_helpers.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "cap_helpers.h"
+
+/* Avoid including <sys/capability.h> from the libcap-devel package;
+ * declare capget() and capset() here directly and use the glibc versions.
+ */
+int capget(cap_user_header_t header, cap_user_data_t data);
+int capset(cap_user_header_t header, const cap_user_data_t data);
+
+int cap_enable_effective(__u64 caps, __u64 *old_caps)
+{
+ struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
+ struct __user_cap_header_struct hdr = {
+ .version = _LINUX_CAPABILITY_VERSION_3,
+ };
+ __u32 cap0 = caps;
+ __u32 cap1 = caps >> 32;
+ int err;
+
+ err = capget(&hdr, data);
+ if (err)
+ return -errno;
+
+ if (old_caps)
+ *old_caps = (__u64)(data[1].effective) << 32 | data[0].effective;
+
+ if ((data[0].effective & cap0) == cap0 &&
+ (data[1].effective & cap1) == cap1)
+ return 0;
+
+ data[0].effective |= cap0;
+ data[1].effective |= cap1;
+ err = capset(&hdr, data);
+ if (err)
+ return -errno;
+
+ return 0;
+}
+
+int cap_disable_effective(__u64 caps, __u64 *old_caps)
+{
+ struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
+ struct __user_cap_header_struct hdr = {
+ .version = _LINUX_CAPABILITY_VERSION_3,
+ };
+ __u32 cap0 = caps;
+ __u32 cap1 = caps >> 32;
+ int err;
+
+ err = capget(&hdr, data);
+ if (err)
+ return -errno;
+
+ if (old_caps)
+ *old_caps = (__u64)(data[1].effective) << 32 | data[0].effective;
+
+ if (!(data[0].effective & cap0) && !(data[1].effective & cap1))
+ return 0;
+
+ data[0].effective &= ~cap0;
+ data[1].effective &= ~cap1;
+ err = capset(&hdr, data);
+ if (err)
+ return -errno;
+
+ return 0;
+}
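A hedged usage sketch for the pair of helpers above: temporarily drop a capability around a test step, then restore the saved effective set. The capability choice and function name are illustrative.

#include "cap_helpers.h"

static int run_without_sys_admin(void)
{
	__u64 old_caps = 0;

	if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &old_caps))
		return 1;
	/* ... exercise the less-privileged code path here ... */
	if (cap_enable_effective(old_caps, NULL))
		return 1;
	return 0;
}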
diff --git a/tools/testing/selftests/bpf/cap_helpers.h b/tools/testing/selftests/bpf/cap_helpers.h
new file mode 100644
index 000000000000..8dcb28557f76
--- /dev/null
+++ b/tools/testing/selftests/bpf/cap_helpers.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __CAP_HELPERS_H
+#define __CAP_HELPERS_H
+
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <errno.h>
+
+#ifndef CAP_PERFMON
+#define CAP_PERFMON 38
+#endif
+
+#ifndef CAP_BPF
+#define CAP_BPF 39
+#endif
+
+int cap_enable_effective(__u64 caps, __u64 *old_caps);
+int cap_disable_effective(__u64 caps, __u64 *old_caps);
+
+#endif
diff --git a/tools/testing/selftests/bpf/cgroup_getset_retval_hooks.h b/tools/testing/selftests/bpf/cgroup_getset_retval_hooks.h
new file mode 100644
index 000000000000..a525d3544fd7
--- /dev/null
+++ b/tools/testing/selftests/bpf/cgroup_getset_retval_hooks.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+BPF_RETVAL_HOOK(ingress, "cgroup_skb/ingress", __sk_buff, -EINVAL)
+BPF_RETVAL_HOOK(egress, "cgroup_skb/egress", __sk_buff, -EINVAL)
+BPF_RETVAL_HOOK(sock_create, "cgroup/sock_create", bpf_sock, 0)
+BPF_RETVAL_HOOK(sock_ops, "sockops", bpf_sock_ops, -EINVAL)
+BPF_RETVAL_HOOK(dev, "cgroup/dev", bpf_cgroup_dev_ctx, 0)
+BPF_RETVAL_HOOK(bind4, "cgroup/bind4", bpf_sock_addr, 0)
+BPF_RETVAL_HOOK(bind6, "cgroup/bind6", bpf_sock_addr, 0)
+BPF_RETVAL_HOOK(connect4, "cgroup/connect4", bpf_sock_addr, 0)
+BPF_RETVAL_HOOK(connect6, "cgroup/connect6", bpf_sock_addr, 0)
+BPF_RETVAL_HOOK(post_bind4, "cgroup/post_bind4", bpf_sock_addr, 0)
+BPF_RETVAL_HOOK(post_bind6, "cgroup/post_bind6", bpf_sock_addr, 0)
+BPF_RETVAL_HOOK(sendmsg4, "cgroup/sendmsg4", bpf_sock_addr, 0)
+BPF_RETVAL_HOOK(sendmsg6, "cgroup/sendmsg6", bpf_sock_addr, 0)
+BPF_RETVAL_HOOK(sysctl, "cgroup/sysctl", bpf_sysctl, 0)
+BPF_RETVAL_HOOK(recvmsg4, "cgroup/recvmsg4", bpf_sock_addr, -EINVAL)
+BPF_RETVAL_HOOK(recvmsg6, "cgroup/recvmsg6", bpf_sock_addr, -EINVAL)
+BPF_RETVAL_HOOK(getsockopt, "cgroup/getsockopt", bpf_sockopt, 0)
+BPF_RETVAL_HOOK(setsockopt, "cgroup/setsockopt", bpf_sockopt, 0)
+BPF_RETVAL_HOOK(getpeername4, "cgroup/getpeername4", bpf_sock_addr, -EINVAL)
+BPF_RETVAL_HOOK(getpeername6, "cgroup/getpeername6", bpf_sock_addr, -EINVAL)
+BPF_RETVAL_HOOK(getsockname4, "cgroup/getsockname4", bpf_sock_addr, -EINVAL)
+BPF_RETVAL_HOOK(getsockname6, "cgroup/getsockname6", bpf_sock_addr, -EINVAL)
+BPF_RETVAL_HOOK(sock_release, "cgroup/sock_release", bpf_sock, 0)
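This header is an X-macro list: consumers define BPF_RETVAL_HOOK, include the file, and undefine it again. A hedged sketch of that pattern (the struct and array names are illustrative, not the actual test harness):

struct retval_hook {
	const char *name;
	const char *section;
	int expected_err;
};

#define BPF_RETVAL_HOOK(name, section, ctx, expected_err) \
	{ #name, section, expected_err },

static struct retval_hook hooks[] = {
#include "cgroup_getset_retval_hooks.h"
};

#undef BPF_RETVAL_HOOK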
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index 033051717ba5..20cede4db3ce 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -4,6 +4,7 @@
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <sys/xattr.h>
#include <linux/limits.h>
#include <stdio.h>
#include <stdlib.h>
@@ -12,62 +13,81 @@
#include <unistd.h>
#include <ftw.h>
-
#include "cgroup_helpers.h"
+#include "bpf_util.h"
/*
* To avoid relying on the system setup, when setup_cgroup_env is called
- * we create a new mount namespace, and cgroup namespace. The cgroup2
- * root is mounted at CGROUP_MOUNT_PATH
- *
- * Unfortunately, most people don't have cgroupv2 enabled at this point in time.
- * It's easier to create our own mount namespace and manage it ourselves.
+ * we create a new mount namespace and a cgroup namespace. The cgroupv2
+ * root is mounted at CGROUP_MOUNT_PATH. Unfortunately, most people don't
+ * have cgroupv2 enabled at this point in time. It's easier to create our
+ * own mount namespace and manage it ourselves. We assume /mnt exists.
*
- * We assume /mnt exists.
+ * Related cgroupv1 helpers are named *classid*(), since we only use the
+ * net_cls controller for tagging net_cls.classid. We assume the default
+ * mount under /sys/fs/cgroup/net_cls, which should be the case for the
+ * vast majority of users.
*/
#define WALK_FD_LIMIT 16
+
#define CGROUP_MOUNT_PATH "/mnt"
+#define CGROUP_MOUNT_DFLT "/sys/fs/cgroup"
+#define NETCLS_MOUNT_PATH CGROUP_MOUNT_DFLT "/net_cls"
#define CGROUP_WORK_DIR "/cgroup-test-work-dir"
+
+#define format_cgroup_path_pid(buf, path, pid) \
+ snprintf(buf, sizeof(buf), "%s%s%d%s", CGROUP_MOUNT_PATH, \
+ CGROUP_WORK_DIR, pid, path)
+
#define format_cgroup_path(buf, path) \
- snprintf(buf, sizeof(buf), "%s%s%s", CGROUP_MOUNT_PATH, \
- CGROUP_WORK_DIR, path)
+ format_cgroup_path_pid(buf, path, getpid())
-/**
- * enable_all_controllers() - Enable all available cgroup v2 controllers
- *
- * Enable all available cgroup v2 controllers in order to increase
- * the code coverage.
- *
- * If successful, 0 is returned.
- */
-static int enable_all_controllers(char *cgroup_path)
+#define format_parent_cgroup_path(buf, path) \
+ format_cgroup_path_pid(buf, path, getppid())
+
+#define format_classid_path_pid(buf, pid) \
+ snprintf(buf, sizeof(buf), "%s%s%d", NETCLS_MOUNT_PATH, \
+ CGROUP_WORK_DIR, pid)
+
+#define format_classid_path(buf) \
+ format_classid_path_pid(buf, getpid())
+
+static __thread bool cgroup_workdir_mounted;
+
+static void __cleanup_cgroup_environment(void);
+
+static int __enable_controllers(const char *cgroup_path, const char *controllers)
{
char path[PATH_MAX + 1];
- char buf[PATH_MAX];
+ char enable[PATH_MAX + 1];
char *c, *c2;
int fd, cfd;
ssize_t len;
- snprintf(path, sizeof(path), "%s/cgroup.controllers", cgroup_path);
- fd = open(path, O_RDONLY);
- if (fd < 0) {
- log_err("Opening cgroup.controllers: %s", path);
- return 1;
- }
-
- len = read(fd, buf, sizeof(buf) - 1);
- if (len < 0) {
+	/* If no controllers are passed, enable all available controllers */
+ if (!controllers) {
+ snprintf(path, sizeof(path), "%s/cgroup.controllers",
+ cgroup_path);
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ log_err("Opening cgroup.controllers: %s", path);
+ return 1;
+ }
+ len = read(fd, enable, sizeof(enable) - 1);
+ if (len < 0) {
+ close(fd);
+ log_err("Reading cgroup.controllers: %s", path);
+ return 1;
+ } else if (len == 0) { /* No controllers to enable */
+ close(fd);
+ return 0;
+ }
+ enable[len] = 0;
close(fd);
- log_err("Reading cgroup.controllers: %s", path);
- return 1;
+ } else {
+ bpf_strlcpy(enable, controllers, sizeof(enable));
}
- buf[len] = 0;
- close(fd);
-
- /* No controllers available? We're probably on cgroup v1. */
- if (len == 0)
- return 0;
snprintf(path, sizeof(path), "%s/cgroup.subtree_control", cgroup_path);
cfd = open(path, O_RDWR);
@@ -76,7 +96,7 @@ static int enable_all_controllers(char *cgroup_path)
return 1;
}
- for (c = strtok_r(buf, " ", &c2); c; c = strtok_r(NULL, " ", &c2)) {
+ for (c = strtok_r(enable, " ", &c2); c; c = strtok_r(NULL, " ", &c2)) {
if (dprintf(cfd, "+%s\n", c) <= 0) {
log_err("Enabling controller %s: %s", c, path);
close(cfd);
@@ -88,6 +108,87 @@ static int enable_all_controllers(char *cgroup_path)
}
/**
+ * enable_controllers() - Enable cgroup v2 controllers
+ * @relative_path: The cgroup path, relative to the workdir
+ * @controllers: List of controllers to enable in cgroup.controllers format
+ *
+ * Enable the given cgroup v2 controllers; if @controllers is NULL, enable
+ * all available controllers.
+ *
+ * If successful, 0 is returned.
+ */
+int enable_controllers(const char *relative_path, const char *controllers)
+{
+ char cgroup_path[PATH_MAX + 1];
+
+ format_cgroup_path(cgroup_path, relative_path);
+ return __enable_controllers(cgroup_path, controllers);
+}
+
+static int __write_cgroup_file(const char *cgroup_path, const char *file,
+ const char *buf)
+{
+ char file_path[PATH_MAX + 1];
+ int fd;
+
+ snprintf(file_path, sizeof(file_path), "%s/%s", cgroup_path, file);
+ fd = open(file_path, O_RDWR);
+ if (fd < 0) {
+ log_err("Opening %s", file_path);
+ return 1;
+ }
+
+ if (dprintf(fd, "%s", buf) <= 0) {
+ log_err("Writing to %s", file_path);
+ close(fd);
+ return 1;
+ }
+ close(fd);
+ return 0;
+}
+
+/**
+ * write_cgroup_file() - Write to a cgroup file
+ * @relative_path: The cgroup path, relative to the workdir
+ * @file: The name of the file in cgroupfs to write to
+ * @buf: Buffer to write to the file
+ *
+ * Write to a file in the given cgroup's directory.
+ *
+ * If successful, 0 is returned.
+ */
+int write_cgroup_file(const char *relative_path, const char *file,
+ const char *buf)
+{
+ char cgroup_path[PATH_MAX - 24];
+
+ format_cgroup_path(cgroup_path, relative_path);
+ return __write_cgroup_file(cgroup_path, file, buf);
+}
+
+/**
+ * write_cgroup_file_parent() - Write to a cgroup file in the parent process
+ * workdir
+ * @relative_path: The cgroup path, relative to the parent process workdir
+ * @file: The name of the file in cgroupfs to write to
+ * @buf: Buffer to write to the file
+ *
+ * Write to a file in the given cgroup's directory under the parent process
+ * workdir.
+ *
+ * If successful, 0 is returned.
+ */
+int write_cgroup_file_parent(const char *relative_path, const char *file,
+ const char *buf)
+{
+ char cgroup_path[PATH_MAX - 24];
+
+ format_parent_cgroup_path(cgroup_path, relative_path);
+ return __write_cgroup_file(cgroup_path, file, buf);
+}
+
+/**
* setup_cgroup_environment() - Setup the cgroup environment
*
* After calling this function, cleanup_cgroup_environment should be called
@@ -102,6 +203,11 @@ int setup_cgroup_environment(void)
format_cgroup_path(cgroup_workdir, "");
+ if (mkdir(CGROUP_MOUNT_PATH, 0777) && errno != EEXIST) {
+ log_err("mkdir mount");
+ return 1;
+ }
+
if (unshare(CLONE_NEWNS)) {
log_err("unshare");
return 1;
@@ -116,16 +222,19 @@ int setup_cgroup_environment(void)
log_err("mount cgroup2");
return 1;
}
+ cgroup_workdir_mounted = true;
/* Cleanup existing failed runs, now that the environment is setup */
- cleanup_cgroup_environment();
+ __cleanup_cgroup_environment();
if (mkdir(cgroup_workdir, 0777) && errno != EEXIST) {
log_err("mkdir cgroup work dir");
return 1;
}
- if (enable_all_controllers(cgroup_workdir))
+ /* Enable all available controllers to increase test coverage */
+ if (__enable_controllers(CGROUP_MOUNT_PATH, NULL) ||
+ __enable_controllers(cgroup_workdir, NULL))
return 1;
return 0;
@@ -139,8 +248,7 @@ static int nftwfunc(const char *filename, const struct stat *statptr,
return 0;
}
-
-static int join_cgroup_from_top(char *cgroup_path)
+static int join_cgroup_from_top(const char *cgroup_path)
{
char cgroup_procs_path[PATH_MAX + 1];
pid_t pid = getpid();
@@ -166,7 +274,7 @@ static int join_cgroup_from_top(char *cgroup_path)
/**
* join_cgroup() - Join a cgroup
- * @path: The cgroup path, relative to the workdir, to join
+ * @relative_path: The cgroup path, relative to the workdir, to join
*
* This function expects a cgroup to already be created, relative to the cgroup
* work dir, and it joins it. For example, passing "/my-cgroup" as the path
@@ -175,19 +283,82 @@ static int join_cgroup_from_top(char *cgroup_path)
*
* On success, it returns 0, otherwise on failure it returns 1.
*/
-int join_cgroup(const char *path)
+int join_cgroup(const char *relative_path)
{
char cgroup_path[PATH_MAX + 1];
- format_cgroup_path(cgroup_path, path);
+ format_cgroup_path(cgroup_path, relative_path);
return join_cgroup_from_top(cgroup_path);
}
/**
+ * join_root_cgroup() - Join the root cgroup
+ *
+ * This function joins the root cgroup.
+ *
+ * On success, it returns 0, otherwise on failure it returns 1.
+ */
+int join_root_cgroup(void)
+{
+ return join_cgroup_from_top(CGROUP_MOUNT_PATH);
+}
+
+/**
+ * join_parent_cgroup() - Join a cgroup in the parent process workdir
+ * @relative_path: The cgroup path, relative to parent process workdir, to join
+ *
+ * See join_cgroup().
+ *
+ * On success, it returns 0, otherwise on failure it returns 1.
+ */
+int join_parent_cgroup(const char *relative_path)
+{
+ char cgroup_path[PATH_MAX + 1];
+
+ format_parent_cgroup_path(cgroup_path, relative_path);
+ return join_cgroup_from_top(cgroup_path);
+}
+
+/**
+ * set_cgroup_xattr() - Set xattr on a cgroup dir
+ * @relative_path: The cgroup path, relative to the workdir, to set xattr
+ * @name: xattr name
+ * @value: xattr value
+ *
+ * This function sets an xattr on the cgroup directory.
+ *
+ * On success, it returns 0, otherwise on failure it returns -1.
+ */
+int set_cgroup_xattr(const char *relative_path,
+ const char *name,
+ const char *value)
+{
+ char cgroup_path[PATH_MAX + 1];
+
+ format_cgroup_path(cgroup_path, relative_path);
+ return setxattr(cgroup_path, name, value, strlen(value) + 1, 0);
+}
+
+/**
+ * __cleanup_cgroup_environment() - Delete temporary cgroups
+ *
+ * This is a helper for cleanup_cgroup_environment() that is responsible for
+ * deletion of all temporary cgroups that have been created during the test.
+ */
+static void __cleanup_cgroup_environment(void)
+{
+ char cgroup_workdir[PATH_MAX + 1];
+
+ format_cgroup_path(cgroup_workdir, "");
+ join_cgroup_from_top(CGROUP_MOUNT_PATH);
+ nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
+}
+
+/**
* cleanup_cgroup_environment() - Cleanup Cgroup Testing Environment
*
* This is an idempotent function to delete all temporary cgroups that
- * have been created during the test, including the cgroup testing work
+ * have been created during the test and unmount the cgroup testing work
* directory.
*
* At call time, it moves the calling process to the root cgroup, and then
@@ -198,16 +369,72 @@ int join_cgroup(const char *path)
*/
void cleanup_cgroup_environment(void)
{
- char cgroup_workdir[PATH_MAX + 1];
+ __cleanup_cgroup_environment();
+ if (cgroup_workdir_mounted && umount(CGROUP_MOUNT_PATH))
+ log_err("umount cgroup2");
+ cgroup_workdir_mounted = false;
+}
- format_cgroup_path(cgroup_workdir, "");
- join_cgroup_from_top(CGROUP_MOUNT_PATH);
- nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
+/**
+ * get_root_cgroup() - Get the FD of the root cgroup
+ *
+ * On success, it returns the file descriptor. On failure, it returns -1.
+ * If there is a failure, it prints the error to stderr.
+ */
+int get_root_cgroup(void)
+{
+ int fd;
+
+ fd = open(CGROUP_MOUNT_PATH, O_RDONLY);
+ if (fd < 0) {
+ log_err("Opening root cgroup");
+ return -1;
+ }
+ return fd;
+}
+
+/*
+ * remove_cgroup() - Remove a cgroup
+ * @relative_path: The cgroup path, relative to the workdir, to remove
+ *
+ * This function expects a cgroup to already be created, relative to the cgroup
+ * work dir. It also expects that the cgroup has no children or live
+ * processes, and it removes the cgroup.
+ *
+ * On failure, it will print an error to stderr.
+ */
+void remove_cgroup(const char *relative_path)
+{
+ char cgroup_path[PATH_MAX + 1];
+
+ format_cgroup_path(cgroup_path, relative_path);
+ if (rmdir(cgroup_path))
+ log_err("rmdiring cgroup %s .. %s", relative_path, cgroup_path);
+}
+
+/*
+ * remove_cgroup_pid() - Remove a cgroup setup by process identified by PID
+ * @relative_path: The cgroup path, relative to the workdir, to remove
+ * @pid: PID to be used to find cgroup_path
+ *
+ * This function expects a cgroup to already be created, relative to the cgroup
+ * work dir. It also expects that the cgroup has no children or live
+ * processes, and it removes the cgroup.
+ *
+ * On failure, it will print an error to stderr.
+ */
+void remove_cgroup_pid(const char *relative_path, int pid)
+{
+ char cgroup_path[PATH_MAX + 1];
+
+ format_cgroup_path_pid(cgroup_path, relative_path, pid);
+ if (rmdir(cgroup_path))
+ log_err("rmdiring cgroup %s .. %s", relative_path, cgroup_path);
}
/**
* create_and_get_cgroup() - Create a cgroup, relative to workdir, and get the FD
- * @path: The cgroup path, relative to the workdir, to join
+ * @relative_path: The cgroup path, relative to the workdir, to join
*
* This function creates a cgroup under the top level workdir and returns the
* file descriptor. It is idempotent.
@@ -215,14 +442,14 @@ void cleanup_cgroup_environment(void)
* On success, it returns the file descriptor. On failure it returns -1.
* If there is a failure, it prints the error to stderr.
*/
-int create_and_get_cgroup(const char *path)
+int create_and_get_cgroup(const char *relative_path)
{
char cgroup_path[PATH_MAX + 1];
int fd;
- format_cgroup_path(cgroup_path, path);
+ format_cgroup_path(cgroup_path, relative_path);
if (mkdir(cgroup_path, 0777) && errno != EEXIST) {
- log_err("mkdiring cgroup %s .. %s", path, cgroup_path);
+ log_err("mkdiring cgroup %s .. %s", relative_path, cgroup_path);
return -1;
}
@@ -236,26 +463,23 @@ int create_and_get_cgroup(const char *path)
}
/**
- * get_cgroup_id() - Get cgroup id for a particular cgroup path
- * @path: The cgroup path, relative to the workdir, to join
+ * get_cgroup_id_from_path - Get cgroup id for a particular cgroup path
+ * @cgroup_workdir: The absolute cgroup path
*
* On success, it returns the cgroup id. On failure it returns 0,
* which is an invalid cgroup id.
* If there is a failure, it prints the error to stderr.
*/
-unsigned long long get_cgroup_id(const char *path)
+static unsigned long long get_cgroup_id_from_path(const char *cgroup_workdir)
{
int dirfd, err, flags, mount_id, fhsize;
union {
unsigned long long cgid;
unsigned char raw_bytes[8];
} id;
- char cgroup_workdir[PATH_MAX + 1];
struct file_handle *fhp, *fhp2;
unsigned long long ret = 0;
- format_cgroup_path(cgroup_workdir, path);
-
dirfd = AT_FDCWD;
flags = 0;
fhsize = sizeof(*fhp);
@@ -291,6 +515,14 @@ free_mem:
return ret;
}
+unsigned long long get_cgroup_id(const char *relative_path)
+{
+ char cgroup_workdir[PATH_MAX + 1];
+
+ format_cgroup_path(cgroup_workdir, relative_path);
+ return get_cgroup_id_from_path(cgroup_workdir);
+}
+
int cgroup_setup_and_join(const char *path) {
int cg_fd;
@@ -313,3 +545,207 @@ int cgroup_setup_and_join(const char *path) {
}
return cg_fd;
}
+
+/**
+ * setup_classid_environment() - Setup the cgroupv1 net_cls environment
+ *
+ * This function should only be called in a custom mount namespace, e.g.
+ * created by running setup_cgroup_environment.
+ *
+ * After calling this function, cleanup_classid_environment should be called
+ * once testing is complete.
+ *
+ * This function will print an error to stderr and return 1 if it is unable
+ * to setup the cgroup environment. If setup is successful, 0 is returned.
+ */
+int setup_classid_environment(void)
+{
+ char cgroup_workdir[PATH_MAX + 1];
+
+ format_classid_path(cgroup_workdir);
+
+ if (mount("tmpfs", CGROUP_MOUNT_DFLT, "tmpfs", 0, NULL) &&
+ errno != EBUSY) {
+ log_err("mount cgroup base");
+ return 1;
+ }
+
+ if (mkdir(NETCLS_MOUNT_PATH, 0777) && errno != EEXIST) {
+ log_err("mkdir cgroup net_cls");
+ return 1;
+ }
+
+ if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls")) {
+ if (errno != EBUSY) {
+ log_err("mount cgroup net_cls");
+ return 1;
+ }
+
+ if (rmdir(NETCLS_MOUNT_PATH)) {
+ log_err("rmdir cgroup net_cls");
+ return 1;
+ }
+ if (umount(CGROUP_MOUNT_DFLT)) {
+ log_err("umount cgroup base");
+ return 1;
+ }
+ }
+
+ cleanup_classid_environment();
+
+ if (mkdir(cgroup_workdir, 0777) && errno != EEXIST) {
+ log_err("mkdir cgroup work dir");
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * set_classid() - Set a cgroupv1 net_cls classid
+ *
+ * Writes the classid into the cgroup work dir's net_cls.classid
+ * file in order to later on trigger socket tagging.
+ *
+ * We leverage the current pid as the classid, ensuring unique identification.
+ *
+ * On success, it returns 0, otherwise on failure it returns 1. If there
+ * is a failure, it prints the error to stderr.
+ */
+int set_classid(void)
+{
+ char cgroup_workdir[PATH_MAX - 42];
+ char cgroup_classid_path[PATH_MAX + 1];
+ int fd, rc = 0;
+
+ format_classid_path(cgroup_workdir);
+ snprintf(cgroup_classid_path, sizeof(cgroup_classid_path),
+ "%s/net_cls.classid", cgroup_workdir);
+
+ fd = open(cgroup_classid_path, O_WRONLY);
+ if (fd < 0) {
+ log_err("Opening cgroup classid: %s", cgroup_classid_path);
+ return 1;
+ }
+
+ if (dprintf(fd, "%u\n", getpid()) < 0) {
+ log_err("Setting cgroup classid");
+ rc = 1;
+ }
+
+ close(fd);
+ return rc;
+}
+
+/**
+ * join_classid() - Join a cgroupv1 net_cls classid
+ *
+ * This function expects the cgroup work dir to be already created, as we
+ * join it here. This causes the process sockets to be tagged with the given
+ * net_cls classid.
+ *
+ * On success, it returns 0, otherwise on failure it returns 1.
+ */
+int join_classid(void)
+{
+ char cgroup_workdir[PATH_MAX + 1];
+
+ format_classid_path(cgroup_workdir);
+ return join_cgroup_from_top(cgroup_workdir);
+}
+
+/**
+ * cleanup_classid_environment() - Cleanup the cgroupv1 net_cls environment
+ *
+ * At call time, it moves the calling process to the root cgroup, and then
+ * runs the deletion process.
+ *
+ * On failure, it will print an error to stderr, and try to continue.
+ */
+void cleanup_classid_environment(void)
+{
+ char cgroup_workdir[PATH_MAX + 1];
+
+ format_classid_path(cgroup_workdir);
+ join_cgroup_from_top(NETCLS_MOUNT_PATH);
+ nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
+}
+
+/**
+ * get_classid_cgroup_id - Get the cgroup id of a net_cls cgroup
+ */
+unsigned long long get_classid_cgroup_id(void)
+{
+ char cgroup_workdir[PATH_MAX + 1];
+
+ format_classid_path(cgroup_workdir);
+ return get_cgroup_id_from_path(cgroup_workdir);
+}
+
+/**
+ * get_cgroup1_hierarchy_id - Retrieves the ID of a cgroup1 hierarchy from the cgroup1 subsys name.
+ * @subsys_name: The cgroup1 subsys name, which can be retrieved from /proc/self/cgroup. It can be
+ * a named cgroup like "name=systemd", a controller name like "net_cls", or multi-controllers like
+ * "net_cls,net_prio".
+ */
+int get_cgroup1_hierarchy_id(const char *subsys_name)
+{
+ char *c, *c2, *c3, *c4;
+ bool found = false;
+ char line[1024];
+ FILE *file;
+ int i, id;
+
+ if (!subsys_name)
+ return -1;
+
+ file = fopen("/proc/self/cgroup", "r");
+ if (!file) {
+ log_err("fopen /proc/self/cgroup");
+ return -1;
+ }
+
+ while (fgets(line, 1024, file)) {
+ i = 0;
+ for (c = strtok_r(line, ":", &c2); c && i < 2; c = strtok_r(NULL, ":", &c2)) {
+ if (i == 0) {
+ id = strtol(c, NULL, 10);
+ } else if (i == 1) {
+ if (!strcmp(c, subsys_name)) {
+ found = true;
+ break;
+ }
+
+ /* Multiple subsystems may share one single mount point */
+ for (c3 = strtok_r(c, ",", &c4); c3;
+ c3 = strtok_r(NULL, ",", &c4)) {
+				if (!strcmp(c3, subsys_name)) {
+ found = true;
+ break;
+ }
+ }
+ }
+ i++;
+ }
+ if (found)
+ break;
+ }
+ fclose(file);
+ return found ? id : -1;
+}
+
+/**
+ * open_classid() - Open a cgroupv1 net_cls classid
+ *
+ * This function expects the cgroup work dir to be already created, as we
+ * open it here.
+ *
+ * On success, it returns the file descriptor. On failure it returns -1.
+ */
+int open_classid(void)
+{
+ char cgroup_workdir[PATH_MAX + 1];
+
+ format_classid_path(cgroup_workdir);
+ return open(cgroup_workdir, O_RDONLY);
+}
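Taken together, the helpers above imply a standard test lifecycle; a hedged sketch, with an illustrative cgroup name:

#include <unistd.h>
#include "cgroup_helpers.h"

static int run_in_test_cgroup(void)
{
	int cg_fd, ret = 1;

	if (setup_cgroup_environment())
		return 1;
	cg_fd = create_and_get_cgroup("/test-cg");
	if (cg_fd < 0)
		goto out;
	if (join_cgroup("/test-cg"))
		goto out_close;
	/* ... attach BPF programs to cg_fd and run assertions ... */
	ret = 0;
out_close:
	close(cg_fd);
out:
	cleanup_cgroup_environment();
	return ret;
}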
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h
index 5fe3d88e4f0d..3857304be874 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.h
+++ b/tools/testing/selftests/bpf/cgroup_helpers.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CGROUP_HELPERS_H
#define __CGROUP_HELPERS_H
+
#include <errno.h>
#include <string.h>
@@ -8,12 +9,38 @@
#define log_err(MSG, ...) fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
__FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
+/* cgroupv2 related */
+int enable_controllers(const char *relative_path, const char *controllers);
+int write_cgroup_file(const char *relative_path, const char *file,
+ const char *buf);
+int write_cgroup_file_parent(const char *relative_path, const char *file,
+ const char *buf);
+int cgroup_setup_and_join(const char *relative_path);
+int get_root_cgroup(void);
+int create_and_get_cgroup(const char *relative_path);
+void remove_cgroup(const char *relative_path);
+void remove_cgroup_pid(const char *relative_path, int pid);
+unsigned long long get_cgroup_id(const char *relative_path);
+int get_cgroup1_hierarchy_id(const char *subsys_name);
+
+int join_cgroup(const char *relative_path);
+int join_root_cgroup(void);
+int join_parent_cgroup(const char *relative_path);
+
+int set_cgroup_xattr(const char *relative_path,
+ const char *name,
+ const char *value);
-int cgroup_setup_and_join(const char *path);
-int create_and_get_cgroup(const char *path);
-int join_cgroup(const char *path);
int setup_cgroup_environment(void);
void cleanup_cgroup_environment(void);
-unsigned long long get_cgroup_id(const char *path);
-#endif
+/* cgroupv1 related */
+int set_classid(void);
+int join_classid(void);
+unsigned long long get_classid_cgroup_id(void);
+int open_classid(void);
+
+int setup_classid_environment(void);
+void cleanup_classid_environment(void);
+
+#endif /* __CGROUP_HELPERS_H */
diff --git a/tools/testing/selftests/bpf/cgroup_tcp_skb.h b/tools/testing/selftests/bpf/cgroup_tcp_skb.h
new file mode 100644
index 000000000000..7f6b24f102fb
--- /dev/null
+++ b/tools/testing/selftests/bpf/cgroup_tcp_skb.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+/* Define socket states for tracking messages sent to and from the
+ * socket.
+ *
+ * These states are based on RFC 9293 with some modifications to support
+ * tracking of messages sent out from a socket. For example, when a SYN is
+ * received, a new socket transitions to the SYN_RECV state defined in
+ * RFC 9293. We instead put it in the SYN_RECV_SENDING_SYN_ACK state, and
+ * when the SYN-ACK is sent out it moves to SYN_RECV. With this
+ * modification, we can track the messages sent out from a socket.
+ */
+
+#ifndef __CGROUP_TCP_SKB_H__
+#define __CGROUP_TCP_SKB_H__
+
+enum {
+ INIT,
+ CLOSED,
+ SYN_SENT,
+ SYN_RECV_SENDING_SYN_ACK,
+ SYN_RECV,
+ ESTABLISHED,
+ FIN_WAIT1,
+ FIN_WAIT2,
+ CLOSE_WAIT_SENDING_ACK,
+ CLOSE_WAIT,
+ CLOSING,
+ LAST_ACK,
+ TIME_WAIT_SENDING_ACK,
+ TIME_WAIT,
+};
+
+#endif /* __CGROUP_TCP_SKB_H__ */
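To make the modified state machine concrete, a hedged sketch of one egress transition (the helper name and flag plumbing are illustrative):

#include <stdbool.h>
#include "cgroup_tcp_skb.h"

/* Our SYN-ACK leaving the socket completes the intermediate
 * SYN_RECV_SENDING_SYN_ACK step described in the comment above.
 */
static int egress_next_state(int state, bool syn, bool ack)
{
	if (state == SYN_RECV_SENDING_SYN_ACK && syn && ack)
		return SYN_RECV;
	return state;
}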
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 37e1f303fc11..558839e3c185 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -1,46 +1,133 @@
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_BPF=y
-CONFIG_BPF_SYSCALL=y
-CONFIG_NET_CLS_BPF=m
CONFIG_BPF_EVENTS=y
-CONFIG_TEST_BPF=m
-CONFIG_CGROUP_BPF=y
-CONFIG_NETDEVSIM=m
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_INGRESS=y
-CONFIG_NET_IPIP=y
-CONFIG_IPV6=y
-CONFIG_NET_IPGRE_DEMUX=y
-CONFIG_NET_IPGRE=y
-CONFIG_IPV6_GRE=y
-CONFIG_CRYPTO_USER_API_HASH=m
-CONFIG_CRYPTO_HMAC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_VXLAN=y
-CONFIG_GENEVE=y
-CONFIG_NET_CLS_FLOWER=m
-CONFIG_LWTUNNEL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_KPROBE_OVERRIDE=y
+CONFIG_BPF_LIRC_MODE2=y
+CONFIG_BPF_LSM=y
CONFIG_BPF_STREAM_PARSER=y
-CONFIG_XDP_SOCKETS=y
+CONFIG_BPF_SYSCALL=y
+# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set
+CONFIG_CGROUP_BPF=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_USER_API=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_SKCIPHER=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_AES=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DMABUF_HEAPS=y
+CONFIG_DMABUF_HEAPS_SYSTEM=y
+CONFIG_DUMMY=y
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_FPROBE=y
CONFIG_FTRACE_SYSCALLS=y
-CONFIG_IPV6_TUNNEL=y
+CONFIG_FUNCTION_ERROR_INJECTION=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FS_VERITY=y
+CONFIG_GENEVE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IMA=y
+CONFIG_IMA_READ_POLICY=y
+CONFIG_IMA_WRITE_POLICY=y
+CONFIG_INET_ESP=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_TARGET_SYNPROXY=y
+CONFIG_IPV6=y
+CONFIG_IPV6_FOU=y
+CONFIG_IPV6_FOU_TUNNEL=y
CONFIG_IPV6_GRE=y
CONFIG_IPV6_SEG6_BPF=y
-CONFIG_NET_FOU=m
-CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_IPV6_FOU=m
-CONFIG_IPV6_FOU_TUNNEL=m
+CONFIG_IPV6_SIT=y
+CONFIG_IPV6_TUNNEL=y
+CONFIG_KEYS=y
+CONFIG_LIRC=y
+CONFIG_LIVEPATCH=y
+CONFIG_LWTUNNEL=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULES=y
+CONFIG_MODVERSIONS=y
CONFIG_MPLS=y
-CONFIG_NET_MPLS_GSO=m
-CONFIG_MPLS_ROUTING=m
-CONFIG_MPLS_IPTUNNEL=m
-CONFIG_IPV6_SIT=m
-CONFIG_BPF_JIT=y
-CONFIG_BPF_LSM=y
+CONFIG_MPLS_IPTUNNEL=y
+CONFIG_MPLS_ROUTING=y
+CONFIG_MPTCP=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBMOD=y
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_CLS_FLOWER=y
+CONFIG_NET_CLS_MATCHALL=y
+CONFIG_NET_FOU=y
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_NET_IPGRE=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPIP=y
+CONFIG_NET_MPLS_GSO=y
+CONFIG_NET_SCH_BPF=y
+CONFIG_NET_SCH_FQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCHED=y
+CONFIG_NETDEVSIM=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_NETFILTER_SYNPROXY=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
+CONFIG_NETKIT=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_DEFRAG_IPV4=y
+CONFIG_NF_DEFRAG_IPV6=y
+CONFIG_NF_TABLES=y
+CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NF_TABLES_IPV6=y
+CONFIG_NETFILTER_INGRESS=y
+CONFIG_IP_NF_IPTABLES_LEGACY=y
+CONFIG_IP6_NF_IPTABLES_LEGACY=y
+CONFIG_NETFILTER_XTABLES_LEGACY=y
+CONFIG_NF_FLOW_TABLE=y
+CONFIG_NF_FLOW_TABLE_INET=y
+CONFIG_NETFILTER_NETLINK=y
+CONFIG_NFT_FLOW_OFFLOAD=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_NF_NAT=y
+CONFIG_PACKET=y
+CONFIG_RC_CORE=y
+CONFIG_SAMPLES=y
+CONFIG_SAMPLE_LIVEPATCH=m
CONFIG_SECURITY=y
-CONFIG_LIRC=y
-CONFIG_IMA=y
CONFIG_SECURITYFS=y
-CONFIG_IMA_WRITE_POLICY=y
-CONFIG_IMA_READ_POLICY=y
-CONFIG_BLK_DEV_LOOP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_TEST_BPF=m
+CONFIG_UDMABUF=y
+CONFIG_USERFAULTFD=y
+CONFIG_VSOCKETS=y
+CONFIG_VXLAN=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_TCP_CONG_DCTCP=y
+CONFIG_TCP_CONG_BBR=y
+CONFIG_INFINIBAND=y
+CONFIG_SMC=y
+CONFIG_SMC_HS_CTRL_BPF=y
+CONFIG_DIBS=y
+CONFIG_DIBS_LO=y
\ No newline at end of file
diff --git a/tools/testing/selftests/bpf/config.aarch64 b/tools/testing/selftests/bpf/config.aarch64
new file mode 100644
index 000000000000..7efad36ceb26
--- /dev/null
+++ b/tools/testing/selftests/bpf/config.aarch64
@@ -0,0 +1,154 @@
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_ATA=y
+CONFIG_AUDIT=y
+CONFIG_BINFMT_MISC=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BONDING=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_JIT_DEFAULT_ON=y
+CONFIG_BPF_PRELOAD_UMD=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_BRIDGE=m
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_COMPAT=y
+CONFIG_CPUSETS=y
+CONFIG_CRASH_DUMP=y
+CONFIG_CRYPTO_USER_API_RNG=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_INFO_REDUCED=n
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_SG=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DEVTMPFS=y
+CONFIG_DRM=y
+CONFIG_EXPERT=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_FB=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_FUSE_FS=y
+CONFIG_FW_CFG_SYSFS_CMDLINE=y
+CONFIG_FW_CFG_SYSFS=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_HAVE_EBPF_JIT=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HEADERS_INSTALL=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HW_RANDOM=y
+CONFIG_HZ_100=y
+CONFIG_IDLE_PAGE_TRACKING=y
+CONFIG_IKHEADERS=y
+CONFIG_INET6_ESP=y
+CONFIG_INET=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPVLAN=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KERNEL_UNCOMPRESSED=y
+CONFIG_KPROBES_ON_FTRACE=y
+CONFIG_KPROBES=y
+CONFIG_KRETPROBES=y
+CONFIG_KSM=y
+CONFIG_LATENCYTOP=y
+CONFIG_LIVEPATCH=y
+CONFIG_LOCK_STAT=y
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAILBOX=y
+CONFIG_MEMCG=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_NAMESPACES=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NET_KEY=y
+CONFIG_NET_VRF=y
+CONFIG_NET=y
+CONFIG_NLMON=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=256
+CONFIG_NUMA=y
+CONFIG_OVERLAY_FS=y
+CONFIG_PACKET_DIAG=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI=y
+CONFIG_PL320_MBOX=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROFILING=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PTDUMP_DEBUGFS=y
+CONFIG_RC_DEVICES=y
+CONFIG_RC_LOOPBACK=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SAMPLE_SECCOMP=y
+CONFIG_SAMPLES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_STACK_TRACER=y
+CONFIG_STATIC_KEYS_SELFTEST=y
+CONFIG_SYSVIPC=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TCG_TIS=y
+CONFIG_TCG_TPM=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TLS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_UPROBES=y
+CONFIG_USER_NS=y
+CONFIG_VETH=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VSOCKETS_LOOPBACK=y
+CONFIG_XFRM_USER=y
diff --git a/tools/testing/selftests/bpf/config.ppc64el b/tools/testing/selftests/bpf/config.ppc64el
new file mode 100644
index 000000000000..b53afb5e0b71
--- /dev/null
+++ b/tools/testing/selftests/bpf/config.ppc64el
@@ -0,0 +1,92 @@
+CONFIG_ALTIVEC=y
+CONFIG_AUDIT=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BONDING=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_PRELOAD_UMD=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=hvc0 wg.success=hvc1 panic_on_warn=1"
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPUSETS=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_FS=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DEVTMPFS=y
+CONFIG_EXPERT=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_FRAME_POINTER=y
+CONFIG_FRAME_WARN=1280
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HVC_CONSOLE=y
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KPROBES=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_VRF=y
+CONFIG_NET=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NONPORTABLE=y
+CONFIG_NR_CPUS=256
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_PPC64=y
+CONFIG_PPC_OF_BOOT_TRAMPOLINE=y
+CONFIG_PPC_PSERIES=y
+CONFIG_PPC_RADIX_MMU=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROFILING=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SMP=y
+CONFIG_SOC_VIRT=y
+CONFIG_SYSVIPC=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_THREAD_SHIFT=14
+CONFIG_TLS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_UPROBES=y
+CONFIG_USER_NS=y
+CONFIG_VETH=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VSOCKETS_LOOPBACK=y
+CONFIG_VSX=y
+CONFIG_XFRM_USER=y
diff --git a/tools/testing/selftests/bpf/config.riscv64 b/tools/testing/selftests/bpf/config.riscv64
new file mode 100644
index 000000000000..7bee24a79a71
--- /dev/null
+++ b/tools/testing/selftests/bpf/config.riscv64
@@ -0,0 +1,83 @@
+CONFIG_AUDIT=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BONDING=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_BPF_PRELOAD_UMD=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CPUSETS=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_FS=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_EXPERT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FRAME_POINTER=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HUGETLBFS=y
+CONFIG_INET=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KPROBES=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_NET=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_VRF=y
+CONFIG_NONPORTABLE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=256
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PCI=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROFILING=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_RISCV_ISA_C=y
+CONFIG_RISCV_PMU=y
+CONFIG_RISCV_PMU_SBI=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SMP=y
+CONFIG_SOC_VIRT=y
+CONFIG_SYSVIPC=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TLS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_UPROBES=y
+CONFIG_USER_NS=y
+CONFIG_VETH=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VSOCKETS_LOOPBACK=y
+CONFIG_XFRM_USER=y
diff --git a/tools/testing/selftests/bpf/config.s390x b/tools/testing/selftests/bpf/config.s390x
new file mode 100644
index 000000000000..db61878148e4
--- /dev/null
+++ b/tools/testing/selftests/bpf/config.s390x
@@ -0,0 +1,125 @@
+CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
+CONFIG_AUDIT=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BONDING=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_JIT_DEFAULT_ON=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_BPF_PRELOAD_UMD=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_CPUSETS=y
+CONFIG_CRASH_DUMP=y
+CONFIG_CRYPTO_USER_API_RNG=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_SG=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVTMPFS=y
+CONFIG_EXPERT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FANOTIFY=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_HAVE_EBPF_JIT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_MARCH_Z10_FEATURES=y
+CONFIG_HAVE_MARCH_Z196_FEATURES=y
+CONFIG_HEADERS_INSTALL=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HW_RANDOM=y
+CONFIG_HZ_100=y
+CONFIG_IDLE_PAGE_TRACKING=y
+CONFIG_IKHEADERS=y
+CONFIG_INET6_ESP=y
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPVLAN=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KERNEL_UNCOMPRESSED=y
+CONFIG_KPROBES=y
+CONFIG_KPROBES_ON_FTRACE=y
+CONFIG_KRETPROBES=y
+CONFIG_KSM=y
+CONFIG_LATENCYTOP=y
+CONFIG_LIVEPATCH=y
+CONFIG_LOCK_STAT=y
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MARCH_Z196=y
+CONFIG_MARCH_Z196_TUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_NAMESPACES=y
+CONFIG_NET=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_NET_KEY=y
+CONFIG_NET_VRF=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=256
+CONFIG_NUMA=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PCI=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROFILING=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PTDUMP_DEBUGFS=y
+CONFIG_RC_DEVICES=y
+CONFIG_RC_LOOPBACK=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SAMPLE_SECCOMP=y
+CONFIG_SAMPLES=y
+CONFIG_SCHED_TRACER=y
+CONFIG_SCSI=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_STACK_TRACER=y
+CONFIG_STATIC_KEYS_SELFTEST=y
+CONFIG_SYSVIPC=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TLS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_UPROBES=y
+CONFIG_USER_NS=y
+CONFIG_VETH=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VSOCKETS_LOOPBACK=y
+CONFIG_XFRM_USER=y
diff --git a/tools/testing/selftests/bpf/config.vm b/tools/testing/selftests/bpf/config.vm
new file mode 100644
index 000000000000..da543b24c144
--- /dev/null
+++ b/tools/testing/selftests/bpf/config.vm
@@ -0,0 +1,15 @@
+CONFIG_9P_FS_POSIX_ACL=y
+CONFIG_9P_FS_SECURITY=y
+CONFIG_9P_FS=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
+CONFIG_FUSE_FS=y
+CONFIG_FUSE_PASSTHROUGH=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_NET_9P=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_FS=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_VSOCKETS_COMMON=y
diff --git a/tools/testing/selftests/bpf/config.x86_64 b/tools/testing/selftests/bpf/config.x86_64
new file mode 100644
index 000000000000..42ad817b00ae
--- /dev/null
+++ b/tools/testing/selftests/bpf/config.x86_64
@@ -0,0 +1,227 @@
+CONFIG_AGP=y
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+CONFIG_AGP_SIS=y
+CONFIG_AGP_VIA=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_AUDIT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BINFMT_MISC=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BONDING=y
+CONFIG_BOOTTIME_TRACING=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_BPF_PRELOAD_UMD=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+CONFIG_CMA=y
+CONFIG_CMA_AREAS=7
+CONFIG_COMPAT_32BIT_TIME=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPUSETS=y
+CONFIG_CRYPTO_BLAKE2B=y
+CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_XXHASH=y
+CONFIG_DCB=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEFAULT_FQ_CODEL=y
+CONFIG_DEFAULT_RENO=y
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
+CONFIG_EXPERT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FAIL_FUNCTION=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_VESA=y
+CONFIG_FONT_8x16=y
+CONFIG_FONT_MINI_4x6=y
+CONFIG_FONTS=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_GART_IOMMU=y
+CONFIG_GENERIC_PHY=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_KYE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HPET=y
+CONFIG_HUGETLBFS=y
+CONFIG_HWPOISON_INJECT=y
+CONFIG_HZ_1000=y
+CONFIG_INET=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INTEL_POWERCLAMP=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IRQ_POLL=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_KEXEC=y
+CONFIG_KPROBES=y
+CONFIG_KSM=y
+CONFIG_LEGACY_VSYSCALL_NONE=y
+CONFIG_LOG_BUF_SHIFT=21
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=0
+CONFIG_LOGO=y
+CONFIG_LSM="selinux,bpf,integrity"
+CONFIG_MAC_PARTITION=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MCORE2=y
+CONFIG_MEMCG=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_NAMESPACES=y
+CONFIG_NET=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_SCH_DEFAULT=y
+CONFIG_NET_SCH_FQ_CODEL=y
+CONFIG_NET_TC_SKB_EXT=y
+CONFIG_NET_VRF=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NETFILTER_NETLINK_QUEUE=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETLABEL=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NO_HZ=y
+CONFIG_NR_CPUS=128
+CONFIG_NUMA=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_NVMEM=y
+CONFIG_OSF_PARTITION=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PCI=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PHYSICAL_ALIGN=0x1000000
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_PREEMPT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROFILING=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_RC_DEVICES=y
+CONFIG_RC_LOOPBACK=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SMP=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_SYNC_FILE=y
+CONFIG_SYSVIPC=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_MD5SIG=y
+CONFIG_TLS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_USER_NS=y
+CONFIG_VALIDATE_FS_PARSER=y
+CONFIG_VETH=y
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VSOCKETS_LOOPBACK=y
+CONFIG_X86_ACPI_CPUFREQ=y
+CONFIG_X86_CPUID=y
+CONFIG_X86_MSR=y
+CONFIG_X86_POWERNOW_K8=y
+CONFIG_XDP_SOCKETS_DIAG=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_USER=y
+CONFIG_ZEROPLUS_FF=y
diff --git a/tools/testing/selftests/bpf/disasm.c b/tools/testing/selftests/bpf/disasm.c
new file mode 120000
index 000000000000..b1571927bd54
--- /dev/null
+++ b/tools/testing/selftests/bpf/disasm.c
@@ -0,0 +1 @@
+../../../../kernel/bpf/disasm.c \ No newline at end of file
diff --git a/tools/testing/selftests/bpf/disasm.h b/tools/testing/selftests/bpf/disasm.h
new file mode 120000
index 000000000000..8054fd497340
--- /dev/null
+++ b/tools/testing/selftests/bpf/disasm.h
@@ -0,0 +1 @@
+../../../../kernel/bpf/disasm.h \ No newline at end of file
diff --git a/tools/testing/selftests/bpf/disasm_helpers.c b/tools/testing/selftests/bpf/disasm_helpers.c
new file mode 100644
index 000000000000..f529f1c8c171
--- /dev/null
+++ b/tools/testing/selftests/bpf/disasm_helpers.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+#include <bpf/bpf.h>
+#include "disasm.h"
+
+struct print_insn_context {
+ char scratch[16];
+ char *buf;
+ size_t sz;
+};
+
+static void print_insn_cb(void *private_data, const char *fmt, ...)
+{
+ struct print_insn_context *ctx = private_data;
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(ctx->buf, ctx->sz, fmt, args);
+ va_end(args);
+}
+
+static const char *print_call_cb(void *private_data, const struct bpf_insn *insn)
+{
+ struct print_insn_context *ctx = private_data;
+
+	/* For pseudo calls, verifier.c:jit_subprogs() moves the original
+	 * imm into insn->off and changes insn->imm to be an index of
+	 * the subprog instead.
+	 */
+ if (insn->src_reg == BPF_PSEUDO_CALL) {
+ snprintf(ctx->scratch, sizeof(ctx->scratch), "%+d", insn->off);
+ return ctx->scratch;
+ }
+
+ return NULL;
+}
+
+struct bpf_insn *disasm_insn(struct bpf_insn *insn, char *buf, size_t buf_sz)
+{
+ struct print_insn_context ctx = {
+ .buf = buf,
+ .sz = buf_sz,
+ };
+ struct bpf_insn_cbs cbs = {
+ .cb_print = print_insn_cb,
+ .cb_call = print_call_cb,
+ .private_data = &ctx,
+ };
+ char *tmp, *pfx_end, *sfx_start;
+ bool double_insn;
+ int len;
+
+ print_bpf_insn(&cbs, insn, true);
+	/* We share code with the kernel BPF disassembler, which adds a '(FF) '
+	 * prefix to each instruction (FF stands for the instruction `code` byte).
+	 * Remove the prefix in place, and also simplify call instructions.
+	 * E.g.: "(85) call foo#10" -> "call foo".
+	 * Also strip the trailing newline (the 'max(strlen(buf) - 1, 0)' thing).
+	 */
+ pfx_end = buf + 5;
+ sfx_start = buf + max((int)strlen(buf) - 1, 0);
+ if (strncmp(pfx_end, "call ", 5) == 0 && (tmp = strrchr(buf, '#')))
+ sfx_start = tmp;
+ len = sfx_start - pfx_end;
+ memmove(buf, pfx_end, len);
+ buf[len] = 0;
+ double_insn = insn->code == (BPF_LD | BPF_IMM | BPF_DW);
+ return insn + (double_insn ? 2 : 1);
+}
diff --git a/tools/testing/selftests/bpf/disasm_helpers.h b/tools/testing/selftests/bpf/disasm_helpers.h
new file mode 100644
index 000000000000..7b26cab70099
--- /dev/null
+++ b/tools/testing/selftests/bpf/disasm_helpers.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+#ifndef __DISASM_HELPERS_H
+#define __DISASM_HELPERS_H
+
+#include <stdlib.h>
+
+struct bpf_insn;
+
+struct bpf_insn *disasm_insn(struct bpf_insn *insn, char *buf, size_t buf_sz);
+
+#endif /* __DISASM_HELPERS_H */
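The header above exposes a single entry point. A minimal sketch of how it might be consumed (illustrative only; dump_prog, prog, and prog_len are hypothetical caller-side names, not part of this patch):

/* Illustrative sketch: print a program one instruction per line using
 * disasm_insn(). Assumes `prog` points at `prog_len` struct bpf_insn
 * slots obtained elsewhere (e.g. from bpf_prog_get_info_by_fd()).
 */
#include <stdio.h>
#include <linux/bpf.h>
#include "disasm_helpers.h"

static void dump_prog(struct bpf_insn *prog, size_t prog_len)
{
	struct bpf_insn *insn = prog;
	char buf[64];

	while (insn < prog + prog_len) {
		/* disasm_insn() fills buf and returns a pointer to the
		 * next instruction, skipping two slots for ld_imm64.
		 */
		insn = disasm_insn(insn, buf, sizeof(buf));
		printf("%s\n", buf);
	}
}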
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c
index 3fd83b9dc1bf..c8be6406777f 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.c
+++ b/tools/testing/selftests/bpf/flow_dissector_load.c
@@ -11,13 +11,12 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include "bpf_rlimit.h"
#include "flow_dissector_load.h"
const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector";
const char *cfg_map_name = "jmp_table";
bool cfg_attach = true;
-char *cfg_section_name;
+char *cfg_prog_name;
char *cfg_path_name;
static void load_and_attach_program(void)
@@ -25,7 +24,10 @@ static void load_and_attach_program(void)
int prog_fd, ret;
struct bpf_object *obj;
- ret = bpf_flow_load(&obj, cfg_path_name, cfg_section_name,
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
+ ret = bpf_flow_load(&obj, cfg_path_name, cfg_prog_name,
cfg_map_name, NULL, &prog_fd, NULL);
if (ret)
error(1, 0, "bpf_flow_load %s", cfg_path_name);
@@ -75,15 +77,15 @@ static void parse_opts(int argc, char **argv)
break;
case 'p':
if (cfg_path_name)
- error(1, 0, "only one prog name can be given");
+ error(1, 0, "only one path can be given");
cfg_path_name = optarg;
break;
case 's':
- if (cfg_section_name)
- error(1, 0, "only one section can be given");
+ if (cfg_prog_name)
+ error(1, 0, "only one prog can be given");
- cfg_section_name = optarg;
+ cfg_prog_name = optarg;
break;
}
}
@@ -94,7 +96,7 @@ static void parse_opts(int argc, char **argv)
if (cfg_attach && !cfg_path_name)
error(1, 0, "must provide a path to the BPF program");
- if (cfg_attach && !cfg_section_name)
+ if (cfg_attach && !cfg_prog_name)
error(1, 0, "must provide a section name");
}
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h
index 7290401ec172..f40b585f4e7e 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.h
+++ b/tools/testing/selftests/bpf/flow_dissector_load.h
@@ -4,10 +4,11 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
+#include "testing_helpers.h"
static inline int bpf_flow_load(struct bpf_object **obj,
const char *path,
- const char *section_name,
+ const char *prog_name,
const char *map_name,
const char *keys_map_name,
int *prog_fd,
@@ -18,18 +19,12 @@ static inline int bpf_flow_load(struct bpf_object **obj,
int prog_array_fd;
int ret, fd, i;
- ret = bpf_prog_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj,
+ ret = bpf_prog_test_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj,
prog_fd);
if (ret)
return ret;
- main_prog = NULL;
- bpf_object__for_each_program(prog, *obj) {
- if (strcmp(section_name, bpf_program__section_name(prog)) == 0) {
- main_prog = prog;
- break;
- }
- }
+ main_prog = bpf_object__find_program_by_name(*obj, prog_name);
if (!main_prog)
return -1;
diff --git a/tools/testing/selftests/bpf/generate_udp_fragments.py b/tools/testing/selftests/bpf/generate_udp_fragments.py
new file mode 100755
index 000000000000..2b8a1187991c
--- /dev/null
+++ b/tools/testing/selftests/bpf/generate_udp_fragments.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+This script helps generate fragmented UDP packets.
+
+While it is technically possible to generate fragmented
+packets dynamically in C, such code is much harder to read
+and write. `scapy` is a de facto industry standard and is
+easy to read and write.
+
+So we instead write this script to generate a valid C
+header. Rerun the script and commit the generated file
+after any modifications.
+"""
+
+import argparse
+import os
+
+from scapy.all import *
+
+
+# These constants must stay in sync with `ip_check_defrag.c`
+VETH1_ADDR = "172.16.1.200"
+VETH0_ADDR6 = "fc00::100"
+VETH1_ADDR6 = "fc00::200"
+CLIENT_PORT = 48878
+SERVER_PORT = 48879
+MAGIC_MESSAGE = "THIS IS THE ORIGINAL MESSAGE, PLEASE REASSEMBLE ME"
+
+
+def print_header(f):
+ f.write("// SPDX-License-Identifier: GPL-2.0\n")
+ f.write("/* DO NOT EDIT -- this file is generated */\n")
+ f.write("\n")
+ f.write("#ifndef _IP_CHECK_DEFRAG_FRAGS_H\n")
+ f.write("#define _IP_CHECK_DEFRAG_FRAGS_H\n")
+ f.write("\n")
+ f.write("#include <stdint.h>\n")
+ f.write("\n")
+
+
+def print_frags(f, frags, v6):
+ for idx, frag in enumerate(frags):
+ # 10 bytes per line to keep width in check
+ chunks = [frag[i : i + 10] for i in range(0, len(frag), 10)]
+ chunks_fmted = [", ".join([str(hex(b)) for b in chunk]) for chunk in chunks]
+ suffix = "6" if v6 else ""
+
+ f.write(f"static uint8_t frag{suffix}_{idx}[] = {{\n")
+ for chunk in chunks_fmted:
+ f.write(f"\t{chunk},\n")
+ f.write(f"}};\n")
+
+
+def print_trailer(f):
+ f.write("\n")
+ f.write("#endif /* _IP_CHECK_DEFRAG_FRAGS_H */\n")
+
+
+def main(f):
+ # srcip of 0 is filled in by IP_HDRINCL
+ sip = "0.0.0.0"
+ sip6 = VETH0_ADDR6
+ dip = VETH1_ADDR
+ dip6 = VETH1_ADDR6
+ sport = CLIENT_PORT
+ dport = SERVER_PORT
+ payload = MAGIC_MESSAGE.encode()
+
+ # Disable UDPv4 checksums to keep code simpler
+ pkt = IP(src=sip,dst=dip) / UDP(sport=sport,dport=dport,chksum=0) / Raw(load=payload)
+ # UDPv6 requires a checksum
+ # Also pin the ipv6 fragment header ID, otherwise it's a random value
+ pkt6 = IPv6(src=sip6,dst=dip6) / IPv6ExtHdrFragment(id=0xBEEF) / UDP(sport=sport,dport=dport) / Raw(load=payload)
+
+ frags = [f.build() for f in pkt.fragment(24)]
+ frags6 = [f.build() for f in fragment6(pkt6, 72)]
+
+ print_header(f)
+ print_frags(f, frags, False)
+ print_frags(f, frags6, True)
+ print_trailer(f)
+
+
+if __name__ == "__main__":
+ dir = os.path.dirname(os.path.realpath(__file__))
+ header = f"{dir}/ip_check_defrag_frags.h"
+ with open(header, "w") as f:
+ main(f)
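A hedged sketch of how the generated fragments might be put on the wire from C (illustrative only; assumes the generated ip_check_defrag_frags.h shown further below, the veth address from the script's constants, and trims error handling):

/* Send the pre-built IPv4 fragments over a raw socket. With IP_HDRINCL
 * the kernel fills in the zero source address and the header checksum,
 * matching the script's "srcip of 0 is filled in by IP_HDRINCL" note.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>
#include "ip_check_defrag_frags.h"

static int send_frags(void)
{
	struct sockaddr_in dst = { .sin_family = AF_INET };
	uint8_t *frags[] = { frag_0, frag_1, frag_2 };
	size_t lens[] = { sizeof(frag_0), sizeof(frag_1), sizeof(frag_2) };
	int one = 1, i, fd;

	fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
	if (fd < 0)
		return -1;
	setsockopt(fd, IPPROTO_IP, IP_HDRINCL, &one, sizeof(one));
	inet_pton(AF_INET, "172.16.1.200", &dst.sin_addr); /* VETH1_ADDR */
	for (i = 0; i < 3; i++)
		sendto(fd, frags[i], lens[i], 0,
		       (struct sockaddr *)&dst, sizeof(dst));
	close(fd);
	return 0;
}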
diff --git a/tools/testing/selftests/bpf/get_cgroup_id_user.c b/tools/testing/selftests/bpf/get_cgroup_id_user.c
deleted file mode 100644
index b8d6aef99db4..000000000000
--- a/tools/testing/selftests/bpf/get_cgroup_id_user.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018 Facebook
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <syscall.h>
-#include <unistd.h>
-#include <linux/perf_event.h>
-#include <sys/ioctl.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-
-#include <linux/bpf.h>
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-
-#include "cgroup_helpers.h"
-#include "bpf_rlimit.h"
-
-#define CHECK(condition, tag, format...) ({ \
- int __ret = !!(condition); \
- if (__ret) { \
- printf("%s:FAIL:%s ", __func__, tag); \
- printf(format); \
- } else { \
- printf("%s:PASS:%s\n", __func__, tag); \
- } \
- __ret; \
-})
-
-static int bpf_find_map(const char *test, struct bpf_object *obj,
- const char *name)
-{
- struct bpf_map *map;
-
- map = bpf_object__find_map_by_name(obj, name);
- if (!map)
- return -1;
- return bpf_map__fd(map);
-}
-
-#define TEST_CGROUP "/test-bpf-get-cgroup-id/"
-
-int main(int argc, char **argv)
-{
- const char *probe_name = "syscalls/sys_enter_nanosleep";
- const char *file = "get_cgroup_id_kern.o";
- int err, bytes, efd, prog_fd, pmu_fd;
- int cgroup_fd, cgidmap_fd, pidmap_fd;
- struct perf_event_attr attr = {};
- struct bpf_object *obj;
- __u64 kcgid = 0, ucgid;
- __u32 key = 0, pid;
- int exit_code = 1;
- char buf[256];
-
- cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
- if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno))
- return 1;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
- goto cleanup_cgroup_env;
-
- cgidmap_fd = bpf_find_map(__func__, obj, "cg_ids");
- if (CHECK(cgidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
- cgidmap_fd, errno))
- goto close_prog;
-
- pidmap_fd = bpf_find_map(__func__, obj, "pidmap");
- if (CHECK(pidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
- pidmap_fd, errno))
- goto close_prog;
-
- pid = getpid();
- bpf_map_update_elem(pidmap_fd, &key, &pid, 0);
-
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/%s/id", probe_name);
- efd = open(buf, O_RDONLY, 0);
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
- goto close_prog;
- bytes = read(efd, buf, sizeof(buf));
- close(efd);
- if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
- "bytes %d errno %d\n", bytes, errno))
- goto close_prog;
-
- attr.config = strtol(buf, NULL, 0);
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type = PERF_SAMPLE_RAW;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
-
- /* attach to this pid so the all bpf invocations will be in the
- * cgroup associated with this pid.
- */
- pmu_fd = syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0);
- if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
- errno))
- goto close_prog;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- /* trigger some syscalls */
- sleep(1);
-
- err = bpf_map_lookup_elem(cgidmap_fd, &key, &kcgid);
- if (CHECK(err, "bpf_map_lookup_elem", "err %d errno %d\n", err, errno))
- goto close_pmu;
-
- ucgid = get_cgroup_id(TEST_CGROUP);
- if (CHECK(kcgid != ucgid, "compare_cgroup_id",
- "kern cgid %llx user cgid %llx", kcgid, ucgid))
- goto close_pmu;
-
- exit_code = 0;
- printf("%s:PASS\n", argv[0]);
-
-close_pmu:
- close(pmu_fd);
-close_prog:
- bpf_object__close(obj);
-cleanup_cgroup_env:
- cleanup_cgroup_environment();
- return exit_code;
-}
diff --git a/tools/testing/selftests/bpf/gnu/stubs.h b/tools/testing/selftests/bpf/gnu/stubs.h
index 719225b16626..1c638d9dce1a 100644
--- a/tools/testing/selftests/bpf/gnu/stubs.h
+++ b/tools/testing/selftests/bpf/gnu/stubs.h
@@ -1 +1 @@
-/* dummy .h to trick /usr/include/features.h to work with 'clang -target bpf' */
+/* dummy .h to trick /usr/include/features.h to work with 'clang --target=bpf' */
diff --git a/tools/testing/selftests/bpf/ima_setup.sh b/tools/testing/selftests/bpf/ima_setup.sh
index 8e62581113a3..8ecead4ccad0 100755
--- a/tools/testing/selftests/bpf/ima_setup.sh
+++ b/tools/testing/selftests/bpf/ima_setup.sh
@@ -12,7 +12,7 @@ LOG_FILE="$(mktemp /tmp/ima_setup.XXXX.log)"
usage()
{
- echo "Usage: $0 <setup|cleanup|run> <existing_tmp_dir>"
+ echo "Usage: $0 <setup|cleanup|run|modify-bin|restore-bin|load-policy> <existing_tmp_dir>"
exit 1
}
@@ -51,6 +51,7 @@ setup()
ensure_mount_securityfs
echo "measure func=BPRM_CHECK fsuuid=${mount_uuid}" > ${IMA_POLICY_FILE}
+ echo "measure func=BPRM_CHECK fsuuid=${mount_uuid}" > ${mount_dir}/policy_test
}
cleanup() {
@@ -77,6 +78,32 @@ run()
exec "${copied_bin_path}"
}
+modify_bin()
+{
+ local tmp_dir="$1"
+ local mount_dir="${tmp_dir}/mnt"
+ local copied_bin_path="${mount_dir}/$(basename ${TEST_BINARY})"
+
+ echo "mod" >> "${copied_bin_path}"
+}
+
+restore_bin()
+{
+ local tmp_dir="$1"
+ local mount_dir="${tmp_dir}/mnt"
+ local copied_bin_path="${mount_dir}/$(basename ${TEST_BINARY})"
+
+ truncate -s -4 "${copied_bin_path}"
+}
+
+load_policy()
+{
+ local tmp_dir="$1"
+ local mount_dir="${tmp_dir}/mnt"
+
+ echo ${mount_dir}/policy_test > ${IMA_POLICY_FILE} 2> /dev/null
+}
+
catch()
{
local exit_code="$1"
@@ -105,6 +132,12 @@ main()
cleanup "${tmp_dir}"
elif [[ "${action}" == "run" ]]; then
run "${tmp_dir}"
+ elif [[ "${action}" == "modify-bin" ]]; then
+ modify_bin "${tmp_dir}"
+ elif [[ "${action}" == "restore-bin" ]]; then
+ restore_bin "${tmp_dir}"
+ elif [[ "${action}" == "load-policy" ]]; then
+ load_policy "${tmp_dir}"
else
echo "Unknown action: ${action}"
exit 1
diff --git a/tools/testing/selftests/bpf/io_helpers.c b/tools/testing/selftests/bpf/io_helpers.c
new file mode 100644
index 000000000000..4ada0a74aa1f
--- /dev/null
+++ b/tools/testing/selftests/bpf/io_helpers.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <sys/select.h>
+#include <unistd.h>
+#include <errno.h>
+
+int read_with_timeout(int fd, char *buf, size_t count, long usec)
+{
+ const long M = 1000 * 1000;
+ struct timeval tv = { usec / M, usec % M };
+ fd_set fds;
+ int err;
+
+ FD_ZERO(&fds);
+ FD_SET(fd, &fds);
+ err = select(fd + 1, &fds, NULL, NULL, &tv);
+ if (err < 0)
+ return err;
+ if (FD_ISSET(fd, &fds))
+ return read(fd, buf, count);
+ return -EAGAIN;
+}
diff --git a/tools/testing/selftests/bpf/io_helpers.h b/tools/testing/selftests/bpf/io_helpers.h
new file mode 100644
index 000000000000..21e1134cd3ce
--- /dev/null
+++ b/tools/testing/selftests/bpf/io_helpers.h
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <unistd.h>
+
+/* Like a regular read(2), but allows specifying a timeout in microseconds.
+ * Returns -EAGAIN on timeout.
+ */
+int read_with_timeout(int fd, char *buf, size_t count, long usec);
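A minimal usage sketch for read_with_timeout() (illustrative only; poll_pipe and the 100ms budget are arbitrary choices for the example):

#include <errno.h>
#include <stdio.h>
#include "io_helpers.h"

static void poll_pipe(int pipe_fd)
{
	char buf[128];
	int n;

	/* Wait up to 100ms; -EAGAIN means nothing arrived in time */
	n = read_with_timeout(pipe_fd, buf, sizeof(buf) - 1, 100 * 1000);
	if (n == -EAGAIN)
		printf("timed out\n");
	else if (n >= 0)
		printf("read %d bytes\n", n);
}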
diff --git a/tools/testing/selftests/bpf/ip_check_defrag_frags.h b/tools/testing/selftests/bpf/ip_check_defrag_frags.h
new file mode 100644
index 000000000000..70ab7e9fa22b
--- /dev/null
+++ b/tools/testing/selftests/bpf/ip_check_defrag_frags.h
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/* DO NOT EDIT -- this file is generated */
+
+#ifndef _IP_CHECK_DEFRAG_FRAGS_H
+#define _IP_CHECK_DEFRAG_FRAGS_H
+
+#include <stdint.h>
+
+static uint8_t frag_0[] = {
+ 0x45, 0x0, 0x0, 0x2c, 0x0, 0x1, 0x20, 0x0, 0x40, 0x11,
+ 0xac, 0xe8, 0x0, 0x0, 0x0, 0x0, 0xac, 0x10, 0x1, 0xc8,
+ 0xbe, 0xee, 0xbe, 0xef, 0x0, 0x3a, 0x0, 0x0, 0x54, 0x48,
+ 0x49, 0x53, 0x20, 0x49, 0x53, 0x20, 0x54, 0x48, 0x45, 0x20,
+ 0x4f, 0x52, 0x49, 0x47,
+};
+static uint8_t frag_1[] = {
+ 0x45, 0x0, 0x0, 0x2c, 0x0, 0x1, 0x20, 0x3, 0x40, 0x11,
+ 0xac, 0xe5, 0x0, 0x0, 0x0, 0x0, 0xac, 0x10, 0x1, 0xc8,
+ 0x49, 0x4e, 0x41, 0x4c, 0x20, 0x4d, 0x45, 0x53, 0x53, 0x41,
+ 0x47, 0x45, 0x2c, 0x20, 0x50, 0x4c, 0x45, 0x41, 0x53, 0x45,
+ 0x20, 0x52, 0x45, 0x41,
+};
+static uint8_t frag_2[] = {
+ 0x45, 0x0, 0x0, 0x1e, 0x0, 0x1, 0x0, 0x6, 0x40, 0x11,
+ 0xcc, 0xf0, 0x0, 0x0, 0x0, 0x0, 0xac, 0x10, 0x1, 0xc8,
+ 0x53, 0x53, 0x45, 0x4d, 0x42, 0x4c, 0x45, 0x20, 0x4d, 0x45,
+};
+static uint8_t frag6_0[] = {
+ 0x60, 0x0, 0x0, 0x0, 0x0, 0x20, 0x2c, 0x40, 0xfc, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x1, 0x0, 0xfc, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0,
+ 0x11, 0x0, 0x0, 0x1, 0x0, 0x0, 0xbe, 0xef, 0xbe, 0xee,
+ 0xbe, 0xef, 0x0, 0x3a, 0xd0, 0xf8, 0x54, 0x48, 0x49, 0x53,
+ 0x20, 0x49, 0x53, 0x20, 0x54, 0x48, 0x45, 0x20, 0x4f, 0x52,
+ 0x49, 0x47,
+};
+static uint8_t frag6_1[] = {
+ 0x60, 0x0, 0x0, 0x0, 0x0, 0x20, 0x2c, 0x40, 0xfc, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x1, 0x0, 0xfc, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0,
+ 0x11, 0x0, 0x0, 0x19, 0x0, 0x0, 0xbe, 0xef, 0x49, 0x4e,
+ 0x41, 0x4c, 0x20, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
+ 0x2c, 0x20, 0x50, 0x4c, 0x45, 0x41, 0x53, 0x45, 0x20, 0x52,
+ 0x45, 0x41,
+};
+static uint8_t frag6_2[] = {
+ 0x60, 0x0, 0x0, 0x0, 0x0, 0x12, 0x2c, 0x40, 0xfc, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x1, 0x0, 0xfc, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0,
+ 0x11, 0x0, 0x0, 0x30, 0x0, 0x0, 0xbe, 0xef, 0x53, 0x53,
+ 0x45, 0x4d, 0x42, 0x4c, 0x45, 0x20, 0x4d, 0x45,
+};
+
+#endif /* _IP_CHECK_DEFRAG_FRAGS_H */
diff --git a/tools/testing/selftests/bpf/jit_disasm_helpers.c b/tools/testing/selftests/bpf/jit_disasm_helpers.c
new file mode 100644
index 000000000000..febd6b12e372
--- /dev/null
+++ b/tools/testing/selftests/bpf/jit_disasm_helpers.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <test_progs.h>
+
+#ifdef HAVE_LLVM_SUPPORT
+
+#include <llvm-c/Core.h>
+#include <llvm-c/Disassembler.h>
+#include <llvm-c/Target.h>
+#include <llvm-c/TargetMachine.h>
+
+/* The intent is to use get_jited_program_text() for small test
+ * programs written in BPF assembly, so assume that 32 local labels
+ * are sufficient.
+ */
+#define MAX_LOCAL_LABELS 32
+
+/* Local labels are encoded as 'L42'; this requires 4 bytes of storage:
+ * 3 characters + zero byte
+ */
+#define LOCAL_LABEL_LEN 4
+
+static bool llvm_initialized;
+
+struct local_labels {
+ bool print_phase;
+ __u32 prog_len;
+ __u32 cnt;
+ __u32 pcs[MAX_LOCAL_LABELS];
+ char names[MAX_LOCAL_LABELS][LOCAL_LABEL_LEN];
+};
+
+static const char *lookup_symbol(void *data, uint64_t ref_value, uint64_t *ref_type,
+ uint64_t ref_pc, const char **ref_name)
+{
+ struct local_labels *labels = data;
+ uint64_t type = *ref_type;
+ int i;
+
+ *ref_type = LLVMDisassembler_ReferenceType_InOut_None;
+ *ref_name = NULL;
+ if (type != LLVMDisassembler_ReferenceType_In_Branch)
+ return NULL;
+	/* Depending on labels->print_phase, either discover local labels or
+	 * return the name assigned to a local jump target:
+	 * - if print_phase is true and ref_value is in labels->pcs,
+	 *   return the corresponding labels->names entry.
+	 * - if print_phase is false, save program-local jump targets
+	 *   in labels->pcs.
+	 */
+ if (labels->print_phase) {
+ for (i = 0; i < labels->cnt; ++i)
+ if (labels->pcs[i] == ref_value)
+ return labels->names[i];
+ } else {
+ if (labels->cnt < MAX_LOCAL_LABELS && ref_value < labels->prog_len)
+ labels->pcs[labels->cnt++] = ref_value;
+ }
+ return NULL;
+}
+
+static int disasm_insn(LLVMDisasmContextRef ctx, uint8_t *image, __u32 len, __u32 pc,
+ char *buf, __u32 buf_sz)
+{
+ int i, cnt;
+
+ cnt = LLVMDisasmInstruction(ctx, image + pc, len - pc, pc,
+ buf, buf_sz);
+ if (cnt > 0)
+ return cnt;
+ PRINT_FAIL("Can't disasm instruction at offset %d:", pc);
+ for (i = 0; i < 16 && pc + i < len; ++i)
+ printf(" %02x", image[pc + i]);
+ printf("\n");
+ return -EINVAL;
+}
+
+static int cmp_u32(const void *_a, const void *_b)
+{
+ __u32 a = *(__u32 *)_a;
+ __u32 b = *(__u32 *)_b;
+
+ if (a < b)
+ return -1;
+ if (a > b)
+ return 1;
+ return 0;
+}
+
+static int disasm_one_func(FILE *text_out, uint8_t *image, __u32 len)
+{
+ char *label, *colon, *triple = NULL;
+ LLVMDisasmContextRef ctx = NULL;
+ struct local_labels labels = {};
+ __u32 *label_pc, pc;
+ int i, cnt, err = 0;
+ char buf[64];
+
+ triple = LLVMGetDefaultTargetTriple();
+ ctx = LLVMCreateDisasm(triple, &labels, 0, NULL, lookup_symbol);
+ if (!ASSERT_OK_PTR(ctx, "LLVMCreateDisasm")) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ cnt = LLVMSetDisasmOptions(ctx, LLVMDisassembler_Option_PrintImmHex);
+ if (!ASSERT_EQ(cnt, 1, "LLVMSetDisasmOptions")) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* discover labels */
+ labels.prog_len = len;
+ pc = 0;
+ while (pc < len) {
+ cnt = disasm_insn(ctx, image, len, pc, buf, 1);
+ if (cnt < 0) {
+ err = cnt;
+ goto out;
+ }
+ pc += cnt;
+ }
+ qsort(labels.pcs, labels.cnt, sizeof(*labels.pcs), cmp_u32);
+ for (i = 0; i < labels.cnt; ++i)
+		/* gcc is unable to infer an upper bound for labels.cnt and
+		 * assumes it to be U32_MAX, which takes 10 decimal digits.
+		 * The snprintf below prints into labels.names[*],
+		 * which has space only for two digits and a letter.
+		 * To avoid a truncation warning, use (i % MAX_LOCAL_LABELS),
+		 * which informs gcc of the printed value's upper bound.
+		 */
+ snprintf(labels.names[i], sizeof(labels.names[i]), "L%d", i % MAX_LOCAL_LABELS);
+
+ /* now print with labels */
+ labels.print_phase = true;
+ pc = 0;
+ while (pc < len) {
+ cnt = disasm_insn(ctx, image, len, pc, buf, sizeof(buf));
+ if (cnt < 0) {
+ err = cnt;
+ goto out;
+ }
+ label_pc = bsearch(&pc, labels.pcs, labels.cnt, sizeof(*labels.pcs), cmp_u32);
+ label = "";
+ colon = "";
+ if (label_pc) {
+ label = labels.names[label_pc - labels.pcs];
+ colon = ":";
+ }
+ fprintf(text_out, "%x:\t", pc);
+ for (i = 0; i < cnt; ++i)
+ fprintf(text_out, "%02x ", image[pc + i]);
+ for (i = cnt * 3; i < 12 * 3; ++i)
+ fputc(' ', text_out);
+ fprintf(text_out, "%s%s%s\n", label, colon, buf);
+ pc += cnt;
+ }
+
+out:
+ if (triple)
+ LLVMDisposeMessage(triple);
+ if (ctx)
+ LLVMDisasmDispose(ctx);
+ return err;
+}
+
+int get_jited_program_text(int fd, char *text, size_t text_sz)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ __u32 jited_funcs, len, pc;
+ __u32 *func_lens = NULL;
+ FILE *text_out = NULL;
+ uint8_t *image = NULL;
+ int i, err = 0;
+
+ if (!llvm_initialized) {
+ LLVMInitializeAllTargetInfos();
+ LLVMInitializeAllTargetMCs();
+ LLVMInitializeAllDisassemblers();
+ llvm_initialized = 1;
+ }
+
+ text_out = fmemopen(text, text_sz, "w");
+	if (!ASSERT_OK_PTR(text_out, "fmemopen")) {
+ err = -errno;
+ goto out;
+ }
+
+	/* the first call is only to find out the jited program length */
+ err = bpf_prog_get_info_by_fd(fd, &info, &info_len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd #1"))
+ goto out;
+
+ len = info.jited_prog_len;
+ image = malloc(len);
+ if (!ASSERT_OK_PTR(image, "malloc(info.jited_prog_len)")) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ jited_funcs = info.nr_jited_func_lens;
+ func_lens = malloc(jited_funcs * sizeof(__u32));
+ if (!ASSERT_OK_PTR(func_lens, "malloc(info.nr_jited_func_lens)")) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.jited_prog_insns = (__u64)image;
+ info.jited_prog_len = len;
+ info.jited_func_lens = (__u64)func_lens;
+ info.nr_jited_func_lens = jited_funcs;
+ err = bpf_prog_get_info_by_fd(fd, &info, &info_len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd #2"))
+ goto out;
+
+ for (pc = 0, i = 0; i < jited_funcs; ++i) {
+ fprintf(text_out, "func #%d:\n", i);
+ disasm_one_func(text_out, image + pc, func_lens[i]);
+ fprintf(text_out, "\n");
+ pc += func_lens[i];
+ }
+
+out:
+ if (text_out)
+ fclose(text_out);
+ if (image)
+ free(image);
+ if (func_lens)
+ free(func_lens);
+ return err;
+}
+
+#else /* HAVE_LLVM_SUPPORT */
+
+int get_jited_program_text(int fd, char *text, size_t text_sz)
+{
+ if (env.verbosity >= VERBOSE_VERY)
+		printf("compiled w/o llvm development libraries, can't disassemble binary code\n");
+ return -EOPNOTSUPP;
+}
+
+#endif /* HAVE_LLVM_SUPPORT */
diff --git a/tools/testing/selftests/bpf/jit_disasm_helpers.h b/tools/testing/selftests/bpf/jit_disasm_helpers.h
new file mode 100644
index 000000000000..e6924fd65ecf
--- /dev/null
+++ b/tools/testing/selftests/bpf/jit_disasm_helpers.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+#ifndef __JIT_DISASM_HELPERS_H
+#define __JIT_DISASM_HELPERS_H
+
+#include <stddef.h>
+
+int get_jited_program_text(int fd, char *text, size_t text_sz);
+
+#endif /* __JIT_DISASM_HELPERS_H */
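A minimal usage sketch (illustrative only; assumes prog_fd refers to an already-loaded BPF program with the JIT enabled, and the 64 KiB buffer is an arbitrary choice):

#include <stdio.h>
#include "jit_disasm_helpers.h"

static void show_jited(int prog_fd)
{
	static char text[64 * 1024];

	/* Dumps "func #N:" sections of disassembled native code, or
	 * returns -EOPNOTSUPP when built without LLVM support.
	 */
	if (get_jited_program_text(prog_fd, text, sizeof(text)) == 0)
		printf("%s", text);
}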
diff --git a/tools/testing/selftests/bpf/json_writer.c b/tools/testing/selftests/bpf/json_writer.c
new file mode 120000
index 000000000000..5effa31e2f39
--- /dev/null
+++ b/tools/testing/selftests/bpf/json_writer.c
@@ -0,0 +1 @@
+../../../bpf/bpftool/json_writer.c \ No newline at end of file
diff --git a/tools/testing/selftests/bpf/json_writer.h b/tools/testing/selftests/bpf/json_writer.h
new file mode 120000
index 000000000000..e0a264c26752
--- /dev/null
+++ b/tools/testing/selftests/bpf/json_writer.h
@@ -0,0 +1 @@
+../../../bpf/bpftool/json_writer.h \ No newline at end of file
diff --git a/tools/testing/selftests/bpf/liburandom_read.map b/tools/testing/selftests/bpf/liburandom_read.map
new file mode 100644
index 000000000000..38a97a419a04
--- /dev/null
+++ b/tools/testing/selftests/bpf/liburandom_read.map
@@ -0,0 +1,15 @@
+LIBURANDOM_READ_1.0.0 {
+ global:
+ urandlib_api;
+ urandlib_api_sameoffset;
+ urandlib_read_without_sema;
+ urandlib_read_with_sema;
+ urandlib_read_with_sema_semaphore;
+ local:
+ *;
+};
+
+LIBURANDOM_READ_2.0.0 {
+ global:
+ urandlib_api;
+} LIBURANDOM_READ_1.0.0;
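The version script above defines two versions of urandlib_api. A hedged sketch of how a library typically binds two implementations to such versions with GNU symbol versioning (the *_v1/*_v2 names here are hypothetical; the actual binding lives in the liburandom_read sources):

int urandlib_api_v1(void);
int urandlib_api_v2(void);

/* '@' binds a compat version, '@@' marks the default one */
__asm__(".symver urandlib_api_v1, urandlib_api@LIBURANDOM_READ_1.0.0");
__asm__(".symver urandlib_api_v2, urandlib_api@@LIBURANDOM_READ_2.0.0");

int urandlib_api_v1(void) { return 1; }
int urandlib_api_v2(void) { return 2; }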
diff --git a/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c
index f0a64d8ac59a..b595556315bc 100644
--- a/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c
+++ b/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c
@@ -3,16 +3,20 @@
#include <stdio.h>
#include <errno.h>
#include <string.h>
+#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <test_maps.h>
+static int nr_cpus;
+
static void map_batch_update(int map_fd, __u32 max_entries, int *keys,
- int *values)
+ __s64 *values, bool is_pcpu)
{
- int i, err;
+ int i, j, err;
+ int cpu_offset = 0;
DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
.elem_flags = 0,
.flags = 0,
@@ -20,22 +24,41 @@ static void map_batch_update(int map_fd, __u32 max_entries, int *keys,
for (i = 0; i < max_entries; i++) {
keys[i] = i;
- values[i] = i + 1;
+ if (is_pcpu) {
+ cpu_offset = i * nr_cpus;
+ for (j = 0; j < nr_cpus; j++)
+ (values + cpu_offset)[j] = i + 1 + j;
+ } else {
+ values[i] = i + 1;
+ }
}
err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);
CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno));
}
-static void map_batch_verify(int *visited, __u32 max_entries,
- int *keys, int *values)
+static void map_batch_verify(int *visited, __u32 max_entries, int *keys,
+ __s64 *values, bool is_pcpu)
{
- int i;
+ int i, j;
+ int cpu_offset = 0;
memset(visited, 0, max_entries * sizeof(*visited));
for (i = 0; i < max_entries; i++) {
- CHECK(keys[i] + 1 != values[i], "key/value checking",
- "error: i %d key %d value %d\n", i, keys[i], values[i]);
+ if (is_pcpu) {
+ cpu_offset = i * nr_cpus;
+ for (j = 0; j < nr_cpus; j++) {
+ __s64 value = (values + cpu_offset)[j];
+ CHECK(keys[i] + j + 1 != value,
+ "key/value checking",
+ "error: i %d j %d key %d value %lld\n", i,
+ j, keys[i], value);
+ }
+ } else {
+ CHECK(keys[i] + 1 != values[i], "key/value checking",
+ "error: i %d key %d value %lld\n", i, keys[i],
+ values[i]);
+ }
visited[i] = 1;
}
for (i = 0; i < max_entries; i++) {
@@ -44,59 +67,53 @@ static void map_batch_verify(int *visited, __u32 max_entries,
}
}
-void test_array_map_batch_ops(void)
+static void __test_map_lookup_and_update_batch(bool is_pcpu)
{
- struct bpf_create_map_attr xattr = {
- .name = "array_map",
- .map_type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- };
- int map_fd, *keys, *values, *visited;
+ int map_fd, *keys, *visited;
__u32 count, total, total_success;
const __u32 max_entries = 10;
- bool nospace_err;
__u64 batch = 0;
- int err, step;
+ int err, step, value_size;
+ void *values;
DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
.elem_flags = 0,
.flags = 0,
);
- xattr.max_entries = max_entries;
- map_fd = bpf_create_map_xattr(&xattr);
+ map_fd = bpf_map_create(is_pcpu ? BPF_MAP_TYPE_PERCPU_ARRAY : BPF_MAP_TYPE_ARRAY,
+ "array_map", sizeof(int), sizeof(__s64), max_entries, NULL);
CHECK(map_fd == -1,
- "bpf_create_map_xattr()", "error:%s\n", strerror(errno));
+ "bpf_map_create()", "error:%s\n", strerror(errno));
+
+ value_size = sizeof(__s64);
+ if (is_pcpu)
+ value_size *= nr_cpus;
- keys = malloc(max_entries * sizeof(int));
- values = malloc(max_entries * sizeof(int));
- visited = malloc(max_entries * sizeof(int));
+ keys = calloc(max_entries, sizeof(*keys));
+ values = calloc(max_entries, value_size);
+ visited = calloc(max_entries, sizeof(*visited));
CHECK(!keys || !values || !visited, "malloc()", "error:%s\n",
strerror(errno));
- /* populate elements to the map */
- map_batch_update(map_fd, max_entries, keys, values);
-
/* test 1: lookup in a loop with various steps. */
total_success = 0;
for (step = 1; step < max_entries; step++) {
- map_batch_update(map_fd, max_entries, keys, values);
- map_batch_verify(visited, max_entries, keys, values);
+ map_batch_update(map_fd, max_entries, keys, values, is_pcpu);
+ map_batch_verify(visited, max_entries, keys, values, is_pcpu);
memset(keys, 0, max_entries * sizeof(*keys));
- memset(values, 0, max_entries * sizeof(*values));
+ memset(values, 0, max_entries * value_size);
batch = 0;
total = 0;
/* iteratively lookup/delete elements with 'step'
* elements each.
*/
count = step;
- nospace_err = false;
while (true) {
err = bpf_map_lookup_batch(map_fd,
- total ? &batch : NULL, &batch,
- keys + total,
- values + total,
- &count, &opts);
+ total ? &batch : NULL,
+ &batch, keys + total,
+ values + total * value_size,
+ &count, &opts);
CHECK((err && errno != ENOENT), "lookup with steps",
"error: %s\n", strerror(errno));
@@ -107,13 +124,10 @@ void test_array_map_batch_ops(void)
}
- if (nospace_err == true)
- continue;
-
CHECK(total != max_entries, "lookup with steps",
"total = %u, max_entries = %u\n", total, max_entries);
- map_batch_verify(visited, max_entries, keys, values);
+ map_batch_verify(visited, max_entries, keys, values, is_pcpu);
total_success++;
}
@@ -121,9 +135,31 @@ void test_array_map_batch_ops(void)
CHECK(total_success == 0, "check total_success",
"unexpected failure\n");
- printf("%s:PASS\n", __func__);
-
free(keys);
free(values);
free(visited);
+ close(map_fd);
+}
+
+static void array_map_batch_ops(void)
+{
+ __test_map_lookup_and_update_batch(false);
+ printf("test_%s:PASS\n", __func__);
+}
+
+static void array_percpu_map_batch_ops(void)
+{
+ __test_map_lookup_and_update_batch(true);
+ printf("test_%s:PASS\n", __func__);
+}
+
+void test_array_map_batch_ops(void)
+{
+ nr_cpus = libbpf_num_possible_cpus();
+
+ CHECK(nr_cpus < 0, "nr_cpus checking",
+ "error: get possible cpus failed");
+
+ array_map_batch_ops();
+ array_percpu_map_batch_ops();
}
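For reference, a condensed sketch of the batch-lookup iteration pattern exercised above (illustrative only; dump_all and its parameters are hypothetical and error handling is trimmed; libbpf in 1.0 mode returns -ENOENT directly, while older modes return -1 and set errno):

#include <bpf/bpf.h>
#include <errno.h>

static void dump_all(int map_fd, int *keys, __s64 *vals, __u32 max_entries)
{
	__u32 count, total = 0;
	__u64 batch;
	int err;

	do {
		count = max_entries - total;
		/* pass NULL as in_batch on the first call, then feed the
		 * returned batch token back in on subsequent calls
		 */
		err = bpf_map_lookup_batch(map_fd, total ? &batch : NULL,
					   &batch, keys + total, vals + total,
					   &count, NULL);
		total += count;
	} while (!err && total < max_entries);
	/* err == -ENOENT here means the whole map has been traversed */
}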
diff --git a/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c
index 976bf415fbdd..5da493b94ae2 100644
--- a/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c
+++ b/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c
@@ -3,6 +3,7 @@
#include <stdio.h>
#include <errno.h>
#include <string.h>
+#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
@@ -83,22 +84,15 @@ void __test_map_lookup_and_delete_batch(bool is_pcpu)
int err, step, value_size;
bool nospace_err;
void *values;
- struct bpf_create_map_attr xattr = {
- .name = "hash_map",
- .map_type = is_pcpu ? BPF_MAP_TYPE_PERCPU_HASH :
- BPF_MAP_TYPE_HASH,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- };
DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
.elem_flags = 0,
.flags = 0,
);
- xattr.max_entries = max_entries;
- map_fd = bpf_create_map_xattr(&xattr);
+ map_fd = bpf_map_create(is_pcpu ? BPF_MAP_TYPE_PERCPU_HASH : BPF_MAP_TYPE_HASH,
+ "hash_map", sizeof(int), sizeof(int), max_entries, NULL);
CHECK(map_fd == -1,
- "bpf_create_map_xattr()", "error:%s\n", strerror(errno));
+ "bpf_map_create()", "error:%s\n", strerror(errno));
value_size = is_pcpu ? sizeof(value) : sizeof(int);
keys = malloc(max_entries * sizeof(int));
@@ -203,7 +197,7 @@ void __test_map_lookup_and_delete_batch(bool is_pcpu)
CHECK(total != max_entries, "delete with steps",
"total = %u, max_entries = %u\n", total, max_entries);
- /* check map is empty, errono == ENOENT */
+ /* check map is empty, errno == ENOENT */
err = bpf_map_get_next_key(map_fd, NULL, &key);
CHECK(!err || errno != ENOENT, "bpf_map_get_next_key()",
"error: %s\n", strerror(errno));
@@ -262,6 +256,7 @@ void __test_map_lookup_and_delete_batch(bool is_pcpu)
free(visited);
if (!is_pcpu)
free(values);
+ close(map_fd);
}
void htab_map_batch_ops(void)
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
index 006be3963977..d32e4edac930 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
@@ -20,13 +20,14 @@
#include <string.h>
#include <time.h>
#include <unistd.h>
+#include <endian.h>
#include <arpa/inet.h>
#include <sys/time.h>
#include <bpf/bpf.h>
+#include <test_maps.h>
#include "bpf_util.h"
-#include "bpf_rlimit.h"
struct tlpm_node {
struct tlpm_node *next;
@@ -34,6 +35,22 @@ struct tlpm_node {
uint8_t key[];
};
+struct lpm_trie_bytes_key {
+ union {
+ struct bpf_lpm_trie_key_hdr hdr;
+ __u32 prefixlen;
+ };
+ unsigned char data[8];
+};
+
+struct lpm_trie_int_key {
+ union {
+ struct bpf_lpm_trie_key_hdr hdr;
+ __u32 prefixlen;
+ };
+ unsigned int data;
+};
+
static struct tlpm_node *tlpm_match(struct tlpm_node *list,
const uint8_t *key,
size_t n_bits);
@@ -208,9 +225,11 @@ static void test_lpm_order(void)
static void test_lpm_map(int keysize)
{
- size_t i, j, n_matches, n_matches_after_delete, n_nodes, n_lookups;
+ LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
+ volatile size_t n_matches, n_matches_after_delete;
+ size_t i, j, n_nodes, n_lookups;
struct tlpm_node *t, *list = NULL;
- struct bpf_lpm_trie_key *key;
+ struct bpf_lpm_trie_key_u8 *key;
uint8_t *data, *value;
int r, map;
@@ -222,7 +241,7 @@ static void test_lpm_map(int keysize)
n_matches = 0;
n_matches_after_delete = 0;
n_nodes = 1 << 8;
- n_lookups = 1 << 16;
+ n_lookups = 1 << 9;
data = alloca(keysize);
memset(data, 0, keysize);
@@ -233,11 +252,11 @@ static void test_lpm_map(int keysize)
key = alloca(sizeof(*key) + keysize);
memset(key, 0, sizeof(*key) + keysize);
- map = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
+ map = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL,
sizeof(*key) + keysize,
keysize + 1,
4096,
- BPF_F_NO_PREALLOC);
+ &opts);
assert(map >= 0);
for (i = 0; i < n_nodes; ++i) {
@@ -329,8 +348,9 @@ static void test_lpm_map(int keysize)
static void test_lpm_ipaddr(void)
{
- struct bpf_lpm_trie_key *key_ipv4;
- struct bpf_lpm_trie_key *key_ipv6;
+ LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
+ struct bpf_lpm_trie_key_u8 *key_ipv4;
+ struct bpf_lpm_trie_key_u8 *key_ipv6;
size_t key_size_ipv4;
size_t key_size_ipv6;
int map_fd_ipv4;
@@ -342,14 +362,14 @@ static void test_lpm_ipaddr(void)
key_ipv4 = alloca(key_size_ipv4);
key_ipv6 = alloca(key_size_ipv6);
- map_fd_ipv4 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
+ map_fd_ipv4 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL,
key_size_ipv4, sizeof(value),
- 100, BPF_F_NO_PREALLOC);
+ 100, &opts);
assert(map_fd_ipv4 >= 0);
- map_fd_ipv6 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
+ map_fd_ipv6 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL,
key_size_ipv6, sizeof(value),
- 100, BPF_F_NO_PREALLOC);
+ 100, &opts);
assert(map_fd_ipv6 >= 0);
/* Fill data some IPv4 and IPv6 address ranges */
@@ -406,16 +426,13 @@ static void test_lpm_ipaddr(void)
/* Test some lookups that should not match any entry */
inet_pton(AF_INET, "10.0.0.1", key_ipv4->data);
- assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -ENOENT);
inet_pton(AF_INET, "11.11.11.11", key_ipv4->data);
- assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -ENOENT);
inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data);
- assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -ENOENT);
close(map_fd_ipv4);
close(map_fd_ipv6);
@@ -423,7 +440,8 @@ static void test_lpm_ipaddr(void)
static void test_lpm_delete(void)
{
- struct bpf_lpm_trie_key *key;
+ LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
+ struct bpf_lpm_trie_key_u8 *key;
size_t key_size;
int map_fd;
__u64 value;
@@ -431,9 +449,9 @@ static void test_lpm_delete(void)
key_size = sizeof(*key) + sizeof(__u32);
key = alloca(key_size);
- map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
+ map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL,
key_size, sizeof(value),
- 100, BPF_F_NO_PREALLOC);
+ 100, &opts);
assert(map_fd >= 0);
/* Add nodes:
@@ -471,18 +489,15 @@ static void test_lpm_delete(void)
/* remove non-existent node */
key->prefixlen = 32;
inet_pton(AF_INET, "10.0.0.1", key->data);
- assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd, key, &value) == -ENOENT);
key->prefixlen = 30; // unused prefix so far
inet_pton(AF_INET, "192.255.0.0", key->data);
- assert(bpf_map_delete_elem(map_fd, key) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_delete_elem(map_fd, key) == -ENOENT);
key->prefixlen = 16; // same prefix as the root node
inet_pton(AF_INET, "192.255.0.0", key->data);
- assert(bpf_map_delete_elem(map_fd, key) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_delete_elem(map_fd, key) == -ENOENT);
/* assert initial lookup */
key->prefixlen = 32;
@@ -527,15 +542,15 @@ static void test_lpm_delete(void)
key->prefixlen = 32;
inet_pton(AF_INET, "192.168.128.1", key->data);
- assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd, key, &value) == -ENOENT);
close(map_fd);
}
static void test_lpm_get_next_key(void)
{
- struct bpf_lpm_trie_key *key_p, *next_key_p;
+ LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
+ struct bpf_lpm_trie_key_u8 *key_p, *next_key_p;
size_t key_size;
__u32 value = 0;
int map_fd;
@@ -544,13 +559,11 @@ static void test_lpm_get_next_key(void)
key_p = alloca(key_size);
next_key_p = alloca(key_size);
- map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size, sizeof(value),
- 100, BPF_F_NO_PREALLOC);
+ map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, sizeof(value), 100, &opts);
assert(map_fd >= 0);
/* empty tree. get_next_key should return ENOENT */
- assert(bpf_map_get_next_key(map_fd, NULL, key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == -ENOENT);
/* get and verify the first key, get the second one should fail. */
key_p->prefixlen = 16;
@@ -562,8 +575,7 @@ static void test_lpm_get_next_key(void)
assert(key_p->prefixlen == 16 && key_p->data[0] == 192 &&
key_p->data[1] == 168);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* no exact matching key should get the first one in post order. */
key_p->prefixlen = 8;
@@ -587,8 +599,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* Add one more element (total three) */
key_p->prefixlen = 24;
@@ -611,8 +622,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* Add one more element (total four) */
key_p->prefixlen = 24;
@@ -640,8 +650,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* Add one more element (total five) */
key_p->prefixlen = 28;
@@ -675,8 +684,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* no exact matching key should return the first one in post order */
key_p->prefixlen = 22;
@@ -703,9 +711,9 @@ static void *lpm_test_command(void *arg)
{
int i, j, ret, iter, key_size;
struct lpm_mt_test_info *info = arg;
- struct bpf_lpm_trie_key *key_p;
+ struct bpf_lpm_trie_key_u8 *key_p;
- key_size = sizeof(struct bpf_lpm_trie_key) + sizeof(__u32);
+ key_size = sizeof(*key_p) + sizeof(__u32);
key_p = alloca(key_size);
for (iter = 0; iter < info->iter; iter++)
for (i = 0; i < MAX_TEST_KEYS; i++) {
@@ -727,7 +735,7 @@ static void *lpm_test_command(void *arg)
ret = bpf_map_lookup_elem(info->map_fd, key_p, &value);
assert(ret == 0 || errno == ENOENT);
} else {
- struct bpf_lpm_trie_key *next_key_p = alloca(key_size);
+ struct bpf_lpm_trie_key_u8 *next_key_p = alloca(key_size);
ret = bpf_map_get_next_key(info->map_fd, key_p, next_key_p);
assert(ret == 0 || errno == ENOENT || errno == ENOMEM);
}
@@ -753,6 +761,7 @@ static void setup_lpm_mt_test_info(struct lpm_mt_test_info *info, int map_fd)
static void test_lpm_multi_thread(void)
{
+ LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
struct lpm_mt_test_info info[4];
size_t key_size, value_size;
pthread_t thread_id[4];
@@ -761,9 +770,8 @@ static void test_lpm_multi_thread(void)
/* create a trie */
value_size = sizeof(__u32);
- key_size = sizeof(struct bpf_lpm_trie_key) + value_size;
- map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size, value_size,
- 100, BPF_F_NO_PREALLOC);
+ key_size = sizeof(struct bpf_lpm_trie_key_hdr) + value_size;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, value_size, 100, &opts);
/* create 4 threads to test update, delete, lookup and get_next_key */
setup_lpm_mt_test_info(&info[0], map_fd);
@@ -780,7 +788,379 @@ static void test_lpm_multi_thread(void)
close(map_fd);
}
-int main(void)
+static int lpm_trie_create(unsigned int key_size, unsigned int value_size, unsigned int max_entries)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, opts);
+ int fd;
+
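+ /* LPM tries must be created with BPF_F_NO_PREALLOC; the kernel
+ * rejects preallocated LPM maps.
+ */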
+ opts.map_flags = BPF_F_NO_PREALLOC;
+ fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, "lpm_trie", key_size, value_size, max_entries,
+ &opts);
+ CHECK(fd < 0, "bpf_map_create", "error %d\n", errno);
+
+ return fd;
+}
+
+static void test_lpm_trie_update_flags(void)
+{
+ struct lpm_trie_int_key key;
+ unsigned int value, got;
+ int fd, err;
+
+ fd = lpm_trie_create(sizeof(key), sizeof(value), 3);
+
+ /* invalid flags (Error) */
+ key.prefixlen = 32;
+ key.data = 0;
+ value = 0;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_F_LOCK);
+ CHECK(err != -EINVAL, "invalid update flag", "error %d\n", err);
+
+ /* invalid flags (Error) */
+ key.prefixlen = 32;
+ key.data = 0;
+ value = 0;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST | BPF_EXIST);
+ CHECK(err != -EINVAL, "invalid update flag", "error %d\n", err);
+
+ /* overwrite an empty trie (Error) */
+ key.prefixlen = 32;
+ key.data = 0;
+ value = 2;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
+ CHECK(err != -ENOENT, "overwrite empty trie", "error %d\n", err);
+
+ /* add a new node */
+ key.prefixlen = 16;
+ key.data = 0;
+ value = 1;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+ CHECK(err, "add new elem", "error %d\n", err);
+ got = 0;
+ err = bpf_map_lookup_elem(fd, &key, &got);
+ CHECK(err, "lookup elem", "error %d\n", err);
+ CHECK(got != value, "check value", "got %d exp %d\n", got, value);
+
+ /* add the same node as new node (Error) */
+ err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+ CHECK(err != -EEXIST, "add new elem again", "error %d\n", err);
+
+ /* overwrite the existing node */
+ value = 4;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
+ CHECK(err, "overwrite elem", "error %d\n", err);
+ got = 0;
+ err = bpf_map_lookup_elem(fd, &key, &got);
+ CHECK(err, "lookup elem", "error %d\n", err);
+ CHECK(got != value, "check value", "got %d exp %d\n", got, value);
+
+ /* overwrite the node */
+ value = 1;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
+ CHECK(err, "update elem", "error %d\n", err);
+ got = 0;
+ err = bpf_map_lookup_elem(fd, &key, &got);
+ CHECK(err, "lookup elem", "error %d\n", err);
+ CHECK(got != value, "check value", "got %d exp %d\n", got, value);
+
+ /* overwrite a non-existent node which is the prefix of the first
+ * node (Error).
+ */
+ key.prefixlen = 8;
+ key.data = 0;
+ value = 2;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
+ CHECK(err != -ENOENT, "overwrite nonexistent elem", "error %d\n", err);
+
+ /* add a new node which is the prefix of the first node */
+ err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+ CHECK(err, "add new elem", "error %d\n", err);
+ got = 0;
+ err = bpf_map_lookup_elem(fd, &key, &got);
+ CHECK(err, "lookup key", "error %d\n", err);
+ CHECK(got != value, "check value", "got %d exp %d\n", got, value);
+
+ /* add another new node which will be the sibling of the first node */
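+ /* htobe32(1 << 23) sets bit 9 of the key data (data[1] = 0x80): the
+ * /9 prefix shares its first 8 bits with the existing nodes but
+ * diverges at bit 9, so it lands beside the /16 node instead of on
+ * the path to it.
+ */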
+ key.prefixlen = 9;
+ key.data = htobe32(1 << 23);
+ value = 5;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+ CHECK(err, "add new elem", "error %d\n", err);
+ got = 0;
+ err = bpf_map_lookup_elem(fd, &key, &got);
+ CHECK(err, "lookup key", "error %d\n", err);
+ CHECK(got != value, "check value", "got %d exp %d\n", got, value);
+
+ /* overwrite the third node */
+ value = 3;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
+ CHECK(err, "overwrite elem", "error %d\n", err);
+ got = 0;
+ err = bpf_map_lookup_elem(fd, &key, &got);
+ CHECK(err, "lookup key", "error %d\n", err);
+ CHECK(got != value, "check value", "got %d exp %d\n", got, value);
+
+ /* delete the second node to make it an intermediate node */
+ key.prefixlen = 8;
+ key.data = 0;
+ err = bpf_map_delete_elem(fd, &key);
+ CHECK(err, "del elem", "error %d\n", err);
+
+ /* overwrite the intermediate node (Error) */
+ value = 2;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
+ CHECK(err != -ENOENT, "overwrite nonexistent elem", "error %d\n", err);
+
+ close(fd);
+}
+
+static void test_lpm_trie_update_full_map(void)
+{
+ struct lpm_trie_int_key key;
+ int value, got;
+ int fd, err;
+
+ fd = lpm_trie_create(sizeof(key), sizeof(value), 3);
+
+ /* add a new node */
+ key.prefixlen = 16;
+ key.data = 0;
+ value = 0;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+ CHECK(err, "add new elem", "error %d\n", err);
+ got = 0;
+ err = bpf_map_lookup_elem(fd, &key, &got);
+ CHECK(err, "lookup elem", "error %d\n", err);
+ CHECK(got != value, "check value", "got %d exp %d\n", got, value);
+
+ /* add new node */
+ key.prefixlen = 8;
+ key.data = 0;
+ value = 1;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+ CHECK(err, "add new elem", "error %d\n", err);
+ got = 0;
+ err = bpf_map_lookup_elem(fd, &key, &got);
+ CHECK(err, "lookup elem", "error %d\n", err);
+ CHECK(got != value, "check value", "got %d exp %d\n", got, value);
+
+ /* add new node */
+ key.prefixlen = 9;
+ key.data = htobe32(1 << 23);
+ value = 2;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+ CHECK(err, "add new elem", "error %d\n", err);
+ got = 0;
+ err = bpf_map_lookup_elem(fd, &key, &got);
+ CHECK(err, "lookup elem", "error %d\n", err);
+ CHECK(got != value, "check value", "got %d exp %d\n", got, value);
+
+ /* try to add one more node (Error) */
+ key.prefixlen = 32;
+ key.data = 0;
+ value = 3;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
+ CHECK(err != -ENOSPC, "add to full trie", "error %d\n", err);
+
+ /* update the value of an existing node with BPF_EXIST */
+ key.prefixlen = 16;
+ key.data = 0;
+ value = 4;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
+ CHECK(err, "overwrite elem", "error %d\n", err);
+ got = 0;
+ err = bpf_map_lookup_elem(fd, &key, &got);
+ CHECK(err, "lookup elem", "error %d\n", err);
+ CHECK(got != value, "check value", "got %d exp %d\n", got, value);
+
+ /* update the value of an existing node with BPF_ANY */
+ key.prefixlen = 9;
+ key.data = htobe32(1 << 23);
+ value = 5;
+ err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
+ CHECK(err, "overwrite elem", "error %d\n", err);
+ got = 0;
+ err = bpf_map_lookup_elem(fd, &key, &got);
+ CHECK(err, "lookup elem", "error %d\n", err);
+ CHECK(got != value, "check value", "got %d exp %d\n", got, value);
+
+ close(fd);
+}
+
+static int cmp_str(const void *a, const void *b)
+{
+ const char *str_a = *(const char **)a, *str_b = *(const char **)b;
+
+ return strcmp(str_a, str_b);
+}
+
+/* Save strings in an LPM trie. The trailing '\0' of each string is
+ * accounted for in the prefixlen. The strings returned during iteration
+ * should come back in sorted order.
+ */
+static void test_lpm_trie_iterate_strs(void)
+{
+ static const char * const keys[] = {
+ "ab", "abO", "abc", "abo", "abS", "abcd",
+ };
+ const char *sorted_keys[ARRAY_SIZE(keys)];
+ struct lpm_trie_bytes_key key, next_key;
+ unsigned int value, got, i, j, len;
+ struct lpm_trie_bytes_key *cur;
+ int fd, err;
+
+ fd = lpm_trie_create(sizeof(key), sizeof(value), ARRAY_SIZE(keys));
+
+ for (i = 0; i < ARRAY_SIZE(keys); i++) {
+ unsigned int flags;
+
+ /* add i-th element */
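+ /* alternate BPF_NOEXIST and 0 (BPF_ANY) so both insert paths
+ * are exercised
+ */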
+ flags = i % 2 ? BPF_NOEXIST : 0;
+ len = strlen(keys[i]);
+ /* include the trailing '\0' */
+ key.prefixlen = (len + 1) * 8;
+ memset(key.data, 0, sizeof(key.data));
+ memcpy(key.data, keys[i], len);
+ value = i + 100;
+ err = bpf_map_update_elem(fd, &key, &value, flags);
+ CHECK(err, "add elem", "#%u error %d\n", i, err);
+
+ err = bpf_map_lookup_elem(fd, &key, &got);
+ CHECK(err, "lookup elem", "#%u error %d\n", i, err);
+ CHECK(got != value, "lookup elem", "#%u expect %u got %u\n", i, value, got);
+
+ /* re-add i-th element (Error) */
+ err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+ CHECK(err != -EEXIST, "re-add elem", "#%u error %d\n", i, err);
+
+ /* Overwrite i-th element */
+ flags = i % 2 ? 0 : BPF_EXIST;
+ value = i;
+ err = bpf_map_update_elem(fd, &key, &value, flags);
+ CHECK(err, "update elem", "error %d\n", err);
+
+ /* Lookup #[0~i] elements */
+ for (j = 0; j <= i; j++) {
+ len = strlen(keys[j]);
+ key.prefixlen = (len + 1) * 8;
+ memset(key.data, 0, sizeof(key.data));
+ memcpy(key.data, keys[j], len);
+ err = bpf_map_lookup_elem(fd, &key, &got);
+ CHECK(err, "lookup elem", "#%u/%u error %d\n", i, j, err);
+ CHECK(got != j, "lookup elem", "#%u/%u expect %u got %u\n",
+ i, j, j, got);
+ }
+ }
+
+ /* Add element to a full trie (Error) */
+ key.prefixlen = sizeof(key.data) * 8;
+ memset(key.data, 0, sizeof(key.data));
+ value = 0;
+ err = bpf_map_update_elem(fd, &key, &value, 0);
+ CHECK(err != -ENOSPC, "add to full trie", "error %d\n", err);
+
+ /* Iterate sorted elements: no deletion */
+ memcpy(sorted_keys, keys, sizeof(keys));
+ qsort(sorted_keys, ARRAY_SIZE(sorted_keys), sizeof(sorted_keys[0]), cmp_str);
+ cur = NULL;
+ for (i = 0; i < ARRAY_SIZE(sorted_keys); i++) {
+ len = strlen(sorted_keys[i]);
+ err = bpf_map_get_next_key(fd, cur, &next_key);
+ CHECK(err, "iterate", "#%u error %d\n", i, err);
+ CHECK(next_key.prefixlen != (len + 1) * 8, "iterate",
+ "#%u invalid len %u expect %u\n",
+ i, next_key.prefixlen, (len + 1) * 8);
+ CHECK(memcmp(sorted_keys[i], next_key.data, len + 1), "iterate",
+ "#%u got %.*s exp %.*s\n", i, len, next_key.data, len, sorted_keys[i]);
+
+ cur = &next_key;
+ }
+ err = bpf_map_get_next_key(fd, cur, &next_key);
+ CHECK(err != -ENOENT, "more element", "error %d\n", err);
+
+ /* Iterate sorted elements: delete the found key after each iteration */
+ cur = NULL;
+ for (i = 0; i < ARRAY_SIZE(sorted_keys); i++) {
+ len = strlen(sorted_keys[i]);
+ err = bpf_map_get_next_key(fd, cur, &next_key);
+ CHECK(err, "iterate", "#%u error %d\n", i, err);
+ CHECK(next_key.prefixlen != (len + 1) * 8, "iterate",
+ "#%u invalid len %u expect %u\n",
+ i, next_key.prefixlen, (len + 1) * 8);
+ CHECK(memcmp(sorted_keys[i], next_key.data, len + 1), "iterate",
+ "#%u got %.*s exp %.*s\n", i, len, next_key.data, len, sorted_keys[i]);
+
+ cur = &next_key;
+
+ err = bpf_map_delete_elem(fd, cur);
+ CHECK(err, "delete", "#%u error %d\n", i, err);
+ }
+ err = bpf_map_get_next_key(fd, cur, &next_key);
+ CHECK(err != -ENOENT, "non-empty qp-trie", "error %d\n", err);
+
+ close(fd);
+}
+
+/* Use the fixed prefixlen (32) and save integers in LPM trie. The iteration of
+ * LPM trie will return these integers in big-endian order, therefore, convert
+ * these integers to big-endian before update. After each iteration, delete the
+ * found key (the smallest integer) and expect the next iteration will return
+ * the second smallest number.
+ */
+static void test_lpm_trie_iterate_ints(void)
+{
+ struct lpm_trie_int_key key, next_key;
+ unsigned int i, max_entries;
+ struct lpm_trie_int_key *cur;
+ unsigned int *data_set;
+ int fd, err;
+ bool value;
+
+ max_entries = 4096;
+ data_set = calloc(max_entries, sizeof(*data_set));
+ CHECK(!data_set, "malloc", "no mem\n");
+ for (i = 0; i < max_entries; i++)
+ data_set[i] = i;
+
+ fd = lpm_trie_create(sizeof(key), sizeof(value), max_entries);
+ value = true;
+ for (i = 0; i < max_entries; i++) {
+ key.prefixlen = 32;
+ key.data = htobe32(data_set[i]);
+
+ err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+ CHECK(err, "add elem", "#%u error %d\n", i, err);
+ }
+
+ cur = NULL;
+ for (i = 0; i < max_entries; i++) {
+ err = bpf_map_get_next_key(fd, cur, &next_key);
+ CHECK(err, "iterate", "#%u error %d\n", i, err);
+ CHECK(next_key.prefixlen != 32, "iterate", "#%u invalid len %u\n",
+ i, next_key.prefixlen);
+ CHECK(be32toh(next_key.data) != data_set[i], "iterate", "#%u got 0x%x exp 0x%x\n",
+ i, be32toh(next_key.data), data_set[i]);
+ cur = &next_key;
+
+ /*
+ * Delete the minimal key; the next call of
+ * bpf_map_get_next_key() will return the second minimal key.
+ */
+ err = bpf_map_delete_elem(fd, &next_key);
+ CHECK(err, "del elem", "#%u elem error %d\n", i, err);
+ }
+ err = bpf_map_get_next_key(fd, cur, &next_key);
+ CHECK(err != -ENOENT, "more element", "error %d\n", err);
+
+ err = bpf_map_get_next_key(fd, NULL, &next_key);
+ CHECK(err != -ENOENT, "no-empty qp-trie", "error %d\n", err);
+
+ free(data_set);
+
+ close(fd);
+}
+
+void test_lpm_trie_map_basic_ops(void)
{
int i;
@@ -799,6 +1179,10 @@ int main(void)
test_lpm_get_next_key();
test_lpm_multi_thread();
- printf("test_lpm: OK\n");
- return 0;
+ test_lpm_trie_update_flags();
+ test_lpm_trie_update_full_map();
+ test_lpm_trie_iterate_strs();
+ test_lpm_trie_iterate_ints();
+
+ printf("%s: PASS\n", __func__);
}
diff --git a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c
new file mode 100644
index 000000000000..fe3e19f96244
--- /dev/null
+++ b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <arpa/inet.h>
+#include <linux/bpf.h>
+#include <netinet/in.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include <test_maps.h>
+
+struct test_lpm_key {
+ __u32 prefix;
+ struct in_addr ipv4;
+};
+
+static void map_batch_update(int map_fd, __u32 max_entries,
+ struct test_lpm_key *keys, int *values)
+{
+ __u32 i;
+ int err;
+ char buff[16] = { 0 };
+ DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
+ .elem_flags = 0,
+ .flags = 0,
+ );
+
+ for (i = 0; i < max_entries; i++) {
+ keys[i].prefix = 32;
+ snprintf(buff, 16, "192.168.1.%d", i + 1);
+ inet_pton(AF_INET, buff, &keys[i].ipv4);
+ values[i] = i + 1;
+ }
+
+ err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);
+ CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno));
+}
+
+static void map_batch_verify(int *visited, __u32 max_entries,
+ struct test_lpm_key *keys, int *values)
+{
+ char buff[16] = { 0 };
+ int lower_byte = 0;
+ __u32 i;
+
+ memset(visited, 0, max_entries * sizeof(*visited));
+ for (i = 0; i < max_entries; i++) {
+ inet_ntop(AF_INET, &keys[i].ipv4, buff, sizeof(buff));
+ CHECK(sscanf(buff, "192.168.1.%d", &lower_byte) != 1,
+ "sscanf()", "error: i %d\n", i);
+ CHECK(lower_byte != values[i], "key/value checking",
+ "error: i %d key %s value %d\n", i, buff, values[i]);
+ visited[i] = 1;
+ }
+ for (i = 0; i < max_entries; i++) {
+ CHECK(visited[i] != 1, "visited checking",
+ "error: keys array at index %d missing\n", i);
+ }
+}
+
+void test_lpm_trie_map_batch_ops(void)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, create_opts, .map_flags = BPF_F_NO_PREALLOC);
+ struct test_lpm_key *keys, key;
+ int map_fd, *values, *visited;
+ __u32 step, count, total, total_success;
+ const __u32 max_entries = 10;
+ __u64 batch = 0;
+ int err;
+ DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
+ .elem_flags = 0,
+ .flags = 0,
+ );
+
+ map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, "lpm_trie_map",
+ sizeof(struct test_lpm_key), sizeof(int),
+ max_entries, &create_opts);
+ CHECK(map_fd == -1, "bpf_map_create()", "error:%s\n",
+ strerror(errno));
+
+ keys = malloc(max_entries * sizeof(struct test_lpm_key));
+ values = malloc(max_entries * sizeof(int));
+ visited = malloc(max_entries * sizeof(int));
+ CHECK(!keys || !values || !visited, "malloc()", "error:%s\n",
+ strerror(errno));
+
+ total_success = 0;
+ for (step = 1; step < max_entries; step++) {
+ map_batch_update(map_fd, max_entries, keys, values);
+ map_batch_verify(visited, max_entries, keys, values);
+ memset(keys, 0, max_entries * sizeof(*keys));
+ memset(values, 0, max_entries * sizeof(*values));
+ batch = 0;
+ total = 0;
+ /* iteratively lookup/delete elements, 'step'
+ * elements at a time.
+ */
+ count = step;
+ while (true) {
+ err = bpf_map_lookup_batch(map_fd,
+ total ? &batch : NULL, &batch,
+ keys + total, values + total, &count, &opts);
+
+ CHECK((err && errno != ENOENT), "lookup with steps",
+ "error: %s\n", strerror(errno));
+
+ total += count;
+ if (err)
+ break;
+ }
+
+ CHECK(total != max_entries, "lookup with steps",
+ "total = %u, max_entries = %u\n", total, max_entries);
+
+ map_batch_verify(visited, max_entries, keys, values);
+
+ total = 0;
+ count = step;
+ while (total < max_entries) {
+ if (max_entries - total < step)
+ count = max_entries - total;
+ err = bpf_map_delete_batch(map_fd, keys + total, &count,
+ &opts);
+ CHECK((err && errno != ENOENT), "delete batch",
+ "error: %s\n", strerror(errno));
+ total += count;
+ if (err)
+ break;
+ }
+ CHECK(total != max_entries, "delete with steps",
+ "total = %u, max_entries = %u\n", total, max_entries);
+
+ /* check map is empty, errno == ENOENT */
+ err = bpf_map_get_next_key(map_fd, NULL, &key);
+ CHECK(!err || errno != ENOENT, "bpf_map_get_next_key()",
+ "error: %s\n", strerror(errno));
+
+ total_success++;
+ }
+
+ CHECK(total_success == 0, "check total_success",
+ "unexpected failure\n");
+
+ printf("%s:PASS\n", __func__);
+
+ free(keys);
+ free(values);
+ free(visited);
+ close(map_fd);
+}
diff --git a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_get_next_key.c b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_get_next_key.c
new file mode 100644
index 000000000000..0ba015686492
--- /dev/null
+++ b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_get_next_key.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <linux/bpf.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include <test_maps.h>
+
+struct test_lpm_key {
+ __u32 prefix;
+ __u32 data;
+};
+
+struct get_next_key_ctx {
+ struct test_lpm_key key;
+ bool start;
+ bool stop;
+ int map_fd;
+ int loop;
+};
+
+static void *get_next_key_fn(void *arg)
+{
+ struct get_next_key_ctx *ctx = arg;
+ struct test_lpm_key next_key;
+ int i = 0;
+
+ while (!ctx->start)
+ usleep(1);
+
+ while (!ctx->stop && i++ < ctx->loop)
+ bpf_map_get_next_key(ctx->map_fd, &ctx->key, &next_key);
+
+ return NULL;
+}
+
+static void abort_get_next_key(struct get_next_key_ctx *ctx, pthread_t *tids,
+ unsigned int nr)
+{
+ unsigned int i;
+
+ ctx->stop = true;
+ ctx->start = true;
+ for (i = 0; i < nr; i++)
+ pthread_join(tids[i], NULL);
+}
+
+/* This test aims to prevent future regressions. As long as the kernel does
+ * not panic, the test is considered a success.
+ */
+void test_lpm_trie_map_get_next_key(void)
+{
+#define MAX_NR_THREADS 8
+ LIBBPF_OPTS(bpf_map_create_opts, create_opts,
+ .map_flags = BPF_F_NO_PREALLOC);
+ struct test_lpm_key key = {};
+ __u32 val = 0;
+ int map_fd;
+ const __u32 max_prefixlen = 8 * (sizeof(key) - sizeof(key.prefix));
+ const __u32 max_entries = max_prefixlen + 1;
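+ /* key.data is 4 bytes, so prefixlens 0..32 are all valid: one map
+ * entry per possible prefix length
+ */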
+ unsigned int i, nr = MAX_NR_THREADS, loop = 65536;
+ pthread_t tids[MAX_NR_THREADS];
+ struct get_next_key_ctx ctx;
+ int err;
+
+ map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, "lpm_trie_map",
+ sizeof(struct test_lpm_key), sizeof(__u32),
+ max_entries, &create_opts);
+ CHECK(map_fd == -1, "bpf_map_create()", "error:%s\n",
+ strerror(errno));
+
+ for (i = 0; i <= max_prefixlen; i++) {
+ key.prefix = i;
+ err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
+ CHECK(err, "bpf_map_update_elem()", "error:%s\n",
+ strerror(errno));
+ }
+
+ ctx.start = false;
+ ctx.stop = false;
+ ctx.map_fd = map_fd;
+ ctx.loop = loop;
+ memcpy(&ctx.key, &key, sizeof(key));
+
+ for (i = 0; i < nr; i++) {
+ err = pthread_create(&tids[i], NULL, get_next_key_fn, &ctx);
+ if (err) {
+ abort_get_next_key(&ctx, tids, i);
+ CHECK(err, "pthread_create", "error %d\n", err);
+ }
+ }
+
+ ctx.start = true;
+ for (i = 0; i < nr; i++)
+ pthread_join(tids[i], NULL);
+
+ printf("%s:PASS\n", __func__);
+
+ close(map_fd);
+}
diff --git a/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c
new file mode 100644
index 000000000000..79c3ccadb962
--- /dev/null
+++ b/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include <test_maps.h>
+
+#define OUTER_MAP_ENTRIES 10
+
+static __u32 get_map_id_from_fd(int map_fd)
+{
+ struct bpf_map_info map_info = {};
+ uint32_t info_len = sizeof(map_info);
+ int ret;
+
+ ret = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
+ CHECK(ret < 0, "Finding map info failed", "error:%s\n",
+ strerror(errno));
+
+ return map_info.id;
+}
+
+/* Create OUTER_MAP_ENTRIES maps that will be stored in the outer map,
+ * and return the created map fds.
+ */
+static void create_inner_maps(enum bpf_map_type map_type,
+ __u32 *inner_map_fds)
+{
+ int map_fd, map_index, ret;
+ __u32 map_key = 0, map_id;
+ char map_name[16];
+
+ for (map_index = 0; map_index < OUTER_MAP_ENTRIES; map_index++) {
+ memset(map_name, 0, sizeof(map_name));
+ snprintf(map_name, sizeof(map_name), "inner_map_fd_%d", map_index);
+ map_fd = bpf_map_create(map_type, map_name, sizeof(__u32),
+ sizeof(__u32), 1, NULL);
+ CHECK(map_fd < 0,
+ "inner bpf_map_create() failed",
+ "map_type=(%d) map_name(%s), error:%s\n",
+ map_type, map_name, strerror(errno));
+
+ /* keep track of the inner map fd as it is required
+ * to add records to the outer map
+ */
+ inner_map_fds[map_index] = map_fd;
+
+ /* Add an entry to each newly created map,
+ * e.g.: map1 key = 0, value = map1's map id
+ *       map2 key = 0, value = map2's map id
+ */
+ map_id = get_map_id_from_fd(map_fd);
+ ret = bpf_map_update_elem(map_fd, &map_key, &map_id, 0);
+ CHECK(ret != 0,
+ "bpf_map_update_elem failed",
+ "map_type=(%d) map_name(%s), error:%s\n",
+ map_type, map_name, strerror(errno));
+ }
+}
+
+static int create_outer_map(enum bpf_map_type map_type, __u32 inner_map_fd)
+{
+ int outer_map_fd;
+ LIBBPF_OPTS(bpf_map_create_opts, attr);
+
+ attr.inner_map_fd = inner_map_fd;
+ outer_map_fd = bpf_map_create(map_type, "outer_map", sizeof(__u32),
+ sizeof(__u32), OUTER_MAP_ENTRIES,
+ &attr);
+ CHECK(outer_map_fd < 0,
+ "outer bpf_map_create()",
+ "map_type=(%d), error:%s\n",
+ map_type, strerror(errno));
+
+ return outer_map_fd;
+}
+
+static void validate_fetch_results(int outer_map_fd,
+ __u32 *fetched_keys, __u32 *fetched_values,
+ __u32 max_entries_fetched)
+{
+ __u32 inner_map_key, inner_map_value;
+ int inner_map_fd, entry, err;
+ __u32 outer_map_value;
+
+ for (entry = 0; entry < max_entries_fetched; ++entry) {
+ outer_map_value = fetched_values[entry];
+ inner_map_fd = bpf_map_get_fd_by_id(outer_map_value);
+ CHECK(inner_map_fd < 0,
+ "Failed to get inner map fd",
+ "from id(%d), error=%s\n",
+ outer_map_value, strerror(errno));
+ err = bpf_map_get_next_key(inner_map_fd, NULL, &inner_map_key);
+ CHECK(err != 0,
+ "Failed to get inner map key",
+ "error=%s\n", strerror(errno));
+
+ err = bpf_map_lookup_elem(inner_map_fd, &inner_map_key,
+ &inner_map_value);
+
+ close(inner_map_fd);
+
+ CHECK(err != 0,
+ "Failed to get inner map value",
+ "for key(%d), error=%s\n",
+ inner_map_key, strerror(errno));
+
+ /* Actual value validation */
+ CHECK(outer_map_value != inner_map_value,
+ "Failed to validate inner map value",
+ "fetched(%d) and lookedup(%d)!\n",
+ outer_map_value, inner_map_value);
+ }
+}
+
+static void fetch_and_validate(int outer_map_fd,
+ struct bpf_map_batch_opts *opts,
+ __u32 batch_size, bool delete_entries,
+ bool has_holes)
+{
+ int err, max_entries = OUTER_MAP_ENTRIES - !!has_holes;
+ __u32 *fetched_keys, *fetched_values, total_fetched = 0, i;
+ __u32 batch_key = 0, fetch_count, step_size = batch_size;
+ __u32 value_size = sizeof(__u32);
+
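+ /* When the outer map was populated with a hole, one slot was never
+ * set, so one fewer entry can be fetched.
+ */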
+ /* All entries need to be fetched */
+ fetched_keys = calloc(max_entries, value_size);
+ fetched_values = calloc(max_entries, value_size);
+ CHECK((!fetched_keys || !fetched_values),
+ "Memory allocation failed for fetched_keys or fetched_values",
+ "error=%s\n", strerror(errno));
+
+ /* a hash map may not always return a full batch */
+ for (i = 0; i < OUTER_MAP_ENTRIES; i++) {
+ fetch_count = step_size;
+ err = delete_entries
+ ? bpf_map_lookup_and_delete_batch(outer_map_fd,
+ total_fetched ? &batch_key : NULL,
+ &batch_key,
+ fetched_keys + total_fetched,
+ fetched_values + total_fetched,
+ &fetch_count, opts)
+ : bpf_map_lookup_batch(outer_map_fd,
+ total_fetched ? &batch_key : NULL,
+ &batch_key,
+ fetched_keys + total_fetched,
+ fetched_values + total_fetched,
+ &fetch_count, opts);
+
+ if (err && errno == ENOSPC) {
+ /* Fetch again with higher batch size */
+ total_fetched = 0;
+ step_size += batch_size;
+ continue;
+ }
+
+ CHECK((err < 0 && (errno != ENOENT)),
+ "lookup with steps failed",
+ "error: %s\n", strerror(errno));
+
+ /* Update the total fetched number */
+ total_fetched += fetch_count;
+ if (err)
+ break;
+ }
+
+ CHECK((total_fetched != max_entries),
+ "Unable to fetch expected entries !",
+ "total_fetched(%d) and max_entries(%d) error: (%d):%s\n",
+ total_fetched, max_entries, errno, strerror(errno));
+
+ /* validate the fetched entries */
+ validate_fetch_results(outer_map_fd, fetched_keys,
+ fetched_values, total_fetched);
+ printf("batch_op(%s) is successful with batch_size(%d)\n",
+ delete_entries ? "LOOKUP_AND_DELETE" : "LOOKUP", batch_size);
+
+ free(fetched_keys);
+ free(fetched_values);
+}
+
+static void _map_in_map_batch_ops(enum bpf_map_type outer_map_type,
+ enum bpf_map_type inner_map_type,
+ bool has_holes)
+{
+ __u32 max_entries = OUTER_MAP_ENTRIES - !!has_holes;
+ __u32 *outer_map_keys, *inner_map_fds;
+ LIBBPF_OPTS(bpf_map_batch_opts, opts);
+ __u32 value_size = sizeof(__u32);
+ int batch_size[2] = {5, 10};
+ __u32 map_index, op_index;
+ int outer_map_fd, ret;
+
+ outer_map_keys = calloc(OUTER_MAP_ENTRIES, value_size);
+ inner_map_fds = calloc(OUTER_MAP_ENTRIES, value_size);
+ CHECK((!outer_map_keys || !inner_map_fds),
+ "Memory allocation failed for outer_map_keys or inner_map_fds",
+ "error=%s\n", strerror(errno));
+
+ create_inner_maps(inner_map_type, inner_map_fds);
+
+ outer_map_fd = create_outer_map(outer_map_type, *inner_map_fds);
+ /* create outer map keys */
+ for (map_index = 0; map_index < max_entries; map_index++)
+ outer_map_keys[map_index] =
+ ((outer_map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
+ ? 9 : 1000) - map_index;
+
+ /* This condition is only meaningful for an array of maps.
+ *
+ * If has_holes is true, max_entries == OUTER_MAP_ENTRIES - 1.
+ * Writing n for max_entries, outer_map_keys looks like:
+ *
+ * [n, n-1, ... 2, 1]
+ *
+ * We change it to
+ *
+ * [n, n-1, ... 2, 0]
+ *
+ * This leaves key 1 as a hole, and serves to test correctness
+ * when batching on an array: a "non-existent" key might actually
+ * be allocated and returned from key iteration.
+ */
+ if (has_holes)
+ outer_map_keys[max_entries - 1]--;
+
+ /* batch operation - map_update */
+ ret = bpf_map_update_batch(outer_map_fd, outer_map_keys,
+ inner_map_fds, &max_entries, &opts);
+ CHECK(ret != 0,
+ "Failed to update the outer map batch ops",
+ "error=%s\n", strerror(errno));
+
+ /* batch operation - map_lookup */
+ for (op_index = 0; op_index < 2; ++op_index)
+ fetch_and_validate(outer_map_fd, &opts,
+ batch_size[op_index], false,
+ has_holes);
+
+ /* batch operation - map_lookup_and_delete */
+ if (outer_map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
+ fetch_and_validate(outer_map_fd, &opts,
+ max_entries, true /*delete*/,
+ has_holes);
+
+ /* close all map fds */
+ for (map_index = 0; map_index < OUTER_MAP_ENTRIES; map_index++)
+ close(inner_map_fds[map_index]);
+ close(outer_map_fd);
+
+ free(inner_map_fds);
+ free(outer_map_keys);
+}
+
+void test_map_in_map_batch_ops_array(void)
+{
+ _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_ARRAY, false);
+ printf("%s:PASS with inner ARRAY map\n", __func__);
+ _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH, false);
+ printf("%s:PASS with inner HASH map\n", __func__);
+ _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_ARRAY, true);
+ printf("%s:PASS with inner ARRAY map with holes\n", __func__);
+ _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH, true);
+ printf("%s:PASS with inner HASH map with holes\n", __func__);
+}
+
+void test_map_in_map_batch_ops_hash(void)
+{
+ _map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_ARRAY, false);
+ printf("%s:PASS with inner ARRAY map\n", __func__);
+ _map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_HASH, false);
+ printf("%s:PASS with inner HASH map\n", __func__);
+}
diff --git a/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c b/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
new file mode 100644
index 000000000000..1c7c04288eff
--- /dev/null
+++ b/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+
+#include <errno.h>
+#include <unistd.h>
+#include <pthread.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include <bpf_util.h>
+#include <test_maps.h>
+
+#include "map_percpu_stats.skel.h"
+
+#define MAX_ENTRIES 16384
+#define MAX_ENTRIES_HASH_OF_MAPS 64
+#define N_THREADS 8
+#define MAX_MAP_KEY_SIZE 4
+#define PCPU_MIN_UNIT_SIZE 32768
+
+static void map_info(int map_fd, struct bpf_map_info *info)
+{
+ __u32 len = sizeof(*info);
+ int ret;
+
+ memset(info, 0, sizeof(*info));
+
+ ret = bpf_obj_get_info_by_fd(map_fd, info, &len);
+ CHECK(ret < 0, "bpf_obj_get_info_by_fd", "error: %s\n", strerror(errno));
+}
+
+static const char *map_type_to_s(__u32 type)
+{
+ switch (type) {
+ case BPF_MAP_TYPE_HASH:
+ return "HASH";
+ case BPF_MAP_TYPE_PERCPU_HASH:
+ return "PERCPU_HASH";
+ case BPF_MAP_TYPE_LRU_HASH:
+ return "LRU_HASH";
+ case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+ return "LRU_PERCPU_HASH";
+ case BPF_MAP_TYPE_HASH_OF_MAPS:
+ return "BPF_MAP_TYPE_HASH_OF_MAPS";
+ default:
+ return "<define-me>";
+ }
+}
+
+static __u32 map_count_elements(__u32 type, int map_fd)
+{
+ __u32 key = -1;
+ int n = 0;
+
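+ /* Start from a key that is not in the map: bpf_map_get_next_key()
+ * then returns the first key, and the loop walks the entire map.
+ */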
+ while (!bpf_map_get_next_key(map_fd, &key, &key))
+ n++;
+ return n;
+}
+
+#define BATCH true
+
+static void delete_and_lookup_batch(int map_fd, void *keys, __u32 count)
+{
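+ /* 8KB of value space per entry: enough for per-cpu maps with 8-byte
+ * values on up to 1024 CPUs
+ */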
+ static __u8 values[(8 << 10) * MAX_ENTRIES];
+ void *in_batch = NULL, *out_batch;
+ __u32 save_count = count;
+ int ret;
+
+ ret = bpf_map_lookup_and_delete_batch(map_fd,
+ &in_batch, &out_batch,
+ keys, values, &count,
+ NULL);
+
+ /*
+ * Despite what the UAPI header says, lookup_and_delete_batch returns
+ * -ENOENT once all elements have been successfully deleted, so check
+ * for this separately.
+ */
+ CHECK(ret < 0 && (errno != ENOENT || !count), "bpf_map_lookup_and_delete_batch",
+ "error: %s\n", strerror(errno));
+
+ CHECK(count != save_count,
+ "bpf_map_lookup_and_delete_batch",
+ "deleted not all elements: removed=%u expected=%u\n",
+ count, save_count);
+}
+
+static void delete_all_elements(__u32 type, int map_fd, bool batch)
+{
+ static __u8 val[8 << 10]; /* enough for 1024 CPUs */
+ __u32 key = -1;
+ void *keys;
+ __u32 i, n;
+ int ret;
+
+ keys = calloc(MAX_MAP_KEY_SIZE, MAX_ENTRIES);
+ CHECK(!keys, "calloc", "error: %s\n", strerror(errno));
+
+ for (n = 0; !bpf_map_get_next_key(map_fd, &key, &key); n++)
+ memcpy(keys + n*MAX_MAP_KEY_SIZE, &key, MAX_MAP_KEY_SIZE);
+
+ if (batch) {
+ /* Can't mix delete_batch and delete_and_lookup_batch because
+ * they have different semantics in relation to the keys
+ * argument. However, delete_batch utilizes map_delete_elem,
+ * so we actually test it in the non-batch scenario.
+ */
+ delete_and_lookup_batch(map_fd, keys, n);
+ } else {
+ /* Intentionally mix delete and lookup_and_delete so we can test both */
+ for (i = 0; i < n; i++) {
+ void *keyp = keys + i*MAX_MAP_KEY_SIZE;
+
+ if (i % 2 || type == BPF_MAP_TYPE_HASH_OF_MAPS) {
+ ret = bpf_map_delete_elem(map_fd, keyp);
+ CHECK(ret < 0, "bpf_map_delete_elem",
+ "error: key %u: %s\n", i, strerror(errno));
+ } else {
+ ret = bpf_map_lookup_and_delete_elem(map_fd, keyp, val);
+ CHECK(ret < 0, "bpf_map_lookup_and_delete_elem",
+ "error: key %u: %s\n", i, strerror(errno));
+ }
+ }
+ }
+
+ free(keys);
+}
+
+static bool is_lru(__u32 map_type)
+{
+ return map_type == BPF_MAP_TYPE_LRU_HASH ||
+ map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
+}
+
+static bool is_percpu(__u32 map_type)
+{
+ return map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
+}
+
+struct upsert_opts {
+ __u32 map_type;
+ int map_fd;
+ __u32 n;
+ bool retry_for_nomem;
+};
+
+static int create_small_hash(void)
+{
+ int map_fd;
+
+ map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "small", 4, 4, 4, NULL);
+ CHECK(map_fd < 0, "bpf_map_create()", "error:%s (name=%s)\n",
+ strerror(errno), "small");
+
+ return map_fd;
+}
+
+static bool retry_for_nomem_fn(int err)
+{
+ return err == ENOMEM;
+}
+
+static void *patch_map_thread(void *arg)
+{
+ /* 8KB is enough for 1024 CPUs. And it is shared between N_THREADS. */
+ static __u8 blob[8 << 10];
+ struct upsert_opts *opts = arg;
+ void *val_ptr;
+ int val;
+ int ret;
+ int i;
+
+ for (i = 0; i < opts->n; i++) {
+ if (opts->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
+ val = create_small_hash();
+ val_ptr = &val;
+ } else if (is_percpu(opts->map_type)) {
+ val_ptr = blob;
+ } else {
+ val = rand();
+ val_ptr = &val;
+ }
+
+ /* 2 seconds of retrying may be enough */
+ if (opts->retry_for_nomem)
+ ret = map_update_retriable(opts->map_fd, &i, val_ptr, 0,
+ 40, retry_for_nomem_fn);
+ else
+ ret = bpf_map_update_elem(opts->map_fd, &i, val_ptr, 0);
+ CHECK(ret < 0, "bpf_map_update_elem", "key=%d error: %s\n", i, strerror(errno));
+
+ if (opts->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
+ close(val);
+ }
+ return NULL;
+}
+
+static void upsert_elements(struct upsert_opts *opts)
+{
+ pthread_t threads[N_THREADS];
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(threads); i++) {
+ ret = pthread_create(&i[threads], NULL, patch_map_thread, opts);
+ CHECK(ret != 0, "pthread_create", "error: %s\n", strerror(ret));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(threads); i++) {
+ ret = pthread_join(threads[i], NULL);
+ CHECK(ret != 0, "pthread_join", "error: %s\n", strerror(ret));
+ }
+}
+
+static __u32 read_cur_elements(int iter_fd)
+{
+ char buf[64];
+ ssize_t n;
+ __u32 ret;
+
+ n = read(iter_fd, buf, sizeof(buf)-1);
+ CHECK(n <= 0, "read", "error: %s\n", strerror(errno));
+ buf[n] = '\0';
+
+ errno = 0;
+ ret = (__u32)strtol(buf, NULL, 10);
+ CHECK(errno != 0, "strtol", "error: %s\n", strerror(errno));
+
+ return ret;
+}
+
+static __u32 get_cur_elements(int map_id)
+{
+ struct map_percpu_stats *skel;
+ struct bpf_link *link;
+ __u32 n_elements;
+ int iter_fd;
+ int ret;
+
+ skel = map_percpu_stats__open();
+ CHECK(skel == NULL, "map_percpu_stats__open", "error: %s", strerror(errno));
+
+ skel->bss->target_id = map_id;
+
+ ret = map_percpu_stats__load(skel);
+ CHECK(ret != 0, "map_percpu_stats__load", "error: %s", strerror(errno));
+
+ link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
+ CHECK(!link, "bpf_program__attach_iter", "error: %s\n", strerror(errno));
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ CHECK(iter_fd < 0, "bpf_iter_create", "error: %s\n", strerror(errno));
+
+ n_elements = read_cur_elements(iter_fd);
+
+ close(iter_fd);
+ bpf_link__destroy(link);
+ map_percpu_stats__destroy(skel);
+
+ return n_elements;
+}
+
+static void check_expected_number_elements(__u32 n_inserted, int map_fd,
+ struct bpf_map_info *info)
+{
+ __u32 n_real;
+ __u32 n_iter;
+
+ /* Count the current number of elements in the map by iterating through
+ * all the map keys via bpf_get_next_key
+ */
+ n_real = map_count_elements(info->type, map_fd);
+
+ /* The "real" number of elements should be the same as the inserted
+ * number of elements in all cases except LRU maps, where some elements
+ * may have been evicted
+ */
+ if (n_inserted == 0 || !is_lru(info->type))
+ CHECK(n_inserted != n_real, "map_count_elements",
+ "n_real(%u) != n_inserted(%u)\n", n_real, n_inserted);
+
+ /* Count the current number of elements in the map using an iterator */
+ n_iter = get_cur_elements(info->id);
+
+ /* Both counts should be the same, as all updates are over */
+ CHECK(n_iter != n_real, "get_cur_elements",
+ "n_iter=%u, expected %u (map_type=%s,map_flags=%08x)\n",
+ n_iter, n_real, map_type_to_s(info->type), info->map_flags);
+}
+
+static void __test(int map_fd)
+{
+ struct upsert_opts opts = {
+ .map_fd = map_fd,
+ };
+ struct bpf_map_info info;
+
+ map_info(map_fd, &info);
+ opts.map_type = info.type;
+ opts.n = info.max_entries;
+
+ /* Reduce the number of elements we are updating such that we don't
+ * bump into -E2BIG from non-preallocated hash maps, but will still
+ * see some evictions for LRU maps */
+ if (opts.map_type != BPF_MAP_TYPE_HASH_OF_MAPS)
+ opts.n -= 512;
+ else
+ opts.n /= 2;
+
+ /* The per-cpu bpf memory allocator may fail to allocate a per-cpu
+ * pointer and may not refill its free llist in time, in which case
+ * bpf_map_update_elem() returns -ENOMEM. Just retry to mitigate the
+ * problem temporarily.
+ */
+ opts.retry_for_nomem = is_percpu(opts.map_type) && (info.map_flags & BPF_F_NO_PREALLOC);
+
+ /*
+ * Upsert keys [0, n) under some competition: with random values from
+ * N_THREADS threads. Check values, then delete all elements and check
+ * values again.
+ */
+ upsert_elements(&opts);
+ check_expected_number_elements(opts.n, map_fd, &info);
+ delete_all_elements(info.type, map_fd, !BATCH);
+ check_expected_number_elements(0, map_fd, &info);
+
+ /* Now do the same, but using batch delete operations */
+ upsert_elements(&opts);
+ check_expected_number_elements(opts.n, map_fd, &info);
+ delete_all_elements(info.type, map_fd, BATCH);
+ check_expected_number_elements(0, map_fd, &info);
+
+ close(map_fd);
+}
+
+static int map_create_opts(__u32 type, const char *name,
+ struct bpf_map_create_opts *map_opts,
+ __u32 key_size, __u32 val_size)
+{
+ int max_entries;
+ int map_fd;
+
+ if (type == BPF_MAP_TYPE_HASH_OF_MAPS)
+ max_entries = MAX_ENTRIES_HASH_OF_MAPS;
+ else
+ max_entries = MAX_ENTRIES;
+
+ map_fd = bpf_map_create(type, name, key_size, val_size, max_entries, map_opts);
+ CHECK(map_fd < 0, "bpf_map_create()", "error:%s (name=%s)\n",
+ strerror(errno), name);
+
+ return map_fd;
+}
+
+static int map_create(__u32 type, const char *name, struct bpf_map_create_opts *map_opts)
+{
+ return map_create_opts(type, name, map_opts, sizeof(int), sizeof(int));
+}
+
+static int create_hash(void)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = BPF_F_NO_PREALLOC);
+
+ return map_create(BPF_MAP_TYPE_HASH, "hash", &map_opts);
+}
+
+static int create_percpu_hash(void)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = BPF_F_NO_PREALLOC);
+
+ return map_create(BPF_MAP_TYPE_PERCPU_HASH, "percpu_hash", &map_opts);
+}
+
+static int create_hash_prealloc(void)
+{
+ return map_create(BPF_MAP_TYPE_HASH, "hash", NULL);
+}
+
+static int create_percpu_hash_prealloc(void)
+{
+ return map_create(BPF_MAP_TYPE_PERCPU_HASH, "percpu_hash_prealloc", NULL);
+}
+
+static int create_lru_hash(__u32 type, __u32 map_flags)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = map_flags);
+
+ return map_create(type, "lru_hash", &map_opts);
+}
+
+static int create_hash_of_maps(void)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts,
+ .map_flags = BPF_F_NO_PREALLOC,
+ .inner_map_fd = create_small_hash(),
+ );
+ int ret;
+
+ ret = map_create_opts(BPF_MAP_TYPE_HASH_OF_MAPS, "hash_of_maps",
+ &map_opts, sizeof(int), sizeof(int));
+ close(map_opts.inner_map_fd);
+ return ret;
+}
+
+static void map_percpu_stats_hash(void)
+{
+ __test(create_hash());
+ printf("test_%s:PASS\n", __func__);
+}
+
+static void map_percpu_stats_percpu_hash(void)
+{
+ __test(create_percpu_hash());
+ printf("test_%s:PASS\n", __func__);
+}
+
+static void map_percpu_stats_hash_prealloc(void)
+{
+ __test(create_hash_prealloc());
+ printf("test_%s:PASS\n", __func__);
+}
+
+static void map_percpu_stats_percpu_hash_prealloc(void)
+{
+ __test(create_percpu_hash_prealloc());
+ printf("test_%s:PASS\n", __func__);
+}
+
+static void map_percpu_stats_lru_hash(void)
+{
+ __test(create_lru_hash(BPF_MAP_TYPE_LRU_HASH, 0));
+ printf("test_%s:PASS\n", __func__);
+}
+
+static void map_percpu_stats_lru_hash_no_common(void)
+{
+ __test(create_lru_hash(BPF_MAP_TYPE_LRU_HASH, BPF_F_NO_COMMON_LRU));
+ printf("test_%s:PASS\n", __func__);
+}
+
+static void map_percpu_stats_percpu_lru_hash(void)
+{
+ __test(create_lru_hash(BPF_MAP_TYPE_LRU_PERCPU_HASH, 0));
+ printf("test_%s:PASS\n", __func__);
+}
+
+static void map_percpu_stats_percpu_lru_hash_no_common(void)
+{
+ __test(create_lru_hash(BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_F_NO_COMMON_LRU));
+ printf("test_%s:PASS\n", __func__);
+}
+
+static void map_percpu_stats_hash_of_maps(void)
+{
+ __test(create_hash_of_maps());
+ printf("test_%s:PASS\n", __func__);
+}
+
+static void map_percpu_stats_map_value_size(void)
+{
+ int fd;
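+ /* one byte above PCPU_MIN_UNIT_SIZE: per-cpu maps cannot hold values
+ * this large, so map creation should fail with E2BIG
+ */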
+ int value_sz = PCPU_MIN_UNIT_SIZE + 1;
+ struct bpf_map_create_opts opts = { .sz = sizeof(opts) };
+ enum bpf_map_type map_types[] = { BPF_MAP_TYPE_PERCPU_ARRAY,
+ BPF_MAP_TYPE_PERCPU_HASH,
+ BPF_MAP_TYPE_LRU_PERCPU_HASH };
+ for (int i = 0; i < ARRAY_SIZE(map_types); i++) {
+ fd = bpf_map_create(map_types[i], NULL, sizeof(__u32), value_sz, 1, &opts);
+ CHECK(fd < 0 && errno != E2BIG, "percpu map value size",
+ "error: %s\n", strerror(errno));
+ }
+ printf("test_%s:PASS\n", __func__);
+}
+
+void test_map_percpu_stats(void)
+{
+ map_percpu_stats_hash();
+ map_percpu_stats_percpu_hash();
+ map_percpu_stats_hash_prealloc();
+ map_percpu_stats_percpu_hash_prealloc();
+ map_percpu_stats_lru_hash();
+ map_percpu_stats_lru_hash_no_common();
+ map_percpu_stats_percpu_lru_hash();
+ map_percpu_stats_percpu_lru_hash_no_common();
+ map_percpu_stats_hash_of_maps();
+ map_percpu_stats_map_value_size();
+}
diff --git a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
index e569edc679d8..af10c309359a 100644
--- a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
+++ b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
@@ -19,16 +19,12 @@
#include <test_btf.h>
#include <test_maps.h>
-static struct bpf_create_map_attr xattr = {
- .name = "sk_storage_map",
- .map_type = BPF_MAP_TYPE_SK_STORAGE,
- .map_flags = BPF_F_NO_PREALLOC,
- .max_entries = 0,
- .key_size = 4,
- .value_size = 8,
+static struct bpf_map_create_opts map_opts = {
+ .sz = sizeof(map_opts),
.btf_key_type_id = 1,
.btf_value_type_id = 3,
.btf_fd = -1,
+ .map_flags = BPF_F_NO_PREALLOC,
};
static unsigned int nr_sk_threads_done;
@@ -140,7 +136,7 @@ static int load_btf(void)
memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types),
btf_str_sec, sizeof(btf_str_sec));
- return bpf_load_btf(raw_btf, sizeof(raw_btf), 0, 0, 0);
+ return bpf_btf_load(raw_btf, sizeof(raw_btf), NULL);
}
static int create_sk_storage_map(void)
@@ -150,13 +146,13 @@ static int create_sk_storage_map(void)
btf_fd = load_btf();
CHECK(btf_fd == -1, "bpf_load_btf", "btf_fd:%d errno:%d\n",
btf_fd, errno);
- xattr.btf_fd = btf_fd;
+ map_opts.btf_fd = btf_fd;
- map_fd = bpf_create_map_xattr(&xattr);
- xattr.btf_fd = -1;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &map_opts);
+ map_opts.btf_fd = -1;
close(btf_fd);
CHECK(map_fd == -1,
- "bpf_create_map_xattr()", "errno:%d\n", errno);
+ "bpf_map_create()", "errno:%d\n", errno);
return map_fd;
}
@@ -416,7 +412,7 @@ static void test_sk_storage_map_stress_free(void)
rlim_new.rlim_max = rlim_new.rlim_cur + 128;
err = setrlimit(RLIMIT_NOFILE, &rlim_new);
CHECK(err, "setrlimit(RLIMIT_NOFILE)", "rlim_new:%lu errno:%d",
- rlim_new.rlim_cur, errno);
+ (unsigned long) rlim_new.rlim_cur, errno);
}
err = do_sk_storage_map_stress_free();
@@ -462,21 +458,21 @@ static void test_sk_storage_map_basic(void)
struct {
int cnt;
int lock;
- } value = { .cnt = 0xeB9f, .lock = 0, }, lookup_value;
- struct bpf_create_map_attr bad_xattr;
+ } value = { .cnt = 0xeB9f, .lock = 1, }, lookup_value;
+ struct bpf_map_create_opts bad_xattr;
int btf_fd, map_fd, sk_fd, err;
btf_fd = load_btf();
CHECK(btf_fd == -1, "bpf_load_btf", "btf_fd:%d errno:%d\n",
btf_fd, errno);
- xattr.btf_fd = btf_fd;
+ map_opts.btf_fd = btf_fd;
sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
CHECK(sk_fd == -1, "socket()", "sk_fd:%d errno:%d\n",
sk_fd, errno);
- map_fd = bpf_create_map_xattr(&xattr);
- CHECK(map_fd == -1, "bpf_create_map_xattr(good_xattr)",
+ map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &map_opts);
+ CHECK(map_fd == -1, "bpf_map_create(good_xattr)",
"map_fd:%d errno:%d\n", map_fd, errno);
/* Add new elem */
@@ -487,38 +483,41 @@ static void test_sk_storage_map_basic(void)
"err:%d errno:%d\n", err, errno);
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
- CHECK(err || lookup_value.cnt != value.cnt,
+ CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
- "err:%d errno:%d cnt:%x(%x)\n",
- err, errno, lookup_value.cnt, value.cnt);
+ "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+ err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
/* Bump the cnt and update with BPF_EXIST | BPF_F_LOCK */
value.cnt += 1;
+ value.lock = 2;
err = bpf_map_update_elem(map_fd, &sk_fd, &value,
BPF_EXIST | BPF_F_LOCK);
CHECK(err, "bpf_map_update_elem(BPF_EXIST|BPF_F_LOCK)",
"err:%d errno:%d\n", err, errno);
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
- CHECK(err || lookup_value.cnt != value.cnt,
+ CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
- "err:%d errno:%d cnt:%x(%x)\n",
- err, errno, lookup_value.cnt, value.cnt);
+ "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+ err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
/* Bump the cnt and update with BPF_EXIST */
value.cnt += 1;
+ value.lock = 2;
err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_EXIST);
CHECK(err, "bpf_map_update_elem(BPF_EXIST)",
"err:%d errno:%d\n", err, errno);
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
- CHECK(err || lookup_value.cnt != value.cnt,
+ CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
- "err:%d errno:%d cnt:%x(%x)\n",
- err, errno, lookup_value.cnt, value.cnt);
+ "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+ err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
/* Update with BPF_NOEXIST */
value.cnt += 1;
+ value.lock = 2;
err = bpf_map_update_elem(map_fd, &sk_fd, &value,
BPF_NOEXIST | BPF_F_LOCK);
CHECK(!err || errno != EEXIST,
@@ -530,22 +529,23 @@ static void test_sk_storage_map_basic(void)
value.cnt -= 1;
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
- CHECK(err || lookup_value.cnt != value.cnt,
+ CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
- "err:%d errno:%d cnt:%x(%x)\n",
- err, errno, lookup_value.cnt, value.cnt);
+ "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+ err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
/* Bump the cnt again and update with map_flags == 0 */
value.cnt += 1;
+ value.lock = 2;
err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
CHECK(err, "bpf_map_update_elem()", "err:%d errno:%d\n",
err, errno);
err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
BPF_F_LOCK);
- CHECK(err || lookup_value.cnt != value.cnt,
+ CHECK(err || lookup_value.lock || lookup_value.cnt != value.cnt,
"bpf_map_lookup_elem_flags(BPF_F_LOCK)",
- "err:%d errno:%d cnt:%x(%x)\n",
- err, errno, lookup_value.cnt, value.cnt);
+ "err:%d errno:%d lock:%x cnt:%x(%x)\n",
+ err, errno, lookup_value.lock, lookup_value.cnt, value.cnt);
/* Test delete elem */
err = bpf_map_delete_elem(map_fd, &sk_fd);
@@ -560,31 +560,29 @@ static void test_sk_storage_map_basic(void)
CHECK(!err || errno != ENOENT, "bpf_map_delete_elem()",
"err:%d errno:%d\n", err, errno);
- memcpy(&bad_xattr, &xattr, sizeof(xattr));
+ memcpy(&bad_xattr, &map_opts, sizeof(map_opts));
bad_xattr.btf_key_type_id = 0;
- err = bpf_create_map_xattr(&bad_xattr);
- CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)",
+ err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr);
+ CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)",
"err:%d errno:%d\n", err, errno);
- memcpy(&bad_xattr, &xattr, sizeof(xattr));
+ memcpy(&bad_xattr, &map_opts, sizeof(map_opts));
bad_xattr.btf_key_type_id = 3;
- err = bpf_create_map_xattr(&bad_xattr);
- CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)",
+ err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr);
+ CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)",
"err:%d errno:%d\n", err, errno);
- memcpy(&bad_xattr, &xattr, sizeof(xattr));
- bad_xattr.max_entries = 1;
- err = bpf_create_map_xattr(&bad_xattr);
- CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)",
+ err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 1, &map_opts);
+ CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)",
"err:%d errno:%d\n", err, errno);
- memcpy(&bad_xattr, &xattr, sizeof(xattr));
+ memcpy(&bad_xattr, &map_opts, sizeof(map_opts));
bad_xattr.map_flags = 0;
- err = bpf_create_map_xattr(&bad_xattr);
+ err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr);
CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)",
"err:%d errno:%d\n", err, errno);
- xattr.btf_fd = -1;
+ map_opts.btf_fd = -1;
close(btf_fd);
close(map_fd);
close(sk_fd);
diff --git a/tools/testing/selftests/bpf/map_tests/task_storage_map.c b/tools/testing/selftests/bpf/map_tests/task_storage_map.c
new file mode 100644
index 000000000000..a4121d2248ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/map_tests/task_storage_map.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022. Huawei Technologies Co., Ltd */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "bpf_util.h"
+#include "test_maps.h"
+#include "task_local_storage_helpers.h"
+#include "read_bpf_task_storage_busy.skel.h"
+
+struct lookup_ctx {
+ bool start;
+ bool stop;
+ int pid_fd;
+ int map_fd;
+ int loop;
+};
+
+static void *lookup_fn(void *arg)
+{
+ struct lookup_ctx *ctx = arg;
+ long value;
+ int i = 0;
+
+ while (!ctx->start)
+ usleep(1);
+
+ while (!ctx->stop && i++ < ctx->loop)
+ bpf_map_lookup_elem(ctx->map_fd, &ctx->pid_fd, &value);
+ return NULL;
+}
+
+static void abort_lookup(struct lookup_ctx *ctx, pthread_t *tids, unsigned int nr)
+{
+ unsigned int i;
+
+ ctx->stop = true;
+ ctx->start = true;
+ for (i = 0; i < nr; i++)
+ pthread_join(tids[i], NULL);
+}
+
+void test_task_storage_map_stress_lookup(void)
+{
+#define MAX_NR_THREAD 4096
+ unsigned int i, nr = 256, loop = 8192, cpu = 0;
+ struct read_bpf_task_storage_busy *skel;
+ pthread_t tids[MAX_NR_THREAD];
+ struct lookup_ctx ctx;
+ cpu_set_t old, new;
+ const char *cfg;
+ int err;
+
+ cfg = getenv("TASK_STORAGE_MAP_NR_THREAD");
+ if (cfg) {
+ nr = atoi(cfg);
+ if (nr > MAX_NR_THREAD)
+ nr = MAX_NR_THREAD;
+ }
+ cfg = getenv("TASK_STORAGE_MAP_NR_LOOP");
+ if (cfg)
+ loop = atoi(cfg);
+ cfg = getenv("TASK_STORAGE_MAP_PIN_CPU");
+ if (cfg)
+ cpu = atoi(cfg);
+
+ skel = read_bpf_task_storage_busy__open_and_load();
+ err = libbpf_get_error(skel);
+ CHECK(err, "open_and_load", "error %d\n", err);
+
+ /* Only for a fully preemptible kernel */
+ if (!skel->kconfig->CONFIG_PREEMPTION) {
+ printf("%s SKIP (no CONFIG_PREEMPTION)\n", __func__);
+ read_bpf_task_storage_busy__destroy(skel);
+ skips++;
+ return;
+ }
+
+ /* Save the old affinity setting */
+ sched_getaffinity(getpid(), sizeof(old), &old);
+
+ /* Pin to a specific CPU */
+ CPU_ZERO(&new);
+ CPU_SET(cpu, &new);
+ sched_setaffinity(getpid(), sizeof(new), &new);
+
+ ctx.start = false;
+ ctx.stop = false;
+ ctx.pid_fd = sys_pidfd_open(getpid(), 0);
+ ctx.map_fd = bpf_map__fd(skel->maps.task);
+ ctx.loop = loop;
+ for (i = 0; i < nr; i++) {
+ err = pthread_create(&tids[i], NULL, lookup_fn, &ctx);
+ if (err) {
+ abort_lookup(&ctx, tids, i);
+ CHECK(err, "pthread_create", "error %d\n", err);
+ goto out;
+ }
+ }
+
+ ctx.start = true;
+ for (i = 0; i < nr; i++)
+ pthread_join(tids[i], NULL);
+
+ skel->bss->pid = getpid();
+ err = read_bpf_task_storage_busy__attach(skel);
+ CHECK(err, "attach", "error %d\n", err);
+
+ /* Trigger program */
+ sys_gettid();
+ skel->bss->pid = 0;
+
+ CHECK(skel->bss->busy != 0, "bad bpf_task_storage_busy", "got %d\n", skel->bss->busy);
+out:
+ read_bpf_task_storage_busy__destroy(skel);
+ /* Restore affinity setting */
+ sched_setaffinity(getpid(), sizeof(old), &old);
+ printf("%s:PASS\n", __func__);
+}
diff --git a/tools/testing/selftests/bpf/netcnt_common.h b/tools/testing/selftests/bpf/netcnt_common.h
index 81084c1c2c23..2d4a58e4e39c 100644
--- a/tools/testing/selftests/bpf/netcnt_common.h
+++ b/tools/testing/selftests/bpf/netcnt_common.h
@@ -6,19 +6,39 @@
#define MAX_PERCPU_PACKETS 32
-struct percpu_net_cnt {
- __u64 packets;
- __u64 bytes;
+/* sizeof(struct bpf_local_storage_elem):
+ *
+ * It is about 128 bytes on x86_64 and 512 bytes on s390x, but allocate more to
+ * account for possible layout changes, different architectures, etc.
+ * The kernel will round up to PAGE_SIZE internally anyway.
+ */
+#define SIZEOF_BPF_LOCAL_STORAGE_ELEM 768
- __u64 prev_ts;
+/* Try to estimate kernel's BPF_LOCAL_STORAGE_MAX_VALUE_SIZE: */
+#define BPF_LOCAL_STORAGE_MAX_VALUE_SIZE (0xFFFF - \
+ SIZEOF_BPF_LOCAL_STORAGE_ELEM)
- __u64 prev_packets;
- __u64 prev_bytes;
+#define PCPU_MIN_UNIT_SIZE 32768
+
+union percpu_net_cnt {
+ struct {
+ __u64 packets;
+ __u64 bytes;
+
+ __u64 prev_ts;
+
+ __u64 prev_packets;
+ __u64 prev_bytes;
+ };
+ __u8 data[PCPU_MIN_UNIT_SIZE];
};
-struct net_cnt {
- __u64 packets;
- __u64 bytes;
+union net_cnt {
+ struct {
+ __u64 packets;
+ __u64 bytes;
+ };
+ __u8 data[BPF_LOCAL_STORAGE_MAX_VALUE_SIZE];
};
#endif
diff --git a/tools/testing/selftests/bpf/netlink_helpers.c b/tools/testing/selftests/bpf/netlink_helpers.c
new file mode 100644
index 000000000000..caf36eb1d032
--- /dev/null
+++ b/tools/testing/selftests/bpf/netlink_helpers.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Taken & modified from iproute2's libnetlink.c
+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <time.h>
+#include <sys/socket.h>
+
+#include "netlink_helpers.h"
+
+static int rcvbuf = 1024 * 1024;
+
+void rtnl_close(struct rtnl_handle *rth)
+{
+ if (rth->fd >= 0) {
+ close(rth->fd);
+ rth->fd = -1;
+ }
+}
+
+int rtnl_open_byproto(struct rtnl_handle *rth, unsigned int subscriptions,
+ int protocol)
+{
+ socklen_t addr_len;
+ int sndbuf = 32768;
+ int one = 1;
+
+ memset(rth, 0, sizeof(*rth));
+ rth->proto = protocol;
+ rth->fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, protocol);
+ if (rth->fd < 0) {
+ perror("Cannot open netlink socket");
+ return -1;
+ }
+ if (setsockopt(rth->fd, SOL_SOCKET, SO_SNDBUF,
+ &sndbuf, sizeof(sndbuf)) < 0) {
+ perror("SO_SNDBUF");
+ goto err;
+ }
+ if (setsockopt(rth->fd, SOL_SOCKET, SO_RCVBUF,
+ &rcvbuf, sizeof(rcvbuf)) < 0) {
+ perror("SO_RCVBUF");
+ goto err;
+ }
+
+ /* Older kernels may not support extended ACK reporting */
+ setsockopt(rth->fd, SOL_NETLINK, NETLINK_EXT_ACK,
+ &one, sizeof(one));
+
+ memset(&rth->local, 0, sizeof(rth->local));
+ rth->local.nl_family = AF_NETLINK;
+ rth->local.nl_groups = subscriptions;
+
+ if (bind(rth->fd, (struct sockaddr *)&rth->local,
+ sizeof(rth->local)) < 0) {
+ perror("Cannot bind netlink socket");
+ goto err;
+ }
+ addr_len = sizeof(rth->local);
+ if (getsockname(rth->fd, (struct sockaddr *)&rth->local,
+ &addr_len) < 0) {
+ perror("Cannot getsockname");
+ goto err;
+ }
+ if (addr_len != sizeof(rth->local)) {
+ fprintf(stderr, "Wrong address length %d\n", addr_len);
+ goto err;
+ }
+ if (rth->local.nl_family != AF_NETLINK) {
+ fprintf(stderr, "Wrong address family %d\n",
+ rth->local.nl_family);
+ goto err;
+ }
+ rth->seq = time(NULL);
+ return 0;
+err:
+ rtnl_close(rth);
+ return -1;
+}
+
+int rtnl_open(struct rtnl_handle *rth, unsigned int subscriptions)
+{
+ return rtnl_open_byproto(rth, subscriptions, NETLINK_ROUTE);
+}
+
+static int __rtnl_recvmsg(int fd, struct msghdr *msg, int flags)
+{
+ int len;
+
+ do {
+ len = recvmsg(fd, msg, flags);
+ } while (len < 0 && (errno == EINTR || errno == EAGAIN));
+ if (len < 0) {
+ fprintf(stderr, "netlink receive error %s (%d)\n",
+ strerror(errno), errno);
+ return -errno;
+ }
+ if (len == 0) {
+ fprintf(stderr, "EOF on netlink\n");
+ return -ENODATA;
+ }
+ return len;
+}
+
+static int rtnl_recvmsg(int fd, struct msghdr *msg, char **answer)
+{
+ struct iovec *iov = msg->msg_iov;
+ char *buf;
+ int len;
+
+ iov->iov_base = NULL;
+ iov->iov_len = 0;
+
+ len = __rtnl_recvmsg(fd, msg, MSG_PEEK | MSG_TRUNC);
+ if (len < 0)
+ return len;
+ if (len < 32768)
+ len = 32768;
+ buf = malloc(len);
+ if (!buf) {
+		fprintf(stderr, "malloc error: not enough memory\n");
+ return -ENOMEM;
+ }
+ iov->iov_base = buf;
+ iov->iov_len = len;
+ len = __rtnl_recvmsg(fd, msg, 0);
+ if (len < 0) {
+ free(buf);
+ return len;
+ }
+ if (answer)
+ *answer = buf;
+ else
+ free(buf);
+ return len;
+}
+
+static void rtnl_talk_error(struct nlmsghdr *h, struct nlmsgerr *err,
+ nl_ext_ack_fn_t errfn)
+{
+ fprintf(stderr, "RTNETLINK answers: %s\n",
+ strerror(-err->error));
+}
+
+static int __rtnl_talk_iov(struct rtnl_handle *rtnl, struct iovec *iov,
+ size_t iovlen, struct nlmsghdr **answer,
+ bool show_rtnl_err, nl_ext_ack_fn_t errfn)
+{
+ struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
+ struct iovec riov;
+ struct msghdr msg = {
+ .msg_name = &nladdr,
+ .msg_namelen = sizeof(nladdr),
+ .msg_iov = iov,
+ .msg_iovlen = iovlen,
+ };
+ unsigned int seq = 0;
+ struct nlmsghdr *h;
+ int i, status;
+ char *buf;
+
+ for (i = 0; i < iovlen; i++) {
+ h = iov[i].iov_base;
+ h->nlmsg_seq = seq = ++rtnl->seq;
+ if (answer == NULL)
+ h->nlmsg_flags |= NLM_F_ACK;
+ }
+ status = sendmsg(rtnl->fd, &msg, 0);
+ if (status < 0) {
+ perror("Cannot talk to rtnetlink");
+ return -1;
+ }
+ /* change msg to use the response iov */
+ msg.msg_iov = &riov;
+ msg.msg_iovlen = 1;
+ i = 0;
+ while (1) {
+next:
+ status = rtnl_recvmsg(rtnl->fd, &msg, &buf);
+ ++i;
+ if (status < 0)
+ return status;
+ if (msg.msg_namelen != sizeof(nladdr)) {
+ fprintf(stderr,
+ "Sender address length == %d!\n",
+ msg.msg_namelen);
+ exit(1);
+ }
+ for (h = (struct nlmsghdr *)buf; status >= sizeof(*h); ) {
+ int len = h->nlmsg_len;
+ int l = len - sizeof(*h);
+
+ if (l < 0 || len > status) {
+ if (msg.msg_flags & MSG_TRUNC) {
+ fprintf(stderr, "Truncated message!\n");
+ free(buf);
+ return -1;
+ }
+ fprintf(stderr,
+ "Malformed message: len=%d!\n",
+ len);
+ exit(1);
+ }
+ if (nladdr.nl_pid != 0 ||
+ h->nlmsg_pid != rtnl->local.nl_pid ||
+ h->nlmsg_seq > seq || h->nlmsg_seq < seq - iovlen) {
+ /* Don't forget to skip that message. */
+ status -= NLMSG_ALIGN(len);
+ h = (struct nlmsghdr *)((char *)h + NLMSG_ALIGN(len));
+ continue;
+ }
+ if (h->nlmsg_type == NLMSG_ERROR) {
+ struct nlmsgerr *err = (struct nlmsgerr *)NLMSG_DATA(h);
+ int error = err->error;
+
+ if (l < sizeof(struct nlmsgerr)) {
+ fprintf(stderr, "ERROR truncated\n");
+ free(buf);
+ return -1;
+ }
+ if (error) {
+ errno = -error;
+ if (rtnl->proto != NETLINK_SOCK_DIAG &&
+ show_rtnl_err)
+ rtnl_talk_error(h, err, errfn);
+ }
+ if (i < iovlen) {
+ free(buf);
+ goto next;
+ }
+ if (error) {
+ free(buf);
+ return -i;
+ }
+ if (answer)
+ *answer = (struct nlmsghdr *)buf;
+ else
+ free(buf);
+ return 0;
+ }
+ if (answer) {
+ *answer = (struct nlmsghdr *)buf;
+ return 0;
+ }
+ fprintf(stderr, "Unexpected reply!\n");
+ status -= NLMSG_ALIGN(len);
+ h = (struct nlmsghdr *)((char *)h + NLMSG_ALIGN(len));
+ }
+ free(buf);
+ if (msg.msg_flags & MSG_TRUNC) {
+ fprintf(stderr, "Message truncated!\n");
+ continue;
+ }
+ if (status) {
+ fprintf(stderr, "Remnant of size %d!\n", status);
+ exit(1);
+ }
+ }
+}
+
+static int __rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n,
+ struct nlmsghdr **answer, bool show_rtnl_err,
+ nl_ext_ack_fn_t errfn)
+{
+ struct iovec iov = {
+ .iov_base = n,
+ .iov_len = n->nlmsg_len,
+ };
+
+ return __rtnl_talk_iov(rtnl, &iov, 1, answer, show_rtnl_err, errfn);
+}
+
+int rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n,
+ struct nlmsghdr **answer)
+{
+ return __rtnl_talk(rtnl, n, answer, true, NULL);
+}
+
+int addattr(struct nlmsghdr *n, int maxlen, int type)
+{
+ return addattr_l(n, maxlen, type, NULL, 0);
+}
+
+int addattr8(struct nlmsghdr *n, int maxlen, int type, __u8 data)
+{
+ return addattr_l(n, maxlen, type, &data, sizeof(__u8));
+}
+
+int addattr16(struct nlmsghdr *n, int maxlen, int type, __u16 data)
+{
+ return addattr_l(n, maxlen, type, &data, sizeof(__u16));
+}
+
+int addattr32(struct nlmsghdr *n, int maxlen, int type, __u32 data)
+{
+ return addattr_l(n, maxlen, type, &data, sizeof(__u32));
+}
+
+int addattr64(struct nlmsghdr *n, int maxlen, int type, __u64 data)
+{
+ return addattr_l(n, maxlen, type, &data, sizeof(__u64));
+}
+
+int addattrstrz(struct nlmsghdr *n, int maxlen, int type, const char *str)
+{
+ return addattr_l(n, maxlen, type, str, strlen(str)+1);
+}
+
+int addattr_l(struct nlmsghdr *n, int maxlen, int type, const void *data,
+ int alen)
+{
+ int len = RTA_LENGTH(alen);
+ struct rtattr *rta;
+
+ if (NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len) > maxlen) {
+ fprintf(stderr, "%s: Message exceeded bound of %d\n",
+ __func__, maxlen);
+ return -1;
+ }
+ rta = NLMSG_TAIL(n);
+ rta->rta_type = type;
+ rta->rta_len = len;
+ if (alen)
+ memcpy(RTA_DATA(rta), data, alen);
+ n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len);
+ return 0;
+}
+
+int addraw_l(struct nlmsghdr *n, int maxlen, const void *data, int len)
+{
+ if (NLMSG_ALIGN(n->nlmsg_len) + NLMSG_ALIGN(len) > maxlen) {
+ fprintf(stderr, "%s: Message exceeded bound of %d\n",
+ __func__, maxlen);
+ return -1;
+ }
+
+ memcpy(NLMSG_TAIL(n), data, len);
+ memset((void *) NLMSG_TAIL(n) + len, 0, NLMSG_ALIGN(len) - len);
+ n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + NLMSG_ALIGN(len);
+ return 0;
+}
+
+struct rtattr *addattr_nest(struct nlmsghdr *n, int maxlen, int type)
+{
+ struct rtattr *nest = NLMSG_TAIL(n);
+
+ addattr_l(n, maxlen, type, NULL, 0);
+ return nest;
+}
+
+int addattr_nest_end(struct nlmsghdr *n, struct rtattr *nest)
+{
+ nest->rta_len = (void *)NLMSG_TAIL(n) - (void *)nest;
+ return n->nlmsg_len;
+}
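
A minimal usage sketch of these helpers (not part of the patch): bring an interface up by ifindex. Passing a NULL answer pointer makes rtnl_talk() request an ACK, so a kernel error comes back as a negative return value:

#include <sys/socket.h>
#include <net/if.h>
#include "netlink_helpers.h"

static int link_set_up(int ifindex)
{
	struct {
		struct nlmsghdr n;
		struct ifinfomsg i;
	} req = {
		.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
		.n.nlmsg_flags = NLM_F_REQUEST,
		.n.nlmsg_type = RTM_NEWLINK,
		.i.ifi_family = AF_UNSPEC,
		.i.ifi_index = ifindex,
		.i.ifi_flags = IFF_UP,
		.i.ifi_change = IFF_UP,
	};
	struct rtnl_handle rth;
	int err;

	if (rtnl_open(&rth, 0))
		return -1;
	/* answer == NULL, so NLM_F_ACK is set and the kernel's ACK or
	 * error is consumed by rtnl_talk() itself */
	err = rtnl_talk(&rth, &req.n, NULL);
	rtnl_close(&rth);
	return err;
}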
diff --git a/tools/testing/selftests/bpf/netlink_helpers.h b/tools/testing/selftests/bpf/netlink_helpers.h
new file mode 100644
index 000000000000..68116818a47e
--- /dev/null
+++ b/tools/testing/selftests/bpf/netlink_helpers.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef NETLINK_HELPERS_H
+#define NETLINK_HELPERS_H
+
+#include <string.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+
+struct rtnl_handle {
+ int fd;
+ struct sockaddr_nl local;
+ struct sockaddr_nl peer;
+ __u32 seq;
+ __u32 dump;
+ int proto;
+ FILE *dump_fp;
+#define RTNL_HANDLE_F_LISTEN_ALL_NSID 0x01
+#define RTNL_HANDLE_F_SUPPRESS_NLERR 0x02
+#define RTNL_HANDLE_F_STRICT_CHK 0x04
+ int flags;
+};
+
+#define NLMSG_TAIL(nmsg) \
+ ((struct rtattr *) (((void *) (nmsg)) + NLMSG_ALIGN((nmsg)->nlmsg_len)))
+
+typedef int (*nl_ext_ack_fn_t)(const char *errmsg, uint32_t off,
+ const struct nlmsghdr *inner_nlh);
+
+int rtnl_open(struct rtnl_handle *rth, unsigned int subscriptions)
+ __attribute__((warn_unused_result));
+void rtnl_close(struct rtnl_handle *rth);
+int rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n,
+ struct nlmsghdr **answer)
+ __attribute__((warn_unused_result));
+
+int addattr(struct nlmsghdr *n, int maxlen, int type);
+int addattr8(struct nlmsghdr *n, int maxlen, int type, __u8 data);
+int addattr16(struct nlmsghdr *n, int maxlen, int type, __u16 data);
+int addattr32(struct nlmsghdr *n, int maxlen, int type, __u32 data);
+int addattr64(struct nlmsghdr *n, int maxlen, int type, __u64 data);
+int addattrstrz(struct nlmsghdr *n, int maxlen, int type, const char *data);
+int addattr_l(struct nlmsghdr *n, int maxlen, int type, const void *data, int alen);
+int addraw_l(struct nlmsghdr *n, int maxlen, const void *data, int len);
+struct rtattr *addattr_nest(struct nlmsghdr *n, int maxlen, int type);
+int addattr_nest_end(struct nlmsghdr *n, struct rtattr *nest);
+#endif /* NETLINK_HELPERS_H */
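
addattr_nest() only opens the container attribute; its rta_len is patched by addattr_nest_end() once the children are in place. A hedged sketch of the call pattern, using made-up attribute type values rather than real rtnetlink IDs:

#include "netlink_helpers.h"

static int fill_nested(struct nlmsghdr *n, int maxlen)
{
	struct rtattr *nest;

	/* Open a container attribute; rta_len is fixed up later */
	nest = addattr_nest(n, maxlen, 1 /* hypothetical type */);
	if (addattr32(n, maxlen, 2 /* hypothetical type */, 42))
		return -1;
	if (addattrstrz(n, maxlen, 3 /* hypothetical type */, "payload"))
		return -1;
	/* Close the container so rta_len covers both children */
	addattr_nest_end(n, nest);
	return 0;
}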
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index 12ee40284da0..0a6a5561bed3 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -1,18 +1,44 @@
// SPDX-License-Identifier: GPL-2.0-only
+#define _GNU_SOURCE
+
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
+#include <sched.h>
#include <arpa/inet.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <sys/eventfd.h>
#include <linux/err.h>
#include <linux/in.h>
#include <linux/in6.h>
+#include <linux/limits.h>
+
+#include <linux/ip.h>
+#include <netinet/udp.h>
+#include <netinet/tcp.h>
+#include <net/if.h>
#include "bpf_util.h"
#include "network_helpers.h"
+#include "test_progs.h"
+
+#ifdef TRAFFIC_MONITOR
+/* Prevent pcap.h from including pcap/bpf.h and causing conflicts */
+#define PCAP_DONT_INCLUDE_PCAP_BPF_H 1
+#include <pcap/pcap.h>
+#include <pcap/dlt.h>
+#endif
+
+#ifndef IPPROTO_MPTCP
+#define IPPROTO_MPTCP 262
+#endif
#define clean_errno() (errno == 0 ? "None" : strerror(errno))
#define log_err(MSG, ...) ({ \
@@ -40,7 +66,9 @@ struct ipv6_packet pkt_v6 = {
.tcp.doff = 5,
};
-static int settimeo(int fd, int timeout_ms)
+static const struct network_helper_opts default_opts;
+
+int settimeo(int fd, int timeout_ms)
{
struct timeval timeout = { .tv_sec = 3 };
@@ -66,32 +94,42 @@ static int settimeo(int fd, int timeout_ms)
#define save_errno_close(fd) ({ int __save = errno; close(fd); errno = __save; })
-int start_server(int family, int type, const char *addr_str, __u16 port,
- int timeout_ms)
+int start_server_addr(int type, const struct sockaddr_storage *addr, socklen_t addrlen,
+ const struct network_helper_opts *opts)
{
- struct sockaddr_storage addr = {};
- socklen_t len;
- int fd;
+ int on = 1, fd;
- if (make_sockaddr(family, addr_str, port, &addr, &len))
- return -1;
+ if (!opts)
+ opts = &default_opts;
- fd = socket(family, type, 0);
+ fd = socket(addr->ss_family, type, opts->proto);
if (fd < 0) {
log_err("Failed to create server socket");
return -1;
}
- if (settimeo(fd, timeout_ms))
+ if (settimeo(fd, opts->timeout_ms))
+ goto error_close;
+
+ if (type == SOCK_STREAM &&
+ setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on))) {
+ log_err("Failed to enable SO_REUSEADDR");
goto error_close;
+ }
+
+ if (opts->post_socket_cb &&
+ opts->post_socket_cb(fd, opts->cb_opts)) {
+ log_err("Failed to call post_socket_cb");
+ goto error_close;
+ }
- if (bind(fd, (const struct sockaddr *)&addr, len) < 0) {
+ if (bind(fd, (struct sockaddr *)addr, addrlen) < 0) {
log_err("Failed to bind socket");
goto error_close;
}
if (type == SOCK_STREAM) {
- if (listen(fd, 1) < 0) {
+ if (listen(fd, opts->backlog ? MAX(opts->backlog, 0) : 1) < 0) {
log_err("Failed to listed on socket");
goto error_close;
}
@@ -104,6 +142,90 @@ error_close:
return -1;
}
+int start_server_str(int family, int type, const char *addr_str, __u16 port,
+ const struct network_helper_opts *opts)
+{
+ struct sockaddr_storage addr;
+ socklen_t addrlen;
+
+ if (!opts)
+ opts = &default_opts;
+
+ if (make_sockaddr(family, addr_str, port, &addr, &addrlen))
+ return -1;
+
+ return start_server_addr(type, &addr, addrlen, opts);
+}
+
+int start_server(int family, int type, const char *addr_str, __u16 port,
+ int timeout_ms)
+{
+ struct network_helper_opts opts = {
+ .timeout_ms = timeout_ms,
+ };
+
+ return start_server_str(family, type, addr_str, port, &opts);
+}
+
+static int reuseport_cb(int fd, void *opts)
+{
+ int on = 1;
+
+ return setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));
+}
+
+int *start_reuseport_server(int family, int type, const char *addr_str,
+ __u16 port, int timeout_ms, unsigned int nr_listens)
+{
+ struct network_helper_opts opts = {
+ .timeout_ms = timeout_ms,
+ .post_socket_cb = reuseport_cb,
+ };
+ struct sockaddr_storage addr;
+ unsigned int nr_fds = 0;
+ socklen_t addrlen;
+ int *fds;
+
+ if (!nr_listens)
+ return NULL;
+
+ if (make_sockaddr(family, addr_str, port, &addr, &addrlen))
+ return NULL;
+
+ fds = malloc(sizeof(*fds) * nr_listens);
+ if (!fds)
+ return NULL;
+
+ fds[0] = start_server_addr(type, &addr, addrlen, &opts);
+ if (fds[0] == -1)
+ goto close_fds;
+ nr_fds = 1;
+
+ if (getsockname(fds[0], (struct sockaddr *)&addr, &addrlen))
+ goto close_fds;
+
+ for (; nr_fds < nr_listens; nr_fds++) {
+ fds[nr_fds] = start_server_addr(type, &addr, addrlen, &opts);
+ if (fds[nr_fds] == -1)
+ goto close_fds;
+ }
+
+ return fds;
+
+close_fds:
+ free_fds(fds, nr_fds);
+ return NULL;
+}
+
+void free_fds(int *fds, unsigned int nr_close_fds)
+{
+ if (fds) {
+ while (nr_close_fds)
+ close(fds[--nr_close_fds]);
+ free(fds);
+ }
+}
+
int fastopen_connect(int server_fd, const char *data, unsigned int data_len,
int timeout_ms)
{
@@ -141,24 +263,80 @@ error_close:
return -1;
}
-static int connect_fd_to_addr(int fd,
- const struct sockaddr_storage *addr,
- socklen_t addrlen)
+int client_socket(int family, int type,
+ const struct network_helper_opts *opts)
+{
+ int fd;
+
+ if (!opts)
+ opts = &default_opts;
+
+ fd = socket(family, type, opts->proto);
+ if (fd < 0) {
+ log_err("Failed to create client socket");
+ return -1;
+ }
+
+ if (settimeo(fd, opts->timeout_ms))
+ goto error_close;
+
+ if (opts->post_socket_cb &&
+ opts->post_socket_cb(fd, opts->cb_opts))
+ goto error_close;
+
+ return fd;
+
+error_close:
+ save_errno_close(fd);
+ return -1;
+}
+
+int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t addrlen,
+ const struct network_helper_opts *opts)
{
+ int fd;
+
+ if (!opts)
+ opts = &default_opts;
+
+ fd = client_socket(addr->ss_family, type, opts);
+ if (fd < 0) {
+ log_err("Failed to create client socket");
+ return -1;
+ }
+
if (connect(fd, (const struct sockaddr *)addr, addrlen)) {
log_err("Failed to connect to server");
+ save_errno_close(fd);
return -1;
}
- return 0;
+ return fd;
}
-int connect_to_fd(int server_fd, int timeout_ms)
+int connect_to_addr_str(int family, int type, const char *addr_str, __u16 port,
+ const struct network_helper_opts *opts)
+{
+ struct sockaddr_storage addr;
+ socklen_t addrlen;
+
+ if (!opts)
+ opts = &default_opts;
+
+ if (make_sockaddr(family, addr_str, port, &addr, &addrlen))
+ return -1;
+
+ return connect_to_addr(type, &addr, addrlen, opts);
+}
+
+int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts)
{
struct sockaddr_storage addr;
- struct sockaddr_in *addr_in;
socklen_t addrlen, optlen;
- int fd, type;
+ int type;
+
+ if (!opts)
+ opts = &default_opts;
optlen = sizeof(type);
if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) {
@@ -172,24 +350,25 @@ int connect_to_fd(int server_fd, int timeout_ms)
return -1;
}
- addr_in = (struct sockaddr_in *)&addr;
- fd = socket(addr_in->sin_family, type, 0);
- if (fd < 0) {
- log_err("Failed to create client socket");
- return -1;
- }
-
- if (settimeo(fd, timeout_ms))
- goto error_close;
+ return connect_to_addr(type, &addr, addrlen, opts);
+}
- if (connect_fd_to_addr(fd, &addr, addrlen))
- goto error_close;
+int connect_to_fd(int server_fd, int timeout_ms)
+{
+ struct network_helper_opts opts = {
+ .timeout_ms = timeout_ms,
+ };
+ socklen_t optlen;
+ int protocol;
- return fd;
+ optlen = sizeof(protocol);
+ if (getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen)) {
+		log_err("getsockopt(SO_PROTOCOL)");
+ return -1;
+ }
+ opts.proto = protocol;
-error_close:
- save_errno_close(fd);
- return -1;
+ return connect_to_fd_opts(server_fd, &opts);
}
int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms)
@@ -205,8 +384,10 @@ int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms)
return -1;
}
- if (connect_fd_to_addr(client_fd, &addr, len))
+ if (connect(client_fd, (const struct sockaddr *)&addr, len)) {
+ log_err("Failed to connect to server");
return -1;
+ }
return 0;
}
@@ -217,6 +398,7 @@ int make_sockaddr(int family, const char *addr_str, __u16 port,
if (family == AF_INET) {
struct sockaddr_in *sin = (void *)addr;
+ memset(addr, 0, sizeof(*sin));
sin->sin_family = AF_INET;
sin->sin_port = htons(port);
if (addr_str &&
@@ -230,6 +412,7 @@ int make_sockaddr(int family, const char *addr_str, __u16 port,
} else if (family == AF_INET6) {
struct sockaddr_in6 *sin6 = (void *)addr;
+ memset(addr, 0, sizeof(*sin6));
sin6->sin6_family = AF_INET6;
sin6->sin6_port = htons(port);
if (addr_str &&
@@ -240,6 +423,859 @@ int make_sockaddr(int family, const char *addr_str, __u16 port,
if (len)
*len = sizeof(*sin6);
return 0;
+ } else if (family == AF_UNIX) {
+ /* Note that we always use abstract unix sockets to avoid having
+ * to clean up leftover files.
+ */
+ struct sockaddr_un *sun = (void *)addr;
+
+ memset(addr, 0, sizeof(*sun));
+ sun->sun_family = family;
+ sun->sun_path[0] = 0;
+ strcpy(sun->sun_path + 1, addr_str);
+ if (len)
+ *len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(addr_str);
+ return 0;
}
return -1;
}
+
+char *ping_command(int family)
+{
+ if (family == AF_INET6) {
+ /* On some systems 'ping' doesn't support IPv6, so use ping6 if it is present. */
+ if (!system("which ping6 >/dev/null 2>&1"))
+ return "ping6";
+ else
+ return "ping -6";
+ }
+ return "ping";
+}
+
+int append_tid(char *str, size_t sz)
+{
+ size_t end;
+
+ if (!str)
+ return -1;
+
+ end = strlen(str);
+ if (end + 8 > sz)
+ return -1;
+
+ sprintf(&str[end], "%07ld", sys_gettid());
+ str[end + 7] = '\0';
+
+ return 0;
+}
+
+int remove_netns(const char *name)
+{
+ char *cmd;
+ int r;
+
+ r = asprintf(&cmd, "ip netns del %s >/dev/null 2>&1", name);
+ if (r < 0) {
+ log_err("Failed to malloc cmd");
+ return -1;
+ }
+
+ r = system(cmd);
+ free(cmd);
+ return r;
+}
+
+int make_netns(const char *name)
+{
+ char *cmd;
+ int r;
+
+ r = asprintf(&cmd, "ip netns add %s", name);
+ if (r < 0) {
+ log_err("Failed to malloc cmd");
+ return -1;
+ }
+
+ r = system(cmd);
+ free(cmd);
+
+ if (r)
+ return r;
+
+ r = asprintf(&cmd, "ip -n %s link set lo up", name);
+ if (r < 0) {
+ log_err("Failed to malloc cmd for setting up lo");
+ remove_netns(name);
+ return -1;
+ }
+
+ r = system(cmd);
+ free(cmd);
+
+ return r;
+}
+
+struct nstoken {
+ int orig_netns_fd;
+};
+
+struct nstoken *open_netns(const char *name)
+{
+ int nsfd;
+ char nspath[PATH_MAX];
+ int err;
+ struct nstoken *token;
+
+ token = calloc(1, sizeof(struct nstoken));
+ if (!token) {
+ log_err("Failed to malloc token");
+ return NULL;
+ }
+
+ token->orig_netns_fd = open("/proc/self/ns/net", O_RDONLY);
+ if (token->orig_netns_fd == -1) {
+ log_err("Failed to open(/proc/self/ns/net)");
+ goto fail;
+ }
+
+ snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name);
+ nsfd = open(nspath, O_RDONLY | O_CLOEXEC);
+ if (nsfd == -1) {
+ log_err("Failed to open(%s)", nspath);
+ goto fail;
+ }
+
+ err = setns(nsfd, CLONE_NEWNET);
+ close(nsfd);
+ if (err) {
+ log_err("Failed to setns(nsfd)");
+ goto fail;
+ }
+
+ return token;
+fail:
+ if (token->orig_netns_fd != -1)
+ close(token->orig_netns_fd);
+ free(token);
+ return NULL;
+}
+
+void close_netns(struct nstoken *token)
+{
+ if (!token)
+ return;
+
+ if (setns(token->orig_netns_fd, CLONE_NEWNET))
+ log_err("Failed to setns(orig_netns_fd)");
+ close(token->orig_netns_fd);
+ free(token);
+}
+
+int open_tuntap(const char *dev_name, bool need_mac)
+{
+ int err = 0;
+ struct ifreq ifr;
+ int fd = open("/dev/net/tun", O_RDWR);
+
+ if (!ASSERT_GE(fd, 0, "open(/dev/net/tun)"))
+ return -1;
+
+ ifr.ifr_flags = IFF_NO_PI | (need_mac ? IFF_TAP : IFF_TUN);
+ strncpy(ifr.ifr_name, dev_name, IFNAMSIZ - 1);
+ ifr.ifr_name[IFNAMSIZ - 1] = '\0';
+
+ err = ioctl(fd, TUNSETIFF, &ifr);
+ if (!ASSERT_OK(err, "ioctl(TUNSETIFF)")) {
+ close(fd);
+ return -1;
+ }
+
+ err = fcntl(fd, F_SETFL, O_NONBLOCK);
+ if (!ASSERT_OK(err, "fcntl(O_NONBLOCK)")) {
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
+int get_socket_local_port(int sock_fd)
+{
+ struct sockaddr_storage addr;
+ socklen_t addrlen = sizeof(addr);
+ int err;
+
+ err = getsockname(sock_fd, (struct sockaddr *)&addr, &addrlen);
+ if (err < 0)
+ return err;
+
+ if (addr.ss_family == AF_INET) {
+ struct sockaddr_in *sin = (struct sockaddr_in *)&addr;
+
+ return sin->sin_port;
+ } else if (addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&addr;
+
+ return sin->sin6_port;
+ }
+
+ return -1;
+}
+
+int get_hw_ring_size(char *ifname, struct ethtool_ringparam *ring_param)
+{
+ struct ifreq ifr = {0};
+ int sockfd, err;
+
+ sockfd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sockfd < 0)
+ return -errno;
+
+ memcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
+
+ ring_param->cmd = ETHTOOL_GRINGPARAM;
+ ifr.ifr_data = (char *)ring_param;
+
+ if (ioctl(sockfd, SIOCETHTOOL, &ifr) < 0) {
+ err = errno;
+ close(sockfd);
+ return -err;
+ }
+
+ close(sockfd);
+ return 0;
+}
+
+int set_hw_ring_size(char *ifname, struct ethtool_ringparam *ring_param)
+{
+ struct ifreq ifr = {0};
+ int sockfd, err;
+
+ sockfd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sockfd < 0)
+ return -errno;
+
+ memcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
+
+ ring_param->cmd = ETHTOOL_SRINGPARAM;
+ ifr.ifr_data = (char *)ring_param;
+
+ if (ioctl(sockfd, SIOCETHTOOL, &ifr) < 0) {
+ err = errno;
+ close(sockfd);
+ return -err;
+ }
+
+ close(sockfd);
+ return 0;
+}
+
+struct send_recv_arg {
+ int fd;
+ uint32_t bytes;
+ int stop;
+};
+
+static void *send_recv_server(void *arg)
+{
+ struct send_recv_arg *a = (struct send_recv_arg *)arg;
+ ssize_t nr_sent = 0, bytes = 0;
+ char batch[1500];
+ int err = 0, fd;
+
+ fd = accept(a->fd, NULL, NULL);
+ while (fd == -1) {
+ if (errno == EINTR)
+ continue;
+ err = -errno;
+ goto done;
+ }
+
+ if (settimeo(fd, 0)) {
+ err = -errno;
+ goto done;
+ }
+
+ while (bytes < a->bytes && !READ_ONCE(a->stop)) {
+ nr_sent = send(fd, &batch,
+ MIN(a->bytes - bytes, sizeof(batch)), 0);
+ if (nr_sent == -1 && errno == EINTR)
+ continue;
+ if (nr_sent == -1) {
+ err = -errno;
+ break;
+ }
+ bytes += nr_sent;
+ }
+
+ if (bytes != a->bytes) {
+ log_err("send %zd expected %u", bytes, a->bytes);
+ if (!err)
+ err = bytes > a->bytes ? -E2BIG : -EINTR;
+ }
+
+done:
+ if (fd >= 0)
+ close(fd);
+ if (err) {
+ WRITE_ONCE(a->stop, 1);
+ return ERR_PTR(err);
+ }
+ return NULL;
+	/* Older kernels may not support extended ACK reporting */
+
+int send_recv_data(int lfd, int fd, uint32_t total_bytes)
+{
+ ssize_t nr_recv = 0, bytes = 0;
+ struct send_recv_arg arg = {
+ .fd = lfd,
+ .bytes = total_bytes,
+ .stop = 0,
+ };
+ pthread_t srv_thread;
+ void *thread_ret;
+ char batch[1500];
+ int err = 0;
+
+ err = pthread_create(&srv_thread, NULL, send_recv_server, (void *)&arg);
+ if (err) {
+ log_err("Failed to pthread_create");
+ return err;
+ }
+
+ /* recv total_bytes */
+ while (bytes < total_bytes && !READ_ONCE(arg.stop)) {
+ nr_recv = recv(fd, &batch,
+ MIN(total_bytes - bytes, sizeof(batch)), 0);
+ if (nr_recv == -1 && errno == EINTR)
+ continue;
+ if (nr_recv == -1) {
+ err = -errno;
+ break;
+ }
+ bytes += nr_recv;
+ }
+
+ if (bytes != total_bytes) {
+ log_err("recv %zd expected %u", bytes, total_bytes);
+ if (!err)
+ err = bytes > total_bytes ? -E2BIG : -EINTR;
+ }
+
+ WRITE_ONCE(arg.stop, 1);
+ pthread_join(srv_thread, &thread_ret);
+ if (IS_ERR(thread_ret)) {
+ log_err("Failed in thread_ret %ld", PTR_ERR(thread_ret));
+ err = err ? : PTR_ERR(thread_ret);
+ }
+
+ return err;
+}
+
+int tc_prog_attach(const char *dev, int ingress_fd, int egress_fd)
+{
+ int ifindex, ret;
+
+ if (!ASSERT_TRUE(ingress_fd >= 0 || egress_fd >= 0,
+ "at least one program fd is valid"))
+ return -1;
+
+ ifindex = if_nametoindex(dev);
+ if (!ASSERT_NEQ(ifindex, 0, "get ifindex"))
+ return -1;
+
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = ifindex,
+ .attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS);
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts1, .handle = 1,
+ .priority = 1, .prog_fd = ingress_fd);
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts2, .handle = 1,
+ .priority = 1, .prog_fd = egress_fd);
+
+ ret = bpf_tc_hook_create(&hook);
+ if (!ASSERT_OK(ret, "create tc hook"))
+ return ret;
+
+ if (ingress_fd >= 0) {
+ hook.attach_point = BPF_TC_INGRESS;
+ ret = bpf_tc_attach(&hook, &opts1);
+ if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+ bpf_tc_hook_destroy(&hook);
+ return ret;
+ }
+ }
+
+ if (egress_fd >= 0) {
+ hook.attach_point = BPF_TC_EGRESS;
+ ret = bpf_tc_attach(&hook, &opts2);
+ if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+ bpf_tc_hook_destroy(&hook);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef TRAFFIC_MONITOR
+struct tmonitor_ctx {
+ pcap_t *pcap;
+ pcap_dumper_t *dumper;
+ pthread_t thread;
+ int wake_fd;
+
+ volatile bool done;
+ char pkt_fname[PATH_MAX];
+ int pcap_fd;
+};
+
+static int __base_pr(const char *format, va_list args)
+{
+ return vfprintf(stdout, format, args);
+}
+
+static tm_print_fn_t __tm_pr = __base_pr;
+
+tm_print_fn_t traffic_monitor_set_print(tm_print_fn_t fn)
+{
+ tm_print_fn_t old_print_fn;
+
+ old_print_fn = __atomic_exchange_n(&__tm_pr, fn, __ATOMIC_RELAXED);
+
+ return old_print_fn;
+}
+
+void tm_print(const char *format, ...)
+{
+ tm_print_fn_t print_fn;
+ va_list args;
+
+ print_fn = __atomic_load_n(&__tm_pr, __ATOMIC_RELAXED);
+ if (!print_fn)
+ return;
+
+ va_start(args, format);
+ print_fn(format, args);
+ va_end(args);
+}
+
+/* Is this packet captured with an Ethernet protocol type? */
+static bool is_ethernet(const u_char *packet)
+{
+ u16 arphdr_type;
+
+ memcpy(&arphdr_type, packet + 8, 2);
+ arphdr_type = ntohs(arphdr_type);
+
+	/* Except for the following cases, the protocol type field contains
+	 * the Ethernet protocol type of the packet.
+ *
+ * https://www.tcpdump.org/linktypes/LINKTYPE_LINUX_SLL2.html
+ */
+ switch (arphdr_type) {
+ case 770: /* ARPHRD_FRAD */
+	case 778: /* ARPHRD_IPGRE */
+ case 803: /* ARPHRD_IEEE80211_RADIOTAP */
+ tm_print("Packet captured: arphdr_type=%d\n", arphdr_type);
+ return false;
+ }
+ return true;
+}
+
+static const char * const pkt_types[] = {
+ "In",
+ "B", /* Broadcast */
+ "M", /* Multicast */
+	"C", /* Captured in promiscuous mode */
+ "Out",
+};
+
+static const char *pkt_type_str(u16 pkt_type)
+{
+ if (pkt_type < ARRAY_SIZE(pkt_types))
+ return pkt_types[pkt_type];
+ return "Unknown";
+}
+
+#define MAX_FLAGS_STRLEN 21
+/* Show the information of the transport layer in the packet */
+static void show_transport(const u_char *packet, u16 len, u32 ifindex,
+ const char *src_addr, const char *dst_addr,
+ u16 proto, bool ipv6, u8 pkt_type)
+{
+ char *ifname, _ifname[IF_NAMESIZE], flags[MAX_FLAGS_STRLEN] = "";
+ const char *transport_str;
+ u16 src_port, dst_port;
+ struct udphdr *udp;
+ struct tcphdr *tcp;
+
+ ifname = if_indextoname(ifindex, _ifname);
+ if (!ifname) {
+ snprintf(_ifname, sizeof(_ifname), "unknown(%d)", ifindex);
+ ifname = _ifname;
+ }
+
+ if (proto == IPPROTO_UDP) {
+ udp = (struct udphdr *)packet;
+ src_port = ntohs(udp->source);
+ dst_port = ntohs(udp->dest);
+ transport_str = "UDP";
+ } else if (proto == IPPROTO_TCP) {
+ tcp = (struct tcphdr *)packet;
+ src_port = ntohs(tcp->source);
+ dst_port = ntohs(tcp->dest);
+ transport_str = "TCP";
+ } else if (proto == IPPROTO_ICMP) {
+ tm_print("%-7s %-3s IPv4 %s > %s: ICMP, length %d, type %d, code %d\n",
+ ifname, pkt_type_str(pkt_type), src_addr, dst_addr, len,
+ packet[0], packet[1]);
+ return;
+ } else if (proto == IPPROTO_ICMPV6) {
+ tm_print("%-7s %-3s IPv6 %s > %s: ICMPv6, length %d, type %d, code %d\n",
+ ifname, pkt_type_str(pkt_type), src_addr, dst_addr, len,
+ packet[0], packet[1]);
+ return;
+ } else {
+ tm_print("%-7s %-3s %s %s > %s: protocol %d\n",
+ ifname, pkt_type_str(pkt_type), ipv6 ? "IPv6" : "IPv4",
+ src_addr, dst_addr, proto);
+ return;
+ }
+
+	/* TCP or UDP */
+
+ if (proto == IPPROTO_TCP)
+ snprintf(flags, MAX_FLAGS_STRLEN, "%s%s%s%s",
+ tcp->fin ? ", FIN" : "",
+ tcp->syn ? ", SYN" : "",
+ tcp->rst ? ", RST" : "",
+ tcp->ack ? ", ACK" : "");
+
+ if (ipv6)
+ tm_print("%-7s %-3s IPv6 %s.%d > %s.%d: %s, length %d%s\n",
+ ifname, pkt_type_str(pkt_type), src_addr, src_port,
+ dst_addr, dst_port, transport_str, len, flags);
+ else
+ tm_print("%-7s %-3s IPv4 %s:%d > %s:%d: %s, length %d%s\n",
+ ifname, pkt_type_str(pkt_type), src_addr, src_port,
+ dst_addr, dst_port, transport_str, len, flags);
+}
+
+static void show_ipv6_packet(const u_char *packet, u32 ifindex, u8 pkt_type)
+{
+ char src_buf[INET6_ADDRSTRLEN], dst_buf[INET6_ADDRSTRLEN];
+ struct ipv6hdr *pkt = (struct ipv6hdr *)packet;
+ const char *src, *dst;
+ u_char proto;
+
+ src = inet_ntop(AF_INET6, &pkt->saddr, src_buf, sizeof(src_buf));
+ if (!src)
+ src = "<invalid>";
+ dst = inet_ntop(AF_INET6, &pkt->daddr, dst_buf, sizeof(dst_buf));
+ if (!dst)
+ dst = "<invalid>";
+ proto = pkt->nexthdr;
+ show_transport(packet + sizeof(struct ipv6hdr),
+ ntohs(pkt->payload_len),
+ ifindex, src, dst, proto, true, pkt_type);
+}
+
+static void show_ipv4_packet(const u_char *packet, u32 ifindex, u8 pkt_type)
+{
+ char src_buf[INET_ADDRSTRLEN], dst_buf[INET_ADDRSTRLEN];
+ struct iphdr *pkt = (struct iphdr *)packet;
+ const char *src, *dst;
+ u_char proto;
+
+ src = inet_ntop(AF_INET, &pkt->saddr, src_buf, sizeof(src_buf));
+ if (!src)
+ src = "<invalid>";
+ dst = inet_ntop(AF_INET, &pkt->daddr, dst_buf, sizeof(dst_buf));
+ if (!dst)
+ dst = "<invalid>";
+ proto = pkt->protocol;
+ show_transport(packet + sizeof(struct iphdr),
+ ntohs(pkt->tot_len),
+ ifindex, src, dst, proto, false, pkt_type);
+}
+
+static void *traffic_monitor_thread(void *arg)
+{
+ char *ifname, _ifname[IF_NAMESIZE];
+ const u_char *packet, *payload;
+ struct tmonitor_ctx *ctx = arg;
+ pcap_dumper_t *dumper = ctx->dumper;
+ int fd = ctx->pcap_fd, nfds, r;
+ int wake_fd = ctx->wake_fd;
+ struct pcap_pkthdr header;
+ pcap_t *pcap = ctx->pcap;
+ u32 ifindex;
+ fd_set fds;
+ u16 proto;
+ u8 ptype;
+
+ nfds = (fd > wake_fd ? fd : wake_fd) + 1;
+ FD_ZERO(&fds);
+
+ while (!ctx->done) {
+ FD_SET(fd, &fds);
+ FD_SET(wake_fd, &fds);
+ r = select(nfds, &fds, NULL, NULL, NULL);
+ if (!r)
+ continue;
+ if (r < 0) {
+ if (errno == EINTR)
+ continue;
+			log_err("Failed to select on pcap fd and wake fd");
+ break;
+ }
+
+ /* This instance of pcap is non-blocking */
+ packet = pcap_next(pcap, &header);
+ if (!packet)
+ continue;
+
+		/* According to the man page of pcap_dump(), the first
+		 * argument is the pcap_dumper_t pointer even though its
+		 * declared type is u_char *.
+ */
+ pcap_dump((u_char *)dumper, &header, packet);
+
+		/* Other link-layer types are not handled here; we parse
+		 * only Ethernet and compatible packets.
+ */
+ if (!is_ethernet(packet))
+ continue;
+
+ /* Skip SLL2 header
+ * https://www.tcpdump.org/linktypes/LINKTYPE_LINUX_SLL2.html
+ *
+		 * Although the document doesn't mention it, the payload
+		 * doesn't include the Ethernet header; it starts at the
+		 * first byte of the network-layer header.
+ */
+ payload = packet + 20;
+
+ memcpy(&proto, packet, 2);
+ proto = ntohs(proto);
+ memcpy(&ifindex, packet + 4, 4);
+ ifindex = ntohl(ifindex);
+ ptype = packet[10];
+
+ if (proto == ETH_P_IPV6) {
+ show_ipv6_packet(payload, ifindex, ptype);
+ } else if (proto == ETH_P_IP) {
+ show_ipv4_packet(payload, ifindex, ptype);
+ } else {
+ ifname = if_indextoname(ifindex, _ifname);
+ if (!ifname) {
+ snprintf(_ifname, sizeof(_ifname), "unknown(%d)", ifindex);
+ ifname = _ifname;
+ }
+
+ tm_print("%-7s %-3s Unknown network protocol type 0x%x\n",
+ ifname, pkt_type_str(ptype), proto);
+ }
+ }
+
+ return NULL;
+}
+
+/* Prepare the pcap handle to capture packets.
+ *
+ * This pcap is non-blocking and immediate mode is enabled to receive
+ * captured packets as soon as possible. The snaplen is set to 1024 bytes
+ * to limit the size of captured content. The format of the link-layer
+ * header is set to DLT_LINUX_SLL2 to enable handling various link-layer
+ * technologies.
+ */
+static pcap_t *traffic_monitor_prepare_pcap(void)
+{
+ char errbuf[PCAP_ERRBUF_SIZE];
+ pcap_t *pcap;
+ int r;
+
+ /* Listen on all NICs in the namespace */
+ pcap = pcap_create("any", errbuf);
+ if (!pcap) {
+ log_err("Failed to open pcap: %s", errbuf);
+ return NULL;
+ }
+ /* Limit the size of the packet (first N bytes) */
+ r = pcap_set_snaplen(pcap, 1024);
+ if (r) {
+ log_err("Failed to set snaplen: %s", pcap_geterr(pcap));
+ goto error;
+ }
+ /* To receive packets as fast as possible */
+ r = pcap_set_immediate_mode(pcap, 1);
+ if (r) {
+ log_err("Failed to set immediate mode: %s", pcap_geterr(pcap));
+ goto error;
+ }
+ r = pcap_setnonblock(pcap, 1, errbuf);
+ if (r) {
+ log_err("Failed to set nonblock: %s", errbuf);
+ goto error;
+ }
+ r = pcap_activate(pcap);
+ if (r) {
+ log_err("Failed to activate pcap: %s", pcap_geterr(pcap));
+ goto error;
+ }
+ /* Determine the format of the link-layer header */
+ r = pcap_set_datalink(pcap, DLT_LINUX_SLL2);
+ if (r) {
+ log_err("Failed to set datalink: %s", pcap_geterr(pcap));
+ goto error;
+ }
+
+ return pcap;
+error:
+ pcap_close(pcap);
+ return NULL;
+}
+
+static void encode_test_name(char *buf, size_t len, const char *test_name, const char *subtest_name)
+{
+ char *p;
+
+ if (subtest_name)
+ snprintf(buf, len, "%s__%s", test_name, subtest_name);
+ else
+ snprintf(buf, len, "%s", test_name);
+ while ((p = strchr(buf, '/')))
+ *p = '_';
+ while ((p = strchr(buf, ' ')))
+ *p = '_';
+}
+
+#define PCAP_DIR "/tmp/tmon_pcap"
+
+/* Start to monitor the network traffic in the given network namespace.
+ *
+ * netns: the name of the network namespace to monitor. If NULL, the
+ * current network namespace is monitored.
+ * test_name: the name of the running test.
+ * subtest_name: the name of the running subtest, if any. It should be
+ *               NULL when there is no subtest.
+ *
+ * This function will start a thread to capture packets going through NICs
+ * in the give network namespace.
+ */
+struct tmonitor_ctx *traffic_monitor_start(const char *netns, const char *test_name,
+ const char *subtest_name)
+{
+ struct nstoken *nstoken = NULL;
+ struct tmonitor_ctx *ctx;
+ char test_name_buf[64];
+ static int tmon_seq;
+ int r;
+
+ if (netns) {
+ nstoken = open_netns(netns);
+ if (!nstoken)
+ return NULL;
+ }
+ ctx = malloc(sizeof(*ctx));
+ if (!ctx) {
+ log_err("Failed to malloc ctx");
+ goto fail_ctx;
+ }
+ memset(ctx, 0, sizeof(*ctx));
+
+ encode_test_name(test_name_buf, sizeof(test_name_buf), test_name, subtest_name);
+ snprintf(ctx->pkt_fname, sizeof(ctx->pkt_fname),
+ PCAP_DIR "/packets-%d-%d-%s-%s.log", getpid(), tmon_seq++,
+ test_name_buf, netns ? netns : "unknown");
+
+ r = mkdir(PCAP_DIR, 0755);
+ if (r && errno != EEXIST) {
+ log_err("Failed to create " PCAP_DIR);
+ goto fail_pcap;
+ }
+
+ ctx->pcap = traffic_monitor_prepare_pcap();
+ if (!ctx->pcap)
+ goto fail_pcap;
+ ctx->pcap_fd = pcap_get_selectable_fd(ctx->pcap);
+ if (ctx->pcap_fd < 0) {
+ log_err("Failed to get pcap fd");
+ goto fail_dumper;
+ }
+
+ /* Create a packet file */
+ ctx->dumper = pcap_dump_open(ctx->pcap, ctx->pkt_fname);
+ if (!ctx->dumper) {
+ log_err("Failed to open pcap dump: %s", ctx->pkt_fname);
+ goto fail_dumper;
+ }
+
+ /* Create an eventfd to wake up the monitor thread */
+ ctx->wake_fd = eventfd(0, 0);
+ if (ctx->wake_fd < 0) {
+ log_err("Failed to create eventfd");
+ goto fail_eventfd;
+ }
+
+ r = pthread_create(&ctx->thread, NULL, traffic_monitor_thread, ctx);
+ if (r) {
+ log_err("Failed to create thread");
+ goto fail;
+ }
+
+ close_netns(nstoken);
+
+ return ctx;
+
+fail:
+ close(ctx->wake_fd);
+
+fail_eventfd:
+ pcap_dump_close(ctx->dumper);
+ unlink(ctx->pkt_fname);
+
+fail_dumper:
+ pcap_close(ctx->pcap);
+
+fail_pcap:
+ free(ctx);
+
+fail_ctx:
+ close_netns(nstoken);
+
+ return NULL;
+}
+
+static void traffic_monitor_release(struct tmonitor_ctx *ctx)
+{
+ pcap_close(ctx->pcap);
+ pcap_dump_close(ctx->dumper);
+
+ close(ctx->wake_fd);
+
+ free(ctx);
+}
+
+/* Stop the network traffic monitor.
+ *
+ * ctx: the context returned by traffic_monitor_start()
+ */
+void traffic_monitor_stop(struct tmonitor_ctx *ctx)
+{
+ __u64 w = 1;
+
+ if (!ctx)
+ return;
+
+ /* Stop the monitor thread */
+ ctx->done = true;
+ /* Wake up the background thread. */
+ write(ctx->wake_fd, &w, sizeof(w));
+ pthread_join(ctx->thread, NULL);
+
+ tm_print("Packet file: %s\n", strrchr(ctx->pkt_fname, '/') + 1);
+
+ traffic_monitor_release(ctx);
+}
+
+#endif /* TRAFFIC_MONITOR */
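
A hedged usage sketch of the reworked server/client helpers (addresses and sizes are arbitrary): start a TCP server with an explicit backlog, connect a client that inherits the server's SO_PROTOCOL via connect_to_fd(), and push 1 MiB through the pair with send_recv_data():

#include <unistd.h>
#include "network_helpers.h"

static int loopback_transfer(void)
{
	struct network_helper_opts opts = {
		.timeout_ms = 3000,
		.backlog = 8,	/* > 0, so passed to listen() as-is */
	};
	int lfd, cfd, err = -1;

	lfd = start_server_str(AF_INET, SOCK_STREAM, "127.0.0.1", 0, &opts);
	if (lfd < 0)
		return -1;

	cfd = connect_to_fd(lfd, 3000);
	if (cfd < 0)
		goto close_server;

	/* The spawned server thread send()s, this thread recv()s */
	err = send_recv_data(lfd, cfd, 1 << 20);
	close(cfd);
close_server:
	close(lfd);
	return err;
}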
diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
index 7205f8afdba1..79a010c88e11 100644
--- a/tools/testing/selftests/bpf/network_helpers.h
+++ b/tools/testing/selftests/bpf/network_helpers.h
@@ -1,22 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NETWORK_HELPERS_H
#define __NETWORK_HELPERS_H
+#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <linux/types.h>
typedef __u16 __sum16;
#include <linux/if_ether.h>
#include <linux/if_packet.h>
+#include <linux/if_tun.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+#include <linux/err.h>
#include <netinet/tcp.h>
+#include <netinet/udp.h>
#include <bpf/bpf_endian.h>
+#include <net/if.h>
+#include <stdio.h>
#define MAGIC_VAL 0x1234
#define NUM_ITER 100000
#define VIP_NUM 5
#define MAGIC_BYTES 123
+struct network_helper_opts {
+ int timeout_ms;
+ int proto;
+ /* +ve: Passed to listen() as-is.
+ * 0: Default when the test does not set
+ * a particular value during the struct init.
+ * It is changed to 1 before passing to listen().
+	 *    Most tests only have one ongoing connection.
+ * -ve: It is changed to 0 before passing to listen().
+ * It is useful to force syncookie without
+ * changing the "tcp_syncookies" sysctl from 1 to 2.
+ */
+ int backlog;
+ int (*post_socket_cb)(int fd, void *opts);
+ void *cb_opts;
+};
+
/* ipv4 test vector */
struct ipv4_packet {
struct ethhdr eth;
@@ -33,13 +58,239 @@ struct ipv6_packet {
} __packed;
extern struct ipv6_packet pkt_v6;
+int settimeo(int fd, int timeout_ms);
+int start_server_str(int family, int type, const char *addr_str, __u16 port,
+ const struct network_helper_opts *opts);
int start_server(int family, int type, const char *addr, __u16 port,
int timeout_ms);
+int *start_reuseport_server(int family, int type, const char *addr_str,
+ __u16 port, int timeout_ms,
+ unsigned int nr_listens);
+int start_server_addr(int type, const struct sockaddr_storage *addr, socklen_t len,
+ const struct network_helper_opts *opts);
+void free_fds(int *fds, unsigned int nr_close_fds);
+int client_socket(int family, int type,
+ const struct network_helper_opts *opts);
+int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t len,
+ const struct network_helper_opts *opts);
+int connect_to_addr_str(int family, int type, const char *addr_str, __u16 port,
+ const struct network_helper_opts *opts);
int connect_to_fd(int server_fd, int timeout_ms);
+int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts);
int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms);
int fastopen_connect(int server_fd, const char *data, unsigned int data_len,
int timeout_ms);
int make_sockaddr(int family, const char *addr_str, __u16 port,
struct sockaddr_storage *addr, socklen_t *len);
+char *ping_command(int family);
+int get_socket_local_port(int sock_fd);
+int get_hw_ring_size(char *ifname, struct ethtool_ringparam *ring_param);
+int set_hw_ring_size(char *ifname, struct ethtool_ringparam *ring_param);
+
+int open_tuntap(const char *dev_name, bool need_mac);
+
+struct nstoken;
+/**
+ * open_netns() - Switch to specified network namespace by name.
+ *
+ * Returns token with which to restore the original namespace
+ * using close_netns().
+ */
+struct nstoken *open_netns(const char *name);
+void close_netns(struct nstoken *token);
+int send_recv_data(int lfd, int fd, uint32_t total_bytes);
+int make_netns(const char *name);
+int remove_netns(const char *name);
+
+/**
+ * append_tid() - Append thread ID to the given string.
+ *
+ * @str: string to extend
+ * @sz: string's size
+ *
+ * 8 characters are used to append the thread ID (7 digits + '\0')
+ *
+ * Returns -1 on errors, 0 otherwise
+ */
+int append_tid(char *str, size_t sz);
+
+static __u16 csum_fold(__u32 csum)
+{
+ csum = (csum & 0xffff) + (csum >> 16);
+ csum = (csum & 0xffff) + (csum >> 16);
+
+ return (__u16)~csum;
+}
+
+static __wsum csum_partial(const void *buf, int len, __wsum sum)
+{
+ __u16 *p = (__u16 *)buf;
+ int num_u16 = len >> 1;
+ int i;
+
+ for (i = 0; i < num_u16; i++)
+ sum += p[i];
+
+ return sum;
+}
+
+static inline __sum16 build_ip_csum(struct iphdr *iph)
+{
+ __u32 sum = 0;
+ __u16 *p;
+
+ iph->check = 0;
+ p = (void *)iph;
+ sum = csum_partial(p, iph->ihl << 2, 0);
+
+ return csum_fold(sum);
+}
+
+/**
+ * csum_tcpudp_magic - compute IP pseudo-header checksum
+ *
+ * Compute the IPv4 pseudo-header checksum. The helper can take an
+ * accumulated sum from the transport layer, fold it in, and directly
+ * return the transport-layer checksum.
+ *
+ * @saddr: IP source address
+ * @daddr: IP dest address
+ * @len: IP data size
+ * @proto: transport layer protocol
+ * @csum: The accumulated partial sum to add to the computation
+ *
+ * Returns the folded sum
+ */
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto,
+ __wsum csum)
+{
+ __u64 s = csum;
+
+ s += (__u32)saddr;
+ s += (__u32)daddr;
+ s += htons(proto + len);
+ s = (s & 0xffffffff) + (s >> 32);
+ s = (s & 0xffffffff) + (s >> 32);
+
+ return csum_fold((__u32)s);
+}
+
+/**
+ * csum_ipv6_magic - compute IPv6 pseudo-header checksum
+ *
+ * Compute the IPv6 pseudo-header checksum. The helper can take an
+ * accumulated sum from the transport layer, fold it in, and directly
+ * return the transport-layer checksum.
+ *
+ * @saddr: IPv6 source address
+ * @daddr: IPv6 dest address
+ * @len: IPv6 data size
+ * @proto: transport layer protocol
+ * @csum: The accumulated partial sum to add to the computation
+ *
+ * Returns the folded sum
+ */
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto,
+ __wsum csum)
+{
+ __u64 s = csum;
+ int i;
+
+ for (i = 0; i < 4; i++)
+ s += (__u32)saddr->s6_addr32[i];
+ for (i = 0; i < 4; i++)
+ s += (__u32)daddr->s6_addr32[i];
+ s += htons(proto + len);
+ s = (s & 0xffffffff) + (s >> 32);
+ s = (s & 0xffffffff) + (s >> 32);
+
+ return csum_fold((__u32)s);
+}
+
+/**
+ * build_udp_v4_csum - compute UDP checksum for UDP over IPv4
+ *
+ * Compute the checksum to embed in UDP header, composed of the sum of IP
+ * pseudo-header checksum, UDP header checksum and UDP data checksum
+ * @iph: IP header
+ * @udph: UDP header, which must be immediately followed by UDP data
+ *
+ * Returns the total checksum
+ */
+
+static inline __sum16 build_udp_v4_csum(const struct iphdr *iph,
+ const struct udphdr *udph)
+{
+ unsigned long sum;
+
+ sum = csum_partial(udph, ntohs(udph->len), 0);
+ return csum_tcpudp_magic(iph->saddr, iph->daddr, ntohs(udph->len),
+ IPPROTO_UDP, sum);
+}
+
+/**
+ * build_udp_v6_csum - compute UDP checksum for UDP over IPv6
+ *
+ * Compute the checksum to embed in UDP header, composed of the sum of IPv6
+ * pseudo-header checksum, UDP header checksum and UDP data checksum
+ * @ip6h: IPv6 header
+ * @udph: UDP header, which must be immediately followed by UDP data
+ *
+ * Returns the total checksum
+ */
+static inline __sum16 build_udp_v6_csum(const struct ipv6hdr *ip6h,
+ const struct udphdr *udph)
+{
+ unsigned long sum;
+
+ sum = csum_partial(udph, ntohs(udph->len), 0);
+ return csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ntohs(udph->len),
+ IPPROTO_UDP, sum);
+}
+
+struct tmonitor_ctx;
+
+typedef int (*tm_print_fn_t)(const char *format, va_list args);
+
+/**
+ * tc_prog_attach - attach BPF program(s) to an interface
+ *
+ * Takes file descriptors pointing to at least one, at most two BPF
+ * programs, and attaches those programs to an interface's ingress hook,
+ * egress hook, or both.
+ *
+ * @dev: string containing the interface name
+ * @ingress_fd: file descriptor of the program to attach to interface ingress
+ * @egress_fd: file descriptor of the program to attach to interface egress
+ *
+ * Returns 0 on success, -1 if no valid file descriptor was passed, if
+ * the interface name is invalid, or if an error occurred during attach.
+ */
+int tc_prog_attach(const char *dev, int ingress_fd, int egress_fd);
+
+#ifdef TRAFFIC_MONITOR
+struct tmonitor_ctx *traffic_monitor_start(const char *netns, const char *test_name,
+ const char *subtest_name);
+void traffic_monitor_stop(struct tmonitor_ctx *ctx);
+tm_print_fn_t traffic_monitor_set_print(tm_print_fn_t fn);
+#else
+static inline struct tmonitor_ctx *traffic_monitor_start(const char *netns, const char *test_name,
+ const char *subtest_name)
+{
+ return NULL;
+}
+
+static inline void traffic_monitor_stop(struct tmonitor_ctx *ctx)
+{
+}
+
+static inline tm_print_fn_t traffic_monitor_set_print(tm_print_fn_t fn)
+{
+ return NULL;
+}
+#endif
#endif
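
A minimal sketch of the UDP checksum helpers above (field values are illustrative): udph->check must be zeroed before the helper runs, because csum_partial() sums the UDP header as-is, check field included:

#include <string.h>
#include "network_helpers.h"

/* udph must sit in a buffer with room for the payload right behind it */
static void fill_udp4_csum(struct iphdr *iph, struct udphdr *udph,
			   const char *payload, int payload_len)
{
	udph->len = htons(sizeof(*udph) + payload_len);
	memcpy(udph + 1, payload, payload_len);

	/* Zero the field first: the helper folds it into the sum */
	udph->check = 0;
	udph->check = build_udp_v4_csum(iph, udph);
}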
diff --git a/tools/testing/selftests/bpf/prog_tests/access_variable_array.c b/tools/testing/selftests/bpf/prog_tests/access_variable_array.c
new file mode 100644
index 000000000000..08131782437c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/access_variable_array.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Bytedance */
+
+#include <test_progs.h>
+#include "test_access_variable_array.skel.h"
+
+void test_access_variable_array(void)
+{
+ struct test_access_variable_array *skel;
+
+ skel = test_access_variable_array__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_access_variable_array__open_and_load"))
+ return;
+
+ test_access_variable_array__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c
index 5861446d0777..24c509ce4e5b 100644
--- a/tools/testing/selftests/bpf/prog_tests/align.c
+++ b/tools/testing/selftests/bpf/prog_tests/align.c
@@ -2,10 +2,11 @@
#include <test_progs.h>
#define MAX_INSNS 512
-#define MAX_MATCHES 16
+#define MAX_MATCHES 24
struct bpf_reg_match {
unsigned int line;
+ const char *reg;
const char *match;
};
@@ -39,13 +40,13 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {1, "R1=ctx(id=0,off=0,imm=0)"},
- {1, "R10=fp0"},
- {1, "R3_w=inv2"},
- {2, "R3_w=inv4"},
- {3, "R3_w=inv8"},
- {4, "R3_w=inv16"},
- {5, "R3_w=inv32"},
+ {0, "R1", "ctx()"},
+ {0, "R10", "fp0"},
+ {0, "R3", "2"},
+ {1, "R3", "4"},
+ {2, "R3", "8"},
+ {3, "R3", "16"},
+ {4, "R3", "32"},
},
},
{
@@ -67,19 +68,19 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {1, "R1=ctx(id=0,off=0,imm=0)"},
- {1, "R10=fp0"},
- {1, "R3_w=inv1"},
- {2, "R3_w=inv2"},
- {3, "R3_w=inv4"},
- {4, "R3_w=inv8"},
- {5, "R3_w=inv16"},
- {6, "R3_w=inv1"},
- {7, "R4_w=inv32"},
- {8, "R4_w=inv16"},
- {9, "R4_w=inv8"},
- {10, "R4_w=inv4"},
- {11, "R4_w=inv2"},
+ {0, "R1", "ctx()"},
+ {0, "R10", "fp0"},
+ {0, "R3", "1"},
+ {1, "R3", "2"},
+ {2, "R3", "4"},
+ {3, "R3", "8"},
+ {4, "R3", "16"},
+ {5, "R3", "1"},
+ {6, "R4", "32"},
+ {7, "R4", "16"},
+ {8, "R4", "8"},
+ {9, "R4", "4"},
+ {10, "R4", "2"},
},
},
{
@@ -96,14 +97,14 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {1, "R1=ctx(id=0,off=0,imm=0)"},
- {1, "R10=fp0"},
- {1, "R3_w=inv4"},
- {2, "R3_w=inv8"},
- {3, "R3_w=inv10"},
- {4, "R4_w=inv8"},
- {5, "R4_w=inv12"},
- {6, "R4_w=inv14"},
+ {0, "R1", "ctx()"},
+ {0, "R10", "fp0"},
+ {0, "R3", "4"},
+ {1, "R3", "8"},
+ {2, "R3", "10"},
+ {3, "R4", "8"},
+ {4, "R4", "12"},
+ {5, "R4", "14"},
},
},
{
@@ -118,12 +119,12 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {1, "R1=ctx(id=0,off=0,imm=0)"},
- {1, "R10=fp0"},
- {1, "R3_w=inv7"},
- {2, "R3_w=inv7"},
- {3, "R3_w=inv14"},
- {4, "R3_w=inv56"},
+ {0, "R1", "ctx()"},
+ {0, "R10", "fp0"},
+ {0, "R3", "7"},
+ {1, "R3", "7"},
+ {2, "R3", "14"},
+ {3, "R3", "56"},
},
},
@@ -161,19 +162,19 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {7, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
- {7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
- {9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
- {11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
- {18, "R3=pkt_end(id=0,off=0,imm=0)"},
- {18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
- {20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
- {21, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
- {22, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {23, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
+ {6, "R0", "pkt(off=8,r=8)"},
+ {6, "R3", "var_off=(0x0; 0xff)"},
+ {7, "R3", "var_off=(0x0; 0x1fe)"},
+ {8, "R3", "var_off=(0x0; 0x3fc)"},
+ {9, "R3", "var_off=(0x0; 0x7f8)"},
+ {10, "R3", "var_off=(0x0; 0xff0)"},
+ {12, "R3", "pkt_end()"},
+ {17, "R4", "var_off=(0x0; 0xff)"},
+ {18, "R4", "var_off=(0x0; 0x1fe0)"},
+ {19, "R4", "var_off=(0x0; 0xff0)"},
+ {20, "R4", "var_off=(0x0; 0x7f8)"},
+ {21, "R4", "var_off=(0x0; 0x3fc)"},
+ {22, "R4", "var_off=(0x0; 0x1fe)"},
},
},
{
@@ -194,16 +195,16 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {8, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
- {9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {10, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
- {11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
- {12, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
- {13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {14, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
- {15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
- {16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
+ {6, "R3", "var_off=(0x0; 0xff)"},
+ {7, "R4", "var_off=(0x0; 0xff)"},
+ {8, "R4", "var_off=(0x0; 0xff)"},
+ {9, "R4", "var_off=(0x0; 0xff)"},
+ {10, "R4", "var_off=(0x0; 0x1fe)"},
+ {11, "R4", "var_off=(0x0; 0xff)"},
+ {12, "R4", "var_off=(0x0; 0x3fc)"},
+ {13, "R4", "var_off=(0x0; 0xff)"},
+ {14, "R4", "var_off=(0x0; 0x7f8)"},
+ {15, "R4", "var_off=(0x0; 0xff0)"},
},
},
{
@@ -234,14 +235,14 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
- {5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
- {6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
- {10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
- {10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
- {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
- {15, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
+ {2, "R5", "pkt(r=0)"},
+ {4, "R5", "pkt(off=14,r=0)"},
+ {5, "R4", "pkt(off=14,r=0)"},
+ {9, "R2", "pkt(r=18)"},
+ {10, "R5", "pkt(off=14,r=18)"},
+ {10, "R4", "var_off=(0x0; 0xff)"},
+ {13, "R4", "var_off=(0x0; 0xffff)"},
+ {14, "R4", "var_off=(0x0; 0xffff)"},
},
},
{
@@ -267,6 +268,7 @@ static struct bpf_align_test tests[] = {
*/
BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
@@ -280,6 +282,7 @@ static struct bpf_align_test tests[] = {
BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
@@ -296,59 +299,67 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
- {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {6, "R2", "pkt(r=8)"},
+ {7, "R6", "var_off=(0x0; 0x3fc)"},
/* Offset is added to packet pointer R5, resulting in
* known fixed offset, and variable offset from R6.
*/
- {11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {11, "R5", "pkt(id=1,off=14,"},
/* At the time the word size load is performed from R5,
* it's total offset is NET_IP_ALIGN + reg->off (0) +
* reg->aux_off (14) which is 16. Then the variable
* offset is considered using reg->aux_off_align which
* is 4 and meets the load's requirements.
*/
- {15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {15, "R4", "var_off=(0x0; 0x3fc)"},
+ {15, "R5", "var_off=(0x0; 0x3fc)"},
/* Variable offset is added to R5 packet pointer,
- * resulting in auxiliary alignment of 4.
+			 * resulting in auxiliary alignment of 4. To keep the
+			 * BPF verifier's precision-backtracking log from
+			 * interfering, we also add a no-op R4 = R5
+			 * instruction to validate R5's state, and check that
+			 * R4 is what it should be in that case.
*/
- {18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {18, "R4", "var_off=(0x0; 0x3fc)"},
+ {18, "R5", "var_off=(0x0; 0x3fc)"},
/* Constant offset is added to R5, resulting in
* reg->off of 14.
*/
- {19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {19, "R5", "pkt(id=2,off=14,"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off
* (14) which is 16. Then the variable offset is 4-byte
* aligned, so the total offset is 4-byte aligned and
* meets the load's requirements.
*/
- {23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {24, "R4", "var_off=(0x0; 0x3fc)"},
+ {24, "R5", "var_off=(0x0; 0x3fc)"},
/* Constant offset is added to R5 packet pointer,
* resulting in reg->off value of 14.
*/
- {26, "R5_w=pkt(id=0,off=14,r=8"},
+ {26, "R5", "pkt(off=14,r=8)"},
/* Variable offset is added to R5, resulting in a
- * variable offset of (4n).
+ * variable offset of (4n). See comment for insn #18
+ * for R4 = R5 trick.
*/
- {27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {28, "R4", "var_off=(0x0; 0x3fc)"},
+ {28, "R5", "var_off=(0x0; 0x3fc)"},
/* Constant is added to R5 again, setting reg->off to 18. */
- {28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {29, "R5", "pkt(id=3,off=18,"},
/* And once more we add a variable; resulting var_off
* is still (4n), fixed offset is not changed.
* Also, we create a new reg->id.
*/
- {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
+ {31, "R4", "var_off=(0x0; 0x7fc)"},
+ {31, "R5", "var_off=(0x0; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (18)
* which is 20. Then the variable offset is (4n), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
- {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
+ {35, "R4", "var_off=(0x0; 0x7fc)"},
+ {35, "R5", "var_off=(0x0; 0x7fc)"},
},
},
{
@@ -386,36 +397,36 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
- {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {6, "R2", "pkt(r=8)"},
+ {7, "R6", "var_off=(0x0; 0x3fc)"},
/* Adding 14 makes R6 be (4n+2) */
- {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {8, "R6", "var_off=(0x2; 0x7fc)"},
/* Packet pointer has (4n+2) offset */
- {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
- {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+ {11, "R5", "var_off=(0x2; 0x7fc)"},
+ {12, "R4", "var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+ {15, "R5", "var_off=(0x2; 0x7fc)"},
/* Newly read value in R6 was shifted left by 2, so has
* known alignment of 4.
*/
- {18, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {17, "R6", "var_off=(0x0; 0x3fc)"},
/* Added (4n) to packet pointer's (4n+2) var_off, giving
* another (4n+2).
*/
- {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
- {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+ {19, "R5", "var_off=(0x2; 0xffc)"},
+ {20, "R4", "var_off=(0x2; 0xffc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+ {23, "R5", "var_off=(0x2; 0xffc)"},
},
},
{
@@ -448,18 +459,18 @@ static struct bpf_align_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.matches = {
- {4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
+ {3, "R5", "pkt_end()"},
/* (ptr - ptr) << 2 == unknown, (4n) */
- {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
+ {5, "R5", "var_off=(0x0; 0xfffffffffffffffc)"},
/* (4n) + 14 == (4n+2). We blow our bounds, because
* the add could overflow.
*/
- {7, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+ {6, "R5", "var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>=0 */
- {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ {9, "R5", "var_off=(0x2; 0x7ffffffffffffffc)"},
/* packet pointer + nonnegative (4n+2) */
- {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
- {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ {11, "R6", "var_off=(0x2; 0x7ffffffffffffffc)"},
+ {12, "R4", "var_off=(0x2; 0x7ffffffffffffffc)"},
/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
* We checked the bounds, but it might have been able
* to overflow if the packet pointer started in the
@@ -467,7 +478,7 @@ static struct bpf_align_test tests[] = {
* So we did not get a 'range' on R6, and the access
* attempt will fail.
*/
- {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ {15, "R6", "var_off=(0x2; 0x7ffffffffffffffc)"},
}
},
{
@@ -502,24 +513,23 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {7, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
- {9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {6, "R2", "pkt(r=8)"},
+ {8, "R6", "var_off=(0x0; 0x3fc)"},
/* Adding 14 makes R6 be (4n+2) */
- {10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {9, "R6", "var_off=(0x2; 0x7fc)"},
/* New unknown value in R7 is (4n) */
- {11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {10, "R7", "var_off=(0x0; 0x3fc)"},
/* Subtracting it from R6 blows our unsigned bounds */
- {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+ {11, "R6", "var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>= 0 */
- {14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {14, "R6", "var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},
-
+ {20, "R5", "var_off=(0x2; 0x7fc)"},
},
},
{
@@ -556,23 +566,23 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {7, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
- {10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
+ {6, "R2", "pkt(r=8)"},
+ {9, "R6", "var_off=(0x0; 0x3c)"},
/* Adding 14 makes R6 be (4n+2) */
- {11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
+ {10, "R6", "var_off=(0x2; 0x7c)"},
/* Subtracting from packet pointer overflows ubounds */
- {13, "R5_w=pkt(id=2,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
+ {13, "R5", "var_off=(0xffffffffffffff82; 0x7c)"},
/* New unknown value in R7 is (4n), >= 76 */
- {15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
+ {14, "R7", "var_off=(0x0; 0x7fc)"},
/* Adding it to packet pointer gives nice bounds again */
- {16, "R5_w=pkt(id=3,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
+ {16, "R5", "var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {20, "R5=pkt(id=3,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
+ {20, "R5", "var_off=(0x2; 0x7fc)"},
},
},
};
@@ -594,16 +604,23 @@ static int do_test_single(struct bpf_align_test *test)
struct bpf_insn *prog = test->insns;
int prog_type = test->prog_type;
char bpf_vlog_copy[32768];
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .prog_flags = BPF_F_STRICT_ALIGNMENT,
+ .log_buf = bpf_vlog,
+ .log_size = sizeof(bpf_vlog),
+ .log_level = 2,
+ );
+ const char *main_pass_start = "0: R1=ctx() R10=fp0";
const char *line_ptr;
int cur_line = -1;
int prog_len, i;
+ char *start;
int fd_prog;
int ret;
prog_len = probe_filter_length(prog);
- fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
- prog, prog_len, BPF_F_STRICT_ALIGNMENT,
- "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 2);
+ fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
+ prog, prog_len, &opts);
if (fd_prog < 0 && test->result != REJECT) {
printf("Failed to load program.\n");
printf("%s", bpf_vlog);
@@ -617,34 +634,63 @@ static int do_test_single(struct bpf_align_test *test)
ret = 0;
/* We make a local copy so that we can strtok() it */
strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy));
- line_ptr = strtok(bpf_vlog_copy, "\n");
+ start = strstr(bpf_vlog_copy, main_pass_start);
+ if (!start) {
+ ret = 1;
+ printf("Can't find initial line '%s'\n", main_pass_start);
+ goto out;
+ }
+ line_ptr = strtok(start, "\n");
for (i = 0; i < MAX_MATCHES; i++) {
struct bpf_reg_match m = test->matches[i];
+ const char *p;
+ int tmp;
if (!m.match)
break;
while (line_ptr) {
cur_line = -1;
sscanf(line_ptr, "%u: ", &cur_line);
+ if (cur_line == -1)
+ sscanf(line_ptr, "from %u to %u: ", &tmp, &cur_line);
if (cur_line == m.line)
break;
line_ptr = strtok(NULL, "\n");
}
if (!line_ptr) {
- printf("Failed to find line %u for match: %s\n",
- m.line, m.match);
+ printf("Failed to find line %u for match: %s=%s\n",
+ m.line, m.reg, m.match);
ret = 1;
printf("%s", bpf_vlog);
break;
}
- if (!strstr(line_ptr, m.match)) {
- printf("Failed to find match %u: %s\n",
- m.line, m.match);
+ /* Check the next line as well in case the previous line
+ * did not have a corresponding bpf insn. Example:
+ * func#0 @0
+ * 0: R1=ctx() R10=fp0
+ * 0: (b7) r3 = 2 ; R3_w=2
+ *
+ * Sometimes it's actually two lines below, e.g. when
+ * searching for "6: R3_w=scalar(umax=255,var_off=(0x0; 0xff))":
+ * from 4 to 6: R0_w=pkt(off=8,r=8) R1=ctx() R2_w=pkt(r=8) R3_w=pkt_end() R10=fp0
+ * 6: R0_w=pkt(off=8,r=8) R1=ctx() R2_w=pkt(r=8) R3_w=pkt_end() R10=fp0
+ * 6: (71) r3 = *(u8 *)(r2 +0) ; R2_w=pkt(r=8) R3_w=scalar(umax=255,var_off=(0x0; 0xff))
+ */
+ while (!(p = strstr(line_ptr, m.reg)) || !strstr(p, m.match)) {
+ cur_line = -1;
+ line_ptr = strtok(NULL, "\n");
+ sscanf(line_ptr ?: "", "%u: ", &cur_line);
+ if (!line_ptr || cur_line != m.line)
+ break;
+ }
+ if (cur_line != m.line || !line_ptr || !(p = strstr(line_ptr, m.reg)) || !strstr(p, m.match)) {
+ printf("Failed to find match %u: %s=%s\n", m.line, m.reg, m.match);
ret = 1;
printf("%s", bpf_vlog);
break;
}
}
+out:
if (fd_prog >= 0)
close(fd_prog);
}
@@ -661,6 +707,6 @@ void test_align(void)
if (!test__start_subtest(test->descr))
continue;
- CHECK_FAIL(do_test_single(test));
+ ASSERT_OK(do_test_single(test), test->descr);
}
}
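The rewritten matcher above stops comparing full verifier-log lines: each expectation is now an {insn index, register, state substring} triple, and the scanner has to cope with plain "N: ..." lines as well as "from X to Y: ..." state lines and state dumps that trail the instruction by a line or two. A standalone sketch of that scanning strategy follows; function and variable names are ours, only the parsing mirrors do_test_single() above.

/* Locate the log line for a given instruction index (tolerating the
 * verifier's "from X to Y:" state lines), then require that the
 * register name and a substring of its printed state both occur on it.
 */
#include <stdio.h>
#include <string.h>

static int match_reg_state(char *log, int insn, const char *reg,
			   const char *match)
{
	const char *p;
	char *line;
	int cur, tmp;

	for (line = strtok(log, "\n"); line; line = strtok(NULL, "\n")) {
		cur = -1;
		sscanf(line, "%d: ", &cur);
		if (cur == -1) /* state lines look like "from 4 to 6: ..." */
			sscanf(line, "from %d to %d: ", &tmp, &cur);
		if (cur != insn)
			continue;
		p = strstr(line, reg);
		if (p && strstr(p, match))
			return 0; /* register found with expected state */
	}
	return -1;
}

int main(void)
{
	char log[] =
		"0: R1=ctx() R10=fp0\n"
		"18: (0f) r5 += r4 ; R4=pkt(r=0) R5_w=pkt(id=2,var_off=(0x0; 0x3fc))\n";

	return match_reg_state(log, 18, "R5", "var_off=(0x0; 0x3fc)");
}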
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
new file mode 100644
index 000000000000..d98577a6babc
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include "arena_atomics.skel.h"
+
+static void test_add(struct arena_atomics *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ /* No need to attach it, just run it directly */
+ prog_fd = bpf_program__fd(skel->progs.add);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->arena->add64_value, 3, "add64_value");
+ ASSERT_EQ(skel->arena->add64_result, 1, "add64_result");
+
+ ASSERT_EQ(skel->arena->add32_value, 3, "add32_value");
+ ASSERT_EQ(skel->arena->add32_result, 1, "add32_result");
+
+ ASSERT_EQ(skel->arena->add_stack_value_copy, 3, "add_stack_value");
+ ASSERT_EQ(skel->arena->add_stack_result, 1, "add_stack_result");
+
+ ASSERT_EQ(skel->arena->add_noreturn_value, 3, "add_noreturn_value");
+}
+
+static void test_sub(struct arena_atomics *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ /* No need to attach it, just run it directly */
+ prog_fd = bpf_program__fd(skel->progs.sub);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->arena->sub64_value, -1, "sub64_value");
+ ASSERT_EQ(skel->arena->sub64_result, 1, "sub64_result");
+
+ ASSERT_EQ(skel->arena->sub32_value, -1, "sub32_value");
+ ASSERT_EQ(skel->arena->sub32_result, 1, "sub32_result");
+
+ ASSERT_EQ(skel->arena->sub_stack_value_copy, -1, "sub_stack_value");
+ ASSERT_EQ(skel->arena->sub_stack_result, 1, "sub_stack_result");
+
+ ASSERT_EQ(skel->arena->sub_noreturn_value, -1, "sub_noreturn_value");
+}
+
+static void test_and(struct arena_atomics *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ /* No need to attach it, just run it directly */
+ prog_fd = bpf_program__fd(skel->progs.and);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->arena->and64_value, 0x010ull << 32, "and64_value");
+ ASSERT_EQ(skel->arena->and32_value, 0x010, "and32_value");
+}
+
+static void test_or(struct arena_atomics *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ /* No need to attach it, just run it directly */
+ prog_fd = bpf_program__fd(skel->progs.or);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->arena->or64_value, 0x111ull << 32, "or64_value");
+ ASSERT_EQ(skel->arena->or32_value, 0x111, "or32_value");
+}
+
+static void test_xor(struct arena_atomics *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ /* No need to attach it, just run it directly */
+ prog_fd = bpf_program__fd(skel->progs.xor);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->arena->xor64_value, 0x101ull << 32, "xor64_value");
+ ASSERT_EQ(skel->arena->xor32_value, 0x101, "xor32_value");
+}
+
+static void test_cmpxchg(struct arena_atomics *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ /* No need to attach it, just run it directly */
+ prog_fd = bpf_program__fd(skel->progs.cmpxchg);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->arena->cmpxchg64_value, 2, "cmpxchg64_value");
+ ASSERT_EQ(skel->arena->cmpxchg64_result_fail, 1, "cmpxchg_result_fail");
+ ASSERT_EQ(skel->arena->cmpxchg64_result_succeed, 1, "cmpxchg_result_succeed");
+
+ ASSERT_EQ(skel->arena->cmpxchg32_value, 2, "lcmpxchg32_value");
+ ASSERT_EQ(skel->arena->cmpxchg32_result_fail, 1, "cmpxchg_result_fail");
+ ASSERT_EQ(skel->arena->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed");
+}
+
+static void test_xchg(struct arena_atomics *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ /* No need to attach it, just run it directly */
+ prog_fd = bpf_program__fd(skel->progs.xchg);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->arena->xchg64_value, 2, "xchg64_value");
+ ASSERT_EQ(skel->arena->xchg64_result, 1, "xchg64_result");
+
+ ASSERT_EQ(skel->arena->xchg32_value, 2, "xchg32_value");
+ ASSERT_EQ(skel->arena->xchg32_result, 1, "xchg32_result");
+}
+
+static void test_uaf(struct arena_atomics *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ /* No need to attach it, just run it directly */
+ prog_fd = bpf_program__fd(skel->progs.uaf);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->arena->uaf_recovery_fails, 0, "uaf_recovery_fails");
+}
+
+static void test_load_acquire(struct arena_atomics *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ if (skel->data->skip_lacq_srel_tests) {
+ printf("%s:SKIP: ENABLE_ATOMICS_TESTS not defined, Clang doesn't support addr_space_cast, and/or JIT doesn't support load-acquire\n",
+ __func__);
+ test__skip();
+ return;
+ }
+
+ /* No need to attach it, just run it directly */
+ prog_fd = bpf_program__fd(skel->progs.load_acquire);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->arena->load_acquire8_result, 0x12,
+ "load_acquire8_result");
+ ASSERT_EQ(skel->arena->load_acquire16_result, 0x1234,
+ "load_acquire16_result");
+ ASSERT_EQ(skel->arena->load_acquire32_result, 0x12345678,
+ "load_acquire32_result");
+ ASSERT_EQ(skel->arena->load_acquire64_result, 0x1234567890abcdef,
+ "load_acquire64_result");
+}
+
+static void test_store_release(struct arena_atomics *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ if (skel->data->skip_lacq_srel_tests) {
+ printf("%s:SKIP: ENABLE_ATOMICS_TESTS not defined, Clang doesn't support addr_space_cast, and/or JIT doesn't support store-release\n",
+ __func__);
+ test__skip();
+ return;
+ }
+
+ /* No need to attach it, just run it directly */
+ prog_fd = bpf_program__fd(skel->progs.store_release);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->arena->store_release8_result, 0x12,
+ "store_release8_result");
+ ASSERT_EQ(skel->arena->store_release16_result, 0x1234,
+ "store_release16_result");
+ ASSERT_EQ(skel->arena->store_release32_result, 0x12345678,
+ "store_release32_result");
+ ASSERT_EQ(skel->arena->store_release64_result, 0x1234567890abcdef,
+ "store_release64_result");
+}
+
+void test_arena_atomics(void)
+{
+ struct arena_atomics *skel;
+ int err;
+
+ skel = arena_atomics__open();
+ if (!ASSERT_OK_PTR(skel, "arena atomics skeleton open"))
+ return;
+
+ if (skel->data->skip_all_tests) {
+ printf("%s:SKIP:no ENABLE_ATOMICS_TESTS or no addr_space_cast support in clang",
+ __func__);
+ test__skip();
+ goto cleanup;
+ }
+ err = arena_atomics__load(skel);
+ if (!ASSERT_OK(err, "arena atomics skeleton load"))
+ goto cleanup;
+ skel->bss->pid = getpid();
+
+ if (test__start_subtest("add"))
+ test_add(skel);
+ if (test__start_subtest("sub"))
+ test_sub(skel);
+ if (test__start_subtest("and"))
+ test_and(skel);
+ if (test__start_subtest("or"))
+ test_or(skel);
+ if (test__start_subtest("xor"))
+ test_xor(skel);
+ if (test__start_subtest("cmpxchg"))
+ test_cmpxchg(skel);
+ if (test__start_subtest("xchg"))
+ test_xchg(skel);
+ if (test__start_subtest("uaf"))
+ test_uaf(skel);
+ if (test__start_subtest("load_acquire"))
+ test_load_acquire(skel);
+ if (test__start_subtest("store_release"))
+ test_store_release(skel);
+
+cleanup:
+ arena_atomics__destroy(skel);
+}
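All of the subtests above share one shape: nothing is attached, the program is executed once via BPF_PROG_TEST_RUN, and the results are asserted against globals living in the memory-mapped arena. A minimal sketch of that shape, with a hypothetical skeleton ("my_test.skel.h") and program name ("run_me"); the libbpf calls are the ones the real subtests use.

#include <test_progs.h>
#include "my_test.skel.h" /* hypothetical skeleton */

static void run_prog_once(struct my_test *skel)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	int err, prog_fd;

	/* no attach step: BPF_PROG_TEST_RUN executes the program directly */
	prog_fd = bpf_program__fd(skel->progs.run_me);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	if (!ASSERT_OK(err, "test_run err"))
		return;
	if (!ASSERT_OK(topts.retval, "test_run retval"))
		return;

	/* results come back through the memory-mapped arena section */
	ASSERT_EQ(skel->arena->some_value, 3, "some_value");
}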
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_htab.c b/tools/testing/selftests/bpf/prog_tests/arena_htab.c
new file mode 100644
index 000000000000..d69fd2465f53
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/arena_htab.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <sys/mman.h>
+#include <network_helpers.h>
+#include <sys/user.h>
+#ifndef PAGE_SIZE /* on some archs it comes in sys/user.h */
+#include <unistd.h>
+#define PAGE_SIZE getpagesize()
+#endif
+#include "arena_htab_asm.skel.h"
+#include "arena_htab.skel.h"
+
+#include "bpf_arena_htab.h"
+
+static void test_arena_htab_common(struct htab *htab)
+{
+ int i;
+
+ printf("htab %p buckets %p n_buckets %d\n", htab, htab->buckets, htab->n_buckets);
+ ASSERT_OK_PTR(htab->buckets, "htab->buckets shouldn't be NULL");
+ for (i = 0; htab->buckets && i < 16; i += 4) {
+ /*
+ * Walk htab buckets and link lists since all pointers are correct,
+ * though they were written by bpf program.
+ */
+ int val = htab_lookup_elem(htab, i);
+
+ ASSERT_EQ(i, val, "key == value");
+ }
+}
+
+static void test_arena_htab_llvm(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct arena_htab *skel;
+ struct htab *htab;
+ size_t arena_sz;
+ void *area;
+ int ret;
+
+ skel = arena_htab__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "arena_htab__open_and_load"))
+ return;
+
+ area = bpf_map__initial_value(skel->maps.arena, &arena_sz);
+ /* fault-in a page with pgoff == 0 as a sanity check */
+ *(volatile int *)area = 0x55aa;
+
+ /* bpf prog will allocate more pages */
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_htab_llvm), &opts);
+ ASSERT_OK(ret, "ret");
+ ASSERT_OK(opts.retval, "retval");
+ if (skel->bss->skip) {
+ printf("%s:SKIP:compiler doesn't support arena_cast\n", __func__);
+ test__skip();
+ goto out;
+ }
+ htab = skel->bss->htab_for_user;
+ test_arena_htab_common(htab);
+out:
+ arena_htab__destroy(skel);
+}
+
+static void test_arena_htab_asm(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct arena_htab_asm *skel;
+ struct htab *htab;
+ int ret;
+
+ skel = arena_htab_asm__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "arena_htab_asm__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_htab_asm), &opts);
+ ASSERT_OK(ret, "ret");
+ ASSERT_OK(opts.retval, "retval");
+ htab = skel->bss->htab_for_user;
+ test_arena_htab_common(htab);
+ arena_htab_asm__destroy(skel);
+}
+
+void test_arena_htab(void)
+{
+ if (test__start_subtest("arena_htab_llvm"))
+ test_arena_htab_llvm();
+ if (test__start_subtest("arena_htab_asm"))
+ test_arena_htab_asm();
+}
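The interesting move in test_arena_htab_llvm() is that user space walks a hash table the BPF program built, by fetching the arena's user-space mapping with bpf_map__initial_value() and dereferencing the published pointers directly. A condensed sketch of that handshake, using a hypothetical "my_arena" skeleton with a "build" program:

#include <test_progs.h>
#include "my_arena.skel.h" /* hypothetical skeleton */

static void walk_arena(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts);
	struct my_arena *skel;
	size_t arena_sz;
	void *area;

	skel = my_arena__open_and_load();
	if (!ASSERT_OK_PTR(skel, "my_arena__open_and_load"))
		return;

	/* user-space view of the shared arena */
	area = bpf_map__initial_value(skel->maps.arena, &arena_sz);

	/* touch the first page so it is faulted in before the prog runs */
	*(volatile int *)area = 0;

	ASSERT_OK(bpf_prog_test_run_opts(bpf_program__fd(skel->progs.build),
					 &opts), "prog_run");

	/* pointers written by the program are now valid here as well */
	my_arena__destroy(skel);
}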
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_list.c b/tools/testing/selftests/bpf/prog_tests/arena_list.c
new file mode 100644
index 000000000000..d15867cddde0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/arena_list.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <sys/mman.h>
+#include <network_helpers.h>
+#include <sys/user.h>
+#ifndef PAGE_SIZE /* on some archs it comes in sys/user.h */
+#include <unistd.h>
+#define PAGE_SIZE getpagesize()
+#endif
+
+#include "bpf_arena_list.h"
+#include "arena_list.skel.h"
+
+struct elem {
+ struct arena_list_node node;
+ __u64 value;
+};
+
+static int list_sum(struct arena_list_head *head)
+{
+ struct elem __arena *n;
+ int sum = 0;
+
+ list_for_each_entry(n, head, node)
+ sum += n->value;
+ return sum;
+}
+
+static void test_arena_list_add_del(int cnt)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct arena_list *skel;
+ int expected_sum = (u64)cnt * (cnt - 1) / 2;
+ int ret, sum;
+
+ skel = arena_list__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "arena_list__open_and_load"))
+ return;
+
+ skel->bss->cnt = cnt;
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_list_add), &opts);
+ ASSERT_OK(ret, "ret_add");
+ ASSERT_OK(opts.retval, "retval");
+ if (skel->bss->skip) {
+ printf("%s:SKIP:compiler doesn't support arena_cast\n", __func__);
+ test__skip();
+ goto out;
+ }
+ sum = list_sum(skel->bss->list_head);
+ ASSERT_EQ(sum, expected_sum, "sum of elems");
+ ASSERT_EQ(skel->arena->arena_sum, expected_sum, "__arena sum of elems");
+ ASSERT_EQ(skel->arena->test_val, cnt + 1, "num of elems");
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_list_del), &opts);
+ ASSERT_OK(ret, "ret_del");
+ sum = list_sum(skel->bss->list_head);
+ ASSERT_EQ(sum, 0, "sum of list elems after del");
+ ASSERT_EQ(skel->bss->list_sum, expected_sum, "sum of list elems computed by prog");
+ ASSERT_EQ(skel->arena->arena_sum, expected_sum, "__arena sum of elems");
+out:
+ arena_list__destroy(skel);
+}
+
+void test_arena_list(void)
+{
+ if (test__start_subtest("arena_list_1"))
+ test_arena_list_add_del(1);
+ if (test__start_subtest("arena_list_1000"))
+ test_arena_list_add_del(1000);
+}
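The expected_sum above relies on the program storing the values 0..cnt-1, which total cnt*(cnt-1)/2; for cnt == 1000 that is 1000*999/2 = 499500. A trivial standalone check of the formula:

#include <assert.h>

int main(void)
{
	unsigned long long cnt = 1000, sum = 0, i;

	for (i = 0; i < cnt; i++)
		sum += i;
	assert(sum == cnt * (cnt - 1) / 2); /* 499500 for cnt == 1000 */
	return 0;
}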
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
new file mode 100644
index 000000000000..693fd86fbde6
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <sys/sysinfo.h>
+
+struct __qspinlock { int val; };
+typedef struct __qspinlock arena_spinlock_t;
+
+struct arena_qnode {
+ unsigned long next;
+ int count;
+ int locked;
+};
+
+#include "arena_spin_lock.skel.h"
+
+static long cpu;
+static int repeat;
+
+pthread_barrier_t barrier;
+
+static void *spin_lock_thread(void *arg)
+{
+ int err, prog_fd = *(u32 *)arg;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = repeat,
+ );
+ cpu_set_t cpuset;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset);
+ ASSERT_OK(pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset), "cpu affinity");
+
+ err = pthread_barrier_wait(&barrier);
+ if (err != PTHREAD_BARRIER_SERIAL_THREAD && err != 0)
+ ASSERT_FALSE(true, "pthread_barrier");
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run err");
+
+ if (topts.retval == -EOPNOTSUPP)
+ goto end;
+
+ ASSERT_EQ((int)topts.retval, 0, "test_run retval");
+
+end:
+ pthread_exit(arg);
+}
+
+static void test_arena_spin_lock_size(int size)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct arena_spin_lock *skel;
+ pthread_t thread_id[16];
+ int prog_fd, i, err;
+ int nthreads;
+ void *ret;
+
+ nthreads = MIN(get_nprocs(), ARRAY_SIZE(thread_id));
+ if (nthreads < 2) {
+ test__skip();
+ return;
+ }
+
+ skel = arena_spin_lock__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "arena_spin_lock__open_and_load"))
+ return;
+
+ if (skel->data->test_skip == 2) {
+ test__skip();
+ goto end;
+ }
+ skel->bss->cs_count = size;
+ skel->bss->limit = repeat * nthreads;
+
+ ASSERT_OK(pthread_barrier_init(&barrier, NULL, nthreads), "barrier init");
+
+ prog_fd = bpf_program__fd(skel->progs.prog);
+ for (i = 0; i < nthreads; i++) {
+ err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd);
+ if (!ASSERT_OK(err, "pthread_create"))
+ goto end_barrier;
+ }
+
+ for (i = 0; i < nthreads; i++) {
+ if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join"))
+ goto end_barrier;
+ if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd"))
+ goto end_barrier;
+ }
+
+ if (skel->data->test_skip == 3) {
+ printf("%s:SKIP: CONFIG_NR_CPUS exceed the maximum supported by arena spinlock\n",
+ __func__);
+ test__skip();
+ goto end_barrier;
+ }
+
+ ASSERT_EQ(skel->bss->counter, repeat * nthreads, "check counter value");
+
+end_barrier:
+ pthread_barrier_destroy(&barrier);
+end:
+ arena_spin_lock__destroy(skel);
+ return;
+}
+
+void test_arena_spin_lock(void)
+{
+ repeat = 1000;
+ if (test__start_subtest("arena_spin_lock_1"))
+ test_arena_spin_lock_size(1);
+ cpu = 0;
+ if (test__start_subtest("arena_spin_lock_1000"))
+ test_arena_spin_lock_size(1000);
+ cpu = 0;
+ repeat = 100;
+ if (test__start_subtest("arena_spin_lock_50000"))
+ test_arena_spin_lock_size(50000);
+}
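The spinlock test's choreography is worth spelling out: each worker is pinned to its own CPU, all workers are released together through a pthread barrier, and the final counter must equal repeat * nthreads if the lock is sound. A self-contained sketch of that choreography, where a pthread mutex stands in for the arena spinlock actually under test:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NTHREADS 4
#define REPEAT   1000

static pthread_barrier_t barrier;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in lock */
static long counter;

static void *worker(void *arg)
{
	cpu_set_t cpuset;
	long cpu = (long)arg;
	int i;

	/* pin this worker to its own CPU, as spin_lock_thread() does */
	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);

	pthread_barrier_wait(&barrier); /* release all workers at once */
	for (i = 0; i < REPEAT; i++) {
		pthread_mutex_lock(&lock);
		counter++;
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];
	long i;

	pthread_barrier_init(&barrier, NULL, NTHREADS);
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)i);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);

	printf("counter=%ld (expect %d)\n", counter, NTHREADS * REPEAT);
	return counter != NTHREADS * REPEAT;
}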
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_strsearch.c b/tools/testing/selftests/bpf/prog_tests/arena_strsearch.c
new file mode 100644
index 000000000000..f81a0c066505
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/arena_strsearch.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include "arena_strsearch.skel.h"
+
+static void test_arena_str(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct arena_strsearch *skel;
+ int ret;
+
+ skel = arena_strsearch__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "arena_strsearch__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_strsearch), &opts);
+ ASSERT_OK(ret, "ret_add");
+ ASSERT_OK(opts.retval, "retval");
+ if (skel->bss->skip) {
+ printf("%s:SKIP:compiler doesn't support arena_cast\n", __func__);
+ test__skip();
+ }
+ arena_strsearch__destroy(skel);
+}
+
+void test_arena_strsearch(void)
+{
+ if (test__start_subtest("arena_strsearch"))
+ test_arena_str();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
new file mode 100644
index 000000000000..e27d66b75fb1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+#include "test_progs.h"
+#include "testing_helpers.h"
+
+static void init_test_filter_set(struct test_filter_set *set)
+{
+ set->cnt = 0;
+ set->tests = NULL;
+}
+
+static void free_test_filter_set(struct test_filter_set *set)
+{
+ int i, j;
+
+ for (i = 0; i < set->cnt; i++) {
+ for (j = 0; j < set->tests[i].subtest_cnt; j++)
+ free((void *)set->tests[i].subtests[j]);
+ free(set->tests[i].subtests);
+ free(set->tests[i].name);
+ }
+
+ free(set->tests);
+ init_test_filter_set(set);
+}
+
+static void test_parse_test_list(void)
+{
+ struct test_filter_set set;
+
+ init_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing", &set, true), "parsing");
+ if (!ASSERT_EQ(set.cnt, 1, "test filters count"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "subtest name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing,bpf_cookie", &set, true),
+ "parsing");
+ if (!ASSERT_EQ(set.cnt, 2, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count");
+ ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing/arg_parsing,bpf_cookie",
+ &set,
+ true),
+ "parsing");
+ if (!ASSERT_EQ(set.cnt, 2, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count"))
+ goto error;
+ ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]),
+ "subtest name");
+ ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing/arg_parsing", &set, true),
+ "parsing");
+ ASSERT_OK(parse_test_list("bpf_cookie", &set, true), "parsing");
+ ASSERT_OK(parse_test_list("send_signal", &set, true), "parsing");
+ if (!ASSERT_EQ(set.cnt, 3, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count"))
+ goto error;
+ ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count");
+ ASSERT_EQ(set.tests[2].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]),
+ "subtest name");
+ ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name");
+ ASSERT_OK(strcmp("send_signal", set.tests[2].name), "test name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("bpf_cookie/trace", &set, false), "parsing");
+ if (!ASSERT_EQ(set.cnt, 1, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count"))
+ goto error;
+ ASSERT_OK(strcmp("*bpf_cookie*", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("*trace*", set.tests[0].subtests[0]), "subtest name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("t/subtest1,t/subtest2", &set, true),
+ "parsing");
+ if (!ASSERT_EQ(set.cnt, 1, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ if (!ASSERT_EQ(set.tests[0].subtest_cnt, 2, "subtest filters count"))
+ goto error;
+ ASSERT_OK(strcmp("t", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("subtest1", set.tests[0].subtests[0]), "subtest name");
+ ASSERT_OK(strcmp("subtest2", set.tests[0].subtests[1]), "subtest name");
+error:
+ free_test_filter_set(&set);
+}
+
+static void test_parse_test_list_file(void)
+{
+ struct test_filter_set set;
+ char tmpfile[80];
+ FILE *fp;
+ int fd;
+
+ snprintf(tmpfile, sizeof(tmpfile), "/tmp/bpf_arg_parsing_test.XXXXXX");
+ fd = mkstemp(tmpfile);
+ if (!ASSERT_GE(fd, 0, "create tmp"))
+ return;
+
+ fp = fdopen(fd, "w");
+ if (!ASSERT_NEQ(fp, NULL, "fdopen tmp")) {
+ close(fd);
+ goto out_remove;
+ }
+
+ fprintf(fp, "# comment\n");
+ fprintf(fp, " test_with_spaces \n");
+ fprintf(fp, "testA/subtest # comment\n");
+ fprintf(fp, "testB#comment with no space\n");
+ fprintf(fp, "testB # duplicate\n");
+ fprintf(fp, "testA/subtest # subtest duplicate\n");
+ fprintf(fp, "testA/subtest2\n");
+ fprintf(fp, "testC_no_eof_newline");
+ fflush(fp);
+
+ if (!ASSERT_OK(ferror(fp), "prepare tmp"))
+ goto out_fclose;
+
+ if (!ASSERT_OK(fsync(fileno(fp)), "fsync tmp"))
+ goto out_fclose;
+
+ init_test_filter_set(&set);
+
+ if (!ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file"))
+ goto out_fclose;
+
+ if (!ASSERT_EQ(set.cnt, 4, "test count"))
+ goto out_free_set;
+
+ ASSERT_OK(strcmp("test_with_spaces", set.tests[0].name), "test 0 name");
+ ASSERT_EQ(set.tests[0].subtest_cnt, 0, "test 0 subtest count");
+ ASSERT_OK(strcmp("testA", set.tests[1].name), "test 1 name");
+ ASSERT_EQ(set.tests[1].subtest_cnt, 2, "test 1 subtest count");
+ ASSERT_OK(strcmp("subtest", set.tests[1].subtests[0]), "test 1 subtest 0");
+ ASSERT_OK(strcmp("subtest2", set.tests[1].subtests[1]), "test 1 subtest 1");
+ ASSERT_OK(strcmp("testB", set.tests[2].name), "test 2 name");
+ ASSERT_OK(strcmp("testC_no_eof_newline", set.tests[3].name), "test 3 name");
+
+out_free_set:
+ free_test_filter_set(&set);
+out_fclose:
+ fclose(fp);
+out_remove:
+ remove(tmpfile);
+}
+
+void test_arg_parsing(void)
+{
+ if (test__start_subtest("test_parse_test_list"))
+ test_parse_test_list();
+ if (test__start_subtest("test_parse_test_list_file"))
+ test_parse_test_list_file();
+}
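For reference, this is how the filter helpers exercised above are meant to be driven by the test runner; the sketch assumes it is built inside the selftests tree where testing_helpers.h lives, and the printed values follow the assertions in the test:

#include <stdio.h>
#include "testing_helpers.h"

int main(void)
{
	struct test_filter_set set = {};

	/* "t/subtest1,t/subtest2" collapses into one test with two subtests */
	if (parse_test_list("t/subtest1,t/subtest2", &set, true))
		return 1;

	printf("tests=%d subtests=%d\n", set.cnt, set.tests[0].subtest_cnt);
	return 0;
}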
diff --git a/tools/testing/selftests/bpf/prog_tests/assign_reuse.c b/tools/testing/selftests/bpf/prog_tests/assign_reuse.c
new file mode 100644
index 000000000000..989ee4d9785b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/assign_reuse.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+#include <uapi/linux/if_link.h>
+#include <test_progs.h>
+
+#include <netinet/tcp.h>
+#include <netinet/udp.h>
+
+#include "network_helpers.h"
+#include "test_assign_reuse.skel.h"
+
+#define NS_TEST "assign_reuse"
+#define LOOPBACK 1
+#define PORT 4443
+
+static int attach_reuseport(int sock_fd, int prog_fd)
+{
+ return setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
+ &prog_fd, sizeof(prog_fd));
+}
+
+static __u64 cookie(int fd)
+{
+ __u64 cookie = 0;
+ socklen_t cookie_len = sizeof(cookie);
+ int ret;
+
+ ret = getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &cookie_len);
+ ASSERT_OK(ret, "cookie");
+ ASSERT_GT(cookie, 0, "cookie_invalid");
+
+ return cookie;
+}
+
+static int echo_test_udp(int fd_sv)
+{
+ struct sockaddr_storage addr = {};
+ socklen_t len = sizeof(addr);
+ char buff[1] = {};
+ int fd_cl = -1, ret;
+
+ fd_cl = connect_to_fd(fd_sv, 100);
+ ASSERT_GT(fd_cl, 0, "create_client");
+ ASSERT_EQ(getsockname(fd_cl, (void *)&addr, &len), 0, "getsockname");
+
+ ASSERT_EQ(send(fd_cl, buff, sizeof(buff), 0), 1, "send_client");
+
+ ret = recv(fd_sv, buff, sizeof(buff), 0);
+ if (ret < 0) {
+ close(fd_cl);
+ return errno;
+ }
+
+ ASSERT_EQ(ret, 1, "recv_server");
+ ASSERT_EQ(sendto(fd_sv, buff, sizeof(buff), 0, (void *)&addr, len), 1, "send_server");
+ ASSERT_EQ(recv(fd_cl, buff, sizeof(buff), 0), 1, "recv_client");
+ close(fd_cl);
+ return 0;
+}
+
+static int echo_test_tcp(int fd_sv)
+{
+ char buff[1] = {};
+ int fd_cl = -1, fd_sv_cl = -1;
+
+ fd_cl = connect_to_fd(fd_sv, 100);
+ if (fd_cl < 0)
+ return errno;
+
+ fd_sv_cl = accept(fd_sv, NULL, NULL);
+ ASSERT_GE(fd_sv_cl, 0, "accept_fd");
+
+ ASSERT_EQ(send(fd_cl, buff, sizeof(buff), 0), 1, "send_client");
+ ASSERT_EQ(recv(fd_sv_cl, buff, sizeof(buff), 0), 1, "recv_server");
+ ASSERT_EQ(send(fd_sv_cl, buff, sizeof(buff), 0), 1, "send_server");
+ ASSERT_EQ(recv(fd_cl, buff, sizeof(buff), 0), 1, "recv_client");
+ close(fd_sv_cl);
+ close(fd_cl);
+ return 0;
+}
+
+void run_assign_reuse(int family, int sotype, const char *ip, __u16 port)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook,
+ .ifindex = LOOPBACK,
+ .attach_point = BPF_TC_INGRESS,
+ );
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, tc_opts,
+ .handle = 1,
+ .priority = 1,
+ );
+ bool hook_created = false, tc_attached = false;
+ int ret, fd_tc, fd_accept, fd_drop, fd_map;
+ int *fd_sv = NULL;
+ __u64 fd_val;
+ struct test_assign_reuse *skel;
+ const int zero = 0;
+
+ skel = test_assign_reuse__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ skel->rodata->dest_port = port;
+
+ ret = test_assign_reuse__load(skel);
+ if (!ASSERT_OK(ret, "skel_load"))
+ goto cleanup;
+
+ ASSERT_EQ(skel->bss->sk_cookie_seen, 0, "cookie_init");
+
+ fd_tc = bpf_program__fd(skel->progs.tc_main);
+ fd_accept = bpf_program__fd(skel->progs.reuse_accept);
+ fd_drop = bpf_program__fd(skel->progs.reuse_drop);
+ fd_map = bpf_map__fd(skel->maps.sk_map);
+
+ fd_sv = start_reuseport_server(family, sotype, ip, port, 100, 1);
+ if (!ASSERT_NEQ(fd_sv, NULL, "start_reuseport_server"))
+ goto cleanup;
+
+ ret = attach_reuseport(*fd_sv, fd_drop);
+ if (!ASSERT_OK(ret, "attach_reuseport"))
+ goto cleanup;
+
+ fd_val = *fd_sv;
+ ret = bpf_map_update_elem(fd_map, &zero, &fd_val, BPF_NOEXIST);
+ if (!ASSERT_OK(ret, "bpf_sk_map"))
+ goto cleanup;
+
+ ret = bpf_tc_hook_create(&tc_hook);
+ if (ret == 0)
+ hook_created = true;
+ ret = ret == -EEXIST ? 0 : ret;
+ if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
+ goto cleanup;
+
+ tc_opts.prog_fd = fd_tc;
+ ret = bpf_tc_attach(&tc_hook, &tc_opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach"))
+ goto cleanup;
+ tc_attached = true;
+
+ if (sotype == SOCK_STREAM)
+ ASSERT_EQ(echo_test_tcp(*fd_sv), ECONNREFUSED, "drop_tcp");
+ else
+ ASSERT_EQ(echo_test_udp(*fd_sv), EAGAIN, "drop_udp");
+ ASSERT_EQ(skel->bss->reuseport_executed, 1, "program executed once");
+
+ skel->bss->sk_cookie_seen = 0;
+ skel->bss->reuseport_executed = 0;
+ ASSERT_OK(attach_reuseport(*fd_sv, fd_accept), "attach_reuseport(accept)");
+
+ if (sotype == SOCK_STREAM)
+ ASSERT_EQ(echo_test_tcp(*fd_sv), 0, "echo_tcp");
+ else
+ ASSERT_EQ(echo_test_udp(*fd_sv), 0, "echo_udp");
+
+ ASSERT_EQ(skel->bss->sk_cookie_seen, cookie(*fd_sv),
+ "cookie_mismatch");
+ ASSERT_EQ(skel->bss->reuseport_executed, 1, "program executed once");
+cleanup:
+ if (tc_attached) {
+ tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
+ ret = bpf_tc_detach(&tc_hook, &tc_opts);
+ ASSERT_OK(ret, "bpf_tc_detach");
+ }
+ if (hook_created) {
+ tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
+ bpf_tc_hook_destroy(&tc_hook);
+ }
+ test_assign_reuse__destroy(skel);
+ free_fds(fd_sv, 1);
+}
+
+void test_assign_reuse(void)
+{
+ struct nstoken *tok = NULL;
+
+ SYS(out, "ip netns add %s", NS_TEST);
+ SYS(cleanup, "ip -net %s link set dev lo up", NS_TEST);
+
+ tok = open_netns(NS_TEST);
+ if (!ASSERT_OK_PTR(tok, "netns token"))
+ goto cleanup;
+
+ if (test__start_subtest("tcpv4"))
+ run_assign_reuse(AF_INET, SOCK_STREAM, "127.0.0.1", PORT);
+ if (test__start_subtest("tcpv6"))
+ run_assign_reuse(AF_INET6, SOCK_STREAM, "::1", PORT);
+ if (test__start_subtest("udpv4"))
+ run_assign_reuse(AF_INET, SOCK_DGRAM, "127.0.0.1", PORT);
+ if (test__start_subtest("udpv6"))
+ run_assign_reuse(AF_INET6, SOCK_DGRAM, "::1", PORT);
+
+cleanup:
+ close_netns(tok);
+ SYS_NOFAIL("ip netns delete %s", NS_TEST);
+out:
+ return;
+}
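The netns and TC scaffolding in run_assign_reuse() follows the usual libbpf attachment dance. A condensed sketch, with the program fd as a placeholder; as in the test, -EEXIST from bpf_tc_hook_create() (clsact qdisc already installed) is treated as success:

#include <bpf/libbpf.h>
#include <errno.h>

static int attach_tc_ingress(int ifindex, int prog_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook,
		.ifindex = ifindex,
		.attach_point = BPF_TC_INGRESS,
	);
	DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts,
		.handle = 1,
		.priority = 1,
		.prog_fd = prog_fd,
	);
	int ret;

	/* creating the hook may fail benignly if clsact already exists */
	ret = bpf_tc_hook_create(&hook);
	if (ret && ret != -EEXIST)
		return ret;
	return bpf_tc_attach(&hook, &opts);
}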
diff --git a/tools/testing/selftests/bpf/prog_tests/async_stack_depth.c b/tools/testing/selftests/bpf/prog_tests/async_stack_depth.c
new file mode 100644
index 000000000000..118abc29b236
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/async_stack_depth.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+#include "async_stack_depth.skel.h"
+
+void test_async_stack_depth(void)
+{
+ RUN_TESTS(async_stack_depth);
+}
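RUN_TESTS() comes from the selftests' test_loader machinery: it loads every program in the skeleton and checks it against expectation annotations embedded in the BPF source. A hypothetical BPF-side program carrying such annotations might look like the following; the __failure/__msg macros are from the selftests' bpf_misc.h, and the message string is only illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("tc")
__failure __msg("combined stack size") /* illustrative expected message */
int hypothetical_prog(void *ctx)
{
	return 0;
}

char _license[] SEC("license") = "GPL";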
diff --git a/tools/testing/selftests/bpf/prog_tests/atomic_bounds.c b/tools/testing/selftests/bpf/prog_tests/atomic_bounds.c
new file mode 100644
index 000000000000..69bd7853e8f1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/atomic_bounds.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include "atomic_bounds.skel.h"
+
+void test_atomic_bounds(void)
+{
+ struct atomic_bounds *skel;
+ __u32 duration = 0;
+
+ skel = atomic_bounds__open_and_load();
+ if (CHECK(!skel, "skel_load", "couldn't load program\n"))
+ return;
+
+ atomic_bounds__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c
new file mode 100644
index 000000000000..92b5f378bfb8
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/atomics.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include "atomics.lskel.h"
+
+static void test_add(struct atomics_lskel *skel)
+{
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ /* No need to attach it, just run it directly */
+ prog_fd = skel->progs.add.prog_fd;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->data->add64_value, 3, "add64_value");
+ ASSERT_EQ(skel->bss->add64_result, 1, "add64_result");
+
+ ASSERT_EQ(skel->data->add32_value, 3, "add32_value");
+ ASSERT_EQ(skel->bss->add32_result, 1, "add32_result");
+
+ ASSERT_EQ(skel->bss->add_stack_value_copy, 3, "add_stack_value");
+ ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result");
+
+ ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value");
+}
+
+static void test_sub(struct atomics_lskel *skel)
+{
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ /* No need to attach it, just run it directly */
+ prog_fd = skel->progs.sub.prog_fd;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value");
+ ASSERT_EQ(skel->bss->sub64_result, 1, "sub64_result");
+
+ ASSERT_EQ(skel->data->sub32_value, -1, "sub32_value");
+ ASSERT_EQ(skel->bss->sub32_result, 1, "sub32_result");
+
+ ASSERT_EQ(skel->bss->sub_stack_value_copy, -1, "sub_stack_value");
+ ASSERT_EQ(skel->bss->sub_stack_result, 1, "sub_stack_result");
+
+ ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value");
+}
+
+static void test_and(struct atomics_lskel *skel)
+{
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ /* No need to attach it, just run it directly */
+ prog_fd = skel->progs.and.prog_fd;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value");
+ ASSERT_EQ(skel->bss->and64_result, 0x110ull << 32, "and64_result");
+
+ ASSERT_EQ(skel->data->and32_value, 0x010, "and32_value");
+ ASSERT_EQ(skel->bss->and32_result, 0x110, "and32_result");
+
+ ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value");
+}
+
+static void test_or(struct atomics_lskel *skel)
+{
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ /* No need to attach it, just run it directly */
+ prog_fd = skel->progs.or.prog_fd;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value");
+ ASSERT_EQ(skel->bss->or64_result, 0x110ull << 32, "or64_result");
+
+ ASSERT_EQ(skel->data->or32_value, 0x111, "or32_value");
+ ASSERT_EQ(skel->bss->or32_result, 0x110, "or32_result");
+
+ ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value");
+}
+
+static void test_xor(struct atomics_lskel *skel)
+{
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ /* No need to attach it, just run it directly */
+ prog_fd = skel->progs.xor.prog_fd;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value");
+ ASSERT_EQ(skel->bss->xor64_result, 0x110ull << 32, "xor64_result");
+
+ ASSERT_EQ(skel->data->xor32_value, 0x101, "xor32_value");
+ ASSERT_EQ(skel->bss->xor32_result, 0x110, "xor32_result");
+
+ ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_noreturn_value");
+}
+
+static void test_cmpxchg(struct atomics_lskel *skel)
+{
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ /* No need to attach it, just run it directly */
+ prog_fd = skel->progs.cmpxchg.prog_fd;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value");
+ ASSERT_EQ(skel->bss->cmpxchg64_result_fail, 1, "cmpxchg_result_fail");
+ ASSERT_EQ(skel->bss->cmpxchg64_result_succeed, 1, "cmpxchg_result_succeed");
+
+ ASSERT_EQ(skel->data->cmpxchg32_value, 2, "lcmpxchg32_value");
+ ASSERT_EQ(skel->bss->cmpxchg32_result_fail, 1, "cmpxchg_result_fail");
+ ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed");
+}
+
+static void test_xchg(struct atomics_lskel *skel)
+{
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ /* No need to attach it, just run it directly */
+ prog_fd = skel->progs.xchg.prog_fd;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value");
+ ASSERT_EQ(skel->bss->xchg64_result, 1, "xchg64_result");
+
+ ASSERT_EQ(skel->data->xchg32_value, 2, "xchg32_value");
+ ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result");
+}
+
+void test_atomics(void)
+{
+ struct atomics_lskel *skel;
+ int err;
+
+ skel = atomics_lskel__open();
+ if (!ASSERT_OK_PTR(skel, "atomics skeleton open"))
+ return;
+
+ err = atomics_lskel__load(skel);
+ if (!ASSERT_OK(err, "atomics skeleton load"))
+ goto cleanup;
+
+ if (skel->data->skip_tests) {
+ printf("%s:SKIP:no ENABLE_ATOMICS_TESTS (missing Clang BPF atomics support)",
+ __func__);
+ test__skip();
+ goto cleanup;
+ }
+ skel->bss->pid = getpid();
+
+ if (test__start_subtest("add"))
+ test_add(skel);
+ if (test__start_subtest("sub"))
+ test_sub(skel);
+ if (test__start_subtest("and"))
+ test_and(skel);
+ if (test__start_subtest("or"))
+ test_or(skel);
+ if (test__start_subtest("xor"))
+ test_xor(skel);
+ if (test__start_subtest("cmpxchg"))
+ test_cmpxchg(skel);
+ if (test__start_subtest("xchg"))
+ test_xchg(skel);
+
+cleanup:
+ atomics_lskel__destroy(skel);
+}
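Note the fd lookup above: light skeletons (.lskel.h, as used here) expose program fds as plain struct fields, whereas regular skeletons go through the bpf_program__fd() accessor. A short sketch with a hypothetical "demo" skeleton:

#include "demo.lskel.h" /* hypothetical light skeleton */

static int demo_prog_fd(struct demo_lskel *skel)
{
	/* a regular skeleton would use bpf_program__fd(skel->progs.add) */
	return skel->progs.add.prog_fd;
}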
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index a0ee87c8e1ea..9e77e5da7097 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -1,105 +1,441 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include "test_attach_kprobe_sleepable.skel.h"
+#include "test_attach_probe_manual.skel.h"
#include "test_attach_probe.skel.h"
+#include "kprobe_write_ctx.skel.h"
-ssize_t get_base_addr() {
- size_t start, offset;
- char buf[256];
- FILE *f;
-
- f = fopen("/proc/self/maps", "r");
- if (!f)
- return -errno;
-
- while (fscanf(f, "%zx-%*x %s %zx %*[^\n]\n",
- &start, buf, &offset) == 3) {
- if (strcmp(buf, "r-xp") == 0) {
- fclose(f);
- return start - offset;
- }
- }
-
- fclose(f);
- return -EINVAL;
+/* this is how a USDT semaphore is actually defined, except for the volatile modifier */
+volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute((section(".probes")));
+
+/* uprobe attach point */
+static noinline void trigger_func(void)
+{
+ asm volatile ("");
}
-void test_attach_probe(void)
+/* attach point for byname uprobe */
+static noinline void trigger_func2(void)
+{
+ asm volatile ("");
+}
+
+/* attach point for byname sleepable uprobe */
+static noinline void trigger_func3(void)
+{
+ asm volatile ("");
+}
+
+/* attach point for ref_ctr */
+static noinline void trigger_func4(void)
{
- int duration = 0;
+ asm volatile ("");
+}
+
+static char test_data[] = "test_data";
+
+/* manual attach kprobe/kretprobe/uprobe/uretprobe tests */
+static void test_attach_probe_manual(enum probe_attach_mode attach_mode)
+{
+ DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+ DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
struct bpf_link *kprobe_link, *kretprobe_link;
struct bpf_link *uprobe_link, *uretprobe_link;
- struct test_attach_probe* skel;
- size_t uprobe_offset;
- ssize_t base_addr;
+ struct test_attach_probe_manual *skel;
+ ssize_t uprobe_offset;
- base_addr = get_base_addr();
- if (CHECK(base_addr < 0, "get_base_addr",
- "failed to find base addr: %zd", base_addr))
+ skel = test_attach_probe_manual__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
return;
- uprobe_offset = (size_t)&get_base_addr - base_addr;
- skel = test_attach_probe__open_and_load();
- if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
- return;
- if (CHECK(!skel->bss, "check_bss", ".bss wasn't mmap()-ed\n"))
+ uprobe_offset = get_uprobe_offset(&trigger_func);
+ if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
goto cleanup;
- kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe,
- false /* retprobe */,
- SYS_NANOSLEEP_KPROBE_NAME);
- if (CHECK(IS_ERR(kprobe_link), "attach_kprobe",
- "err %ld\n", PTR_ERR(kprobe_link)))
+ /* manual-attach kprobe/kretprobe */
+ kprobe_opts.attach_mode = attach_mode;
+ kprobe_opts.retprobe = false;
+ kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
+ SYS_NANOSLEEP_KPROBE_NAME,
+ &kprobe_opts);
+ if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe"))
goto cleanup;
skel->links.handle_kprobe = kprobe_link;
- kretprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kretprobe,
- true /* retprobe */,
- SYS_NANOSLEEP_KPROBE_NAME);
- if (CHECK(IS_ERR(kretprobe_link), "attach_kretprobe",
- "err %ld\n", PTR_ERR(kretprobe_link)))
+ kprobe_opts.retprobe = true;
+ kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
+ SYS_NANOSLEEP_KPROBE_NAME,
+ &kprobe_opts);
+ if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe"))
goto cleanup;
skel->links.handle_kretprobe = kretprobe_link;
- uprobe_link = bpf_program__attach_uprobe(skel->progs.handle_uprobe,
- false /* retprobe */,
- 0 /* self pid */,
- "/proc/self/exe",
- uprobe_offset);
- if (CHECK(IS_ERR(uprobe_link), "attach_uprobe",
- "err %ld\n", PTR_ERR(uprobe_link)))
+ /* manual-attach uprobe/uretprobe */
+ uprobe_opts.attach_mode = attach_mode;
+ uprobe_opts.ref_ctr_offset = 0;
+ uprobe_opts.retprobe = false;
+ uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
+ 0 /* self pid */,
+ "/proc/self/exe",
+ uprobe_offset,
+ &uprobe_opts);
+ if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe"))
goto cleanup;
skel->links.handle_uprobe = uprobe_link;
- uretprobe_link = bpf_program__attach_uprobe(skel->progs.handle_uretprobe,
- true /* retprobe */,
- -1 /* any pid */,
- "/proc/self/exe",
- uprobe_offset);
- if (CHECK(IS_ERR(uretprobe_link), "attach_uretprobe",
- "err %ld\n", PTR_ERR(uretprobe_link)))
+ uprobe_opts.retprobe = true;
+ uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
+ -1 /* any pid */,
+ "/proc/self/exe",
+ uprobe_offset, &uprobe_opts);
+ if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe"))
goto cleanup;
skel->links.handle_uretprobe = uretprobe_link;
+ /* attach uprobe by function name manually */
+ uprobe_opts.func_name = "trigger_func2";
+ uprobe_opts.retprobe = false;
+ uprobe_opts.ref_ctr_offset = 0;
+ skel->links.handle_uprobe_byname =
+ bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname,
+ 0 /* this pid */,
+ "/proc/self/exe",
+ 0, &uprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname"))
+ goto cleanup;
+
/* trigger & validate kprobe && kretprobe */
usleep(1);
- if (CHECK(skel->bss->kprobe_res != 1, "check_kprobe_res",
- "wrong kprobe res: %d\n", skel->bss->kprobe_res))
+ /* trigger & validate uprobe & uretprobe */
+ trigger_func();
+
+ /* trigger & validate uprobe attached by name */
+ trigger_func2();
+
+ ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
+ ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
+ ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res");
+ ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res");
+ ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res");
+
+cleanup:
+ test_attach_probe_manual__destroy(skel);
+}
+
+/* attach uprobe/uretprobe long event name tests */
+static void test_attach_uprobe_long_event_name(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+ struct bpf_link *uprobe_link, *uretprobe_link;
+ struct test_attach_probe_manual *skel;
+ ssize_t uprobe_offset;
+ char path[PATH_MAX] = {0};
+
+ skel = test_attach_probe_manual__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
+ return;
+
+ uprobe_offset = get_uprobe_offset(&trigger_func);
+ if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
+ goto cleanup;
+
+ if (!ASSERT_GT(readlink("/proc/self/exe", path, PATH_MAX - 1), 0, "readlink"))
goto cleanup;
- if (CHECK(skel->bss->kretprobe_res != 2, "check_kretprobe_res",
- "wrong kretprobe res: %d\n", skel->bss->kretprobe_res))
+
+ /* manual-attach uprobe/uretprobe */
+ uprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY;
+ uprobe_opts.ref_ctr_offset = 0;
+ uprobe_opts.retprobe = false;
+ uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
+ 0 /* self pid */,
+ path,
+ uprobe_offset,
+ &uprobe_opts);
+ if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_long_event_name"))
goto cleanup;
+ skel->links.handle_uprobe = uprobe_link;
- /* trigger & validate uprobe & uretprobe */
- get_base_addr();
+ uprobe_opts.retprobe = true;
+ uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
+ -1 /* any pid */,
+ path,
+ uprobe_offset, &uprobe_opts);
+ if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_long_event_name"))
+ goto cleanup;
+ skel->links.handle_uretprobe = uretprobe_link;
+
+cleanup:
+ test_attach_probe_manual__destroy(skel);
+}
- if (CHECK(skel->bss->uprobe_res != 3, "check_uprobe_res",
- "wrong uprobe res: %d\n", skel->bss->uprobe_res))
+/* attach kprobe/kretprobe long event name tests */
+static void test_attach_kprobe_long_event_name(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
+ struct bpf_link *kprobe_link, *kretprobe_link;
+ struct test_attach_probe_manual *skel;
+
+ skel = test_attach_probe_manual__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
+ return;
+
+ /* manual-attach kprobe/kretprobe */
+ kprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY;
+ kprobe_opts.retprobe = false;
+ kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
+ "bpf_testmod_looooooooooooooooooooooooooooooong_name",
+ &kprobe_opts);
+ if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe_long_event_name"))
goto cleanup;
- if (CHECK(skel->bss->uretprobe_res != 4, "check_uretprobe_res",
- "wrong uretprobe res: %d\n", skel->bss->uretprobe_res))
+ skel->links.handle_kprobe = kprobe_link;
+
+ kprobe_opts.retprobe = true;
+ kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
+ "bpf_testmod_looooooooooooooooooooooooooooooong_name",
+ &kprobe_opts);
+ if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe_long_event_name"))
goto cleanup;
+ skel->links.handle_kretprobe = kretprobe_link;
+
+cleanup:
+ test_attach_probe_manual__destroy(skel);
+}
+
+#ifdef __x86_64__
+/* verify that a kprobe program that writes to its context fails to attach */
+static void test_attach_kprobe_write_ctx(void)
+{
+ struct kprobe_write_ctx *skel = NULL;
+ struct bpf_link *link = NULL;
+
+ skel = kprobe_write_ctx__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_write_ctx__open_and_load"))
+ return;
+
+ link = bpf_program__attach_kprobe_opts(skel->progs.kprobe_write_ctx,
+ "bpf_fentry_test1", NULL);
+ if (!ASSERT_ERR_PTR(link, "bpf_program__attach_kprobe_opts"))
+ bpf_link__destroy(link);
+
+ kprobe_write_ctx__destroy(skel);
+}
+#else
+static void test_attach_kprobe_write_ctx(void)
+{
+ test__skip();
+}
+#endif
+
+static void test_attach_probe_auto(struct test_attach_probe *skel)
+{
+ struct bpf_link *uprobe_err_link;
+
+ /* auto-attachable kprobe and kretprobe */
+ skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto);
+ ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto");
+
+ skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto);
+ ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto");
+
+ /* verify auto-attach fails for old-style uprobe definition */
+ uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname);
+ if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP,
+ "auto-attach should fail for old-style name"))
+ return;
+
+ /* verify auto-attach works */
+ skel->links.handle_uretprobe_byname =
+ bpf_program__attach(skel->progs.handle_uretprobe_byname);
+ if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname"))
+ return;
+
+ /* trigger & validate kprobe && kretprobe */
+ usleep(1);
+
+ /* trigger & validate uprobe attached by name */
+ trigger_func2();
+
+ ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res");
+ ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res");
+}
+
+static void test_uprobe_lib(struct test_attach_probe *skel)
+{
+ DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+ FILE *devnull;
+
+ /* test attach by name for a library function, using the library
+ * as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo().
+ */
+ uprobe_opts.func_name = "fopen";
+ uprobe_opts.retprobe = false;
+ skel->links.handle_uprobe_byname2 =
+ bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname2,
+ 0 /* this pid */,
+ "libc.so.6",
+ 0, &uprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2"))
+ return;
+
+ uprobe_opts.func_name = "fclose";
+ uprobe_opts.retprobe = true;
+ skel->links.handle_uretprobe_byname2 =
+ bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_byname2,
+ -1 /* any pid */,
+ "libc.so.6",
+ 0, &uprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2"))
+ return;
+
+ /* trigger & validate shared library u[ret]probes attached by name */
+ devnull = fopen("/dev/null", "r");
+ fclose(devnull);
+
+ ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res");
+}
+
+static void test_uprobe_ref_ctr(struct test_attach_probe *skel)
+{
+ DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+ struct bpf_link *uprobe_link, *uretprobe_link;
+ ssize_t uprobe_offset, ref_ctr_offset;
+
+ uprobe_offset = get_uprobe_offset(&trigger_func4);
+ if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset_ref_ctr"))
+ return;
+
+ ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr);
+ if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))
+ return;
+
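+	/* uprobe_ref_ctr acts as a semaphore: the kernel increments it while a
+	 * probe referencing it is attached, and decrements it on detach
+	 */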
+ ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");
+
+ uprobe_opts.retprobe = false;
+ uprobe_opts.ref_ctr_offset = ref_ctr_offset;
+ uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_ref_ctr,
+ 0 /* self pid */,
+ "/proc/self/exe",
+ uprobe_offset,
+ &uprobe_opts);
+ if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_ref_ctr"))
+ return;
+ skel->links.handle_uprobe_ref_ctr = uprobe_link;
+
+ ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");
+
+ /* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */
+ uprobe_opts.retprobe = true;
+ uprobe_opts.ref_ctr_offset = ref_ctr_offset;
+ uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_ref_ctr,
+ -1 /* any pid */,
+ "/proc/self/exe",
+ uprobe_offset, &uprobe_opts);
+ if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_ref_ctr"))
+ return;
+ skel->links.handle_uretprobe_ref_ctr = uretprobe_link;
+}
+
+static void test_kprobe_sleepable(void)
+{
+ struct test_attach_kprobe_sleepable *skel;
+
+ skel = test_attach_kprobe_sleepable__open();
+ if (!ASSERT_OK_PTR(skel, "skel_kprobe_sleepable_open"))
+ return;
+
+ /* sleepable kprobe test case needs flags set before loading */
+ if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable,
+ BPF_F_SLEEPABLE), "kprobe_sleepable_flags"))
+ goto cleanup;
+
+ if (!ASSERT_OK(test_attach_kprobe_sleepable__load(skel),
+ "skel_kprobe_sleepable_load"))
+ goto cleanup;
+
+ /* sleepable kprobes should not attach successfully */
+ skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable);
+ ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable");
+
+cleanup:
+ test_attach_kprobe_sleepable__destroy(skel);
+}
+
+static void test_uprobe_sleepable(struct test_attach_probe *skel)
+{
+ /* test sleepable uprobe and uretprobe variants */
+ skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable);
+ if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable"))
+ return;
+
+ skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3);
+ if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, "attach_uprobe_byname3"))
+ return;
+
+ skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable);
+ if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable"))
+ return;
+
+ skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3);
+ if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3"))
+ return;
+
+ skel->bss->user_ptr = test_data;
+
+ /* trigger & validate sleepable uprobe attached by name */
+ trigger_func3();
+
+ ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res");
+ ASSERT_EQ(skel->bss->uprobe_byname3_str_sleepable_res, 10, "check_uprobe_byname3_str_sleepable_res");
+ ASSERT_EQ(skel->bss->uprobe_byname3_res, 11, "check_uprobe_byname3_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 12, "check_uretprobe_byname3_sleepable_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname3_str_sleepable_res, 13, "check_uretprobe_byname3_str_sleepable_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname3_res, 14, "check_uretprobe_byname3_res");
+}
+
+void test_attach_probe(void)
+{
+ struct test_attach_probe *skel;
+
+ skel = test_attach_probe__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load"))
+ goto cleanup;
+ if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
+ goto cleanup;
+
+ if (test__start_subtest("manual-default"))
+ test_attach_probe_manual(PROBE_ATTACH_MODE_DEFAULT);
+ if (test__start_subtest("manual-legacy"))
+ test_attach_probe_manual(PROBE_ATTACH_MODE_LEGACY);
+ if (test__start_subtest("manual-perf"))
+ test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
+ if (test__start_subtest("manual-link"))
+ test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);
+
+ if (test__start_subtest("auto"))
+ test_attach_probe_auto(skel);
+ if (test__start_subtest("kprobe-sleepable"))
+ test_kprobe_sleepable();
+ if (test__start_subtest("uprobe-lib"))
+ test_uprobe_lib(skel);
+ if (test__start_subtest("uprobe-sleepable"))
+ test_uprobe_sleepable(skel);
+ if (test__start_subtest("uprobe-ref_ctr"))
+ test_uprobe_ref_ctr(skel);
+
+ if (test__start_subtest("uprobe-long_name"))
+ test_attach_uprobe_long_event_name();
+ if (test__start_subtest("kprobe-long_name"))
+ test_attach_kprobe_long_event_name();
+ if (test__start_subtest("kprobe-write-ctx"))
+ test_attach_kprobe_write_ctx();
cleanup:
test_attach_probe__destroy(skel);
+ ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_cleanup");
}
diff --git a/tools/testing/selftests/bpf/prog_tests/autoattach.c b/tools/testing/selftests/bpf/prog_tests/autoattach.c
new file mode 100644
index 000000000000..dc5e01d279bd
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/autoattach.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Google */
+
+#include <test_progs.h>
+#include "test_autoattach.skel.h"
+
+void test_autoattach(void)
+{
+ struct test_autoattach *skel;
+
+ skel = test_autoattach__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ goto cleanup;
+
+ /* disable auto-attach for prog2 */
+ bpf_program__set_autoattach(skel->progs.prog2, false);
+ ASSERT_TRUE(bpf_program__autoattach(skel->progs.prog1), "autoattach_prog1");
+ ASSERT_FALSE(bpf_program__autoattach(skel->progs.prog2), "autoattach_prog2");
+ if (!ASSERT_OK(test_autoattach__attach(skel), "skel_attach"))
+ goto cleanup;
+
+ usleep(1);
+
+ ASSERT_TRUE(skel->bss->prog1_called, "attached_prog1");
+ ASSERT_FALSE(skel->bss->prog2_called, "attached_prog2");
+
+cleanup:
+ test_autoattach__destroy(skel);
+}
+
diff --git a/tools/testing/selftests/bpf/prog_tests/bad_struct_ops.c b/tools/testing/selftests/bpf/prog_tests/bad_struct_ops.c
new file mode 100644
index 000000000000..6a707213e46b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bad_struct_ops.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "bad_struct_ops.skel.h"
+#include "bad_struct_ops2.skel.h"
+
+static void invalid_prog_reuse(void)
+{
+ struct bad_struct_ops *skel;
+ char *log = NULL;
+ int err;
+
+ skel = bad_struct_ops__open();
+ if (!ASSERT_OK_PTR(skel, "bad_struct_ops__open"))
+ return;
+
+ if (start_libbpf_log_capture())
+ goto cleanup;
+
+ err = bad_struct_ops__load(skel);
+ log = stop_libbpf_log_capture();
+ ASSERT_ERR(err, "bad_struct_ops__load should fail");
+ ASSERT_HAS_SUBSTR(log,
+ "struct_ops init_kern testmod_2 func ptr test_1: invalid reuse of prog test_1",
+ "expected init_kern message");
+
+cleanup:
+ free(log);
+ bad_struct_ops__destroy(skel);
+}
+
+static void unused_program(void)
+{
+ struct bad_struct_ops2 *skel;
+ char *log = NULL;
+ int err;
+
+ skel = bad_struct_ops2__open();
+ if (!ASSERT_OK_PTR(skel, "bad_struct_ops2__open"))
+ return;
+
+	/* struct_ops programs that are not referenced from any map are
+	 * opened with autoload set to true.
+	 */
+ ASSERT_TRUE(bpf_program__autoload(skel->progs.foo), "foo autoload == true");
+
+ if (start_libbpf_log_capture())
+ goto cleanup;
+
+ err = bad_struct_ops2__load(skel);
+ ASSERT_ERR(err, "bad_struct_ops2__load should fail");
+ log = stop_libbpf_log_capture();
+ ASSERT_HAS_SUBSTR(log, "prog 'foo': failed to load",
+ "message about 'foo' failing to load");
+
+cleanup:
+ free(log);
+ bad_struct_ops2__destroy(skel);
+}
+
+void test_bad_struct_ops(void)
+{
+ if (test__start_subtest("invalid_prog_reuse"))
+ invalid_prog_reuse();
+ if (test__start_subtest("unused_program"))
+ unused_program();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bind_perm.c b/tools/testing/selftests/bpf/prog_tests/bind_perm.c
new file mode 100644
index 000000000000..f7cd129cb82b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bind_perm.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include "test_progs.h"
+#include "cap_helpers.h"
+#include "bind_perm.skel.h"
+
+static int create_netns(void)
+{
+ if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
+ return -1;
+
+ return 0;
+}
+
+void try_bind(int family, int port, int expected_errno)
+{
+ struct sockaddr_storage addr = {};
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_in *sin;
+ int fd = -1;
+
+ fd = socket(family, SOCK_STREAM, 0);
+ if (!ASSERT_GE(fd, 0, "socket"))
+ goto close_socket;
+
+ if (family == AF_INET) {
+ sin = (struct sockaddr_in *)&addr;
+ sin->sin_family = family;
+ sin->sin_port = htons(port);
+ } else {
+ sin6 = (struct sockaddr_in6 *)&addr;
+ sin6->sin6_family = family;
+ sin6->sin6_port = htons(port);
+ }
+
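+	/* the return value of bind() is intentionally ignored; only errno is
+	 * compared against the expected value
+	 */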
+ errno = 0;
+ bind(fd, (struct sockaddr *)&addr, sizeof(addr));
+ ASSERT_EQ(errno, expected_errno, "bind");
+
+close_socket:
+ if (fd >= 0)
+ close(fd);
+}
+
+void test_bind_perm(void)
+{
+ const __u64 net_bind_svc_cap = 1ULL << CAP_NET_BIND_SERVICE;
+ struct bind_perm *skel;
+ __u64 old_caps = 0;
+ int cgroup_fd;
+
+ if (create_netns())
+ return;
+
+ cgroup_fd = test__join_cgroup("/bind_perm");
+ if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup"))
+ return;
+
+ skel = bind_perm__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ goto close_cgroup_fd;
+
+ skel->links.bind_v4_prog = bpf_program__attach_cgroup(skel->progs.bind_v4_prog, cgroup_fd);
+	if (!ASSERT_OK_PTR(skel->links.bind_v4_prog, "bind_v4_prog"))
+ goto close_skeleton;
+
+ skel->links.bind_v6_prog = bpf_program__attach_cgroup(skel->progs.bind_v6_prog, cgroup_fd);
+	if (!ASSERT_OK_PTR(skel->links.bind_v6_prog, "bind_v6_prog"))
+ goto close_skeleton;
+
+ ASSERT_OK(cap_disable_effective(net_bind_svc_cap, &old_caps),
+ "cap_disable_effective");
+
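+	/* both ports are privileged; with the capability dropped, the cgroup
+	 * progs are expected to deny port 110 but explicitly allow port 111
+	 */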
+ try_bind(AF_INET, 110, EACCES);
+ try_bind(AF_INET6, 110, EACCES);
+
+ try_bind(AF_INET, 111, 0);
+ try_bind(AF_INET6, 111, 0);
+
+ if (old_caps & net_bind_svc_cap)
+ ASSERT_OK(cap_enable_effective(net_bind_svc_cap, NULL),
+ "cap_enable_effective");
+
+close_skeleton:
+ bind_perm__destroy(skel);
+close_cgroup_fd:
+ close(cgroup_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
new file mode 100644
index 000000000000..42b49870e520
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <sys/syscall.h>
+#include <limits.h>
+#include <test_progs.h>
+#include "bloom_filter_map.skel.h"
+
+#ifndef NUMA_NO_NODE
+#define NUMA_NO_NODE (-1)
+#endif
+
+static void test_fail_cases(void)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, opts);
+ __u32 value = 0;
+ int fd, err;
+
+ /* Invalid key size */
+ fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 4, sizeof(value), 100, NULL);
+ if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid key size"))
+ close(fd);
+
+ /* Invalid value size */
+ fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, 0, 100, NULL);
+ if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid value size 0"))
+ close(fd);
+
+ /* Invalid value size: too big */
+ fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, INT32_MAX, 100, NULL);
+ if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid value too large"))
+ close(fd);
+
+ /* Invalid max entries size */
+ fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 0, NULL);
+ if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid max entries size"))
+ close(fd);
+
+ /* Bloom filter maps do not support BPF_F_NO_PREALLOC */
+ opts.map_flags = BPF_F_NO_PREALLOC;
+ fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts);
+ if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid flags"))
+ close(fd);
+
+ fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, NULL);
+ if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter"))
+ return;
+
+ /* Test invalid flags */
+ err = bpf_map_update_elem(fd, NULL, &value, -1);
+ ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags");
+
+ err = bpf_map_update_elem(fd, NULL, &value, BPF_EXIST);
+ ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags");
+
+ err = bpf_map_update_elem(fd, NULL, &value, BPF_F_LOCK);
+ ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags");
+
+ err = bpf_map_update_elem(fd, NULL, &value, BPF_NOEXIST);
+ ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags");
+
+ err = bpf_map_update_elem(fd, NULL, &value, 10000);
+ ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags");
+
+ close(fd);
+}
+
+static void test_success_cases(void)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, opts);
+ char value[11];
+ int fd, err;
+
+ /* Create a map */
+ opts.map_flags = BPF_F_ZERO_SEED | BPF_F_NUMA_NODE;
+ opts.numa_node = NUMA_NO_NODE;
+ fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts);
+ if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter success case"))
+ return;
+
+ /* Add a value to the bloom filter */
+ err = bpf_map_update_elem(fd, NULL, &value, 0);
+ if (!ASSERT_OK(err, "bpf_map_update_elem bloom filter success case"))
+ goto done;
+
+ /* Lookup a value in the bloom filter */
+ err = bpf_map_lookup_elem(fd, NULL, &value);
+	ASSERT_OK(err, "bpf_map_lookup_elem bloom filter success case");
+
+done:
+ close(fd);
+}
+
+static void check_bloom(struct bloom_filter_map *skel)
+{
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.check_bloom);
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
+
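+	/* trigger the attached program via a syscall; the prog reports any
+	 * lookup mismatch through skel->bss->error
+	 */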
+ syscall(SYS_getpgid);
+
+ ASSERT_EQ(skel->bss->error, 0, "error");
+
+ bpf_link__destroy(link);
+}
+
+static void test_inner_map(struct bloom_filter_map *skel, const __u32 *rand_vals,
+ __u32 nr_rand_vals)
+{
+ int outer_map_fd, inner_map_fd, err, i, key = 0;
+ struct bpf_link *link;
+
+ /* Create a bloom filter map that will be used as the inner map */
+ inner_map_fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(*rand_vals),
+ nr_rand_vals, NULL);
+ if (!ASSERT_GE(inner_map_fd, 0, "bpf_map_create bloom filter inner map"))
+ return;
+
+ for (i = 0; i < nr_rand_vals; i++) {
+ err = bpf_map_update_elem(inner_map_fd, NULL, rand_vals + i, BPF_ANY);
+ if (!ASSERT_OK(err, "Add random value to inner_map_fd"))
+ goto done;
+ }
+
+ /* Add the bloom filter map to the outer map */
+ outer_map_fd = bpf_map__fd(skel->maps.outer_map);
+ err = bpf_map_update_elem(outer_map_fd, &key, &inner_map_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "Add bloom filter map to outer map"))
+ goto done;
+
+ /* Attach the bloom_filter_inner_map prog */
+ link = bpf_program__attach(skel->progs.inner_map);
+ if (!ASSERT_OK_PTR(link, "link"))
+ goto delete_inner_map;
+
+ syscall(SYS_getpgid);
+
+ ASSERT_EQ(skel->bss->error, 0, "error");
+
+ bpf_link__destroy(link);
+
+delete_inner_map:
+ /* Ensure the inner bloom filter map can be deleted */
+ err = bpf_map_delete_elem(outer_map_fd, &key);
+ ASSERT_OK(err, "Delete inner bloom filter map");
+
+done:
+ close(inner_map_fd);
+}
+
+static int setup_progs(struct bloom_filter_map **out_skel, __u32 **out_rand_vals,
+ __u32 *out_nr_rand_vals)
+{
+ struct bloom_filter_map *skel;
+ int random_data_fd, bloom_fd;
+ __u32 *rand_vals = NULL;
+ __u32 map_size, val;
+ int err, i;
+
+ /* Set up a bloom filter map skeleton */
+ skel = bloom_filter_map__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bloom_filter_map__open_and_load"))
+ return -EINVAL;
+
+ /* Set up rand_vals */
+ map_size = bpf_map__max_entries(skel->maps.map_random_data);
+ rand_vals = malloc(sizeof(*rand_vals) * map_size);
+ if (!rand_vals) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+	/* Generate random values and populate both maps */
+ random_data_fd = bpf_map__fd(skel->maps.map_random_data);
+ bloom_fd = bpf_map__fd(skel->maps.map_bloom);
+ for (i = 0; i < map_size; i++) {
+ val = rand();
+
+ err = bpf_map_update_elem(random_data_fd, &i, &val, BPF_ANY);
+ if (!ASSERT_OK(err, "Add random value to map_random_data"))
+ goto error;
+
+ err = bpf_map_update_elem(bloom_fd, NULL, &val, BPF_ANY);
+ if (!ASSERT_OK(err, "Add random value to map_bloom"))
+ goto error;
+
+ rand_vals[i] = val;
+ }
+
+ *out_skel = skel;
+ *out_rand_vals = rand_vals;
+ *out_nr_rand_vals = map_size;
+
+ return 0;
+
+error:
+ bloom_filter_map__destroy(skel);
+ if (rand_vals)
+ free(rand_vals);
+ return err;
+}
+
+void test_bloom_filter_map(void)
+{
+ __u32 *rand_vals = NULL, nr_rand_vals = 0;
+ struct bloom_filter_map *skel = NULL;
+ int err;
+
+ test_fail_cases();
+ test_success_cases();
+
+ err = setup_progs(&skel, &rand_vals, &nr_rand_vals);
+ if (err)
+ return;
+
+ test_inner_map(skel, rand_vals, nr_rand_vals);
+ free(rand_vals);
+
+ check_bloom(skel);
+
+ bloom_filter_map__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
new file mode 100644
index 000000000000..75f4dff7d042
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -0,0 +1,763 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#define _GNU_SOURCE
+#include <pthread.h>
+#include <sched.h>
+#include <sys/syscall.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <bpf/btf.h>
+#include "test_bpf_cookie.skel.h"
+#include "kprobe_multi.skel.h"
+#include "uprobe_multi.skel.h"
+
+/* uprobe attach point */
+static noinline void trigger_func(void)
+{
+ asm volatile ("");
+}
+
+static void kprobe_subtest(struct test_bpf_cookie *skel)
+{
+ DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
+ struct bpf_link *link1 = NULL, *link2 = NULL;
+ struct bpf_link *retlink1 = NULL, *retlink2 = NULL;
+
+ /* attach two kprobes */
+ opts.bpf_cookie = 0x1;
+ opts.retprobe = false;
+ link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
+ SYS_NANOSLEEP_KPROBE_NAME, &opts);
+ if (!ASSERT_OK_PTR(link1, "link1"))
+ goto cleanup;
+
+ opts.bpf_cookie = 0x2;
+ opts.retprobe = false;
+ link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
+ SYS_NANOSLEEP_KPROBE_NAME, &opts);
+ if (!ASSERT_OK_PTR(link2, "link2"))
+ goto cleanup;
+
+ /* attach two kretprobes */
+ opts.bpf_cookie = 0x10;
+ opts.retprobe = true;
+ retlink1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
+ SYS_NANOSLEEP_KPROBE_NAME, &opts);
+ if (!ASSERT_OK_PTR(retlink1, "retlink1"))
+ goto cleanup;
+
+ opts.bpf_cookie = 0x20;
+ opts.retprobe = true;
+ retlink2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
+ SYS_NANOSLEEP_KPROBE_NAME, &opts);
+ if (!ASSERT_OK_PTR(retlink2, "retlink2"))
+ goto cleanup;
+
+ /* trigger kprobe && kretprobe */
+ usleep(1);
+
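+	/* each prog invocation ORs its bpf_cookie into the shared result, so
+	 * both attachments must have fired for the check below to pass
+	 */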
+ ASSERT_EQ(skel->bss->kprobe_res, 0x1 | 0x2, "kprobe_res");
+ ASSERT_EQ(skel->bss->kretprobe_res, 0x10 | 0x20, "kretprobe_res");
+
+cleanup:
+ bpf_link__destroy(link1);
+ bpf_link__destroy(link2);
+ bpf_link__destroy(retlink1);
+ bpf_link__destroy(retlink2);
+}
+
+static void kprobe_multi_test_run(struct kprobe_multi *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ prog_fd = bpf_program__fd(skel->progs.trigger);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result");
+ ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result");
+ ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result");
+ ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result");
+ ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result");
+ ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result");
+ ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result");
+ ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result");
+
+ ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result");
+ ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result");
+ ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result");
+ ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result");
+ ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result");
+ ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result");
+ ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result");
+ ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result");
+}
+
+static void kprobe_multi_link_api_subtest(void)
+{
+ int prog_fd, link1_fd = -1, link2_fd = -1;
+ struct kprobe_multi *skel = NULL;
+ LIBBPF_OPTS(bpf_link_create_opts, opts);
+ unsigned long long addrs[8];
+ __u64 cookies[8];
+
+ if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
+ goto cleanup;
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+ skel->bss->test_cookie = true;
+
+#define GET_ADDR(__sym, __addr) ({ \
+ __addr = ksym_get_addr(__sym); \
+ if (!ASSERT_NEQ(__addr, 0, "ksym_get_addr " #__sym)) \
+ goto cleanup; \
+})
+
+ GET_ADDR("bpf_fentry_test1", addrs[0]);
+ GET_ADDR("bpf_fentry_test3", addrs[1]);
+ GET_ADDR("bpf_fentry_test4", addrs[2]);
+ GET_ADDR("bpf_fentry_test5", addrs[3]);
+ GET_ADDR("bpf_fentry_test6", addrs[4]);
+ GET_ADDR("bpf_fentry_test7", addrs[5]);
+ GET_ADDR("bpf_fentry_test2", addrs[6]);
+ GET_ADDR("bpf_fentry_test8", addrs[7]);
+
+#undef GET_ADDR
+
+ cookies[0] = 1; /* bpf_fentry_test1 */
+ cookies[1] = 2; /* bpf_fentry_test3 */
+ cookies[2] = 3; /* bpf_fentry_test4 */
+ cookies[3] = 4; /* bpf_fentry_test5 */
+ cookies[4] = 5; /* bpf_fentry_test6 */
+ cookies[5] = 6; /* bpf_fentry_test7 */
+ cookies[6] = 7; /* bpf_fentry_test2 */
+ cookies[7] = 8; /* bpf_fentry_test8 */
+
+ opts.kprobe_multi.addrs = (const unsigned long *) &addrs;
+ opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
+ opts.kprobe_multi.cookies = (const __u64 *) &cookies;
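+	/* cookies[i] is paired with addrs[i]; the BPF progs validate the
+	 * received cookies since test_cookie is set
+	 */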
+ prog_fd = bpf_program__fd(skel->progs.test_kprobe);
+
+ link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
+ if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
+ goto cleanup;
+
+ cookies[0] = 8; /* bpf_fentry_test1 */
+ cookies[1] = 7; /* bpf_fentry_test3 */
+ cookies[2] = 6; /* bpf_fentry_test4 */
+ cookies[3] = 5; /* bpf_fentry_test5 */
+ cookies[4] = 4; /* bpf_fentry_test6 */
+ cookies[5] = 3; /* bpf_fentry_test7 */
+ cookies[6] = 2; /* bpf_fentry_test2 */
+ cookies[7] = 1; /* bpf_fentry_test8 */
+
+ opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
+ prog_fd = bpf_program__fd(skel->progs.test_kretprobe);
+
+ link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
+ if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
+ goto cleanup;
+
+ kprobe_multi_test_run(skel);
+
+cleanup:
+ close(link1_fd);
+ close(link2_fd);
+ kprobe_multi__destroy(skel);
+}
+
+static void kprobe_multi_attach_api_subtest(void)
+{
+ struct bpf_link *link1 = NULL, *link2 = NULL;
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct kprobe_multi *skel = NULL;
+ const char *syms[8] = {
+ "bpf_fentry_test1",
+ "bpf_fentry_test3",
+ "bpf_fentry_test4",
+ "bpf_fentry_test5",
+ "bpf_fentry_test6",
+ "bpf_fentry_test7",
+ "bpf_fentry_test2",
+ "bpf_fentry_test8",
+ };
+ __u64 cookies[8];
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+ skel->bss->test_cookie = true;
+
+ cookies[0] = 1; /* bpf_fentry_test1 */
+ cookies[1] = 2; /* bpf_fentry_test3 */
+ cookies[2] = 3; /* bpf_fentry_test4 */
+ cookies[3] = 4; /* bpf_fentry_test5 */
+ cookies[4] = 5; /* bpf_fentry_test6 */
+ cookies[5] = 6; /* bpf_fentry_test7 */
+ cookies[6] = 7; /* bpf_fentry_test2 */
+ cookies[7] = 8; /* bpf_fentry_test8 */
+
+ opts.syms = syms;
+ opts.cnt = ARRAY_SIZE(syms);
+ opts.cookies = cookies;
+
+ link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+ NULL, &opts);
+ if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
+ goto cleanup;
+
+ cookies[0] = 8; /* bpf_fentry_test1 */
+ cookies[1] = 7; /* bpf_fentry_test3 */
+ cookies[2] = 6; /* bpf_fentry_test4 */
+ cookies[3] = 5; /* bpf_fentry_test5 */
+ cookies[4] = 4; /* bpf_fentry_test6 */
+ cookies[5] = 3; /* bpf_fentry_test7 */
+ cookies[6] = 2; /* bpf_fentry_test2 */
+ cookies[7] = 1; /* bpf_fentry_test8 */
+
+ opts.retprobe = true;
+
+ link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe,
+ NULL, &opts);
+ if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts"))
+ goto cleanup;
+
+ kprobe_multi_test_run(skel);
+
+cleanup:
+ bpf_link__destroy(link2);
+ bpf_link__destroy(link1);
+ kprobe_multi__destroy(skel);
+}
+
+/* defined in prog_tests/uprobe_multi_test.c */
+void uprobe_multi_func_1(void);
+void uprobe_multi_func_2(void);
+void uprobe_multi_func_3(void);
+
+static void uprobe_multi_test_run(struct uprobe_multi *skel)
+{
+ skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
+ skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
+ skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;
+
+ skel->bss->pid = getpid();
+ skel->bss->test_cookie = true;
+
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+
+ ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 1, "uprobe_multi_func_1_result");
+ ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 1, "uprobe_multi_func_2_result");
+ ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 1, "uprobe_multi_func_3_result");
+
+ ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 1, "uretprobe_multi_func_1_result");
+ ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 1, "uretprobe_multi_func_2_result");
+ ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 1, "uretprobe_multi_func_3_result");
+}
+
+static void uprobe_multi_attach_api_subtest(void)
+{
+ struct bpf_link *link1 = NULL, *link2 = NULL;
+ struct uprobe_multi *skel = NULL;
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
+ const char *syms[3] = {
+ "uprobe_multi_func_1",
+ "uprobe_multi_func_2",
+ "uprobe_multi_func_3",
+ };
+ __u64 cookies[3];
+
+ cookies[0] = 3; /* uprobe_multi_func_1 */
+ cookies[1] = 1; /* uprobe_multi_func_2 */
+ cookies[2] = 2; /* uprobe_multi_func_3 */
+
+ opts.syms = syms;
+ opts.cnt = ARRAY_SIZE(syms);
+ opts.cookies = &cookies[0];
+
+ skel = uprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi"))
+ goto cleanup;
+
+ link1 = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1,
+ "/proc/self/exe", NULL, &opts);
+ if (!ASSERT_OK_PTR(link1, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ cookies[0] = 2; /* uprobe_multi_func_1 */
+ cookies[1] = 3; /* uprobe_multi_func_2 */
+ cookies[2] = 1; /* uprobe_multi_func_3 */
+
+ opts.retprobe = true;
+ link2 = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, -1,
+ "/proc/self/exe", NULL, &opts);
+ if (!ASSERT_OK_PTR(link2, "bpf_program__attach_uprobe_multi_retprobe"))
+ goto cleanup;
+
+ uprobe_multi_test_run(skel);
+
+cleanup:
+ bpf_link__destroy(link2);
+ bpf_link__destroy(link1);
+ uprobe_multi__destroy(skel);
+}
+
+static void uprobe_subtest(struct test_bpf_cookie *skel)
+{
+ DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
+ struct bpf_link *link1 = NULL, *link2 = NULL;
+ struct bpf_link *retlink1 = NULL, *retlink2 = NULL;
+ ssize_t uprobe_offset;
+
+ uprobe_offset = get_uprobe_offset(&trigger_func);
+ if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
+ goto cleanup;
+
+ /* attach two uprobes */
+ opts.bpf_cookie = 0x100;
+ opts.retprobe = false;
+ link1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, 0 /* self pid */,
+ "/proc/self/exe", uprobe_offset, &opts);
+ if (!ASSERT_OK_PTR(link1, "link1"))
+ goto cleanup;
+
+ opts.bpf_cookie = 0x200;
+ opts.retprobe = false;
+ link2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, -1 /* any pid */,
+ "/proc/self/exe", uprobe_offset, &opts);
+ if (!ASSERT_OK_PTR(link2, "link2"))
+ goto cleanup;
+
+ /* attach two uretprobes */
+ opts.bpf_cookie = 0x1000;
+ opts.retprobe = true;
+ retlink1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, -1 /* any pid */,
+ "/proc/self/exe", uprobe_offset, &opts);
+ if (!ASSERT_OK_PTR(retlink1, "retlink1"))
+ goto cleanup;
+
+ opts.bpf_cookie = 0x2000;
+ opts.retprobe = true;
+ retlink2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, 0 /* self pid */,
+ "/proc/self/exe", uprobe_offset, &opts);
+ if (!ASSERT_OK_PTR(retlink2, "retlink2"))
+ goto cleanup;
+
+ /* trigger uprobe && uretprobe */
+ trigger_func();
+
+ ASSERT_EQ(skel->bss->uprobe_res, 0x100 | 0x200, "uprobe_res");
+ ASSERT_EQ(skel->bss->uretprobe_res, 0x1000 | 0x2000, "uretprobe_res");
+
+cleanup:
+ bpf_link__destroy(link1);
+ bpf_link__destroy(link2);
+ bpf_link__destroy(retlink1);
+ bpf_link__destroy(retlink2);
+}
+
+static void tp_subtest(struct test_bpf_cookie *skel)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts);
+ struct bpf_link *link1 = NULL, *link2 = NULL, *link3 = NULL;
+
+ /* attach first tp prog */
+ opts.bpf_cookie = 0x10000;
+ link1 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp1,
+ "syscalls", "sys_enter_nanosleep", &opts);
+ if (!ASSERT_OK_PTR(link1, "link1"))
+ goto cleanup;
+
+ /* attach second tp prog */
+ opts.bpf_cookie = 0x20000;
+ link2 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp2,
+ "syscalls", "sys_enter_nanosleep", &opts);
+ if (!ASSERT_OK_PTR(link2, "link2"))
+ goto cleanup;
+
+ /* trigger tracepoints */
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->tp_res, 0x10000 | 0x20000, "tp_res1");
+
+ /* now we detach first prog and will attach third one, which causes
+ * two internal calls to bpf_prog_array_copy(), shuffling
+ * bpf_prog_array_items around. We test here that we don't lose track
+ * of associated bpf_cookies.
+ */
+ bpf_link__destroy(link1);
+ link1 = NULL;
+ kern_sync_rcu();
+ skel->bss->tp_res = 0;
+
+ /* attach third tp prog */
+ opts.bpf_cookie = 0x40000;
+ link3 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp3,
+ "syscalls", "sys_enter_nanosleep", &opts);
+ if (!ASSERT_OK_PTR(link3, "link3"))
+ goto cleanup;
+
+ /* trigger tracepoints */
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->tp_res, 0x20000 | 0x40000, "tp_res2");
+
+cleanup:
+ bpf_link__destroy(link1);
+ bpf_link__destroy(link2);
+ bpf_link__destroy(link3);
+}
+
+static void burn_cpu(void)
+{
+ volatile int j = 0;
+ cpu_set_t cpu_set;
+ int i, err;
+
+ /* generate some branches on cpu 0 */
+ CPU_ZERO(&cpu_set);
+ CPU_SET(0, &cpu_set);
+ err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
+ ASSERT_OK(err, "set_thread_affinity");
+
+ /* spin the loop for a while (random high number) */
+ for (i = 0; i < 1000000; ++i)
+ ++j;
+}
+
+static void pe_subtest(struct test_bpf_cookie *skel)
+{
+ DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts);
+ struct bpf_link *link = NULL;
+ struct perf_event_attr attr;
+ int pfd = -1;
+
+ /* create perf event */
+ memset(&attr, 0, sizeof(attr));
+ attr.size = sizeof(attr);
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.config = PERF_COUNT_SW_CPU_CLOCK;
+ attr.sample_period = 100000;
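+	/* software CPU clock event sampled often enough that burn_cpu()
+	 * reliably triggers the attached prog
+	 */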
+ pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
+ if (!ASSERT_GE(pfd, 0, "perf_fd"))
+ goto cleanup;
+
+ opts.bpf_cookie = 0x100000;
+ link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
+ if (!ASSERT_OK_PTR(link, "link1"))
+ goto cleanup;
+
+ burn_cpu(); /* trigger BPF prog */
+
+ ASSERT_EQ(skel->bss->pe_res, 0x100000, "pe_res1");
+
+ /* prevent bpf_link__destroy() closing pfd itself */
+ bpf_link__disconnect(link);
+ /* close BPF link's FD explicitly */
+ close(bpf_link__fd(link));
+ /* free up memory used by struct bpf_link */
+ bpf_link__destroy(link);
+ link = NULL;
+ kern_sync_rcu();
+ skel->bss->pe_res = 0;
+
+ opts.bpf_cookie = 0x200000;
+ link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
+ if (!ASSERT_OK_PTR(link, "link2"))
+ goto cleanup;
+
+ burn_cpu(); /* trigger BPF prog */
+
+ ASSERT_EQ(skel->bss->pe_res, 0x200000, "pe_res2");
+
+cleanup:
+ close(pfd);
+ bpf_link__destroy(link);
+}
+
+static int verify_tracing_link_info(int fd, u64 cookie)
+{
+ struct bpf_link_info info;
+ int err;
+ u32 len = sizeof(info);
+
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ if (!ASSERT_OK(err, "get_link_info"))
+ return -1;
+
+ if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_TRACING, "link_type"))
+ return -1;
+
+ ASSERT_EQ(info.tracing.cookie, cookie, "tracing_cookie");
+
+ return 0;
+}
+
+static void tracing_subtest(struct test_bpf_cookie *skel)
+{
+ __u64 cookie;
+ int prog_fd, err;
+ int fentry_fd = -1, fexit_fd = -1, fmod_ret_fd = -1;
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ LIBBPF_OPTS(bpf_link_create_opts, link_opts);
+
+ skel->bss->fentry_res = 0;
+ skel->bss->fexit_res = 0;
+
+ cookie = 0x10000000000000L;
+ prog_fd = bpf_program__fd(skel->progs.fentry_test1);
+ link_opts.tracing.cookie = cookie;
+ fentry_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, &link_opts);
+ if (!ASSERT_GE(fentry_fd, 0, "fentry.link_create"))
+ goto cleanup;
+
+ err = verify_tracing_link_info(fentry_fd, cookie);
+ if (!ASSERT_OK(err, "verify_tracing_link_info"))
+ goto cleanup;
+
+ cookie = 0x20000000000000L;
+ prog_fd = bpf_program__fd(skel->progs.fexit_test1);
+ link_opts.tracing.cookie = cookie;
+ fexit_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FEXIT, &link_opts);
+ if (!ASSERT_GE(fexit_fd, 0, "fexit.link_create"))
+ goto cleanup;
+
+ cookie = 0x30000000000000L;
+ prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
+ link_opts.tracing.cookie = cookie;
+ fmod_ret_fd = bpf_link_create(prog_fd, 0, BPF_MODIFY_RETURN, &link_opts);
+ if (!ASSERT_GE(fmod_ret_fd, 0, "fmod_ret.link_create"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.fentry_test1);
+ bpf_prog_test_run_opts(prog_fd, &opts);
+
+ prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
+ bpf_prog_test_run_opts(prog_fd, &opts);
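+	/* running the progs via test_run invokes the hooked kernel test
+	 * functions, triggering the fentry, fexit and fmod_ret attachments
+	 */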
+
+ ASSERT_EQ(skel->bss->fentry_res, 0x10000000000000L, "fentry_res");
+ ASSERT_EQ(skel->bss->fexit_res, 0x20000000000000L, "fexit_res");
+ ASSERT_EQ(skel->bss->fmod_ret_res, 0x30000000000000L, "fmod_ret_res");
+
+cleanup:
+ if (fentry_fd >= 0)
+ close(fentry_fd);
+ if (fexit_fd >= 0)
+ close(fexit_fd);
+ if (fmod_ret_fd >= 0)
+ close(fmod_ret_fd);
+}
+
+int stack_mprotect(void);
+
+static void lsm_subtest(struct test_bpf_cookie *skel)
+{
+ __u64 cookie;
+ int prog_fd;
+ int lsm_fd = -1;
+ LIBBPF_OPTS(bpf_link_create_opts, link_opts);
+ int err;
+
+ skel->bss->lsm_res = 0;
+
+ cookie = 0x90000000000090L;
+ prog_fd = bpf_program__fd(skel->progs.test_int_hook);
+ link_opts.tracing.cookie = cookie;
+ lsm_fd = bpf_link_create(prog_fd, 0, BPF_LSM_MAC, &link_opts);
+ if (!ASSERT_GE(lsm_fd, 0, "lsm.link_create"))
+ goto cleanup;
+
+ err = stack_mprotect();
+ if (!ASSERT_EQ(err, -1, "stack_mprotect") ||
+ !ASSERT_EQ(errno, EPERM, "stack_mprotect"))
+ goto cleanup;
+
+ usleep(1);
+
+	ASSERT_EQ(skel->bss->lsm_res, 0x90000000000090L, "lsm_res");
+
+cleanup:
+ if (lsm_fd >= 0)
+ close(lsm_fd);
+}
+
+static void tp_btf_subtest(struct test_bpf_cookie *skel)
+{
+ __u64 cookie;
+ int prog_fd, link_fd = -1;
+ struct bpf_link *link = NULL;
+ LIBBPF_OPTS(bpf_link_create_opts, link_opts);
+ LIBBPF_OPTS(bpf_raw_tp_opts, raw_tp_opts);
+ LIBBPF_OPTS(bpf_trace_opts, trace_opts);
+
+ /* There are three different ways to attach tp_btf (BTF-aware raw
+ * tracepoint) programs. Let's test all of them.
+ */
+ prog_fd = bpf_program__fd(skel->progs.handle_tp_btf);
+
+ /* low-level BPF_RAW_TRACEPOINT_OPEN command wrapper */
+ skel->bss->tp_btf_res = 0;
+
+ raw_tp_opts.cookie = cookie = 0x11000000000000L;
+ link_fd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_tp_opts);
+ if (!ASSERT_GE(link_fd, 0, "bpf_raw_tracepoint_open_opts"))
+ goto cleanup;
+
+ usleep(1); /* trigger */
+ close(link_fd); /* detach */
+ link_fd = -1;
+
+ ASSERT_EQ(skel->bss->tp_btf_res, cookie, "raw_tp_open_res");
+
+ /* low-level generic bpf_link_create() API */
+ skel->bss->tp_btf_res = 0;
+
+ link_opts.tracing.cookie = cookie = 0x22000000000000L;
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_RAW_TP, &link_opts);
+ if (!ASSERT_GE(link_fd, 0, "bpf_link_create"))
+ goto cleanup;
+
+ usleep(1); /* trigger */
+ close(link_fd); /* detach */
+ link_fd = -1;
+
+ ASSERT_EQ(skel->bss->tp_btf_res, cookie, "link_create_res");
+
+ /* high-level bpf_link-based bpf_program__attach_trace_opts() API */
+ skel->bss->tp_btf_res = 0;
+
+ trace_opts.cookie = cookie = 0x33000000000000L;
+ link = bpf_program__attach_trace_opts(skel->progs.handle_tp_btf, &trace_opts);
+ if (!ASSERT_OK_PTR(link, "attach_trace_opts"))
+ goto cleanup;
+
+ usleep(1); /* trigger */
+ bpf_link__destroy(link); /* detach */
+ link = NULL;
+
+ ASSERT_EQ(skel->bss->tp_btf_res, cookie, "attach_trace_opts_res");
+
+cleanup:
+ if (link_fd >= 0)
+ close(link_fd);
+ bpf_link__destroy(link);
+}
+
+static int verify_raw_tp_link_info(int fd, u64 cookie)
+{
+ struct bpf_link_info info;
+ int err;
+ u32 len = sizeof(info);
+
+ memset(&info, 0, sizeof(info));
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ if (!ASSERT_OK(err, "get_link_info"))
+ return -1;
+
+ if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_RAW_TRACEPOINT, "link_type"))
+ return -1;
+
+ ASSERT_EQ(info.raw_tracepoint.cookie, cookie, "raw_tp_cookie");
+
+ return 0;
+}
+
+static void raw_tp_subtest(struct test_bpf_cookie *skel)
+{
+ __u64 cookie;
+ int err, prog_fd, link_fd = -1;
+ struct bpf_link *link = NULL;
+ LIBBPF_OPTS(bpf_raw_tp_opts, raw_tp_opts);
+ LIBBPF_OPTS(bpf_raw_tracepoint_opts, opts);
+
+ /* There are two different ways to attach raw_tp programs */
+ prog_fd = bpf_program__fd(skel->progs.handle_raw_tp);
+
+ /* low-level BPF_RAW_TRACEPOINT_OPEN command wrapper */
+ skel->bss->raw_tp_res = 0;
+
+ raw_tp_opts.tp_name = "sys_enter";
+ raw_tp_opts.cookie = cookie = 0x55000000000000L;
+ link_fd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_tp_opts);
+ if (!ASSERT_GE(link_fd, 0, "bpf_raw_tracepoint_open_opts"))
+ goto cleanup;
+
+ usleep(1); /* trigger */
+
+ err = verify_raw_tp_link_info(link_fd, cookie);
+ if (!ASSERT_OK(err, "verify_raw_tp_link_info"))
+ goto cleanup;
+
+ close(link_fd); /* detach */
+ link_fd = -1;
+
+ ASSERT_EQ(skel->bss->raw_tp_res, cookie, "raw_tp_open_res");
+
+ /* high-level bpf_link-based bpf_program__attach_raw_tracepoint_opts() API */
+ skel->bss->raw_tp_res = 0;
+
+ opts.cookie = cookie = 0x66000000000000L;
+ link = bpf_program__attach_raw_tracepoint_opts(skel->progs.handle_raw_tp,
+ "sys_enter", &opts);
+ if (!ASSERT_OK_PTR(link, "attach_raw_tp_opts"))
+ goto cleanup;
+
+ usleep(1); /* trigger */
+ bpf_link__destroy(link); /* detach */
+ link = NULL;
+
+ ASSERT_EQ(skel->bss->raw_tp_res, cookie, "attach_raw_tp_opts_res");
+
+cleanup:
+ if (link_fd >= 0)
+ close(link_fd);
+ bpf_link__destroy(link);
+}
+
+void test_bpf_cookie(void)
+{
+ struct test_bpf_cookie *skel;
+
+ skel = test_bpf_cookie__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->bss->my_tid = sys_gettid();
+
+ if (test__start_subtest("kprobe"))
+ kprobe_subtest(skel);
+ if (test__start_subtest("multi_kprobe_link_api"))
+ kprobe_multi_link_api_subtest();
+ if (test__start_subtest("multi_kprobe_attach_api"))
+ kprobe_multi_attach_api_subtest();
+ if (test__start_subtest("uprobe"))
+ uprobe_subtest(skel);
+ if (test__start_subtest("multi_uprobe_attach_api"))
+ uprobe_multi_attach_api_subtest();
+ if (test__start_subtest("tracepoint"))
+ tp_subtest(skel);
+ if (test__start_subtest("perf_event"))
+ pe_subtest(skel);
+ if (test__start_subtest("trampoline"))
+ tracing_subtest(skel);
+ if (test__start_subtest("lsm"))
+ lsm_subtest(skel);
+ if (test__start_subtest("tp_btf"))
+ tp_btf_subtest(skel);
+ if (test__start_subtest("raw_tp"))
+ raw_tp_subtest(skel);
+ test_bpf_cookie__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_gotox.c b/tools/testing/selftests/bpf/prog_tests/bpf_gotox.c
new file mode 100644
index 000000000000..d138cc7b1bda
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_gotox.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in6.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+
+#include <sys/syscall.h>
+#include <bpf/bpf.h>
+
+#include "bpf_gotox.skel.h"
+
+static void __test_run(struct bpf_program *prog, void *ctx_in, size_t ctx_size_in)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .ctx_in = ctx_in,
+ .ctx_size_in = ctx_size_in,
+ );
+ int err, prog_fd;
+
+ prog_fd = bpf_program__fd(prog);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run_opts err");
+}
+
+static void __subtest(struct bpf_gotox *skel, void (*check)(struct bpf_gotox *))
+{
+ if (skel->data->skip)
+ test__skip();
+ else
+ check(skel);
+}
+
+static void check_simple(struct bpf_gotox *skel,
+ struct bpf_program *prog,
+ __u64 ctx_in,
+ __u64 expected)
+{
+ skel->bss->ret_user = 0;
+
+ __test_run(prog, &ctx_in, sizeof(ctx_in));
+
+ if (!ASSERT_EQ(skel->bss->ret_user, expected, "skel->bss->ret_user"))
+ return;
+}
+
+static void check_simple_fentry(struct bpf_gotox *skel,
+ struct bpf_program *prog,
+ __u64 ctx_in,
+ __u64 expected)
+{
+ skel->bss->in_user = ctx_in;
+ skel->bss->ret_user = 0;
+
+ /* trigger */
+ usleep(1);
+
+ if (!ASSERT_EQ(skel->bss->ret_user, expected, "skel->bss->ret_user"))
+ return;
+}
+
+/* validate that for two loads of the same jump table libbpf generates only one map */
+static void check_one_map_two_jumps(struct bpf_gotox *skel)
+{
+ struct bpf_prog_info prog_info;
+ struct bpf_map_info map_info;
+ __u32 len;
+ __u32 map_ids[16];
+ int prog_fd, map_fd;
+ int ret;
+ int i;
+ bool seen = false;
+
+ memset(&prog_info, 0, sizeof(prog_info));
+ prog_info.map_ids = (long)map_ids;
+ prog_info.nr_map_ids = ARRAY_SIZE(map_ids);
+ prog_fd = bpf_program__fd(skel->progs.one_map_two_jumps);
+ if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd(one_map_two_jumps)"))
+ return;
+
+ len = sizeof(prog_info);
+ ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &len);
+ if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd(prog_fd)"))
+ return;
+
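+	/* walk every map used by the prog and verify exactly one INSN_ARRAY
+	 * map is present
+	 */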
+ for (i = 0; i < prog_info.nr_map_ids; i++) {
+ map_fd = bpf_map_get_fd_by_id(map_ids[i]);
+ if (!ASSERT_GE(map_fd, 0, "bpf_map_get_fd_by_id"))
+ return;
+
+ len = sizeof(map_info);
+ memset(&map_info, 0, len);
+ ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &len);
+ if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd(map_fd)")) {
+ close(map_fd);
+ return;
+ }
+
+ if (map_info.type == BPF_MAP_TYPE_INSN_ARRAY) {
+ if (!ASSERT_EQ(seen, false, "more than one INSN_ARRAY map")) {
+ close(map_fd);
+ return;
+ }
+ seen = true;
+ }
+ close(map_fd);
+ }
+
+ ASSERT_EQ(seen, true, "no INSN_ARRAY map");
+}
+
+static void check_one_switch(struct bpf_gotox *skel)
+{
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.one_switch, in[i], out[i]);
+}
+
+static void check_one_switch_non_zero_sec_off(struct bpf_gotox *skel)
+{
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.one_switch_non_zero_sec_off, in[i], out[i]);
+}
+
+static void check_two_switches(struct bpf_gotox *skel)
+{
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {103, 104, 107, 205, 115, 1019, 1019};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.two_switches, in[i], out[i]);
+}
+
+static void check_big_jump_table(struct bpf_gotox *skel)
+{
+ __u64 in[] = {0, 11, 27, 31, 22, 45, 99};
+ __u64 out[] = {2, 3, 4, 5, 19, 19, 19};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.big_jump_table, in[i], out[i]);
+}
+
+static void check_one_jump_two_maps(struct bpf_gotox *skel)
+{
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+	__u64 out[] = {12, 15, 7, 15, 12, 15, 15};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.one_jump_two_maps, in[i], out[i]);
+}
+
+static void check_static_global(struct bpf_gotox *skel)
+{
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.use_static_global1, in[i], out[i]);
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.use_static_global2, in[i], out[i]);
+}
+
+static void check_nonstatic_global(struct bpf_gotox *skel)
+{
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.use_nonstatic_global1, in[i], out[i]);
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.use_nonstatic_global2, in[i], out[i]);
+}
+
+static void check_other_sec(struct bpf_gotox *skel)
+{
+ struct bpf_link *link;
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+ int i;
+
+ link = bpf_program__attach(skel->progs.simple_test_other_sec);
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple_fentry(skel, skel->progs.simple_test_other_sec, in[i], out[i]);
+
+ bpf_link__destroy(link);
+}
+
+static void check_static_global_other_sec(struct bpf_gotox *skel)
+{
+ struct bpf_link *link;
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+ int i;
+
+ link = bpf_program__attach(skel->progs.use_static_global_other_sec);
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple_fentry(skel, skel->progs.use_static_global_other_sec, in[i], out[i]);
+
+ bpf_link__destroy(link);
+}
+
+static void check_nonstatic_global_other_sec(struct bpf_gotox *skel)
+{
+ struct bpf_link *link;
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+ int i;
+
+ link = bpf_program__attach(skel->progs.use_nonstatic_global_other_sec);
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple_fentry(skel, skel->progs.use_nonstatic_global_other_sec, in[i], out[i]);
+
+ bpf_link__destroy(link);
+}
+
+void test_bpf_gotox(void)
+{
+ struct bpf_gotox *skel;
+ int ret;
+
+ skel = bpf_gotox__open();
+ if (!ASSERT_NEQ(skel, NULL, "bpf_gotox__open"))
+ return;
+
+	ret = bpf_gotox__load(skel);
+	if (!ASSERT_OK(ret, "bpf_gotox__load")) {
+		bpf_gotox__destroy(skel);
+		return;
+	}
+
+ skel->bss->pid = getpid();
+
+ if (test__start_subtest("one-switch"))
+ __subtest(skel, check_one_switch);
+
+ if (test__start_subtest("one-switch-non-zero-sec-offset"))
+ __subtest(skel, check_one_switch_non_zero_sec_off);
+
+ if (test__start_subtest("two-switches"))
+ __subtest(skel, check_two_switches);
+
+ if (test__start_subtest("big-jump-table"))
+ __subtest(skel, check_big_jump_table);
+
+ if (test__start_subtest("static-global"))
+ __subtest(skel, check_static_global);
+
+ if (test__start_subtest("nonstatic-global"))
+ __subtest(skel, check_nonstatic_global);
+
+ if (test__start_subtest("other-sec"))
+ __subtest(skel, check_other_sec);
+
+ if (test__start_subtest("static-global-other-sec"))
+ __subtest(skel, check_static_global_other_sec);
+
+ if (test__start_subtest("nonstatic-global-other-sec"))
+ __subtest(skel, check_nonstatic_global_other_sec);
+
+ if (test__start_subtest("one-jump-two-maps"))
+ __subtest(skel, check_one_jump_two_maps);
+
+ if (test__start_subtest("one-map-two-jumps"))
+ __subtest(skel, check_one_map_two_jumps);
+
+ bpf_gotox__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c b/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
new file mode 100644
index 000000000000..269870bec941
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <bpf/bpf.h>
+#include <test_progs.h>
+
+#ifdef __x86_64__
+static int map_create(__u32 map_type, __u32 max_entries)
+{
+ const char *map_name = "insn_array";
+ __u32 key_size = 4;
+ __u32 value_size = sizeof(struct bpf_insn_array_value);
+
+ return bpf_map_create(map_type, map_name, key_size, value_size, max_entries, NULL);
+}
+
+static int prog_load(struct bpf_insn *insns, __u32 insn_cnt, int *fd_array, __u32 fd_array_cnt)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+
+ opts.fd_array = fd_array;
+ opts.fd_array_cnt = fd_array_cnt;
+
+ return bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, &opts);
+}
+
+static void __check_success(struct bpf_insn *insns, __u32 insn_cnt, __u32 *map_in, __u32 *map_out)
+{
+ struct bpf_insn_array_value val = {};
+ int prog_fd = -1, map_fd, i;
+
+ map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, insn_cnt);
+ if (!ASSERT_GE(map_fd, 0, "map_create"))
+ return;
+
+ for (i = 0; i < insn_cnt; i++) {
+ val.orig_off = map_in[i];
+ if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &i, &val, 0), 0, "bpf_map_update_elem"))
+ goto cleanup;
+ }
+
+ if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
+ goto cleanup;
+
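+	/* the frozen map is passed via fd_array so the verifier can bind it to
+	 * the program and fill in the xlated offsets during load
+	 */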
+ prog_fd = prog_load(insns, insn_cnt, &map_fd, 1);
+ if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)"))
+ goto cleanup;
+
+ for (i = 0; i < insn_cnt; i++) {
+ char buf[64];
+
+ if (!ASSERT_EQ(bpf_map_lookup_elem(map_fd, &i, &val), 0, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+		snprintf(buf, sizeof(buf), "val.xlated_off should equal map_out[%d]", i);
+ ASSERT_EQ(val.xlated_off, map_out[i], buf);
+ }
+
+cleanup:
+ close(prog_fd);
+ close(map_fd);
+}
+
+/*
 * Load a program that the verifier will not rewrite in any way. Add an
 * insn_array map pointing to every instruction and check that the map
 * hasn't changed after the program load.
+ */
+static void check_one_to_one_mapping(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ __u32 map_in[] = {0, 1, 2, 3, 4, 5};
+ __u32 map_out[] = {0, 1, 2, 3, 4, 5};
+
+ __check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
+}
+
+/*
+ * Load a program with two patches (get jiffies, for simplicity). Add an
+ * insn_array map pointing to every instruction. Check how it was changed
+ * after the program load.
+ */
+static void check_simple(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
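+	/* the expected offsets assume each bpf_jiffies64() call is inlined
+	 * into 3 insns, shifting every later insn by 2
+	 */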
+ __u32 map_in[] = {0, 1, 2, 3, 4, 5};
+ __u32 map_out[] = {0, 1, 4, 5, 8, 9};
+
+ __check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
+}
+
+/*
+ * Verifier can delete code in two cases: nops & dead code. From insn
+ * array's point of view, the two cases are the same, so test using
+ * the simplest method: by loading some nops
+ */
+static void check_deletions(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
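+	/* removed nops have no xlated counterpart and are reported as -1 */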
+ __u32 map_in[] = {0, 1, 2, 3, 4, 5};
+ __u32 map_out[] = {0, -1, 1, -1, 2, 3};
+
+ __check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
+}
+
+/*
+ * Same test as check_deletions, but also add code which adds instructions
+ */
+static void check_deletions_with_functions(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ };
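+	/* dropped nops map to -1 while the inlined jiffies64 calls grow,
+	 * shifting the surviving insns
+	 */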
+ __u32 map_in[] = { 0, 1, 2, 3, 4, 5, /* func */ 6, 7, 8, 9, 10};
+ __u32 map_out[] = {-1, 0, -1, 3, 4, 5, /* func */ -1, 6, -1, 9, 10};
+
+ __check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
+}
+
+/*
+ * Try to load a program with a map which points to outside of the program
+ */
+static void check_out_of_bounds_index(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd, map_fd;
+ struct bpf_insn_array_value val = {};
+ int key;
+
+ map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, 1);
+ if (!ASSERT_GE(map_fd, 0, "map_create"))
+ return;
+
+ key = 0;
+ val.orig_off = ARRAY_SIZE(insns); /* too big */
+ if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &key, &val, 0), 0, "bpf_map_update_elem"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
+ goto cleanup;
+
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
+ if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)")) {
+ close(prog_fd);
+ goto cleanup;
+ }
+
+cleanup:
+ close(map_fd);
+}
+
+/*
 * Try to load a program with a map which points into the middle of a 16-byte insn
+ */
+static void check_mid_insn_index(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_LD_IMM64(BPF_REG_0, 0), /* 2 x 8 */
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd, map_fd;
+ struct bpf_insn_array_value val = {};
+ int key;
+
+ map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, 1);
+ if (!ASSERT_GE(map_fd, 0, "map_create"))
+ return;
+
+ key = 0;
+ val.orig_off = 1; /* middle of 16-byte instruction */
+ if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &key, &val, 0), 0, "bpf_map_update_elem"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
+ goto cleanup;
+
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
+ if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)")) {
+ close(prog_fd);
+ goto cleanup;
+ }
+
+cleanup:
+ close(map_fd);
+}
+
+static void check_incorrect_index(void)
+{
+ check_out_of_bounds_index();
+ check_mid_insn_index();
+}
+
+static int set_bpf_jit_harden(char *level)
+{
+ char old_level;
+ int err = -1;
+ int fd = -1;
+
+ fd = open("/proc/sys/net/core/bpf_jit_harden", O_RDWR | O_NONBLOCK);
+ if (fd < 0) {
+ ASSERT_FAIL("open .../bpf_jit_harden returned %d (errno=%d)", fd, errno);
+ return -1;
+ }
+
+ err = read(fd, &old_level, 1);
+ if (err != 1) {
+ ASSERT_FAIL("read from .../bpf_jit_harden returned %d (errno=%d)", err, errno);
+ err = -1;
+ goto end;
+ }
+
+ lseek(fd, 0, SEEK_SET);
+
+ err = write(fd, level, 1);
+ if (err != 1) {
+ ASSERT_FAIL("write to .../bpf_jit_harden returned %d (errno=%d)", err, errno);
+ err = -1;
+ goto end;
+ }
+
+ err = 0;
+ *level = old_level;
+end:
+ if (fd >= 0)
+ close(fd);
+ return err;
+}
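
The helper above swaps the sysctl value in place: it writes the requested level and returns the previous one through the same pointer. A minimal save/restore sketch of the intended calling pattern (names as in this file):

	char level = '2';			/* request full constant blinding */

	if (set_bpf_jit_harden(&level) == 0) {
		/* ... run the test with hardening enabled ... */
		set_bpf_jit_harden(&level);	/* 'level' now holds the old value: restore it */
	}
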
+
+static void check_blindness(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd = -1, map_fd;
+ struct bpf_insn_array_value val = {};
+	char bpf_jit_harden = '@'; /* non-existing value */
+ int i;
+
+ map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, ARRAY_SIZE(insns));
+ if (!ASSERT_GE(map_fd, 0, "map_create"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(insns); i++) {
+ val.orig_off = i;
+ if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &i, &val, 0), 0, "bpf_map_update_elem"))
+ goto cleanup;
+ }
+
+ if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
+ goto cleanup;
+
+ bpf_jit_harden = '2';
+ if (set_bpf_jit_harden(&bpf_jit_harden)) {
+ bpf_jit_harden = '@'; /* open, read or write failed => no write was done */
+ goto cleanup;
+ }
+
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
+ if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(insns); i++) {
+ char fmt[32];
+
+ if (!ASSERT_EQ(bpf_map_lookup_elem(map_fd, &i, &val), 0, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+		snprintf(fmt, sizeof(fmt), "val should equal 3*%d", i);
+ ASSERT_EQ(val.xlated_off, i * 3, fmt);
+ }
+
+cleanup:
+ /* restore the old one */
+ if (bpf_jit_harden != '@')
+ set_bpf_jit_harden(&bpf_jit_harden);
+
+ close(prog_fd);
+ close(map_fd);
+}
+
+/* Once the map has been initialized, it must be frozen */
+static void check_load_unfrozen_map(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd = -1, map_fd;
+ struct bpf_insn_array_value val = {};
+ int i;
+
+ map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, ARRAY_SIZE(insns));
+ if (!ASSERT_GE(map_fd, 0, "map_create"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(insns); i++) {
+ val.orig_off = i;
+ if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &i, &val, 0), 0, "bpf_map_update_elem"))
+ goto cleanup;
+ }
+
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
+ if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)"))
+ goto cleanup;
+
+ /* correctness: now freeze the map, the program should load fine */
+
+ if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
+ goto cleanup;
+
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
+ if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(insns); i++) {
+ if (!ASSERT_EQ(bpf_map_lookup_elem(map_fd, &i, &val), 0, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+		ASSERT_EQ(val.xlated_off, i, "val should equal i");
+ }
+
+cleanup:
+ close(prog_fd);
+ close(map_fd);
+}
+
+/* A map can be used by only one BPF program */
+static void check_no_map_reuse(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd = -1, map_fd, extra_fd = -1;
+ struct bpf_insn_array_value val = {};
+ int i;
+
+ map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, ARRAY_SIZE(insns));
+ if (!ASSERT_GE(map_fd, 0, "map_create"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(insns); i++) {
+ val.orig_off = i;
+ if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &i, &val, 0), 0, "bpf_map_update_elem"))
+ goto cleanup;
+ }
+
+ if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
+ goto cleanup;
+
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
+ if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(insns); i++) {
+ if (!ASSERT_EQ(bpf_map_lookup_elem(map_fd, &i, &val), 0, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+		ASSERT_EQ(val.xlated_off, i, "val should equal i");
+ }
+
+ extra_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
+ if (!ASSERT_EQ(extra_fd, -EBUSY, "program should have been rejected (extra_fd != -EBUSY)"))
+ goto cleanup;
+
+ /* correctness: check that prog is still loadable without fd_array */
+ extra_fd = prog_load(insns, ARRAY_SIZE(insns), NULL, 0);
+ if (!ASSERT_GE(extra_fd, 0, "bpf(BPF_PROG_LOAD): expected no error"))
+ goto cleanup;
+
+cleanup:
+ close(extra_fd);
+ close(prog_fd);
+ close(map_fd);
+}
+
+static void check_bpf_no_lookup(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd = -1, map_fd;
+
+ map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, 1);
+ if (!ASSERT_GE(map_fd, 0, "map_create"))
+ return;
+
+ insns[0].imm = map_fd;
+
+ if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
+ goto cleanup;
+
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), NULL, 0);
+ if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)"))
+ goto cleanup;
+
+ /* correctness: check that prog is still loadable with normal map */
+ close(map_fd);
+ map_fd = map_create(BPF_MAP_TYPE_ARRAY, 1);
+ insns[0].imm = map_fd;
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), NULL, 0);
+ if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)"))
+ goto cleanup;
+
+cleanup:
+ close(prog_fd);
+ close(map_fd);
+}
+
+static void check_bpf_side(void)
+{
+ check_bpf_no_lookup();
+}
+
+static void __test_bpf_insn_array(void)
+{
+ /* Test if offsets are adjusted properly */
+
+ if (test__start_subtest("one2one"))
+ check_one_to_one_mapping();
+
+ if (test__start_subtest("simple"))
+ check_simple();
+
+ if (test__start_subtest("deletions"))
+ check_deletions();
+
+ if (test__start_subtest("deletions-with-functions"))
+ check_deletions_with_functions();
+
+ if (test__start_subtest("blindness"))
+ check_blindness();
+
+ /* Check all kinds of operations and related restrictions */
+
+ if (test__start_subtest("incorrect-index"))
+ check_incorrect_index();
+
+ if (test__start_subtest("load-unfrozen-map"))
+ check_load_unfrozen_map();
+
+ if (test__start_subtest("no-map-reuse"))
+ check_no_map_reuse();
+
+ if (test__start_subtest("bpf-side-ops"))
+ check_bpf_side();
+}
+#else
+static void __test_bpf_insn_array(void)
+{
+ test__skip();
+}
+#endif
+
+void test_bpf_insn_array(void)
+{
+ __test_bpf_insn_array();
+}
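
Taken together, the subtests above all exercise the same user-space pattern. A condensed sketch, assuming the map_create() and prog_load() helpers defined earlier in this selftest (not visible in this hunk), with insns/insn_cnt standing in for the program being loaded:

	struct bpf_insn_array_value val = {};
	int map_fd, prog_fd, key = 0;

	map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, 1);
	val.orig_off = 0;			/* index into the original program */
	bpf_map_update_elem(map_fd, &key, &val, 0);
	bpf_map_freeze(map_fd);			/* mandatory before the program may load */
	prog_fd = prog_load(insns, insn_cnt, &map_fd, 1);	/* map passed via fd_array */
	bpf_map_lookup_elem(map_fd, &key, &val);
	/* val.xlated_off now holds the post-verifier index of instruction 0 */
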
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 0e586368948d..5225d69bf79b 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -1,17 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <task_local_storage_helpers.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
-#include "bpf_iter_task.skel.h"
+#include "bpf_iter_tasks.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
+#include "bpf_iter_task_vmas.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
+#include "bpf_iter_unix.skel.h"
+#include "bpf_iter_vma_offset.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
@@ -24,39 +30,39 @@
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
-
-static int duration;
+#include "bpf_iter_bpf_link.skel.h"
+#include "bpf_iter_ksym.skel.h"
+#include "bpf_iter_sockmap.skel.h"
static void test_btf_id_or_null(void)
{
struct bpf_iter_test_kern3 *skel;
skel = bpf_iter_test_kern3__open_and_load();
- if (CHECK(skel, "bpf_iter_test_kern3__open_and_load",
- "skeleton open_and_load unexpectedly succeeded\n")) {
+ if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) {
bpf_iter_test_kern3__destroy(skel);
return;
}
}
-static void do_dummy_read(struct bpf_program *prog)
+static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_opts *opts)
{
struct bpf_link *link;
char buf[16] = {};
int iter_fd, len;
- link = bpf_program__attach_iter(prog, NULL);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ link = bpf_program__attach_iter(prog, opts);
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
return;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* not check contents, but ensure read() ends without error */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
- CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));
+ ASSERT_GE(len, 0, "read");
close(iter_fd);
@@ -64,13 +70,77 @@ free_link:
bpf_link__destroy(link);
}
+static void do_dummy_read(struct bpf_program *prog)
+{
+ do_dummy_read_opts(prog, NULL);
+}
+
+static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
+ struct bpf_map *map)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+ struct bpf_link *link;
+ char buf[16] = {};
+ int iter_fd, len;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.map.map_fd = bpf_map__fd(map);
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+ link = bpf_program__attach_iter(prog, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_map_iter"))
+ return;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
+ bpf_link__destroy(link);
+ return;
+ }
+
+ /* Close link and map fd prematurely */
+ bpf_link__destroy(link);
+ bpf_object__destroy_skeleton(*skel);
+ *skel = NULL;
+
+ /* Try to let map free work to run first if map is freed */
+ usleep(100);
+	/* Memory used by both the sock map and the sock local storage
+	 * map is freed after two synchronize_rcu() calls, so wait for it
+	 */
+ kern_sync_rcu();
+ kern_sync_rcu();
+
+ /* Read after both map fd and link fd are closed */
+ while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+ ;
+ ASSERT_GE(len, 0, "read_iterator");
+
+ close(iter_fd);
+}
+
+static int read_fd_into_buffer(int fd, char *buf, int size)
+{
+ int bufleft = size;
+ int len;
+
+ do {
+ len = read(fd, buf, bufleft);
+ if (len > 0) {
+ buf += len;
+ bufleft -= len;
+ }
+ } while (len > 0);
+
+ return len < 0 ? len : size - bufleft;
+}
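
read_fd_into_buffer() keeps calling read() until EOF or error, returning the total number of bytes read, or read()'s negative return value on failure. Typical use, mirroring the callers further down:

	char buf[256];
	int n;

	n = read_fd_into_buffer(iter_fd, buf, sizeof(buf));
	if (n < 0)
		return;		/* read() failed; inspect errno */
	/* n bytes of iterator output are now in buf */
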
+
static void test_ipv6_route(void)
{
struct bpf_iter_ipv6_route *skel;
skel = bpf_iter_ipv6_route__open_and_load();
- if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
return;
do_dummy_read(skel->progs.dump_ipv6_route);
@@ -83,8 +153,7 @@ static void test_netlink(void)
struct bpf_iter_netlink *skel;
skel = bpf_iter_netlink__open_and_load();
- if (CHECK(!skel, "bpf_iter_netlink__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
return;
do_dummy_read(skel->progs.dump_netlink);
@@ -97,8 +166,7 @@ static void test_bpf_map(void)
struct bpf_iter_bpf_map *skel;
skel = bpf_iter_bpf_map__open_and_load();
- if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
return;
do_dummy_read(skel->progs.dump_bpf_map);
@@ -106,18 +174,236 @@ static void test_bpf_map(void)
bpf_iter_bpf_map__destroy(skel);
}
-static void test_task(void)
+static void check_bpf_link_info(const struct bpf_program *prog)
{
- struct bpf_iter_task *skel;
+ LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+ struct bpf_link_info info = {};
+ struct bpf_link *link;
+ __u32 info_len;
+ int err;
- skel = bpf_iter_task__open_and_load();
- if (CHECK(!skel, "bpf_iter_task__open_and_load",
- "skeleton open_and_load failed\n"))
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.task.tid = getpid();
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ link = bpf_program__attach_iter(prog, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
return;
- do_dummy_read(skel->progs.dump_task);
+ info_len = sizeof(info);
+ err = bpf_link_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
+ ASSERT_OK(err, "bpf_link_get_info_by_fd");
+ ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");
+
+ bpf_link__destroy(link);
+}
+
+static pthread_mutex_t do_nothing_mutex;
- bpf_iter_task__destroy(skel);
+static void *do_nothing_wait(void *arg)
+{
+ pthread_mutex_lock(&do_nothing_mutex);
+ pthread_mutex_unlock(&do_nothing_mutex);
+
+ pthread_exit(arg);
+}
+
+static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
+ int *num_unknown, int *num_known)
+{
+ struct bpf_iter_tasks *skel;
+ pthread_t thread_id;
+ void *ret;
+
+ skel = bpf_iter_tasks__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load"))
+ return;
+
+ ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");
+
+ ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
+ "pthread_create");
+
+ skel->bss->tid = sys_gettid();
+
+ do_dummy_read_opts(skel->progs.dump_task, opts);
+
+ *num_unknown = skel->bss->num_unknown_tid;
+ *num_known = skel->bss->num_known_tid;
+
+ ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
+ ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
+ "pthread_join");
+
+ bpf_iter_tasks__destroy(skel);
+}
+
+static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
+{
+ int num_unknown_tid, num_known_tid;
+
+ test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid);
+ ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid");
+ ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
+}
+
+static void *run_test_task_tid(void *arg)
+{
+ LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+ int num_unknown_tid, num_known_tid;
+
+ ASSERT_NEQ(getpid(), sys_gettid(), "check_new_thread_id");
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.task.tid = sys_gettid();
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+ test_task_common(&opts, 0, 1);
+
+ linfo.task.tid = 0;
+ linfo.task.pid = getpid();
+	/* This includes the parent thread, this thread, the watchdog
+	 * timer thread and the do_nothing_wait thread
+	 */
+ test_task_common(&opts, 3, 1);
+
+ test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
+ ASSERT_GT(num_unknown_tid, 2, "check_num_unknown_tid");
+ ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");
+
+ return NULL;
+}
+
+static void test_task_tid(void)
+{
+ pthread_t thread_id;
+
+ /* Create a new thread so pid and tid aren't the same */
+ ASSERT_OK(pthread_create(&thread_id, NULL, &run_test_task_tid, NULL),
+ "pthread_create");
+ ASSERT_FALSE(pthread_join(thread_id, NULL), "pthread_join");
+}
+
+static void test_task_pid(void)
+{
+ LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.task.pid = getpid();
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ test_task_common(&opts, 2, 1);
+}
+
+static void test_task_pidfd(void)
+{
+ LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+ int pidfd;
+
+ pidfd = sys_pidfd_open(getpid(), 0);
+ if (!ASSERT_GT(pidfd, 0, "sys_pidfd_open"))
+ return;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.task.pid_fd = pidfd;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ test_task_common(&opts, 2, 1);
+
+ close(pidfd);
+}
+
+static void test_task_sleepable(void)
+{
+ struct bpf_iter_tasks *skel;
+ int pid, status, err, data_pipe[2], finish_pipe[2], c = 0;
+ char *test_data = NULL;
+ char *test_data_long = NULL;
+ char *data[2];
+
+ if (!ASSERT_OK(pipe(data_pipe), "data_pipe") ||
+ !ASSERT_OK(pipe(finish_pipe), "finish_pipe"))
+ return;
+
+ skel = bpf_iter_tasks__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load"))
+ return;
+
+ pid = fork();
+ if (!ASSERT_GE(pid, 0, "fork"))
+ return;
+
+ if (pid == 0) {
+ /* child */
+ close(data_pipe[0]);
+ close(finish_pipe[1]);
+
+ test_data = malloc(sizeof(char) * 10);
+ strncpy(test_data, "test_data", 10);
+ test_data[9] = '\0';
+
+ test_data_long = malloc(sizeof(char) * 5000);
+ for (int i = 0; i < 5000; ++i) {
+ if (i % 2 == 0)
+ test_data_long[i] = 'b';
+ else
+ test_data_long[i] = 'a';
+ }
+ test_data_long[4999] = '\0';
+
+ data[0] = test_data;
+ data[1] = test_data_long;
+
+ write(data_pipe[1], &data, sizeof(data));
+
+ /* keep child alive until after the test */
+ err = read(finish_pipe[0], &c, 1);
+ if (err != 1)
+ exit(-1);
+
+ close(data_pipe[1]);
+ close(finish_pipe[0]);
+ _exit(0);
+ }
+
+ /* parent */
+ close(data_pipe[1]);
+ close(finish_pipe[0]);
+
+ err = read(data_pipe[0], &data, sizeof(data));
+ ASSERT_EQ(err, sizeof(data), "read_check");
+
+ skel->bss->user_ptr = data[0];
+ skel->bss->user_ptr_long = data[1];
+ skel->bss->pid = pid;
+
+ do_dummy_read(skel->progs.dump_task_sleepable);
+
+ ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
+ "num_expected_failure_copy_from_user_task");
+ ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
+ "num_success_copy_from_user_task");
+ ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task_str, 0,
+ "num_expected_failure_copy_from_user_task_str");
+ ASSERT_GT(skel->bss->num_success_copy_from_user_task_str, 0,
+ "num_success_copy_from_user_task_str");
+
+ bpf_iter_tasks__destroy(skel);
+
+ write(finish_pipe[1], &c, 1);
+ err = waitpid(pid, &status, 0);
+ ASSERT_EQ(err, pid, "waitpid");
+ ASSERT_EQ(status, 0, "zero_child_exit");
+
+ close(data_pipe[0]);
+ close(finish_pipe[1]);
}
static void test_task_stack(void)
@@ -125,47 +411,61 @@ static void test_task_stack(void)
struct bpf_iter_task_stack *skel;
skel = bpf_iter_task_stack__open_and_load();
- if (CHECK(!skel, "bpf_iter_task_stack__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
return;
do_dummy_read(skel->progs.dump_task_stack);
+ do_dummy_read(skel->progs.get_task_user_stacks);
- bpf_iter_task_stack__destroy(skel);
-}
+ ASSERT_EQ(skel->bss->num_user_stacks, 1, "num_user_stacks");
-static void *do_nothing(void *arg)
-{
- pthread_exit(arg);
+ bpf_iter_task_stack__destroy(skel);
}
static void test_task_file(void)
{
+ LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_task_file *skel;
+ union bpf_iter_link_info linfo;
pthread_t thread_id;
void *ret;
skel = bpf_iter_task_file__open_and_load();
- if (CHECK(!skel, "bpf_iter_task_file__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load"))
return;
skel->bss->tgid = getpid();
- if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
- "pthread_create", "pthread_create failed\n"))
- goto done;
+ ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");
+
+ ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
+ "pthread_create");
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.task.tid = getpid();
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ do_dummy_read_opts(skel->progs.dump_task_file, &opts);
+
+ ASSERT_EQ(skel->bss->count, 0, "check_count");
+ ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");
+
+ skel->bss->last_tgid = 0;
+ skel->bss->count = 0;
+ skel->bss->unique_tgid_count = 0;
do_dummy_read(skel->progs.dump_task_file);
- if (CHECK(pthread_join(thread_id, &ret) || ret != NULL,
- "pthread_join", "pthread_join failed\n"))
- goto done;
+ ASSERT_EQ(skel->bss->count, 0, "check_count");
+ ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");
+
+ check_bpf_link_info(skel->progs.dump_task_file);
- CHECK(skel->bss->count != 0, "check_count",
- "invalid non pthread file visit count %d\n", skel->bss->count);
+ ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
+ ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join");
+ ASSERT_NULL(ret, "pthread_join");
-done:
bpf_iter_task_file__destroy(skel);
}
@@ -177,27 +477,20 @@ static int do_btf_read(struct bpf_iter_task_btf *skel)
{
struct bpf_program *prog = skel->progs.dump_task_struct;
struct bpf_iter_task_btf__bss *bss = skel->bss;
- int iter_fd = -1, len = 0, bufleft = TASKBUFSZ;
+ int iter_fd = -1, err;
struct bpf_link *link;
char *buf = taskbuf;
int ret = 0;
link = bpf_program__attach_iter(prog, NULL);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
return ret;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
- do {
- len = read(iter_fd, buf, bufleft);
- if (len > 0) {
- buf += len;
- bufleft -= len;
- }
- } while (len > 0);
-
+ err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
if (bss->skip) {
printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
ret = 1;
@@ -205,12 +498,11 @@ static int do_btf_read(struct bpf_iter_task_btf *skel)
goto free_link;
}
- if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(err, 0, "read"))
goto free_link;
- CHECK(strstr(taskbuf, "(struct task_struct)") == NULL,
- "check for btf representation of task_struct in iter data",
- "struct task_struct not found");
+ ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
+ "check for btf representation of task_struct in iter data");
free_link:
if (iter_fd > 0)
close(iter_fd);
@@ -225,8 +517,7 @@ static void test_task_btf(void)
int ret;
skel = bpf_iter_task_btf__open_and_load();
- if (CHECK(!skel, "bpf_iter_task_btf__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load"))
return;
bss = skel->bss;
@@ -235,12 +526,10 @@ static void test_task_btf(void)
if (ret)
goto cleanup;
- if (CHECK(bss->tasks == 0, "check if iterated over tasks",
- "no task iteration, did BPF program run?\n"))
+ if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?"))
goto cleanup;
- CHECK(bss->seq_err != 0, "check for unexpected err",
- "bpf_seq_printf_btf returned %ld", bss->seq_err);
+ ASSERT_EQ(bss->seq_err, 0, "check for unexpected err");
cleanup:
bpf_iter_task_btf__destroy(skel);
@@ -251,8 +540,7 @@ static void test_tcp4(void)
struct bpf_iter_tcp4 *skel;
skel = bpf_iter_tcp4__open_and_load();
- if (CHECK(!skel, "bpf_iter_tcp4__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
return;
do_dummy_read(skel->progs.dump_tcp4);
@@ -265,8 +553,7 @@ static void test_tcp6(void)
struct bpf_iter_tcp6 *skel;
skel = bpf_iter_tcp6__open_and_load();
- if (CHECK(!skel, "bpf_iter_tcp6__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
return;
do_dummy_read(skel->progs.dump_tcp6);
@@ -279,8 +566,7 @@ static void test_udp4(void)
struct bpf_iter_udp4 *skel;
skel = bpf_iter_udp4__open_and_load();
- if (CHECK(!skel, "bpf_iter_udp4__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
return;
do_dummy_read(skel->progs.dump_udp4);
@@ -293,8 +579,7 @@ static void test_udp6(void)
struct bpf_iter_udp6 *skel;
skel = bpf_iter_udp6__open_and_load();
- if (CHECK(!skel, "bpf_iter_udp6__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
return;
do_dummy_read(skel->progs.dump_udp6);
@@ -302,27 +587,38 @@ static void test_udp6(void)
bpf_iter_udp6__destroy(skel);
}
+static void test_unix(void)
+{
+ struct bpf_iter_unix *skel;
+
+ skel = bpf_iter_unix__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
+ return;
+
+ do_dummy_read(skel->progs.dump_unix);
+
+ bpf_iter_unix__destroy(skel);
+}
+
/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
bool read_one_char)
{
- int err = -1, len, read_buf_len, start;
+ int len, read_buf_len, start;
char buf[16] = {};
read_buf_len = read_one_char ? 1 : 16;
start = 0;
while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
start += len;
- if (CHECK(start >= 16, "read", "read len %d\n", len))
+ if (!ASSERT_LT(start, 16, "read"))
return -1;
read_buf_len = read_one_char ? 1 : 16 - start;
}
- if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
return -1;
- err = strcmp(buf, expected);
- if (CHECK(err, "read", "incorrect read result: buf %s, expected %s\n",
- buf, expected))
+ if (!ASSERT_STREQ(buf, expected, "read"))
return -1;
return 0;
@@ -335,19 +631,17 @@ static void test_anon_iter(bool read_one_char)
int iter_fd, err;
skel = bpf_iter_test_kern1__open_and_load();
- if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load"))
return;
err = bpf_iter_test_kern1__attach(skel);
- if (CHECK(err, "bpf_iter_test_kern1__attach",
- "skeleton attach failed\n")) {
+ if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach")) {
goto out;
}
link = skel->links.dump_task;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto out;
do_read_with_fd(iter_fd, "abcd", read_one_char);
@@ -362,8 +656,7 @@ static int do_read(const char *path, const char *expected)
int err, iter_fd;
iter_fd = open(path, O_RDONLY);
- if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
- path, strerror(errno)))
+ if (!ASSERT_GE(iter_fd, 0, "open"))
return -1;
err = do_read_with_fd(iter_fd, expected, false);
@@ -380,19 +673,18 @@ static void test_file_iter(void)
int err;
skel1 = bpf_iter_test_kern1__open_and_load();
- if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load"))
return;
link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
/* unlink this path if it exists. */
unlink(path);
err = bpf_link__pin(link, path);
- if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
+ if (!ASSERT_OK(err, "pin_iter"))
goto free_link;
err = do_read(path, "abcd");
@@ -404,12 +696,11 @@ static void test_file_iter(void)
* should change.
*/
skel2 = bpf_iter_test_kern2__open_and_load();
- if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load"))
goto unlink_path;
err = bpf_link__update_program(link, skel2->progs.dump_task);
- if (CHECK(err, "update_prog", "update_prog failed\n"))
+ if (!ASSERT_OK(err, "update_prog"))
goto destroy_skel2;
do_read(path, "ABCD");
@@ -435,8 +726,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
char *buf;
skel = bpf_iter_test_kern4__open();
- if (CHECK(!skel, "bpf_iter_test_kern4__open",
- "skeleton open failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
return;
/* create two maps: bpf program will only do bpf_seq_write
@@ -444,13 +734,11 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
* fills seq_file buffer and then the other will trigger
* overflow and needs restart.
*/
- map1_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
- if (CHECK(map1_fd < 0, "bpf_create_map",
- "map_creation failed: %s\n", strerror(errno)))
+ map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
+ if (!ASSERT_GE(map1_fd, 0, "bpf_map_create"))
goto out;
- map2_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
- if (CHECK(map2_fd < 0, "bpf_create_map",
- "map_creation failed: %s\n", strerror(errno)))
+ map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
+ if (!ASSERT_GE(map2_fd, 0, "bpf_map_create"))
goto free_map1;
/* bpf_seq_printf kernel buffer is 8 pages, so one map
@@ -472,34 +760,32 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
}
skel->rodata->ret1 = ret1;
- if (CHECK(bpf_iter_test_kern4__load(skel),
- "bpf_iter_test_kern4__load", "skeleton load failed\n"))
+ if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
+ "bpf_iter_test_kern4__load"))
goto free_map2;
/* setup filtering map_id in bpf program */
map_info_len = sizeof(map_info);
- err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
- if (CHECK(err, "get_map_info", "get map info failed: %s\n",
- strerror(errno)))
+ err = bpf_map_get_info_by_fd(map1_fd, &map_info, &map_info_len);
+ if (!ASSERT_OK(err, "get_map_info"))
goto free_map2;
skel->bss->map1_id = map_info.id;
- err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
- if (CHECK(err, "get_map_info", "get map info failed: %s\n",
- strerror(errno)))
+ err = bpf_map_get_info_by_fd(map2_fd, &map_info, &map_info_len);
+ if (!ASSERT_OK(err, "get_map_info"))
goto free_map2;
skel->bss->map2_id = map_info.id;
link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto free_map2;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
buf = malloc(expected_read_len);
- if (!buf)
+ if (!ASSERT_OK_PTR(buf, "malloc"))
goto close_iter;
/* do read */
@@ -508,16 +794,14 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
while ((len = read(iter_fd, buf, expected_read_len)) > 0)
total_read_len += len;
- CHECK(len != -1 || errno != E2BIG, "read",
- "expected ret -1, errno E2BIG, but get ret %d, error %s\n",
- len, strerror(errno));
+ ASSERT_EQ(len, -1, "read");
+ ASSERT_EQ(errno, E2BIG, "read");
goto free_buf;
} else if (!ret1) {
while ((len = read(iter_fd, buf, expected_read_len)) > 0)
total_read_len += len;
- if (CHECK(len < 0, "read", "read failed: %s\n",
- strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto free_buf;
} else {
do {
@@ -526,27 +810,20 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
total_read_len += len;
} while (len > 0 || len == -EAGAIN);
- if (CHECK(len < 0, "read", "read failed: %s\n",
- strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto free_buf;
}
- if (CHECK(total_read_len != expected_read_len, "read",
- "total len %u, expected len %u\n", total_read_len,
- expected_read_len))
+ if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
goto free_buf;
- if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed",
- "expected 1 actual %d\n", skel->bss->map1_accessed))
+ if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
goto free_buf;
- if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed",
- "expected 2 actual %d\n", skel->bss->map2_accessed))
+ if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
goto free_buf;
- CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2,
- "map2_seqnum", "two different seqnum %lld %lld\n",
- skel->bss->map2_seqnum1, skel->bss->map2_seqnum2);
+ ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");
free_buf:
free(buf);
@@ -564,7 +841,7 @@ out:
static void test_bpf_hash_map(void)
{
- __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
+ __u32 expected_key_a = 0, expected_key_b = 0;
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_bpf_hash_map *skel;
int err, i, len, map_fd, iter_fd;
@@ -579,15 +856,13 @@ static void test_bpf_hash_map(void)
char buf[64];
skel = bpf_iter_bpf_hash_map__open();
- if (CHECK(!skel, "bpf_iter_bpf_hash_map__open",
- "skeleton open failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
return;
skel->bss->in_test_mode = true;
err = bpf_iter_bpf_hash_map__load(skel);
- if (CHECK(!skel, "bpf_iter_bpf_hash_map__load",
- "skeleton load failed\n"))
+ if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
goto out;
/* iterator with hashmap2 and hashmap3 should fail */
@@ -596,14 +871,12 @@ static void test_bpf_hash_map(void)
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
- if (CHECK(!IS_ERR(link), "attach_iter",
- "attach_iter for hashmap2 unexpected succeeded\n"))
+ if (!ASSERT_ERR_PTR(link, "attach_iter"))
goto out;
linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
- if (CHECK(!IS_ERR(link), "attach_iter",
- "attach_iter for hashmap3 unexpected succeeded\n"))
+ if (!ASSERT_ERR_PTR(link, "attach_iter"))
goto out;
/* hashmap1 should be good, update map values here */
@@ -615,41 +888,40 @@ static void test_bpf_hash_map(void)
val = i + 4;
expected_key_a += key.a;
expected_key_b += key.b;
- expected_key_c += key.c;
expected_val += val;
err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
- if (CHECK(err, "map_update", "map_update failed\n"))
+ if (!ASSERT_OK(err, "map_update"))
goto out;
}
+ /* Sleepable program is prohibited for hash map iterator */
+ linfo.map.map_fd = map_fd;
+ link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts);
+ if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter"))
+ goto out;
+
linfo.map.map_fd = map_fd;
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
- if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
- if (CHECK(skel->bss->key_sum_a != expected_key_a,
- "key_sum_a", "got %u expected %u\n",
- skel->bss->key_sum_a, expected_key_a))
+ if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
goto close_iter;
- if (CHECK(skel->bss->key_sum_b != expected_key_b,
- "key_sum_b", "got %u expected %u\n",
- skel->bss->key_sum_b, expected_key_b))
+ if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
goto close_iter;
- if (CHECK(skel->bss->val_sum != expected_val,
- "val_sum", "got %llu expected %llu\n",
- skel->bss->val_sum, expected_val))
+ if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
close_iter:
@@ -662,7 +934,7 @@ out:
static void test_bpf_percpu_hash_map(void)
{
- __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
+ __u32 expected_key_a = 0, expected_key_b = 0;
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_bpf_percpu_hash_map *skel;
int err, i, j, len, map_fd, iter_fd;
@@ -677,18 +949,17 @@ static void test_bpf_percpu_hash_map(void)
char buf[64];
void *val;
- val = malloc(8 * bpf_num_possible_cpus());
-
skel = bpf_iter_bpf_percpu_hash_map__open();
- if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open",
- "skeleton open failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open"))
return;
skel->rodata->num_cpus = bpf_num_possible_cpus();
+ val = malloc(8 * bpf_num_possible_cpus());
+ if (!ASSERT_OK_PTR(val, "malloc"))
+ goto out;
err = bpf_iter_bpf_percpu_hash_map__load(skel);
- if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load",
- "skeleton load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__load"))
goto out;
/* update map values here */
@@ -699,7 +970,6 @@ static void test_bpf_percpu_hash_map(void)
key.c = i + 3;
expected_key_a += key.a;
expected_key_b += key.b;
- expected_key_c += key.c;
for (j = 0; j < bpf_num_possible_cpus(); j++) {
*(__u32 *)(val + j * 8) = i + j;
@@ -707,7 +977,7 @@ static void test_bpf_percpu_hash_map(void)
}
err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
- if (CHECK(err, "map_update", "map_update failed\n"))
+ if (!ASSERT_OK(err, "map_update"))
goto out;
}
@@ -716,31 +986,25 @@ static void test_bpf_percpu_hash_map(void)
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
- if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
- if (CHECK(skel->bss->key_sum_a != expected_key_a,
- "key_sum_a", "got %u expected %u\n",
- skel->bss->key_sum_a, expected_key_a))
+ if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
goto close_iter;
- if (CHECK(skel->bss->key_sum_b != expected_key_b,
- "key_sum_b", "got %u expected %u\n",
- skel->bss->key_sum_b, expected_key_b))
+ if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
goto close_iter;
- if (CHECK(skel->bss->val_sum != expected_val,
- "val_sum", "got %u expected %u\n",
- skel->bss->val_sum, expected_val))
+ if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
close_iter:
@@ -749,23 +1013,23 @@ free_link:
bpf_link__destroy(link);
out:
bpf_iter_bpf_percpu_hash_map__destroy(skel);
+ free(val);
}
static void test_bpf_array_map(void)
{
__u64 val, expected_val = 0, res_first_val, first_val = 0;
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
- __u32 expected_key = 0, res_first_key;
+ __u32 key, expected_key = 0, res_first_key;
+ int err, i, map_fd, hash_fd, iter_fd;
struct bpf_iter_bpf_array_map *skel;
union bpf_iter_link_info linfo;
- int err, i, map_fd, iter_fd;
struct bpf_link *link;
char buf[64] = {};
int len, start;
skel = bpf_iter_bpf_array_map__open_and_load();
- if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
return;
map_fd = bpf_map__fd(skel->maps.arraymap1);
@@ -778,7 +1042,7 @@ static void test_bpf_array_map(void)
first_val = val;
err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
- if (CHECK(err, "map_update", "map_update failed\n"))
+ if (!ASSERT_OK(err, "map_update"))
goto out;
}
@@ -787,46 +1051,46 @@ static void test_bpf_array_map(void)
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
start = 0;
while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
start += len;
- if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
res_first_key = *(__u32 *)buf;
res_first_val = *(__u64 *)(buf + sizeof(__u32));
- if (CHECK(res_first_key != 0 || res_first_val != first_val,
- "bpf_seq_write",
- "seq_write failure: first key %u vs expected 0, "
- " first value %llu vs expected %llu\n",
- res_first_key, res_first_val, first_val))
+ if (!ASSERT_EQ(res_first_key, 0, "bpf_seq_write") ||
+ !ASSERT_EQ(res_first_val, first_val, "bpf_seq_write"))
goto close_iter;
- if (CHECK(skel->bss->key_sum != expected_key,
- "key_sum", "got %u expected %u\n",
- skel->bss->key_sum, expected_key))
+ if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
goto close_iter;
- if (CHECK(skel->bss->val_sum != expected_val,
- "val_sum", "got %llu expected %llu\n",
- skel->bss->val_sum, expected_val))
+ if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
+ hash_fd = bpf_map__fd(skel->maps.hashmap1);
for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
err = bpf_map_lookup_elem(map_fd, &i, &val);
- if (CHECK(err, "map_lookup", "map_lookup failed\n"))
- goto out;
- if (CHECK(i != val, "invalid_val",
- "got value %llu expected %u\n", val, i))
- goto out;
+ if (!ASSERT_OK(err, "map_lookup arraymap1"))
+ goto close_iter;
+ if (!ASSERT_EQ(i, val, "invalid_val arraymap1"))
+ goto close_iter;
+
+ val = i + 4;
+ err = bpf_map_lookup_elem(hash_fd, &val, &key);
+ if (!ASSERT_OK(err, "map_lookup hashmap1"))
+ goto close_iter;
+ if (!ASSERT_EQ(key, val - 4, "invalid_val hashmap1"))
+ goto close_iter;
}
close_iter:
@@ -837,6 +1101,20 @@ out:
bpf_iter_bpf_array_map__destroy(skel);
}
+static void test_bpf_array_map_iter_fd(void)
+{
+ struct bpf_iter_bpf_array_map *skel;
+
+ skel = bpf_iter_bpf_array_map__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
+ return;
+
+ do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
+ skel->maps.arraymap1);
+
+ bpf_iter_bpf_array_map__destroy(skel);
+}
+
static void test_bpf_percpu_array_map(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
@@ -849,18 +1127,17 @@ static void test_bpf_percpu_array_map(void)
void *val;
int len;
- val = malloc(8 * bpf_num_possible_cpus());
-
skel = bpf_iter_bpf_percpu_array_map__open();
- if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open",
- "skeleton open failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open"))
return;
skel->rodata->num_cpus = bpf_num_possible_cpus();
+ val = malloc(8 * bpf_num_possible_cpus());
+ if (!ASSERT_OK_PTR(val, "malloc"))
+ goto out;
err = bpf_iter_bpf_percpu_array_map__load(skel);
- if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load",
- "skeleton load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__load"))
goto out;
/* update map values here */
@@ -874,7 +1151,7 @@ static void test_bpf_percpu_array_map(void)
}
err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
- if (CHECK(err, "map_update", "map_update failed\n"))
+ if (!ASSERT_OK(err, "map_update"))
goto out;
}
@@ -883,27 +1160,23 @@ static void test_bpf_percpu_array_map(void)
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
- if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
- if (CHECK(skel->bss->key_sum != expected_key,
- "key_sum", "got %u expected %u\n",
- skel->bss->key_sum, expected_key))
+ if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
goto close_iter;
- if (CHECK(skel->bss->val_sum != expected_val,
- "val_sum", "got %u expected %u\n",
- skel->bss->val_sum, expected_val))
+ if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
close_iter:
@@ -912,6 +1185,7 @@ free_link:
bpf_link__destroy(link);
out:
bpf_iter_bpf_percpu_array_map__destroy(skel);
+ free(val);
}
/* An iterator program deletes all local storage in a map. */
@@ -927,17 +1201,17 @@ static void test_bpf_sk_storage_delete(void)
char buf[64];
skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
- if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
return;
map_fd = bpf_map__fd(skel->maps.sk_stg_map);
sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
- if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
+ if (!ASSERT_GE(sock_fd, 0, "socket"))
goto out;
+
err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
- if (CHECK(err, "map_update", "map_update failed\n"))
+ if (!ASSERT_OK(err, "map_update"))
goto out;
memset(&linfo, 0, sizeof(linfo));
@@ -946,24 +1220,29 @@ static void test_bpf_sk_storage_delete(void)
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
&opts);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
- if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
- if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
- "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
- goto close_iter;
+
+	/* Note: the following assertions ensure that the value was
+	 * deleted, by checking that bpf_map_lookup_elem has failed.
+	 * This might seem counterintuitive at first.
+	 */
+ ASSERT_ERR(err, "bpf_map_lookup_elem");
+ ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem");
close_iter:
close(iter_fd);
@@ -988,38 +1267,35 @@ static void test_bpf_sk_storage_get(void)
int sock_fd = -1;
skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
- if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
return;
sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
- if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
+ if (!ASSERT_GE(sock_fd, 0, "socket"))
goto out;
err = listen(sock_fd, 1);
- if (CHECK(err != 0, "listen", "errno: %d\n", errno))
+ if (!ASSERT_OK(err, "listen"))
goto close_socket;
map_fd = bpf_map__fd(skel->maps.sk_stg_map);
err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
- if (CHECK(err, "bpf_map_update_elem", "map_update_failed\n"))
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
goto close_socket;
do_dummy_read(skel->progs.fill_socket_owner);
err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
- if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
- "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
- getpid(), val, err))
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem") ||
+ !ASSERT_EQ(val, getpid(), "bpf_map_lookup_elem"))
goto close_socket;
do_dummy_read(skel->progs.negate_socket_local_storage);
err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
- CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
- "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
- -getpid(), val, err);
+ ASSERT_OK(err, "bpf_map_lookup_elem");
+ ASSERT_EQ(val, -getpid(), "bpf_map_lookup_elem");
close_socket:
close(sock_fd);
@@ -1027,6 +1303,20 @@ out:
bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}
+static void test_bpf_sk_storage_map_iter_fd(void)
+{
+ struct bpf_iter_bpf_sk_storage_map *skel;
+
+ skel = bpf_iter_bpf_sk_storage_map__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
+ return;
+
+ do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
+ skel->maps.sk_stg_map);
+
+ bpf_iter_bpf_sk_storage_map__destroy(skel);
+}
+
static void test_bpf_sk_storage_map(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
@@ -1039,15 +1329,14 @@ static void test_bpf_sk_storage_map(void)
char buf[64];
skel = bpf_iter_bpf_sk_storage_map__open_and_load();
- if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
return;
map_fd = bpf_map__fd(skel->maps.sk_stg_map);
num_sockets = ARRAY_SIZE(sock_fd);
for (i = 0; i < num_sockets; i++) {
sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
- if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno))
+ if (!ASSERT_GE(sock_fd[i], 0, "socket"))
goto out;
val = i + 1;
@@ -1055,7 +1344,7 @@ static void test_bpf_sk_storage_map(void)
err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
BPF_NOEXIST);
- if (CHECK(err, "map_update", "map_update failed\n"))
+ if (!ASSERT_OK(err, "map_update"))
goto out;
}
@@ -1063,31 +1352,43 @@ static void test_bpf_sk_storage_map(void)
linfo.map.map_fd = map_fd;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
- link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
+ err = libbpf_get_error(link);
+ if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
+ if (!err)
+ bpf_link__destroy(link);
+ goto out;
+ }
+
+ link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
+ skel->bss->to_add_val = time(NULL);
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
- if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
- if (CHECK(skel->bss->ipv6_sk_count != num_sockets,
- "ipv6_sk_count", "got %u expected %u\n",
- skel->bss->ipv6_sk_count, num_sockets))
+ if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count"))
goto close_iter;
- if (CHECK(skel->bss->val_sum != expected_val,
- "val_sum", "got %u expected %u\n",
- skel->bss->val_sum, expected_val))
+ if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
+ for (i = 0; i < num_sockets; i++) {
+ err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
+ if (!ASSERT_OK(err, "map_lookup") ||
+ !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
+ break;
+ }
+
close_iter:
close(iter_fd);
free_link:
@@ -1108,8 +1409,7 @@ static void test_rdonly_buf_out_of_bound(void)
struct bpf_link *link;
skel = bpf_iter_test_kern5__open_and_load();
- if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load"))
return;
memset(&linfo, 0, sizeof(linfo));
@@ -1117,7 +1417,7 @@ static void test_rdonly_buf_out_of_bound(void)
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
- if (CHECK(!IS_ERR(link), "attach_iter", "unexpected success\n"))
+ if (!ASSERT_ERR_PTR(link, "attach_iter"))
bpf_link__destroy(link);
bpf_iter_test_kern5__destroy(skel);
@@ -1128,13 +1428,295 @@ static void test_buf_neg_offset(void)
struct bpf_iter_test_kern6 *skel;
skel = bpf_iter_test_kern6__open_and_load();
- if (CHECK(skel, "bpf_iter_test_kern6__open_and_load",
- "skeleton open_and_load unexpected success\n"))
+ if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
bpf_iter_test_kern6__destroy(skel);
}
+static void test_link_iter(void)
+{
+ struct bpf_iter_bpf_link *skel;
+
+ skel = bpf_iter_bpf_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
+ return;
+
+ do_dummy_read(skel->progs.dump_bpf_link);
+
+ bpf_iter_bpf_link__destroy(skel);
+}
+
+static void test_ksym_iter(void)
+{
+ struct bpf_iter_ksym *skel;
+
+ skel = bpf_iter_ksym__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
+ return;
+
+ do_dummy_read(skel->progs.dump_ksym);
+
+ bpf_iter_ksym__destroy(skel);
+}
+
+#define CMP_BUFFER_SIZE 1024
+static char task_vma_output[CMP_BUFFER_SIZE];
+static char proc_maps_output[CMP_BUFFER_SIZE];
+
+/* remove ' ' and '\t' from str, and only keep the first line */
+static void str_strip_first_line(char *str)
+{
+ char *dst = str, *src = str;
+
+ do {
+ if (*src == ' ' || *src == '\t')
+ src++;
+ else
+ *(dst++) = *(src++);
+
+ } while (*src != '\0' && *src != '\n');
+
+ *dst = '\0';
+}
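
A worked example of what this helper produces (sketch): spaces and tabs are dropped and the string is cut at the first newline, which makes a bpf_iter output line byte-comparable with the corresponding /proc/<pid>/maps line.

	char line[] = "00400000-0040b000 r-xp\t00000000\nsecond line";

	str_strip_first_line(line);
	/* line is now "00400000-0040b000r-xp00000000" */
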
+
+static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
+{
+ int err, iter_fd = -1, proc_maps_fd = -1;
+ struct bpf_iter_task_vmas *skel;
+ int len, read_size = 4;
+ char maps_path[64];
+
+ skel = bpf_iter_task_vmas__open();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
+ return;
+
+ skel->bss->pid = getpid();
+ skel->bss->one_task = opts ? 1 : 0;
+
+ err = bpf_iter_task_vmas__load(skel);
+ if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
+ goto out;
+
+ skel->links.proc_maps = bpf_program__attach_iter(
+ skel->progs.proc_maps, opts);
+
+ if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
+ skel->links.proc_maps = NULL;
+ goto out;
+ }
+
+ iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
+ goto out;
+
+ /* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
+ * to trigger seq_file corner cases.
+ */
+ len = 0;
+ while (len < CMP_BUFFER_SIZE) {
+ err = read_fd_into_buffer(iter_fd, task_vma_output + len,
+ MIN(read_size, CMP_BUFFER_SIZE - len));
+ if (!err)
+ break;
+ if (!ASSERT_GE(err, 0, "read_iter_fd"))
+ goto out;
+ len += err;
+ }
+ if (opts)
+ ASSERT_EQ(skel->bss->one_task_error, 0, "unexpected task");
+
+ /* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
+ snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
+ proc_maps_fd = open(maps_path, O_RDONLY);
+ if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps"))
+ goto out;
+ err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
+ if (!ASSERT_GE(err, 0, "read_prog_maps_fd"))
+ goto out;
+
+ /* strip and compare the first line of the two files */
+ str_strip_first_line(task_vma_output);
+ str_strip_first_line(proc_maps_output);
+
+ ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");
+
+ check_bpf_link_info(skel->progs.proc_maps);
+
+out:
+ close(proc_maps_fd);
+ close(iter_fd);
+ bpf_iter_task_vmas__destroy(skel);
+}
+
+static void test_task_vma_dead_task(void)
+{
+ struct bpf_iter_task_vmas *skel;
+ int wstatus, child_pid = -1;
+ time_t start_tm, cur_tm;
+ int err, iter_fd = -1;
+ int wait_sec = 3;
+
+ skel = bpf_iter_task_vmas__open();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ err = bpf_iter_task_vmas__load(skel);
+ if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
+ goto out;
+
+ skel->links.proc_maps = bpf_program__attach_iter(
+ skel->progs.proc_maps, NULL);
+
+ if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
+ skel->links.proc_maps = NULL;
+ goto out;
+ }
+
+ start_tm = time(NULL);
+ cur_tm = start_tm;
+
+ child_pid = fork();
+ if (child_pid == 0) {
+ /* Fork short-lived processes in the background. */
+ while (cur_tm < start_tm + wait_sec) {
+ system("echo > /dev/null");
+ cur_tm = time(NULL);
+ }
+ exit(0);
+ }
+
+ if (!ASSERT_GE(child_pid, 0, "fork_child"))
+ goto out;
+
+ while (cur_tm < start_tm + wait_sec) {
+ iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
+ goto out;
+
+ /* Drain all data from iter_fd. */
+ while (cur_tm < start_tm + wait_sec) {
+ err = read_fd_into_buffer(iter_fd, task_vma_output, CMP_BUFFER_SIZE);
+ if (!ASSERT_GE(err, 0, "read_iter_fd"))
+ goto out;
+
+ cur_tm = time(NULL);
+
+ if (err == 0)
+ break;
+ }
+
+ close(iter_fd);
+ iter_fd = -1;
+ }
+
+ check_bpf_link_info(skel->progs.proc_maps);
+
+out:
+ waitpid(child_pid, &wstatus, 0);
+ close(iter_fd);
+ bpf_iter_task_vmas__destroy(skel);
+}
+
+void test_bpf_sockmap_map_iter_fd(void)
+{
+ struct bpf_iter_sockmap *skel;
+
+ skel = bpf_iter_sockmap__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
+ return;
+
+ do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);
+
+ bpf_iter_sockmap__destroy(skel);
+}
+
+static void test_task_vma(void)
+{
+ LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.task.tid = getpid();
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ test_task_vma_common(&opts);
+ test_task_vma_common(NULL);
+}
+
+/* uprobe attach point */
+static noinline int trigger_func(int arg)
+{
+ asm volatile ("");
+ return arg + 1;
+}
+
+static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
+{
+ struct bpf_iter_vma_offset *skel;
+ char buf[16] = {};
+ int iter_fd, len;
+ int pgsz, shift;
+
+ skel = bpf_iter_vma_offset__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_vma_offset__open_and_load"))
+ return;
+
+ skel->bss->pid = getpid();
+ skel->bss->address = (uintptr_t)trigger_func;
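+	/* derive page_shift = log2(page size), e.g. 4096 -> 12 */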
+ for (pgsz = getpagesize(), shift = 0; pgsz > 1; pgsz >>= 1, shift++)
+ ;
+ skel->bss->page_shift = shift;
+
+ skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
+ if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter"))
+ goto exit;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset));
+ if (!ASSERT_GT(iter_fd, 0, "create_iter"))
+ goto exit;
+
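+	/* on success the iterator emits just "OK\n", which fits in one short read */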
+ while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+ ;
+ buf[15] = 0;
+ ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");
+
+ ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
+ if (one_proc)
+ ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
+ else
+ ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
+
+ close(iter_fd);
+
+exit:
+ bpf_iter_vma_offset__destroy(skel);
+}
+
+static void test_task_vma_offset(void)
+{
+ LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.task.pid = getpid();
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ test_task_vma_offset_common(&opts, true);
+
+ linfo.task.pid = 0;
+ linfo.task.tid = getpid();
+ test_task_vma_offset_common(&opts, true);
+
+ test_task_vma_offset_common(NULL, false);
+}
+
void test_bpf_iter(void)
{
+ ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init");
+
if (test__start_subtest("btf_id_or_null"))
test_btf_id_or_null();
if (test__start_subtest("ipv6_route"))
@@ -1143,12 +1725,22 @@ void test_bpf_iter(void)
test_netlink();
if (test__start_subtest("bpf_map"))
test_bpf_map();
- if (test__start_subtest("task"))
- test_task();
+ if (test__start_subtest("task_tid"))
+ test_task_tid();
+ if (test__start_subtest("task_pid"))
+ test_task_pid();
+ if (test__start_subtest("task_pidfd"))
+ test_task_pidfd();
+ if (test__start_subtest("task_sleepable"))
+ test_task_sleepable();
if (test__start_subtest("task_stack"))
test_task_stack();
if (test__start_subtest("task_file"))
test_task_file();
+ if (test__start_subtest("task_vma"))
+ test_task_vma();
+ if (test__start_subtest("task_vma_dead_task"))
+ test_task_vma_dead_task();
if (test__start_subtest("task_btf"))
test_task_btf();
if (test__start_subtest("tcp4"))
@@ -1159,6 +1751,8 @@ void test_bpf_iter(void)
test_udp4();
if (test__start_subtest("udp6"))
test_udp6();
+ if (test__start_subtest("unix"))
+ test_unix();
if (test__start_subtest("anon"))
test_anon_iter(false);
if (test__start_subtest("anon-read-one-char"))
@@ -1177,10 +1771,14 @@ void test_bpf_iter(void)
test_bpf_percpu_hash_map();
if (test__start_subtest("bpf_array_map"))
test_bpf_array_map();
+ if (test__start_subtest("bpf_array_map_iter_fd"))
+ test_bpf_array_map_iter_fd();
if (test__start_subtest("bpf_percpu_array_map"))
test_bpf_percpu_array_map();
if (test__start_subtest("bpf_sk_storage_map"))
test_bpf_sk_storage_map();
+ if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
+ test_bpf_sk_storage_map_iter_fd();
if (test__start_subtest("bpf_sk_storage_delete"))
test_bpf_sk_storage_delete();
if (test__start_subtest("bpf_sk_storage_get"))
@@ -1189,4 +1787,12 @@ void test_bpf_iter(void)
test_rdonly_buf_out_of_bound();
if (test__start_subtest("buf-neg-offset"))
test_buf_neg_offset();
+ if (test__start_subtest("link-iter"))
+ test_link_iter();
+ if (test__start_subtest("ksym"))
+ test_ksym_iter();
+ if (test__start_subtest("bpf_sockmap_map_iter_fd"))
+ test_bpf_sockmap_map_iter_fd();
+ if (test__start_subtest("vma_offset"))
+ test_task_vma_offset();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c
new file mode 100644
index 000000000000..16bed9dd8e6a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <test_progs.h>
+#include "network_helpers.h"
+#include "bpf_dctcp.skel.h"
+#include "bpf_cubic.skel.h"
+#include "bpf_iter_setsockopt.skel.h"
+
+static int create_netns(void)
+{
+ if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
+ return -1;
+
+ if (!ASSERT_OK(system("ip link set dev lo up"), "bring up lo"))
+ return -1;
+
+ return 0;
+}
+
+static unsigned int set_bpf_cubic(int *fds, unsigned int nr_fds)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr_fds; i++) {
+ if (setsockopt(fds[i], SOL_TCP, TCP_CONGESTION, "bpf_cubic",
+ sizeof("bpf_cubic")))
+ return i;
+ }
+
+ return nr_fds;
+}
+
+static unsigned int check_bpf_dctcp(int *fds, unsigned int nr_fds)
+{
+ char tcp_cc[16];
+ socklen_t optlen = sizeof(tcp_cc);
+ unsigned int i;
+
+ for (i = 0; i < nr_fds; i++) {
+ if (getsockopt(fds[i], SOL_TCP, TCP_CONGESTION,
+ tcp_cc, &optlen) ||
+ strcmp(tcp_cc, "bpf_dctcp"))
+ return i;
+ }
+
+ return nr_fds;
+}
+
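+/* Create nr_est established connections to listen_fd, switch each client
+ * socket to bpf_cubic, and return the client fds; the matching accepted
+ * fds are returned via *paccepted_fds.
+ */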
+static int *make_established(int listen_fd, unsigned int nr_est,
+ int **paccepted_fds)
+{
+ int *est_fds, *accepted_fds;
+ unsigned int i;
+
+ est_fds = malloc(sizeof(*est_fds) * nr_est);
+ if (!est_fds)
+ return NULL;
+
+ accepted_fds = malloc(sizeof(*accepted_fds) * nr_est);
+ if (!accepted_fds) {
+ free(est_fds);
+ return NULL;
+ }
+
+ for (i = 0; i < nr_est; i++) {
+ est_fds[i] = connect_to_fd(listen_fd, 0);
+ if (est_fds[i] == -1)
+ break;
+ if (set_bpf_cubic(&est_fds[i], 1) != 1) {
+ close(est_fds[i]);
+ break;
+ }
+
+ accepted_fds[i] = accept(listen_fd, NULL, 0);
+ if (accepted_fds[i] == -1) {
+ close(est_fds[i]);
+ break;
+ }
+ }
+
+ if (!ASSERT_EQ(i, nr_est, "create established fds")) {
+ free_fds(accepted_fds, i);
+ free_fds(est_fds, i);
+ return NULL;
+ }
+
+ *paccepted_fds = accepted_fds;
+ return est_fds;
+}
+
+static unsigned short get_local_port(int fd)
+{
+ struct sockaddr_in6 addr;
+ socklen_t addrlen = sizeof(addr);
+
+ if (!getsockname(fd, (struct sockaddr *)&addr, &addrlen))
+ return ntohs(addr.sin6_port);
+
+ return 0;
+}
+
+static void do_bpf_iter_setsockopt(struct bpf_iter_setsockopt *iter_skel,
+ bool random_retry)
+{
+ int *reuse_listen_fds = NULL, *accepted_fds = NULL, *est_fds = NULL;
+ unsigned int nr_reuse_listens = 256, nr_est = 256;
+ int err, iter_fd = -1, listen_fd = -1;
+ char buf;
+
+ /* Prepare non-reuseport listen_fd */
+ listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+ if (!ASSERT_GE(listen_fd, 0, "start_server"))
+ return;
+ if (!ASSERT_EQ(set_bpf_cubic(&listen_fd, 1), 1,
+ "set listen_fd to cubic"))
+ goto done;
+ iter_skel->bss->listen_hport = get_local_port(listen_fd);
+ if (!ASSERT_NEQ(iter_skel->bss->listen_hport, 0,
+ "get_local_port(listen_fd)"))
+ goto done;
+
+ /* Connect to non-reuseport listen_fd */
+ est_fds = make_established(listen_fd, nr_est, &accepted_fds);
+ if (!ASSERT_OK_PTR(est_fds, "create established"))
+ goto done;
+
+ /* Prepare reuseport listen fds */
+ reuse_listen_fds = start_reuseport_server(AF_INET6, SOCK_STREAM,
+ "::1", 0, 0,
+ nr_reuse_listens);
+ if (!ASSERT_OK_PTR(reuse_listen_fds, "start_reuseport_server"))
+ goto done;
+ if (!ASSERT_EQ(set_bpf_cubic(reuse_listen_fds, nr_reuse_listens),
+ nr_reuse_listens, "set reuse_listen_fds to cubic"))
+ goto done;
+ iter_skel->bss->reuse_listen_hport = get_local_port(reuse_listen_fds[0]);
+ if (!ASSERT_NEQ(iter_skel->bss->reuse_listen_hport, 0,
+ "get_local_port(reuse_listen_fds[0])"))
+ goto done;
+
+ /* Run bpf tcp iter to switch from bpf_cubic to bpf_dctcp */
+ iter_skel->bss->random_retry = random_retry;
+ iter_fd = bpf_iter_create(bpf_link__fd(iter_skel->links.change_tcp_cc));
+ if (!ASSERT_GE(iter_fd, 0, "create iter_fd"))
+ goto done;
+
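+	/* A read() drives the iterator over all TCP sockets; in random_retry
+	 * mode the iter prog may bail out early, which surfaces as EAGAIN, so
+	 * retry until the iterator completes.
+	 */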
+ while ((err = read(iter_fd, &buf, sizeof(buf))) == -1 &&
+ errno == EAGAIN)
+ ;
+ if (!ASSERT_OK(err, "read iter error"))
+ goto done;
+
+ /* Check reuseport listen fds for dctcp */
+ ASSERT_EQ(check_bpf_dctcp(reuse_listen_fds, nr_reuse_listens),
+ nr_reuse_listens,
+ "check reuse_listen_fds dctcp");
+
+ /* Check non reuseport listen fd for dctcp */
+ ASSERT_EQ(check_bpf_dctcp(&listen_fd, 1), 1,
+ "check listen_fd dctcp");
+
+ /* Check established fds for dctcp */
+ ASSERT_EQ(check_bpf_dctcp(est_fds, nr_est), nr_est,
+ "check est_fds dctcp");
+
+ /* Check accepted fds for dctcp */
+ ASSERT_EQ(check_bpf_dctcp(accepted_fds, nr_est), nr_est,
+ "check accepted_fds dctcp");
+
+done:
+ if (iter_fd != -1)
+ close(iter_fd);
+ if (listen_fd != -1)
+ close(listen_fd);
+ free_fds(reuse_listen_fds, nr_reuse_listens);
+ free_fds(accepted_fds, nr_est);
+ free_fds(est_fds, nr_est);
+}
+
+void serial_test_bpf_iter_setsockopt(void)
+{
+ struct bpf_iter_setsockopt *iter_skel = NULL;
+ struct bpf_cubic *cubic_skel = NULL;
+ struct bpf_dctcp *dctcp_skel = NULL;
+ struct bpf_link *cubic_link = NULL;
+ struct bpf_link *dctcp_link = NULL;
+
+ if (create_netns())
+ return;
+
+ /* Load iter_skel */
+ iter_skel = bpf_iter_setsockopt__open_and_load();
+ if (!ASSERT_OK_PTR(iter_skel, "iter_skel"))
+ return;
+ iter_skel->links.change_tcp_cc = bpf_program__attach_iter(iter_skel->progs.change_tcp_cc, NULL);
+ if (!ASSERT_OK_PTR(iter_skel->links.change_tcp_cc, "attach iter"))
+ goto done;
+
+ /* Load bpf_cubic */
+ cubic_skel = bpf_cubic__open_and_load();
+ if (!ASSERT_OK_PTR(cubic_skel, "cubic_skel"))
+ goto done;
+ cubic_link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic);
+ if (!ASSERT_OK_PTR(cubic_link, "cubic_link"))
+ goto done;
+
+ /* Load bpf_dctcp */
+ dctcp_skel = bpf_dctcp__open_and_load();
+ if (!ASSERT_OK_PTR(dctcp_skel, "dctcp_skel"))
+ goto done;
+ dctcp_link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);
+ if (!ASSERT_OK_PTR(dctcp_link, "dctcp_link"))
+ goto done;
+
+ do_bpf_iter_setsockopt(iter_skel, true);
+ do_bpf_iter_setsockopt(iter_skel, false);
+
+done:
+ bpf_link__destroy(cubic_link);
+ bpf_link__destroy(dctcp_link);
+ bpf_cubic__destroy(cubic_skel);
+ bpf_dctcp__destroy(dctcp_skel);
+ bpf_iter_setsockopt__destroy(iter_skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt_unix.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt_unix.c
new file mode 100644
index 000000000000..ee725d4d98a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt_unix.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <test_progs.h>
+#include "bpf_iter_setsockopt_unix.skel.h"
+
+#define NR_CASES 5
+
+static int create_unix_socket(struct bpf_iter_setsockopt_unix *skel)
+{
+ struct sockaddr_un addr = {
+ .sun_family = AF_UNIX,
+ .sun_path = "",
+ };
+ socklen_t len;
+ int fd, err;
+
+ fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (!ASSERT_NEQ(fd, -1, "socket"))
+ return -1;
+
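+	/* Binding with a zero-length sun_path triggers autobind: the kernel
+	 * assigns a unique abstract address, read back via getsockname().
+	 */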
+ len = offsetof(struct sockaddr_un, sun_path);
+ err = bind(fd, (struct sockaddr *)&addr, len);
+ if (!ASSERT_OK(err, "bind"))
+ return -1;
+
+ len = sizeof(addr);
+ err = getsockname(fd, (struct sockaddr *)&addr, &len);
+ if (!ASSERT_OK(err, "getsockname"))
+ return -1;
+
+ memcpy(&skel->bss->sun_path, &addr.sun_path,
+ len - offsetof(struct sockaddr_un, sun_path));
+
+ return fd;
+}
+
+static void test_sndbuf(struct bpf_iter_setsockopt_unix *skel, int fd)
+{
+ socklen_t optlen;
+ int i, err;
+
+ for (i = 0; i < NR_CASES; i++) {
+ if (!ASSERT_NEQ(skel->data->sndbuf_getsockopt[i], -1,
+ "bpf_(get|set)sockopt"))
+ return;
+
+ err = setsockopt(fd, SOL_SOCKET, SO_SNDBUF,
+ &(skel->data->sndbuf_setsockopt[i]),
+ sizeof(skel->data->sndbuf_setsockopt[i]));
+ if (!ASSERT_OK(err, "setsockopt"))
+ return;
+
+ optlen = sizeof(skel->bss->sndbuf_getsockopt_expected[i]);
+ err = getsockopt(fd, SOL_SOCKET, SO_SNDBUF,
+ &(skel->bss->sndbuf_getsockopt_expected[i]),
+ &optlen);
+ if (!ASSERT_OK(err, "getsockopt"))
+ return;
+
+ if (!ASSERT_EQ(skel->data->sndbuf_getsockopt[i],
+ skel->bss->sndbuf_getsockopt_expected[i],
+ "bpf_(get|set)sockopt"))
+ return;
+ }
+}
+
+void test_bpf_iter_setsockopt_unix(void)
+{
+ struct bpf_iter_setsockopt_unix *skel;
+ int err, unix_fd, iter_fd;
+ char buf;
+
+ skel = bpf_iter_setsockopt_unix__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ unix_fd = create_unix_socket(skel);
+ if (!ASSERT_NEQ(unix_fd, -1, "create_unix_server"))
+ goto destroy;
+
+ skel->links.change_sndbuf = bpf_program__attach_iter(skel->progs.change_sndbuf, NULL);
+ if (!ASSERT_OK_PTR(skel->links.change_sndbuf, "bpf_program__attach_iter"))
+ goto destroy;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(skel->links.change_sndbuf));
+ if (!ASSERT_GE(iter_fd, 0, "bpf_iter_create"))
+ goto destroy;
+
+ while ((err = read(iter_fd, &buf, sizeof(buf))) == -1 &&
+ errno == EAGAIN)
+ ;
+ if (!ASSERT_OK(err, "read iter error"))
+ goto destroy;
+
+ test_sndbuf(skel, unix_fd);
+destroy:
+ bpf_iter_setsockopt_unix__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_loop.c b/tools/testing/selftests/bpf/prog_tests/bpf_loop.c
new file mode 100644
index 000000000000..4cd8a25afe68
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_loop.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "bpf_loop.skel.h"
+
+static void check_nr_loops(struct bpf_loop *skel)
+{
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.test_prog);
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
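+
+	/* The attached prog is expected to fire on the nanosleep syscall path
+	 * (filtered by our pid), so each usleep(1) below triggers one run.
+	 */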
+
+ /* test 0 loops */
+ skel->bss->nr_loops = 0;
+
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops,
+ "0 loops");
+
+ /* test 500 loops */
+ skel->bss->nr_loops = 500;
+
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops,
+ "500 loops");
+ ASSERT_EQ(skel->bss->g_output, (500 * 499) / 2, "g_output");
+
+ /* test exceeding the max limit */
+ skel->bss->nr_loops = -1;
+
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->err, -E2BIG, "over max limit");
+
+ bpf_link__destroy(link);
+}
+
+static void check_callback_fn_stop(struct bpf_loop *skel)
+{
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.test_prog);
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
+
+ /* testing that loop is stopped when callback_fn returns 1 */
+ skel->bss->nr_loops = 400;
+ skel->data->stop_index = 50;
+
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->nr_loops_returned, skel->data->stop_index + 1,
+ "nr_loops_returned");
+ ASSERT_EQ(skel->bss->g_output, (50 * 49) / 2,
+ "g_output");
+
+ bpf_link__destroy(link);
+}
+
+static void check_null_callback_ctx(struct bpf_loop *skel)
+{
+ struct bpf_link *link;
+
+ /* check that user is able to pass in a null callback_ctx */
+ link = bpf_program__attach(skel->progs.prog_null_ctx);
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
+
+ skel->bss->nr_loops = 10;
+
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops,
+ "nr_loops_returned");
+
+ bpf_link__destroy(link);
+}
+
+static void check_invalid_flags(struct bpf_loop *skel)
+{
+ struct bpf_link *link;
+
+ /* check that passing in non-zero flags returns -EINVAL */
+ link = bpf_program__attach(skel->progs.prog_invalid_flags);
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
+
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->err, -EINVAL, "err");
+
+ bpf_link__destroy(link);
+}
+
+static void check_nested_calls(struct bpf_loop *skel)
+{
+ __u32 nr_loops = 100, nested_callback_nr_loops = 4;
+ struct bpf_link *link;
+
+ /* check that nested calls are supported */
+ link = bpf_program__attach(skel->progs.prog_nested_calls);
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
+
+ skel->bss->nr_loops = nr_loops;
+ skel->bss->nested_callback_nr_loops = nested_callback_nr_loops;
+
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->nr_loops_returned, nr_loops * nested_callback_nr_loops
+ * nested_callback_nr_loops, "nr_loops_returned");
+ ASSERT_EQ(skel->bss->g_output, (4 * 3) / 2 * nested_callback_nr_loops
+ * nr_loops, "g_output");
+
+ bpf_link__destroy(link);
+}
+
+static void check_non_constant_callback(struct bpf_loop *skel)
+{
+ struct bpf_link *link =
+ bpf_program__attach(skel->progs.prog_non_constant_callback);
+
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
+
+ skel->bss->callback_selector = 0x0F;
+ usleep(1);
+ ASSERT_EQ(skel->bss->g_output, 0x0F, "g_output #1");
+
+ skel->bss->callback_selector = 0xF0;
+ usleep(1);
+ ASSERT_EQ(skel->bss->g_output, 0xF0, "g_output #2");
+
+ bpf_link__destroy(link);
+}
+
+static void check_stack(struct bpf_loop *skel)
+{
+ struct bpf_link *link = bpf_program__attach(skel->progs.stack_check);
+ const int max_key = 12;
+ int key;
+ int map_fd;
+
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
+
+ map_fd = bpf_map__fd(skel->maps.map1);
+
+ if (!ASSERT_GE(map_fd, 0, "bpf_map__fd"))
+ goto out;
+
+ for (key = 1; key <= max_key; ++key) {
+ int val = key;
+ int err = bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);
+
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
+ goto out;
+ }
+
+ usleep(1);
+
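+	/* the attached stack_check prog should have bumped each value by one */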
+ for (key = 1; key <= max_key; ++key) {
+ int val;
+ int err = bpf_map_lookup_elem(map_fd, &key, &val);
+
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
+ goto out;
+ if (!ASSERT_EQ(val, key + 1, "bad value in the map"))
+ goto out;
+ }
+
+out:
+ bpf_link__destroy(link);
+}
+
+void test_bpf_loop(void)
+{
+ struct bpf_loop *skel;
+
+ skel = bpf_loop__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_loop__open_and_load"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ if (test__start_subtest("check_nr_loops"))
+ check_nr_loops(skel);
+ if (test__start_subtest("check_callback_fn_stop"))
+ check_callback_fn_stop(skel);
+ if (test__start_subtest("check_null_callback_ctx"))
+ check_null_callback_ctx(skel);
+ if (test__start_subtest("check_invalid_flags"))
+ check_invalid_flags(skel);
+ if (test__start_subtest("check_nested_calls"))
+ check_nested_calls(skel);
+ if (test__start_subtest("check_non_constant_callback"))
+ check_non_constant_callback(skel);
+ if (test__start_subtest("check_stack"))
+ check_stack(skel);
+
+ bpf_loop__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
new file mode 100644
index 000000000000..ecc3d47919ad
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <unistd.h>
+#include <pthread.h>
+#include <sys/mman.h>
+#include <stdatomic.h>
+#include <test_progs.h>
+#include <sys/syscall.h>
+#include <linux/module.h>
+#include <linux/userfaultfd.h>
+
+#include "ksym_race.skel.h"
+#include "bpf_mod_race.skel.h"
+#include "kfunc_call_race.skel.h"
+#include "testing_helpers.h"
+
+/* This test crafts a race between btf_try_get_module and do_init_module, and
+ * checks whether btf_try_get_module handles the invocation for a well-formed
+ * but uninitialized module correctly. Unless the module has completed its
+ * initcalls, the verifier should fail the program load and return ENXIO.
+ *
+ * userfaultfd is used to trigger a fault in an fmod_ret program and make it
+ * sleep; then the BPF program is loaded and the return value from the
+ * verifier is inspected. After this, the userfaultfd is closed so that the
+ * module loading thread makes forward progress, and fmod_ret injects an
+ * error so that the module load fails and the module is freed.
+ *
+ * If the verifier were to succeed in loading the supplied program, it would
+ * end up taking a reference to the freed module, triggering a crash when the
+ * program fd is closed later. This is true for both kfuncs and ksyms. In both
+ * cases, the crash is triggered inside bpf_prog_free_deferred, when the
+ * module reference is finally released.
+ */
+
+struct test_config {
+ const char *str_open;
+ void *(*bpf_open_and_load)();
+ void (*bpf_destroy)(void *);
+};
+
+enum bpf_test_state {
+ _TS_INVALID,
+ TS_MODULE_LOAD,
+ TS_MODULE_LOAD_FAIL,
+};
+
+static _Atomic enum bpf_test_state state = _TS_INVALID;
+
+static void *load_module_thread(void *p)
+{
+ if (!ASSERT_NEQ(load_bpf_testmod(false), 0, "load_module_thread must fail"))
+ atomic_store(&state, TS_MODULE_LOAD);
+ else
+ atomic_store(&state, TS_MODULE_LOAD_FAIL);
+ return p;
+}
+
+static int sys_userfaultfd(int flags)
+{
+ return syscall(__NR_userfaultfd, flags);
+}
+
+static int test_setup_uffd(void *fault_addr)
+{
+ struct uffdio_register uffd_register = {};
+ struct uffdio_api uffd_api = {};
+ int uffd;
+
+ uffd = sys_userfaultfd(O_CLOEXEC);
+ if (uffd < 0)
+ return -errno;
+
+ uffd_api.api = UFFD_API;
+ uffd_api.features = 0;
+ if (ioctl(uffd, UFFDIO_API, &uffd_api)) {
+ close(uffd);
+ return -1;
+ }
+
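+	/* Register the page for missing-page faults: the first access to it
+	 * (from the fmod_ret prog) blocks; the test observes the event and
+	 * later closes the uffd to unblock the faulting thread.
+	 */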
+ uffd_register.range.start = (unsigned long)fault_addr;
+ uffd_register.range.len = getpagesize();
+ uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+ if (ioctl(uffd, UFFDIO_REGISTER, &uffd_register)) {
+ close(uffd);
+ return -1;
+ }
+ return uffd;
+}
+
+static void test_bpf_mod_race_config(const struct test_config *config)
+{
+ void *fault_addr, *skel_fail;
+ struct bpf_mod_race *skel;
+ struct uffd_msg uffd_msg;
+ pthread_t load_mod_thrd;
+ _Atomic int *blockingp;
+ int uffd, ret;
+
+ fault_addr = mmap(0, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (!ASSERT_NEQ(fault_addr, MAP_FAILED, "mmap for uffd registration"))
+ return;
+
+ if (!ASSERT_OK(unload_bpf_testmod(false), "unload bpf_testmod"))
+ goto end_mmap;
+
+ skel = bpf_mod_race__open();
+ if (!ASSERT_OK_PTR(skel, "bpf_mod_kfunc_race__open"))
+ goto end_module;
+
+ skel->rodata->bpf_mod_race_config.tgid = getpid();
+ skel->rodata->bpf_mod_race_config.inject_error = -4242;
+ skel->rodata->bpf_mod_race_config.fault_addr = fault_addr;
+ if (!ASSERT_OK(bpf_mod_race__load(skel), "bpf_mod___load"))
+ goto end_destroy;
+ blockingp = (_Atomic int *)&skel->bss->bpf_blocking;
+
+ if (!ASSERT_OK(bpf_mod_race__attach(skel), "bpf_mod_kfunc_race__attach"))
+ goto end_destroy;
+
+ uffd = test_setup_uffd(fault_addr);
+ if (!ASSERT_GE(uffd, 0, "userfaultfd open + register address"))
+ goto end_destroy;
+
+ if (!ASSERT_OK(pthread_create(&load_mod_thrd, NULL, load_module_thread, NULL),
+ "load module thread"))
+ goto end_uffd;
+
+ /* Now, we either fail loading module, or block in bpf prog, spin to find out */
+ while (!atomic_load(&state) && !atomic_load(blockingp))
+ ;
+ if (!ASSERT_EQ(state, _TS_INVALID, "module load should block"))
+ goto end_join;
+ if (!ASSERT_EQ(*blockingp, 1, "module load blocked")) {
+ pthread_kill(load_mod_thrd, SIGKILL);
+ goto end_uffd;
+ }
+
+ /* We might have set bpf_blocking to 1, but may have not blocked in
+ * bpf_copy_from_user. Read userfaultfd descriptor to verify that.
+ */
+ if (!ASSERT_EQ(read(uffd, &uffd_msg, sizeof(uffd_msg)), sizeof(uffd_msg),
+ "read uffd block event"))
+ goto end_join;
+ if (!ASSERT_EQ(uffd_msg.event, UFFD_EVENT_PAGEFAULT, "read uffd event is pagefault"))
+ goto end_join;
+
+ /* We know that load_mod_thrd is blocked in the fmod_ret program, the
+ * module state is still MODULE_STATE_COMING because mod->init hasn't
+ * returned. This is the time we try to load a program calling kfunc and
+ * check if we get ENXIO from verifier.
+ */
+ skel_fail = config->bpf_open_and_load();
+ ret = errno;
+ if (!ASSERT_EQ(skel_fail, NULL, config->str_open)) {
+ /* Close uffd to unblock load_mod_thrd */
+ close(uffd);
+ uffd = -1;
+ while (atomic_load(blockingp) != 2)
+ ;
+ ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
+ config->bpf_destroy(skel_fail);
+ goto end_join;
+ }
+ ASSERT_EQ(ret, ENXIO, "verifier returns ENXIO");
+ ASSERT_EQ(skel->data->res_try_get_module, false, "btf_try_get_module == false");
+
+ close(uffd);
+ uffd = -1;
+end_join:
+ pthread_join(load_mod_thrd, NULL);
+ if (uffd < 0)
+ ASSERT_EQ(atomic_load(&state), TS_MODULE_LOAD_FAIL, "load_mod_thrd success");
+end_uffd:
+ if (uffd >= 0)
+ close(uffd);
+end_destroy:
+ bpf_mod_race__destroy(skel);
+ ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
+end_module:
+ unload_bpf_testmod(false);
+ ASSERT_OK(load_bpf_testmod(false), "restore bpf_testmod");
+end_mmap:
+ munmap(fault_addr, 4096);
+ atomic_store(&state, _TS_INVALID);
+}
+
+static const struct test_config ksym_config = {
+ .str_open = "ksym_race__open_and_load",
+ .bpf_open_and_load = (void *)ksym_race__open_and_load,
+ .bpf_destroy = (void *)ksym_race__destroy,
+};
+
+static const struct test_config kfunc_config = {
+ .str_open = "kfunc_call_race__open_and_load",
+ .bpf_open_and_load = (void *)kfunc_call_race__open_and_load,
+ .bpf_destroy = (void *)kfunc_call_race__destroy,
+};
+
+void serial_test_bpf_mod_race(void)
+{
+ if (test__start_subtest("ksym (used_btfs UAF)"))
+ test_bpf_mod_race_config(&ksym_config);
+ if (test__start_subtest("kfunc (kfunc_btf_tab UAF)"))
+ test_bpf_mod_race_config(&kfunc_config);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
new file mode 100644
index 000000000000..dd6512fa652b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include "test_bpf_nf.skel.h"
+#include "test_bpf_nf_fail.skel.h"
+
+static char log_buf[1024 * 1024];
+
+struct {
+ const char *prog_name;
+ const char *err_msg;
+} test_bpf_nf_fail_tests[] = {
+ { "alloc_release", "kernel function bpf_ct_release args#0 expected pointer to STRUCT nf_conn but" },
+ { "insert_insert", "kernel function bpf_ct_insert_entry args#0 expected pointer to STRUCT nf_conn___init but" },
+ { "lookup_insert", "kernel function bpf_ct_insert_entry args#0 expected pointer to STRUCT nf_conn___init but" },
+ { "set_timeout_after_insert", "kernel function bpf_ct_set_timeout args#0 expected pointer to STRUCT nf_conn___init but" },
+ { "set_status_after_insert", "kernel function bpf_ct_set_status args#0 expected pointer to STRUCT nf_conn___init but" },
+ { "change_timeout_after_alloc", "kernel function bpf_ct_change_timeout args#0 expected pointer to STRUCT nf_conn but" },
+ { "change_status_after_alloc", "kernel function bpf_ct_change_status args#0 expected pointer to STRUCT nf_conn but" },
+ { "write_not_allowlisted_field", "no write support to nf_conn at off" },
+};
+
+enum {
+ TEST_XDP,
+ TEST_TC_BPF,
+};
+
+#define TIMEOUT_MS 3000
+#define IPS_STATUS_MASK (IPS_CONFIRMED | IPS_SEEN_REPLY | \
+ IPS_SRC_NAT_DONE | IPS_DST_NAT_DONE | \
+ IPS_SRC_NAT | IPS_DST_NAT)
+
+static int connect_to_server(int srv_fd)
+{
+ int fd = -1;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ if (!ASSERT_GE(fd, 0, "socket"))
+ goto out;
+
+ if (!ASSERT_EQ(connect_fd_to_fd(fd, srv_fd, TIMEOUT_MS), 0, "connect_fd_to_fd")) {
+ close(fd);
+ fd = -1;
+ }
+out:
+ return fd;
+}
+
+static void test_bpf_nf_ct(int mode)
+{
+ const char *iptables = "iptables-legacy -t raw %s PREROUTING -j CONNMARK --set-mark 42/0";
+ int srv_fd = -1, client_fd = -1, srv_client_fd = -1;
+ struct sockaddr_in peer_addr = {};
+ struct test_bpf_nf *skel;
+ int prog_fd, err;
+ socklen_t len;
+	int srv_port;
+ char cmd[128];
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ if (SYS_NOFAIL("iptables-legacy --version")) {
+ fprintf(stdout, "Missing required iptables-legacy tool\n");
+ test__skip();
+ return;
+ }
+
+ skel = test_bpf_nf__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_bpf_nf__open_and_load"))
+ return;
+
+ /* Enable connection tracking */
+ snprintf(cmd, sizeof(cmd), iptables, "-A");
+ if (!ASSERT_OK(system(cmd), cmd))
+ goto end;
+
+ srv_fd = start_server(AF_INET, SOCK_STREAM, "127.0.0.1", 0, TIMEOUT_MS);
+ if (!ASSERT_GE(srv_fd, 0, "start_server"))
+ goto end;
+
+ srv_port = get_socket_local_port(srv_fd);
+	if (!ASSERT_GE(srv_port, 0, "get_socket_local_port"))
+ goto end;
+
+ client_fd = connect_to_server(srv_fd);
+ if (!ASSERT_GE(client_fd, 0, "connect_to_server"))
+ goto end;
+
+ len = sizeof(peer_addr);
+ srv_client_fd = accept(srv_fd, (struct sockaddr *)&peer_addr, &len);
+ if (!ASSERT_GE(srv_client_fd, 0, "accept"))
+ goto end;
+ if (!ASSERT_EQ(len, sizeof(struct sockaddr_in), "sockaddr len"))
+ goto end;
+
+ skel->bss->saddr = peer_addr.sin_addr.s_addr;
+ skel->bss->sport = peer_addr.sin_port;
+ skel->bss->daddr = peer_addr.sin_addr.s_addr;
+ skel->bss->dport = srv_port;
+
+ if (mode == TEST_XDP)
+ prog_fd = bpf_program__fd(skel->progs.nf_xdp_ct_test);
+ else
+ prog_fd = bpf_program__fd(skel->progs.nf_skb_ct_test);
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run"))
+ goto end;
+
+ ASSERT_EQ(skel->bss->test_einval_bpf_tuple, -EINVAL, "Test EINVAL for NULL bpf_tuple");
+ ASSERT_EQ(skel->bss->test_einval_reserved, -EINVAL, "Test EINVAL for reserved not set to 0");
+ ASSERT_EQ(skel->bss->test_einval_reserved_new, -EINVAL, "Test EINVAL for reserved in new struct not set to 0");
+ ASSERT_EQ(skel->bss->test_einval_netns_id, -EINVAL, "Test EINVAL for netns_id < -1");
+ ASSERT_EQ(skel->bss->test_einval_len_opts, -EINVAL, "Test EINVAL for len__opts != NF_BPF_CT_OPTS_SZ");
+ ASSERT_EQ(skel->bss->test_eproto_l4proto, -EPROTO, "Test EPROTO for l4proto != TCP or UDP");
+ ASSERT_EQ(skel->bss->test_enonet_netns_id, -ENONET, "Test ENONET for bad but valid netns_id");
+ ASSERT_EQ(skel->bss->test_enoent_lookup, -ENOENT, "Test ENOENT for failed lookup");
+ ASSERT_EQ(skel->bss->test_eafnosupport, -EAFNOSUPPORT, "Test EAFNOSUPPORT for invalid len__tuple");
+ ASSERT_EQ(skel->data->test_alloc_entry, 0, "Test for alloc new entry");
+ ASSERT_EQ(skel->data->test_insert_entry, 0, "Test for insert new entry");
+ ASSERT_EQ(skel->data->test_succ_lookup, 0, "Test for successful lookup");
+ /* allow some tolerance for test_delta_timeout value to avoid races. */
+ ASSERT_GT(skel->bss->test_delta_timeout, 8, "Test for min ct timeout update");
+ ASSERT_LE(skel->bss->test_delta_timeout, 10, "Test for max ct timeout update");
+ ASSERT_EQ(skel->bss->test_insert_lookup_mark, 77, "Test for insert and lookup mark value");
+ ASSERT_EQ(skel->bss->test_status, IPS_STATUS_MASK, "Test for ct status update ");
+ ASSERT_EQ(skel->data->test_exist_lookup, 0, "Test existing connection lookup");
+ ASSERT_EQ(skel->bss->test_exist_lookup_mark, 43, "Test existing connection lookup ctmark");
+ ASSERT_EQ(skel->data->test_snat_addr, 0, "Test for source natting");
+ ASSERT_EQ(skel->data->test_dnat_addr, 0, "Test for destination natting");
+ ASSERT_EQ(skel->data->test_ct_zone_id_alloc_entry, 0, "Test for alloc new entry in specified ct zone");
+ ASSERT_EQ(skel->data->test_ct_zone_id_insert_entry, 0, "Test for insert new entry in specified ct zone");
+ ASSERT_EQ(skel->data->test_ct_zone_id_succ_lookup, 0, "Test for successful lookup in specified ct_zone");
+ ASSERT_EQ(skel->bss->test_ct_zone_dir_enoent_lookup, -ENOENT, "Test ENOENT for lookup with wrong ct zone dir");
+ ASSERT_EQ(skel->bss->test_ct_zone_id_enoent_lookup, -ENOENT, "Test ENOENT for lookup in wrong ct zone");
+
+end:
+ if (client_fd != -1)
+ close(client_fd);
+ if (srv_client_fd != -1)
+ close(srv_client_fd);
+ if (srv_fd != -1)
+ close(srv_fd);
+
+ snprintf(cmd, sizeof(cmd), iptables, "-D");
+ system(cmd);
+ test_bpf_nf__destroy(skel);
+}
+
+static void test_bpf_nf_ct_fail(const char *prog_name, const char *err_msg)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
+ .kernel_log_size = sizeof(log_buf),
+ .kernel_log_level = 1);
+ struct test_bpf_nf_fail *skel;
+ struct bpf_program *prog;
+ int ret;
+
+ skel = test_bpf_nf_fail__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "test_bpf_nf_fail__open"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto end;
+
+ bpf_program__set_autoload(prog, true);
+
+ ret = test_bpf_nf_fail__load(skel);
+ if (!ASSERT_ERR(ret, "test_bpf_nf_fail__load must fail"))
+ goto end;
+
+ if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
+ fprintf(stderr, "Expected: %s\n", err_msg);
+ fprintf(stderr, "Verifier: %s\n", log_buf);
+ }
+
+end:
+ test_bpf_nf_fail__destroy(skel);
+}
+
+void test_bpf_nf(void)
+{
+	int i;
+
+	if (test__start_subtest("xdp-ct"))
+ test_bpf_nf_ct(TEST_XDP);
+ if (test__start_subtest("tc-bpf-ct"))
+ test_bpf_nf_ct(TEST_TC_BPF);
+ for (i = 0; i < ARRAY_SIZE(test_bpf_nf_fail_tests); i++) {
+ if (test__start_subtest(test_bpf_nf_fail_tests[i].prog_name))
+ test_bpf_nf_ct_fail(test_bpf_nf_fail_tests[i].prog_name,
+ test_bpf_nf_fail_tests[i].err_msg);
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
index 284d5921c345..f09d6ac2ef09 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
@@ -3,11 +3,11 @@
#define nr_iters 2
-void test_bpf_obj_id(void)
+void serial_test_bpf_obj_id(void)
{
const __u64 array_magic_value = 0xfaceb00c;
const __u32 array_key = 0;
- const char *file = "./test_obj_id.o";
+ const char *file = "./test_obj_id.bpf.o";
const char *expected_prog_name = "test_obj_id";
const char *expected_map_name = "test_map_id";
const __u64 nsec_per_sec = 1000000000;
@@ -25,7 +25,7 @@ void test_bpf_obj_id(void)
*/
__u32 map_ids[nr_iters + 1];
char jited_insns[128], xlated_insns[128], zeros[128], tp_name[128];
- __u32 i, next_id, info_len, nr_id_found, duration = 0;
+ __u32 i, next_id, info_len, nr_id_found;
struct timespec real_time_ts, boot_time_ts;
int err = 0;
__u64 array_value;
@@ -33,45 +33,46 @@ void test_bpf_obj_id(void)
time_t now, load_time;
err = bpf_prog_get_fd_by_id(0);
- CHECK(err >= 0 || errno != ENOENT,
- "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
+ ASSERT_LT(err, 0, "bpf_prog_get_fd_by_id");
+ ASSERT_EQ(errno, ENOENT, "bpf_prog_get_fd_by_id");
err = bpf_map_get_fd_by_id(0);
- CHECK(err >= 0 || errno != ENOENT,
- "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
+ ASSERT_LT(err, 0, "bpf_map_get_fd_by_id");
+ ASSERT_EQ(errno, ENOENT, "bpf_map_get_fd_by_id");
err = bpf_link_get_fd_by_id(0);
- CHECK(err >= 0 || errno != ENOENT,
- "get-fd-by-notexist-link-id", "err %d errno %d\n", err, errno);
+	ASSERT_LT(err, 0, "bpf_link_get_fd_by_id");
+	ASSERT_EQ(errno, ENOENT, "bpf_link_get_fd_by_id");
- /* Check bpf_obj_get_info_by_fd() */
+ /* Check bpf_map_get_info_by_fd() */
bzero(zeros, sizeof(zeros));
for (i = 0; i < nr_iters; i++) {
now = time(NULL);
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT,
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT,
&objs[i], &prog_fds[i]);
/* test_obj_id.o is a dumb prog. It should never fail
* to load.
*/
- if (CHECK_FAIL(err))
+ if (!ASSERT_OK(err, "bpf_prog_test_load"))
continue;
/* Insert a magic value to the map */
map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
- if (CHECK_FAIL(map_fds[i] < 0))
+ if (!ASSERT_GE(map_fds[i], 0, "bpf_find_map"))
goto done;
+
err = bpf_map_update_elem(map_fds[i], &array_key,
&array_magic_value, 0);
- if (CHECK_FAIL(err))
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
goto done;
- prog = bpf_object__find_program_by_title(objs[i],
- "raw_tp/sys_enter");
- if (CHECK_FAIL(!prog))
+ prog = bpf_object__find_program_by_name(objs[i], "test_obj_id");
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto done;
+
links[i] = bpf_program__attach(prog);
err = libbpf_get_error(links[i]);
- if (CHECK(err, "prog_attach", "prog #%d, err %d\n", i, err)) {
+ if (!ASSERT_OK(err, "bpf_program__attach")) {
links[i] = NULL;
goto done;
}
@@ -79,26 +80,16 @@ void test_bpf_obj_id(void)
/* Check getting map info */
info_len = sizeof(struct bpf_map_info) * 2;
bzero(&map_infos[i], info_len);
- err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
+ err = bpf_map_get_info_by_fd(map_fds[i], &map_infos[i],
&info_len);
- if (CHECK(err ||
- map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
- map_infos[i].key_size != sizeof(__u32) ||
- map_infos[i].value_size != sizeof(__u64) ||
- map_infos[i].max_entries != 1 ||
- map_infos[i].map_flags != 0 ||
- info_len != sizeof(struct bpf_map_info) ||
- strcmp((char *)map_infos[i].name, expected_map_name),
- "get-map-info(fd)",
- "err %d errno %d type %d(%d) info_len %u(%zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
- err, errno,
- map_infos[i].type, BPF_MAP_TYPE_ARRAY,
- info_len, sizeof(struct bpf_map_info),
- map_infos[i].key_size,
- map_infos[i].value_size,
- map_infos[i].max_entries,
- map_infos[i].map_flags,
- map_infos[i].name, expected_map_name))
+ if (!ASSERT_OK(err, "bpf_map_get_info_by_fd") ||
+ !ASSERT_EQ(map_infos[i].type, BPF_MAP_TYPE_ARRAY, "map_type") ||
+ !ASSERT_EQ(map_infos[i].key_size, sizeof(__u32), "key_size") ||
+ !ASSERT_EQ(map_infos[i].value_size, sizeof(__u64), "value_size") ||
+ !ASSERT_EQ(map_infos[i].max_entries, 1, "max_entries") ||
+ !ASSERT_EQ(map_infos[i].map_flags, 0, "map_flags") ||
+ !ASSERT_EQ(info_len, sizeof(struct bpf_map_info), "map_info_len") ||
+ !ASSERT_STREQ((char *)map_infos[i].name, expected_map_name, "map_name"))
goto done;
/* Check getting prog info */
@@ -112,48 +103,34 @@ void test_bpf_obj_id(void)
prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
prog_infos[i].nr_map_ids = 2;
+
err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
- if (CHECK_FAIL(err))
+ if (!ASSERT_OK(err, "clock_gettime"))
goto done;
+
err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
- if (CHECK_FAIL(err))
+ if (!ASSERT_OK(err, "clock_gettime"))
goto done;
- err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
- &info_len);
+
+ err = bpf_prog_get_info_by_fd(prog_fds[i], &prog_infos[i],
+ &info_len);
load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
+ (prog_infos[i].load_time / nsec_per_sec);
- if (CHECK(err ||
- prog_infos[i].type != BPF_PROG_TYPE_RAW_TRACEPOINT ||
- info_len != sizeof(struct bpf_prog_info) ||
- (env.jit_enabled && !prog_infos[i].jited_prog_len) ||
- (env.jit_enabled &&
- !memcmp(jited_insns, zeros, sizeof(zeros))) ||
- !prog_infos[i].xlated_prog_len ||
- !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
- load_time < now - 60 || load_time > now + 60 ||
- prog_infos[i].created_by_uid != my_uid ||
- prog_infos[i].nr_map_ids != 1 ||
- *(int *)(long)prog_infos[i].map_ids != map_infos[i].id ||
- strcmp((char *)prog_infos[i].name, expected_prog_name),
- "get-prog-info(fd)",
- "err %d errno %d i %d type %d(%d) info_len %u(%zu) "
- "jit_enabled %d jited_prog_len %u xlated_prog_len %u "
- "jited_prog %d xlated_prog %d load_time %lu(%lu) "
- "uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) "
- "name %s(%s)\n",
- err, errno, i,
- prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
- info_len, sizeof(struct bpf_prog_info),
- env.jit_enabled,
- prog_infos[i].jited_prog_len,
- prog_infos[i].xlated_prog_len,
- !!memcmp(jited_insns, zeros, sizeof(zeros)),
- !!memcmp(xlated_insns, zeros, sizeof(zeros)),
- load_time, now,
- prog_infos[i].created_by_uid, my_uid,
- prog_infos[i].nr_map_ids, 1,
- *(int *)(long)prog_infos[i].map_ids, map_infos[i].id,
- prog_infos[i].name, expected_prog_name))
+
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") ||
+ !ASSERT_EQ(prog_infos[i].type, BPF_PROG_TYPE_RAW_TRACEPOINT, "prog_type") ||
+ !ASSERT_EQ(info_len, sizeof(struct bpf_prog_info), "prog_info_len") ||
+ !ASSERT_FALSE((env.jit_enabled && !prog_infos[i].jited_prog_len), "jited_prog_len") ||
+ !ASSERT_FALSE((env.jit_enabled && !memcmp(jited_insns, zeros, sizeof(zeros))),
+ "jited_insns") ||
+ !ASSERT_NEQ(prog_infos[i].xlated_prog_len, 0, "xlated_prog_len") ||
+ !ASSERT_NEQ(memcmp(xlated_insns, zeros, sizeof(zeros)), 0, "xlated_insns") ||
+ !ASSERT_GE(load_time, (now - 60), "load_time") ||
+ !ASSERT_LE(load_time, (now + 60), "load_time") ||
+ !ASSERT_EQ(prog_infos[i].created_by_uid, my_uid, "created_by_uid") ||
+ !ASSERT_EQ(prog_infos[i].nr_map_ids, 1, "nr_map_ids") ||
+ !ASSERT_EQ(*(int *)(long)prog_infos[i].map_ids, map_infos[i].id, "map_ids") ||
+ !ASSERT_STREQ((char *)prog_infos[i].name, expected_prog_name, "prog_name"))
goto done;
/* Check getting link info */
@@ -161,27 +138,14 @@ void test_bpf_obj_id(void)
bzero(&link_infos[i], info_len);
link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name);
link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name);
- err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]),
- &link_infos[i], &info_len);
- if (CHECK(err ||
- link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT ||
- link_infos[i].prog_id != prog_infos[i].id ||
- link_infos[i].raw_tracepoint.tp_name != ptr_to_u64(&tp_name) ||
- strcmp(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
- "sys_enter") ||
- info_len != sizeof(struct bpf_link_info),
- "get-link-info(fd)",
- "err %d errno %d info_len %u(%zu) type %d(%d) id %d "
- "prog_id %d (%d) tp_name %s(%s)\n",
- err, errno,
- info_len, sizeof(struct bpf_link_info),
- link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT,
- link_infos[i].id,
- link_infos[i].prog_id, prog_infos[i].id,
- (const char *)u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
- "sys_enter"))
+ err = bpf_link_get_info_by_fd(bpf_link__fd(links[i]),
+ &link_infos[i], &info_len);
+ if (!ASSERT_OK(err, "bpf_link_get_info_by_fd") ||
+ !ASSERT_EQ(link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT, "link_type") ||
+ !ASSERT_EQ(link_infos[i].prog_id, prog_infos[i].id, "prog_id") ||
+ !ASSERT_EQ(link_infos[i].raw_tracepoint.tp_name, ptr_to_u64(&tp_name), "&tp_name") ||
+ !ASSERT_STREQ(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name), "sys_enter", "tp_name"))
goto done;
-
}
/* Check bpf_prog_get_next_id() */
@@ -190,7 +154,7 @@ void test_bpf_obj_id(void)
while (!bpf_prog_get_next_id(next_id, &next_id)) {
struct bpf_prog_info prog_info = {};
__u32 saved_map_id;
- int prog_fd;
+ int prog_fd, cmp_res;
info_len = sizeof(prog_info);
@@ -198,9 +162,7 @@ void test_bpf_obj_id(void)
if (prog_fd < 0 && errno == ENOENT)
/* The bpf_prog is in the dead row */
continue;
- if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
- "prog_fd %d next_id %d errno %d\n",
- prog_fd, next_id, errno))
+ if (!ASSERT_GE(prog_fd, 0, "bpf_prog_get_fd_by_id"))
break;
for (i = 0; i < nr_iters; i++)
@@ -217,10 +179,9 @@ void test_bpf_obj_id(void)
* prog_info.map_ids = NULL
*/
prog_info.nr_map_ids = 1;
- err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
- if (CHECK(!err || errno != EFAULT,
- "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
- err, errno, EFAULT))
+ err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
+ if (!ASSERT_ERR(err, "bpf_prog_get_info_by_fd") ||
+ !ASSERT_EQ(errno, EFAULT, "bpf_prog_get_info_by_fd"))
break;
bzero(&prog_info, sizeof(prog_info));
info_len = sizeof(prog_info);
@@ -228,30 +189,25 @@ void test_bpf_obj_id(void)
saved_map_id = *(int *)((long)prog_infos[i].map_ids);
prog_info.map_ids = prog_infos[i].map_ids;
prog_info.nr_map_ids = 2;
- err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
prog_infos[i].jited_prog_insns = 0;
prog_infos[i].xlated_prog_insns = 0;
- CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
- memcmp(&prog_info, &prog_infos[i], info_len) ||
- *(int *)(long)prog_info.map_ids != saved_map_id,
- "get-prog-info(next_id->fd)",
- "err %d errno %d info_len %u(%zu) memcmp %d map_id %u(%u)\n",
- err, errno, info_len, sizeof(struct bpf_prog_info),
- memcmp(&prog_info, &prog_infos[i], info_len),
- *(int *)(long)prog_info.map_ids, saved_map_id);
+ cmp_res = memcmp(&prog_info, &prog_infos[i], info_len);
+
+ ASSERT_OK(err, "bpf_prog_get_info_by_fd");
+ ASSERT_EQ(info_len, sizeof(struct bpf_prog_info), "prog_info_len");
+ ASSERT_OK(cmp_res, "memcmp");
+ ASSERT_EQ(*(int *)(long)prog_info.map_ids, saved_map_id, "map_id");
close(prog_fd);
}
- CHECK(nr_id_found != nr_iters,
- "check total prog id found by get_next_id",
- "nr_id_found %u(%u)\n",
- nr_id_found, nr_iters);
+ ASSERT_EQ(nr_id_found, nr_iters, "prog_nr_id_found");
/* Check bpf_map_get_next_id() */
nr_id_found = 0;
next_id = 0;
while (!bpf_map_get_next_id(next_id, &next_id)) {
struct bpf_map_info map_info = {};
- int map_fd;
+ int map_fd, cmp_res;
info_len = sizeof(map_info);
@@ -259,9 +215,7 @@ void test_bpf_obj_id(void)
if (map_fd < 0 && errno == ENOENT)
/* The bpf_map is in the dead row */
continue;
- if (CHECK(map_fd < 0, "get-map-fd(next_id)",
- "map_fd %d next_id %u errno %d\n",
- map_fd, next_id, errno))
+ if (!ASSERT_GE(map_fd, 0, "bpf_map_get_fd_by_id"))
break;
for (i = 0; i < nr_iters; i++)
@@ -274,25 +228,19 @@ void test_bpf_obj_id(void)
nr_id_found++;
err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
- if (CHECK_FAIL(err))
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
goto done;
- err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
- CHECK(err || info_len != sizeof(struct bpf_map_info) ||
- memcmp(&map_info, &map_infos[i], info_len) ||
- array_value != array_magic_value,
- "check get-map-info(next_id->fd)",
- "err %d errno %d info_len %u(%zu) memcmp %d array_value %llu(%llu)\n",
- err, errno, info_len, sizeof(struct bpf_map_info),
- memcmp(&map_info, &map_infos[i], info_len),
- array_value, array_magic_value);
+ err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
+ cmp_res = memcmp(&map_info, &map_infos[i], info_len);
+ ASSERT_OK(err, "bpf_map_get_info_by_fd");
+ ASSERT_EQ(info_len, sizeof(struct bpf_map_info), "info_len");
+ ASSERT_OK(cmp_res, "memcmp");
+ ASSERT_EQ(array_value, array_magic_value, "array_value");
close(map_fd);
}
- CHECK(nr_id_found != nr_iters,
- "check total map id found by get_next_id",
- "nr_id_found %u(%u)\n",
- nr_id_found, nr_iters);
+ ASSERT_EQ(nr_id_found, nr_iters, "map_nr_id_found");
/* Check bpf_link_get_next_id() */
nr_id_found = 0;
@@ -308,9 +256,7 @@ void test_bpf_obj_id(void)
if (link_fd < 0 && errno == ENOENT)
/* The bpf_link is in the dead row */
continue;
- if (CHECK(link_fd < 0, "get-link-fd(next_id)",
- "link_fd %d next_id %u errno %d\n",
- link_fd, next_id, errno))
+ if (!ASSERT_GE(link_fd, 0, "bpf_link_get_fd_by_id"))
break;
for (i = 0; i < nr_iters; i++)
@@ -322,20 +268,16 @@ void test_bpf_obj_id(void)
nr_id_found++;
- err = bpf_obj_get_info_by_fd(link_fd, &link_info, &info_len);
+ err = bpf_link_get_info_by_fd(link_fd, &link_info, &info_len);
cmp_res = memcmp(&link_info, &link_infos[i],
offsetof(struct bpf_link_info, raw_tracepoint));
- CHECK(err || info_len != sizeof(link_info) || cmp_res,
- "check get-link-info(next_id->fd)",
- "err %d errno %d info_len %u(%zu) memcmp %d\n",
- err, errno, info_len, sizeof(struct bpf_link_info),
- cmp_res);
+ ASSERT_OK(err, "bpf_link_get_info_by_fd");
+ ASSERT_EQ(info_len, sizeof(link_info), "info_len");
+ ASSERT_OK(cmp_res, "memcmp");
close(link_fd);
}
- CHECK(nr_id_found != nr_iters,
- "check total link id found by get_next_id",
- "nr_id_found %u(%u)\n", nr_id_found, nr_iters);
+ ASSERT_EQ(nr_id_found, nr_iters, "link_nr_id_found");
done:
for (i = 0; i < nr_iters; i++) {
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c
new file mode 100644
index 000000000000..ee0458a5ce78
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#define _GNU_SOURCE
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <linux/unistd.h>
+#include <linux/mount.h>
+#include <sys/syscall.h>
+#include "bpf/libbpf_internal.h"
+
+static inline int sys_fsopen(const char *fsname, unsigned flags)
+{
+ return syscall(__NR_fsopen, fsname, flags);
+}
+
+static inline int sys_fsconfig(int fs_fd, unsigned cmd, const char *key, const void *val, int aux)
+{
+ return syscall(__NR_fsconfig, fs_fd, cmd, key, val, aux);
+}
+
+static inline int sys_fsmount(int fs_fd, unsigned flags, unsigned ms_flags)
+{
+ return syscall(__NR_fsmount, fs_fd, flags, ms_flags);
+}
+
+__attribute__((unused))
+static inline int sys_move_mount(int from_dfd, const char *from_path,
+ int to_dfd, const char *to_path,
+ unsigned int ms_flags)
+{
+ return syscall(__NR_move_mount, from_dfd, from_path, to_dfd, to_path, ms_flags);
+}
+
+static void bpf_obj_pinning_detached(void)
+{
+ LIBBPF_OPTS(bpf_obj_pin_opts, pin_opts);
+ LIBBPF_OPTS(bpf_obj_get_opts, get_opts);
+ int fs_fd = -1, mnt_fd = -1;
+ int map_fd = -1, map_fd2 = -1;
+ int zero = 0, src_value, dst_value, err;
+ const char *map_name = "fsmount_map";
+
+ /* A bunch of below UAPI calls are constructed based on reading:
+ * https://brauner.io/2023/02/28/mounting-into-mount-namespaces.html
+ */
+
+ /* create VFS context */
+ fs_fd = sys_fsopen("bpf", 0);
+ if (!ASSERT_GE(fs_fd, 0, "fs_fd"))
+ goto cleanup;
+
+ /* instantiate FS object */
+ err = sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
+ if (!ASSERT_OK(err, "fs_create"))
+ goto cleanup;
+
+ /* create O_PATH fd for detached mount */
+ mnt_fd = sys_fsmount(fs_fd, 0, 0);
+ if (!ASSERT_GE(mnt_fd, 0, "mnt_fd"))
+ goto cleanup;
+
+ /* If we wanted to expose detached mount in the file system, we'd do
+ * something like below. But the whole point is that we actually don't
+ * even have to expose BPF FS in the file system to be able to work
+ * (pin/get objects) with it.
+ *
+ * err = sys_move_mount(mnt_fd, "", -EBADF, mnt_path, MOVE_MOUNT_F_EMPTY_PATH);
+ * if (!ASSERT_OK(err, "move_mount"))
+ * goto cleanup;
+ */
+
+ /* create BPF map to pin */
+ map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, map_name, 4, 4, 1, NULL);
+ if (!ASSERT_GE(map_fd, 0, "map_fd"))
+ goto cleanup;
+
+ /* pin BPF map into detached BPF FS through mnt_fd */
+ pin_opts.file_flags = BPF_F_PATH_FD;
+ pin_opts.path_fd = mnt_fd;
+ err = bpf_obj_pin_opts(map_fd, map_name, &pin_opts);
+ if (!ASSERT_OK(err, "map_pin"))
+ goto cleanup;
+
+ /* get BPF map from detached BPF FS through mnt_fd */
+ get_opts.file_flags = BPF_F_PATH_FD;
+ get_opts.path_fd = mnt_fd;
+ map_fd2 = bpf_obj_get_opts(map_name, &get_opts);
+ if (!ASSERT_GE(map_fd2, 0, "map_get"))
+ goto cleanup;
+
+ /* update map through one FD */
+ src_value = 0xcafebeef;
+ err = bpf_map_update_elem(map_fd, &zero, &src_value, 0);
+ ASSERT_OK(err, "map_update");
+
+ /* check values written/read through different FDs do match */
+ dst_value = 0;
+ err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value);
+ ASSERT_OK(err, "map_lookup");
+ ASSERT_EQ(dst_value, src_value, "map_value_eq1");
+ ASSERT_EQ(dst_value, 0xcafebeef, "map_value_eq2");
+
+cleanup:
+ if (map_fd >= 0)
+ ASSERT_OK(close(map_fd), "close_map_fd");
+ if (map_fd2 >= 0)
+ ASSERT_OK(close(map_fd2), "close_map_fd2");
+ if (fs_fd >= 0)
+ ASSERT_OK(close(fs_fd), "close_fs_fd");
+ if (mnt_fd >= 0)
+ ASSERT_OK(close(mnt_fd), "close_mnt_fd");
+}
+
+enum path_kind {
+ PATH_STR_ABS,
+ PATH_STR_REL,
+ PATH_FD_REL,
+};
+
+static void validate_pin(int map_fd, const char *map_name, int src_value,
+ enum path_kind path_kind)
+{
+ LIBBPF_OPTS(bpf_obj_pin_opts, pin_opts);
+ char abs_path[PATH_MAX], old_cwd[PATH_MAX];
+ const char *pin_path = NULL;
+ int zero = 0, dst_value, map_fd2, err;
+
+ snprintf(abs_path, sizeof(abs_path), "/sys/fs/bpf/%s", map_name);
+ old_cwd[0] = '\0';
+
+ switch (path_kind) {
+ case PATH_STR_ABS:
+ /* absolute path */
+ pin_path = abs_path;
+ break;
+ case PATH_STR_REL:
+ /* cwd + relative path */
+ ASSERT_OK_PTR(getcwd(old_cwd, sizeof(old_cwd)), "getcwd");
+ ASSERT_OK(chdir("/sys/fs/bpf"), "chdir");
+ pin_path = map_name;
+ break;
+ case PATH_FD_REL:
+ /* dir fd + relative path */
+ pin_opts.file_flags = BPF_F_PATH_FD;
+ pin_opts.path_fd = open("/sys/fs/bpf", O_PATH);
+ ASSERT_GE(pin_opts.path_fd, 0, "path_fd");
+ pin_path = map_name;
+ break;
+ }
+
+ /* pin BPF map using specified path definition */
+ err = bpf_obj_pin_opts(map_fd, pin_path, &pin_opts);
+ ASSERT_OK(err, "obj_pin");
+
+ /* cleanup */
+ if (path_kind == PATH_FD_REL && pin_opts.path_fd >= 0)
+ close(pin_opts.path_fd);
+ if (old_cwd[0])
+ ASSERT_OK(chdir(old_cwd), "restore_cwd");
+
+ map_fd2 = bpf_obj_get(abs_path);
+ if (!ASSERT_GE(map_fd2, 0, "map_get"))
+ goto cleanup;
+
+ /* update map through one FD */
+ err = bpf_map_update_elem(map_fd, &zero, &src_value, 0);
+ ASSERT_OK(err, "map_update");
+
+ /* check values written/read through different FDs do match */
+ dst_value = 0;
+ err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value);
+ ASSERT_OK(err, "map_lookup");
+ ASSERT_EQ(dst_value, src_value, "map_value_eq");
+cleanup:
+ if (map_fd2 >= 0)
+ ASSERT_OK(close(map_fd2), "close_map_fd2");
+ unlink(abs_path);
+}
+
+static void validate_get(int map_fd, const char *map_name, int src_value,
+ enum path_kind path_kind)
+{
+ LIBBPF_OPTS(bpf_obj_get_opts, get_opts);
+ char abs_path[PATH_MAX], old_cwd[PATH_MAX];
+ const char *pin_path = NULL;
+ int zero = 0, dst_value, map_fd2, err;
+
+ snprintf(abs_path, sizeof(abs_path), "/sys/fs/bpf/%s", map_name);
+	/* pin the map at a fixed absolute path; path_kind only varies the get below */
+ err = bpf_obj_pin(map_fd, abs_path);
+ if (!ASSERT_OK(err, "pin_map"))
+ return;
+
+ old_cwd[0] = '\0';
+
+ switch (path_kind) {
+ case PATH_STR_ABS:
+ /* absolute path */
+ pin_path = abs_path;
+ break;
+ case PATH_STR_REL:
+ /* cwd + relative path */
+ ASSERT_OK_PTR(getcwd(old_cwd, sizeof(old_cwd)), "getcwd");
+ ASSERT_OK(chdir("/sys/fs/bpf"), "chdir");
+ pin_path = map_name;
+ break;
+ case PATH_FD_REL:
+ /* dir fd + relative path */
+ get_opts.file_flags = BPF_F_PATH_FD;
+ get_opts.path_fd = open("/sys/fs/bpf", O_PATH);
+ ASSERT_GE(get_opts.path_fd, 0, "path_fd");
+ pin_path = map_name;
+ break;
+ }
+
+ map_fd2 = bpf_obj_get_opts(pin_path, &get_opts);
+ if (!ASSERT_GE(map_fd2, 0, "map_get"))
+ goto cleanup;
+
+ /* cleanup */
+ if (path_kind == PATH_FD_REL && get_opts.path_fd >= 0)
+ close(get_opts.path_fd);
+ if (old_cwd[0])
+ ASSERT_OK(chdir(old_cwd), "restore_cwd");
+
+ /* update map through one FD */
+ err = bpf_map_update_elem(map_fd, &zero, &src_value, 0);
+ ASSERT_OK(err, "map_update");
+
+ /* check values written/read through different FDs do match */
+ dst_value = 0;
+ err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value);
+ ASSERT_OK(err, "map_lookup");
+ ASSERT_EQ(dst_value, src_value, "map_value_eq");
+cleanup:
+ if (map_fd2 >= 0)
+ ASSERT_OK(close(map_fd2), "close_map_fd2");
+ unlink(abs_path);
+}
+
+static void bpf_obj_pinning_mounted(enum path_kind path_kind)
+{
+ const char *map_name = "mounted_map";
+ int map_fd;
+
+ /* create BPF map to pin */
+ map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, map_name, 4, 4, 1, NULL);
+ if (!ASSERT_GE(map_fd, 0, "map_fd"))
+ return;
+
+ validate_pin(map_fd, map_name, 100 + (int)path_kind, path_kind);
+ validate_get(map_fd, map_name, 200 + (int)path_kind, path_kind);
+ ASSERT_OK(close(map_fd), "close_map_fd");
+}
+
+void test_bpf_obj_pinning(void)
+{
+ if (test__start_subtest("detached"))
+ bpf_obj_pinning_detached();
+ if (test__start_subtest("mounted-str-abs"))
+ bpf_obj_pinning_mounted(PATH_STR_ABS);
+ if (test__start_subtest("mounted-str-rel"))
+ bpf_obj_pinning_mounted(PATH_STR_REL);
+ if (test__start_subtest("mounted-fd-rel"))
+ bpf_obj_pinning_mounted(PATH_FD_REL);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c b/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c
new file mode 100644
index 000000000000..730357cd0c9a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_qdisc.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/pkt_sched.h>
+#include <linux/rtnetlink.h>
+#include <test_progs.h>
+
+#include "network_helpers.h"
+#include "bpf_qdisc_fifo.skel.h"
+#include "bpf_qdisc_fq.skel.h"
+#include "bpf_qdisc_fail__incompl_ops.skel.h"
+
+#define LO_IFINDEX 1
+
+static const unsigned int total_bytes = 10 * 1024 * 1024;
+
+static void do_test(char *qdisc)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = LO_IFINDEX,
+ .attach_point = BPF_TC_QDISC,
+ .parent = TC_H_ROOT,
+ .handle = 0x8000000,
+ .qdisc = qdisc);
+ int srv_fd = -1, cli_fd = -1;
+ int err;
+
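+	/* install the BPF qdisc as root on loopback, then push TCP data through it */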
+ err = bpf_tc_hook_create(&hook);
+ if (!ASSERT_OK(err, "attach qdisc"))
+ return;
+
+ srv_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
+ if (!ASSERT_OK_FD(srv_fd, "start server"))
+ goto done;
+
+ cli_fd = connect_to_fd(srv_fd, 0);
+ if (!ASSERT_OK_FD(cli_fd, "connect to client"))
+ goto done;
+
+ err = send_recv_data(srv_fd, cli_fd, total_bytes);
+ ASSERT_OK(err, "send_recv_data");
+
+done:
+ if (srv_fd != -1)
+ close(srv_fd);
+ if (cli_fd != -1)
+ close(cli_fd);
+
+ bpf_tc_hook_destroy(&hook);
+}
+
+static void test_fifo(void)
+{
+ struct bpf_qdisc_fifo *fifo_skel;
+
+ fifo_skel = bpf_qdisc_fifo__open_and_load();
+ if (!ASSERT_OK_PTR(fifo_skel, "bpf_qdisc_fifo__open_and_load"))
+ return;
+
+ if (!ASSERT_OK(bpf_qdisc_fifo__attach(fifo_skel), "bpf_qdisc_fifo__attach"))
+ goto out;
+
+ do_test("bpf_fifo");
+out:
+ bpf_qdisc_fifo__destroy(fifo_skel);
+}
+
+static void test_fq(void)
+{
+ struct bpf_qdisc_fq *fq_skel;
+
+ fq_skel = bpf_qdisc_fq__open_and_load();
+ if (!ASSERT_OK_PTR(fq_skel, "bpf_qdisc_fq__open_and_load"))
+ return;
+
+ if (!ASSERT_OK(bpf_qdisc_fq__attach(fq_skel), "bpf_qdisc_fq__attach"))
+ goto out;
+
+ do_test("bpf_fq");
+out:
+ bpf_qdisc_fq__destroy(fq_skel);
+}
+
+static void test_qdisc_attach_to_mq(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook,
+ .attach_point = BPF_TC_QDISC,
+ .parent = TC_H_MAKE(1 << 16, 1),
+ .handle = 0x11 << 16,
+ .qdisc = "bpf_fifo");
+ struct bpf_qdisc_fifo *fifo_skel;
+ int err;
+
+ fifo_skel = bpf_qdisc_fifo__open_and_load();
+ if (!ASSERT_OK_PTR(fifo_skel, "bpf_qdisc_fifo__open_and_load"))
+ return;
+
+ if (!ASSERT_OK(bpf_qdisc_fifo__attach(fifo_skel), "bpf_qdisc_fifo__attach"))
+ goto out;
+
+ SYS(out, "ip link add veth0 type veth peer veth1");
+ hook.ifindex = if_nametoindex("veth0");
+ SYS(out, "tc qdisc add dev veth0 root handle 1: mq");
+
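+	/* parent 1:1 grafts the BPF qdisc under the first mq tx-queue class */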
+ err = bpf_tc_hook_create(&hook);
+ ASSERT_OK(err, "attach qdisc");
+
+ bpf_tc_hook_destroy(&hook);
+
+ SYS(out, "tc qdisc delete dev veth0 root mq");
+out:
+ bpf_qdisc_fifo__destroy(fifo_skel);
+}
+
+static void test_qdisc_attach_to_non_root(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = LO_IFINDEX,
+ .attach_point = BPF_TC_QDISC,
+ .parent = TC_H_MAKE(1 << 16, 1),
+ .handle = 0x11 << 16,
+ .qdisc = "bpf_fifo");
+ struct bpf_qdisc_fifo *fifo_skel;
+ int err;
+
+ fifo_skel = bpf_qdisc_fifo__open_and_load();
+ if (!ASSERT_OK_PTR(fifo_skel, "bpf_qdisc_fifo__open_and_load"))
+ return;
+
+ if (!ASSERT_OK(bpf_qdisc_fifo__attach(fifo_skel), "bpf_qdisc_fifo__attach"))
+ goto out;
+
+ SYS(out, "tc qdisc add dev lo root handle 1: htb");
+ SYS(out_del_htb, "tc class add dev lo parent 1: classid 1:1 htb rate 75Kbit");
+
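+	/* grafting a BPF qdisc under an htb class (non-root) must be rejected */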
+ err = bpf_tc_hook_create(&hook);
+ if (!ASSERT_ERR(err, "attach qdisc"))
+ bpf_tc_hook_destroy(&hook);
+
+out_del_htb:
+ SYS(out, "tc qdisc delete dev lo root htb");
+out:
+ bpf_qdisc_fifo__destroy(fifo_skel);
+}
+
+static void test_incompl_ops(void)
+{
+ struct bpf_qdisc_fail__incompl_ops *skel;
+ struct bpf_link *link;
+
+ skel = bpf_qdisc_fail__incompl_ops__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "bpf_qdisc_fail__incompl_ops__open_and_load"))
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.test);
+ if (!ASSERT_ERR_PTR(link, "bpf_map__attach_struct_ops"))
+ bpf_link__destroy(link);
+
+ bpf_qdisc_fail__incompl_ops__destroy(skel);
+}
+
+static int get_default_qdisc(char *qdisc_name)
+{
+ FILE *f;
+ int num;
+
+ f = fopen("/proc/sys/net/core/default_qdisc", "r");
+ if (!f)
+ return -errno;
+
+	/* bound the read to the caller's IFNAMSIZ-sized (16-byte) buffer */
+	num = fscanf(f, "%15s", qdisc_name);
+ fclose(f);
+
+ return num == 1 ? 0 : -EFAULT;
+}
+
+static void test_default_qdisc_attach_to_mq(void)
+{
+ char default_qdisc[IFNAMSIZ] = {};
+ struct bpf_qdisc_fifo *fifo_skel;
+ struct netns_obj *netns = NULL;
+ int err;
+
+ fifo_skel = bpf_qdisc_fifo__open_and_load();
+ if (!ASSERT_OK_PTR(fifo_skel, "bpf_qdisc_fifo__open_and_load"))
+ return;
+
+ if (!ASSERT_OK(bpf_qdisc_fifo__attach(fifo_skel), "bpf_qdisc_fifo__attach"))
+ goto out;
+
+ err = get_default_qdisc(default_qdisc);
+ if (!ASSERT_OK(err, "read sysctl net.core.default_qdisc"))
+ goto out;
+
+ err = write_sysctl("/proc/sys/net/core/default_qdisc", "bpf_fifo");
+ if (!ASSERT_OK(err, "write sysctl net.core.default_qdisc"))
+ goto out;
+
+ netns = netns_new("bpf_qdisc_ns", true);
+ if (!ASSERT_OK_PTR(netns, "netns_new"))
+ goto out;
+
+ SYS(out, "ip link add veth0 type veth peer veth1");
+ SYS(out, "tc qdisc add dev veth0 root handle 1: mq");
+
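+	/* mq instantiates the default qdisc (now bpf_fifo) per tx queue, running its init */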
+ ASSERT_EQ(fifo_skel->bss->init_called, true, "init_called");
+
+ SYS(out, "tc qdisc delete dev veth0 root mq");
+out:
+ netns_free(netns);
+ if (default_qdisc[0])
+ write_sysctl("/proc/sys/net/core/default_qdisc", default_qdisc);
+
+ bpf_qdisc_fifo__destroy(fifo_skel);
+}
+
+void test_ns_bpf_qdisc(void)
+{
+ if (test__start_subtest("fifo"))
+ test_fifo();
+ if (test__start_subtest("fq"))
+ test_fq();
+ if (test__start_subtest("attach to mq"))
+ test_qdisc_attach_to_mq();
+ if (test__start_subtest("attach to non root"))
+ test_qdisc_attach_to_non_root();
+ if (test__start_subtest("incompl_ops"))
+ test_incompl_ops();
+}
+
+void serial_test_bpf_qdisc_default(void)
+{
+ test_default_qdisc_attach_to_mq();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index 9a8f47fc0b91..b7d1b52309d0 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -2,228 +2,605 @@
/* Copyright (c) 2019 Facebook */
#include <linux/err.h>
+#include <netinet/tcp.h>
#include <test_progs.h>
+#include "network_helpers.h"
#include "bpf_dctcp.skel.h"
#include "bpf_cubic.skel.h"
-
-#define min(a, b) ((a) < (b) ? (a) : (b))
+#include "bpf_tcp_nogpl.skel.h"
+#include "tcp_ca_update.skel.h"
+#include "bpf_dctcp_release.skel.h"
+#include "tcp_ca_write_sk_pacing.skel.h"
+#include "tcp_ca_incompl_cong_ops.skel.h"
+#include "tcp_ca_unsupp_cong_op.skel.h"
+#include "tcp_ca_kfunc.skel.h"
+#include "bpf_cc_cubic.skel.h"
static const unsigned int total_bytes = 10 * 1024 * 1024;
-static const struct timeval timeo_sec = { .tv_sec = 10 };
-static const size_t timeo_optlen = sizeof(timeo_sec);
static int expected_stg = 0xeB9F;
-static int stop, duration;
-static int settimeo(int fd)
+struct cb_opts {
+ const char *cc;
+ int map_fd;
+};
+
+static int settcpca(int fd, const char *tcp_ca)
{
int err;
- err = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec,
- timeo_optlen);
- if (CHECK(err == -1, "setsockopt(fd, SO_RCVTIMEO)", "errno:%d\n",
- errno))
- return -1;
-
- err = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo_sec,
- timeo_optlen);
- if (CHECK(err == -1, "setsockopt(fd, SO_SNDTIMEO)", "errno:%d\n",
- errno))
+ err = setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, tcp_ca, strlen(tcp_ca));
+ if (!ASSERT_NEQ(err, -1, "setsockopt"))
return -1;
return 0;
}
-static int settcpca(int fd, const char *tcp_ca)
+static bool start_test(char *addr_str,
+ const struct network_helper_opts *srv_opts,
+ const struct network_helper_opts *cli_opts,
+ int *srv_fd, int *cli_fd)
+{
+ *srv_fd = start_server_str(AF_INET6, SOCK_STREAM, addr_str, 0, srv_opts);
+ if (!ASSERT_NEQ(*srv_fd, -1, "start_server_str"))
+ goto err;
+
+ /* connect to server */
+ *cli_fd = connect_to_fd_opts(*srv_fd, cli_opts);
+ if (!ASSERT_NEQ(*cli_fd, -1, "connect_to_fd_opts"))
+ goto err;
+
+ return true;
+
+err:
+ if (*srv_fd != -1) {
+ close(*srv_fd);
+ *srv_fd = -1;
+ }
+ if (*cli_fd != -1) {
+ close(*cli_fd);
+ *cli_fd = -1;
+ }
+ return false;
+}
+
+static void do_test(const struct network_helper_opts *opts)
+{
+ int lfd = -1, fd = -1;
+
+ if (!start_test(NULL, opts, opts, &lfd, &fd))
+ goto done;
+
+ ASSERT_OK(send_recv_data(lfd, fd, total_bytes), "send_recv_data");
+
+done:
+ if (lfd != -1)
+ close(lfd);
+ if (fd != -1)
+ close(fd);
+}
+
+static int cc_cb(int fd, void *opts)
+{
+ struct cb_opts *cb_opts = (struct cb_opts *)opts;
+
+ return settcpca(fd, cb_opts->cc);
+}
+
+static void test_cubic(void)
+{
+ struct cb_opts cb_opts = {
+ .cc = "bpf_cubic",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
+ struct bpf_cubic *cubic_skel;
+ struct bpf_link *link;
+
+ cubic_skel = bpf_cubic__open_and_load();
+ if (!ASSERT_OK_PTR(cubic_skel, "bpf_cubic__open_and_load"))
+ return;
+
+ link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic);
+ if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
+ bpf_cubic__destroy(cubic_skel);
+ return;
+ }
+
+ do_test(&opts);
+
+ ASSERT_EQ(cubic_skel->bss->bpf_cubic_acked_called, 1, "pkts_acked called");
+
+ bpf_link__destroy(link);
+ bpf_cubic__destroy(cubic_skel);
+}
+
+static int stg_post_socket_cb(int fd, void *opts)
{
+ struct cb_opts *cb_opts = (struct cb_opts *)opts;
int err;
- err = setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, tcp_ca, strlen(tcp_ca));
- if (CHECK(err == -1, "setsockopt(fd, TCP_CONGESTION)", "errno:%d\n",
- errno))
- return -1;
+ err = settcpca(fd, cb_opts->cc);
+ if (err)
+ return err;
+
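+	/* seed this socket's map entry for the BPF program to read back */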
+ err = bpf_map_update_elem(cb_opts->map_fd, &fd,
+ &expected_stg, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(sk_stg_map)"))
+ return err;
return 0;
}
-static void *server(void *arg)
+static void test_dctcp(void)
{
- int lfd = (int)(long)arg, err = 0, fd;
- ssize_t nr_sent = 0, bytes = 0;
- char batch[1500];
+ struct cb_opts cb_opts = {
+ .cc = "bpf_dctcp",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
+ struct network_helper_opts cli_opts = {
+ .post_socket_cb = stg_post_socket_cb,
+ .cb_opts = &cb_opts,
+ };
+ int lfd = -1, fd = -1, tmp_stg, err;
+ struct bpf_dctcp *dctcp_skel;
+ struct bpf_link *link;
- fd = accept(lfd, NULL, NULL);
- while (fd == -1) {
- if (errno == EINTR)
- continue;
- err = -errno;
- goto done;
+ dctcp_skel = bpf_dctcp__open_and_load();
+ if (!ASSERT_OK_PTR(dctcp_skel, "bpf_dctcp__open_and_load"))
+ return;
+
+ link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);
+ if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
+ bpf_dctcp__destroy(dctcp_skel);
+ return;
}
- if (settimeo(fd)) {
- err = -errno;
+ cb_opts.map_fd = bpf_map__fd(dctcp_skel->maps.sk_stg_map);
+ if (!start_test(NULL, &opts, &cli_opts, &lfd, &fd))
goto done;
- }
- while (bytes < total_bytes && !READ_ONCE(stop)) {
- nr_sent = send(fd, &batch,
- min(total_bytes - bytes, sizeof(batch)), 0);
- if (nr_sent == -1 && errno == EINTR)
- continue;
- if (nr_sent == -1) {
- err = -errno;
- break;
- }
- bytes += nr_sent;
- }
+ err = bpf_map_lookup_elem(cb_opts.map_fd, &fd, &tmp_stg);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem(sk_stg_map)") ||
+ !ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem(sk_stg_map)"))
+ goto done;
- CHECK(bytes != total_bytes, "send", "%zd != %u nr_sent:%zd errno:%d\n",
- bytes, total_bytes, nr_sent, errno);
+ ASSERT_OK(send_recv_data(lfd, fd, total_bytes), "send_recv_data");
+ ASSERT_EQ(dctcp_skel->bss->stg_result, expected_stg, "stg_result");
done:
+ bpf_link__destroy(link);
+ bpf_dctcp__destroy(dctcp_skel);
+ if (lfd != -1)
+ close(lfd);
if (fd != -1)
close(fd);
- if (err) {
- WRITE_ONCE(stop, 1);
- return ERR_PTR(err);
+}
+
+static void test_dctcp_autoattach_map(void)
+{
+ struct cb_opts cb_opts = {
+ .cc = "bpf_dctcp",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
+ struct bpf_dctcp *dctcp_skel;
+ struct bpf_link *link;
+
+ dctcp_skel = bpf_dctcp__open_and_load();
+ if (!ASSERT_OK_PTR(dctcp_skel, "bpf_dctcp__open_and_load"))
+ return;
+
+ bpf_map__set_autoattach(dctcp_skel->maps.dctcp, true);
+ bpf_map__set_autoattach(dctcp_skel->maps.dctcp_nouse, false);
+
+ if (!ASSERT_OK(bpf_dctcp__attach(dctcp_skel), "bpf_dctcp__attach"))
+ goto destroy;
+
+ /* struct_ops is auto-attached */
+ link = dctcp_skel->links.dctcp;
+ if (!ASSERT_OK_PTR(link, "link"))
+ goto destroy;
+
+ do_test(&opts);
+
+destroy:
+ bpf_dctcp__destroy(dctcp_skel);
+}
+
+static char *err_str;
+static bool found;
+
+static int libbpf_debug_print(enum libbpf_print_level level,
+ const char *format, va_list args)
+{
+ const char *prog_name, *log_buf;
+
+ if (level != LIBBPF_WARN ||
+ !strstr(format, "-- BEGIN PROG LOAD LOG --")) {
+ vprintf(format, args);
+ return 0;
}
- return NULL;
+
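+	/* libbpf's prog-load warning passes the prog name and log buffer as args */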
+ prog_name = va_arg(args, char *);
+ log_buf = va_arg(args, char *);
+ if (!log_buf)
+ goto out;
+ if (err_str && strstr(log_buf, err_str) != NULL)
+ found = true;
+out:
+ printf(format, prog_name, log_buf);
+ return 0;
}
-static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
+static void test_invalid_license(void)
{
- struct sockaddr_in6 sa6 = {};
- ssize_t nr_recv = 0, bytes = 0;
- int lfd = -1, fd = -1;
- pthread_t srv_thread;
- socklen_t addrlen = sizeof(sa6);
- void *thread_ret;
- char batch[1500];
- int err;
+ libbpf_print_fn_t old_print_fn;
+ struct bpf_tcp_nogpl *skel;
- WRITE_ONCE(stop, 0);
+ err_str = "struct ops programs must have a GPL compatible license";
+ found = false;
+ old_print_fn = libbpf_set_print(libbpf_debug_print);
- lfd = socket(AF_INET6, SOCK_STREAM, 0);
- if (CHECK(lfd == -1, "socket", "errno:%d\n", errno))
- return;
- fd = socket(AF_INET6, SOCK_STREAM, 0);
- if (CHECK(fd == -1, "socket", "errno:%d\n", errno)) {
- close(lfd);
+ skel = bpf_tcp_nogpl__open_and_load();
+ ASSERT_NULL(skel, "bpf_tcp_nogpl");
+ ASSERT_EQ(found, true, "expected_err_msg");
+
+ bpf_tcp_nogpl__destroy(skel);
+ libbpf_set_print(old_print_fn);
+}
+
+static void test_dctcp_fallback(void)
+{
+ int err, lfd = -1, cli_fd = -1, srv_fd = -1;
+ struct bpf_dctcp *dctcp_skel;
+ struct bpf_link *link = NULL;
+ struct cb_opts dctcp = {
+ .cc = "bpf_dctcp",
+ };
+ struct network_helper_opts srv_opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &dctcp,
+ };
+ struct cb_opts cubic = {
+ .cc = "cubic",
+ };
+ struct network_helper_opts cli_opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cubic,
+ };
+ char srv_cc[16];
+ socklen_t cc_len = sizeof(srv_cc);
+
+ dctcp_skel = bpf_dctcp__open();
+ if (!ASSERT_OK_PTR(dctcp_skel, "dctcp_skel"))
return;
- }
+ strcpy(dctcp_skel->rodata->fallback_cc, "cubic");
+ if (!ASSERT_OK(bpf_dctcp__load(dctcp_skel), "bpf_dctcp__load"))
+ goto done;
- if (settcpca(lfd, tcp_ca) || settcpca(fd, tcp_ca) ||
- settimeo(lfd) || settimeo(fd))
+ link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);
+ if (!ASSERT_OK_PTR(link, "dctcp link"))
goto done;
- /* bind, listen and start server thread to accept */
- sa6.sin6_family = AF_INET6;
- sa6.sin6_addr = in6addr_loopback;
- err = bind(lfd, (struct sockaddr *)&sa6, addrlen);
- if (CHECK(err == -1, "bind", "errno:%d\n", errno))
+ if (!start_test("::1", &srv_opts, &cli_opts, &lfd, &cli_fd))
goto done;
- err = getsockname(lfd, (struct sockaddr *)&sa6, &addrlen);
- if (CHECK(err == -1, "getsockname", "errno:%d\n", errno))
+
+ srv_fd = accept(lfd, NULL, 0);
+ if (!ASSERT_GE(srv_fd, 0, "srv_fd"))
goto done;
- err = listen(lfd, 1);
- if (CHECK(err == -1, "listen", "errno:%d\n", errno))
+ ASSERT_STREQ(dctcp_skel->bss->cc_res, "cubic", "cc_res");
+ ASSERT_EQ(dctcp_skel->bss->tcp_cdg_res, -ENOTSUPP, "tcp_cdg_res");
+	/* All setsockopt(TCP_CONGESTION) calls in the recursively
+	 * entered bpf_dctcp->init() should fail with -EBUSY.
+	 */
+ ASSERT_EQ(dctcp_skel->bss->ebusy_cnt, 3, "ebusy_cnt");
+
+ err = getsockopt(srv_fd, SOL_TCP, TCP_CONGESTION, srv_cc, &cc_len);
+ if (!ASSERT_OK(err, "getsockopt(srv_fd, TCP_CONGESTION)"))
goto done;
+ ASSERT_STREQ(srv_cc, "cubic", "srv_fd cc");
- if (sk_stg_map) {
- err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd,
- &expected_stg, BPF_NOEXIST);
- if (CHECK(err, "bpf_map_update_elem(sk_stg_map)",
- "err:%d errno:%d\n", err, errno))
- goto done;
- }
+done:
+ bpf_link__destroy(link);
+ bpf_dctcp__destroy(dctcp_skel);
+ if (lfd != -1)
+ close(lfd);
+ if (srv_fd != -1)
+ close(srv_fd);
+ if (cli_fd != -1)
+ close(cli_fd);
+}
- /* connect to server */
- err = connect(fd, (struct sockaddr *)&sa6, addrlen);
- if (CHECK(err == -1, "connect", "errno:%d\n", errno))
- goto done;
+static void test_rel_setsockopt(void)
+{
+ struct bpf_dctcp_release *rel_skel;
+ libbpf_print_fn_t old_print_fn;
- if (sk_stg_map) {
- int tmp_stg;
+ err_str = "program of this type cannot use helper bpf_setsockopt";
+ found = false;
- err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd,
- &tmp_stg);
- if (CHECK(!err || errno != ENOENT,
- "bpf_map_lookup_elem(sk_stg_map)",
- "err:%d errno:%d\n", err, errno))
- goto done;
- }
+ old_print_fn = libbpf_set_print(libbpf_debug_print);
+ rel_skel = bpf_dctcp_release__open_and_load();
+ libbpf_set_print(old_print_fn);
- err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd);
- if (CHECK(err != 0, "pthread_create", "err:%d errno:%d\n", err, errno))
- goto done;
+ ASSERT_ERR_PTR(rel_skel, "rel_skel");
+ ASSERT_TRUE(found, "expected_err_msg");
- /* recv total_bytes */
- while (bytes < total_bytes && !READ_ONCE(stop)) {
- nr_recv = recv(fd, &batch,
- min(total_bytes - bytes, sizeof(batch)), 0);
- if (nr_recv == -1 && errno == EINTR)
- continue;
- if (nr_recv == -1)
- break;
- bytes += nr_recv;
- }
+ bpf_dctcp_release__destroy(rel_skel);
+}
- CHECK(bytes != total_bytes, "recv", "%zd != %u nr_recv:%zd errno:%d\n",
- bytes, total_bytes, nr_recv, errno);
+static void test_write_sk_pacing(void)
+{
+ struct tcp_ca_write_sk_pacing *skel;
+ struct bpf_link *link;
- WRITE_ONCE(stop, 1);
- pthread_join(srv_thread, &thread_ret);
- CHECK(IS_ERR(thread_ret), "pthread_join", "thread_ret:%ld",
- PTR_ERR(thread_ret));
-done:
- close(lfd);
- close(fd);
+ skel = tcp_ca_write_sk_pacing__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.write_sk_pacing);
+ ASSERT_OK_PTR(link, "attach_struct_ops");
+
+ bpf_link__destroy(link);
+ tcp_ca_write_sk_pacing__destroy(skel);
}
-static void test_cubic(void)
+static void test_incompl_cong_ops(void)
{
- struct bpf_cubic *cubic_skel;
+ struct tcp_ca_incompl_cong_ops *skel;
struct bpf_link *link;
- cubic_skel = bpf_cubic__open_and_load();
- if (CHECK(!cubic_skel, "bpf_cubic__open_and_load", "failed\n"))
+ skel = tcp_ca_incompl_cong_ops__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
return;
- link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic);
- if (CHECK(IS_ERR(link), "bpf_map__attach_struct_ops", "err:%ld\n",
- PTR_ERR(link))) {
- bpf_cubic__destroy(cubic_skel);
+	/* The missing cong_avoid() and cong_control() callbacks are
+	 * only reported at attach time:
+	 */
+ link = bpf_map__attach_struct_ops(skel->maps.incompl_cong_ops);
+ ASSERT_ERR_PTR(link, "attach_struct_ops");
+
+ bpf_link__destroy(link);
+ tcp_ca_incompl_cong_ops__destroy(skel);
+}
+
+static void test_unsupp_cong_op(void)
+{
+ libbpf_print_fn_t old_print_fn;
+ struct tcp_ca_unsupp_cong_op *skel;
+
+ err_str = "attach to unsupported member get_info";
+ found = false;
+ old_print_fn = libbpf_set_print(libbpf_debug_print);
+
+ skel = tcp_ca_unsupp_cong_op__open_and_load();
+ ASSERT_NULL(skel, "open_and_load");
+ ASSERT_EQ(found, true, "expected_err_msg");
+
+ tcp_ca_unsupp_cong_op__destroy(skel);
+ libbpf_set_print(old_print_fn);
+}
+
+static void test_update_ca(void)
+{
+ struct cb_opts cb_opts = {
+ .cc = "tcp_ca_update",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
+ struct tcp_ca_update *skel;
+ struct bpf_link *link;
+ int saved_ca1_cnt;
+ int err;
+
+ skel = tcp_ca_update__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open"))
return;
- }
- do_test("bpf_cubic", NULL);
+ link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+ goto out;
+
+ do_test(&opts);
+ saved_ca1_cnt = skel->bss->ca1_cnt;
+ ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");
+
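+	/* switch the link over to the second CA; ca1's counters must stop moving */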
+ err = bpf_link__update_map(link, skel->maps.ca_update_2);
+ ASSERT_OK(err, "update_map");
+
+ do_test(&opts);
+ ASSERT_EQ(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
+ ASSERT_GT(skel->bss->ca2_cnt, 0, "ca2_ca2_cnt");
bpf_link__destroy(link);
- bpf_cubic__destroy(cubic_skel);
+out:
+ tcp_ca_update__destroy(skel);
}
-static void test_dctcp(void)
+static void test_update_wrong(void)
{
- struct bpf_dctcp *dctcp_skel;
+ struct cb_opts cb_opts = {
+ .cc = "tcp_ca_update",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
+ struct tcp_ca_update *skel;
struct bpf_link *link;
+ int saved_ca1_cnt;
+ int err;
- dctcp_skel = bpf_dctcp__open_and_load();
- if (CHECK(!dctcp_skel, "bpf_dctcp__open_and_load", "failed\n"))
+ skel = tcp_ca_update__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open"))
return;
- link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);
- if (CHECK(IS_ERR(link), "bpf_map__attach_struct_ops", "err:%ld\n",
- PTR_ERR(link))) {
- bpf_dctcp__destroy(dctcp_skel);
+ link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+ goto out;
+
+ do_test(&opts);
+ saved_ca1_cnt = skel->bss->ca1_cnt;
+ ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");
+
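+	/* an incompatible replacement map must be rejected, leaving ca1 active */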
+ err = bpf_link__update_map(link, skel->maps.ca_wrong);
+ ASSERT_ERR(err, "update_map");
+
+ do_test(&opts);
+ ASSERT_GT(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
+
+ bpf_link__destroy(link);
+out:
+ tcp_ca_update__destroy(skel);
+}
+
+static void test_mixed_links(void)
+{
+ struct cb_opts cb_opts = {
+ .cc = "tcp_ca_update",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
+ struct tcp_ca_update *skel;
+ struct bpf_link *link, *link_nl;
+ int err;
+
+ skel = tcp_ca_update__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open"))
+ return;
+
+ link_nl = bpf_map__attach_struct_ops(skel->maps.ca_no_link);
+ if (!ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl"))
+ goto out;
+
+ link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
+ ASSERT_OK_PTR(link, "attach_struct_ops");
+
+ do_test(&opts);
+ ASSERT_GT(skel->bss->ca1_cnt, 0, "ca1_ca1_cnt");
+
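+	/* ca_no_link is not link-backed, so updating the link to it must fail */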
+ err = bpf_link__update_map(link, skel->maps.ca_no_link);
+ ASSERT_ERR(err, "update_map");
+
+ bpf_link__destroy(link);
+ bpf_link__destroy(link_nl);
+out:
+ tcp_ca_update__destroy(skel);
+}
+
+static void test_multi_links(void)
+{
+ struct tcp_ca_update *skel;
+ struct bpf_link *link;
+
+ skel = tcp_ca_update__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open"))
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
+ ASSERT_OK_PTR(link, "attach_struct_ops_1st");
+ bpf_link__destroy(link);
+
+	/* A map should be usable for creating links multiple times. */
+ link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
+ ASSERT_OK_PTR(link, "attach_struct_ops_2nd");
+ bpf_link__destroy(link);
+
+ tcp_ca_update__destroy(skel);
+}
+
+static void test_link_replace(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_link_update_opts, opts);
+ struct tcp_ca_update *skel;
+ struct bpf_link *link;
+ int err;
+
+ skel = tcp_ca_update__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open"))
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
+ ASSERT_OK_PTR(link, "attach_struct_ops_1st");
+ bpf_link__destroy(link);
+
+ link = bpf_map__attach_struct_ops(skel->maps.ca_update_2);
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops_2nd"))
+ goto out;
+
+	/* BPF_F_REPLACE with a wrong old map FD. It should fail!
+ *
+ * With BPF_F_REPLACE, the link should be updated only if the
+ * old map fd given here matches the map backing the link.
+ */
+ opts.old_map_fd = bpf_map__fd(skel->maps.ca_update_1);
+ opts.flags = BPF_F_REPLACE;
+ err = bpf_link_update(bpf_link__fd(link),
+ bpf_map__fd(skel->maps.ca_update_1),
+ &opts);
+ ASSERT_ERR(err, "bpf_link_update_fail");
+
+	/* BPF_F_REPLACE with the correct old map FD. It should succeed! */
+ opts.old_map_fd = bpf_map__fd(skel->maps.ca_update_2);
+ err = bpf_link_update(bpf_link__fd(link),
+ bpf_map__fd(skel->maps.ca_update_1),
+ &opts);
+ ASSERT_OK(err, "bpf_link_update_success");
+
+ bpf_link__destroy(link);
+
+out:
+ tcp_ca_update__destroy(skel);
+}
+
+static void test_tcp_ca_kfunc(void)
+{
+ struct tcp_ca_kfunc *skel;
+
+ skel = tcp_ca_kfunc__open_and_load();
+ ASSERT_OK_PTR(skel, "tcp_ca_kfunc__open_and_load");
+ tcp_ca_kfunc__destroy(skel);
+}
+
+static void test_cc_cubic(void)
+{
+ struct cb_opts cb_opts = {
+ .cc = "bpf_cc_cubic",
+ };
+ struct network_helper_opts opts = {
+ .post_socket_cb = cc_cb,
+ .cb_opts = &cb_opts,
+ };
+ struct bpf_cc_cubic *cc_cubic_skel;
+ struct bpf_link *link;
+
+ cc_cubic_skel = bpf_cc_cubic__open_and_load();
+ if (!ASSERT_OK_PTR(cc_cubic_skel, "bpf_cc_cubic__open_and_load"))
+ return;
+
+ link = bpf_map__attach_struct_ops(cc_cubic_skel->maps.cc_cubic);
+ if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
+ bpf_cc_cubic__destroy(cc_cubic_skel);
return;
}
- do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map);
- CHECK(dctcp_skel->bss->stg_result != expected_stg,
- "Unexpected stg_result", "stg_result (%x) != expected_stg (%x)\n",
- dctcp_skel->bss->stg_result, expected_stg);
+ do_test(&opts);
bpf_link__destroy(link);
- bpf_dctcp__destroy(dctcp_skel);
+ bpf_cc_cubic__destroy(cc_cubic_skel);
}
void test_bpf_tcp_ca(void)
@@ -232,4 +609,32 @@ void test_bpf_tcp_ca(void)
test_dctcp();
if (test__start_subtest("cubic"))
test_cubic();
+ if (test__start_subtest("invalid_license"))
+ test_invalid_license();
+ if (test__start_subtest("dctcp_fallback"))
+ test_dctcp_fallback();
+ if (test__start_subtest("rel_setsockopt"))
+ test_rel_setsockopt();
+ if (test__start_subtest("write_sk_pacing"))
+ test_write_sk_pacing();
+ if (test__start_subtest("incompl_cong_ops"))
+ test_incompl_cong_ops();
+ if (test__start_subtest("unsupp_cong_op"))
+ test_unsupp_cong_op();
+ if (test__start_subtest("update_ca"))
+ test_update_ca();
+ if (test__start_subtest("update_wrong"))
+ test_update_wrong();
+ if (test__start_subtest("mixed_links"))
+ test_mixed_links();
+ if (test__start_subtest("multi_links"))
+ test_multi_links();
+ if (test__start_subtest("link_replace"))
+ test_link_replace();
+ if (test__start_subtest("tcp_ca_kfunc"))
+ test_tcp_ca_kfunc();
+ if (test__start_subtest("cc_cubic"))
+ test_cc_cubic();
+ if (test__start_subtest("dctcp_autoattach_map"))
+ test_dctcp_autoattach_map();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
index e698ee6bb6c2..73f669014b69 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
@@ -19,101 +19,215 @@ extern int extra_prog_load_log_flags;
static int check_load(const char *file, enum bpf_prog_type type)
{
- struct bpf_prog_load_attr attr;
struct bpf_object *obj = NULL;
- int err, prog_fd;
-
- memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
- attr.file = file;
- attr.prog_type = type;
- attr.log_level = 4 | extra_prog_load_log_flags;
- attr.prog_flags = BPF_F_TEST_RND_HI32;
- err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
+ struct bpf_program *prog;
+ int err;
+
+ obj = bpf_object__open_file(file, NULL);
+ err = libbpf_get_error(obj);
+ if (err)
+ return err;
+
+ prog = bpf_object__next_program(obj, NULL);
+ if (!prog) {
+ err = -ENOENT;
+ goto err_out;
+ }
+
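+	/* replicate the old bpf_prog_load_xattr knobs via per-program setters */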
+ bpf_program__set_type(prog, type);
+ bpf_program__set_flags(prog, testing_prog_flags());
+ bpf_program__set_log_level(prog, 4 | extra_prog_load_log_flags);
+
+ err = bpf_object__load(obj);
+
+err_out:
bpf_object__close(obj);
return err;
}
-struct scale_test_def {
- const char *file;
- enum bpf_prog_type attach_type;
- bool fails;
-};
-
-void test_bpf_verif_scale(void)
-{
- struct scale_test_def tests[] = {
- { "loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT, true /* fails */ },
-
- { "test_verif_scale1.o", BPF_PROG_TYPE_SCHED_CLS },
- { "test_verif_scale2.o", BPF_PROG_TYPE_SCHED_CLS },
- { "test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS },
-
- { "pyperf_global.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
- { "pyperf_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-
- /* full unroll by llvm */
- { "pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
- { "pyperf100.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
- { "pyperf180.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-
- /* partial unroll. llvm will unroll loop ~150 times.
- * C loop count -> 600.
- * Asm loop count -> 4.
- * 16k insns in loop body.
- * Total of 5 such loops. Total program size ~82k insns.
- */
- { "pyperf600.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-
- /* no unroll at all.
- * C loop count -> 600.
- * ASM loop count -> 600.
- * ~110 insns in loop body.
- * Total of 5 such loops. Total program size ~1500 insns.
- */
- { "pyperf600_nounroll.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-
- { "loop1.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
- { "loop2.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
- { "loop4.o", BPF_PROG_TYPE_SCHED_CLS },
- { "loop5.o", BPF_PROG_TYPE_SCHED_CLS },
-
- /* partial unroll. 19k insn in a loop.
- * Total program size 20.8k insn.
- * ~350k processed_insns
- */
- { "strobemeta.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-
- /* no unroll, tiny loops */
- { "strobemeta_nounroll1.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
- { "strobemeta_nounroll2.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-
- /* non-inlined subprogs */
- { "strobemeta_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-
- { "test_sysctl_loop1.o", BPF_PROG_TYPE_CGROUP_SYSCTL },
- { "test_sysctl_loop2.o", BPF_PROG_TYPE_CGROUP_SYSCTL },
-
- { "test_xdp_loop.o", BPF_PROG_TYPE_XDP },
- { "test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL },
- };
+static void scale_test(const char *file,
+ enum bpf_prog_type attach_type,
+ bool should_fail)
+{
libbpf_print_fn_t old_print_fn = NULL;
- int err, i;
+ int err;
if (env.verifier_stats) {
test__force_log();
old_print_fn = libbpf_set_print(libbpf_debug_print);
}
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- const struct scale_test_def *test = &tests[i];
-
- if (!test__start_subtest(test->file))
- continue;
-
- err = check_load(test->file, test->attach_type);
- CHECK_FAIL(err && !test->fails);
- }
+ err = check_load(file, attach_type);
+ if (should_fail)
+ ASSERT_ERR(err, "expect_error");
+ else
+ ASSERT_OK(err, "expect_success");
if (env.verifier_stats)
libbpf_set_print(old_print_fn);
}
+
+void test_verif_scale1()
+{
+ scale_test("test_verif_scale1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false);
+}
+
+void test_verif_scale2()
+{
+ scale_test("test_verif_scale2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false);
+}
+
+void test_verif_scale3()
+{
+ scale_test("test_verif_scale3.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false);
+}
+
+void test_verif_scale_pyperf_global()
+{
+ scale_test("pyperf_global.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_pyperf_subprogs()
+{
+ scale_test("pyperf_subprogs.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_pyperf50()
+{
+ /* full unroll by llvm */
+ scale_test("pyperf50.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_pyperf100()
+{
+ /* full unroll by llvm */
+ scale_test("pyperf100.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_pyperf180()
+{
+ /* full unroll by llvm */
+ scale_test("pyperf180.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_pyperf600()
+{
+ /* partial unroll. llvm will unroll loop ~150 times.
+ * C loop count -> 600.
+ * Asm loop count -> 4.
+ * 16k insns in loop body.
+ * Total of 5 such loops. Total program size ~82k insns.
+ */
+ scale_test("pyperf600.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_pyperf600_bpf_loop(void)
+{
+	/* use the bpf_loop helper */
+ scale_test("pyperf600_bpf_loop.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_pyperf600_nounroll()
+{
+ /* no unroll at all.
+ * C loop count -> 600.
+ * ASM loop count -> 600.
+ * ~110 insns in loop body.
+ * Total of 5 such loops. Total program size ~1500 insns.
+ */
+ scale_test("pyperf600_nounroll.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_pyperf600_iter()
+{
+ /* open-coded BPF iterator version */
+ scale_test("pyperf600_iter.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_loop1()
+{
+ scale_test("loop1.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_loop2()
+{
+ scale_test("loop2.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_loop3_fail()
+{
+ scale_test("loop3.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, true /* fails */);
+}
+
+void test_verif_scale_loop4()
+{
+ scale_test("loop4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false);
+}
+
+void test_verif_scale_loop5()
+{
+ scale_test("loop5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false);
+}
+
+void test_verif_scale_loop6()
+{
+ scale_test("loop6.bpf.o", BPF_PROG_TYPE_KPROBE, false);
+}
+
+void test_verif_scale_strobemeta()
+{
+ /* partial unroll. 19k insn in a loop.
+ * Total program size 20.8k insn.
+ * ~350k processed_insns
+ */
+ scale_test("strobemeta.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_strobemeta_bpf_loop(void)
+{
+	/* use the bpf_loop helper */
+ scale_test("strobemeta_bpf_loop.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_strobemeta_nounroll1()
+{
+ /* no unroll, tiny loops */
+ scale_test("strobemeta_nounroll1.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_strobemeta_nounroll2()
+{
+ /* no unroll, tiny loops */
+ scale_test("strobemeta_nounroll2.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_strobemeta_subprogs()
+{
+ /* non-inlined subprogs */
+ scale_test("strobemeta_subprogs.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_sysctl_loop1()
+{
+ scale_test("test_sysctl_loop1.bpf.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false);
+}
+
+void test_verif_scale_sysctl_loop2()
+{
+ scale_test("test_sysctl_loop2.bpf.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false);
+}
+
+void test_verif_scale_xdp_loop()
+{
+ scale_test("test_xdp_loop.bpf.o", BPF_PROG_TYPE_XDP, false);
+}
+
+void test_verif_scale_seg6_loop()
+{
+ scale_test("test_seg6_loop.bpf.o", BPF_PROG_TYPE_LWT_SEG6LOCAL, false);
+}
+
+void test_verif_twfw()
+{
+ scale_test("twfw.bpf.o", BPF_PROG_TYPE_CGROUP_SKB, false);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index 8ae97e2a4b9d..054ecb6b1e9f 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -8,7 +8,6 @@
#include <linux/filter.h>
#include <linux/unistd.h>
#include <bpf/bpf.h>
-#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <string.h>
@@ -22,7 +21,6 @@
#include <bpf/libbpf.h>
#include <bpf/btf.h>
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#include "../test_btf.h"
#include "test_progs.h"
@@ -36,11 +34,10 @@ static bool always_log;
#undef CHECK
#define CHECK(condition, format...) _CHECK(condition, "check", duration, format)
-#define BTF_END_RAW 0xdeadbeef
#define NAME_TBD 0xdeadb33f
-#define NAME_NTH(N) (0xffff0000 | N)
-#define IS_NAME_NTH(X) ((X & 0xffff0000) == 0xffff0000)
+#define NAME_NTH(N) (0xfffe0000 | N)
+#define IS_NAME_NTH(X) ((X & 0xffff0000) == 0xfffe0000)
#define GET_NAME_NTH_IDX(X) (X & 0x0000ffff)
#define MAX_NR_RAW_U32 1024
@@ -882,6 +879,34 @@ static struct btf_raw_test raw_tests[] = {
.btf_load_err = true,
.err_str = "Invalid elem",
},
+{
+ .descr = "var after datasec, ptr followed by modifier",
+ .raw_types = {
+ /* .bss section */ /* [1] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2),
+ sizeof(void*)+4),
+ BTF_VAR_SECINFO_ENC(4, 0, sizeof(void*)),
+ BTF_VAR_SECINFO_ENC(6, sizeof(void*), 4),
+ /* int */ /* [2] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
+ /* int* */ /* [3] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
+ BTF_VAR_ENC(NAME_TBD, 3, 0), /* [4] */
+ /* const int */ /* [5] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 2),
+ BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0a\0b\0c\0",
+ .str_sec_size = sizeof("\0a\0b\0c\0"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = sizeof(void*)+4,
+ .key_type_id = 0,
+ .value_type_id = 1,
+ .max_entries = 1,
+},
/* Test member exceeds the size of struct.
*
* struct A {
@@ -914,7 +939,7 @@ static struct btf_raw_test raw_tests[] = {
.err_str = "Member exceeds struct_size",
},
-/* Test member exeeds the size of struct
+/* Test member exceeds the size of struct
*
* struct A {
* int m;
@@ -948,7 +973,7 @@ static struct btf_raw_test raw_tests[] = {
.err_str = "Member exceeds struct_size",
},
-/* Test member exeeds the size of struct
+/* Test member exceeds the size of struct
*
* struct A {
* int m;
@@ -1903,7 +1928,7 @@ static struct btf_raw_test raw_tests[] = {
.raw_types = {
/* int */ /* [1] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
- BTF_TYPE_ENC(0, 0x10000000, 4),
+ BTF_TYPE_ENC(0, 0x20000000, 4),
BTF_END_RAW,
},
.str_sec = "",
@@ -2899,26 +2924,6 @@ static struct btf_raw_test raw_tests[] = {
},
{
- .descr = "invalid enum kind_flag",
- .raw_types = {
- BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 1, 1), 4), /* [2] */
- BTF_ENUM_ENC(NAME_TBD, 0),
- BTF_END_RAW,
- },
- BTF_STR_SEC("\0A"),
- .map_type = BPF_MAP_TYPE_ARRAY,
- .map_name = "enum_type_check_btf",
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .key_type_id = 1,
- .value_type_id = 1,
- .max_entries = 4,
- .btf_load_err = true,
- .err_str = "Invalid btf_info kind_flag",
-},
-
-{
.descr = "valid fwd kind_flag",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
@@ -3509,6 +3514,749 @@ static struct btf_raw_test raw_tests[] = {
.value_type_id = 3 /* arr_t */,
.max_entries = 4,
},
+/*
+ * elf .rodata section size 4 and btf .rodata section vlen 0.
+ */
+{
+ .descr = "datasec: vlen == 0",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* .rodata section */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 0), 4),
+ /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0.rodata"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+},
+{
+ .descr = "datasec: name '?.foo bar:buz' is ok",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* DATASEC ?.data */ /* [3] */
+ BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0x\0?.foo bar:buz"),
+},
+{
+	.descr = "datasec: name with non-printable first char is not ok",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* DATASEC ?.data */ /* [3] */
+ BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0x\0\7foo"),
+ .err_str = "Invalid name",
+ .btf_load_err = true,
+},
+{
+ .descr = "datasec: name '\\0' is not ok",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* DATASEC \0 */ /* [3] */
+ BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0x\0"),
+ .err_str = "Invalid name",
+ .btf_load_err = true,
+},
+{
+ .descr = "type name '?foo' is not ok",
+ .raw_types = {
+ /* union ?foo; */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0?foo"),
+ .err_str = "Invalid name",
+ .btf_load_err = true,
+},
+
+{
+ .descr = "float test #1, well-formed",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
+ /* [1] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [2] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 4), /* [3] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 8), /* [4] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 12), /* [5] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 16), /* [6] */
+ BTF_STRUCT_ENC(NAME_TBD, 5, 48), /* [7] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+ BTF_MEMBER_ENC(NAME_TBD, 3, 32),
+ BTF_MEMBER_ENC(NAME_TBD, 4, 64),
+ BTF_MEMBER_ENC(NAME_TBD, 5, 128),
+ BTF_MEMBER_ENC(NAME_TBD, 6, 256),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0_Float16\0float\0double\0_Float80\0long_double"
+ "\0floats\0a\0b\0c\0d\0e"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "float_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 48,
+ .key_type_id = 1,
+ .value_type_id = 7,
+ .max_entries = 1,
+},
+{
+ .descr = "float test #2, invalid vlen",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
+ /* [1] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 1), 4),
+ /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0float"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "float_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 2,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "vlen != 0",
+},
+{
+ .descr = "float test #3, invalid kind_flag",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
+ /* [1] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FLOAT, 1, 0), 4),
+ /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0float"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "float_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 2,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid btf_info kind_flag",
+},
+{
+ .descr = "float test #4, member does not fit",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
+ /* [1] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 4), /* [2] */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 2), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0float\0floats\0x"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "float_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 3,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Member exceeds struct_size",
+},
+{
+ .descr = "float test #5, member is not properly aligned",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
+ /* [1] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 4), /* [2] */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 8),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0float\0floats\0x"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "float_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 3,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Member is not properly aligned",
+},
+{
+ .descr = "float test #6, invalid size",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
+ /* [1] */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 6), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0float"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "float_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 6,
+ .key_type_id = 1,
+ .value_type_id = 2,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid type_size",
+},
+
+{
+ .descr = "decl_tag test #1, struct/member, well-formed",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_STRUCT_ENC(0, 2, 8), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_MEMBER_ENC(NAME_TBD, 1, 32),
+ BTF_DECL_TAG_ENC(NAME_TBD, 2, -1),
+ BTF_DECL_TAG_ENC(NAME_TBD, 2, 0),
+ BTF_DECL_TAG_ENC(NAME_TBD, 2, 1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0m1\0m2\0tag1\0tag2\0tag3"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 8,
+ .key_type_id = 1,
+ .value_type_id = 2,
+ .max_entries = 1,
+},
+{
+ .descr = "decl_tag test #2, union/member, well-formed",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_UNION_ENC(NAME_TBD, 2, 4), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_DECL_TAG_ENC(NAME_TBD, 2, -1),
+ BTF_DECL_TAG_ENC(NAME_TBD, 2, 0),
+ BTF_DECL_TAG_ENC(NAME_TBD, 2, 1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 2,
+ .max_entries = 1,
+},
+{
+ .descr = "decl_tag test #3, variable, well-formed",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
+ BTF_VAR_ENC(NAME_TBD, 1, 1), /* [3] */
+ BTF_DECL_TAG_ENC(NAME_TBD, 2, -1),
+ BTF_DECL_TAG_ENC(NAME_TBD, 3, -1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0local\0global\0tag1\0tag2"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+},
+{
+ .descr = "decl_tag test #4, func/parameter, well-formed",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_DECL_TAG_ENC(NAME_TBD, 3, -1),
+ BTF_DECL_TAG_ENC(NAME_TBD, 3, 0),
+ BTF_DECL_TAG_ENC(NAME_TBD, 3, 1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0arg1\0arg2\0f\0tag1\0tag2\0tag3"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+},
+{
+ .descr = "decl_tag test #5, invalid value",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
+ BTF_DECL_TAG_ENC(0, 2, -1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0local\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid value",
+},
+{
+ .descr = "decl_tag test #6, invalid target type",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_DECL_TAG_ENC(NAME_TBD, 1, -1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid type",
+},
+{
+ .descr = "decl_tag test #7, invalid vlen",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 1), 2), (0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0local\0tag1"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "vlen != 0",
+},
+{
+ .descr = "decl_tag test #8, tag with kflag",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
+ BTF_DECL_ATTR_ENC(NAME_TBD, 2, -1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0local\0tag1"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+},
+{
+ .descr = "decl_tag test #9, var, invalid component_idx",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
+ BTF_DECL_TAG_ENC(NAME_TBD, 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0local\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid component_idx",
+},
+{
+ .descr = "decl_tag test #10, struct member, invalid component_idx",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_STRUCT_ENC(0, 2, 8), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_MEMBER_ENC(NAME_TBD, 1, 32),
+ BTF_DECL_TAG_ENC(NAME_TBD, 2, 2),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0m1\0m2\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 8,
+ .key_type_id = 1,
+ .value_type_id = 2,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid component_idx",
+},
+{
+ .descr = "decl_tag test #11, func parameter, invalid component_idx",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_DECL_TAG_ENC(NAME_TBD, 3, 2),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0arg1\0arg2\0f\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid component_idx",
+},
+{
+ .descr = "decl_tag test #12, < -1 component_idx",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_DECL_TAG_ENC(NAME_TBD, 3, -2),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0arg1\0arg2\0f\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid component_idx",
+},
+{
+ .descr = "decl_tag test #13, typedef, well-formed",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */
+ BTF_DECL_TAG_ENC(NAME_TBD, 2, -1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0t\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+},
+{
+ .descr = "decl_tag test #14, typedef, invalid component_idx",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */
+ BTF_DECL_TAG_ENC(NAME_TBD, 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0local\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid component_idx",
+},
+{
+ .descr = "decl_tag test #15, func, invalid func proto",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_DECL_TAG_ENC(NAME_TBD, 3, 0), /* [2] */
+ BTF_FUNC_ENC(NAME_TBD, 8), /* [3] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag\0func"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid type_id",
+},
+{
+ .descr = "decl_tag test #16, func proto, return type",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), 2), (-1), /* [3] */
+ BTF_FUNC_PROTO_ENC(3, 0), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0local\0tag1"),
+ .btf_load_err = true,
+ .err_str = "Invalid return type",
+},
+{
+ .descr = "decl_tag test #17, func proto, argument",
+ .raw_types = {
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), 4), (-1), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0), /* [2] */
+ BTF_FUNC_PROTO_ENC(0, 1), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_VAR_ENC(NAME_TBD, 2, 0), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0local\0tag1\0var"),
+ .btf_load_err = true,
+ .err_str = "Invalid arg#1",
+},
+{
+ .descr = "decl_tag test #18, decl_tag as the map key type",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_STRUCT_ENC(0, 2, 8), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_MEMBER_ENC(NAME_TBD, 1, 32),
+ BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), /* [3] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0m1\0m2\0tag"),
+ .map_type = BPF_MAP_TYPE_HASH,
+ .map_name = "tag_type_check_btf",
+ .key_size = 8,
+ .value_size = 4,
+ .key_type_id = 3,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .map_create_err = true,
+},
+{
+ .descr = "decl_tag test #19, decl_tag as the map value type",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_STRUCT_ENC(0, 2, 8), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_MEMBER_ENC(NAME_TBD, 1, 32),
+ BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), /* [3] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0m1\0m2\0tag"),
+ .map_type = BPF_MAP_TYPE_HASH,
+ .map_name = "tag_type_check_btf",
+ .key_size = 4,
+ .value_size = 8,
+ .key_type_id = 1,
+ .value_type_id = 3,
+ .max_entries = 1,
+ .map_create_err = true,
+},
+{
+ .descr = "type_tag test #1",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [2] */
+ BTF_PTR_ENC(2), /* [3] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+},
+{
+ .descr = "type_tag test #2, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_CONST_ENC(3), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [3] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
+{
+ .descr = "type_tag test #3, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(4), /* [3] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
+{
+ .descr = "type_tag test #4, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(4), /* [3] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
+{
+ .descr = "type_tag test #5, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(1), /* [3] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+},
+{
+ .descr = "type_tag test #6, type tag order",
+ .raw_types = {
+ BTF_PTR_ENC(2), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(4), /* [3] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [4] */
+ BTF_PTR_ENC(6), /* [5] */
+ BTF_CONST_ENC(2), /* [6] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
+{
+ .descr = "type_tag test #7, tag with kflag",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ATTR_ENC(NAME_TBD, 1), /* [2] */
+ BTF_PTR_ENC(2), /* [3] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+},
+{
+ .descr = "enum64 test #1, unsigned, size 8",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 2), 8), /* [2] */
+ BTF_ENUM64_ENC(NAME_TBD, 0, 0),
+ BTF_ENUM64_ENC(NAME_TBD, 1, 1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0a\0b\0c"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 8,
+ .key_type_id = 1,
+ .value_type_id = 2,
+ .max_entries = 1,
+},
+{
+ .descr = "enum64 test #2, signed, size 4",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 1, 2), 4), /* [2] */
+ BTF_ENUM64_ENC(NAME_TBD, -1, 0),
+ BTF_ENUM64_ENC(NAME_TBD, 1, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0a\0b\0c"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 2,
+ .max_entries = 1,
+},
}; /* struct btf_raw_test raw_tests[] */
@@ -3616,20 +4364,40 @@ static void *btf_raw_create(const struct btf_header *hdr,
next_str_idx < strs_cnt ? strs_idx[next_str_idx] : NULL;
done:
+ free(strs_idx);
if (err) {
- if (raw_btf)
- free(raw_btf);
- if (strs_idx)
- free(strs_idx);
+ free(raw_btf);
return NULL;
}
return raw_btf;
}
+static int load_raw_btf(const void *raw_data, size_t raw_size)
+{
+ LIBBPF_OPTS(bpf_btf_load_opts, opts);
+ int btf_fd;
+
+ if (always_log) {
+		opts.log_buf = btf_log_buf;
+		opts.log_size = BTF_LOG_BUF_SIZE;
+ opts.log_level = 1;
+ }
+
+ btf_fd = bpf_btf_load(raw_data, raw_size, &opts);
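+	/* on failure, retry with the log enabled so the kernel's BTF
+	 * verifier output lands in btf_log_buf for the CHECK() below
+	 */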
+ if (btf_fd < 0 && !always_log) {
+		opts.log_buf = btf_log_buf;
+		opts.log_size = BTF_LOG_BUF_SIZE;
+ opts.log_level = 1;
+ btf_fd = bpf_btf_load(raw_data, raw_size, &opts);
+ }
+
+ return btf_fd;
+}
+
static void do_test_raw(unsigned int test_num)
{
struct btf_raw_test *test = &raw_tests[test_num - 1];
- struct bpf_create_map_attr create_attr = {};
+ LIBBPF_OPTS(bpf_map_create_opts, opts);
int map_fd = -1, btf_fd = -1;
unsigned int raw_btf_size;
struct btf_header *hdr;
@@ -3655,44 +4423,40 @@ static void do_test_raw(unsigned int test_num)
hdr->str_len = (int)hdr->str_len + test->str_len_delta;
*btf_log_buf = '\0';
- btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
- btf_log_buf, BTF_LOG_BUF_SIZE,
- always_log);
+ btf_fd = load_raw_btf(raw_btf, raw_btf_size);
free(raw_btf);
- err = ((btf_fd == -1) != test->btf_load_err);
+ err = ((btf_fd < 0) != test->btf_load_err);
if (CHECK(err, "btf_fd:%d test->btf_load_err:%u",
btf_fd, test->btf_load_err) ||
CHECK(test->err_str && !strstr(btf_log_buf, test->err_str),
- "expected err_str:%s", test->err_str)) {
+ "expected err_str:%s\n", test->err_str)) {
err = -1;
goto done;
}
- if (err || btf_fd == -1)
+ if (err || btf_fd < 0)
goto done;
- create_attr.name = test->map_name;
- create_attr.map_type = test->map_type;
- create_attr.key_size = test->key_size;
- create_attr.value_size = test->value_size;
- create_attr.max_entries = test->max_entries;
- create_attr.btf_fd = btf_fd;
- create_attr.btf_key_type_id = test->key_type_id;
- create_attr.btf_value_type_id = test->value_type_id;
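+	/* a zero map_type means this test only exercises BTF loading, not map creation */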
+ if (!test->map_type)
+ goto done;
- map_fd = bpf_create_map_xattr(&create_attr);
+ opts.btf_fd = btf_fd;
+ opts.btf_key_type_id = test->key_type_id;
+ opts.btf_value_type_id = test->value_type_id;
+ map_fd = bpf_map_create(test->map_type, test->map_name,
+ test->key_size, test->value_size, test->max_entries, &opts);
- err = ((map_fd == -1) != test->map_create_err);
+ err = ((map_fd < 0) != test->map_create_err);
CHECK(err, "map_fd:%d test->map_create_err:%u",
map_fd, test->map_create_err);
done:
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
- if (btf_fd != -1)
+ if (btf_fd >= 0)
close(btf_fd);
- if (map_fd != -1)
+ if (map_fd >= 0)
close(map_fd);
}
@@ -3787,10 +4551,8 @@ static int test_big_btf_info(unsigned int test_num)
goto done;
}
- btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
- btf_log_buf, BTF_LOG_BUF_SIZE,
- always_log);
- if (CHECK(btf_fd == -1, "errno:%d", errno)) {
+ btf_fd = load_raw_btf(raw_btf, raw_btf_size);
+ if (CHECK(btf_fd < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
@@ -3806,7 +4568,7 @@ static int test_big_btf_info(unsigned int test_num)
info->btf = ptr_to_u64(user_btf);
info->btf_size = raw_btf_size;
- err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len);
+ err = bpf_btf_get_info_by_fd(btf_fd, info, &info_len);
if (CHECK(!err, "!err")) {
err = -1;
goto done;
@@ -3819,7 +4581,7 @@ static int test_big_btf_info(unsigned int test_num)
* to userspace.
*/
info_garbage.garbage = 0;
- err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len);
+ err = bpf_btf_get_info_by_fd(btf_fd, info, &info_len);
if (CHECK(err || info_len != sizeof(*info),
"err:%d errno:%d info_len:%u sizeof(*info):%zu",
err, errno, info_len, sizeof(*info))) {
@@ -3836,7 +4598,7 @@ done:
free(raw_btf);
free(user_btf);
- if (btf_fd != -1)
+ if (btf_fd >= 0)
close(btf_fd);
return err;
@@ -3845,7 +4607,7 @@ done:
static int test_btf_id(unsigned int test_num)
{
const struct btf_get_info_test *test = &get_info_tests[test_num - 1];
- struct bpf_create_map_attr create_attr = {};
+ LIBBPF_OPTS(bpf_map_create_opts, opts);
uint8_t *raw_btf = NULL, *user_btf[2] = {};
int btf_fd[2] = {-1, -1}, map_fd = -1;
struct bpf_map_info map_info = {};
@@ -3875,30 +4637,28 @@ static int test_btf_id(unsigned int test_num)
info[i].btf_size = raw_btf_size;
}
- btf_fd[0] = bpf_load_btf(raw_btf, raw_btf_size,
- btf_log_buf, BTF_LOG_BUF_SIZE,
- always_log);
- if (CHECK(btf_fd[0] == -1, "errno:%d", errno)) {
+ btf_fd[0] = load_raw_btf(raw_btf, raw_btf_size);
+ if (CHECK(btf_fd[0] < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
/* Test BPF_OBJ_GET_INFO_BY_ID on btf_id */
info_len = sizeof(info[0]);
- err = bpf_obj_get_info_by_fd(btf_fd[0], &info[0], &info_len);
+ err = bpf_btf_get_info_by_fd(btf_fd[0], &info[0], &info_len);
if (CHECK(err, "errno:%d", errno)) {
err = -1;
goto done;
}
btf_fd[1] = bpf_btf_get_fd_by_id(info[0].id);
- if (CHECK(btf_fd[1] == -1, "errno:%d", errno)) {
+ if (CHECK(btf_fd[1] < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
ret = 0;
- err = bpf_obj_get_info_by_fd(btf_fd[1], &info[1], &info_len);
+ err = bpf_btf_get_info_by_fd(btf_fd[1], &info[1], &info_len);
if (CHECK(err || info[0].id != info[1].id ||
info[0].btf_size != info[1].btf_size ||
(ret = memcmp(user_btf[0], user_btf[1], info[0].btf_size)),
@@ -3910,23 +4670,18 @@ static int test_btf_id(unsigned int test_num)
}
/* Test btf members in struct bpf_map_info */
- create_attr.name = "test_btf_id";
- create_attr.map_type = BPF_MAP_TYPE_ARRAY;
- create_attr.key_size = sizeof(int);
- create_attr.value_size = sizeof(unsigned int);
- create_attr.max_entries = 4;
- create_attr.btf_fd = btf_fd[0];
- create_attr.btf_key_type_id = 1;
- create_attr.btf_value_type_id = 2;
-
- map_fd = bpf_create_map_xattr(&create_attr);
- if (CHECK(map_fd == -1, "errno:%d", errno)) {
+ opts.btf_fd = btf_fd[0];
+ opts.btf_key_type_id = 1;
+ opts.btf_value_type_id = 2;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_btf_id",
+ sizeof(int), sizeof(int), 4, &opts);
+ if (CHECK(map_fd < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
info_len = sizeof(map_info);
- err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
+ err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
if (CHECK(err || map_info.btf_id != info[0].id ||
map_info.btf_key_type_id != 1 || map_info.btf_value_type_id != 2,
"err:%d errno:%d info.id:%u btf_id:%u btf_key_type_id:%u btf_value_type_id:%u",
@@ -3943,7 +4698,7 @@ static int test_btf_id(unsigned int test_num)
/* Test BTF ID is removed from the kernel */
btf_fd[0] = bpf_btf_get_fd_by_id(map_info.btf_id);
- if (CHECK(btf_fd[0] == -1, "errno:%d", errno)) {
+ if (CHECK(btf_fd[0] < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
@@ -3953,11 +4708,6 @@ static int test_btf_id(unsigned int test_num)
/* The map holds the last ref to BTF and its btf_id */
close(map_fd);
map_fd = -1;
- btf_fd[0] = bpf_btf_get_fd_by_id(map_info.btf_id);
- if (CHECK(btf_fd[0] != -1, "BTF lingers")) {
- err = -1;
- goto done;
- }
fprintf(stderr, "OK");
@@ -3966,11 +4716,11 @@ done:
fprintf(stderr, "\n%s", btf_log_buf);
free(raw_btf);
- if (map_fd != -1)
+ if (map_fd >= 0)
close(map_fd);
for (i = 0; i < 2; i++) {
free(user_btf[i]);
- if (btf_fd[i] != -1)
+ if (btf_fd[i] >= 0)
close(btf_fd[i]);
}
@@ -4012,10 +4762,8 @@ static void do_test_get_info(unsigned int test_num)
goto done;
}
- btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
- btf_log_buf, BTF_LOG_BUF_SIZE,
- always_log);
- if (CHECK(btf_fd == -1, "errno:%d", errno)) {
+ btf_fd = load_raw_btf(raw_btf, raw_btf_size);
+	if (CHECK(btf_fd < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
@@ -4031,7 +4779,7 @@ static void do_test_get_info(unsigned int test_num)
info.btf_size = user_btf_size;
ret = 0;
- err = bpf_obj_get_info_by_fd(btf_fd, &info, &info_len);
+ err = bpf_btf_get_info_by_fd(btf_fd, &info, &info_len);
if (CHECK(err || !info.id || info_len != sizeof(info) ||
info.btf_size != raw_btf_size ||
(ret = memcmp(raw_btf, user_btf, expected_nbytes)),
@@ -4061,7 +4809,7 @@ done:
free(raw_btf);
free(user_btf);
- if (btf_fd != -1)
+ if (btf_fd >= 0)
close(btf_fd);
}
@@ -4071,9 +4819,8 @@ struct btf_file_test {
};
static struct btf_file_test file_tests[] = {
- { .file = "test_btf_haskv.o", },
- { .file = "test_btf_newkv.o", },
- { .file = "test_btf_nokv.o", .btf_kv_notfound = true, },
+ { .file = "test_btf_newkv.bpf.o", },
+ { .file = "test_btf_nokv.bpf.o", .btf_kv_notfound = true, },
};
static void do_test_file(unsigned int test_num)
@@ -4098,8 +4845,9 @@ static void do_test_file(unsigned int test_num)
return;
btf = btf__parse_elf(test->file, &btf_ext);
- if (IS_ERR(btf)) {
- if (PTR_ERR(btf) == -ENOENT) {
+ err = libbpf_get_error(btf);
+ if (err) {
+ if (err == -ENOENT) {
printf("%s:SKIP: No ELF %s found", __func__, BTF_ELF_SEC);
test__skip();
return;
@@ -4111,11 +4859,14 @@ static void do_test_file(unsigned int test_num)
has_btf_ext = btf_ext != NULL;
btf_ext__free(btf_ext);
+	/* temporarily disable LIBBPF_STRICT_MAP_DEFINITIONS to test legacy maps */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);
obj = bpf_object__open(test->file);
- if (CHECK(IS_ERR(obj), "obj: %ld", PTR_ERR(obj)))
+ err = libbpf_get_error(obj);
+ if (CHECK(err, "obj: %d", err))
return;
- prog = bpf_program__next(NULL, obj);
+ prog = bpf_object__next_program(obj, NULL);
if (CHECK(!prog, "Cannot find bpf_prog")) {
err = -1;
goto done;
@@ -4145,9 +4896,9 @@ static void do_test_file(unsigned int test_num)
/* get necessary program info */
info_len = sizeof(struct bpf_prog_info);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
- if (CHECK(err == -1, "invalid get info (1st) errno:%d", errno)) {
+ if (CHECK(err < 0, "invalid get info (1st) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
err = -1;
goto done;
@@ -4177,9 +4928,9 @@ static void do_test_file(unsigned int test_num)
info.func_info_rec_size = rec_size;
info.func_info = ptr_to_u64(func_info);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
- if (CHECK(err == -1, "invalid get info (2nd) errno:%d", errno)) {
+ if (CHECK(err < 0, "invalid get info (2nd) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
err = -1;
goto done;
@@ -4197,7 +4948,8 @@ static void do_test_file(unsigned int test_num)
goto done;
}
- err = btf__get_from_id(info.btf_id, &btf);
+ btf = btf__load_from_kernel_by_id(info.btf_id);
+ err = libbpf_get_error(btf);
if (CHECK(err, "cannot get btf from kernel, err: %d", err))
goto done;
@@ -4233,6 +4985,9 @@ skip:
fprintf(stderr, "OK");
done:
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
+ btf__free(btf);
free(func_info);
bpf_object__close(obj);
}
@@ -4280,7 +5035,7 @@ struct pprint_mapv_int128 {
static struct btf_raw_test pprint_test_template[] = {
{
.raw_types = {
- /* unsighed char */ /* [1] */
+ /* unsigned char */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
/* unsigned short */ /* [2] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2),
@@ -4347,7 +5102,7 @@ static struct btf_raw_test pprint_test_template[] = {
* be encoded with kind_flag set.
*/
.raw_types = {
- /* unsighed char */ /* [1] */
+ /* unsigned char */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
/* unsigned short */ /* [2] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2),
@@ -4414,7 +5169,7 @@ static struct btf_raw_test pprint_test_template[] = {
* will have both int and enum types.
*/
.raw_types = {
- /* unsighed char */ /* [1] */
+ /* unsigned char */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
/* unsigned short */ /* [2] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2),
@@ -4583,6 +5338,7 @@ static size_t get_pprint_mapv_size(enum pprint_mapv_kind_t mapv_kind)
#endif
assert(0);
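+	/* unreachable, but keeps compilers happy about a missing return value */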
+ return 0;
}
static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind,
@@ -4704,7 +5460,7 @@ static void do_test_pprint(int test_num)
{
const struct btf_raw_test *test = &pprint_test_template[test_num];
enum pprint_mapv_kind_t mapv_kind = test->mapv_kind;
- struct bpf_create_map_attr create_attr = {};
+ LIBBPF_OPTS(bpf_map_create_opts, opts);
bool ordered_map, lossless_map, percpu_map;
int err, ret, num_cpus, rounded_value_size;
unsigned int key, nr_read_elems;
@@ -4730,27 +5486,20 @@ static void do_test_pprint(int test_num)
return;
*btf_log_buf = '\0';
- btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
- btf_log_buf, BTF_LOG_BUF_SIZE,
- always_log);
+ btf_fd = load_raw_btf(raw_btf, raw_btf_size);
free(raw_btf);
- if (CHECK(btf_fd == -1, "errno:%d", errno)) {
+ if (CHECK(btf_fd < 0, "errno:%d\n", errno)) {
err = -1;
goto done;
}
- create_attr.name = test->map_name;
- create_attr.map_type = test->map_type;
- create_attr.key_size = test->key_size;
- create_attr.value_size = test->value_size;
- create_attr.max_entries = test->max_entries;
- create_attr.btf_fd = btf_fd;
- create_attr.btf_key_type_id = test->key_type_id;
- create_attr.btf_value_type_id = test->value_type_id;
-
- map_fd = bpf_create_map_xattr(&create_attr);
- if (CHECK(map_fd == -1, "errno:%d", errno)) {
+ opts.btf_fd = btf_fd;
+ opts.btf_key_type_id = test->key_type_id;
+ opts.btf_value_type_id = test->value_type_id;
+ map_fd = bpf_map_create(test->map_type, test->map_name,
+ test->key_size, test->value_size, test->max_entries, &opts);
+ if (CHECK(map_fd < 0, "errno:%d", errno)) {
err = -1;
goto done;
}
@@ -4758,7 +5507,7 @@ static void do_test_pprint(int test_num)
ret = snprintf(pin_path, sizeof(pin_path), "%s/%s",
"/sys/fs/bpf", test->map_name);
- if (CHECK(ret == sizeof(pin_path), "pin_path %s/%s is too long",
+ if (CHECK(ret >= sizeof(pin_path), "pin_path %s/%s is too long",
"/sys/fs/bpf", test->map_name)) {
err = -1;
goto done;
@@ -4831,7 +5580,7 @@ static void do_test_pprint(int test_num)
err = check_line(expected_line, nexpected_line,
sizeof(expected_line), line);
- if (err == -1)
+ if (err < 0)
goto done;
}
@@ -4847,7 +5596,7 @@ static void do_test_pprint(int test_num)
cpu, cmapv);
err = check_line(expected_line, nexpected_line,
sizeof(expected_line), line);
- if (err == -1)
+ if (err < 0)
goto done;
cmapv = cmapv + rounded_value_size;
@@ -4885,9 +5634,9 @@ done:
fprintf(stderr, "OK");
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
- if (btf_fd != -1)
+ if (btf_fd >= 0)
close(btf_fd);
- if (map_fd != -1)
+ if (map_fd >= 0)
close(map_fd);
if (pin_file)
fclose(pin_file);
@@ -5798,8 +6547,8 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
/* get necessary lens */
info_len = sizeof(struct bpf_prog_info);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
- if (CHECK(err == -1, "invalid get info (1st) errno:%d", errno)) {
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
+ if (CHECK(err < 0, "invalid get info (1st) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
return -1;
}
@@ -5828,8 +6577,8 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
info.nr_func_info = nr_func_info;
info.func_info_rec_size = rec_size;
info.func_info = ptr_to_u64(func_info);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
- if (CHECK(err == -1, "invalid get info (2nd) errno:%d", errno)) {
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
+ if (CHECK(err < 0, "invalid get info (2nd) errno:%d", errno)) {
fprintf(stderr, "%s\n", btf_log_buf);
err = -1;
goto done;
@@ -5892,8 +6641,8 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
nr_jited_func_lens = nr_jited_ksyms;
info_len = sizeof(struct bpf_prog_info);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
- if (CHECK(err == -1, "err:%d errno:%d", err, errno)) {
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
+ if (CHECK(err < 0, "err:%d errno:%d", err, errno)) {
err = -1;
goto done;
}
@@ -5966,13 +6715,13 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
info.jited_func_lens = ptr_to_u64(jited_func_lens);
}
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
/*
* Only recheck the info.*line_info* fields.
* Other fields are not the concern of this test.
*/
- if (CHECK(err == -1 ||
+ if (CHECK(err < 0 ||
info.nr_line_info != cnt ||
(jited_cnt && !info.jited_line_info) ||
info.nr_jited_line_info != jited_cnt ||
@@ -6088,7 +6837,7 @@ done:
static void do_test_info_raw(unsigned int test_num)
{
const struct prog_info_raw_test *test = &info_raw_tests[test_num - 1];
- unsigned int raw_btf_size, linfo_str_off, linfo_size;
+ unsigned int raw_btf_size, linfo_str_off, linfo_size = 0;
int btf_fd = -1, prog_fd = -1, err = 0;
void *raw_btf, *patched_linfo = NULL;
const char *ret_next_str;
@@ -6104,12 +6853,10 @@ static void do_test_info_raw(unsigned int test_num)
return;
*btf_log_buf = '\0';
- btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
- btf_log_buf, BTF_LOG_BUF_SIZE,
- always_log);
+ btf_fd = load_raw_btf(raw_btf, raw_btf_size);
free(raw_btf);
- if (CHECK(btf_fd == -1, "invalid btf_fd errno:%d", errno)) {
+ if (CHECK(btf_fd < 0, "invalid btf_fd errno:%d", errno)) {
err = -1;
goto done;
}
@@ -6122,7 +6869,8 @@ static void do_test_info_raw(unsigned int test_num)
patched_linfo = patch_name_tbd(test->line_info,
test->str_sec, linfo_str_off,
test->str_sec_size, &linfo_size);
- if (IS_ERR(patched_linfo)) {
+ err = libbpf_get_error(patched_linfo);
+ if (err) {
fprintf(stderr, "error in creating raw bpf_line_info");
err = -1;
goto done;
@@ -6146,7 +6894,7 @@ static void do_test_info_raw(unsigned int test_num)
}
prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
- err = ((prog_fd == -1) != test->expected_prog_load_failure);
+ err = ((prog_fd < 0) != test->expected_prog_load_failure);
if (CHECK(err, "prog_fd:%d expected_prog_load_failure:%u errno:%d",
prog_fd, test->expected_prog_load_failure, errno) ||
CHECK(test->err_str && !strstr(btf_log_buf, test->err_str),
@@ -6155,7 +6903,7 @@ static void do_test_info_raw(unsigned int test_num)
goto done;
}
- if (prog_fd == -1)
+ if (prog_fd < 0)
goto done;
err = test_get_finfo(test, prog_fd);
@@ -6172,12 +6920,12 @@ done:
if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
- if (btf_fd != -1)
+ if (btf_fd >= 0)
close(btf_fd);
- if (prog_fd != -1)
+ if (prog_fd >= 0)
close(prog_fd);
- if (!IS_ERR(patched_linfo))
+ if (!libbpf_get_error(patched_linfo))
free(patched_linfo);
}
@@ -6194,7 +6942,7 @@ struct btf_dedup_test {
struct btf_dedup_opts opts;
};
-const struct btf_dedup_test dedup_tests[] = {
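+/* no longer const: do_test_dedup() fills in opts.sz before calling btf__dedup() */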
+static struct btf_dedup_test dedup_tests[] = {
{
.descr = "dedup: unused strings filtering",
@@ -6214,9 +6962,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0int\0long"),
},
- .opts = {
- .dont_resolve_fwds = false,
- },
},
{
.descr = "dedup: strings deduplication",
@@ -6239,9 +6984,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0int\0long int"),
},
- .opts = {
- .dont_resolve_fwds = false,
- },
},
{
.descr = "dedup: struct example #1",
@@ -6260,57 +7002,67 @@ const struct btf_dedup_test dedup_tests[] = {
/* int[16] */
BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */
/* struct s { */
- BTF_STRUCT_ENC(NAME_NTH(2), 4, 84), /* [3] */
+ BTF_STRUCT_ENC(NAME_NTH(2), 5, 88), /* [3] */
BTF_MEMBER_ENC(NAME_NTH(3), 4, 0), /* struct s *next; */
BTF_MEMBER_ENC(NAME_NTH(4), 5, 64), /* const int *a; */
BTF_MEMBER_ENC(NAME_NTH(5), 2, 128), /* int b[16]; */
BTF_MEMBER_ENC(NAME_NTH(6), 1, 640), /* int c; */
+ BTF_MEMBER_ENC(NAME_NTH(8), 15, 672), /* float d; */
/* ptr -> [3] struct s */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> [6] const int */
BTF_PTR_ENC(6), /* [5] */
/* const -> [1] int */
BTF_CONST_ENC(1), /* [6] */
+ /* tag -> [3] struct s */
+ BTF_DECL_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */
+ /* tag -> [3] struct s, member 1 */
+ BTF_DECL_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */
/* full copy of the above */
- BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [7] */
- BTF_TYPE_ARRAY_ENC(7, 7, 16), /* [8] */
- BTF_STRUCT_ENC(NAME_NTH(2), 4, 84), /* [9] */
- BTF_MEMBER_ENC(NAME_NTH(3), 10, 0),
- BTF_MEMBER_ENC(NAME_NTH(4), 11, 64),
- BTF_MEMBER_ENC(NAME_NTH(5), 8, 128),
- BTF_MEMBER_ENC(NAME_NTH(6), 7, 640),
- BTF_PTR_ENC(9), /* [10] */
- BTF_PTR_ENC(12), /* [11] */
- BTF_CONST_ENC(7), /* [12] */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [9] */
+ BTF_TYPE_ARRAY_ENC(9, 9, 16), /* [10] */
+ BTF_STRUCT_ENC(NAME_NTH(2), 5, 88), /* [11] */
+ BTF_MEMBER_ENC(NAME_NTH(3), 12, 0),
+ BTF_MEMBER_ENC(NAME_NTH(4), 13, 64),
+ BTF_MEMBER_ENC(NAME_NTH(5), 10, 128),
+ BTF_MEMBER_ENC(NAME_NTH(6), 9, 640),
+ BTF_MEMBER_ENC(NAME_NTH(8), 15, 672),
+ BTF_PTR_ENC(11), /* [12] */
+ BTF_PTR_ENC(14), /* [13] */
+ BTF_CONST_ENC(9), /* [14] */
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [15] */
+ BTF_DECL_TAG_ENC(NAME_NTH(2), 11, -1), /* [16] */
+ BTF_DECL_TAG_ENC(NAME_NTH(2), 11, 1), /* [17] */
BTF_END_RAW,
},
- BTF_STR_SEC("\0int\0s\0next\0a\0b\0c\0"),
+ BTF_STR_SEC("\0int\0s\0next\0a\0b\0c\0float\0d"),
},
.expect = {
.raw_types = {
/* int */
- BTF_TYPE_INT_ENC(NAME_NTH(4), BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* int[16] */
BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */
/* struct s { */
- BTF_STRUCT_ENC(NAME_NTH(6), 4, 84), /* [3] */
- BTF_MEMBER_ENC(NAME_NTH(5), 4, 0), /* struct s *next; */
+ BTF_STRUCT_ENC(NAME_NTH(8), 5, 88), /* [3] */
+ BTF_MEMBER_ENC(NAME_NTH(7), 4, 0), /* struct s *next; */
BTF_MEMBER_ENC(NAME_NTH(1), 5, 64), /* const int *a; */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 128), /* int b[16]; */
BTF_MEMBER_ENC(NAME_NTH(3), 1, 640), /* int c; */
+ BTF_MEMBER_ENC(NAME_NTH(4), 9, 672), /* float d; */
/* ptr -> [3] struct s */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> [6] const int */
BTF_PTR_ENC(6), /* [5] */
/* const -> [1] int */
BTF_CONST_ENC(1), /* [6] */
+ BTF_DECL_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */
+ BTF_DECL_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [9] */
BTF_END_RAW,
},
- BTF_STR_SEC("\0a\0b\0c\0int\0next\0s"),
- },
- .opts = {
- .dont_resolve_fwds = false,
+ BTF_STR_SEC("\0a\0b\0c\0d\0int\0float\0next\0s"),
},
},
{
@@ -6354,8 +7106,7 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_STR_SEC("\0s\0x"),
},
.opts = {
- .dont_resolve_fwds = false,
- .dedup_table_size = 1, /* force hash collisions */
+ .force_collisions = true, /* force hash collisions */
},
},
{
@@ -6401,8 +7152,7 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_STR_SEC("\0s\0x"),
},
.opts = {
- .dont_resolve_fwds = false,
- .dedup_table_size = 1, /* force hash collisions */
+ .force_collisions = true, /* force hash collisions */
},
},
{
@@ -6426,11 +7176,19 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_RESTRICT_ENC(8), /* [11] restrict */
BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
- BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 18),
BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */
+ BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */
+ BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */
+ BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 2), 8), /* [19] enum64 */
+ BTF_ENUM64_ENC(NAME_TBD, 0, 0),
+ BTF_ENUM64_ENC(NAME_TBD, 1, 1),
BTF_END_RAW,
},
- BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M"),
+ BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R\0S\0T\0U"),
},
.expect = {
.raw_types = {
@@ -6451,18 +7209,23 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_RESTRICT_ENC(8), /* [11] restrict */
BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
- BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 18),
BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
+ BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */
+ BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */
+ BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */
+ BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 2), 8), /* [19] enum64 */
+ BTF_ENUM64_ENC(NAME_TBD, 0, 0),
+ BTF_ENUM64_ENC(NAME_TBD, 1, 1),
BTF_END_RAW,
},
- BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M"),
- },
- .opts = {
- .dont_resolve_fwds = false,
+ BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R\0S\0T\0U"),
},
},
{
- .descr = "dedup: no int duplicates",
+ .descr = "dedup: no int/float duplicates",
.input = {
.raw_types = {
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8),
@@ -6477,9 +7240,15 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8),
/* different byte size */
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ /* all allowed sizes */
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 2),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 4),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 8),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 12),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 16),
BTF_END_RAW,
},
- BTF_STR_SEC("\0int\0some other int"),
+ BTF_STR_SEC("\0int\0some other int\0float"),
},
.expect = {
.raw_types = {
@@ -6495,12 +7264,15 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8),
/* different byte size */
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ /* all allowed sizes */
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 2),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 4),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 8),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 12),
+ BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 16),
BTF_END_RAW,
},
- BTF_STR_SEC("\0int\0some other int"),
- },
- .opts = {
- .dont_resolve_fwds = false,
+ BTF_STR_SEC("\0int\0some other int\0float"),
},
},
{
@@ -6517,7 +7289,7 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_ENUM_ENC(NAME_NTH(4), 456),
/* [4] fwd enum 'e2' after full enum */
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
- /* [5] incompatible fwd enum with different size */
+ /* [5] fwd enum with different size, size does not matter for fwd */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
/* [6] incompatible full enum with different value */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
@@ -6534,18 +7306,13 @@ const struct btf_dedup_test dedup_tests[] = {
/* [2] full enum 'e2' */
BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(4), 456),
- /* [3] incompatible fwd enum with different size */
- BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
- /* [4] incompatible full enum with different value */
+ /* [3] incompatible full enum with different value */
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
BTF_ENUM_ENC(NAME_NTH(2), 321),
BTF_END_RAW,
},
BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
},
- .opts = {
- .dont_resolve_fwds = false,
- },
},
{
.descr = "dedup: datasec and vars pass-through",
@@ -6588,11 +7355,738 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_STR_SEC("\0.bss\0t"),
},
.opts = {
- .dont_resolve_fwds = false,
- .dedup_table_size = 1
+ .force_collisions = true
+ },
+},
+{
+ .descr = "dedup: func/func_arg/var tags",
+ .input = {
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* static int t */
+ BTF_VAR_ENC(NAME_NTH(1), 1, 0), /* [2] */
+ /* void f(int a1, int a2) */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1),
+ BTF_FUNC_ENC(NAME_NTH(4), 3), /* [4] */
+ /* tag -> t */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [6] */
+ /* tag -> func */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [7] */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [8] */
+ /* tag -> func arg a1 */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [9] */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [10] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_VAR_ENC(NAME_NTH(1), 1, 0), /* [2] */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1),
+ BTF_FUNC_ENC(NAME_NTH(4), 3), /* [4] */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [6] */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [7] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"),
+ },
+},
+{
+ .descr = "dedup: func/func_param tags",
+ .input = {
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* void f(int a1, int a2) */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
+ BTF_FUNC_ENC(NAME_NTH(3), 2), /* [3] */
+ /* void f(int a1, int a2) */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [4] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
+ BTF_FUNC_ENC(NAME_NTH(3), 4), /* [5] */
+ /* tag -> f: tag1, tag2 */
+ BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [6] */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 3, -1), /* [7] */
+ /* tag -> f/a2: tag1, tag2 */
+ BTF_DECL_TAG_ENC(NAME_NTH(4), 3, 1), /* [8] */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 3, 1), /* [9] */
+ /* tag -> f: tag1, tag3 */
+ BTF_DECL_TAG_ENC(NAME_NTH(4), 5, -1), /* [10] */
+ BTF_DECL_TAG_ENC(NAME_NTH(6), 5, -1), /* [11] */
+ /* tag -> f/a2: tag1, tag3 */
+ BTF_DECL_TAG_ENC(NAME_NTH(4), 5, 1), /* [12] */
+ BTF_DECL_TAG_ENC(NAME_NTH(6), 5, 1), /* [13] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
+ BTF_FUNC_ENC(NAME_NTH(3), 2), /* [3] */
+ BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [4] */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 3, -1), /* [5] */
+ BTF_DECL_TAG_ENC(NAME_NTH(6), 3, -1), /* [6] */
+ BTF_DECL_TAG_ENC(NAME_NTH(4), 3, 1), /* [7] */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 3, 1), /* [8] */
+ BTF_DECL_TAG_ENC(NAME_NTH(6), 3, 1), /* [9] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"),
+ },
+},
+{
+ .descr = "dedup: struct/struct_member tags",
+ .input = {
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [2] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 1, 0),
+ BTF_MEMBER_ENC(NAME_NTH(3), 1, 32),
+ BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [3] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 1, 0),
+ BTF_MEMBER_ENC(NAME_NTH(3), 1, 32),
+ /* tag -> t: tag1, tag2 */
+ BTF_DECL_TAG_ENC(NAME_NTH(4), 2, -1), /* [4] */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */
+ /* tag -> t/m2: tag1, tag2 */
+ BTF_DECL_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */
+ /* tag -> t: tag1, tag3 */
+ BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [8] */
+ BTF_DECL_TAG_ENC(NAME_NTH(6), 3, -1), /* [9] */
+ /* tag -> t/m2: tag1, tag3 */
+ BTF_DECL_TAG_ENC(NAME_NTH(4), 3, 1), /* [10] */
+ BTF_DECL_TAG_ENC(NAME_NTH(6), 3, 1), /* [11] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [2] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 1, 0),
+ BTF_MEMBER_ENC(NAME_NTH(3), 1, 32),
+ BTF_DECL_TAG_ENC(NAME_NTH(4), 2, -1), /* [3] */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [4] */
+ BTF_DECL_TAG_ENC(NAME_NTH(6), 2, -1), /* [5] */
+ BTF_DECL_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */
+ BTF_DECL_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */
+ BTF_DECL_TAG_ENC(NAME_NTH(6), 2, 1), /* [8] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"),
+ },
+},
+{
+ .descr = "dedup: recursive typedef",
+ /*
+ * This test simulates a recursive typedef, which in GO is defined as such:
+ *
+ * type Foo func() Foo
+ *
+ * In BTF terms, this is represented as a TYPEDEF referencing
+ * a FUNC_PROTO that returns the same TYPEDEF.
+ */
+ .input = {
+ .raw_types = {
+ /*
+ * [1] typedef Foo -> func() Foo
+ * [2] func_proto() -> Foo
+ * [3] typedef Foo -> func() Foo
+ * [4] func_proto() -> Foo
+ */
+ BTF_TYPEDEF_ENC(NAME_NTH(1), 2), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 0), /* [2] */
+ BTF_TYPEDEF_ENC(NAME_NTH(1), 4), /* [3] */
+ BTF_FUNC_PROTO_ENC(3, 0), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0Foo"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPEDEF_ENC(NAME_NTH(1), 2), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 0), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0Foo"),
+ },
+},
+{
+ .descr = "dedup: typedef",
+ /*
+ * // CU 1:
+ * typedef int foo;
+ *
+ * // CU 2:
+ * typedef int foo;
+ */
+ .input = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [2] */
+ /* CU 2 */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [3] */
+ BTF_TYPEDEF_ENC(NAME_NTH(1), 3), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo"),
+ },
+},
+{
+ .descr = "dedup: typedef tags",
+ .input = {
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [3] */
+ /* tag -> t: tag1, tag2 */
+ BTF_DECL_TAG_ENC(NAME_NTH(2), 2, -1), /* [4] */
+ BTF_DECL_TAG_ENC(NAME_NTH(3), 2, -1), /* [5] */
+ /* tag -> t: tag1, tag3 */
+ BTF_DECL_TAG_ENC(NAME_NTH(2), 3, -1), /* [6] */
+ BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [7] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0t\0tag1\0tag2\0tag3"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_DECL_TAG_ENC(NAME_NTH(2), 2, -1), /* [3] */
+ BTF_DECL_TAG_ENC(NAME_NTH(3), 2, -1), /* [4] */
+ BTF_DECL_TAG_ENC(NAME_NTH(4), 2, -1), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0t\0tag1\0tag2\0tag3"),
+ },
+},
+{
+ .descr = "dedup: btf_type_tag #1",
+ .input = {
+ .raw_types = {
+ /* ptr -> tag2 -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> tag2 -> tag1 -> int */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [5] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 5), /* [6] */
+ BTF_PTR_ENC(6), /* [7] */
+ /* ptr -> tag1 -> int */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [8] */
+ BTF_PTR_ENC(8), /* [9] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1\0tag2"),
+ },
+ .expect = {
+ .raw_types = {
+ /* ptr -> tag2 -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> tag1 -> int */
+ BTF_PTR_ENC(2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1\0tag2"),
+ },
+},
+{
+ .descr = "dedup: btf_type_tag #2",
+ .input = {
+ .raw_types = {
+ /* ptr -> tag2 -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> tag2 -> int */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
+ BTF_PTR_ENC(5), /* [6] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1\0tag2"),
+ },
+ .expect = {
+ .raw_types = {
+ /* ptr -> tag2 -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> tag2 -> int */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
+ BTF_PTR_ENC(5), /* [6] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1\0tag2"),
+ },
+},
+{
+ .descr = "dedup: btf_type_tag #3",
+ .input = {
+ .raw_types = {
+ /* ptr -> tag2 -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> tag1 -> tag2 -> int */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 5), /* [6] */
+ BTF_PTR_ENC(6), /* [7] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1\0tag2"),
+ },
+ .expect = {
+ .raw_types = {
+ /* ptr -> tag2 -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> tag1 -> tag2 -> int */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 5), /* [6] */
+ BTF_PTR_ENC(6), /* [7] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1\0tag2"),
+ },
+},
+{
+ .descr = "dedup: btf_type_tag #4",
+ .input = {
+ .raw_types = {
+ /* ptr -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_PTR_ENC(2), /* [3] */
+ /* ptr -> tag1 -> long */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [4] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 4), /* [5] */
+ BTF_PTR_ENC(5), /* [6] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1"),
+ },
+ .expect = {
+ .raw_types = {
+ /* ptr -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_PTR_ENC(2), /* [3] */
+ /* ptr -> tag1 -> long */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [4] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 4), /* [5] */
+ BTF_PTR_ENC(5), /* [6] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1"),
+ },
+},
+{
+ .descr = "dedup: btf_type_tag #5, struct",
+ .input = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_TYPE_ENC(NAME_NTH(2), BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 4), /* [3] */
+ BTF_MEMBER_ENC(NAME_NTH(3), 2, BTF_MEMBER_OFFSET(0, 0)),
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [4] */
+ BTF_TYPE_ENC(NAME_NTH(2), BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 4), /* [5] */
+ BTF_MEMBER_ENC(NAME_NTH(3), 4, BTF_MEMBER_OFFSET(0, 0)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1\0t\0m"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_TYPE_ENC(NAME_NTH(2), BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 4), /* [3] */
+ BTF_MEMBER_ENC(NAME_NTH(3), 2, BTF_MEMBER_OFFSET(0, 0)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1\0t\0m"),
+ },
+},
+{
+ .descr = "dedup: enum64, standalone",
+ .input = {
+ .raw_types = {
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val"),
+ },
+},
+{
+ .descr = "dedup: enum64, fwd resolution",
+ .input = {
+ .raw_types = {
+ /* [1] fwd enum64 'e1' before full enum */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
+ /* [2] full enum64 'e1' after fwd */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
+ /* [3] full enum64 'e2' before fwd */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(4), 0, 456),
+ /* [4] fwd enum64 'e2' after full enum */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
+ /* [5] incompatible full enum64 with different value */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 0, 321),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
+ },
+ .expect = {
+ .raw_types = {
+ /* [1] full enum64 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
+ /* [2] full enum64 'e2' */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(4), 0, 456),
+ /* [3] incompatible full enum64 with different value */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 0, 321),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
+ },
+},
+{
+ .descr = "dedup: enum and enum64, no dedup",
+ .input = {
+ .raw_types = {
+ /* [1] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ /* [2] enum64 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 4),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val"),
+ },
+ .expect = {
+ .raw_types = {
+ /* [1] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ /* [2] enum64 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 4),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val"),
+ },
+},
+{
+ .descr = "dedup: enum of different size: no dedup",
+ .input = {
+ .raw_types = {
+ /* [1] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ /* [2] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 2),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val"),
+ },
+ .expect = {
+ .raw_types = {
+ /* [1] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ /* [2] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 2),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val"),
+ },
+},
+{
+ .descr = "dedup: enum fwd to enum64",
+ .input = {
+ .raw_types = {
+ /* [1] enum64 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 0),
+ /* [2] enum 'e1' fwd */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
+ /* [3] typedef enum 'e1' td */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 2),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0td"),
+ },
+ .expect = {
+ .raw_types = {
+ /* [1] enum64 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 0),
+ /* [2] typedef enum 'e1' td */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0td"),
+ },
+},
+{
+ .descr = "dedup: enum64 fwd to enum",
+ .input = {
+ .raw_types = {
+ /* [1] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ /* [2] enum64 'e1' fwd */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
+ /* [3] typedef enum 'e1' td */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 2),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0td"),
+ },
+ .expect = {
+ .raw_types = {
+ /* [1] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ /* [2] typedef enum 'e1' td */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0td"),
+ },
+},
+{
+ .descr = "dedup: standalone fwd declaration struct",
+ /*
+ * Verify that CU1:foo and CU2:foo would be unified and that
+ * typedef/ptr would be updated to point to CU1:foo.
+ *
+ * // CU 1:
+ * struct foo { int x; };
+ *
+ * // CU 2:
+ * struct foo;
+ * typedef struct foo *foo_ptr;
+ */
+ .input = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ /* CU 2 */
+ BTF_FWD_ENC(NAME_NTH(1), 0), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0foo_ptr"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ BTF_PTR_ENC(1), /* [3] */
+ BTF_TYPEDEF_ENC(NAME_NTH(3), 3), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0foo_ptr"),
+ },
+},
+{
+ .descr = "dedup: standalone fwd declaration union",
+ /*
+ * Verify that CU1:foo and CU2:foo would be unified and that
+ * typedef/ptr would be updated to point to CU1:foo.
+ * Same as "dedup: standalone fwd declaration struct" but for unions.
+ *
+ * // CU 1:
+ * union foo { int x; };
+ *
+ * // CU 2:
+ * union foo;
+ * typedef union foo *foo_ptr;
+ */
+ .input = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_UNION_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ /* CU 2 */
+ BTF_FWD_ENC(NAME_TBD, 1), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0foo_ptr"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_UNION_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ BTF_PTR_ENC(1), /* [3] */
+ BTF_TYPEDEF_ENC(NAME_NTH(3), 3), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0foo_ptr"),
+ },
+},
+{
+ .descr = "dedup: standalone fwd declaration wrong kind",
+ /*
+ * Negative test for btf_dedup_resolve_fwds:
+	 * - CU1:foo is a struct, CU2:foo is a union, thus CU2:foo is not deduped;
+ * - typedef/ptr should remain unchanged as well.
+ *
+ * // CU 1:
+ * struct foo { int x; };
+ *
+ * // CU 2:
+ * union foo;
+ * typedef union foo *foo_ptr;
+ */
+ .input = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ /* CU 2 */
+ BTF_FWD_ENC(NAME_NTH(3), 1), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0foo_ptr"),
+ },
+ .expect = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ /* CU 2 */
+ BTF_FWD_ENC(NAME_NTH(3), 1), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0foo_ptr"),
+ },
+},
+{
+ .descr = "dedup: standalone fwd declaration name conflict",
+ /*
+ * Negative test for btf_dedup_resolve_fwds:
+ * - two candidates for CU2:foo dedup, thus it is unchanged;
+ * - typedef/ptr should remain unchanged as well.
+ *
+ * // CU 1:
+ * struct foo { int x; };
+ *
+ * // CU 2:
+ * struct foo;
+ * typedef struct foo *foo_ptr;
+ *
+ * // CU 3:
+ * struct foo { int x; int y; };
+ */
+ .input = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ /* CU 2 */
+ BTF_FWD_ENC(NAME_NTH(1), 0), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ BTF_TYPEDEF_ENC(NAME_NTH(4), 4), /* [5] */
+ /* CU 3 */
+ BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [6] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_MEMBER_ENC(NAME_NTH(3), 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0y\0foo_ptr"),
+ },
+ .expect = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ /* CU 2 */
+ BTF_FWD_ENC(NAME_NTH(1), 0), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ BTF_TYPEDEF_ENC(NAME_NTH(4), 4), /* [5] */
+ /* CU 3 */
+ BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [6] */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ BTF_MEMBER_ENC(NAME_NTH(3), 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo\0x\0y\0foo_ptr"),
},
},
-
};
static int btf_type_size(const struct btf_type *t)
@@ -6609,11 +8103,15 @@ static int btf_type_size(const struct btf_type *t)
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_TYPE_TAG:
return base_size;
case BTF_KIND_INT:
return base_size + sizeof(__u32);
case BTF_KIND_ENUM:
return base_size + vlen * sizeof(struct btf_enum);
+ case BTF_KIND_ENUM64:
+ return base_size + vlen * sizeof(struct btf_enum64);
case BTF_KIND_ARRAY:
return base_size + sizeof(struct btf_array);
case BTF_KIND_STRUCT:
@@ -6625,6 +8123,8 @@ static int btf_type_size(const struct btf_type *t)
return base_size + sizeof(struct btf_var);
case BTF_KIND_DATASEC:
return base_size + vlen * sizeof(struct btf_var_secinfo);
+ case BTF_KIND_DECL_TAG:
+ return base_size + sizeof(struct btf_decl_tag);
default:
fprintf(stderr, "Unsupported BTF_KIND:%u\n", kind);
return -EINVAL;
@@ -6645,7 +8145,7 @@ static void dump_btf_strings(const char *strs, __u32 len)
static void do_test_dedup(unsigned int test_num)
{
- const struct btf_dedup_test *test = &dedup_tests[test_num - 1];
+ struct btf_dedup_test *test = &dedup_tests[test_num - 1];
__u32 test_nr_types, expect_nr_types, test_btf_size, expect_btf_size;
const struct btf_header *test_hdr, *expect_hdr;
struct btf *test_btf = NULL, *expect_btf = NULL;
@@ -6668,9 +8168,9 @@ static void do_test_dedup(unsigned int test_num)
return;
test_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
+ err = libbpf_get_error(test_btf);
free(raw_btf);
- if (CHECK(IS_ERR(test_btf), "invalid test_btf errno:%ld",
- PTR_ERR(test_btf))) {
+ if (CHECK(err, "invalid test_btf errno:%d", err)) {
err = -1;
goto done;
}
@@ -6682,21 +8182,22 @@ static void do_test_dedup(unsigned int test_num)
if (!raw_btf)
return;
expect_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
+ err = libbpf_get_error(expect_btf);
free(raw_btf);
- if (CHECK(IS_ERR(expect_btf), "invalid expect_btf errno:%ld",
- PTR_ERR(expect_btf))) {
+ if (CHECK(err, "invalid expect_btf errno:%d", err)) {
err = -1;
goto done;
}
- err = btf__dedup(test_btf, NULL, &test->opts);
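+	/* the OPTS-style btf_dedup_opts carries its own size for forward/backward compatibility */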
+ test->opts.sz = sizeof(test->opts);
+ err = btf__dedup(test_btf, &test->opts);
if (CHECK(err, "btf_dedup failed errno:%d", err)) {
err = -1;
goto done;
}
- test_btf_data = btf__get_raw_data(test_btf, &test_btf_size);
- expect_btf_data = btf__get_raw_data(expect_btf, &expect_btf_size);
+ test_btf_data = btf__raw_data(test_btf, &test_btf_size);
+ expect_btf_data = btf__raw_data(expect_btf, &expect_btf_size);
if (CHECK(test_btf_size != expect_btf_size,
"test_btf_size:%u != expect_btf_size:%u",
test_btf_size, expect_btf_size)) {
@@ -6750,8 +8251,8 @@ static void do_test_dedup(unsigned int test_num)
expect_str_cur += expect_len + 1;
}
- test_nr_types = btf__get_nr_types(test_btf);
- expect_nr_types = btf__get_nr_types(expect_btf);
+ test_nr_types = btf__type_cnt(test_btf);
+ expect_nr_types = btf__type_cnt(expect_btf);
if (CHECK(test_nr_types != expect_nr_types,
"test_nr_types:%u != expect_nr_types:%u",
test_nr_types, expect_nr_types)) {
@@ -6759,7 +8260,7 @@ static void do_test_dedup(unsigned int test_num)
goto done;
}
- for (i = 1; i <= test_nr_types; i++) {
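+	/* btf__type_cnt() includes the implicit 'void' type at id 0, hence the exclusive bound */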
+ for (i = 1; i < test_nr_types; i++) {
const struct btf_type *test_type, *expect_type;
int test_size, expect_size;
@@ -6795,10 +8296,8 @@ static void do_test_dedup(unsigned int test_num)
}
done:
- if (!IS_ERR(test_btf))
- btf__free(test_btf);
- if (!IS_ERR(expect_btf))
- btf__free(expect_btf);
+ btf__free(test_btf);
+ btf__free(expect_btf);
}
void test_btf(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c
index 64554fd33547..5bc15bb6b7ce 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c
@@ -92,7 +92,7 @@ struct s2 {\n\
int *f3;\n\
};\n\n", "c_dump");
- err = btf__dedup(btf2, NULL, NULL);
+ err = btf__dedup(btf2, NULL);
if (!ASSERT_OK(err, "btf_dedup"))
goto cleanup;
@@ -143,6 +143,10 @@ static void test_split_fwd_resolve() {
btf__add_struct(btf1, "s2", 4); /* [5] struct s2 { */
btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
/* } */
+	/* keep this out of the type graph to test btf_dedup_resolve_fwds */
+ btf__add_struct(btf1, "s3", 4); /* [6] struct s3 { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
VALIDATE_RAW_BTF(
btf1,
@@ -153,20 +157,24 @@ static void test_split_fwd_resolve() {
"\t'f1' type_id=2 bits_offset=0\n"
"\t'f2' type_id=3 bits_offset=64",
"[5] STRUCT 's2' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[6] STRUCT 's3' size=4 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0");
btf2 = btf__new_empty_split(btf1);
if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
goto cleanup;
- btf__add_int(btf2, "int", 4, BTF_INT_SIGNED); /* [6] int */
- btf__add_ptr(btf2, 10); /* [7] ptr to struct s1 */
- btf__add_fwd(btf2, "s2", BTF_FWD_STRUCT); /* [8] fwd for struct s2 */
- btf__add_ptr(btf2, 8); /* [9] ptr to fwd struct s2 */
- btf__add_struct(btf2, "s1", 16); /* [10] struct s1 { */
- btf__add_field(btf2, "f1", 7, 0, 0); /* struct s1 *f1; */
- btf__add_field(btf2, "f2", 9, 64, 0); /* struct s2 *f2; */
+ btf__add_int(btf2, "int", 4, BTF_INT_SIGNED); /* [7] int */
+ btf__add_ptr(btf2, 11); /* [8] ptr to struct s1 */
+ btf__add_fwd(btf2, "s2", BTF_FWD_STRUCT); /* [9] fwd for struct s2 */
+ btf__add_ptr(btf2, 9); /* [10] ptr to fwd struct s2 */
+ btf__add_struct(btf2, "s1", 16); /* [11] struct s1 { */
+ btf__add_field(btf2, "f1", 8, 0, 0); /* struct s1 *f1; */
+ btf__add_field(btf2, "f2", 10, 64, 0); /* struct s2 *f2; */
/* } */
+ btf__add_fwd(btf2, "s3", BTF_FWD_STRUCT); /* [12] fwd for struct s3 */
+ btf__add_ptr(btf2, 12); /* [13] ptr to struct s1 */
VALIDATE_RAW_BTF(
btf2,
@@ -178,15 +186,19 @@ static void test_split_fwd_resolve() {
"\t'f2' type_id=3 bits_offset=64",
"[5] STRUCT 's2' size=4 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0",
- "[6] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
- "[7] PTR '(anon)' type_id=10",
- "[8] FWD 's2' fwd_kind=struct",
- "[9] PTR '(anon)' type_id=8",
- "[10] STRUCT 's1' size=16 vlen=2\n"
- "\t'f1' type_id=7 bits_offset=0\n"
- "\t'f2' type_id=9 bits_offset=64");
-
- err = btf__dedup(btf2, NULL, NULL);
+ "[6] STRUCT 's3' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[7] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[8] PTR '(anon)' type_id=11",
+ "[9] FWD 's2' fwd_kind=struct",
+ "[10] PTR '(anon)' type_id=9",
+ "[11] STRUCT 's1' size=16 vlen=2\n"
+ "\t'f1' type_id=8 bits_offset=0\n"
+ "\t'f2' type_id=10 bits_offset=64",
+ "[12] FWD 's3' fwd_kind=struct",
+ "[13] PTR '(anon)' type_id=12");
+
+ err = btf__dedup(btf2, NULL);
if (!ASSERT_OK(err, "btf_dedup"))
goto cleanup;
@@ -199,7 +211,10 @@ static void test_split_fwd_resolve() {
"\t'f1' type_id=2 bits_offset=0\n"
"\t'f2' type_id=3 bits_offset=64",
"[5] STRUCT 's2' size=4 vlen=1\n"
- "\t'f1' type_id=1 bits_offset=0");
+ "\t'f1' type_id=1 bits_offset=0",
+ "[6] STRUCT 's3' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[7] PTR '(anon)' type_id=6");
cleanup:
btf__free(btf2);
@@ -283,7 +298,7 @@ static void test_split_struct_duped() {
"[13] STRUCT 's3' size=8 vlen=1\n"
"\t'f1' type_id=12 bits_offset=0");
- err = btf__dedup(btf2, NULL, NULL);
+ err = btf__dedup(btf2, NULL);
if (!ASSERT_OK(err, "btf_dedup"))
goto cleanup;
@@ -314,6 +329,216 @@ cleanup:
btf__free(btf1);
}
+static void btf_add_dup_struct_in_cu(struct btf *btf, int start_id)
+{
+#define ID(n) (start_id + n)
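+	/* ID() rebases CU-local type numbers onto this BTF's running id space,
+	 * so the same helper works for both base and split BTF
+	 */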
+ btf__set_pointer_size(btf, 8); /* enforce 64-bit arch */
+
+ btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* [1] int */
+
+ btf__add_struct(btf, "s", 8); /* [2] struct s { */
+ btf__add_field(btf, "a", ID(3), 0, 0); /* struct anon a; */
+ btf__add_field(btf, "b", ID(4), 0, 0); /* struct anon b; */
+ /* } */
+
+ btf__add_struct(btf, "(anon)", 8); /* [3] struct anon { */
+ btf__add_field(btf, "f1", ID(1), 0, 0); /* int f1; */
+ btf__add_field(btf, "f2", ID(1), 32, 0); /* int f2; */
+ /* } */
+
+ btf__add_struct(btf, "(anon)", 8); /* [4] struct anon { */
+ btf__add_field(btf, "f1", ID(1), 0, 0); /* int f1; */
+ btf__add_field(btf, "f2", ID(1), 32, 0); /* int f2; */
+ /* } */
+#undef ID
+}
+
+static void test_split_dup_struct_in_cu()
+{
+ struct btf *btf1, *btf2 = NULL;
+ int err;
+
+ /* generate the base data.. */
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+
+ btf_add_dup_struct_in_cu(btf1, 0);
+
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] STRUCT 's' size=8 vlen=2\n"
+ "\t'a' type_id=3 bits_offset=0\n"
+ "\t'b' type_id=4 bits_offset=0",
+ "[3] STRUCT '(anon)' size=8 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=1 bits_offset=32",
+ "[4] STRUCT '(anon)' size=8 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=1 bits_offset=32");
+
+ /* ..dedup them... */
+ err = btf__dedup(btf1, NULL);
+ if (!ASSERT_OK(err, "btf_dedup"))
+ goto cleanup;
+
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] STRUCT 's' size=8 vlen=2\n"
+ "\t'a' type_id=3 bits_offset=0\n"
+ "\t'b' type_id=3 bits_offset=0",
+ "[3] STRUCT '(anon)' size=8 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=1 bits_offset=32");
+
+ /* and add the same data on top of it */
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+
+ btf_add_dup_struct_in_cu(btf2, 3);
+
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] STRUCT 's' size=8 vlen=2\n"
+ "\t'a' type_id=3 bits_offset=0\n"
+ "\t'b' type_id=3 bits_offset=0",
+ "[3] STRUCT '(anon)' size=8 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=1 bits_offset=32",
+ "[4] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[5] STRUCT 's' size=8 vlen=2\n"
+ "\t'a' type_id=6 bits_offset=0\n"
+ "\t'b' type_id=7 bits_offset=0",
+ "[6] STRUCT '(anon)' size=8 vlen=2\n"
+ "\t'f1' type_id=4 bits_offset=0\n"
+ "\t'f2' type_id=4 bits_offset=32",
+ "[7] STRUCT '(anon)' size=8 vlen=2\n"
+ "\t'f1' type_id=4 bits_offset=0\n"
+ "\t'f2' type_id=4 bits_offset=32");
+
+ err = btf__dedup(btf2, NULL);
+ if (!ASSERT_OK(err, "btf_dedup"))
+ goto cleanup;
+
+ /* after dedup it should match the original data */
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] STRUCT 's' size=8 vlen=2\n"
+ "\t'a' type_id=3 bits_offset=0\n"
+ "\t'b' type_id=3 bits_offset=0",
+ "[3] STRUCT '(anon)' size=8 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=1 bits_offset=32");
+
+cleanup:
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* Ensure module split BTF dedup worked correctly; when dedup fails badly,
+ * core kernel types end up in split BTF too, so ensure that references to
+ * such types point at base - not split - BTF.
+ *
+ * bpf_testmod_test_write() has multiple core kernel type parameters:
+ *
+ * ssize_t
+ * bpf_testmod_test_write(struct file *file, struct kobject *kobj,
+ * struct bin_attribute *bin_attr,
+ * char *buf, loff_t off, size_t len);
+ *
+ * Ensure each of the FUNC_PROTO params is a core kernel type.
+ *
+ * Do the same for
+ *
+ * __bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk);
+ *
+ * ...and
+ *
+ * __bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb);
+ *
+ */
+const char *mod_funcs[] = {
+ "bpf_testmod_test_write",
+ "bpf_kfunc_call_test3",
+ "bpf_kfunc_call_test_pass_ctx"
+};
+
+static void test_split_module(void)
+{
+ struct btf *vmlinux_btf, *btf1 = NULL;
+ int i, nr_base_types;
+
+ vmlinux_btf = btf__load_vmlinux_btf();
+ if (!ASSERT_OK_PTR(vmlinux_btf, "vmlinux_btf"))
+ return;
+ nr_base_types = btf__type_cnt(vmlinux_btf);
+ if (!ASSERT_GT(nr_base_types, 0, "nr_base_types"))
+ goto cleanup;
+
+ btf1 = btf__parse_split("/sys/kernel/btf/bpf_testmod", vmlinux_btf);
+ if (!ASSERT_OK_PTR(btf1, "split_btf"))
+		goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(mod_funcs); i++) {
+ const struct btf_param *p;
+ const struct btf_type *t;
+ __u16 vlen;
+ __u32 id;
+ int j;
+
+ id = btf__find_by_name_kind(btf1, mod_funcs[i], BTF_KIND_FUNC);
+ if (!ASSERT_GE(id, nr_base_types, "func_id"))
+ goto cleanup;
+ t = btf__type_by_id(btf1, id);
+ if (!ASSERT_OK_PTR(t, "func_id_type"))
+ goto cleanup;
+ t = btf__type_by_id(btf1, t->type);
+ if (!ASSERT_OK_PTR(t, "func_proto_id_type"))
+ goto cleanup;
+ if (!ASSERT_EQ(btf_is_func_proto(t), true, "is_func_proto"))
+ goto cleanup;
+ vlen = btf_vlen(t);
+
+ for (j = 0, p = btf_params(t); j < vlen; j++, p++) {
+ /* bpf_testmod uses resilient split BTF, so any
+ * reference types will be added to split BTF and their
+			 * associated targets will be base BTF types; for example,
+			 * for a "struct sock *" the PTR will be in split BTF
+			 * while the "struct sock" will be in base.
+			 *
+			 * In some cases, like loff_t, we have to resolve
+			 * multiple typedefs, hence the while() loop below.
+			 *
+			 * Note that resilient split BTF generation depends
+			 * on the pahole version, so we do not assert that
+			 * reference types are in split BTF; if pahole does
+			 * not support resilient split BTF, they will be
+			 * base BTF types as well.
+ */
+ id = p->type;
+ do {
+ t = btf__type_by_id(btf1, id);
+ if (!ASSERT_OK_PTR(t, "param_ref_type"))
+ goto cleanup;
+ if (!btf_is_mod(t) && !btf_is_ptr(t) && !btf_is_typedef(t))
+ break;
+ id = t->type;
+ } while (true);
+
+ if (!ASSERT_LT(id, nr_base_types, "verify_base_type"))
+ goto cleanup;
+ }
+ }
+cleanup:
+ btf__free(btf1);
+ btf__free(vmlinux_btf);
+}
+
void test_btf_dedup_split()
{
if (test__start_subtest("split_simple"))
@@ -322,4 +547,8 @@ void test_btf_dedup_split()
test_split_struct_duped();
if (test__start_subtest("split_fwd_resolve"))
test_split_fwd_resolve();
+ if (test__start_subtest("split_dup_struct_in_cu"))
+ test_split_dup_struct_in_cu();
+ if (test__start_subtest("split_module"))
+ test_split_module();
}
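
For context, the split-BTF dedup flow these tests exercise boils down to a
handful of libbpf calls. A minimal sketch, illustrative only and not part of
this patch (error handling elided; note the two-argument btf__dedup()
signature the hunks above migrate to):

	#include <bpf/btf.h>

	/* Build a tiny base + split BTF pair, then dedup the split BTF so
	 * duplicated types collapse onto their base BTF equivalents.
	 */
	static int split_dedup_sketch(void)
	{
		struct btf *base, *split;
		int err;

		base = btf__new_empty();
		btf__add_int(base, "int", 4, BTF_INT_SIGNED);	/* [1] int */

		split = btf__new_empty_split(base);
		btf__add_int(split, "int", 4, BTF_INT_SIGNED);	/* [2] dup of [1] */
		btf__add_ptr(split, 2);				/* [3] int * */

		err = btf__dedup(split, NULL);
		/* on success, the duplicate int is removed and the ptr
		 * now references the base BTF int instead
		 */

		btf__free(split);
		btf__free(base);
		return err;
	}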
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_distill.c b/tools/testing/selftests/bpf/prog_tests/btf_distill.c
new file mode 100644
index 000000000000..fb67ae195a73
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_distill.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Oracle and/or its affiliates. */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "btf_helpers.h"
+
+/* Fabricate a base BTF and a split BTF with references to needed base types;
+ * then create split BTF with distilled base BTF and ensure expectations are met:
+ * - only referenced base types from split BTF are present
+ * - struct/union/enum are represented as empty unless anonymous, when they
+ * are represented in full in split BTF
+ */
+static void test_distilled_base(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_ptr(btf1, 1); /* [2] ptr to int */
+ btf__add_struct(btf1, "s1", 8); /* [3] struct s1 { */
+ btf__add_field(btf1, "f1", 2, 0, 0); /* int *f1; */
+ /* } */
+ btf__add_struct(btf1, "", 12); /* [4] struct { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ btf__add_field(btf1, "f2", 3, 32, 0); /* struct s1 f2; */
+ /* } */
+ btf__add_int(btf1, "unsigned int", 4, 0); /* [5] unsigned int */
+ btf__add_union(btf1, "u1", 12); /* [6] union u1 { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ btf__add_field(btf1, "f2", 2, 0, 0); /* int *f2; */
+ /* } */
+ btf__add_union(btf1, "", 4); /* [7] union { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ btf__add_enum(btf1, "e1", 4); /* [8] enum e1 { */
+ btf__add_enum_value(btf1, "v1", 1); /* v1 = 1; */
+ /* } */
+ btf__add_enum(btf1, "", 4); /* [9] enum { */
+ btf__add_enum_value(btf1, "av1", 2); /* av1 = 2; */
+ /* } */
+ btf__add_enum64(btf1, "e641", 8, true); /* [10] enum64 { */
+ btf__add_enum64_value(btf1, "v1", 1024); /* v1 = 1024; */
+ /* } */
+ btf__add_enum64(btf1, "", 8, true); /* [11] enum64 { */
+ btf__add_enum64_value(btf1, "v1", 1025); /* v1 = 1025; */
+ /* } */
+ btf__add_struct(btf1, "unneeded", 4); /* [12] struct unneeded { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ btf__add_struct(btf1, "embedded", 4); /* [13] struct embedded { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ btf__add_func_proto(btf1, 1); /* [14] int (*)(int *p1); */
+ btf__add_func_param(btf1, "p1", 1);
+
+ btf__add_array(btf1, 1, 1, 3); /* [15] int [3]; */
+
+ btf__add_struct(btf1, "from_proto", 4); /* [16] struct from_proto { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ btf__add_union(btf1, "u1", 4); /* [17] union u1 { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1",
+ "[3] STRUCT 's1' size=8 vlen=1\n"
+ "\t'f1' type_id=2 bits_offset=0",
+ "[4] STRUCT '(anon)' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=3 bits_offset=32",
+ "[5] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)",
+ "[6] UNION 'u1' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=2 bits_offset=0",
+ "[7] UNION '(anon)' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[8] ENUM 'e1' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'v1' val=1",
+ "[9] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'av1' val=2",
+ "[10] ENUM64 'e641' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1024",
+ "[11] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1025",
+ "[12] STRUCT 'unneeded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[13] STRUCT 'embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[14] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=1",
+ "[15] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3",
+ "[16] STRUCT 'from_proto' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[17] UNION 'u1' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0");
+
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+
+ btf__add_ptr(btf2, 3); /* [18] ptr to struct s1 */
+ /* add ptr to struct anon */
+ btf__add_ptr(btf2, 4); /* [19] ptr to struct (anon) */
+ btf__add_const(btf2, 6); /* [20] const union u1 */
+ btf__add_restrict(btf2, 7); /* [21] restrict union (anon) */
+ btf__add_volatile(btf2, 8); /* [22] volatile enum e1 */
+ btf__add_typedef(btf2, "et", 9); /* [23] typedef enum (anon) */
+ btf__add_const(btf2, 10); /* [24] const enum64 e641 */
+	btf__add_ptr(btf2, 11);				/* [25] ptr to enum64 (anon) */
+ btf__add_struct(btf2, "with_embedded", 4); /* [26] struct with_embedded { */
+ btf__add_field(btf2, "f1", 13, 0, 0); /* struct embedded f1; */
+ /* } */
+ btf__add_func(btf2, "fn", BTF_FUNC_STATIC, 14); /* [27] int fn(int p1); */
+	btf__add_typedef(btf2, "arraytype", 15);	/* [28] typedef int arraytype[3]; */
+	btf__add_func_proto(btf2, 1);			/* [29] int (*)(struct from_proto p1); */
+ btf__add_func_param(btf2, "p1", 16);
+
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1",
+ "[3] STRUCT 's1' size=8 vlen=1\n"
+ "\t'f1' type_id=2 bits_offset=0",
+ "[4] STRUCT '(anon)' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=3 bits_offset=32",
+ "[5] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)",
+ "[6] UNION 'u1' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=2 bits_offset=0",
+ "[7] UNION '(anon)' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[8] ENUM 'e1' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'v1' val=1",
+ "[9] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'av1' val=2",
+ "[10] ENUM64 'e641' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1024",
+ "[11] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1025",
+ "[12] STRUCT 'unneeded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[13] STRUCT 'embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[14] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=1",
+ "[15] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3",
+ "[16] STRUCT 'from_proto' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[17] UNION 'u1' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[18] PTR '(anon)' type_id=3",
+ "[19] PTR '(anon)' type_id=4",
+ "[20] CONST '(anon)' type_id=6",
+ "[21] RESTRICT '(anon)' type_id=7",
+ "[22] VOLATILE '(anon)' type_id=8",
+ "[23] TYPEDEF 'et' type_id=9",
+ "[24] CONST '(anon)' type_id=10",
+ "[25] PTR '(anon)' type_id=11",
+ "[26] STRUCT 'with_embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=13 bits_offset=0",
+ "[27] FUNC 'fn' type_id=14 linkage=static",
+ "[28] TYPEDEF 'arraytype' type_id=15",
+ "[29] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=16");
+
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(8, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+
+ VALIDATE_RAW_BTF(
+ btf4,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] STRUCT 's1' size=8 vlen=0",
+ "[3] UNION 'u1' size=12 vlen=0",
+ "[4] ENUM 'e1' encoding=UNSIGNED size=4 vlen=0",
+ "[5] ENUM 'e641' encoding=UNSIGNED size=8 vlen=0",
+ "[6] STRUCT 'embedded' size=4 vlen=0",
+ "[7] STRUCT 'from_proto' size=4 vlen=0",
+	/* split BTF; these types should match split BTF above from 18-29, with
+	 * updated type id references
+ */
+ "[8] PTR '(anon)' type_id=2",
+ "[9] PTR '(anon)' type_id=20",
+ "[10] CONST '(anon)' type_id=3",
+ "[11] RESTRICT '(anon)' type_id=21",
+ "[12] VOLATILE '(anon)' type_id=4",
+ "[13] TYPEDEF 'et' type_id=22",
+ "[14] CONST '(anon)' type_id=5",
+ "[15] PTR '(anon)' type_id=23",
+ "[16] STRUCT 'with_embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=6 bits_offset=0",
+ "[17] FUNC 'fn' type_id=24 linkage=static",
+ "[18] TYPEDEF 'arraytype' type_id=25",
+ "[19] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=7",
+ /* split BTF types added from original base BTF below */
+ "[20] STRUCT '(anon)' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=2 bits_offset=32",
+ "[21] UNION '(anon)' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[22] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'av1' val=2",
+ "[23] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1025",
+ "[24] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=1",
+ "[25] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3");
+
+ if (!ASSERT_EQ(btf__relocate(btf4, btf1), 0, "relocate_split"))
+ goto cleanup;
+
+ VALIDATE_RAW_BTF(
+ btf4,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1",
+ "[3] STRUCT 's1' size=8 vlen=1\n"
+ "\t'f1' type_id=2 bits_offset=0",
+ "[4] STRUCT '(anon)' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=3 bits_offset=32",
+ "[5] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)",
+ "[6] UNION 'u1' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=2 bits_offset=0",
+ "[7] UNION '(anon)' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[8] ENUM 'e1' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'v1' val=1",
+ "[9] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'av1' val=2",
+ "[10] ENUM64 'e641' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1024",
+ "[11] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1025",
+ "[12] STRUCT 'unneeded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[13] STRUCT 'embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[14] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=1",
+ "[15] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3",
+ "[16] STRUCT 'from_proto' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[17] UNION 'u1' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[18] PTR '(anon)' type_id=3",
+ "[19] PTR '(anon)' type_id=30",
+ "[20] CONST '(anon)' type_id=6",
+ "[21] RESTRICT '(anon)' type_id=31",
+ "[22] VOLATILE '(anon)' type_id=8",
+ "[23] TYPEDEF 'et' type_id=32",
+ "[24] CONST '(anon)' type_id=10",
+ "[25] PTR '(anon)' type_id=33",
+ "[26] STRUCT 'with_embedded' size=4 vlen=1\n"
+ "\t'f1' type_id=13 bits_offset=0",
+ "[27] FUNC 'fn' type_id=34 linkage=static",
+ "[28] TYPEDEF 'arraytype' type_id=35",
+ "[29] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=16",
+	/* below here are (duplicate) anon base types added by the distill
+ * process to split BTF.
+ */
+ "[30] STRUCT '(anon)' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=3 bits_offset=32",
+ "[31] UNION '(anon)' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[32] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n"
+ "\t'av1' val=2",
+ "[33] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n"
+ "\t'v1' val=1025",
+ "[34] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n"
+ "\t'p1' type_id=1",
+ "[35] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3");
+
+cleanup:
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* Ensure we can cope with multiple types with the same name in
+ * distilled base BTF. In this case, because the sizes differ,
+ * we can still disambiguate them.
+ */
+static void test_distilled_base_multi(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_int(btf1, "int", 8, BTF_INT_SIGNED); /* [2] int */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED");
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+ btf__add_ptr(btf2, 1);
+ btf__add_const(btf2, 2);
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED",
+ "[3] PTR '(anon)' type_id=1",
+ "[4] CONST '(anon)' type_id=2");
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(3, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ btf3,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED");
+ if (!ASSERT_EQ(btf__relocate(btf4, btf1), 0, "relocate_split"))
+ goto cleanup;
+
+ VALIDATE_RAW_BTF(
+ btf4,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED",
+ "[3] PTR '(anon)' type_id=1",
+ "[4] CONST '(anon)' type_id=2");
+
+cleanup:
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* If a needed type is not present in the base BTF we wish to relocate
+ * with, btf__relocate() should error out.
+ */
+static void test_distilled_base_missing_err(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL, *btf5 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_int(btf1, "int", 8, BTF_INT_SIGNED); /* [2] int */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED");
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+ btf__add_ptr(btf2, 1);
+ btf__add_const(btf2, 2);
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED",
+ "[3] PTR '(anon)' type_id=1",
+ "[4] CONST '(anon)' type_id=2");
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(3, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ btf3,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED");
+ btf5 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf"))
+ goto cleanup;
+ btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ VALIDATE_RAW_BTF(
+ btf5,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ ASSERT_EQ(btf__relocate(btf4, btf5), -EINVAL, "relocate_split");
+
+cleanup:
+ btf__free(btf5);
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* With 2 types of the same size in distilled base BTF, relocation should
+ * fail, as we have no means to choose between them.
+ */
+static void test_distilled_base_multi_err(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [2] int */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+ btf__add_ptr(btf2, 1);
+ btf__add_const(btf2, 2);
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[3] PTR '(anon)' type_id=1",
+ "[4] CONST '(anon)' type_id=2");
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(3, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ btf3,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ ASSERT_EQ(btf__relocate(btf4, btf1), -EINVAL, "relocate_split");
+cleanup:
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* With 2 types of the same size in base BTF, relocation should
+ * fail, as we have no means to choose between them.
+ */
+static void test_distilled_base_multi_err2(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL, *btf5 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+ btf__add_ptr(btf2, 1);
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1");
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(2, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ btf3,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ btf5 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf"))
+ goto cleanup;
+ btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [2] int */
+ VALIDATE_RAW_BTF(
+ btf5,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ ASSERT_EQ(btf__relocate(btf4, btf5), -EINVAL, "relocate_split");
+cleanup:
+ btf__free(btf5);
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+/* create split reference BTF from vmlinux + split BTF with a few type references;
+ * ensure the resultant split reference BTF is as expected, containing only types
+ * needed to disambiguate references from split BTF.
+ */
+static void test_distilled_base_vmlinux(void)
+{
+ struct btf *split_btf = NULL, *vmlinux_btf = btf__load_vmlinux_btf();
+ struct btf *split_dist = NULL, *base_dist = NULL;
+ __s32 int_id, myint_id;
+
+ if (!ASSERT_OK_PTR(vmlinux_btf, "load_vmlinux"))
+ return;
+ int_id = btf__find_by_name_kind(vmlinux_btf, "int", BTF_KIND_INT);
+ if (!ASSERT_GT(int_id, 0, "find_int"))
+ goto cleanup;
+ split_btf = btf__new_empty_split(vmlinux_btf);
+ if (!ASSERT_OK_PTR(split_btf, "new_split"))
+ goto cleanup;
+ myint_id = btf__add_typedef(split_btf, "myint", int_id);
+ btf__add_ptr(split_btf, myint_id);
+
+ if (!ASSERT_EQ(btf__distill_base(split_btf, &base_dist, &split_dist), 0,
+ "distill_vmlinux_base"))
+ goto cleanup;
+
+ if (!ASSERT_OK_PTR(split_dist, "split_distilled") ||
+ !ASSERT_OK_PTR(base_dist, "base_dist"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ split_dist,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] TYPEDEF 'myint' type_id=1",
+ "[3] PTR '(anon)' type_id=2");
+
+cleanup:
+ btf__free(split_dist);
+ btf__free(base_dist);
+ btf__free(split_btf);
+ btf__free(vmlinux_btf);
+}
+
+/* Split and new base BTFs should inherit endianness from source BTF. */
+static void test_distilled_endianness(void)
+{
+ struct btf *base = NULL, *split = NULL, *new_base = NULL, *new_split = NULL;
+ struct btf *new_base1 = NULL, *new_split1 = NULL;
+ enum btf_endianness inverse_endianness;
+ const void *raw_data;
+ __u32 size;
+
+ base = btf__new_empty();
+ if (!ASSERT_OK_PTR(base, "empty_main_btf"))
+ return;
+ inverse_endianness = btf__endianness(base) == BTF_LITTLE_ENDIAN ? BTF_BIG_ENDIAN
+ : BTF_LITTLE_ENDIAN;
+ btf__set_endianness(base, inverse_endianness);
+ btf__add_int(base, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ VALIDATE_RAW_BTF(
+ base,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ split = btf__new_empty_split(base);
+ if (!ASSERT_OK_PTR(split, "empty_split_btf"))
+ goto cleanup;
+ btf__add_ptr(split, 1);
+ VALIDATE_RAW_BTF(
+ split,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1");
+ if (!ASSERT_EQ(0, btf__distill_base(split, &new_base, &new_split),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(new_base, "distilled_base") ||
+ !ASSERT_OK_PTR(new_split, "distilled_split") ||
+ !ASSERT_EQ(2, btf__type_cnt(new_base), "distilled_base_type_cnt"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ new_split,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1");
+
+ raw_data = btf__raw_data(new_base, &size);
+ if (!ASSERT_OK_PTR(raw_data, "btf__raw_data #1"))
+ goto cleanup;
+ new_base1 = btf__new(raw_data, size);
+ if (!ASSERT_OK_PTR(new_base1, "new_base1 = btf__new()"))
+ goto cleanup;
+ raw_data = btf__raw_data(new_split, &size);
+ if (!ASSERT_OK_PTR(raw_data, "btf__raw_data #2"))
+ goto cleanup;
+ new_split1 = btf__new_split(raw_data, size, new_base1);
+ if (!ASSERT_OK_PTR(new_split1, "new_split1 = btf__new()"))
+ goto cleanup;
+
+ ASSERT_EQ(btf__endianness(new_base1), inverse_endianness, "new_base1 endianness");
+ ASSERT_EQ(btf__endianness(new_split1), inverse_endianness, "new_split1 endianness");
+ VALIDATE_RAW_BTF(
+ new_split1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1");
+cleanup:
+ btf__free(new_split1);
+ btf__free(new_base1);
+ btf__free(new_split);
+ btf__free(new_base);
+ btf__free(split);
+ btf__free(base);
+}
+
+/* If a needed composite type, which is a member of a composite type
+ * in the split BTF, has a different size in the base BTF we wish to
+ * relocate with, btf__relocate() should error out.
+ */
+static void test_distilled_base_embedded_err(void)
+{
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL, *btf5 = NULL;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "empty_main_btf"))
+ return;
+
+ btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_struct(btf1, "s1", 4); /* [2] struct s1 { */
+ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] STRUCT 's1' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0");
+
+ btf2 = btf__new_empty_split(btf1);
+ if (!ASSERT_OK_PTR(btf2, "empty_split_btf"))
+ goto cleanup;
+
+ btf__add_struct(btf2, "with_embedded", 8); /* [3] struct with_embedded { */
+ btf__add_field(btf2, "e1", 2, 0, 0); /* struct s1 e1; */
+ /* } */
+
+ VALIDATE_RAW_BTF(
+ btf2,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] STRUCT 's1' size=4 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0",
+ "[3] STRUCT 'with_embedded' size=8 vlen=1\n"
+ "\t'e1' type_id=2 bits_offset=0");
+
+ if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(btf3, "distilled_base") ||
+ !ASSERT_OK_PTR(btf4, "distilled_split") ||
+ !ASSERT_EQ(2, btf__type_cnt(btf3), "distilled_base_type_cnt"))
+ goto cleanup;
+
+ VALIDATE_RAW_BTF(
+ btf4,
+ "[1] STRUCT 's1' size=4 vlen=0",
+ "[2] STRUCT 'with_embedded' size=8 vlen=1\n"
+ "\t'e1' type_id=1 bits_offset=0");
+
+ btf5 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf"))
+ goto cleanup;
+
+ btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ /* struct with the same name but different size */
+ btf__add_struct(btf5, "s1", 8); /* [2] struct s1 { */
+ btf__add_field(btf5, "f1", 1, 0, 0); /* int f1; */
+ /* } */
+
+ ASSERT_EQ(btf__relocate(btf4, btf5), -EINVAL, "relocate_split");
+cleanup:
+ btf__free(btf5);
+ btf__free(btf4);
+ btf__free(btf3);
+ btf__free(btf2);
+ btf__free(btf1);
+}
+
+void test_btf_distill(void)
+{
+ if (test__start_subtest("distilled_base"))
+ test_distilled_base();
+ if (test__start_subtest("distilled_base_multi"))
+ test_distilled_base_multi();
+ if (test__start_subtest("distilled_base_missing_err"))
+ test_distilled_base_missing_err();
+ if (test__start_subtest("distilled_base_multi_err"))
+ test_distilled_base_multi_err();
+ if (test__start_subtest("distilled_base_multi_err2"))
+ test_distilled_base_multi_err2();
+ if (test__start_subtest("distilled_base_embedded_err"))
+ test_distilled_base_embedded_err();
+ if (test__start_subtest("distilled_base_vmlinux"))
+ test_distilled_base_vmlinux();
+ if (test__start_subtest("distilled_endianness"))
+ test_distilled_endianness();
+}
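
The distill tests above all drive the same btf__distill_base()/btf__relocate()
round trip. A condensed sketch, illustrative only and not part of this patch
(assumes a libbpf that provides both APIs):

	#include <bpf/btf.h>

	/* Shrink a split BTF's base down to only the types the split BTF
	 * references, then later re-attach the distilled split BTF to the
	 * full base again, as a module loader would.
	 */
	static int distill_roundtrip(struct btf *full_base, struct btf *split)
	{
		struct btf *dist_base = NULL, *dist_split = NULL;
		int err;

		err = btf__distill_base(split, &dist_base, &dist_split);
		if (err)
			return err;

		err = btf__relocate(dist_split, full_base);

		btf__free(dist_split);
		btf__free(dist_base);
		return err;
	}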
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index c60091ee8a21..10cba526d3e6 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -13,29 +13,28 @@ static struct btf_dump_test_case {
const char *name;
const char *file;
bool known_ptr_sz;
- struct btf_dump_opts opts;
} btf_dump_test_cases[] = {
- {"btf_dump: syntax", "btf_dump_test_case_syntax", true, {}},
- {"btf_dump: ordering", "btf_dump_test_case_ordering", false, {}},
- {"btf_dump: padding", "btf_dump_test_case_padding", true, {}},
- {"btf_dump: packing", "btf_dump_test_case_packing", true, {}},
- {"btf_dump: bitfields", "btf_dump_test_case_bitfields", true, {}},
- {"btf_dump: multidim", "btf_dump_test_case_multidim", false, {}},
- {"btf_dump: namespacing", "btf_dump_test_case_namespacing", false, {}},
+ {"btf_dump: syntax", "btf_dump_test_case_syntax", true},
+ {"btf_dump: ordering", "btf_dump_test_case_ordering", false},
+ {"btf_dump: padding", "btf_dump_test_case_padding", true},
+ {"btf_dump: packing", "btf_dump_test_case_packing", true},
+ {"btf_dump: bitfields", "btf_dump_test_case_bitfields", true},
+ {"btf_dump: multidim", "btf_dump_test_case_multidim", false},
+ {"btf_dump: namespacing", "btf_dump_test_case_namespacing", false},
};
-static int btf_dump_all_types(const struct btf *btf,
- const struct btf_dump_opts *opts)
+static int btf_dump_all_types(const struct btf *btf, void *ctx)
{
- size_t type_cnt = btf__get_nr_types(btf);
+ size_t type_cnt = btf__type_cnt(btf);
struct btf_dump *d;
int err = 0, id;
- d = btf_dump__new(btf, NULL, opts, btf_dump_printf);
- if (IS_ERR(d))
- return PTR_ERR(d);
+ d = btf_dump__new(btf, btf_dump_printf, ctx, NULL);
+ err = libbpf_get_error(d);
+ if (err)
+ return err;
- for (id = 1; id <= type_cnt; id++) {
+ for (id = 1; id < type_cnt; id++) {
err = btf_dump__dump_type(d, id);
if (err)
goto done;
@@ -53,11 +52,10 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
int err = 0, fd = -1;
FILE *f = NULL;
- snprintf(test_file, sizeof(test_file), "%s.o", t->file);
+ snprintf(test_file, sizeof(test_file), "%s.bpf.o", t->file);
btf = btf__parse_elf(test_file, NULL);
- if (CHECK(IS_ERR(btf), "btf_parse_elf",
- "failed to load test BTF: %ld\n", PTR_ERR(btf))) {
+ if (!ASSERT_OK_PTR(btf, "btf_parse_elf")) {
err = -PTR_ERR(btf);
btf = NULL;
goto done;
@@ -65,7 +63,7 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
/* tests with t->known_ptr_sz have no "long" or "unsigned long" type,
* so it's impossible to determine correct pointer size; but if they
- * do, it should be 8 regardless of host architecture, becaues BPF
+ * do, it should be 8 regardless of host architecture, because BPF
* target is always 64-bit
*/
if (!t->known_ptr_sz) {
@@ -77,7 +75,7 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file);
fd = mkstemp(out_file);
- if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) {
+ if (!ASSERT_GE(fd, 0, "create_tmp")) {
err = fd;
goto done;
}
@@ -88,8 +86,7 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
goto done;
}
- t->opts.ctx = f;
- err = btf_dump_all_types(btf, &t->opts);
+ err = btf_dump_all_types(btf, f);
fclose(f);
close(fd);
if (CHECK(err, "btf_dump", "failure during C dumping: %d\n", err)) {
@@ -129,60 +126,118 @@ done:
return err;
}
-static char *dump_buf;
-static size_t dump_buf_sz;
-static FILE *dump_buf_file;
+struct test_ctx {
+ struct btf *btf;
+ struct btf_dump *d;
+ char *dump_buf;
+ size_t dump_buf_sz;
+ FILE *dump_buf_file;
+};
-void test_btf_dump_incremental(void)
+static void test_ctx__free(struct test_ctx *t)
{
- struct btf *btf = NULL;
- struct btf_dump *d = NULL;
- struct btf_dump_opts opts;
- int id, err, i;
+ fclose(t->dump_buf_file);
+ free(t->dump_buf);
+ btf_dump__free(t->d);
+ btf__free(t->btf);
+}
- dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz);
- if (!ASSERT_OK_PTR(dump_buf_file, "dump_memstream"))
- return;
- btf = btf__new_empty();
- if (!ASSERT_OK_PTR(btf, "new_empty"))
+static int test_ctx__init(struct test_ctx *t)
+{
+ t->dump_buf_file = open_memstream(&t->dump_buf, &t->dump_buf_sz);
+ if (!ASSERT_OK_PTR(t->dump_buf_file, "dump_memstream"))
+ return -1;
+ t->btf = btf__new_empty();
+ if (!ASSERT_OK_PTR(t->btf, "new_empty"))
goto err_out;
- opts.ctx = dump_buf_file;
- d = btf_dump__new(btf, NULL, &opts, btf_dump_printf);
- if (!ASSERT_OK(libbpf_get_error(d), "btf_dump__new"))
+ t->d = btf_dump__new(t->btf, btf_dump_printf, t->dump_buf_file, NULL);
+ if (!ASSERT_OK(libbpf_get_error(t->d), "btf_dump__new"))
goto err_out;
+ return 0;
+
+err_out:
+ test_ctx__free(t);
+ return -1;
+}
+
+static void test_ctx__dump_and_compare(struct test_ctx *t,
+ const char *expected_output,
+ const char *message)
+{
+ int i, err;
+
+ for (i = 1; i < btf__type_cnt(t->btf); i++) {
+ err = btf_dump__dump_type(t->d, i);
+ ASSERT_OK(err, "dump_type_ok");
+ }
+
+ fflush(t->dump_buf_file);
+ t->dump_buf[t->dump_buf_sz] = 0; /* some libc implementations don't do this */
+
+ ASSERT_STREQ(t->dump_buf, expected_output, message);
+}
+
+static void test_btf_dump_incremental(void)
+{
+ struct test_ctx t = {};
+ struct btf *btf;
+ int id, err;
+
+ if (test_ctx__init(&t))
+ return;
+
+ btf = t.btf;
+
/* First, generate BTF corresponding to the following C code:
*
- * enum { VAL = 1 };
+ * enum x;
+ *
+ * enum x { X = 1 };
+ *
+ * enum { Y = 1 };
+ *
+ * struct s;
*
* struct s { int x; };
*
*/
+ id = btf__add_enum(btf, "x", 4);
+ ASSERT_EQ(id, 1, "enum_declaration_id");
+ id = btf__add_enum(btf, "x", 4);
+ ASSERT_EQ(id, 2, "named_enum_id");
+ err = btf__add_enum_value(btf, "X", 1);
+ ASSERT_OK(err, "named_enum_val_ok");
+
id = btf__add_enum(btf, NULL, 4);
- ASSERT_EQ(id, 1, "enum_id");
- err = btf__add_enum_value(btf, "VAL", 1);
- ASSERT_OK(err, "enum_val_ok");
+ ASSERT_EQ(id, 3, "anon_enum_id");
+ err = btf__add_enum_value(btf, "Y", 1);
+ ASSERT_OK(err, "anon_enum_val_ok");
id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
- ASSERT_EQ(id, 2, "int_id");
+ ASSERT_EQ(id, 4, "int_id");
+
+ id = btf__add_fwd(btf, "s", BTF_FWD_STRUCT);
+ ASSERT_EQ(id, 5, "fwd_id");
id = btf__add_struct(btf, "s", 4);
- ASSERT_EQ(id, 3, "struct_id");
- err = btf__add_field(btf, "x", 2, 0, 0);
+ ASSERT_EQ(id, 6, "struct_id");
+ err = btf__add_field(btf, "x", 4, 0, 0);
ASSERT_OK(err, "field_ok");
- for (i = 1; i <= btf__get_nr_types(btf); i++) {
- err = btf_dump__dump_type(d, i);
- ASSERT_OK(err, "dump_type_ok");
- }
-
- fflush(dump_buf_file);
- dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */
- ASSERT_STREQ(dump_buf,
+ test_ctx__dump_and_compare(&t,
+"enum x;\n"
+"\n"
+"enum x {\n"
+" X = 1,\n"
+"};\n"
+"\n"
"enum {\n"
-" VAL = 1,\n"
+" Y = 1,\n"
"};\n"
"\n"
+"struct s;\n"
+"\n"
"struct s {\n"
" int x;\n"
"};\n\n", "c_dump1");
@@ -201,38 +256,797 @@ void test_btf_dump_incremental(void)
* enum values don't conflict;
*
*/
- fseek(dump_buf_file, 0, SEEK_SET);
+ fseek(t.dump_buf_file, 0, SEEK_SET);
id = btf__add_struct(btf, "s", 4);
- ASSERT_EQ(id, 4, "struct_id");
- err = btf__add_field(btf, "x", 1, 0, 0);
+ ASSERT_EQ(id, 7, "struct_id");
+ err = btf__add_field(btf, "x", 2, 0, 0);
ASSERT_OK(err, "field_ok");
- err = btf__add_field(btf, "s", 3, 32, 0);
+ err = btf__add_field(btf, "y", 3, 32, 0);
+ ASSERT_OK(err, "field_ok");
+ err = btf__add_field(btf, "s", 6, 64, 0);
ASSERT_OK(err, "field_ok");
- for (i = 1; i <= btf__get_nr_types(btf); i++) {
- err = btf_dump__dump_type(d, i);
- ASSERT_OK(err, "dump_type_ok");
- }
-
- fflush(dump_buf_file);
- dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */
- ASSERT_STREQ(dump_buf,
+ test_ctx__dump_and_compare(&t,
"struct s___2 {\n"
+" enum x x;\n"
" enum {\n"
-" VAL___2 = 1,\n"
-" } x;\n"
+" Y___2 = 1,\n"
+" } y;\n"
" struct s s;\n"
"};\n\n" , "c_dump1");
-err_out:
- fclose(dump_buf_file);
- free(dump_buf);
+ test_ctx__free(&t);
+}
+
+static void test_btf_dump_type_tags(void)
+{
+ struct test_ctx t = {};
+ struct btf *btf;
+ int id, err;
+
+ if (test_ctx__init(&t))
+ return;
+
+ btf = t.btf;
+
+ /* Generate BTF corresponding to the following C code:
+ *
+ * struct s {
+ * void __attribute__((btf_type_tag(\"void_tag\"))) *p1;
+ * void __attribute__((void_attr)) *p2;
+ * };
+ *
+ */
+
+ id = btf__add_type_tag(btf, "void_tag", 0);
+ ASSERT_EQ(id, 1, "type_tag_id");
+ id = btf__add_ptr(btf, id);
+ ASSERT_EQ(id, 2, "void_ptr_id1");
+
+ id = btf__add_type_attr(btf, "void_attr", 0);
+ ASSERT_EQ(id, 3, "type_attr_id");
+ id = btf__add_ptr(btf, id);
+ ASSERT_EQ(id, 4, "void_ptr_id2");
+
+ id = btf__add_struct(btf, "s", 8);
+ ASSERT_EQ(id, 5, "struct_id");
+ err = btf__add_field(btf, "p1", 2, 0, 0);
+ ASSERT_OK(err, "field_ok1");
+ err = btf__add_field(btf, "p2", 4, 0, 0);
+ ASSERT_OK(err, "field_ok2");
+
+ test_ctx__dump_and_compare(&t,
+"struct s {\n"
+" void __attribute__((btf_type_tag(\"void_tag\"))) *p1;\n"
+" void __attribute__((void_attr)) *p2;\n"
+"};\n\n", "dump_and_compare");
+
+ test_ctx__free(&t);
+}
+
+#define STRSIZE 4096
+
+static void btf_dump_snprintf(void *ctx, const char *fmt, va_list args)
+{
+ char *s = ctx, new[STRSIZE];
+
+ vsnprintf(new, STRSIZE, fmt, args);
+ if (strlen(s) < STRSIZE)
+ strncat(s, new, STRSIZE - strlen(s) - 1);
+}
+
+static int btf_dump_data(struct btf *btf, struct btf_dump *d,
+ char *name, char *prefix, __u64 flags, void *ptr,
+ size_t ptr_sz, char *str, const char *expected_val)
+{
+ DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts);
+ size_t type_sz;
+ __s32 type_id;
+ int ret = 0;
+
+ if (flags & BTF_F_COMPACT)
+ opts.compact = true;
+ if (flags & BTF_F_NONAME)
+ opts.skip_names = true;
+ if (flags & BTF_F_ZERO)
+ opts.emit_zeroes = true;
+ if (prefix) {
+ ASSERT_STRNEQ(name, prefix, strlen(prefix),
+ "verify prefix match");
+ name += strlen(prefix) + 1;
+ }
+ type_id = btf__find_by_name(btf, name);
+ if (!ASSERT_GE(type_id, 0, "find type id"))
+ return -ENOENT;
+ type_sz = btf__resolve_size(btf, type_id);
+ str[0] = '\0';
+ ret = btf_dump__dump_type_data(d, type_id, ptr, ptr_sz, &opts);
+ if (type_sz <= ptr_sz) {
+ if (!ASSERT_EQ(ret, type_sz, "failed/unexpected type_sz"))
+ return -EINVAL;
+ } else {
+ if (!ASSERT_EQ(ret, -E2BIG, "failed to return -E2BIG"))
+ return -EINVAL;
+ }
+ if (!ASSERT_STREQ(str, expected_val, "ensure expected/actual match"))
+ return -EFAULT;
+ return 0;
+}
+
+#define TEST_BTF_DUMP_DATA(_b, _d, _prefix, _str, _type, _flags, \
+ _expected, ...) \
+ do { \
+ char __ptrtype[64] = #_type; \
+ char *_ptrtype = (char *)__ptrtype; \
+ _type _ptrdata = __VA_ARGS__; \
+ void *_ptr = &_ptrdata; \
+ \
+ (void) btf_dump_data(_b, _d, _ptrtype, _prefix, _flags, \
+ _ptr, sizeof(_type), _str, \
+ _expected); \
+ } while (0)
+
+/* Use where expected data string matches its stringified declaration */
+#define TEST_BTF_DUMP_DATA_C(_b, _d, _prefix, _str, _type, _flags, \
+ ...) \
+ TEST_BTF_DUMP_DATA(_b, _d, _prefix, _str, _type, _flags, \
+ "(" #_type ")" #__VA_ARGS__, __VA_ARGS__)
+
+/* overflow test; pass typesize < expected type size, ensure E2BIG returned */
+#define TEST_BTF_DUMP_DATA_OVER(_b, _d, _prefix, _str, _type, _type_sz, \
+ _expected, ...) \
+ do { \
+ char __ptrtype[64] = #_type; \
+ char *_ptrtype = (char *)__ptrtype; \
+ _type _ptrdata = __VA_ARGS__; \
+ void *_ptr = &_ptrdata; \
+ \
+ (void) btf_dump_data(_b, _d, _ptrtype, _prefix, 0, \
+ _ptr, _type_sz, _str, _expected); \
+ } while (0)
+
+#define TEST_BTF_DUMP_VAR(_b, _d, _prefix, _str, _var, _type, _flags, \
+ _expected, ...) \
+ do { \
+ _type _ptrdata = __VA_ARGS__; \
+ void *_ptr = &_ptrdata; \
+ \
+ (void) btf_dump_data(_b, _d, _var, _prefix, _flags, \
+ _ptr, sizeof(_type), _str, \
+ _expected); \
+ } while (0)
+
+static void test_btf_dump_int_data(struct btf *btf, struct btf_dump *d,
+ char *str)
+{
+#ifdef __SIZEOF_INT128__
+ unsigned __int128 i = 0xffffffffffffffff;
+
+ /* this dance is required because we cannot directly initialize
+ * a 128-bit value to anything larger than a 64-bit value.
+ */
+ i = (i << 64) | (i - 1);
+#endif
+ /* simple int */
+ TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, int, BTF_F_COMPACT, 1234);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_NONAME,
+ "1234", 1234);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, 0, "(int)1234", 1234);
+
+ /* zero value should be printed at toplevel */
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT, "(int)0", 0);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_NONAME,
+ "0", 0);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_ZERO,
+ "(int)0", 0);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, int,
+ BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO,
+ "0", 0);
+ TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, int, BTF_F_COMPACT, -4567);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_NONAME,
+ "-4567", -4567);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, 0, "(int)-4567", -4567);
+
+ TEST_BTF_DUMP_DATA_OVER(btf, d, NULL, str, int, sizeof(int)-1, "", 1);
+
+#ifdef __SIZEOF_INT128__
+	/* gcc encodes the unsigned __int128 type with name "__int128 unsigned"
+	 * in DWARF, while clang encodes it with name "unsigned __int128".
+	 * Check which variant is available before running the actual test.
+	 */
+ if (btf__find_by_name(btf, "unsigned __int128") > 0) {
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, unsigned __int128, BTF_F_COMPACT,
+ "(unsigned __int128)0xffffffffffffffff",
+ 0xffffffffffffffff);
+ ASSERT_OK(btf_dump_data(btf, d, "unsigned __int128", NULL, 0, &i, 16, str,
+ "(unsigned __int128)0xfffffffffffffffffffffffffffffffe"),
+ "dump unsigned __int128");
+ } else if (btf__find_by_name(btf, "__int128 unsigned") > 0) {
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, __int128 unsigned, BTF_F_COMPACT,
+ "(__int128 unsigned)0xffffffffffffffff",
+ 0xffffffffffffffff);
+ ASSERT_OK(btf_dump_data(btf, d, "__int128 unsigned", NULL, 0, &i, 16, str,
+ "(__int128 unsigned)0xfffffffffffffffffffffffffffffffe"),
+ "dump unsigned __int128");
+ } else {
+ ASSERT_TRUE(false, "unsigned_int128_not_found");
+ }
+#endif
+}
+
+static void test_btf_dump_float_data(struct btf *btf, struct btf_dump *d,
+ char *str)
+{
+ float t1 = 1.234567;
+ float t2 = -1.234567;
+ float t3 = 0.0;
+ double t4 = 5.678912;
+ double t5 = -5.678912;
+ double t6 = 0.0;
+ long double t7 = 9.876543;
+ long double t8 = -9.876543;
+ long double t9 = 0.0;
+
+	/* since the kernel likely does not have any float types in its BTF,
+	 * we will need to add some of various sizes.
+	 */
+
+ ASSERT_GT(btf__add_float(btf, "test_float", 4), 0, "add float");
+ ASSERT_OK(btf_dump_data(btf, d, "test_float", NULL, 0, &t1, 4, str,
+ "(test_float)1.234567"), "dump float");
+ ASSERT_OK(btf_dump_data(btf, d, "test_float", NULL, 0, &t2, 4, str,
+ "(test_float)-1.234567"), "dump float");
+ ASSERT_OK(btf_dump_data(btf, d, "test_float", NULL, 0, &t3, 4, str,
+ "(test_float)0.000000"), "dump float");
+
+ ASSERT_GT(btf__add_float(btf, "test_double", 8), 0, "add_double");
+ ASSERT_OK(btf_dump_data(btf, d, "test_double", NULL, 0, &t4, 8, str,
+ "(test_double)5.678912"), "dump double");
+ ASSERT_OK(btf_dump_data(btf, d, "test_double", NULL, 0, &t5, 8, str,
+ "(test_double)-5.678912"), "dump double");
+ ASSERT_OK(btf_dump_data(btf, d, "test_double", NULL, 0, &t6, 8, str,
+ "(test_double)0.000000"), "dump double");
+
+ ASSERT_GT(btf__add_float(btf, "test_long_double", 16), 0, "add long double");
+ ASSERT_OK(btf_dump_data(btf, d, "test_long_double", NULL, 0, &t7, 16,
+ str, "(test_long_double)9.876543"),
+ "dump long_double");
+ ASSERT_OK(btf_dump_data(btf, d, "test_long_double", NULL, 0, &t8, 16,
+ str, "(test_long_double)-9.876543"),
+ "dump long_double");
+ ASSERT_OK(btf_dump_data(btf, d, "test_long_double", NULL, 0, &t9, 16,
+ str, "(test_long_double)0.000000"),
+ "dump long_double");
+}
+
+static void test_btf_dump_char_data(struct btf *btf, struct btf_dump *d,
+ char *str)
+{
+ /* simple char */
+ TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, char, BTF_F_COMPACT, 100);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_NONAME,
+ "100", 100);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, 0, "(char)100", 100);
+ /* zero value should be printed at toplevel */
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT,
+ "(char)0", 0);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_NONAME,
+ "0", 0);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_ZERO,
+ "(char)0", 0);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO,
+ "0", 0);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, 0, "(char)0", 0);
+
+ TEST_BTF_DUMP_DATA_OVER(btf, d, NULL, str, char, sizeof(char)-1, "", 100);
+}
+
+static void test_btf_dump_typedef_data(struct btf *btf, struct btf_dump *d,
+ char *str)
+{
+ /* simple typedef */
+ TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, uint64_t, BTF_F_COMPACT, 100);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT | BTF_F_NONAME,
+ "1", 1);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, 0, "(u64)1", 1);
+ /* zero value should be printed at toplevel */
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT, "(u64)0", 0);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT | BTF_F_NONAME,
+ "0", 0);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT | BTF_F_ZERO,
+ "(u64)0", 0);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64,
+ BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO,
+ "0", 0);
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, 0, "(u64)0", 0);
+
+ /* typedef struct */
+ TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, atomic_t, BTF_F_COMPACT,
+ {.counter = (int)1,});
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_COMPACT | BTF_F_NONAME,
+ "{1,}", { .counter = 1 });
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, 0,
+"(atomic_t){\n"
+" .counter = (int)1,\n"
+"}",
+ {.counter = 1,});
+ /* typedef with 0 value should be printed at toplevel */
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_COMPACT, "(atomic_t){}",
+ {.counter = 0,});
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_COMPACT | BTF_F_NONAME,
+ "{}", {.counter = 0,});
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, 0,
+"(atomic_t){\n"
+"}",
+ {.counter = 0,});
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_COMPACT | BTF_F_ZERO,
+ "(atomic_t){.counter = (int)0,}",
+ {.counter = 0,});
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t,
+ BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO,
+ "{0,}", {.counter = 0,});
+ TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_ZERO,
+"(atomic_t){\n"
+" .counter = (int)0,\n"
+"}",
+ { .counter = 0,});
+
+ /* overflow should show type but not value since it overflows */
+ TEST_BTF_DUMP_DATA_OVER(btf, d, NULL, str, atomic_t, sizeof(atomic_t)-1,
+ "(atomic_t){\n", { .counter = 1});
+}
+
+static void test_btf_dump_enum_data(struct btf *btf, struct btf_dump *d,
+ char *str)
+{
+ /* enum where enum value does (and does not) exist */
+ TEST_BTF_DUMP_DATA_C(btf, d, "enum", str, enum bpf_cmd, BTF_F_COMPACT,
+ BPF_MAP_CREATE);
+ TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, BTF_F_COMPACT,
+ "(enum bpf_cmd)BPF_MAP_CREATE", 0);
+ TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd,
+ BTF_F_COMPACT | BTF_F_NONAME,
+ "BPF_MAP_CREATE",
+ BPF_MAP_CREATE);
+ TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, 0,
+ "(enum bpf_cmd)BPF_MAP_CREATE",
+ BPF_MAP_CREATE);
+ TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd,
+ BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO,
+ "BPF_MAP_CREATE", 0);
+ TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd,
+ BTF_F_COMPACT | BTF_F_ZERO,
+ "(enum bpf_cmd)BPF_MAP_CREATE",
+ BPF_MAP_CREATE);
+ TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd,
+ BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO,
+ "BPF_MAP_CREATE", BPF_MAP_CREATE);
+ TEST_BTF_DUMP_DATA_C(btf, d, "enum", str, enum bpf_cmd, BTF_F_COMPACT, 2000);
+ TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd,
+ BTF_F_COMPACT | BTF_F_NONAME,
+ "2000", 2000);
+ TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, 0,
+ "(enum bpf_cmd)2000", 2000);
+
+ TEST_BTF_DUMP_DATA_OVER(btf, d, "enum", str, enum bpf_cmd,
+ sizeof(enum bpf_cmd) - 1, "", BPF_MAP_CREATE);
+}
+
+static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d,
+ char *str)
+{
+ DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts);
+ char zero_data[512] = { };
+ char type_data[512];
+ void *fops = type_data;
+ void *skb = type_data;
+ size_t type_sz;
+ __s32 type_id;
+ char *cmpstr;
+ int ret;
+
+ memset(type_data, 255, sizeof(type_data));
+
+ /* simple struct */
+ TEST_BTF_DUMP_DATA_C(btf, d, "struct", str, struct btf_enum, BTF_F_COMPACT,
+ {.name_off = (__u32)3,.val = (__s32)-1,});
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum,
+ BTF_F_COMPACT | BTF_F_NONAME,
+ "{3,-1,}",
+ { .name_off = 3, .val = -1,});
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, 0,
+"(struct btf_enum){\n"
+" .name_off = (__u32)3,\n"
+" .val = (__s32)-1,\n"
+"}",
+ { .name_off = 3, .val = -1,});
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum,
+ BTF_F_COMPACT | BTF_F_NONAME,
+ "{-1,}",
+ { .name_off = 0, .val = -1,});
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum,
+ BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO,
+ "{0,-1,}",
+ { .name_off = 0, .val = -1,});
+ /* empty struct should be printed */
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, BTF_F_COMPACT,
+ "(struct btf_enum){}",
+ { .name_off = 0, .val = 0,});
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum,
+ BTF_F_COMPACT | BTF_F_NONAME,
+ "{}",
+ { .name_off = 0, .val = 0,});
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, 0,
+"(struct btf_enum){\n"
+"}",
+ { .name_off = 0, .val = 0,});
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum,
+ BTF_F_COMPACT | BTF_F_ZERO,
+ "(struct btf_enum){.name_off = (__u32)0,.val = (__s32)0,}",
+ { .name_off = 0, .val = 0,});
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum,
+ BTF_F_ZERO,
+"(struct btf_enum){\n"
+" .name_off = (__u32)0,\n"
+" .val = (__s32)0,\n"
+"}",
+ { .name_off = 0, .val = 0,});
+
+ /* struct with pointers */
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, BTF_F_COMPACT,
+ "(struct list_head){.next = (struct list_head *)0x1,}",
+ { .next = (struct list_head *)1 });
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, 0,
+"(struct list_head){\n"
+" .next = (struct list_head *)0x1,\n"
+"}",
+ { .next = (struct list_head *)1 });
+ /* NULL pointer should not be displayed */
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, BTF_F_COMPACT,
+ "(struct list_head){}",
+ { .next = (struct list_head *)0 });
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, 0,
+"(struct list_head){\n"
+"}",
+ { .next = (struct list_head *)0 });
+
+ /* struct with function pointers */
+ type_id = btf__find_by_name(btf, "file_operations");
+ if (ASSERT_GT(type_id, 0, "find type id")) {
+ type_sz = btf__resolve_size(btf, type_id);
+ str[0] = '\0';
+
+ ret = btf_dump__dump_type_data(d, type_id, fops, type_sz, &opts);
+ ASSERT_EQ(ret, type_sz,
+ "unexpected return value dumping file_operations");
+ cmpstr =
+"(struct file_operations){\n"
+" .owner = (struct module *)0xffffffffffffffff,\n"
+" .fop_flags = (fop_flags_t)4294967295,";
+
+ ASSERT_STRNEQ(str, cmpstr, strlen(cmpstr), "file_operations");
+ }
+
+ /* struct with char array */
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, BTF_F_COMPACT,
+ "(struct bpf_prog_info){.name = (char[16])['f','o','o',],}",
+ { .name = "foo",});
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info,
+ BTF_F_COMPACT | BTF_F_NONAME,
+ "{['f','o','o',],}",
+ {.name = "foo",});
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, 0,
+"(struct bpf_prog_info){\n"
+" .name = (char[16])[\n"
+" 'f',\n"
+" 'o',\n"
+" 'o',\n"
+" ],\n"
+"}",
+ {.name = "foo",});
+ /* leading null char means do not display string */
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, BTF_F_COMPACT,
+ "(struct bpf_prog_info){}",
+ {.name = {'\0', 'f', 'o', 'o'}});
+ /* handle non-printable characters */
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, BTF_F_COMPACT,
+ "(struct bpf_prog_info){.name = (char[16])[1,2,3,],}",
+ { .name = {1, 2, 3, 0}});
+
+ /* struct with non-char array */
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, BTF_F_COMPACT,
+ "(struct __sk_buff){.cb = (__u32[5])[1,2,3,4,5,],}",
+ { .cb = {1, 2, 3, 4, 5,},});
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff,
+ BTF_F_COMPACT | BTF_F_NONAME,
+ "{[1,2,3,4,5,],}",
+ { .cb = { 1, 2, 3, 4, 5},});
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, 0,
+"(struct __sk_buff){\n"
+" .cb = (__u32[5])[\n"
+" 1,\n"
+" 2,\n"
+" 3,\n"
+" 4,\n"
+" 5,\n"
+" ],\n"
+"}",
+ { .cb = { 1, 2, 3, 4, 5},});
+	/* For non-char arrays, show non-zero values only */
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, BTF_F_COMPACT,
+ "(struct __sk_buff){.cb = (__u32[5])[0,0,1,0,0,],}",
+ { .cb = { 0, 0, 1, 0, 0},});
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, 0,
+"(struct __sk_buff){\n"
+" .cb = (__u32[5])[\n"
+" 0,\n"
+" 0,\n"
+" 1,\n"
+" 0,\n"
+" 0,\n"
+" ],\n"
+"}",
+ { .cb = { 0, 0, 1, 0, 0},});
+
+ /* struct with bitfields */
+ TEST_BTF_DUMP_DATA_C(btf, d, "struct", str, struct bpf_insn, BTF_F_COMPACT,
+ {.code = (__u8)1,.dst_reg = (__u8)0x2,.src_reg = (__u8)0x3,.off = (__s16)4,.imm = (__s32)5,});
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_insn,
+ BTF_F_COMPACT | BTF_F_NONAME,
+ "{1,0x2,0x3,4,5,}",
+ { .code = 1, .dst_reg = 0x2, .src_reg = 0x3, .off = 4,
+ .imm = 5,});
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_insn, 0,
+"(struct bpf_insn){\n"
+" .code = (__u8)1,\n"
+" .dst_reg = (__u8)0x2,\n"
+" .src_reg = (__u8)0x3,\n"
+" .off = (__s16)4,\n"
+" .imm = (__s32)5,\n"
+"}",
+ {.code = 1, .dst_reg = 2, .src_reg = 3, .off = 4, .imm = 5});
+
+ /* zeroed bitfields should not be displayed */
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_insn, BTF_F_COMPACT,
+ "(struct bpf_insn){.dst_reg = (__u8)0x1,}",
+ { .code = 0, .dst_reg = 1});
+
+ /* struct with enum bitfield */
+ type_id = btf__find_by_name(btf, "fs_context");
+ if (ASSERT_GT(type_id, 0, "find fs_context")) {
+ type_sz = btf__resolve_size(btf, type_id);
+ str[0] = '\0';
+
+ opts.emit_zeroes = true;
+ ret = btf_dump__dump_type_data(d, type_id, zero_data, type_sz, &opts);
+ ASSERT_EQ(ret, type_sz,
+ "unexpected return value dumping fs_context");
+
+ ASSERT_NEQ(strstr(str, "FS_CONTEXT_FOR_MOUNT"), NULL,
+ "bitfield value not present");
+ }
+
+ /* struct with nested anon union */
+ TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_sock_ops, BTF_F_COMPACT,
+ "(struct bpf_sock_ops){.op = (__u32)1,(union){.args = (__u32[4])[1,2,3,4,],.reply = (__u32)1,.replylong = (__u32[4])[1,2,3,4,],},}",
+ { .op = 1, .args = { 1, 2, 3, 4}});
+
+ /* union with nested struct */
+ TEST_BTF_DUMP_DATA(btf, d, "union", str, union bpf_iter_link_info, BTF_F_COMPACT,
+ "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},.cgroup = (struct){.order = (enum bpf_cgroup_iter_order)BPF_CGROUP_ITER_SELF_ONLY,.cgroup_fd = (__u32)1,},.task = (struct){.tid = (__u32)1,.pid = (__u32)1,},}",
+ { .cgroup = { .order = 1, .cgroup_fd = 1, }});
+
+ /* struct skb with nested structs/unions; because type output is so
+ * complex, we don't do a string comparison, just verify we return
+ * the type size as the amount of data displayed.
+ */
+ type_id = btf__find_by_name(btf, "sk_buff");
+ if (ASSERT_GT(type_id, 0, "find struct sk_buff")) {
+ type_sz = btf__resolve_size(btf, type_id);
+ str[0] = '\0';
+
+ ret = btf_dump__dump_type_data(d, type_id, skb, type_sz, &opts);
+ ASSERT_EQ(ret, type_sz,
+ "unexpected return value dumping sk_buff");
+ }
+
+ /* overflow bpf_sock_ops struct with final element nonzero/zero.
+ * Regardless of the value of the final field, we don't have all the
+ * data we need to display it, so we should trigger an overflow.
+	 * In other words, overflow checking should trump "is field zero?"
+	 * checks because if we've overflowed, it shouldn't matter what the
+	 * field is - we can't trust its value, so we shouldn't display it.
+ */
+ TEST_BTF_DUMP_DATA_OVER(btf, d, "struct", str, struct bpf_sock_ops,
+ sizeof(struct bpf_sock_ops) - 1,
+ "(struct bpf_sock_ops){\n\t.op = (__u32)1,\n",
+ { .op = 1, .skb_hwtstamp = 2});
+ TEST_BTF_DUMP_DATA_OVER(btf, d, "struct", str, struct bpf_sock_ops,
+ sizeof(struct bpf_sock_ops) - 1,
+ "(struct bpf_sock_ops){\n\t.op = (__u32)1,\n",
+ { .op = 1, .skb_hwtstamp = 0});
+}
+
+static void test_btf_dump_var_data(struct btf *btf, struct btf_dump *d,
+ char *str)
+{
+#if 0
+ TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_number", int, BTF_F_COMPACT,
+ "int cpu_number = (int)100", 100);
+#endif
+ TEST_BTF_DUMP_VAR(btf, d, NULL, str, "bpf_cgrp_storage_busy", int, BTF_F_COMPACT,
+ "static int bpf_cgrp_storage_busy = (int)2", 2);
+}
+
+struct btf_dump_string_ctx {
+ struct btf *btf;
+ struct btf_dump *d;
+ char *str;
+ struct btf_dump_type_data_opts *opts;
+ int array_id;
+};
+
+static int btf_dump_one_string(struct btf_dump_string_ctx *ctx,
+ char *ptr, size_t ptr_sz,
+ const char *expected_val)
+{
+ size_t type_sz;
+ int ret;
+
+ ctx->str[0] = '\0';
+ type_sz = btf__resolve_size(ctx->btf, ctx->array_id);
+ ret = btf_dump__dump_type_data(ctx->d, ctx->array_id, ptr, ptr_sz, ctx->opts);
+ if (type_sz <= ptr_sz) {
+ if (!ASSERT_EQ(ret, type_sz, "failed/unexpected type_sz"))
+ return -EINVAL;
+ }
+ if (!ASSERT_STREQ(ctx->str, expected_val, "ensure expected/actual match"))
+ return -EFAULT;
+ return 0;
+}
+
+static void btf_dump_strings(struct btf_dump_string_ctx *ctx)
+{
+ struct btf_dump_type_data_opts *opts = ctx->opts;
+
+ opts->emit_strings = true;
+
+ opts->compact = true;
+ opts->emit_zeroes = false;
+
+ opts->skip_names = false;
+ btf_dump_one_string(ctx, "foo", 4, "(char[4])\"foo\"");
+
+ opts->skip_names = true;
+ btf_dump_one_string(ctx, "foo", 4, "\"foo\"");
+
+	/* emit_zeroes should have no effect on string output. */
+ opts->emit_zeroes = false;
+ btf_dump_one_string(ctx, "foo", 4, "\"foo\"");
+
+	/* compact should have no effect on string output. */
+ opts->compact = false;
+ btf_dump_one_string(ctx, "foo", 4, "\"foo\"");
+
+ /* Non-printable characters come out as hex. */
+ btf_dump_one_string(ctx, "fo\xff", 4, "\"fo\\xff\"");
+ btf_dump_one_string(ctx, "fo\x7", 4, "\"fo\\x07\"");
+
+ /*
+ * Strings that are too long for the specified type ("char[4]")
+	 * should fall back to the default char-array output.
+ */
+ opts->compact = true;
+ btf_dump_one_string(ctx, "abcde", 6, "['a','b','c','d',]");
+
+ /*
+ * Strings that are too short for the specified type ("char[4]")
+ * should work normally.
+ */
+ btf_dump_one_string(ctx, "ab", 3, "\"ab\"");
+
+ /* Non-NUL-terminated arrays don't get printed as strings. */
+ char food[4] = { 'f', 'o', 'o', 'd' };
+ char bye[3] = { 'b', 'y', 'e' };
+
+ btf_dump_one_string(ctx, food, 4, "['f','o','o','d',]");
+ btf_dump_one_string(ctx, bye, 3, "['b','y','e',]");
+
+ /* The embedded NUL should terminate the string. */
+ char embed[4] = { 'f', 'o', '\0', 'd' };
+
+ btf_dump_one_string(ctx, embed, 4, "\"fo\"");
+}
+
+static void test_btf_dump_string_data(void)
+{
+ struct test_ctx t = {};
+ char str[STRSIZE];
+ struct btf_dump *d;
+ DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts);
+ struct btf_dump_string_ctx ctx;
+ int char_id, int_id, array_id;
+
+ if (test_ctx__init(&t))
+ return;
+
+ d = btf_dump__new(t.btf, btf_dump_snprintf, str, NULL);
+ if (!ASSERT_OK_PTR(d, "could not create BTF dump"))
+ return;
+
+ /* Generate BTF for a four-element char array. */
+ char_id = btf__add_int(t.btf, "char", 1, BTF_INT_CHAR);
+ ASSERT_EQ(char_id, 1, "char_id");
+ int_id = btf__add_int(t.btf, "int", 4, BTF_INT_SIGNED);
+ ASSERT_EQ(int_id, 2, "int_id");
+ array_id = btf__add_array(t.btf, int_id, char_id, 4);
+ ASSERT_EQ(array_id, 3, "array_id");
+
+ ctx.btf = t.btf;
+ ctx.d = d;
+ ctx.str = str;
+ ctx.opts = &opts;
+ ctx.array_id = array_id;
+
+ btf_dump_strings(&ctx);
+
+ btf_dump__free(d);
+ test_ctx__free(&t);
+}
+
+static void test_btf_datasec(struct btf *btf, struct btf_dump *d, char *str,
+ const char *name, const char *expected_val,
+ void *data, size_t data_sz)
+{
+ DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts);
+ int ret = 0, cmp;
+ size_t secsize;
+ __s32 type_id;
+
+ opts.compact = true;
+
+ type_id = btf__find_by_name(btf, name);
+ if (!ASSERT_GT(type_id, 0, "find type id"))
+ return;
+
+ secsize = btf__resolve_size(btf, type_id);
+ ASSERT_EQ(secsize, 0, "verify section size");
+
+ str[0] = '\0';
+ ret = btf_dump__dump_type_data(d, type_id, data, data_sz, &opts);
+ ASSERT_EQ(ret, 0, "unexpected return value");
+
+ cmp = strcmp(str, expected_val);
+ ASSERT_EQ(cmp, 0, "ensure expected/actual match");
+}
+
+static void test_btf_dump_datasec_data(char *str)
+{
+ struct btf *btf;
+ char license[4] = "GPL";
+ struct btf_dump *d;
+
+ btf = btf__parse("xdping_kern.bpf.o", NULL);
+ if (!ASSERT_OK_PTR(btf, "xdping_kern.bpf.o BTF not found"))
+ return;
+
+ d = btf_dump__new(btf, btf_dump_snprintf, str, NULL);
+ if (!ASSERT_OK_PTR(d, "could not create BTF dump"))
+ goto out;
+
+ test_btf_datasec(btf, d, str, "license",
+ "SEC(\"license\") char[4] _license = (char[4])['G','P','L',];",
+ license, sizeof(license));
+out:
btf_dump__free(d);
btf__free(btf);
}
void test_btf_dump() {
+ char str[STRSIZE];
+ struct btf_dump *d;
+ struct btf *btf;
int i;
for (i = 0; i < ARRAY_SIZE(btf_dump_test_cases); i++) {
@@ -245,4 +1059,38 @@ void test_btf_dump() {
}
if (test__start_subtest("btf_dump: incremental"))
test_btf_dump_incremental();
+
+ if (test__start_subtest("btf_dump: type_tags"))
+ test_btf_dump_type_tags();
+
+ btf = libbpf_find_kernel_btf();
+ if (!ASSERT_OK_PTR(btf, "no kernel BTF found"))
+ return;
+
+ d = btf_dump__new(btf, btf_dump_snprintf, str, NULL);
+ if (!ASSERT_OK_PTR(d, "could not create BTF dump"))
+ return;
+
+ /* Verify type display for various types. */
+ if (test__start_subtest("btf_dump: int_data"))
+ test_btf_dump_int_data(btf, d, str);
+ if (test__start_subtest("btf_dump: float_data"))
+ test_btf_dump_float_data(btf, d, str);
+ if (test__start_subtest("btf_dump: char_data"))
+ test_btf_dump_char_data(btf, d, str);
+ if (test__start_subtest("btf_dump: typedef_data"))
+ test_btf_dump_typedef_data(btf, d, str);
+ if (test__start_subtest("btf_dump: enum_data"))
+ test_btf_dump_enum_data(btf, d, str);
+ if (test__start_subtest("btf_dump: struct_data"))
+ test_btf_dump_struct_data(btf, d, str);
+ if (test__start_subtest("btf_dump: var_data"))
+ test_btf_dump_var_data(btf, d, str);
+ if (test__start_subtest("btf_dump: string_data"))
+ test_btf_dump_string_data();
+ btf_dump__free(d);
+ btf__free(btf);
+
+ if (test__start_subtest("btf_dump: datasec_data"))
+ test_btf_dump_datasec_data(str);
}
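
As an aside, the dump-type-data flow exercised above is easy to reproduce outside the test harness. The sketch below is illustrative only; the 8 KiB buffer, the snprintf_cb callback, and the dump_one() helper are assumptions, not part of this patch, but the libbpf calls are the same ones the tests use:

#include <stdio.h>
#include <string.h>
#include <bpf/btf.h>

/* Minimal sketch: pretty-print a buffer as a named kernel type via
 * btf_dump__dump_type_data(). Error handling is trimmed for brevity.
 */
static void snprintf_cb(void *ctx, const char *fmt, va_list args)
{
	char *buf = ctx;
	size_t len = strlen(buf);

	vsnprintf(buf + len, 8192 - len, fmt, args);
}

static void dump_one(const char *type_name, const void *data, size_t data_sz)
{
	DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts, .compact = true);
	static char buf[8192];
	struct btf_dump *d;
	struct btf *btf;
	__s32 id;

	btf = libbpf_find_kernel_btf();
	d = btf_dump__new(btf, snprintf_cb, buf, NULL);
	id = btf__find_by_name(btf, type_name);
	buf[0] = '\0';
	if (btf_dump__dump_type_data(d, id, data, data_sz, &opts) > 0)
		printf("%s\n", buf);
	btf_dump__free(d);
	btf__free(btf);
}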
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_endian.c b/tools/testing/selftests/bpf/prog_tests/btf_endian.c
index 8c52d72c876e..5b9f84dbeb43 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_endian.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_endian.c
@@ -6,15 +6,13 @@
#include <test_progs.h>
#include <bpf/btf.h>
-static int duration = 0;
-
void test_btf_endian() {
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
enum btf_endianness endian = BTF_LITTLE_ENDIAN;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
enum btf_endianness endian = BTF_BIG_ENDIAN;
#else
-#error "Unrecognized __BYTE_ORDER"
+#error "Unrecognized __BYTE_ORDER__"
#endif
enum btf_endianness swap_endian = 1 - endian;
struct btf *btf = NULL, *swap_btf = NULL;
@@ -25,7 +23,7 @@ void test_btf_endian() {
int var_id;
/* Load BTF in native endianness */
- btf = btf__parse_elf("btf_dump_test_case_syntax.o", NULL);
+ btf = btf__parse_elf("btf_dump_test_case_syntax.bpf.o", NULL);
if (!ASSERT_OK_PTR(btf, "parse_native_btf"))
goto err_out;
@@ -34,7 +32,7 @@ void test_btf_endian() {
ASSERT_EQ(btf__endianness(btf), swap_endian, "endian");
/* Get raw BTF data in non-native endianness... */
- raw_data = btf__get_raw_data(btf, &raw_sz);
+ raw_data = btf__raw_data(btf, &raw_sz);
if (!ASSERT_OK_PTR(raw_data, "raw_data_inverted"))
goto err_out;
@@ -44,9 +42,9 @@ void test_btf_endian() {
goto err_out;
ASSERT_EQ(btf__endianness(swap_btf), swap_endian, "endian");
- ASSERT_EQ(btf__get_nr_types(swap_btf), btf__get_nr_types(btf), "nr_types");
+ ASSERT_EQ(btf__type_cnt(swap_btf), btf__type_cnt(btf), "nr_types");
- swap_raw_data = btf__get_raw_data(swap_btf, &swap_raw_sz);
+ swap_raw_data = btf__raw_data(swap_btf, &swap_raw_sz);
if (!ASSERT_OK_PTR(swap_raw_data, "swap_raw_data"))
goto err_out;
@@ -60,7 +58,7 @@ void test_btf_endian() {
/* swap it back to native endianness */
btf__set_endianness(swap_btf, endian);
- swap_raw_data = btf__get_raw_data(swap_btf, &swap_raw_sz);
+ swap_raw_data = btf__raw_data(swap_btf, &swap_raw_sz);
if (!ASSERT_OK_PTR(swap_raw_data, "swap_raw_data"))
goto err_out;
@@ -71,13 +69,13 @@ void test_btf_endian() {
/* now modify original BTF */
var_id = btf__add_var(btf, "some_var", BTF_VAR_GLOBAL_ALLOCATED, 1);
- CHECK(var_id <= 0, "var_id", "failed %d\n", var_id);
+ ASSERT_GT(var_id, 0, "var_id");
btf__free(swap_btf);
swap_btf = NULL;
btf__set_endianness(btf, swap_endian);
- raw_data = btf__get_raw_data(btf, &raw_sz);
+ raw_data = btf__raw_data(btf, &raw_sz);
if (!ASSERT_OK_PTR(raw_data, "raw_data_inverted"))
goto err_out;
@@ -87,7 +85,7 @@ void test_btf_endian() {
goto err_out;
ASSERT_EQ(btf__endianness(swap_btf), swap_endian, "endian");
- ASSERT_EQ(btf__get_nr_types(swap_btf), btf__get_nr_types(btf), "nr_types");
+ ASSERT_EQ(btf__type_cnt(swap_btf), btf__type_cnt(btf), "nr_types");
/* the type should appear as if it was stored in native endianness */
t = btf__type_by_id(swap_btf, var_id);
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_field_iter.c b/tools/testing/selftests/bpf/prog_tests/btf_field_iter.c
new file mode 100644
index 000000000000..32159d3eb281
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_field_iter.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Oracle and/or its affiliates. */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "btf_helpers.h"
+#include "bpf/libbpf_internal.h"
+
+struct field_data {
+ __u32 ids[5];
+ const char *strs[5];
+} fields[] = {
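+	/* fields[i] holds the ids and strings expected for BTF type id i
+	 * in the iteration loop below; entry 0 is a placeholder, since
+	 * valid type ids start at 1 (id 0 is the implicit void type).
+	 */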
+ { .ids = {}, .strs = {} },
+ { .ids = {}, .strs = { "int" } },
+ { .ids = {}, .strs = { "int64" } },
+ { .ids = { 1 }, .strs = { "" } },
+ { .ids = { 2, 1 }, .strs = { "" } },
+ { .ids = { 3, 1 }, .strs = { "s1", "f1", "f2" } },
+ { .ids = { 1, 5 }, .strs = { "u1", "f1", "f2" } },
+ { .ids = {}, .strs = { "e1", "v1", "v2" } },
+ { .ids = {}, .strs = { "fw1" } },
+ { .ids = { 1 }, .strs = { "t" } },
+ { .ids = { 2 }, .strs = { "" } },
+ { .ids = { 1 }, .strs = { "" } },
+ { .ids = { 3 }, .strs = { "" } },
+ { .ids = { 1, 1, 3 }, .strs = { "", "p1", "p2" } },
+ { .ids = { 13 }, .strs = { "func" } },
+ { .ids = { 1 }, .strs = { "var1" } },
+ { .ids = { 3 }, .strs = { "var2" } },
+ { .ids = {}, .strs = { "float" } },
+ { .ids = { 11 }, .strs = { "decltag" } },
+ { .ids = { 6 }, .strs = { "typetag" } },
+ { .ids = {}, .strs = { "e64", "eval1", "eval2", "eval3" } },
+ { .ids = { 15, 16 }, .strs = { "datasec1" } }
+
+};
+
+/* Fabricate BTF with various types and check that BTF field iteration
+ * finds the expected type ids and strings.
+ */
+void test_btf_field_iter(void)
+{
+ struct btf *btf = NULL;
+ int id;
+
+ btf = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf, "empty_btf"))
+ return;
+
+ btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ btf__add_int(btf, "int64", 8, BTF_INT_SIGNED); /* [2] int64 */
+ btf__add_ptr(btf, 1); /* [3] int * */
+ btf__add_array(btf, 1, 2, 3); /* [4] int64[3] */
+ btf__add_struct(btf, "s1", 12); /* [5] struct s1 { */
+ btf__add_field(btf, "f1", 3, 0, 0); /* int *f1; */
+ btf__add_field(btf, "f2", 1, 0, 0); /* int f2; */
+ /* } */
+ btf__add_union(btf, "u1", 12); /* [6] union u1 { */
+ btf__add_field(btf, "f1", 1, 0, 0); /* int f1; */
+ btf__add_field(btf, "f2", 5, 0, 0); /* struct s1 f2; */
+ /* } */
+ btf__add_enum(btf, "e1", 4); /* [7] enum e1 { */
+ btf__add_enum_value(btf, "v1", 1); /* v1 = 1; */
+ btf__add_enum_value(btf, "v2", 2); /* v2 = 2; */
+ /* } */
+
+ btf__add_fwd(btf, "fw1", BTF_FWD_STRUCT); /* [8] struct fw1; */
+ btf__add_typedef(btf, "t", 1); /* [9] typedef int t; */
+ btf__add_volatile(btf, 2); /* [10] volatile int64; */
+ btf__add_const(btf, 1); /* [11] const int; */
+ btf__add_restrict(btf, 3); /* [12] restrict int *; */
+ btf__add_func_proto(btf, 1); /* [13] int (*)(int p1, int *p2); */
+ btf__add_func_param(btf, "p1", 1);
+ btf__add_func_param(btf, "p2", 3);
+
+ btf__add_func(btf, "func", BTF_FUNC_GLOBAL, 13);/* [14] int func(int p1, int *p2); */
+ btf__add_var(btf, "var1", BTF_VAR_STATIC, 1); /* [15] static int var1; */
+ btf__add_var(btf, "var2", BTF_VAR_STATIC, 3); /* [16] static int *var2; */
+ btf__add_float(btf, "float", 4); /* [17] float; */
+ btf__add_decl_tag(btf, "decltag", 11, -1); /* [18] decltag const int; */
+ btf__add_type_tag(btf, "typetag", 6); /* [19] typetag union u1; */
+ btf__add_enum64(btf, "e64", 8, true); /* [20] enum { */
+ btf__add_enum64_value(btf, "eval1", 1000); /* eval1 = 1000, */
+ btf__add_enum64_value(btf, "eval2", 2000); /* eval2 = 2000, */
+ btf__add_enum64_value(btf, "eval3", 3000); /* eval3 = 3000 */
+ /* } */
+ btf__add_datasec(btf, "datasec1", 12); /* [21] datasec datasec1 */
+ btf__add_datasec_var_info(btf, 15, 0, 4);
+ btf__add_datasec_var_info(btf, 16, 4, 8);
+
+ VALIDATE_RAW_BTF(
+ btf,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] INT 'int64' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED",
+ "[3] PTR '(anon)' type_id=1",
+ "[4] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=3",
+ "[5] STRUCT 's1' size=12 vlen=2\n"
+ "\t'f1' type_id=3 bits_offset=0\n"
+ "\t'f2' type_id=1 bits_offset=0",
+ "[6] UNION 'u1' size=12 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=5 bits_offset=0",
+ "[7] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
+ "\t'v1' val=1\n"
+ "\t'v2' val=2",
+ "[8] FWD 'fw1' fwd_kind=struct",
+ "[9] TYPEDEF 't' type_id=1",
+ "[10] VOLATILE '(anon)' type_id=2",
+ "[11] CONST '(anon)' type_id=1",
+ "[12] RESTRICT '(anon)' type_id=3",
+ "[13] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n"
+ "\t'p1' type_id=1\n"
+ "\t'p2' type_id=3",
+ "[14] FUNC 'func' type_id=13 linkage=global",
+ "[15] VAR 'var1' type_id=1, linkage=static",
+ "[16] VAR 'var2' type_id=3, linkage=static",
+ "[17] FLOAT 'float' size=4",
+ "[18] DECL_TAG 'decltag' type_id=11 component_idx=-1",
+ "[19] TYPE_TAG 'typetag' type_id=6",
+ "[20] ENUM64 'e64' encoding=SIGNED size=8 vlen=3\n"
+ "\t'eval1' val=1000\n"
+ "\t'eval2' val=2000\n"
+ "\t'eval3' val=3000",
+ "[21] DATASEC 'datasec1' size=12 vlen=2\n"
+ "\ttype_id=15 offset=0 size=4\n"
+ "\ttype_id=16 offset=4 size=8");
+
+ for (id = 1; id < btf__type_cnt(btf); id++) {
+ struct btf_type *t = btf_type_by_id(btf, id);
+ struct btf_field_iter it_strs, it_ids;
+ int str_idx = 0, id_idx = 0;
+ __u32 *next_str, *next_id;
+
+ if (!ASSERT_OK_PTR(t, "btf_type_by_id"))
+ break;
+ if (!ASSERT_OK(btf_field_iter_init(&it_strs, t, BTF_FIELD_ITER_STRS),
+ "iter_init_strs"))
+ break;
+ if (!ASSERT_OK(btf_field_iter_init(&it_ids, t, BTF_FIELD_ITER_IDS),
+ "iter_init_ids"))
+ break;
+ while ((next_str = btf_field_iter_next(&it_strs))) {
+ const char *str = btf__str_by_offset(btf, *next_str);
+
+ if (!ASSERT_OK(strcmp(fields[id].strs[str_idx], str), "field_str_match"))
+ break;
+ str_idx++;
+ }
+ /* ensure no more strings are expected */
+ ASSERT_EQ(fields[id].strs[str_idx], NULL, "field_str_cnt");
+
+ while ((next_id = btf_field_iter_next(&it_ids))) {
+ if (!ASSERT_EQ(*next_id, fields[id].ids[id_idx], "field_id_match"))
+ break;
+ id_idx++;
+ }
+ /* ensure no more ids are expected */
+ ASSERT_EQ(fields[id].ids[id_idx], 0, "field_id_cnt");
+ }
+ btf__free(btf);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
index 76ebe4c250f1..f66ceccd7029 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
@@ -14,51 +14,18 @@ static __u32 bpf_map_id(struct bpf_map *map)
int err;
memset(&info, 0, info_len);
- err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
+ err = bpf_map_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
if (err)
return 0;
return info.id;
}
-/*
- * Trigger synchronize_rcu() in kernel.
- *
- * ARRAY_OF_MAPS/HASH_OF_MAPS lookup/update operations trigger synchronize_rcu()
- * if looking up an existing non-NULL element or updating the map with a valid
- * inner map FD. Use this fact to trigger synchronize_rcu(): create map-in-map,
- * create a trivial ARRAY map, update map-in-map with ARRAY inner map. Then
- * cleanup. At the end, at least one synchronize_rcu() would be called.
- */
-static int kern_sync_rcu(void)
-{
- int inner_map_fd, outer_map_fd, err, zero = 0;
-
- inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);
- if (CHECK(inner_map_fd < 0, "inner_map_create", "failed %d\n", -errno))
- return -1;
-
- outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
- sizeof(int), inner_map_fd, 1, 0);
- if (CHECK(outer_map_fd < 0, "outer_map_create", "failed %d\n", -errno)) {
- close(inner_map_fd);
- return -1;
- }
-
- err = bpf_map_update_elem(outer_map_fd, &zero, &inner_map_fd, 0);
- if (err)
- err = -errno;
- CHECK(err, "outer_map_update", "failed %d\n", err);
- close(inner_map_fd);
- close(outer_map_fd);
- return err;
-}
-
static void test_lookup_update(void)
{
int map1_fd, map2_fd, map3_fd, map4_fd, map5_fd, map1_id, map2_id;
int outer_arr_fd, outer_hash_fd, outer_arr_dyn_fd;
struct test_btf_map_in_map *skel;
- int err, key = 0, val, i, fd;
+ int err, key = 0, val, i;
skel = test_btf_map_in_map__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
@@ -135,30 +102,6 @@ static void test_lookup_update(void)
CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n");
CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n");
- test_btf_map_in_map__destroy(skel);
- skel = NULL;
-
- /* we need to either wait for or force synchronize_rcu(), before
- * checking for "still exists" condition, otherwise map could still be
- * resolvable by ID, causing false positives.
- *
- * Older kernels (5.8 and earlier) freed map only after two
- * synchronize_rcu()s, so trigger two, to be entirely sure.
- */
- CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
- CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
-
- fd = bpf_map_get_fd_by_id(map1_id);
- if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) {
- close(fd);
- goto cleanup;
- }
- fd = bpf_map_get_fd_by_id(map2_id);
- if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) {
- close(fd);
- goto cleanup;
- }
-
cleanup:
test_btf_map_in_map__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_module.c b/tools/testing/selftests/bpf/prog_tests/btf_module.c
new file mode 100644
index 000000000000..2239d1fe0332
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_module.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Hengqi Chen */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+static const char *module_name = "bpf_testmod";
+static const char *symbol_name = "bpf_testmod_test_read";
+
+void test_btf_module()
+{
+ struct btf *vmlinux_btf, *module_btf;
+ __s32 type_id;
+
+ if (!env.has_testmod) {
+ test__skip();
+ return;
+ }
+
+ vmlinux_btf = btf__load_vmlinux_btf();
+ if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF"))
+ return;
+
+ module_btf = btf__load_module_btf(module_name, vmlinux_btf);
+ if (!ASSERT_OK_PTR(module_btf, "could not load module BTF"))
+ goto cleanup;
+
+ type_id = btf__find_by_name(module_btf, symbol_name);
+ ASSERT_GT(type_id, 0, "func not found");
+
+cleanup:
+ btf__free(module_btf);
+ btf__free(vmlinux_btf);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
index 762f6a9da8b5..cf15cc3be491 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
@@ -9,6 +9,7 @@
#include <string.h>
#include <errno.h>
#include <sched.h>
+#include <net/if.h>
#include <linux/compiler.h>
#include <bpf/libbpf.h>
@@ -16,50 +17,37 @@
#include "test_progs.h"
#include "test_btf_skc_cls_ingress.skel.h"
-static struct test_btf_skc_cls_ingress *skel;
-static struct sockaddr_in6 srv_sa6;
-static __u32 duration;
+#define TEST_NS "skc_cls_ingress"
-#define PROG_PIN_FILE "/sys/fs/bpf/btf_skc_cls_ingress"
+#define BIT(n) (1 << (n))
+#define TEST_MODE_IPV4 BIT(0)
+#define TEST_MODE_IPV6 BIT(1)
+#define TEST_MODE_DUAL (TEST_MODE_IPV4 | TEST_MODE_IPV6)
-static int write_sysctl(const char *sysctl, const char *value)
-{
- int fd, err, len;
-
- fd = open(sysctl, O_WRONLY);
- if (CHECK(fd == -1, "open sysctl", "open(%s): %s (%d)\n",
- sysctl, strerror(errno), errno))
- return -1;
-
- len = strlen(value);
- err = write(fd, value, len);
- close(fd);
- if (CHECK(err != len, "write sysctl",
- "write(%s, %s, %d): err:%d %s (%d)\n",
- sysctl, value, len, err, strerror(errno), errno))
- return -1;
-
- return 0;
-}
+#define SERVER_ADDR_IPV4 "127.0.0.1"
+#define SERVER_ADDR_IPV6 "::1"
+#define SERVER_ADDR_DUAL "::0"
+/* RFC 791: 576-byte minimal IPv4 datagram, minus 40 bytes of IP and TCP headers */
+#define MIN_IPV4_MSS 536
-static int prepare_netns(void)
+static struct netns_obj *prepare_netns(struct test_btf_skc_cls_ingress *skel)
{
- if (CHECK(unshare(CLONE_NEWNET), "create netns",
- "unshare(CLONE_NEWNET): %s (%d)",
- strerror(errno), errno))
- return -1;
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_lo, .attach_point = BPF_TC_INGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, tc_attach,
+ .prog_fd = bpf_program__fd(skel->progs.cls_ingress));
+ struct netns_obj *ns = NULL;
- if (CHECK(system("ip link set dev lo up"),
- "ip link set dev lo up", "failed\n"))
- return -1;
+ ns = netns_new(TEST_NS, true);
+ if (!ASSERT_OK_PTR(ns, "create and join netns"))
+ return ns;
- if (CHECK(system("tc qdisc add dev lo clsact"),
- "tc qdisc add dev lo clsact", "failed\n"))
- return -1;
+ qdisc_lo.ifindex = if_nametoindex("lo");
+ if (!ASSERT_OK(bpf_tc_hook_create(&qdisc_lo), "qdisc add dev lo clsact"))
+ goto free_ns;
- if (CHECK(system("tc filter add dev lo ingress bpf direct-action object-pinned " PROG_PIN_FILE),
- "install tc cls-prog at ingress", "failed\n"))
- return -1;
+ if (!ASSERT_OK(bpf_tc_attach(&qdisc_lo, &tc_attach),
+ "filter add dev lo ingress"))
+ goto free_ns;
/* Ensure 20 bytes options (i.e. in total 40 bytes tcp header) for the
* bpf_tcp_gen_syncookie() helper.
@@ -67,168 +55,221 @@ static int prepare_netns(void)
if (write_sysctl("/proc/sys/net/ipv4/tcp_window_scaling", "1") ||
write_sysctl("/proc/sys/net/ipv4/tcp_timestamps", "1") ||
write_sysctl("/proc/sys/net/ipv4/tcp_sack", "1"))
- return -1;
+ goto free_ns;
- return 0;
+ return ns;
+
+free_ns:
+ netns_free(ns);
+ return NULL;
}
-static void reset_test(void)
+static void reset_test(struct test_btf_skc_cls_ingress *skel)
{
+ memset(&skel->bss->srv_sa4, 0, sizeof(skel->bss->srv_sa4));
memset(&skel->bss->srv_sa6, 0, sizeof(skel->bss->srv_sa6));
skel->bss->listen_tp_sport = 0;
skel->bss->req_sk_sport = 0;
skel->bss->recv_cookie = 0;
skel->bss->gen_cookie = 0;
skel->bss->linum = 0;
+ skel->bss->mss = 0;
}
-static void print_err_line(void)
+static void print_err_line(struct test_btf_skc_cls_ingress *skel)
{
if (skel->bss->linum)
printf("bpf prog error at line %u\n", skel->bss->linum);
}
-static void test_conn(void)
+static int v6only_true(int fd, void *opts)
+{
+ int mode = true;
+
+ return setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &mode, sizeof(mode));
+}
+
+static int v6only_false(int fd, void *opts)
{
- int listen_fd = -1, cli_fd = -1, err;
- socklen_t addrlen = sizeof(srv_sa6);
+ int mode = false;
+
+ return setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &mode, sizeof(mode));
+}
+
+static void run_test(struct test_btf_skc_cls_ingress *skel, bool gen_cookies,
+ int ip_mode)
+{
+ const char *tcp_syncookies = gen_cookies ? "2" : "1";
+ int listen_fd = -1, cli_fd = -1, srv_fd = -1, err;
+ struct network_helper_opts opts = { 0 };
+ struct sockaddr_storage *addr;
+ struct sockaddr_in6 srv_sa6;
+ struct sockaddr_in srv_sa4;
+ socklen_t addr_len;
+ int sock_family;
+ char *srv_addr;
int srv_port;
- if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
+ switch (ip_mode) {
+ case TEST_MODE_IPV4:
+ sock_family = AF_INET;
+ srv_addr = SERVER_ADDR_IPV4;
+ addr = (struct sockaddr_storage *)&srv_sa4;
+ addr_len = sizeof(srv_sa4);
+ break;
+ case TEST_MODE_IPV6:
+ opts.post_socket_cb = v6only_true;
+ sock_family = AF_INET6;
+ srv_addr = SERVER_ADDR_IPV6;
+ addr = (struct sockaddr_storage *)&srv_sa6;
+ addr_len = sizeof(srv_sa6);
+ break;
+ case TEST_MODE_DUAL:
+ opts.post_socket_cb = v6only_false;
+ sock_family = AF_INET6;
+ srv_addr = SERVER_ADDR_DUAL;
+ addr = (struct sockaddr_storage *)&srv_sa6;
+ addr_len = sizeof(srv_sa6);
+ break;
+ default:
+ PRINT_FAIL("Unknown IP mode %d", ip_mode);
return;
+ }
- listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
- if (CHECK_FAIL(listen_fd == -1))
+ if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", tcp_syncookies))
return;
- err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
- if (CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d\n", err,
- errno))
+ listen_fd = start_server_str(sock_family, SOCK_STREAM, srv_addr, 0,
+ &opts);
+ if (!ASSERT_OK_FD(listen_fd, "start server"))
+ return;
+
+ err = getsockname(listen_fd, (struct sockaddr *)addr, &addr_len);
+ if (!ASSERT_OK(err, "getsockname(listen_fd)"))
goto done;
- memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6));
- srv_port = ntohs(srv_sa6.sin6_port);
- cli_fd = connect_to_fd(listen_fd, 0);
- if (CHECK_FAIL(cli_fd == -1))
+ switch (ip_mode) {
+ case TEST_MODE_IPV4:
+ memcpy(&skel->bss->srv_sa4, &srv_sa4, sizeof(srv_sa4));
+ srv_port = ntohs(srv_sa4.sin_port);
+ break;
+ case TEST_MODE_IPV6:
+ case TEST_MODE_DUAL:
+ memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6));
+ srv_port = ntohs(srv_sa6.sin6_port);
+ break;
+ default:
goto done;
+ }
- if (CHECK(skel->bss->listen_tp_sport != srv_port ||
- skel->bss->req_sk_sport != srv_port,
- "Unexpected sk src port",
- "listen_tp_sport:%u req_sk_sport:%u expected:%u\n",
- skel->bss->listen_tp_sport, skel->bss->req_sk_sport,
- srv_port))
+ cli_fd = connect_to_fd(listen_fd, 0);
+ if (!ASSERT_OK_FD(cli_fd, "connect client"))
goto done;
- if (CHECK(skel->bss->gen_cookie || skel->bss->recv_cookie,
- "Unexpected syncookie states",
- "gen_cookie:%u recv_cookie:%u\n",
- skel->bss->gen_cookie, skel->bss->recv_cookie))
+ srv_fd = accept(listen_fd, NULL, NULL);
+ if (!ASSERT_OK_FD(srv_fd, "accept connection"))
goto done;
- CHECK(skel->bss->linum, "bpf prog detected error", "at line %u\n",
- skel->bss->linum);
+ ASSERT_EQ(skel->bss->listen_tp_sport, srv_port, "listen tp src port");
+
+ if (!gen_cookies) {
+ ASSERT_EQ(skel->bss->req_sk_sport, srv_port,
+ "request socket source port with syncookies disabled");
+ ASSERT_EQ(skel->bss->gen_cookie, 0,
+ "generated syncookie with syncookies disabled");
+ ASSERT_EQ(skel->bss->recv_cookie, 0,
+ "received syncookie with syncookies disabled");
+ } else {
+ ASSERT_EQ(skel->bss->req_sk_sport, 0,
+ "request socket source port with syncookies enabled");
+ ASSERT_NEQ(skel->bss->gen_cookie, 0,
+ "syncookie properly generated");
+ ASSERT_EQ(skel->bss->gen_cookie, skel->bss->recv_cookie,
+ "matching syncookies on client and server");
+ ASSERT_GT(skel->bss->mss, MIN_IPV4_MSS,
+ "MSS in cookie min value");
+ ASSERT_LT(skel->bss->mss, USHRT_MAX,
+ "MSS in cookie max value");
+ }
done:
if (listen_fd != -1)
close(listen_fd);
if (cli_fd != -1)
close(cli_fd);
+ if (srv_fd != -1)
+ close(srv_fd);
}
-static void test_syncookie(void)
+static void test_conn_ipv4(struct test_btf_skc_cls_ingress *skel)
{
- int listen_fd = -1, cli_fd = -1, err;
- socklen_t addrlen = sizeof(srv_sa6);
- int srv_port;
-
- /* Enforce syncookie mode */
- if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "2"))
- return;
-
- listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
- if (CHECK_FAIL(listen_fd == -1))
- return;
-
- err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
- if (CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d\n", err,
- errno))
- goto done;
- memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6));
- srv_port = ntohs(srv_sa6.sin6_port);
-
- cli_fd = connect_to_fd(listen_fd, 0);
- if (CHECK_FAIL(cli_fd == -1))
- goto done;
+ run_test(skel, false, TEST_MODE_IPV4);
+}
- if (CHECK(skel->bss->listen_tp_sport != srv_port,
- "Unexpected tp src port",
- "listen_tp_sport:%u expected:%u\n",
- skel->bss->listen_tp_sport, srv_port))
- goto done;
+static void test_conn_ipv6(struct test_btf_skc_cls_ingress *skel)
+{
+ run_test(skel, false, TEST_MODE_IPV6);
+}
- if (CHECK(skel->bss->req_sk_sport,
- "Unexpected req_sk src port",
- "req_sk_sport:%u expected:0\n",
- skel->bss->req_sk_sport))
- goto done;
+static void test_conn_dual(struct test_btf_skc_cls_ingress *skel)
+{
+ run_test(skel, false, TEST_MODE_DUAL);
+}
- if (CHECK(!skel->bss->gen_cookie ||
- skel->bss->gen_cookie != skel->bss->recv_cookie,
- "Unexpected syncookie states",
- "gen_cookie:%u recv_cookie:%u\n",
- skel->bss->gen_cookie, skel->bss->recv_cookie))
- goto done;
+static void test_syncookie_ipv4(struct test_btf_skc_cls_ingress *skel)
+{
+ run_test(skel, true, TEST_MODE_IPV4);
+}
- CHECK(skel->bss->linum, "bpf prog detected error", "at line %u\n",
- skel->bss->linum);
+static void test_syncookie_ipv6(struct test_btf_skc_cls_ingress *skel)
+{
+ run_test(skel, true, TEST_MODE_IPV6);
+}
-done:
- if (listen_fd != -1)
- close(listen_fd);
- if (cli_fd != -1)
- close(cli_fd);
+static void test_syncookie_dual(struct test_btf_skc_cls_ingress *skel)
+{
+ run_test(skel, true, TEST_MODE_DUAL);
}
struct test {
const char *desc;
- void (*run)(void);
+ void (*run)(struct test_btf_skc_cls_ingress *skel);
};
#define DEF_TEST(name) { #name, test_##name }
static struct test tests[] = {
- DEF_TEST(conn),
- DEF_TEST(syncookie),
+ DEF_TEST(conn_ipv4),
+ DEF_TEST(conn_ipv6),
+ DEF_TEST(conn_dual),
+ DEF_TEST(syncookie_ipv4),
+ DEF_TEST(syncookie_ipv6),
+ DEF_TEST(syncookie_dual),
};
void test_btf_skc_cls_ingress(void)
{
- int i, err;
+ struct test_btf_skc_cls_ingress *skel;
+ struct netns_obj *ns;
+ int i;
skel = test_btf_skc_cls_ingress__open_and_load();
- if (CHECK(!skel, "test_btf_skc_cls_ingress__open_and_load", "failed\n"))
+ if (!ASSERT_OK_PTR(skel, "test_btf_skc_cls_ingress__open_and_load"))
return;
- err = bpf_program__pin(skel->progs.cls_ingress, PROG_PIN_FILE);
- if (CHECK(err, "bpf_program__pin",
- "cannot pin bpf prog to %s. err:%d\n", PROG_PIN_FILE, err)) {
- test_btf_skc_cls_ingress__destroy(skel);
- return;
- }
-
for (i = 0; i < ARRAY_SIZE(tests); i++) {
if (!test__start_subtest(tests[i].desc))
continue;
- if (prepare_netns())
+ ns = prepare_netns(skel);
+ if (!ns)
break;
- tests[i].run();
+ tests[i].run(skel);
- print_err_line();
- reset_test();
+ print_err_line(skel);
+ reset_test(skel);
+ netns_free(ns);
}
- bpf_program__unpin(skel->progs.cls_ingress, PROG_PIN_FILE);
test_btf_skc_cls_ingress__destroy(skel);
}
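
The conversion above replaces shelling out to ip(8) and tc(8) with libbpf's TC hook API. For readers unfamiliar with that API, here is a condensed sketch of the same attach pattern; the attach_tc_ingress() helper and its hard-coded "lo" interface are illustrative assumptions, not code from this patch:

#include <errno.h>
#include <net/if.h>
#include <bpf/libbpf.h>

/* Sketch: attach an already-loaded SCHED_CLS program at TC ingress,
 * mirroring the bpf_tc_hook_create()/bpf_tc_attach() calls in
 * prepare_netns() above.
 */
static int attach_tc_ingress(int prog_fd)
{
	LIBBPF_OPTS(bpf_tc_hook, hook, .attach_point = BPF_TC_INGRESS);
	LIBBPF_OPTS(bpf_tc_opts, opts, .prog_fd = prog_fd);
	int err;

	hook.ifindex = if_nametoindex("lo");
	if (!hook.ifindex)
		return -errno;

	/* creates the clsact qdisc; it may already exist */
	err = bpf_tc_hook_create(&hook);
	if (err && err != -EEXIST)
		return err;

	return bpf_tc_attach(&hook, &opts);
}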
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_split.c b/tools/testing/selftests/bpf/prog_tests/btf_split.c
index ca7c2a91610a..2d47cad50a51 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_split.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_split.c
@@ -12,11 +12,45 @@ static void btf_dump_printf(void *ctx, const char *fmt, va_list args)
vfprintf(ctx, fmt, args);
}
-void test_btf_split() {
- struct btf_dump_opts opts;
+/* Write raw BTF to file, return number of bytes written or negative errno */
+static ssize_t btf_raw_write(struct btf *btf, char *file)
+{
+ ssize_t written = 0;
+ const void *data;
+ __u32 size = 0;
+ int fd, ret;
+
+ fd = mkstemp(file);
+ if (!ASSERT_GE(fd, 0, "create_file"))
+ return -errno;
+
+ data = btf__raw_data(btf, &size);
+ if (!ASSERT_OK_PTR(data, "btf__raw_data")) {
+ close(fd);
+ return -EINVAL;
+ }
+ while (written < size) {
+ ret = write(fd, data + written, size - written);
+ if (!ASSERT_GE(ret, 0, "write succeeded")) {
+ close(fd);
+ return -errno;
+ }
+ written += ret;
+ }
+ close(fd);
+ return written;
+}
+
+static void __test_btf_split(bool multi)
+{
+ char multisplit_btf_file[] = "/tmp/test_btf_multisplit.XXXXXX";
+ char split_btf_file[] = "/tmp/test_btf_split.XXXXXX";
+ char base_btf_file[] = "/tmp/test_btf_base.XXXXXX";
+ ssize_t multisplit_btf_sz = 0, split_btf_sz = 0, base_btf_sz = 0;
struct btf_dump *d = NULL;
- const struct btf_type *t;
- struct btf *btf1, *btf2;
+ const struct btf_type *t, *ot;
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL;
+ struct btf *btf4 = NULL, *btf5 = NULL, *btf6 = NULL;
int str_off, i, err;
btf1 = btf__new_empty();
@@ -64,15 +98,46 @@ void test_btf_split() {
ASSERT_EQ(btf_vlen(t), 3, "split_struct_vlen");
ASSERT_STREQ(btf__str_by_offset(btf2, t->name_off), "s2", "split_struct_name");
+ if (multi) {
+ btf3 = btf__new_empty_split(btf2);
+ if (!ASSERT_OK_PTR(btf3, "multi_split_btf"))
+ goto cleanup;
+ } else {
+ btf3 = btf2;
+ }
+
+ btf__add_union(btf3, "u1", 16); /* [5] union u1 { */
+ btf__add_field(btf3, "f1", 4, 0, 0); /* struct s2 f1; */
+	btf__add_field(btf3, "uf2", 1, 0, 0);	/*	int uf2;	*/
+ /* } */
+
+ if (multi) {
+ t = btf__type_by_id(btf2, 5);
+ ASSERT_NULL(t, "multisplit_type_in_first_split");
+ }
+
+ t = btf__type_by_id(btf3, 5);
+ if (!ASSERT_OK_PTR(t, "split_union_type"))
+ goto cleanup;
+ ASSERT_EQ(btf_is_union(t), true, "split_union_kind");
+ ASSERT_EQ(btf_vlen(t), 2, "split_union_vlen");
+ ASSERT_STREQ(btf__str_by_offset(btf3, t->name_off), "u1", "split_union_name");
+ ASSERT_EQ(btf__type_cnt(btf3), 6, "split_type_cnt");
+
+ t = btf__type_by_id(btf3, 1);
+ if (!ASSERT_OK_PTR(t, "split_base_type"))
+ goto cleanup;
+ ASSERT_EQ(btf_is_int(t), true, "split_base_int");
+ ASSERT_STREQ(btf__str_by_offset(btf3, t->name_off), "int", "split_base_type_name");
+
/* BTF-to-C dump of split BTF */
dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz);
if (!ASSERT_OK_PTR(dump_buf_file, "dump_memstream"))
return;
- opts.ctx = dump_buf_file;
- d = btf_dump__new(btf2, NULL, &opts, btf_dump_printf);
+ d = btf_dump__new(btf3, btf_dump_printf, dump_buf_file, NULL);
if (!ASSERT_OK_PTR(d, "btf_dump__new"))
goto cleanup;
- for (i = 1; i <= btf__get_nr_types(btf2); i++) {
+ for (i = 1; i < btf__type_cnt(btf3); i++) {
err = btf_dump__dump_type(d, i);
ASSERT_OK(err, "dump_type_ok");
}
@@ -81,14 +146,56 @@ void test_btf_split() {
ASSERT_STREQ(dump_buf,
"struct s1 {\n"
" int f1;\n"
-"};\n"
-"\n"
+"};\n\n"
"struct s2 {\n"
" struct s1 f1;\n"
" int f2;\n"
" int *f3;\n"
+"};\n\n"
+"union u1 {\n"
+" struct s2 f1;\n"
+" int uf2;\n"
"};\n\n", "c_dump");
+ /* write base, split BTFs to files and ensure parsing succeeds */
+ base_btf_sz = btf_raw_write(btf1, base_btf_file);
+ if (base_btf_sz < 0)
+ goto cleanup;
+ split_btf_sz = btf_raw_write(btf2, split_btf_file);
+ if (split_btf_sz < 0)
+ goto cleanup;
+ btf4 = btf__parse(base_btf_file, NULL);
+ if (!ASSERT_OK_PTR(btf4, "parse_base"))
+ goto cleanup;
+ btf5 = btf__parse_split(split_btf_file, btf4);
+ if (!ASSERT_OK_PTR(btf5, "parse_split"))
+ goto cleanup;
+ if (multi) {
+ multisplit_btf_sz = btf_raw_write(btf3, multisplit_btf_file);
+ if (multisplit_btf_sz < 0)
+ goto cleanup;
+ btf6 = btf__parse_split(multisplit_btf_file, btf5);
+ if (!ASSERT_OK_PTR(btf6, "parse_multisplit"))
+ goto cleanup;
+ } else {
+ btf6 = btf5;
+ }
+
+ if (!ASSERT_EQ(btf__type_cnt(btf3), btf__type_cnt(btf6), "cmp_type_cnt"))
+ goto cleanup;
+
+ /* compare parsed to original BTF */
+ for (i = 1; i < btf__type_cnt(btf6); i++) {
+ t = btf__type_by_id(btf6, i);
+ if (!ASSERT_OK_PTR(t, "type_in_parsed_btf"))
+ goto cleanup;
+ ot = btf__type_by_id(btf3, i);
+ if (!ASSERT_OK_PTR(ot, "type_in_orig_btf"))
+ goto cleanup;
+ if (!ASSERT_EQ(memcmp(t, ot, sizeof(*ot)), 0, "cmp_parsed_orig_btf"))
+ goto cleanup;
+ }
+
cleanup:
if (dump_buf_file)
fclose(dump_buf_file);
@@ -96,4 +203,24 @@ cleanup:
btf_dump__free(d);
btf__free(btf1);
btf__free(btf2);
+ if (btf2 != btf3)
+ btf__free(btf3);
+ btf__free(btf4);
+ btf__free(btf5);
+ if (btf5 != btf6)
+ btf__free(btf6);
+ if (base_btf_sz > 0)
+ unlink(base_btf_file);
+ if (split_btf_sz > 0)
+ unlink(split_btf_file);
+ if (multisplit_btf_sz > 0)
+ unlink(multisplit_btf_file);
+}
+
+void test_btf_split(void)
+{
+ if (test__start_subtest("single_split"))
+ __test_btf_split(false);
+ if (test__start_subtest("multi_split"))
+ __test_btf_split(true);
}
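
Outside the selftest, the same parse path handles kernel module BTF, which is shipped split on top of vmlinux BTF. A hedged usage sketch follows; the paths assume a kernel built with CONFIG_DEBUG_INFO_BTF and the bpf_testmod module loaded, and load_module_split_btf() is an illustrative helper, not part of this patch:

#include <bpf/btf.h>

/* Sketch: parse base (vmlinux) BTF, then a module's split BTF on top
 * of it. Types in the split BTF may reference base type ids, so the
 * split object must be freed before its base.
 */
static struct btf *load_module_split_btf(struct btf **base_out)
{
	struct btf *base, *split;

	base = btf__parse("/sys/kernel/btf/vmlinux", NULL);
	if (!base)
		return NULL;

	split = btf__parse_split("/sys/kernel/btf/bpf_testmod", base);
	if (!split) {
		btf__free(base);
		return NULL;
	}

	*base_out = base;
	return split;
}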
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_sysfs.c b/tools/testing/selftests/bpf/prog_tests/btf_sysfs.c
new file mode 100644
index 000000000000..3923e64c4c1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_sysfs.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2025 Isovalent */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+static void test_btf_mmap_sysfs(const char *path, struct btf *base)
+{
+ struct stat st;
+ __u64 btf_size, end;
+ void *raw_data = NULL;
+ int fd = -1;
+ long page_size;
+ struct btf *btf = NULL;
+
+ page_size = sysconf(_SC_PAGESIZE);
+ if (!ASSERT_GE(page_size, 0, "get_page_size"))
+ goto cleanup;
+
+ if (!ASSERT_OK(stat(path, &st), "stat_btf"))
+ goto cleanup;
+
+ btf_size = st.st_size;
+ end = (btf_size + page_size - 1) / page_size * page_size;
+
+ fd = open(path, O_RDONLY);
+ if (!ASSERT_GE(fd, 0, "open_btf"))
+ goto cleanup;
+
+ raw_data = mmap(NULL, btf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+ if (!ASSERT_EQ(raw_data, MAP_FAILED, "mmap_btf_writable"))
+ goto cleanup;
+
+ raw_data = mmap(NULL, btf_size, PROT_READ, MAP_SHARED, fd, 0);
+ if (!ASSERT_EQ(raw_data, MAP_FAILED, "mmap_btf_shared"))
+ goto cleanup;
+
+ raw_data = mmap(NULL, end + 1, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (!ASSERT_EQ(raw_data, MAP_FAILED, "mmap_btf_invalid_size"))
+ goto cleanup;
+
+ raw_data = mmap(NULL, end, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (!ASSERT_OK_PTR(raw_data, "mmap_btf"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(mprotect(raw_data, btf_size, PROT_READ | PROT_WRITE), -1,
+ "mprotect_writable"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(mprotect(raw_data, btf_size, PROT_READ | PROT_EXEC), -1,
+ "mprotect_executable"))
+ goto cleanup;
+
+ /* Check padding is zeroed */
+ for (int i = btf_size; i < end; i++) {
+ if (((__u8 *)raw_data)[i] != 0) {
+ PRINT_FAIL("tail of BTF is not zero at page offset %d\n", i);
+ goto cleanup;
+ }
+ }
+
+ btf = btf__new_split(raw_data, btf_size, base);
+ if (!ASSERT_OK_PTR(btf, "parse_btf"))
+ goto cleanup;
+
+cleanup:
+ btf__free(btf);
+ if (raw_data && raw_data != MAP_FAILED)
+ munmap(raw_data, btf_size);
+ if (fd >= 0)
+ close(fd);
+}
+
+void test_btf_sysfs(void)
+{
+ test_btf_mmap_sysfs("/sys/kernel/btf/vmlinux", NULL);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_tag.c b/tools/testing/selftests/bpf/prog_tests/btf_tag.c
new file mode 100644
index 000000000000..071430cd54de
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_tag.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "test_btf_decl_tag.skel.h"
+
+/* struct btf_type_tag_test is referenced in btf_type_tag.skel.h */
+struct btf_type_tag_test {
+ int **p;
+};
+#include "btf_type_tag.skel.h"
+#include "btf_type_tag_user.skel.h"
+#include "btf_type_tag_percpu.skel.h"
+
+static void test_btf_decl_tag(void)
+{
+ struct test_btf_decl_tag *skel;
+
+ skel = test_btf_decl_tag__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "btf_decl_tag"))
+ return;
+
+ if (skel->rodata->skip_tests) {
+ printf("%s:SKIP: btf_decl_tag attribute not supported", __func__);
+ test__skip();
+ }
+
+ test_btf_decl_tag__destroy(skel);
+}
+
+static void test_btf_type_tag(void)
+{
+ struct btf_type_tag *skel;
+
+ skel = btf_type_tag__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "btf_type_tag"))
+ return;
+
+ if (skel->rodata->skip_tests) {
+ printf("%s:SKIP: btf_type_tag attribute not supported", __func__);
+ test__skip();
+ }
+
+ btf_type_tag__destroy(skel);
+}
+
+/* Loads vmlinux_btf as well as module_btf. If the caller passes NULL as
+ * module_btf, module BTF is not loaded.
+ *
+ * Returns 0 on success.
+ * Returns -1 on error. In that case, any loaded BTF is freed and the
+ * output pointers are set to NULL.
+ */
+static int load_btfs(struct btf **vmlinux_btf, struct btf **module_btf,
+ bool needs_vmlinux_tag)
+{
+ const char *module_name = "bpf_testmod";
+ __s32 type_id;
+
+ if (!env.has_testmod) {
+ test__skip();
+ return -1;
+ }
+
+ *vmlinux_btf = btf__load_vmlinux_btf();
+ if (!ASSERT_OK_PTR(*vmlinux_btf, "could not load vmlinux BTF"))
+ return -1;
+
+ if (!needs_vmlinux_tag)
+ goto load_module_btf;
+
+ /* skip the test if the vmlinux does not have __user tags */
+ type_id = btf__find_by_name_kind(*vmlinux_btf, "user", BTF_KIND_TYPE_TAG);
+ if (type_id <= 0) {
+ printf("%s:SKIP: btf_type_tag attribute not in vmlinux btf", __func__);
+ test__skip();
+ goto free_vmlinux_btf;
+ }
+
+load_module_btf:
+ /* skip loading module_btf, if not requested by caller */
+ if (!module_btf)
+ return 0;
+
+ *module_btf = btf__load_module_btf(module_name, *vmlinux_btf);
+ if (!ASSERT_OK_PTR(*module_btf, "could not load module BTF"))
+ goto free_vmlinux_btf;
+
+ /* skip the test if the module does not have __user tags */
+ type_id = btf__find_by_name_kind(*module_btf, "user", BTF_KIND_TYPE_TAG);
+ if (type_id <= 0) {
+ printf("%s:SKIP: btf_type_tag attribute not in %s", __func__, module_name);
+ test__skip();
+ goto free_module_btf;
+ }
+
+ return 0;
+
+free_module_btf:
+ btf__free(*module_btf);
+free_vmlinux_btf:
+ btf__free(*vmlinux_btf);
+
+ *vmlinux_btf = NULL;
+ if (module_btf)
+ *module_btf = NULL;
+ return -1;
+}
+
+static void test_btf_type_tag_mod_user(bool load_test_user1)
+{
+ struct btf *vmlinux_btf = NULL, *module_btf = NULL;
+ struct btf_type_tag_user *skel;
+ int err;
+
+ if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false))
+ return;
+
+ skel = btf_type_tag_user__open();
+ if (!ASSERT_OK_PTR(skel, "btf_type_tag_user"))
+ goto cleanup;
+
+ bpf_program__set_autoload(skel->progs.test_sys_getsockname, false);
+ if (load_test_user1)
+ bpf_program__set_autoload(skel->progs.test_user2, false);
+ else
+ bpf_program__set_autoload(skel->progs.test_user1, false);
+
+ err = btf_type_tag_user__load(skel);
+ ASSERT_ERR(err, "btf_type_tag_user");
+
+ btf_type_tag_user__destroy(skel);
+
+cleanup:
+ btf__free(module_btf);
+ btf__free(vmlinux_btf);
+}
+
+static void test_btf_type_tag_vmlinux_user(void)
+{
+ struct btf_type_tag_user *skel;
+ struct btf *vmlinux_btf = NULL;
+ int err;
+
+ if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true))
+ return;
+
+ skel = btf_type_tag_user__open();
+ if (!ASSERT_OK_PTR(skel, "btf_type_tag_user"))
+ goto cleanup;
+
+ bpf_program__set_autoload(skel->progs.test_user2, false);
+ bpf_program__set_autoload(skel->progs.test_user1, false);
+
+ err = btf_type_tag_user__load(skel);
+ ASSERT_ERR(err, "btf_type_tag_user");
+
+ btf_type_tag_user__destroy(skel);
+
+cleanup:
+ btf__free(vmlinux_btf);
+}
+
+static void test_btf_type_tag_mod_percpu(bool load_test_percpu1)
+{
+ struct btf *vmlinux_btf, *module_btf;
+ struct btf_type_tag_percpu *skel;
+ int err;
+
+ if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false))
+ return;
+
+ skel = btf_type_tag_percpu__open();
+ if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu"))
+ goto cleanup;
+
+ bpf_program__set_autoload(skel->progs.test_percpu_load, false);
+ bpf_program__set_autoload(skel->progs.test_percpu_helper, false);
+ if (load_test_percpu1)
+ bpf_program__set_autoload(skel->progs.test_percpu2, false);
+ else
+ bpf_program__set_autoload(skel->progs.test_percpu1, false);
+
+ err = btf_type_tag_percpu__load(skel);
+ ASSERT_ERR(err, "btf_type_tag_percpu");
+
+ btf_type_tag_percpu__destroy(skel);
+
+cleanup:
+ btf__free(module_btf);
+ btf__free(vmlinux_btf);
+}
+
+static void test_btf_type_tag_vmlinux_percpu(bool load_test)
+{
+ struct btf_type_tag_percpu *skel;
+ struct btf *vmlinux_btf = NULL;
+ int err;
+
+ if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true))
+ return;
+
+ skel = btf_type_tag_percpu__open();
+ if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu"))
+ goto cleanup;
+
+ bpf_program__set_autoload(skel->progs.test_percpu2, false);
+ bpf_program__set_autoload(skel->progs.test_percpu1, false);
+ if (load_test) {
+ bpf_program__set_autoload(skel->progs.test_percpu_helper, false);
+
+ err = btf_type_tag_percpu__load(skel);
+ ASSERT_ERR(err, "btf_type_tag_percpu_load");
+ } else {
+ bpf_program__set_autoload(skel->progs.test_percpu_load, false);
+
+ err = btf_type_tag_percpu__load(skel);
+ ASSERT_OK(err, "btf_type_tag_percpu_helper");
+ }
+
+ btf_type_tag_percpu__destroy(skel);
+
+cleanup:
+ btf__free(vmlinux_btf);
+}
+
+void test_btf_tag(void)
+{
+ if (test__start_subtest("btf_decl_tag"))
+ test_btf_decl_tag();
+ if (test__start_subtest("btf_type_tag"))
+ test_btf_type_tag();
+
+ if (test__start_subtest("btf_type_tag_user_mod1"))
+ test_btf_type_tag_mod_user(true);
+ if (test__start_subtest("btf_type_tag_user_mod2"))
+ test_btf_type_tag_mod_user(false);
+ if (test__start_subtest("btf_type_tag_sys_user_vmlinux"))
+ test_btf_type_tag_vmlinux_user();
+
+ if (test__start_subtest("btf_type_tag_percpu_mod1"))
+ test_btf_type_tag_mod_percpu(true);
+ if (test__start_subtest("btf_type_tag_percpu_mod2"))
+ test_btf_type_tag_mod_percpu(false);
+ if (test__start_subtest("btf_type_tag_percpu_vmlinux_load"))
+ test_btf_type_tag_vmlinux_percpu(true);
+ if (test__start_subtest("btf_type_tag_percpu_vmlinux_helper"))
+ test_btf_type_tag_vmlinux_percpu(false);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_write.c b/tools/testing/selftests/bpf/prog_tests/btf_write.c
index f36da15b134f..6e36de1302fc 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_write.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_write.c
@@ -4,21 +4,16 @@
#include <bpf/btf.h>
#include "btf_helpers.h"
-static int duration = 0;
-
-void test_btf_write() {
+static void gen_btf(struct btf *btf)
+{
const struct btf_var_secinfo *vi;
const struct btf_type *t;
const struct btf_member *m;
+ const struct btf_enum64 *v64;
const struct btf_enum *v;
const struct btf_param *p;
- struct btf *btf;
int id, err, str_off;
- btf = btf__new_empty();
- if (CHECK(IS_ERR(btf), "new_empty", "failed: %ld\n", PTR_ERR(btf)))
- return;
-
str_off = btf__find_str(btf, "int");
ASSERT_EQ(str_off, -ENOENT, "int_str_missing_off");
@@ -177,7 +172,7 @@ void test_btf_write() {
ASSERT_STREQ(btf__str_by_offset(btf, v->name_off), "v2", "v2_name");
ASSERT_EQ(v->val, 2, "v2_val");
ASSERT_STREQ(btf_type_raw_dump(btf, 9),
- "[9] ENUM 'e1' size=4 vlen=2\n"
+ "[9] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
"\t'v1' val=1\n"
"\t'v2' val=2", "raw_dump");
@@ -208,7 +203,7 @@ void test_btf_write() {
ASSERT_EQ(btf_vlen(t), 0, "enum_fwd_kind");
ASSERT_EQ(t->size, 4, "enum_fwd_sz");
ASSERT_STREQ(btf_type_raw_dump(btf, 12),
- "[12] ENUM 'enum_fwd' size=4 vlen=0", "raw_dump");
+ "[12] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0", "raw_dump");
/* TYPEDEF */
id = btf__add_typedef(btf, "typedef1", 1);
@@ -283,5 +278,229 @@ void test_btf_write() {
"[17] DATASEC 'datasec1' size=12 vlen=1\n"
"\ttype_id=1 offset=4 size=8", "raw_dump");
+ /* DECL_TAG */
+ id = btf__add_decl_tag(btf, "tag1", 16, -1);
+ ASSERT_EQ(id, 18, "tag_id");
+ t = btf__type_by_id(btf, 18);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag1", "tag_value");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_DECL_TAG, "tag_kind");
+ ASSERT_EQ(t->type, 16, "tag_type");
+ ASSERT_EQ(btf_decl_tag(t)->component_idx, -1, "tag_component_idx");
+ ASSERT_STREQ(btf_type_raw_dump(btf, 18),
+ "[18] DECL_TAG 'tag1' type_id=16 component_idx=-1", "raw_dump");
+
+ id = btf__add_decl_tag(btf, "tag2", 14, 1);
+ ASSERT_EQ(id, 19, "tag_id");
+ t = btf__type_by_id(btf, 19);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag2", "tag_value");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_DECL_TAG, "tag_kind");
+ ASSERT_EQ(t->type, 14, "tag_type");
+ ASSERT_EQ(btf_decl_tag(t)->component_idx, 1, "tag_component_idx");
+ ASSERT_STREQ(btf_type_raw_dump(btf, 19),
+ "[19] DECL_TAG 'tag2' type_id=14 component_idx=1", "raw_dump");
+
+ /* TYPE_TAG */
+ id = btf__add_type_tag(btf, "tag1", 1);
+ ASSERT_EQ(id, 20, "tag_id");
+ t = btf__type_by_id(btf, 20);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag1", "tag_value");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_TYPE_TAG, "tag_kind");
+ ASSERT_EQ(t->type, 1, "tag_type");
+ ASSERT_STREQ(btf_type_raw_dump(btf, 20),
+ "[20] TYPE_TAG 'tag1' type_id=1", "raw_dump");
+
+ /* ENUM64 */
+ id = btf__add_enum64(btf, "e1", 8, true);
+ ASSERT_EQ(id, 21, "enum64_id");
+ err = btf__add_enum64_value(btf, "v1", -1);
+ ASSERT_OK(err, "v1_res");
+ err = btf__add_enum64_value(btf, "v2", 0x123456789); /* 4886718345 */
+ ASSERT_OK(err, "v2_res");
+ t = btf__type_by_id(btf, 21);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "e1", "enum64_name");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM64, "enum64_kind");
+ ASSERT_EQ(btf_vlen(t), 2, "enum64_vlen");
+ ASSERT_EQ(t->size, 8, "enum64_sz");
+ v64 = btf_enum64(t) + 0;
+ ASSERT_STREQ(btf__str_by_offset(btf, v64->name_off), "v1", "v1_name");
+ ASSERT_EQ(v64->val_hi32, 0xffffffff, "v1_val");
+ ASSERT_EQ(v64->val_lo32, 0xffffffff, "v1_val");
+ v64 = btf_enum64(t) + 1;
+ ASSERT_STREQ(btf__str_by_offset(btf, v64->name_off), "v2", "v2_name");
+ ASSERT_EQ(v64->val_hi32, 0x1, "v2_val");
+ ASSERT_EQ(v64->val_lo32, 0x23456789, "v2_val");
+ ASSERT_STREQ(btf_type_raw_dump(btf, 21),
+ "[21] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
+ "\t'v1' val=-1\n"
+ "\t'v2' val=4886718345", "raw_dump");
+
+ id = btf__add_enum64(btf, "e1", 8, false);
+ ASSERT_EQ(id, 22, "enum64_id");
+ err = btf__add_enum64_value(btf, "v1", 0xffffffffFFFFFFFF); /* 18446744073709551615 */
+ ASSERT_OK(err, "v1_res");
+ t = btf__type_by_id(btf, 22);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "e1", "enum64_name");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM64, "enum64_kind");
+ ASSERT_EQ(btf_vlen(t), 1, "enum64_vlen");
+ ASSERT_EQ(t->size, 8, "enum64_sz");
+ v64 = btf_enum64(t) + 0;
+ ASSERT_STREQ(btf__str_by_offset(btf, v64->name_off), "v1", "v1_name");
+ ASSERT_EQ(v64->val_hi32, 0xffffffff, "v1_val");
+ ASSERT_EQ(v64->val_lo32, 0xffffffff, "v1_val");
+ ASSERT_STREQ(btf_type_raw_dump(btf, 22),
+ "[22] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
+ "\t'v1' val=18446744073709551615", "raw_dump");
+}
+
+static void test_btf_add()
+{
+ struct btf *btf;
+
+ btf = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf, "new_empty"))
+ return;
+
+ gen_btf(btf);
+
+ VALIDATE_RAW_BTF(
+ btf,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1",
+ "[3] CONST '(anon)' type_id=5",
+ "[4] VOLATILE '(anon)' type_id=3",
+ "[5] RESTRICT '(anon)' type_id=4",
+ "[6] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=10",
+ "[7] STRUCT 's1' size=8 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=1 bits_offset=32 bitfield_size=16",
+ "[8] UNION 'u1' size=8 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0 bitfield_size=16",
+ "[9] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
+ "\t'v1' val=1\n"
+ "\t'v2' val=2",
+ "[10] FWD 'struct_fwd' fwd_kind=struct",
+ "[11] FWD 'union_fwd' fwd_kind=union",
+ "[12] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0",
+ "[13] TYPEDEF 'typedef1' type_id=1",
+ "[14] FUNC 'func1' type_id=15 linkage=global",
+ "[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n"
+ "\t'p1' type_id=1\n"
+ "\t'p2' type_id=2",
+ "[16] VAR 'var1' type_id=1, linkage=global-alloc",
+ "[17] DATASEC 'datasec1' size=12 vlen=1\n"
+ "\ttype_id=1 offset=4 size=8",
+ "[18] DECL_TAG 'tag1' type_id=16 component_idx=-1",
+ "[19] DECL_TAG 'tag2' type_id=14 component_idx=1",
+ "[20] TYPE_TAG 'tag1' type_id=1",
+ "[21] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
+ "\t'v1' val=-1\n"
+ "\t'v2' val=4886718345",
+ "[22] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
+ "\t'v1' val=18446744073709551615");
+
btf__free(btf);
}
+
+static void test_btf_add_btf()
+{
+ struct btf *btf1 = NULL, *btf2 = NULL;
+ int id;
+
+ btf1 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf1, "btf1"))
+ return;
+
+ btf2 = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf2, "btf2"))
+ goto cleanup;
+
+ gen_btf(btf1);
+ gen_btf(btf2);
+
+ id = btf__add_btf(btf1, btf2);
+ if (!ASSERT_EQ(id, 23, "id"))
+ goto cleanup;
+
+ VALIDATE_RAW_BTF(
+ btf1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1",
+ "[3] CONST '(anon)' type_id=5",
+ "[4] VOLATILE '(anon)' type_id=3",
+ "[5] RESTRICT '(anon)' type_id=4",
+ "[6] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=10",
+ "[7] STRUCT 's1' size=8 vlen=2\n"
+ "\t'f1' type_id=1 bits_offset=0\n"
+ "\t'f2' type_id=1 bits_offset=32 bitfield_size=16",
+ "[8] UNION 'u1' size=8 vlen=1\n"
+ "\t'f1' type_id=1 bits_offset=0 bitfield_size=16",
+ "[9] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
+ "\t'v1' val=1\n"
+ "\t'v2' val=2",
+ "[10] FWD 'struct_fwd' fwd_kind=struct",
+ "[11] FWD 'union_fwd' fwd_kind=union",
+ "[12] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0",
+ "[13] TYPEDEF 'typedef1' type_id=1",
+ "[14] FUNC 'func1' type_id=15 linkage=global",
+ "[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n"
+ "\t'p1' type_id=1\n"
+ "\t'p2' type_id=2",
+ "[16] VAR 'var1' type_id=1, linkage=global-alloc",
+ "[17] DATASEC 'datasec1' size=12 vlen=1\n"
+ "\ttype_id=1 offset=4 size=8",
+ "[18] DECL_TAG 'tag1' type_id=16 component_idx=-1",
+ "[19] DECL_TAG 'tag2' type_id=14 component_idx=1",
+ "[20] TYPE_TAG 'tag1' type_id=1",
+ "[21] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
+ "\t'v1' val=-1\n"
+ "\t'v2' val=4886718345",
+ "[22] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
+ "\t'v1' val=18446744073709551615",
+
+ /* types appended from the second BTF */
+ "[23] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[24] PTR '(anon)' type_id=23",
+ "[25] CONST '(anon)' type_id=27",
+ "[26] VOLATILE '(anon)' type_id=25",
+ "[27] RESTRICT '(anon)' type_id=26",
+ "[28] ARRAY '(anon)' type_id=24 index_type_id=23 nr_elems=10",
+ "[29] STRUCT 's1' size=8 vlen=2\n"
+ "\t'f1' type_id=23 bits_offset=0\n"
+ "\t'f2' type_id=23 bits_offset=32 bitfield_size=16",
+ "[30] UNION 'u1' size=8 vlen=1\n"
+ "\t'f1' type_id=23 bits_offset=0 bitfield_size=16",
+ "[31] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
+ "\t'v1' val=1\n"
+ "\t'v2' val=2",
+ "[32] FWD 'struct_fwd' fwd_kind=struct",
+ "[33] FWD 'union_fwd' fwd_kind=union",
+ "[34] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0",
+ "[35] TYPEDEF 'typedef1' type_id=23",
+ "[36] FUNC 'func1' type_id=37 linkage=global",
+ "[37] FUNC_PROTO '(anon)' ret_type_id=23 vlen=2\n"
+ "\t'p1' type_id=23\n"
+ "\t'p2' type_id=24",
+ "[38] VAR 'var1' type_id=23, linkage=global-alloc",
+ "[39] DATASEC 'datasec1' size=12 vlen=1\n"
+ "\ttype_id=23 offset=4 size=8",
+ "[40] DECL_TAG 'tag1' type_id=38 component_idx=-1",
+ "[41] DECL_TAG 'tag2' type_id=36 component_idx=1",
+ "[42] TYPE_TAG 'tag1' type_id=23",
+ "[43] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
+ "\t'v1' val=-1\n"
+ "\t'v2' val=4886718345",
+ "[44] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
+ "\t'v1' val=18446744073709551615");
+
+cleanup:
+ btf__free(btf1);
+ btf__free(btf2);
+}
+
+void test_btf_write()
+{
+ if (test__start_subtest("btf_add"))
+ test_btf_add();
+ if (test__start_subtest("btf_add_btf"))
+ test_btf_add_btf();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/build_id.c b/tools/testing/selftests/bpf/prog_tests/build_id.c
new file mode 100644
index 000000000000..aec9c8d6bc96
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/build_id.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+
+#include "test_build_id.skel.h"
+
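+/* expected build ID of the uprobe_multi binary, filled in by serial_test_build_id() */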
+static char build_id[BPF_BUILD_ID_SIZE];
+static int build_id_sz;
+
+static void print_stack(struct bpf_stack_build_id *stack, int frame_cnt)
+{
+ int i, j;
+
+ for (i = 0; i < frame_cnt; i++) {
+ printf("FRAME #%02d: ", i);
+ switch (stack[i].status) {
+ case BPF_STACK_BUILD_ID_EMPTY:
+ printf("<EMPTY>\n");
+ break;
+ case BPF_STACK_BUILD_ID_VALID:
+ printf("BUILD ID = ");
+ for (j = 0; j < BPF_BUILD_ID_SIZE; j++)
+ printf("%02hhx", (unsigned)stack[i].build_id[j]);
+ printf(" OFFSET = %llx", (unsigned long long)stack[i].offset);
+ break;
+ case BPF_STACK_BUILD_ID_IP:
+ printf("IP = %llx", (unsigned long long)stack[i].ip);
+ break;
+ default:
+ printf("UNEXPECTED STATUS %d ", stack[i].status);
+ break;
+ }
+ printf("\n");
+ }
+}
+
+static void subtest_nofault(bool build_id_resident)
+{
+ struct test_build_id *skel;
+ struct bpf_stack_build_id *stack;
+ int frame_cnt;
+
+ skel = test_build_id__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->links.uprobe_nofault = bpf_program__attach(skel->progs.uprobe_nofault);
+ if (!ASSERT_OK_PTR(skel->links.uprobe_nofault, "link"))
+ goto cleanup;
+
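+	/* decide whether the page holding the build ID is resident when the uprobe fires */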
+ if (build_id_resident)
+ ASSERT_OK(system("./uprobe_multi uprobe-paged-in"), "trigger_uprobe");
+ else
+ ASSERT_OK(system("./uprobe_multi uprobe-paged-out"), "trigger_uprobe");
+
+ if (!ASSERT_GT(skel->bss->res_nofault, 0, "res"))
+ goto cleanup;
+
+ stack = skel->bss->stack_nofault;
+ frame_cnt = skel->bss->res_nofault / sizeof(struct bpf_stack_build_id);
+ if (env.verbosity >= VERBOSE_NORMAL)
+ print_stack(stack, frame_cnt);
+
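+	/* a nofault lookup cannot page the build ID in: expect a valid build ID
+	 * only if it was already resident, and an IP fallback otherwise
+	 */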
+ if (build_id_resident) {
+ ASSERT_EQ(stack[0].status, BPF_STACK_BUILD_ID_VALID, "build_id_status");
+ ASSERT_EQ(memcmp(stack[0].build_id, build_id, build_id_sz), 0, "build_id_match");
+ } else {
+ ASSERT_EQ(stack[0].status, BPF_STACK_BUILD_ID_IP, "build_id_status");
+ }
+
+cleanup:
+ test_build_id__destroy(skel);
+}
+
+static void subtest_sleepable(void)
+{
+ struct test_build_id *skel;
+ struct bpf_stack_build_id *stack;
+ int frame_cnt;
+
+ skel = test_build_id__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->links.uprobe_sleepable = bpf_program__attach(skel->progs.uprobe_sleepable);
+ if (!ASSERT_OK_PTR(skel->links.uprobe_sleepable, "link"))
+ goto cleanup;
+
+ /* force build ID to not be paged in */
+ ASSERT_OK(system("./uprobe_multi uprobe-paged-out"), "trigger_uprobe");
+
+ if (!ASSERT_GT(skel->bss->res_sleepable, 0, "res"))
+ goto cleanup;
+
+ stack = skel->bss->stack_sleepable;
+ frame_cnt = skel->bss->res_sleepable / sizeof(struct bpf_stack_build_id);
+ if (env.verbosity >= VERBOSE_NORMAL)
+ print_stack(stack, frame_cnt);
+
+ ASSERT_EQ(stack[0].status, BPF_STACK_BUILD_ID_VALID, "build_id_status");
+ ASSERT_EQ(memcmp(stack[0].build_id, build_id, build_id_sz), 0, "build_id_match");
+
+cleanup:
+ test_build_id__destroy(skel);
+}
+
+void serial_test_build_id(void)
+{
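+	/* read the expected build ID from the uprobe_multi test binary */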
+ build_id_sz = read_build_id("uprobe_multi", build_id, sizeof(build_id));
+ ASSERT_EQ(build_id_sz, BPF_BUILD_ID_SIZE, "parse_build_id");
+
+ if (test__start_subtest("nofault-paged-out"))
+ subtest_nofault(false /* not resident */);
+ if (test__start_subtest("nofault-paged-in"))
+ subtest_nofault(true /* resident */);
+ if (test__start_subtest("sleepable"))
+ subtest_sleepable();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cb_refs.c b/tools/testing/selftests/bpf/prog_tests/cb_refs.c
new file mode 100644
index 000000000000..c40df623a8f7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cb_refs.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bpf/libbpf.h"
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "cb_refs.skel.h"
+
+static char log_buf[1024 * 1024];
+
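+/* each program below is expected to fail verification with the given message */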
+struct {
+ const char *prog_name;
+ const char *err_msg;
+} cb_refs_tests[] = {
+ { "underflow_prog", "must point to scalar, or struct with scalar" },
+ { "leak_prog", "Possibly NULL pointer passed to helper arg2" },
+ { "nested_cb", "Unreleased reference id=4 alloc_insn=2" }, /* alloc_insn=2{4,5} */
+ { "non_cb_transfer_ref", "Unreleased reference id=4 alloc_insn=1" }, /* alloc_insn=1{1,2} */
+};
+
+void test_cb_refs(void)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
+ .kernel_log_size = sizeof(log_buf),
+ .kernel_log_level = 1);
+ struct bpf_program *prog;
+ struct cb_refs *skel;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cb_refs_tests); i++) {
+ LIBBPF_OPTS(bpf_test_run_opts, run_opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ skel = cb_refs__open_opts(&opts);
+		if (!ASSERT_OK_PTR(skel, "cb_refs__open_opts"))
+ return;
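+		/* autoload only the program under test; all other programs stay unloaded */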
+ prog = bpf_object__find_program_by_name(skel->obj, cb_refs_tests[i].prog_name);
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_ERR(cb_refs__load(skel), "cb_refs__load"))
+ bpf_prog_test_run_opts(bpf_program__fd(prog), &run_opts);
+ if (!ASSERT_OK_PTR(strstr(log_buf, cb_refs_tests[i].err_msg), "expected error message")) {
+ fprintf(stderr, "Expected: %s\n", cb_refs_tests[i].err_msg);
+ fprintf(stderr, "Verifier: %s\n", log_buf);
+ }
+ cb_refs__destroy(skel);
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
index 643dfa35419c..10224f845568 100644
--- a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
@@ -56,8 +56,9 @@ static bool assert_storage_noexist(struct bpf_map *map, const void *key)
static bool connect_send(const char *cgroup_path)
{
- bool res = true;
int server_fd = -1, client_fd = -1;
+ char message[] = "message";
+ bool res = true;
if (join_cgroup(cgroup_path))
goto out_clean;
@@ -70,7 +71,10 @@ static bool connect_send(const char *cgroup_path)
if (client_fd < 0)
goto out_clean;
- if (send(client_fd, "message", strlen("message"), 0) < 0)
+ if (send(client_fd, &message, sizeof(message), 0) < 0)
+ goto out_clean;
+
+ if (read(server_fd, &message, sizeof(message)) < 0)
goto out_clean;
res = false;
@@ -102,8 +106,7 @@ static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd)
*/
parent_link = bpf_program__attach_cgroup(obj->progs.egress,
parent_cgroup_fd);
- if (CHECK(IS_ERR(parent_link), "parent-cg-attach",
- "err %ld", PTR_ERR(parent_link)))
+ if (!ASSERT_OK_PTR(parent_link, "parent-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "first-connect-send", "errno %d", errno))
@@ -126,8 +129,7 @@ static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd)
*/
child_link = bpf_program__attach_cgroup(obj->progs.egress,
child_cgroup_fd);
- if (CHECK(IS_ERR(child_link), "child-cg-attach",
- "err %ld", PTR_ERR(child_link)))
+ if (!ASSERT_OK_PTR(child_link, "child-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "second-connect-send", "errno %d", errno))
@@ -147,10 +149,8 @@ static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd)
goto close_bpf_object;
close_bpf_object:
- if (!IS_ERR(parent_link))
- bpf_link__destroy(parent_link);
- if (!IS_ERR(child_link))
- bpf_link__destroy(child_link);
+ bpf_link__destroy(parent_link);
+ bpf_link__destroy(child_link);
cg_storage_multi_egress_only__destroy(obj);
}
@@ -176,18 +176,15 @@ static void test_isolated(int parent_cgroup_fd, int child_cgroup_fd)
*/
parent_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
parent_cgroup_fd);
- if (CHECK(IS_ERR(parent_egress1_link), "parent-egress1-cg-attach",
- "err %ld", PTR_ERR(parent_egress1_link)))
+ if (!ASSERT_OK_PTR(parent_egress1_link, "parent-egress1-cg-attach"))
goto close_bpf_object;
parent_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
parent_cgroup_fd);
- if (CHECK(IS_ERR(parent_egress2_link), "parent-egress2-cg-attach",
- "err %ld", PTR_ERR(parent_egress2_link)))
+ if (!ASSERT_OK_PTR(parent_egress2_link, "parent-egress2-cg-attach"))
goto close_bpf_object;
parent_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
parent_cgroup_fd);
- if (CHECK(IS_ERR(parent_ingress_link), "parent-ingress-cg-attach",
- "err %ld", PTR_ERR(parent_ingress_link)))
+ if (!ASSERT_OK_PTR(parent_ingress_link, "parent-ingress-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "first-connect-send", "errno %d", errno))
@@ -217,22 +214,19 @@ static void test_isolated(int parent_cgroup_fd, int child_cgroup_fd)
/* Attach to parent and child cgroup, trigger packet from child.
	 * Assert that there are six additional runs, parent cgroup egresses and
* ingress, child cgroup egresses and ingress.
- * Assert that egree and ingress storages are separate.
+ * Assert that egress and ingress storages are separate.
*/
child_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
child_cgroup_fd);
- if (CHECK(IS_ERR(child_egress1_link), "child-egress1-cg-attach",
- "err %ld", PTR_ERR(child_egress1_link)))
+ if (!ASSERT_OK_PTR(child_egress1_link, "child-egress1-cg-attach"))
goto close_bpf_object;
child_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
child_cgroup_fd);
- if (CHECK(IS_ERR(child_egress2_link), "child-egress2-cg-attach",
- "err %ld", PTR_ERR(child_egress2_link)))
+ if (!ASSERT_OK_PTR(child_egress2_link, "child-egress2-cg-attach"))
goto close_bpf_object;
child_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
child_cgroup_fd);
- if (CHECK(IS_ERR(child_ingress_link), "child-ingress-cg-attach",
- "err %ld", PTR_ERR(child_ingress_link)))
+ if (!ASSERT_OK_PTR(child_ingress_link, "child-ingress-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "second-connect-send", "errno %d", errno))
@@ -264,18 +258,12 @@ static void test_isolated(int parent_cgroup_fd, int child_cgroup_fd)
goto close_bpf_object;
close_bpf_object:
- if (!IS_ERR(parent_egress1_link))
- bpf_link__destroy(parent_egress1_link);
- if (!IS_ERR(parent_egress2_link))
- bpf_link__destroy(parent_egress2_link);
- if (!IS_ERR(parent_ingress_link))
- bpf_link__destroy(parent_ingress_link);
- if (!IS_ERR(child_egress1_link))
- bpf_link__destroy(child_egress1_link);
- if (!IS_ERR(child_egress2_link))
- bpf_link__destroy(child_egress2_link);
- if (!IS_ERR(child_ingress_link))
- bpf_link__destroy(child_ingress_link);
+ bpf_link__destroy(parent_egress1_link);
+ bpf_link__destroy(parent_egress2_link);
+ bpf_link__destroy(parent_ingress_link);
+ bpf_link__destroy(child_egress1_link);
+ bpf_link__destroy(child_egress2_link);
+ bpf_link__destroy(child_ingress_link);
cg_storage_multi_isolated__destroy(obj);
}
@@ -301,18 +289,15 @@ static void test_shared(int parent_cgroup_fd, int child_cgroup_fd)
*/
parent_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
parent_cgroup_fd);
- if (CHECK(IS_ERR(parent_egress1_link), "parent-egress1-cg-attach",
- "err %ld", PTR_ERR(parent_egress1_link)))
+ if (!ASSERT_OK_PTR(parent_egress1_link, "parent-egress1-cg-attach"))
goto close_bpf_object;
parent_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
parent_cgroup_fd);
- if (CHECK(IS_ERR(parent_egress2_link), "parent-egress2-cg-attach",
- "err %ld", PTR_ERR(parent_egress2_link)))
+ if (!ASSERT_OK_PTR(parent_egress2_link, "parent-egress2-cg-attach"))
goto close_bpf_object;
parent_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
parent_cgroup_fd);
- if (CHECK(IS_ERR(parent_ingress_link), "parent-ingress-cg-attach",
- "err %ld", PTR_ERR(parent_ingress_link)))
+ if (!ASSERT_OK_PTR(parent_ingress_link, "parent-ingress-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "first-connect-send", "errno %d", errno))
@@ -338,18 +323,15 @@ static void test_shared(int parent_cgroup_fd, int child_cgroup_fd)
*/
child_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
child_cgroup_fd);
- if (CHECK(IS_ERR(child_egress1_link), "child-egress1-cg-attach",
- "err %ld", PTR_ERR(child_egress1_link)))
+ if (!ASSERT_OK_PTR(child_egress1_link, "child-egress1-cg-attach"))
goto close_bpf_object;
child_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
child_cgroup_fd);
- if (CHECK(IS_ERR(child_egress2_link), "child-egress2-cg-attach",
- "err %ld", PTR_ERR(child_egress2_link)))
+ if (!ASSERT_OK_PTR(child_egress2_link, "child-egress2-cg-attach"))
goto close_bpf_object;
child_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
child_cgroup_fd);
- if (CHECK(IS_ERR(child_ingress_link), "child-ingress-cg-attach",
- "err %ld", PTR_ERR(child_ingress_link)))
+ if (!ASSERT_OK_PTR(child_ingress_link, "child-ingress-cg-attach"))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "second-connect-send", "errno %d", errno))
@@ -375,23 +357,17 @@ static void test_shared(int parent_cgroup_fd, int child_cgroup_fd)
goto close_bpf_object;
close_bpf_object:
- if (!IS_ERR(parent_egress1_link))
- bpf_link__destroy(parent_egress1_link);
- if (!IS_ERR(parent_egress2_link))
- bpf_link__destroy(parent_egress2_link);
- if (!IS_ERR(parent_ingress_link))
- bpf_link__destroy(parent_ingress_link);
- if (!IS_ERR(child_egress1_link))
- bpf_link__destroy(child_egress1_link);
- if (!IS_ERR(child_egress2_link))
- bpf_link__destroy(child_egress2_link);
- if (!IS_ERR(child_ingress_link))
- bpf_link__destroy(child_ingress_link);
+ bpf_link__destroy(parent_egress1_link);
+ bpf_link__destroy(parent_egress2_link);
+ bpf_link__destroy(parent_ingress_link);
+ bpf_link__destroy(child_egress1_link);
+ bpf_link__destroy(child_egress2_link);
+ bpf_link__destroy(child_ingress_link);
cg_storage_multi_shared__destroy(obj);
}
-void test_cg_storage_multi(void)
+void serial_test_cg_storage_multi(void)
{
int parent_cgroup_fd = -1, child_cgroup_fd = -1;
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c b/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c
new file mode 100644
index 000000000000..25332e596750
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */
+
+#include <sys/types.h>
+#include <unistd.h>
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "test_cgroup1_hierarchy.skel.h"
+
+static void bpf_cgroup1(struct test_cgroup1_hierarchy *skel)
+{
+ struct bpf_link *lsm_link, *fentry_link;
+ int err;
+
+ /* Attach LSM prog first */
+ lsm_link = bpf_program__attach_lsm(skel->progs.lsm_run);
+ if (!ASSERT_OK_PTR(lsm_link, "lsm_attach"))
+ return;
+
+ /* LSM prog will be triggered when attaching fentry */
+ fentry_link = bpf_program__attach_trace(skel->progs.fentry_run);
+ ASSERT_NULL(fentry_link, "fentry_attach_fail");
+
+ err = bpf_link__destroy(lsm_link);
+ ASSERT_OK(err, "destroy_lsm");
+}
+
+static void bpf_cgroup1_sleepable(struct test_cgroup1_hierarchy *skel)
+{
+ struct bpf_link *lsm_link, *fentry_link;
+ int err;
+
+ /* Attach LSM prog first */
+ lsm_link = bpf_program__attach_lsm(skel->progs.lsm_s_run);
+ if (!ASSERT_OK_PTR(lsm_link, "lsm_attach"))
+ return;
+
+ /* LSM prog will be triggered when attaching fentry */
+ fentry_link = bpf_program__attach_trace(skel->progs.fentry_run);
+ ASSERT_NULL(fentry_link, "fentry_attach_fail");
+
+ err = bpf_link__destroy(lsm_link);
+ ASSERT_OK(err, "destroy_lsm");
+}
+
+static void bpf_cgroup1_invalid_id(struct test_cgroup1_hierarchy *skel)
+{
+ struct bpf_link *lsm_link, *fentry_link;
+ int err;
+
+ /* Attach LSM prog first */
+ lsm_link = bpf_program__attach_lsm(skel->progs.lsm_run);
+ if (!ASSERT_OK_PTR(lsm_link, "lsm_attach"))
+ return;
+
+ /* LSM prog will be triggered when attaching fentry */
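+	/* unlike the valid-id cases, the attach is expected to succeed here */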
+ fentry_link = bpf_program__attach_trace(skel->progs.fentry_run);
+ if (!ASSERT_OK_PTR(fentry_link, "fentry_attach_success"))
+ goto cleanup;
+
+ err = bpf_link__destroy(fentry_link);
+	ASSERT_OK(err, "destroy_fentry");
+
+cleanup:
+ err = bpf_link__destroy(lsm_link);
+	ASSERT_OK(err, "destroy_lsm");
+}
+
+void test_cgroup1_hierarchy(void)
+{
+ struct test_cgroup1_hierarchy *skel;
+ __u64 current_cgid;
+ int hid, err;
+
+ skel = test_cgroup1_hierarchy__open();
+ if (!ASSERT_OK_PTR(skel, "open"))
+ return;
+
+ skel->bss->target_pid = getpid();
+
+ err = bpf_program__set_attach_target(skel->progs.fentry_run, 0, "bpf_fentry_test1");
+ if (!ASSERT_OK(err, "fentry_set_target"))
+ goto destroy;
+
+ err = test_cgroup1_hierarchy__load(skel);
+ if (!ASSERT_OK(err, "load"))
+ goto destroy;
+
+ /* Setup cgroup1 hierarchy */
+ err = setup_cgroup_environment();
+ if (!ASSERT_OK(err, "setup_cgroup_environment"))
+ goto destroy;
+ err = setup_classid_environment();
+ if (!ASSERT_OK(err, "setup_classid_environment"))
+ goto cleanup_cgroup;
+
+ err = join_classid();
+ if (!ASSERT_OK(err, "join_cgroup1"))
+ goto cleanup;
+
+ current_cgid = get_classid_cgroup_id();
+ if (!ASSERT_GE(current_cgid, 0, "cgroup1 id"))
+ goto cleanup;
+
+ hid = get_cgroup1_hierarchy_id("net_cls");
+	if (!ASSERT_GE(hid, 0, "cgroup1 hierarchy id"))
+ goto cleanup;
+ skel->bss->target_hid = hid;
+
+ if (test__start_subtest("test_cgroup1_hierarchy")) {
+ skel->bss->target_ancestor_cgid = current_cgid;
+ bpf_cgroup1(skel);
+ }
+
+ if (test__start_subtest("test_root_cgid")) {
+ skel->bss->target_ancestor_cgid = 1;
+ skel->bss->target_ancestor_level = 0;
+ bpf_cgroup1(skel);
+ }
+
+ if (test__start_subtest("test_invalid_level")) {
+ skel->bss->target_ancestor_cgid = 1;
+ skel->bss->target_ancestor_level = 1;
+ bpf_cgroup1_invalid_id(skel);
+ }
+
+ if (test__start_subtest("test_invalid_cgid")) {
+ skel->bss->target_ancestor_cgid = 0;
+ bpf_cgroup1_invalid_id(skel);
+ }
+
+ if (test__start_subtest("test_invalid_hid")) {
+ skel->bss->target_ancestor_cgid = 1;
+ skel->bss->target_ancestor_level = 0;
+ skel->bss->target_hid = -1;
+ bpf_cgroup1_invalid_id(skel);
+ }
+
+ if (test__start_subtest("test_invalid_cgrp_name")) {
+ skel->bss->target_hid = get_cgroup1_hierarchy_id("net_cl");
+ skel->bss->target_ancestor_cgid = current_cgid;
+ bpf_cgroup1_invalid_id(skel);
+ }
+
+ if (test__start_subtest("test_invalid_cgrp_name2")) {
+ skel->bss->target_hid = get_cgroup1_hierarchy_id("net_cls,");
+ skel->bss->target_ancestor_cgid = current_cgid;
+ bpf_cgroup1_invalid_id(skel);
+ }
+
+ if (test__start_subtest("test_sleepable_prog")) {
+ skel->bss->target_hid = hid;
+ skel->bss->target_ancestor_cgid = current_cgid;
+ bpf_cgroup1_sleepable(skel);
+ }
+
+cleanup:
+ cleanup_classid_environment();
+cleanup_cgroup:
+ cleanup_cgroup_environment();
+destroy:
+ test_cgroup1_hierarchy__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c b/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c
new file mode 100644
index 000000000000..3f9ffdf71343
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "cgroup_helpers.h"
+#include "cgroup_ancestor.skel.h"
+
+#define CGROUP_PATH "/skb_cgroup_test"
+#define TEST_NS "cgroup_ancestor_ns"
+#define NUM_CGROUP_LEVELS 4
+#define WAIT_AUTO_IP_MAX_ATTEMPT 10
+#define DST_ADDR "::1"
+#define DST_PORT 1234
+#define MAX_ASSERT_NAME 32
+
+struct test_data {
+ struct cgroup_ancestor *skel;
+ struct bpf_tc_hook qdisc;
+ struct bpf_tc_opts tc_attach;
+ struct nstoken *ns;
+};
+
+static int send_datagram(void)
+{
+ unsigned char buf[] = "some random test data";
+ struct sockaddr_in6 addr = { .sin6_family = AF_INET6,
+ .sin6_port = htons(DST_PORT), };
+ int sock, n;
+
+ if (!ASSERT_EQ(inet_pton(AF_INET6, DST_ADDR, &addr.sin6_addr), 1,
+ "inet_pton"))
+ return -1;
+
+ sock = socket(AF_INET6, SOCK_DGRAM, 0);
+ if (!ASSERT_OK_FD(sock, "create socket"))
+ return sock;
+
+ if (!ASSERT_OK(connect(sock, (struct sockaddr *)&addr, sizeof(addr)), "connect")) {
+ close(sock);
+ return -1;
+ }
+
+ n = sendto(sock, buf, sizeof(buf), 0, (const struct sockaddr *)&addr,
+ sizeof(addr));
+ close(sock);
+ return ASSERT_EQ(n, sizeof(buf), "send data") ? 0 : -1;
+}
+
+static int setup_network(struct test_data *t)
+{
+ SYS(fail, "ip netns add %s", TEST_NS);
+ t->ns = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(t->ns, "open netns"))
+ goto cleanup_ns;
+
+ SYS(close_ns, "ip link set lo up");
+
+ memset(&t->qdisc, 0, sizeof(t->qdisc));
+ t->qdisc.sz = sizeof(t->qdisc);
+ t->qdisc.attach_point = BPF_TC_EGRESS;
+ t->qdisc.ifindex = if_nametoindex("lo");
+ if (!ASSERT_NEQ(t->qdisc.ifindex, 0, "if_nametoindex"))
+ goto close_ns;
+ if (!ASSERT_OK(bpf_tc_hook_create(&t->qdisc), "qdisc add"))
+ goto close_ns;
+
+ memset(&t->tc_attach, 0, sizeof(t->tc_attach));
+ t->tc_attach.sz = sizeof(t->tc_attach);
+ t->tc_attach.prog_fd = bpf_program__fd(t->skel->progs.log_cgroup_id);
+ if (!ASSERT_OK(bpf_tc_attach(&t->qdisc, &t->tc_attach), "filter add"))
+ goto cleanup_qdisc;
+
+ return 0;
+
+cleanup_qdisc:
+ bpf_tc_hook_destroy(&t->qdisc);
+close_ns:
+ close_netns(t->ns);
+cleanup_ns:
+ SYS_NOFAIL("ip netns del %s", TEST_NS);
+fail:
+ return 1;
+}
+
+static void cleanup_network(struct test_data *t)
+{
+ bpf_tc_detach(&t->qdisc, &t->tc_attach);
+ bpf_tc_hook_destroy(&t->qdisc);
+ close_netns(t->ns);
+ SYS_NOFAIL("ip netns del %s", TEST_NS);
+}
+
+static void check_ancestors_ids(struct test_data *t)
+{
+ __u64 expected_ids[NUM_CGROUP_LEVELS];
+ char assert_name[MAX_ASSERT_NAME];
+ __u32 level;
+
+ expected_ids[0] = get_cgroup_id("/.."); /* root cgroup */
+ expected_ids[1] = get_cgroup_id("");
+ expected_ids[2] = get_cgroup_id(CGROUP_PATH);
+ expected_ids[3] = 0; /* non-existent cgroup */
+
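+	/* compare each ancestor id recorded by the BPF prog with the ids
+	 * read back from the cgroup filesystem
+	 */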
+ for (level = 0; level < NUM_CGROUP_LEVELS; level++) {
+ snprintf(assert_name, MAX_ASSERT_NAME,
+ "ancestor id at level %d", level);
+ ASSERT_EQ(t->skel->bss->cgroup_ids[level], expected_ids[level],
+ assert_name);
+ }
+}
+
+void test_cgroup_ancestor(void)
+{
+ struct test_data t;
+ int cgroup_fd;
+
+ t.skel = cgroup_ancestor__open_and_load();
+ if (!ASSERT_OK_PTR(t.skel, "open and load"))
+ return;
+
+ t.skel->bss->dport = htons(DST_PORT);
+ cgroup_fd = cgroup_setup_and_join(CGROUP_PATH);
+ if (cgroup_fd < 0)
+ goto cleanup_progs;
+
+ if (setup_network(&t))
+ goto cleanup_cgroups;
+
+ if (send_datagram())
+ goto cleanup_network;
+
+ check_ancestors_ids(&t);
+
+cleanup_network:
+ cleanup_network(&t);
+cleanup_cgroups:
+ close(cgroup_fd);
+ cleanup_cgroup_environment();
+cleanup_progs:
+ cgroup_ancestor__destroy(t.skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
index 70e94e783070..9367bd2f0ae1 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
@@ -14,14 +14,14 @@ static int prog_load(void)
BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = 1 */
BPF_EXIT_INSN(),
};
- size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+ size_t insns_cnt = ARRAY_SIZE(prog);
- return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
+ return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
}
-void test_cgroup_attach_autodetach(void)
+void serial_test_cgroup_attach_autodetach(void)
{
__u32 duration = 0, prog_cnt = 4, attach_flags;
int allow_prog[2] = {-1};
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
index b549fcfacc0b..db0b7bac78d1 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
@@ -15,22 +15,22 @@ static int prog_load_cnt(int verdict, int val)
int cgroup_storage_fd, percpu_cgroup_storage_fd;
if (map_fd < 0)
- map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
+ map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
if (map_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
return -1;
}
- cgroup_storage_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE,
- sizeof(struct bpf_cgroup_storage_key), 8, 0, 0);
+ cgroup_storage_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, NULL,
+ sizeof(struct bpf_cgroup_storage_key), 8, 0, NULL);
if (cgroup_storage_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
return -1;
}
- percpu_cgroup_storage_fd = bpf_create_map(
- BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
- sizeof(struct bpf_cgroup_storage_key), 8, 0, 0);
+ percpu_cgroup_storage_fd = bpf_map_create(
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, NULL,
+ sizeof(struct bpf_cgroup_storage_key), 8, 0, NULL);
if (percpu_cgroup_storage_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
return -1;
@@ -45,13 +45,13 @@ static int prog_load_cnt(int verdict, int val)
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_MOV64_IMM(BPF_REG_1, val),
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
BPF_MOV64_IMM(BPF_REG_2, 0),
@@ -63,10 +63,10 @@ static int prog_load_cnt(int verdict, int val)
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
- size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+ size_t insns_cnt = ARRAY_SIZE(prog);
int ret;
- ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
+ ret = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
@@ -74,7 +74,7 @@ static int prog_load_cnt(int verdict, int val)
return ret;
}
-void test_cgroup_attach_multi(void)
+void serial_test_cgroup_attach_multi(void)
{
__u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id;
int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0;
@@ -167,7 +167,7 @@ void test_cgroup_attach_multi(void)
prog_cnt = 2;
CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
BPF_F_QUERY_EFFECTIVE, &attach_flags,
- prog_ids, &prog_cnt) != -1);
+ prog_ids, &prog_cnt) >= 0);
CHECK_FAIL(errno != ENOSPC);
CHECK_FAIL(prog_cnt != 4);
/* check that prog_ids are returned even when buffer is too small */
@@ -194,14 +194,14 @@ void test_cgroup_attach_multi(void)
attach_opts.flags = BPF_F_ALLOW_OVERRIDE | BPF_F_REPLACE;
attach_opts.replace_prog_fd = allow_prog[0];
- if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
+ if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"fail_prog_replace_override", "unexpected success\n"))
goto err;
CHECK_FAIL(errno != EINVAL);
attach_opts.flags = BPF_F_REPLACE;
- if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
+ if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"fail_prog_replace_no_multi", "unexpected success\n"))
goto err;
@@ -209,7 +209,7 @@ void test_cgroup_attach_multi(void)
attach_opts.flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;
attach_opts.replace_prog_fd = -1;
- if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
+ if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"fail_prog_replace_bad_fd", "unexpected success\n"))
goto err;
@@ -217,7 +217,7 @@ void test_cgroup_attach_multi(void)
/* replacing a program that is not attached to cgroup should fail */
attach_opts.replace_prog_fd = allow_prog[3];
- if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
+ if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"fail_prog_replace_no_ent", "unexpected success\n"))
goto err;
@@ -225,14 +225,14 @@ void test_cgroup_attach_multi(void)
/* replace 1st from the top program */
attach_opts.replace_prog_fd = allow_prog[0];
- if (CHECK(bpf_prog_attach_xattr(allow_prog[6], cg1,
+ if (CHECK(bpf_prog_attach_opts(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"prog_replace", "errno=%d\n", errno))
goto err;
/* replace program with itself */
attach_opts.replace_prog_fd = allow_prog[6];
- if (CHECK(bpf_prog_attach_xattr(allow_prog[6], cg1,
+ if (CHECK(bpf_prog_attach_opts(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"prog_replace", "errno=%d\n", errno))
goto err;
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
index 9e96f8d87fea..9421a5b7f4e1 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
@@ -16,14 +16,14 @@ static int prog_load(int verdict)
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
- size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+ size_t insns_cnt = ARRAY_SIZE(prog);
- return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
+ return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
}
-void test_cgroup_attach_override(void)
+void serial_test_cgroup_attach_override(void)
{
int drop_prog = -1, allow_prog = -1, foo = -1, bar = -1;
__u32 duration = 0;
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_dev.c b/tools/testing/selftests/bpf/prog_tests/cgroup_dev.c
new file mode 100644
index 000000000000..5ab7547e38c0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_dev.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <sys/stat.h>
+#include <sys/sysmacros.h>
+#include <errno.h>
+#include "test_progs.h"
+#include "cgroup_helpers.h"
+#include "dev_cgroup.skel.h"
+
+#define TEST_CGROUP "/test-bpf-based-device-cgroup/"
+#define TEST_BUFFER_SIZE 64
+
+static void test_mknod(const char *path, mode_t mode, int dev_major,
+ int dev_minor, int expected_ret, int expected_errno)
+{
+ int ret;
+
+ unlink(path);
+ ret = mknod(path, mode, makedev(dev_major, dev_minor));
+ ASSERT_EQ(ret, expected_ret, "mknod");
+ if (expected_ret)
+ ASSERT_EQ(errno, expected_errno, "mknod errno");
+ else
+ unlink(path);
+}
+
+static void test_read(const char *path, char *buf, int buf_size,
+ int expected_ret, int expected_errno)
+{
+ int ret, fd;
+
+ fd = open(path, O_RDONLY);
+
+	/* A bare open on an unauthorized device should fail */
+ if (expected_ret < 0) {
+ ASSERT_EQ(fd, expected_ret, "open ret for read");
+ ASSERT_EQ(errno, expected_errno, "open errno for read");
+ if (fd >= 0)
+ close(fd);
+ return;
+ }
+
+ if (!ASSERT_OK_FD(fd, "open ret for read"))
+ return;
+
+ ret = read(fd, buf, buf_size);
+ ASSERT_EQ(ret, expected_ret, "read");
+
+ close(fd);
+}
+
+static void test_write(const char *path, char *buf, int buf_size,
+ int expected_ret, int expected_errno)
+{
+ int ret, fd;
+
+ fd = open(path, O_WRONLY);
+
+	/* A bare open on an unauthorized device should fail */
+ if (expected_ret < 0) {
+ ASSERT_EQ(fd, expected_ret, "open ret for write");
+ ASSERT_EQ(errno, expected_errno, "open errno for write");
+ if (fd >= 0)
+ close(fd);
+ return;
+ }
+
+ if (!ASSERT_OK_FD(fd, "open ret for write"))
+ return;
+
+ ret = write(fd, buf, buf_size);
+ ASSERT_EQ(ret, expected_ret, "write");
+
+ close(fd);
+}
+
+void test_cgroup_dev(void)
+{
+ char buf[TEST_BUFFER_SIZE] = "some random test data";
+ struct dev_cgroup *skel;
+ int cgroup_fd;
+
+ cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
+ if (!ASSERT_OK_FD(cgroup_fd, "cgroup switch"))
+ return;
+
+ skel = dev_cgroup__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "load program"))
+ goto cleanup_cgroup;
+
+ skel->links.bpf_prog1 =
+ bpf_program__attach_cgroup(skel->progs.bpf_prog1, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.bpf_prog1, "attach_program"))
+ goto cleanup_progs;
+
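+	/* the attached prog allows only a small set of devices; every other
+	 * access must fail with EPERM
+	 */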
+ if (test__start_subtest("allow-mknod"))
+ test_mknod("/dev/test_dev_cgroup_null", S_IFCHR, 1, 3, 0, 0);
+
+ if (test__start_subtest("allow-read"))
+ test_read("/dev/urandom", buf, TEST_BUFFER_SIZE,
+ TEST_BUFFER_SIZE, 0);
+
+ if (test__start_subtest("allow-write"))
+ test_write("/dev/null", buf, TEST_BUFFER_SIZE,
+ TEST_BUFFER_SIZE, 0);
+
+ if (test__start_subtest("deny-mknod"))
+ test_mknod("/dev/test_dev_cgroup_zero", S_IFCHR, 1, 5, -1,
+ EPERM);
+
+ if (test__start_subtest("deny-read"))
+ test_read("/dev/random", buf, TEST_BUFFER_SIZE, -1, EPERM);
+
+ if (test__start_subtest("deny-write"))
+ test_write("/dev/zero", buf, TEST_BUFFER_SIZE, -1, EPERM);
+
+ if (test__start_subtest("deny-mknod-wrong-type"))
+ test_mknod("/dev/test_dev_cgroup_block", S_IFBLK, 1, 3, -1,
+ EPERM);
+
+cleanup_progs:
+ dev_cgroup__destroy(skel);
+cleanup_cgroup:
+ cleanup_cgroup_environment();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_get_current_cgroup_id.c b/tools/testing/selftests/bpf/prog_tests/cgroup_get_current_cgroup_id.c
new file mode 100644
index 000000000000..7a1643b03bf3
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_get_current_cgroup_id.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <sys/stat.h>
+#include <sys/sysmacros.h>
+#include "test_progs.h"
+#include "cgroup_helpers.h"
+#include "get_cgroup_id_kern.skel.h"
+
+#define TEST_CGROUP "/test-bpf-get-cgroup-id/"
+
+void test_cgroup_get_current_cgroup_id(void)
+{
+ struct get_cgroup_id_kern *skel;
+ const struct timespec req = {
+ .tv_sec = 0,
+ .tv_nsec = 1,
+ };
+ int cgroup_fd;
+ __u64 ucgid;
+
+ cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
+ if (!ASSERT_OK_FD(cgroup_fd, "cgroup switch"))
+ return;
+
+ skel = get_cgroup_id_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "load program"))
+ goto cleanup_cgroup;
+
+ if (!ASSERT_OK(get_cgroup_id_kern__attach(skel), "attach bpf program"))
+ goto cleanup_progs;
+
+ skel->bss->expected_pid = getpid();
+	/* trigger the syscall that the tested prog is attached to */
+ if (!ASSERT_OK(syscall(__NR_nanosleep, &req, NULL), "nanosleep"))
+ goto cleanup_progs;
+
+ ucgid = get_cgroup_id(TEST_CGROUP);
+
+ ASSERT_EQ(skel->bss->cg_id, ucgid, "compare cgroup ids");
+
+cleanup_progs:
+ get_cgroup_id_kern__destroy(skel);
+cleanup_cgroup:
+ close(cgroup_fd);
+ cleanup_cgroup_environment();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c
new file mode 100644
index 000000000000..2bb5773d6f99
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright 2021 Google LLC.
+ */
+
+#include <test_progs.h>
+#include <cgroup_helpers.h>
+#include <network_helpers.h>
+
+#include "cgroup_getset_retval_setsockopt.skel.h"
+#include "cgroup_getset_retval_getsockopt.skel.h"
+#include "cgroup_getset_retval_hooks.skel.h"
+
+#define SOL_CUSTOM 0xdeadbeef
+
+static int zero;
+
+static void test_setsockopt_set(int cgroup_fd, int sock_fd)
+{
+ struct cgroup_getset_retval_setsockopt *obj;
+ struct bpf_link *link_set_eunatch = NULL;
+
+ obj = cgroup_getset_retval_setsockopt__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "skel-load"))
+ return;
+
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
+ /* Attach setsockopt that sets EUNATCH, assert that
+ * we actually get that error when we run setsockopt()
+ */
+ link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch"))
+ goto close_bpf_object;
+
+ if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
+ &zero, sizeof(int)), "setsockopt"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno"))
+ goto close_bpf_object;
+
+ if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations"))
+ goto close_bpf_object;
+ if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
+ goto close_bpf_object;
+
+close_bpf_object:
+ bpf_link__destroy(link_set_eunatch);
+
+ cgroup_getset_retval_setsockopt__destroy(obj);
+}
+
+static void test_setsockopt_set_and_get(int cgroup_fd, int sock_fd)
+{
+ struct cgroup_getset_retval_setsockopt *obj;
+ struct bpf_link *link_set_eunatch = NULL, *link_get_retval = NULL;
+
+ obj = cgroup_getset_retval_setsockopt__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "skel-load"))
+ return;
+
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
+ /* Attach setsockopt that sets EUNATCH, and one that gets the
+ * previously set errno. Assert that we get the same errno back.
+ */
+ link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch"))
+ goto close_bpf_object;
+ link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
+ goto close_bpf_object;
+
+ if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
+ &zero, sizeof(int)), "setsockopt"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno"))
+ goto close_bpf_object;
+
+ if (!ASSERT_EQ(obj->bss->invocations, 2, "invocations"))
+ goto close_bpf_object;
+ if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(obj->bss->retval_value, -EUNATCH, "retval_value"))
+ goto close_bpf_object;
+
+close_bpf_object:
+ bpf_link__destroy(link_set_eunatch);
+ bpf_link__destroy(link_get_retval);
+
+ cgroup_getset_retval_setsockopt__destroy(obj);
+}
+
+static void test_setsockopt_default_zero(int cgroup_fd, int sock_fd)
+{
+ struct cgroup_getset_retval_setsockopt *obj;
+ struct bpf_link *link_get_retval = NULL;
+
+ obj = cgroup_getset_retval_setsockopt__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "skel-load"))
+ return;
+
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
+ /* Attach setsockopt that gets the previously set errno.
+ * Assert that, without anything setting one, we get 0.
+ */
+ link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
+ goto close_bpf_object;
+
+ if (!ASSERT_OK(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
+ &zero, sizeof(int)), "setsockopt"))
+ goto close_bpf_object;
+
+ if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations"))
+ goto close_bpf_object;
+ if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(obj->bss->retval_value, 0, "retval_value"))
+ goto close_bpf_object;
+
+close_bpf_object:
+ bpf_link__destroy(link_get_retval);
+
+ cgroup_getset_retval_setsockopt__destroy(obj);
+}
+
+static void test_setsockopt_default_zero_and_set(int cgroup_fd, int sock_fd)
+{
+ struct cgroup_getset_retval_setsockopt *obj;
+ struct bpf_link *link_get_retval = NULL, *link_set_eunatch = NULL;
+
+ obj = cgroup_getset_retval_setsockopt__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "skel-load"))
+ return;
+
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
+ /* Attach setsockopt that gets the previously set errno, and then
+ * one that sets the errno to EUNATCH. Assert that the get does not
+ * see EUNATCH set later, and does not prevent EUNATCH from being set.
+ */
+ link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
+ goto close_bpf_object;
+ link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch"))
+ goto close_bpf_object;
+
+ if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
+ &zero, sizeof(int)), "setsockopt"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno"))
+ goto close_bpf_object;
+
+ if (!ASSERT_EQ(obj->bss->invocations, 2, "invocations"))
+ goto close_bpf_object;
+ if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(obj->bss->retval_value, 0, "retval_value"))
+ goto close_bpf_object;
+
+close_bpf_object:
+ bpf_link__destroy(link_get_retval);
+ bpf_link__destroy(link_set_eunatch);
+
+ cgroup_getset_retval_setsockopt__destroy(obj);
+}
+
+static void test_setsockopt_override(int cgroup_fd, int sock_fd)
+{
+ struct cgroup_getset_retval_setsockopt *obj;
+ struct bpf_link *link_set_eunatch = NULL, *link_set_eisconn = NULL;
+ struct bpf_link *link_get_retval = NULL;
+
+ obj = cgroup_getset_retval_setsockopt__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "skel-load"))
+ return;
+
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
+ /* Attach setsockopt that sets EUNATCH, then one that sets EISCONN,
+	 * and then one that gets the exported errno. Assert that both the
+	 * syscall and the helper see the last errno set.
+ */
+ link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch"))
+ goto close_bpf_object;
+ link_set_eisconn = bpf_program__attach_cgroup(obj->progs.set_eisconn,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_set_eisconn, "cg-attach-set_eisconn"))
+ goto close_bpf_object;
+ link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
+ goto close_bpf_object;
+
+ if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
+ &zero, sizeof(int)), "setsockopt"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(errno, EISCONN, "setsockopt-errno"))
+ goto close_bpf_object;
+
+ if (!ASSERT_EQ(obj->bss->invocations, 3, "invocations"))
+ goto close_bpf_object;
+ if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(obj->bss->retval_value, -EISCONN, "retval_value"))
+ goto close_bpf_object;
+
+close_bpf_object:
+ bpf_link__destroy(link_set_eunatch);
+ bpf_link__destroy(link_set_eisconn);
+ bpf_link__destroy(link_get_retval);
+
+ cgroup_getset_retval_setsockopt__destroy(obj);
+}
+
+static void test_setsockopt_legacy_eperm(int cgroup_fd, int sock_fd)
+{
+ struct cgroup_getset_retval_setsockopt *obj;
+ struct bpf_link *link_legacy_eperm = NULL, *link_get_retval = NULL;
+
+ obj = cgroup_getset_retval_setsockopt__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "skel-load"))
+ return;
+
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
+	/* Attach setsockopt that returns a reject without setting errno
+	 * (legacy reject), and one that gets the errno. Assert that for
+	 * backward compatibility the syscall results in EPERM, and that this
+	 * is also visible to the helper.
+ */
+ link_legacy_eperm = bpf_program__attach_cgroup(obj->progs.legacy_eperm,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_legacy_eperm, "cg-attach-legacy_eperm"))
+ goto close_bpf_object;
+ link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
+ goto close_bpf_object;
+
+ if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
+ &zero, sizeof(int)), "setsockopt"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(errno, EPERM, "setsockopt-errno"))
+ goto close_bpf_object;
+
+ if (!ASSERT_EQ(obj->bss->invocations, 2, "invocations"))
+ goto close_bpf_object;
+ if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(obj->bss->retval_value, -EPERM, "retval_value"))
+ goto close_bpf_object;
+
+close_bpf_object:
+ bpf_link__destroy(link_legacy_eperm);
+ bpf_link__destroy(link_get_retval);
+
+ cgroup_getset_retval_setsockopt__destroy(obj);
+}
+
+static void test_setsockopt_legacy_no_override(int cgroup_fd, int sock_fd)
+{
+ struct cgroup_getset_retval_setsockopt *obj;
+ struct bpf_link *link_set_eunatch = NULL, *link_legacy_eperm = NULL;
+ struct bpf_link *link_get_retval = NULL;
+
+ obj = cgroup_getset_retval_setsockopt__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "skel-load"))
+ return;
+
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
+	/* Attach setsockopt that sets EUNATCH, then one that returns a reject
+	 * without setting errno, and then one that gets the exported errno.
+	 * Assert that both the syscall and the helper's errno are unaffected by
+	 * the second prog (i.e. a legacy reject does not override the errno
+ * to EPERM).
+ */
+ link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch"))
+ goto close_bpf_object;
+ link_legacy_eperm = bpf_program__attach_cgroup(obj->progs.legacy_eperm,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_legacy_eperm, "cg-attach-legacy_eperm"))
+ goto close_bpf_object;
+ link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
+ goto close_bpf_object;
+
+ if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
+ &zero, sizeof(int)), "setsockopt"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno"))
+ goto close_bpf_object;
+
+ if (!ASSERT_EQ(obj->bss->invocations, 3, "invocations"))
+ goto close_bpf_object;
+ if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(obj->bss->retval_value, -EUNATCH, "retval_value"))
+ goto close_bpf_object;
+
+close_bpf_object:
+ bpf_link__destroy(link_set_eunatch);
+ bpf_link__destroy(link_legacy_eperm);
+ bpf_link__destroy(link_get_retval);
+
+ cgroup_getset_retval_setsockopt__destroy(obj);
+}
+
+static void test_getsockopt_get(int cgroup_fd, int sock_fd)
+{
+ struct cgroup_getset_retval_getsockopt *obj;
+ struct bpf_link *link_get_retval = NULL;
+ int buf;
+ socklen_t optlen = sizeof(buf);
+
+ obj = cgroup_getset_retval_getsockopt__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "skel-load"))
+ return;
+
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
+	/* Attach getsockopt that gets the previously set errno. Assert that the
+	 * error from the kernel is in both ctx_retval_value and retval_value.
+ */
+ link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
+ goto close_bpf_object;
+
+ if (!ASSERT_ERR(getsockopt(sock_fd, SOL_CUSTOM, 0,
+ &buf, &optlen), "getsockopt"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(errno, EOPNOTSUPP, "getsockopt-errno"))
+ goto close_bpf_object;
+
+ if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations"))
+ goto close_bpf_object;
+ if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(obj->bss->retval_value, -EOPNOTSUPP, "retval_value"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(obj->bss->ctx_retval_value, -EOPNOTSUPP, "ctx_retval_value"))
+ goto close_bpf_object;
+
+close_bpf_object:
+ bpf_link__destroy(link_get_retval);
+
+ cgroup_getset_retval_getsockopt__destroy(obj);
+}
+
+static void test_getsockopt_override(int cgroup_fd, int sock_fd)
+{
+ struct cgroup_getset_retval_getsockopt *obj;
+ struct bpf_link *link_set_eisconn = NULL;
+ int buf;
+ socklen_t optlen = sizeof(buf);
+
+ obj = cgroup_getset_retval_getsockopt__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "skel-load"))
+ return;
+
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
+ /* Attach getsockopt that sets retval to -EISCONN. Assert that this
+ * overrides the value from kernel.
+ */
+ link_set_eisconn = bpf_program__attach_cgroup(obj->progs.set_eisconn,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_set_eisconn, "cg-attach-set_eisconn"))
+ goto close_bpf_object;
+
+ if (!ASSERT_ERR(getsockopt(sock_fd, SOL_CUSTOM, 0,
+ &buf, &optlen), "getsockopt"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(errno, EISCONN, "getsockopt-errno"))
+ goto close_bpf_object;
+
+ if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations"))
+ goto close_bpf_object;
+ if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
+ goto close_bpf_object;
+
+close_bpf_object:
+ bpf_link__destroy(link_set_eisconn);
+
+ cgroup_getset_retval_getsockopt__destroy(obj);
+}
+
+static void test_getsockopt_retval_sync(int cgroup_fd, int sock_fd)
+{
+ struct cgroup_getset_retval_getsockopt *obj;
+ struct bpf_link *link_set_eisconn = NULL, *link_clear_retval = NULL;
+ struct bpf_link *link_get_retval = NULL;
+ int buf;
+ socklen_t optlen = sizeof(buf);
+
+ obj = cgroup_getset_retval_getsockopt__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "skel-load"))
+ return;
+
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
+	/* Attach getsockopt that sets retval to -EISCONN, and one that clears
+	 * the ctx retval. Assert that clearing the ctx retval is synced to the
+	 * helper and clears any errors from both the kernel and BPF.
+ */
+ link_set_eisconn = bpf_program__attach_cgroup(obj->progs.set_eisconn,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_set_eisconn, "cg-attach-set_eisconn"))
+ goto close_bpf_object;
+ link_clear_retval = bpf_program__attach_cgroup(obj->progs.clear_retval,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_clear_retval, "cg-attach-clear_retval"))
+ goto close_bpf_object;
+ link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
+ goto close_bpf_object;
+
+ if (!ASSERT_OK(getsockopt(sock_fd, SOL_CUSTOM, 0,
+ &buf, &optlen), "getsockopt"))
+ goto close_bpf_object;
+
+ if (!ASSERT_EQ(obj->bss->invocations, 3, "invocations"))
+ goto close_bpf_object;
+ if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(obj->bss->retval_value, 0, "retval_value"))
+ goto close_bpf_object;
+ if (!ASSERT_EQ(obj->bss->ctx_retval_value, 0, "ctx_retval_value"))
+ goto close_bpf_object;
+
+close_bpf_object:
+ bpf_link__destroy(link_set_eisconn);
+ bpf_link__destroy(link_clear_retval);
+ bpf_link__destroy(link_get_retval);
+
+ cgroup_getset_retval_getsockopt__destroy(obj);
+}
+
+struct exposed_hook {
+ const char *name;
+ int expected_err;
+} exposed_hooks[] = {
+
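+/* expand cgroup_getset_retval_hooks.h into {hook name, expected load error} entries */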
+#define BPF_RETVAL_HOOK(NAME, SECTION, CTX, EXPECTED_ERR) \
+ { \
+ .name = #NAME, \
+ .expected_err = EXPECTED_ERR, \
+ },
+
+#include "cgroup_getset_retval_hooks.h"
+
+#undef BPF_RETVAL_HOOK
+};
+
+static void test_exposed_hooks(int cgroup_fd, int sock_fd)
+{
+ struct cgroup_getset_retval_hooks *skel;
+ struct bpf_program *prog;
+ int err;
+ int i;
+
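+	/* load each hook in isolation and compare the verifier verdict against the table */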
+ for (i = 0; i < ARRAY_SIZE(exposed_hooks); i++) {
+ skel = cgroup_getset_retval_hooks__open();
+ if (!ASSERT_OK_PTR(skel, "cgroup_getset_retval_hooks__open"))
+ continue;
+
+ prog = bpf_object__find_program_by_name(skel->obj, exposed_hooks[i].name);
+ if (!ASSERT_NEQ(prog, NULL, "bpf_object__find_program_by_name"))
+ goto close_skel;
+
+ err = bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(err, "bpf_program__set_autoload"))
+ goto close_skel;
+
+ err = cgroup_getset_retval_hooks__load(skel);
+ ASSERT_EQ(err, exposed_hooks[i].expected_err, "expected_err");
+
+close_skel:
+ cgroup_getset_retval_hooks__destroy(skel);
+ }
+}
+
+void test_cgroup_getset_retval(void)
+{
+ int cgroup_fd = -1;
+ int sock_fd = -1;
+
+ cgroup_fd = test__join_cgroup("/cgroup_getset_retval");
+ if (!ASSERT_GE(cgroup_fd, 0, "cg-create"))
+ goto close_fd;
+
+ sock_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 0, 0);
+ if (!ASSERT_GE(sock_fd, 0, "start-server"))
+ goto close_fd;
+
+ if (test__start_subtest("setsockopt-set"))
+ test_setsockopt_set(cgroup_fd, sock_fd);
+
+ if (test__start_subtest("setsockopt-set_and_get"))
+ test_setsockopt_set_and_get(cgroup_fd, sock_fd);
+
+ if (test__start_subtest("setsockopt-default_zero"))
+ test_setsockopt_default_zero(cgroup_fd, sock_fd);
+
+ if (test__start_subtest("setsockopt-default_zero_and_set"))
+ test_setsockopt_default_zero_and_set(cgroup_fd, sock_fd);
+
+ if (test__start_subtest("setsockopt-override"))
+ test_setsockopt_override(cgroup_fd, sock_fd);
+
+ if (test__start_subtest("setsockopt-legacy_eperm"))
+ test_setsockopt_legacy_eperm(cgroup_fd, sock_fd);
+
+ if (test__start_subtest("setsockopt-legacy_no_override"))
+ test_setsockopt_legacy_no_override(cgroup_fd, sock_fd);
+
+ if (test__start_subtest("getsockopt-get"))
+ test_getsockopt_get(cgroup_fd, sock_fd);
+
+ if (test__start_subtest("getsockopt-override"))
+ test_getsockopt_override(cgroup_fd, sock_fd);
+
+ if (test__start_subtest("getsockopt-retval_sync"))
+ test_getsockopt_retval_sync(cgroup_fd, sock_fd);
+
+ if (test__start_subtest("exposed_hooks"))
+ test_exposed_hooks(cgroup_fd, sock_fd);
+
+close_fd:
+	close(sock_fd);
+	close(cgroup_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c b/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c
new file mode 100644
index 000000000000..3bd27d2ea668
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This test makes sure BPF stats collection using rstat works correctly.
+ * The test uses 3 BPF progs:
+ * (a) counter: This BPF prog is invoked every time we attach a process to a
+ * cgroup and locklessly increments a percpu counter.
+ * The program then calls cgroup_rstat_updated() to inform rstat
+ * of an update on the (cpu, cgroup) pair.
+ *
+ * (b) flusher: This BPF prog is invoked when an rstat flush is ongoing, it
+ * aggregates all percpu counters to a total counter, and also
+ * propagates the changes to the ancestor cgroups.
+ *
+ * (c) dumper: This BPF prog is a cgroup_iter. It is used to output the total
+ * counter of a cgroup through reading a file in userspace.
+ *
+ * The test sets up a cgroup hierarchy, and the above programs. It spawns a few
+ * processes in the leaf cgroups and makes sure all the counters are aggregated
+ * correctly.
+ *
+ * Copyright 2022 Google LLC.
+ */
+#include <asm-generic/errno.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <test_progs.h>
+#include <bpf/libbpf.h>
+#include <bpf/bpf.h>
+
+#include "cgroup_helpers.h"
+#include "cgroup_hierarchical_stats.skel.h"
+
+#define PAGE_SIZE 4096
+#define MB(x) (x << 20)
+
+#define PROCESSES_PER_CGROUP 3
+
+#define BPFFS_ROOT "/sys/fs/bpf/"
+#define BPFFS_ATTACH_COUNTERS BPFFS_ROOT "attach_counters/"
+
+#define CG_ROOT_NAME "root"
+#define CG_ROOT_ID 1
+
+#define CGROUP_PATH(p, n) {.path = p"/"n, .name = n}
+
+static struct {
+ const char *path, *name;
+ unsigned long long id;
+ int fd;
+} cgroups[] = {
+ CGROUP_PATH("/", "test"),
+ CGROUP_PATH("/test", "child1"),
+ CGROUP_PATH("/test", "child2"),
+ CGROUP_PATH("/test/child1", "child1_1"),
+ CGROUP_PATH("/test/child1", "child1_2"),
+ CGROUP_PATH("/test/child2", "child2_1"),
+ CGROUP_PATH("/test/child2", "child2_2"),
+};
+
+#define N_CGROUPS ARRAY_SIZE(cgroups)
+#define N_NON_LEAF_CGROUPS 3
+
+static int root_cgroup_fd;
+static bool mounted_bpffs;
+
+/* reads the file at 'path' into 'buf', returns 0 on success. */
+static int read_from_file(const char *path, char *buf, size_t size)
+{
+ int fd, len;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return fd;
+
+	len = read(fd, buf, size - 1); /* leave room for the terminating NUL */
+ close(fd);
+ if (len < 0)
+ return len;
+
+ buf[len] = 0;
+ return 0;
+}
+
+/* mounts bpffs and mkdir for reading stats, returns 0 on success. */
+static int setup_bpffs(void)
+{
+ int err;
+
+ /* Mount bpffs */
+ err = mount("bpf", BPFFS_ROOT, "bpf", 0, NULL);
+ mounted_bpffs = !err;
+	if (!ASSERT_FALSE(err && errno != EBUSY, "mount"))
+ return err;
+
+ /* Create a directory to contain stat files in bpffs */
+ err = mkdir(BPFFS_ATTACH_COUNTERS, 0755);
+ if (!ASSERT_OK(err, "mkdir"))
+ return err;
+
+ return 0;
+}
+
+static void cleanup_bpffs(void)
+{
+ /* Remove created directory in bpffs */
+ ASSERT_OK(rmdir(BPFFS_ATTACH_COUNTERS), "rmdir "BPFFS_ATTACH_COUNTERS);
+
+ /* Unmount bpffs, if it wasn't already mounted when we started */
+ if (mounted_bpffs)
+ return;
+
+ ASSERT_OK(umount(BPFFS_ROOT), "unmount bpffs");
+}
+
+/* sets up cgroups, returns 0 on success. */
+static int setup_cgroups(void)
+{
+ int i, fd, err;
+
+ err = setup_cgroup_environment();
+ if (!ASSERT_OK(err, "setup_cgroup_environment"))
+ return err;
+
+ root_cgroup_fd = get_root_cgroup();
+ if (!ASSERT_GE(root_cgroup_fd, 0, "get_root_cgroup"))
+ return root_cgroup_fd;
+
+ for (i = 0; i < N_CGROUPS; i++) {
+ fd = create_and_get_cgroup(cgroups[i].path);
+ if (!ASSERT_GE(fd, 0, "create_and_get_cgroup"))
+ return fd;
+
+ cgroups[i].fd = fd;
+ cgroups[i].id = get_cgroup_id(cgroups[i].path);
+ }
+ return 0;
+}
+
+static void cleanup_cgroups(void)
+{
+ close(root_cgroup_fd);
+ for (int i = 0; i < N_CGROUPS; i++)
+ close(cgroups[i].fd);
+ cleanup_cgroup_environment();
+}
+
+/* Sets up the cgroup hierarchy, returns 0 on success. */
+static int setup_hierarchy(void)
+{
+ return setup_bpffs() || setup_cgroups();
+}
+
+static void destroy_hierarchy(void)
+{
+ cleanup_cgroups();
+ cleanup_bpffs();
+}
+
+static int attach_processes(void)
+{
+ int i, j, status;
+
+ /* In every leaf cgroup, attach 3 processes */
+ for (i = N_NON_LEAF_CGROUPS; i < N_CGROUPS; i++) {
+ for (j = 0; j < PROCESSES_PER_CGROUP; j++) {
+ pid_t pid;
+
+ /* Create child and attach to cgroup */
+ pid = fork();
+ if (pid == 0) {
+ if (join_parent_cgroup(cgroups[i].path))
+ exit(EACCES);
+ exit(0);
+ }
+
+ /* Cleanup child */
+ waitpid(pid, &status, 0);
+ if (!ASSERT_TRUE(WIFEXITED(status), "child process exited"))
+ return 1;
+ if (!ASSERT_EQ(WEXITSTATUS(status), 0,
+ "child process exit code"))
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static unsigned long long
+get_attach_counter(unsigned long long cgroup_id, const char *file_name)
+{
+ unsigned long long attach_counter = 0, id = 0;
+ static char buf[128], path[128];
+
+ /* For every cgroup, read the file generated by cgroup_iter */
+ snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, file_name);
+ if (!ASSERT_OK(read_from_file(path, buf, 128), "read cgroup_iter"))
+ return 0;
+
+ /* Check the output file formatting */
+ ASSERT_EQ(sscanf(buf, "cg_id: %llu, attach_counter: %llu\n",
+ &id, &attach_counter), 2, "output format");
+
+ /* Check that the cgroup_id is displayed correctly */
+ ASSERT_EQ(id, cgroup_id, "cgroup_id");
+ /* Check that the counter is non-zero */
+ ASSERT_GT(attach_counter, 0, "attach counter non-zero");
+ return attach_counter;
+}
+
+static void check_attach_counters(void)
+{
+ unsigned long long attach_counters[N_CGROUPS], root_attach_counter;
+ int i;
+
+ for (i = 0; i < N_CGROUPS; i++)
+ attach_counters[i] = get_attach_counter(cgroups[i].id,
+ cgroups[i].name);
+
+ /* Read stats for root too */
+ root_attach_counter = get_attach_counter(CG_ROOT_ID, CG_ROOT_NAME);
+
+	/* Check that all leaf cgroups have an attach counter of 3 */
+ for (i = N_NON_LEAF_CGROUPS; i < N_CGROUPS; i++)
+ ASSERT_EQ(attach_counters[i], PROCESSES_PER_CGROUP,
+ "leaf cgroup attach counter");
+
+ /* Check that child1 == child1_1 + child1_2 */
+ ASSERT_EQ(attach_counters[1], attach_counters[3] + attach_counters[4],
+ "child1_counter");
+ /* Check that child2 == child2_1 + child2_2 */
+ ASSERT_EQ(attach_counters[2], attach_counters[5] + attach_counters[6],
+ "child2_counter");
+ /* Check that test == child1 + child2 */
+ ASSERT_EQ(attach_counters[0], attach_counters[1] + attach_counters[2],
+ "test_counter");
+ /* Check that root >= test */
+	ASSERT_GE(root_attach_counter, attach_counters[0], "root_counter");
+}
+
+/* Creates iter link and pins in bpffs, returns 0 on success, -errno on failure.
+ */
+static int setup_cgroup_iter(struct cgroup_hierarchical_stats *obj,
+ int cgroup_fd, const char *file_name)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo = {};
+ struct bpf_link *link;
+ static char path[128];
+ int err;
+
+ /*
+ * Create an iter link, parameterized by cgroup_fd. We only want to
+ * traverse one cgroup, so set the traversal order to "self".
+ */
+ linfo.cgroup.cgroup_fd = cgroup_fd;
+ linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+ link = bpf_program__attach_iter(obj->progs.dumper, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
+ return -EFAULT;
+
+ /* Pin the link to a bpffs file */
+ snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, file_name);
+ err = bpf_link__pin(link, path);
+ ASSERT_OK(err, "pin cgroup_iter");
+
+ /* Remove the link, leaving only the ref held by the pinned file */
+ bpf_link__destroy(link);
+ return err;
+}
+
+/* Sets up programs for collecting stats, returns 0 on success. */
+static int setup_progs(struct cgroup_hierarchical_stats **skel)
+{
+ int i, err;
+
+ *skel = cgroup_hierarchical_stats__open_and_load();
+ if (!ASSERT_OK_PTR(*skel, "open_and_load"))
+ return 1;
+
+ /* Attach cgroup_iter program that will dump the stats to cgroups */
+ for (i = 0; i < N_CGROUPS; i++) {
+ err = setup_cgroup_iter(*skel, cgroups[i].fd, cgroups[i].name);
+ if (!ASSERT_OK(err, "setup_cgroup_iter"))
+ return err;
+ }
+
+ /* Also dump stats for root */
+ err = setup_cgroup_iter(*skel, root_cgroup_fd, CG_ROOT_NAME);
+ if (!ASSERT_OK(err, "setup_cgroup_iter"))
+ return err;
+
+ bpf_program__set_autoattach((*skel)->progs.dumper, false);
+ err = cgroup_hierarchical_stats__attach(*skel);
+ if (!ASSERT_OK(err, "attach"))
+ return err;
+
+ return 0;
+}
+
+static void destroy_progs(struct cgroup_hierarchical_stats *skel)
+{
+ static char path[128];
+ int i;
+
+ for (i = 0; i < N_CGROUPS; i++) {
+ /* Delete files in bpffs that cgroup_iters are pinned in */
+ snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS,
+ cgroups[i].name);
+ ASSERT_OK(remove(path), "remove cgroup_iter pin");
+ }
+
+ /* Delete root file in bpffs */
+ snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, CG_ROOT_NAME);
+ ASSERT_OK(remove(path), "remove cgroup_iter root pin");
+ cgroup_hierarchical_stats__destroy(skel);
+}
+
+void test_cgroup_hierarchical_stats(void)
+{
+ struct cgroup_hierarchical_stats *skel = NULL;
+
+ if (setup_hierarchy())
+ goto hierarchy_cleanup;
+ if (setup_progs(&skel))
+ goto cleanup;
+ if (attach_processes())
+ goto cleanup;
+ check_attach_counters();
+cleanup:
+ destroy_progs(skel);
+hierarchy_cleanup:
+ destroy_hierarchy();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c
new file mode 100644
index 000000000000..574d9a0cdc8e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Google */
+
+#include <test_progs.h>
+#include <bpf/libbpf.h>
+#include <bpf/btf.h>
+#include "iters_css_task.skel.h"
+#include "cgroup_iter.skel.h"
+#include "cgroup_helpers.h"
+
+#define ROOT 0
+#define PARENT 1
+#define CHILD1 2
+#define CHILD2 3
+#define NUM_CGROUPS 4
+
+#define PROLOGUE "prologue\n"
+#define EPILOGUE "epilogue\n"
+
+static const char *cg_path[] = {
+ "/", "/parent", "/parent/child1", "/parent/child2"
+};
+
+static int cg_fd[] = {-1, -1, -1, -1};
+static unsigned long long cg_id[] = {0, 0, 0, 0};
+static char expected_output[64];
+
+static int setup_cgroups(void)
+{
+ int fd, i = 0;
+
+ for (i = 0; i < NUM_CGROUPS; i++) {
+ fd = create_and_get_cgroup(cg_path[i]);
+ if (fd < 0)
+ return fd;
+
+ cg_fd[i] = fd;
+ cg_id[i] = get_cgroup_id(cg_path[i]);
+ }
+ return 0;
+}
+
+static void cleanup_cgroups(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_CGROUPS; i++)
+ close(cg_fd[i]);
+}
+
+static void read_from_cgroup_iter(struct bpf_program *prog, int cgroup_fd,
+ int order, const char *testname)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+ struct bpf_link *link;
+ int len, iter_fd;
+ static char buf[128];
+ size_t left;
+ char *p;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.cgroup.cgroup_fd = cgroup_fd;
+ linfo.cgroup.order = order;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ link = bpf_program__attach_iter(prog, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
+ return;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (iter_fd < 0)
+ goto free_link;
+
+ memset(buf, 0, sizeof(buf));
+ left = ARRAY_SIZE(buf);
+ p = buf;
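+	/* Drain the iterator; read() returns 0 once the cgroup walk ends. */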
+ while ((len = read(iter_fd, p, left)) > 0) {
+ p += len;
+ left -= len;
+ }
+
+ ASSERT_STREQ(buf, expected_output, testname);
+
+ /* read() after iter finishes should be ok. */
+ if (len == 0)
+ ASSERT_OK(read(iter_fd, buf, sizeof(buf)), "second_read");
+
+ close(iter_fd);
+free_link:
+ bpf_link__destroy(link);
+}
+
+/* Invalid cgroup. */
+static void test_invalid_cgroup(struct cgroup_iter *skel)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+ struct bpf_link *link;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.cgroup.cgroup_fd = (__u32)-1;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts);
+ ASSERT_ERR_PTR(link, "attach_iter");
+ bpf_link__destroy(link);
+}
+
+/* Specifying both cgroup_fd and cgroup_id is invalid. */
+static void test_invalid_cgroup_spec(struct cgroup_iter *skel)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+ struct bpf_link *link;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.cgroup.cgroup_fd = (__u32)cg_fd[PARENT];
+ linfo.cgroup.cgroup_id = (__u64)cg_id[PARENT];
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts);
+ ASSERT_ERR_PTR(link, "attach_iter");
+ bpf_link__destroy(link);
+}
+
+/* Preorder walk prints parent and child in order. */
+static void test_walk_preorder(struct cgroup_iter *skel)
+{
+ snprintf(expected_output, sizeof(expected_output),
+ PROLOGUE "%8llu\n%8llu\n%8llu\n" EPILOGUE,
+ cg_id[PARENT], cg_id[CHILD1], cg_id[CHILD2]);
+
+ read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT],
+ BPF_CGROUP_ITER_DESCENDANTS_PRE, "preorder");
+}
+
+/* Postorder walk prints child and parent in order. */
+static void test_walk_postorder(struct cgroup_iter *skel)
+{
+ snprintf(expected_output, sizeof(expected_output),
+ PROLOGUE "%8llu\n%8llu\n%8llu\n" EPILOGUE,
+ cg_id[CHILD1], cg_id[CHILD2], cg_id[PARENT]);
+
+ read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT],
+ BPF_CGROUP_ITER_DESCENDANTS_POST, "postorder");
+}
+
+/* Walking parents prints parent and then root. */
+static void test_walk_ancestors_up(struct cgroup_iter *skel)
+{
+ /* terminate the walk when ROOT is met. */
+	/* terminate the walk when ROOT is reached. */
+
+ snprintf(expected_output, sizeof(expected_output),
+ PROLOGUE "%8llu\n%8llu\n" EPILOGUE,
+ cg_id[PARENT], cg_id[ROOT]);
+
+ read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT],
+ BPF_CGROUP_ITER_ANCESTORS_UP, "ancestors_up");
+
+ skel->bss->terminal_cgroup = 0;
+}
+
+/* Early termination prints parent only. */
+static void test_early_termination(struct cgroup_iter *skel)
+{
+ /* terminate the walk after the first element is processed. */
+ skel->bss->terminate_early = 1;
+
+ snprintf(expected_output, sizeof(expected_output),
+ PROLOGUE "%8llu\n" EPILOGUE, cg_id[PARENT]);
+
+ read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT],
+ BPF_CGROUP_ITER_DESCENDANTS_PRE, "early_termination");
+
+ skel->bss->terminate_early = 0;
+}
+
+/* Walking self prints self only. */
+static void test_walk_self_only(struct cgroup_iter *skel)
+{
+ snprintf(expected_output, sizeof(expected_output),
+ PROLOGUE "%8llu\n" EPILOGUE, cg_id[PARENT]);
+
+ read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT],
+ BPF_CGROUP_ITER_SELF_ONLY, "self_only");
+}
+
+static void test_walk_dead_self_only(struct cgroup_iter *skel)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ char expected_output[128], buf[128];
+ const char *cgrp_name = "/dead";
+ union bpf_iter_link_info linfo;
+ int len, cgrp_fd, iter_fd;
+ struct bpf_link *link;
+ size_t left;
+ char *p;
+
+ cgrp_fd = create_and_get_cgroup(cgrp_name);
+ if (!ASSERT_GE(cgrp_fd, 0, "create cgrp"))
+ return;
+
+	/* The cgroup will be dead during the read() iteration, so only the
+	 * epilogue appears in the output.
+	 */
+ snprintf(expected_output, sizeof(expected_output), EPILOGUE);
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.cgroup.cgroup_fd = cgrp_fd;
+ linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+
+ link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
+ goto close_cgrp;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_GE(iter_fd, 0, "iter_create"))
+ goto free_link;
+
+ /* Close link fd and cgroup fd */
+ bpf_link__destroy(link);
+ close(cgrp_fd);
+
+ /* Remove cgroup to mark it as dead */
+ remove_cgroup(cgrp_name);
+
+	/* The first two kern_sync_rcu()/usleep() pairs wait for the cgroup's
+	 * css to be released; the last pair waits for the cgroup itself to
+	 * be freed.
+	 */
+ kern_sync_rcu();
+ usleep(8000);
+ kern_sync_rcu();
+ usleep(8000);
+ kern_sync_rcu();
+ usleep(1000);
+
+ memset(buf, 0, sizeof(buf));
+ left = ARRAY_SIZE(buf);
+ p = buf;
+ while ((len = read(iter_fd, p, left)) > 0) {
+ p += len;
+ left -= len;
+ }
+
+ ASSERT_STREQ(buf, expected_output, "dead cgroup output");
+
+ /* read() after iter finishes should be ok. */
+ if (len == 0)
+ ASSERT_OK(read(iter_fd, buf, sizeof(buf)), "second_read");
+
+ close(iter_fd);
+ return;
+free_link:
+ bpf_link__destroy(link);
+close_cgrp:
+ close(cgrp_fd);
+}
+
+static void test_walk_self_only_css_task(void)
+{
+ struct iters_css_task *skel;
+ int err;
+
+ skel = iters_css_task__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.cgroup_id_printer, true);
+
+ err = iters_css_task__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ err = join_cgroup(cg_path[CHILD2]);
+ if (!ASSERT_OK(err, "join_cgroup"))
+ goto cleanup;
+
+ skel->bss->target_pid = getpid();
+ snprintf(expected_output, sizeof(expected_output),
+ PROLOGUE "%8llu\n" EPILOGUE, cg_id[CHILD2]);
+ read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[CHILD2],
+ BPF_CGROUP_ITER_SELF_ONLY, "test_walk_self_only_css_task");
+ ASSERT_EQ(skel->bss->css_task_cnt, 1, "css_task_cnt");
+cleanup:
+ iters_css_task__destroy(skel);
+}
+
+void test_cgroup_iter(void)
+{
+ struct cgroup_iter *skel = NULL;
+
+ if (setup_cgroup_environment())
+ return;
+
+ if (setup_cgroups())
+ goto out;
+
+ skel = cgroup_iter__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "cgroup_iter__open_and_load"))
+ goto out;
+
+ if (test__start_subtest("cgroup_iter__invalid_cgroup"))
+ test_invalid_cgroup(skel);
+ if (test__start_subtest("cgroup_iter__invalid_cgroup_spec"))
+ test_invalid_cgroup_spec(skel);
+ if (test__start_subtest("cgroup_iter__preorder"))
+ test_walk_preorder(skel);
+ if (test__start_subtest("cgroup_iter__postorder"))
+ test_walk_postorder(skel);
+ if (test__start_subtest("cgroup_iter__ancestors_up_walk"))
+ test_walk_ancestors_up(skel);
+ if (test__start_subtest("cgroup_iter__early_termination"))
+ test_early_termination(skel);
+ if (test__start_subtest("cgroup_iter__self_only"))
+ test_walk_self_only(skel);
+ if (test__start_subtest("cgroup_iter__dead_self_only"))
+ test_walk_dead_self_only(skel);
+ if (test__start_subtest("cgroup_iter__self_only_css_task"))
+ test_walk_self_only_css_task();
+
+out:
+ cgroup_iter__destroy(skel);
+ cleanup_cgroups();
+ cleanup_cgroup_environment();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c
index 4d9b514b3fd9..15093a69510e 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c
@@ -24,7 +24,7 @@ int ping_and_check(int exp_calls, int exp_alt_calls)
return 0;
}
-void test_cgroup_link(void)
+void serial_test_cgroup_link(void)
{
struct {
const char *path;
@@ -54,7 +54,7 @@ void test_cgroup_link(void)
for (i = 0; i < cg_nr; i++) {
cgs[i].fd = create_and_get_cgroup(cgs[i].path);
- if (CHECK(cgs[i].fd < 0, "cg_create", "fail: %d\n", cgs[i].fd))
+ if (!ASSERT_GE(cgs[i].fd, 0, "cg_create"))
goto cleanup;
}
@@ -65,17 +65,15 @@ void test_cgroup_link(void)
for (i = 0; i < cg_nr; i++) {
links[i] = bpf_program__attach_cgroup(skel->progs.egress,
cgs[i].fd);
- if (CHECK(IS_ERR(links[i]), "cg_attach", "i: %d, err: %ld\n",
- i, PTR_ERR(links[i])))
+ if (!ASSERT_OK_PTR(links[i], "cg_attach"))
goto cleanup;
}
ping_and_check(cg_nr, 0);
- /* query the number of effective progs and attach flags in root cg */
+ /* query the number of attached progs and attach flags in root cg */
err = bpf_prog_query(cgs[0].fd, BPF_CGROUP_INET_EGRESS,
- BPF_F_QUERY_EFFECTIVE, &attach_flags, NULL,
- &prog_cnt);
+ 0, &attach_flags, NULL, &prog_cnt);
CHECK_FAIL(err);
CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
if (CHECK(prog_cnt != 1, "effect_cnt", "exp %d, got %d\n", 1, prog_cnt))
@@ -86,17 +84,15 @@ void test_cgroup_link(void)
BPF_F_QUERY_EFFECTIVE, NULL, NULL,
&prog_cnt);
CHECK_FAIL(err);
- CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
cg_nr, prog_cnt))
goto cleanup;
/* query the effective prog IDs in last cg */
err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
- BPF_F_QUERY_EFFECTIVE, &attach_flags,
- prog_ids, &prog_cnt);
+ BPF_F_QUERY_EFFECTIVE, NULL, prog_ids,
+ &prog_cnt);
CHECK_FAIL(err);
- CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
cg_nr, prog_cnt))
goto cleanup;
@@ -121,8 +117,7 @@ void test_cgroup_link(void)
links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
cgs[last_cg].fd);
- if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n",
- PTR_ERR(links[last_cg])))
+ if (!ASSERT_OK_PTR(links[last_cg], "cg_attach"))
goto cleanup;
ping_and_check(cg_nr + 1, 0);
@@ -147,7 +142,7 @@ void test_cgroup_link(void)
/* attempt to mix in with multi-attach bpf_link */
tmp_link = bpf_program__attach_cgroup(skel->progs.egress,
cgs[last_cg].fd);
- if (CHECK(!IS_ERR(tmp_link), "cg_attach_fail", "unexpected success!\n")) {
+ if (!ASSERT_ERR_PTR(tmp_link, "cg_attach_fail")) {
bpf_link__destroy(tmp_link);
goto cleanup;
}
@@ -165,8 +160,7 @@ void test_cgroup_link(void)
/* attach back link-based one */
links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
cgs[last_cg].fd);
- if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n",
- PTR_ERR(links[last_cg])))
+ if (!ASSERT_OK_PTR(links[last_cg], "cg_attach"))
goto cleanup;
ping_and_check(cg_nr, 0);
@@ -249,8 +243,7 @@ cleanup:
BPF_CGROUP_INET_EGRESS);
for (i = 0; i < cg_nr; i++) {
- if (!IS_ERR(links[i]))
- bpf_link__destroy(links[i]);
+ bpf_link__destroy(links[i]);
}
test_cgroup_link__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_opts.c b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_opts.c
new file mode 100644
index 000000000000..bb60704a3ef9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_opts.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "cgroup_mprog.skel.h"
+
+static void assert_mprog_count(int cg, int atype, int expected)
+{
+ __u32 count = 0, attach_flags = 0;
+ int err;
+
+ err = bpf_prog_query(cg, atype, 0, &attach_flags,
+ NULL, &count);
+ ASSERT_EQ(count, expected, "count");
+ ASSERT_EQ(err, 0, "prog_query");
+}
+
+static void test_prog_attach_detach(int atype)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+ struct cgroup_mprog *skel;
+ __u32 prog_ids[10];
+ int cg, err;
+
+ cg = test__join_cgroup("/prog_attach_detach");
+ if (!ASSERT_GE(cg, 0, "join_cgroup /prog_attach_detach"))
+ return;
+
+ skel = cgroup_mprog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.getsockopt_1);
+ fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+ fd3 = bpf_program__fd(skel->progs.getsockopt_3);
+ fd4 = bpf_program__fd(skel->progs.getsockopt_4);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+ id4 = id_from_prog_fd(fd4);
+
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_AFTER,
+ .expected_revision = 1,
+ );
+
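+	/*
+	 * BPF_F_BEFORE | BPF_F_AFTER without a relative prog means "insert as
+	 * both first and last", which can only succeed while the list is
+	 * still empty.
+	 */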
+ /* ordering: [fd1] */
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(cg, atype, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE,
+ .expected_revision = 2,
+ );
+
+ /* ordering: [fd2, fd1] */
+ err = bpf_prog_attach_opts(fd2, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
+ .relative_fd = fd2,
+ .expected_revision = 3,
+ );
+
+ /* ordering: [fd2, fd3, fd1] */
+ err = bpf_prog_attach_opts(fd3, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 3);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI,
+ .expected_revision = 4,
+ );
+
+ /* ordering: [fd2, fd3, fd1, fd4] */
+ err = bpf_prog_attach_opts(fd4, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(cg, atype, 4);
+
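+	/* Revisions start at 1; each attach bumps them, so 4 attaches -> 5. */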
+ /* retrieve optq.prog_cnt */
+ err = bpf_prog_query_opts(cg, atype, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ /* optq.prog_cnt will be used in below query */
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.prog_ids = prog_ids;
+ err = bpf_prog_query_opts(cg, atype, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id1, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+cleanup4:
+ optd.expected_revision = 5;
+ err = bpf_prog_detach_opts(fd4, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 3);
+
+cleanup3:
+ LIBBPF_OPTS_RESET(optd);
+ err = bpf_prog_detach_opts(fd3, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 2);
+
+ /* Check revision after two detach operations */
+ err = bpf_prog_query_opts(cg, atype, &optq);
+ ASSERT_OK(err, "prog_query");
+ ASSERT_EQ(optq.revision, 7, "revision");
+
+cleanup2:
+ err = bpf_prog_detach_opts(fd2, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 1);
+
+cleanup1:
+ err = bpf_prog_detach_opts(fd1, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 0);
+
+cleanup:
+ cgroup_mprog__destroy(skel);
+ close(cg);
+}
+
+static void test_link_attach_detach(int atype)
+{
+ LIBBPF_OPTS(bpf_cgroup_opts, opta);
+ LIBBPF_OPTS(bpf_cgroup_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ struct bpf_link *link1, *link2, *link3, *link4;
+ __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+ struct cgroup_mprog *skel;
+ __u32 prog_ids[10];
+ int cg, err;
+
+ cg = test__join_cgroup("/link_attach_detach");
+ if (!ASSERT_GE(cg, 0, "join_cgroup /link_attach_detach"))
+ return;
+
+ skel = cgroup_mprog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.getsockopt_1);
+ fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+ fd3 = bpf_program__fd(skel->progs.getsockopt_3);
+ fd4 = bpf_program__fd(skel->progs.getsockopt_4);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+ id4 = id_from_prog_fd(fd4);
+
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 1,
+ );
+
+ /* ordering: [fd1] */
+ link1 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_1, cg, &opta);
+ if (!ASSERT_OK_PTR(link1, "link_attach"))
+ goto cleanup;
+
+ assert_mprog_count(cg, atype, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE | BPF_F_LINK,
+ .relative_id = id_from_link_fd(bpf_link__fd(link1)),
+ .expected_revision = 2,
+ );
+
+ /* ordering: [fd2, fd1] */
+ link2 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_2, cg, &opta);
+ if (!ASSERT_OK_PTR(link2, "link_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER | BPF_F_LINK,
+ .relative_fd = bpf_link__fd(link2),
+ .expected_revision = 3,
+ );
+
+ /* ordering: [fd2, fd3, fd1] */
+ link3 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_3, cg, &opta);
+ if (!ASSERT_OK_PTR(link3, "link_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 3);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 4,
+ );
+
+ /* ordering: [fd2, fd3, fd1, fd4] */
+ link4 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_4, cg, &opta);
+ if (!ASSERT_OK_PTR(link4, "link_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(cg, atype, 4);
+
+ /* retrieve optq.prog_cnt */
+ err = bpf_prog_query_opts(cg, atype, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ /* optq.prog_cnt will be used in below query */
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.prog_ids = prog_ids;
+ err = bpf_prog_query_opts(cg, atype, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id1, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+cleanup4:
+ bpf_link__destroy(link4);
+ assert_mprog_count(cg, atype, 3);
+
+cleanup3:
+ bpf_link__destroy(link3);
+ assert_mprog_count(cg, atype, 2);
+
+ /* Check revision after two detach operations */
+ err = bpf_prog_query_opts(cg, atype, &optq);
+ ASSERT_OK(err, "prog_query");
+ ASSERT_EQ(optq.revision, 7, "revision");
+
+cleanup2:
+ bpf_link__destroy(link2);
+ assert_mprog_count(cg, atype, 1);
+
+cleanup1:
+ bpf_link__destroy(link1);
+ assert_mprog_count(cg, atype, 0);
+
+cleanup:
+ cgroup_mprog__destroy(skel);
+ close(cg);
+}
+
+static void test_preorder_prog_attach_detach(int atype)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ __u32 fd1, fd2, fd3, fd4;
+ struct cgroup_mprog *skel;
+ int cg, err;
+
+ cg = test__join_cgroup("/preorder_prog_attach_detach");
+ if (!ASSERT_GE(cg, 0, "join_cgroup /preorder_prog_attach_detach"))
+ return;
+
+ skel = cgroup_mprog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.getsockopt_1);
+ fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+ fd3 = bpf_program__fd(skel->progs.getsockopt_3);
+ fd4 = bpf_program__fd(skel->progs.getsockopt_4);
+
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI,
+ .expected_revision = 1,
+ );
+
+ /* ordering: [fd1] */
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(cg, atype, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_PREORDER,
+ .expected_revision = 2,
+ );
+
+ /* ordering: [fd1, fd2] */
+ err = bpf_prog_attach_opts(fd2, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
+ .relative_fd = fd2,
+ .expected_revision = 3,
+ );
+
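+	/*
+	 * fd2 was attached with BPF_F_PREORDER, so anchoring a non-preorder
+	 * prog relative to it is expected to be rejected with -EINVAL.
+	 */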
+ err = bpf_prog_attach_opts(fd3, cg, atype, &opta);
+ if (!ASSERT_EQ(err, -EINVAL, "prog_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER | BPF_F_PREORDER,
+ .relative_fd = fd2,
+ .expected_revision = 3,
+ );
+
+ /* ordering: [fd1, fd2, fd3] */
+ err = bpf_prog_attach_opts(fd3, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 3);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI,
+ .expected_revision = 4,
+ );
+
+	/* ordering: [fd1, fd2, fd3, fd4] */
+ err = bpf_prog_attach_opts(fd4, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(cg, atype, 4);
+
+ err = bpf_prog_detach_opts(fd4, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 3);
+
+cleanup3:
+ err = bpf_prog_detach_opts(fd3, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 2);
+
+cleanup2:
+ err = bpf_prog_detach_opts(fd2, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 1);
+
+cleanup1:
+ err = bpf_prog_detach_opts(fd1, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 0);
+
+cleanup:
+ cgroup_mprog__destroy(skel);
+ close(cg);
+}
+
+static void test_preorder_link_attach_detach(int atype)
+{
+ LIBBPF_OPTS(bpf_cgroup_opts, opta);
+ struct bpf_link *link1, *link2, *link3, *link4;
+ struct cgroup_mprog *skel;
+ __u32 fd2;
+ int cg;
+
+ cg = test__join_cgroup("/preorder_link_attach_detach");
+ if (!ASSERT_GE(cg, 0, "join_cgroup /preorder_link_attach_detach"))
+ return;
+
+ skel = cgroup_mprog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 1,
+ );
+
+ /* ordering: [fd1] */
+ link1 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_1, cg, &opta);
+ if (!ASSERT_OK_PTR(link1, "link_attach"))
+ goto cleanup;
+
+ assert_mprog_count(cg, atype, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_PREORDER,
+ .expected_revision = 2,
+ );
+
+ /* ordering: [fd1, fd2] */
+ link2 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_2, cg, &opta);
+ if (!ASSERT_OK_PTR(link2, "link_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER,
+ .relative_fd = fd2,
+ .expected_revision = 3,
+ );
+
+ link3 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_3, cg, &opta);
+ if (!ASSERT_ERR_PTR(link3, "link_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER | BPF_F_PREORDER | BPF_F_LINK,
+ .relative_fd = bpf_link__fd(link2),
+ .expected_revision = 3,
+ );
+
+ /* ordering: [fd1, fd2, fd3] */
+ link3 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_3, cg, &opta);
+ if (!ASSERT_OK_PTR(link3, "link_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 3);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 4,
+ );
+
+	/* ordering: [fd1, fd2, fd3, fd4] */
+ link4 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_4, cg, &opta);
+ if (!ASSERT_OK_PTR(link4, "prog_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(cg, atype, 4);
+
+ bpf_link__destroy(link4);
+ assert_mprog_count(cg, atype, 3);
+
+cleanup3:
+ bpf_link__destroy(link3);
+ assert_mprog_count(cg, atype, 2);
+
+cleanup2:
+ bpf_link__destroy(link2);
+ assert_mprog_count(cg, atype, 1);
+
+cleanup1:
+ bpf_link__destroy(link1);
+ assert_mprog_count(cg, atype, 0);
+
+cleanup:
+ cgroup_mprog__destroy(skel);
+ close(cg);
+}
+
+static void test_invalid_attach_detach(int atype)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ __u32 fd1, fd2, id2;
+ struct cgroup_mprog *skel;
+ int cg, err;
+
+ cg = test__join_cgroup("/invalid_attach_detach");
+ if (!ASSERT_GE(cg, 0, "join_cgroup /invalid_attach_detach"))
+ return;
+
+ skel = cgroup_mprog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.getsockopt_1);
+ fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+
+ id2 = id_from_prog_fd(fd2);
+
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_AFTER,
+ .relative_id = id2,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_ID,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -ENOENT, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER | BPF_F_ID,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -ENOENT, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_AFTER,
+ .relative_id = id2,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_LINK,
+ .relative_id = id2,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI,
+ .relative_id = id2,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -ENOENT, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -ENOENT, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+ assert_mprog_count(cg, atype, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach");
+ assert_mprog_count(cg, atype, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE | BPF_F_AFTER,
+ .replace_prog_fd = fd1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach");
+ assert_mprog_count(cg, atype, 1);
+cleanup:
+ cgroup_mprog__destroy(skel);
+ close(cg);
+}
+
+void test_cgroup_mprog_opts(void)
+{
+ if (test__start_subtest("prog_attach_detach"))
+ test_prog_attach_detach(BPF_CGROUP_GETSOCKOPT);
+ if (test__start_subtest("link_attach_detach"))
+ test_link_attach_detach(BPF_CGROUP_GETSOCKOPT);
+ if (test__start_subtest("preorder_prog_attach_detach"))
+ test_preorder_prog_attach_detach(BPF_CGROUP_GETSOCKOPT);
+ if (test__start_subtest("preorder_link_attach_detach"))
+ test_preorder_link_attach_detach(BPF_CGROUP_GETSOCKOPT);
+ if (test__start_subtest("invalid_attach_detach"))
+ test_invalid_attach_detach(BPF_CGROUP_GETSOCKOPT);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_ordering.c b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_ordering.c
new file mode 100644
index 000000000000..a36d2e968bc5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_ordering.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "cgroup_preorder.skel.h"
+
+static int run_getsockopt_test(int cg_parent, int sock_fd, bool has_relative_fd)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opts);
+ enum bpf_attach_type prog_p_atype, prog_p2_atype;
+ int prog_p_fd, prog_p2_fd;
+ struct cgroup_preorder *skel = NULL;
+ struct bpf_program *prog;
+ __u8 *result, buf;
+ socklen_t optlen = 1;
+ int err = 0;
+
+ skel = cgroup_preorder__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "cgroup_preorder__open_and_load"))
+ return 0;
+
+ LIBBPF_OPTS_RESET(opts);
+ opts.flags = BPF_F_ALLOW_MULTI;
+ prog = skel->progs.parent;
+ prog_p_fd = bpf_program__fd(prog);
+ prog_p_atype = bpf_program__expected_attach_type(prog);
+ err = bpf_prog_attach_opts(prog_p_fd, cg_parent, prog_p_atype, &opts);
+ if (!ASSERT_OK(err, "bpf_prog_attach_opts-parent"))
+ goto close_skel;
+
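+	/*
+	 * BPF_F_BEFORE with no relative_fd inserts at the head of the list;
+	 * with relative_fd it inserts just before that prog. Either way,
+	 * parent_2 should end up running before parent here.
+	 */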
+ opts.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE;
+ if (has_relative_fd)
+ opts.relative_fd = prog_p_fd;
+ prog = skel->progs.parent_2;
+ prog_p2_fd = bpf_program__fd(prog);
+ prog_p2_atype = bpf_program__expected_attach_type(prog);
+ err = bpf_prog_attach_opts(prog_p2_fd, cg_parent, prog_p2_atype, &opts);
+ if (!ASSERT_OK(err, "bpf_prog_attach_opts-parent_2"))
+ goto detach_parent;
+
+ err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen);
+ if (!ASSERT_OK(err, "getsockopt"))
+ goto detach_parent_2;
+
+ result = skel->bss->result;
+ ASSERT_TRUE(result[0] == 4 && result[1] == 3, "result values");
+
+detach_parent_2:
+ ASSERT_OK(bpf_prog_detach2(prog_p2_fd, cg_parent, prog_p2_atype),
+ "bpf_prog_detach2-parent_2");
+detach_parent:
+ ASSERT_OK(bpf_prog_detach2(prog_p_fd, cg_parent, prog_p_atype),
+ "bpf_prog_detach2-parent");
+close_skel:
+ cgroup_preorder__destroy(skel);
+ return err;
+}
+
+void test_cgroup_mprog_ordering(void)
+{
+ int cg_parent = -1, sock_fd = -1;
+
+ cg_parent = test__join_cgroup("/parent");
+ if (!ASSERT_GE(cg_parent, 0, "join_cgroup /parent"))
+ goto out;
+
+ sock_fd = socket(AF_INET, SOCK_STREAM, 0);
+ if (!ASSERT_GE(sock_fd, 0, "socket"))
+ goto out;
+
+ ASSERT_OK(run_getsockopt_test(cg_parent, sock_fd, false), "getsockopt_test_1");
+ ASSERT_OK(run_getsockopt_test(cg_parent, sock_fd, true), "getsockopt_test_2");
+
+out:
+ close(sock_fd);
+ close(cg_parent);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_preorder.c b/tools/testing/selftests/bpf/prog_tests/cgroup_preorder.c
new file mode 100644
index 000000000000..d4d583872fa2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_preorder.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "cgroup_preorder.skel.h"
+
+static int run_getsockopt_test(int cg_parent, int cg_child, int sock_fd, bool all_preorder)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opts);
+ enum bpf_attach_type prog_c_atype, prog_c2_atype, prog_p_atype, prog_p2_atype;
+ int prog_c_fd, prog_c2_fd, prog_p_fd, prog_p2_fd;
+ struct cgroup_preorder *skel = NULL;
+ struct bpf_program *prog;
+ __u8 *result, buf;
+ socklen_t optlen;
+ int err = 0;
+
+ skel = cgroup_preorder__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "cgroup_preorder__open_and_load"))
+ return 0;
+
+ buf = 0x00;
+ err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1);
+ if (!ASSERT_OK(err, "setsockopt"))
+ goto close_skel;
+
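+	/*
+	 * Each getsockopt prog presumably records its own number into
+	 * result[] in execution order; the asserts below encode the
+	 * expected orderings.
+	 */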
+ opts.flags = BPF_F_ALLOW_MULTI;
+ if (all_preorder)
+ opts.flags |= BPF_F_PREORDER;
+ prog = skel->progs.child;
+ prog_c_fd = bpf_program__fd(prog);
+ prog_c_atype = bpf_program__expected_attach_type(prog);
+ err = bpf_prog_attach_opts(prog_c_fd, cg_child, prog_c_atype, &opts);
+ if (!ASSERT_OK(err, "bpf_prog_attach_opts-child"))
+ goto close_skel;
+
+ opts.flags = BPF_F_ALLOW_MULTI | BPF_F_PREORDER;
+ prog = skel->progs.child_2;
+ prog_c2_fd = bpf_program__fd(prog);
+ prog_c2_atype = bpf_program__expected_attach_type(prog);
+ err = bpf_prog_attach_opts(prog_c2_fd, cg_child, prog_c2_atype, &opts);
+ if (!ASSERT_OK(err, "bpf_prog_attach_opts-child_2"))
+ goto detach_child;
+
+ optlen = 1;
+ err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen);
+ if (!ASSERT_OK(err, "getsockopt"))
+ goto detach_child_2;
+
+ result = skel->bss->result;
+ if (all_preorder)
+ ASSERT_TRUE(result[0] == 1 && result[1] == 2, "child only");
+ else
+ ASSERT_TRUE(result[0] == 2 && result[1] == 1, "child only");
+
+ skel->bss->idx = 0;
+ memset(result, 0, 4);
+
+ opts.flags = BPF_F_ALLOW_MULTI;
+ if (all_preorder)
+ opts.flags |= BPF_F_PREORDER;
+ prog = skel->progs.parent;
+ prog_p_fd = bpf_program__fd(prog);
+ prog_p_atype = bpf_program__expected_attach_type(prog);
+ err = bpf_prog_attach_opts(prog_p_fd, cg_parent, prog_p_atype, &opts);
+ if (!ASSERT_OK(err, "bpf_prog_attach_opts-parent"))
+ goto detach_child_2;
+
+ opts.flags = BPF_F_ALLOW_MULTI | BPF_F_PREORDER;
+ prog = skel->progs.parent_2;
+ prog_p2_fd = bpf_program__fd(prog);
+ prog_p2_atype = bpf_program__expected_attach_type(prog);
+ err = bpf_prog_attach_opts(prog_p2_fd, cg_parent, prog_p2_atype, &opts);
+ if (!ASSERT_OK(err, "bpf_prog_attach_opts-parent_2"))
+ goto detach_parent;
+
+ err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen);
+ if (!ASSERT_OK(err, "getsockopt"))
+ goto detach_parent_2;
+
+ if (all_preorder)
+ ASSERT_TRUE(result[0] == 3 && result[1] == 4 && result[2] == 1 && result[3] == 2,
+ "parent and child");
+ else
+ ASSERT_TRUE(result[0] == 4 && result[1] == 2 && result[2] == 1 && result[3] == 3,
+ "parent and child");
+
+detach_parent_2:
+ ASSERT_OK(bpf_prog_detach2(prog_p2_fd, cg_parent, prog_p2_atype),
+ "bpf_prog_detach2-parent_2");
+detach_parent:
+ ASSERT_OK(bpf_prog_detach2(prog_p_fd, cg_parent, prog_p_atype),
+ "bpf_prog_detach2-parent");
+detach_child_2:
+ ASSERT_OK(bpf_prog_detach2(prog_c2_fd, cg_child, prog_c2_atype),
+ "bpf_prog_detach2-child_2");
+detach_child:
+ ASSERT_OK(bpf_prog_detach2(prog_c_fd, cg_child, prog_c_atype),
+ "bpf_prog_detach2-child");
+close_skel:
+ cgroup_preorder__destroy(skel);
+ return err;
+}
+
+void test_cgroup_preorder(void)
+{
+ int cg_parent = -1, cg_child = -1, sock_fd = -1;
+
+ cg_parent = test__join_cgroup("/parent");
+ if (!ASSERT_GE(cg_parent, 0, "join_cgroup /parent"))
+ goto out;
+
+ cg_child = test__join_cgroup("/parent/child");
+ if (!ASSERT_GE(cg_child, 0, "join_cgroup /parent/child"))
+ goto out;
+
+ sock_fd = socket(AF_INET, SOCK_STREAM, 0);
+ if (!ASSERT_GE(sock_fd, 0, "socket"))
+ goto out;
+
+ ASSERT_OK(run_getsockopt_test(cg_parent, cg_child, sock_fd, false), "getsockopt_test_1");
+ ASSERT_OK(run_getsockopt_test(cg_parent, cg_child, sock_fd, true), "getsockopt_test_2");
+
+out:
+ close(sock_fd);
+ close(cg_child);
+ close(cg_parent);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_skb_direct_packet_access.c b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_direct_packet_access.c
new file mode 100644
index 000000000000..e1a90c10db8c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_direct_packet_access.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "cgroup_skb_direct_packet_access.skel.h"
+
+void test_cgroup_skb_prog_run_direct_packet_access(void)
+{
+ int err;
+ struct cgroup_skb_direct_packet_access *skel;
+ char test_skb[64] = {};
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = test_skb,
+ .data_size_in = sizeof(test_skb),
+ );
+
+ skel = cgroup_skb_direct_packet_access__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "cgroup_skb_direct_packet_access__open_and_load"))
+ return;
+
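+	/* BPF_PROG_RUN builds an skb from the zeroed 64-byte buffer and runs
+	 * the cgroup_skb prog on it; the prog presumably accesses the packet
+	 * data directly and returns 1 to let it pass.
+	 */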
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.direct_packet_access), &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts err");
+ ASSERT_EQ(topts.retval, 1, "retval");
+
+ ASSERT_NEQ(skel->bss->data_end, 0, "data_end");
+
+ cgroup_skb_direct_packet_access__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c
index 464edc1c1708..b9dc4ec655b5 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c
@@ -60,7 +60,7 @@ static void run_cgroup_bpf_test(const char *cg_path, int out_sk)
goto cleanup;
link = bpf_program__attach_cgroup(skel->progs.ingress_lookup, cgfd);
- if (CHECK(IS_ERR(link), "cgroup_attach", "err: %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "cgroup_attach"))
goto cleanup;
run_lookup_test(&skel->bss->g_serv_port, out_sk);
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_storage.c b/tools/testing/selftests/bpf/prog_tests/cgroup_storage.c
new file mode 100644
index 000000000000..cf395715ced4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_storage.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+#include "cgroup_storage.skel.h"
+
+#define TEST_CGROUP "/test-bpf-cgroup-storage-buf/"
+#define TEST_NS "cgroup_storage_ns"
+#define PING_CMD "ping localhost -c 1 -W 1 -q"
+
+static int setup_network(struct nstoken **token)
+{
+ SYS(fail, "ip netns add %s", TEST_NS);
+ *token = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(*token, "open netns"))
+ goto cleanup_ns;
+ SYS(cleanup_ns, "ip link set lo up");
+
+ return 0;
+
+cleanup_ns:
+ SYS_NOFAIL("ip netns del %s", TEST_NS);
+fail:
+ return -1;
+}
+
+static void cleanup_network(struct nstoken *ns)
+{
+ close_netns(ns);
+ SYS_NOFAIL("ip netns del %s", TEST_NS);
+}
+
+void test_cgroup_storage(void)
+{
+ struct bpf_cgroup_storage_key key;
+ struct cgroup_storage *skel;
+ struct nstoken *ns = NULL;
+ unsigned long long value;
+ int cgroup_fd;
+ int err;
+
+ cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
+ if (!ASSERT_OK_FD(cgroup_fd, "create cgroup"))
+ return;
+
+ if (!ASSERT_OK(setup_network(&ns), "setup network"))
+ goto cleanup_cgroup;
+
+ skel = cgroup_storage__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "load program"))
+ goto cleanup_network;
+
+ skel->links.bpf_prog =
+ bpf_program__attach_cgroup(skel->progs.bpf_prog, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.bpf_prog, "attach program"))
+ goto cleanup_progs;
+
+ /* Check that one out of every two packets is dropped */
+ err = SYS_NOFAIL(PING_CMD);
+ ASSERT_OK(err, "first ping");
+ err = SYS_NOFAIL(PING_CMD);
+ ASSERT_NEQ(err, 0, "second ping");
+ err = SYS_NOFAIL(PING_CMD);
+ ASSERT_OK(err, "third ping");
+
+ err = bpf_map__get_next_key(skel->maps.cgroup_storage, NULL, &key,
+ sizeof(key));
+ if (!ASSERT_OK(err, "get first key"))
+ goto cleanup_progs;
+ err = bpf_map__lookup_elem(skel->maps.cgroup_storage, &key, sizeof(key),
+ &value, sizeof(value), 0);
+ if (!ASSERT_OK(err, "first packet count read"))
+ goto cleanup_progs;
+
+ /* Add one to the packet counter, check again packet filtering */
+ value++;
+ err = bpf_map__update_elem(skel->maps.cgroup_storage, &key, sizeof(key),
+ &value, sizeof(value), 0);
+ if (!ASSERT_OK(err, "increment packet counter"))
+ goto cleanup_progs;
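+	/* Manually bumping the counter presumably flips the parity that
+	 * drives the drop decision, so the fourth ping passes where it
+	 * would otherwise have been dropped.
+	 */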
+ err = SYS_NOFAIL(PING_CMD);
+ ASSERT_OK(err, "fourth ping");
+ err = SYS_NOFAIL(PING_CMD);
+ ASSERT_NEQ(err, 0, "fifth ping");
+ err = SYS_NOFAIL(PING_CMD);
+ ASSERT_OK(err, "sixth ping");
+
+cleanup_progs:
+ cgroup_storage__destroy(skel);
+cleanup_network:
+ cleanup_network(ns);
+cleanup_cgroup:
+ close(cgroup_fd);
+ cleanup_cgroup_environment();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_tcp_skb.c b/tools/testing/selftests/bpf/prog_tests/cgroup_tcp_skb.c
new file mode 100644
index 000000000000..a1542faf7873
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_tcp_skb.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Facebook */
+#include <test_progs.h>
+#include <linux/in6.h>
+#include <sys/socket.h>
+#include <sched.h>
+#include <unistd.h>
+#include "cgroup_helpers.h"
+#include "testing_helpers.h"
+#include "cgroup_tcp_skb.skel.h"
+#include "cgroup_tcp_skb.h"
+#include "network_helpers.h"
+
+#define CGROUP_TCP_SKB_PATH "/test_cgroup_tcp_skb"
+
+static int install_filters(int cgroup_fd,
+ struct bpf_link **egress_link,
+ struct bpf_link **ingress_link,
+ struct bpf_program *egress_prog,
+ struct bpf_program *ingress_prog,
+ struct cgroup_tcp_skb *skel)
+{
+ /* Prepare filters */
+ skel->bss->g_sock_state = 0;
+ skel->bss->g_unexpected = 0;
+ *egress_link =
+ bpf_program__attach_cgroup(egress_prog,
+ cgroup_fd);
+	if (!ASSERT_OK_PTR(*egress_link, "egress_link"))
+ return -1;
+ *ingress_link =
+ bpf_program__attach_cgroup(ingress_prog,
+ cgroup_fd);
+	if (!ASSERT_OK_PTR(*ingress_link, "ingress_link"))
+ return -1;
+
+ return 0;
+}
+
+static void uninstall_filters(struct bpf_link **egress_link,
+ struct bpf_link **ingress_link)
+{
+ bpf_link__destroy(*egress_link);
+ *egress_link = NULL;
+ bpf_link__destroy(*ingress_link);
+ *ingress_link = NULL;
+}
+
+static int create_client_sock_v6(void)
+{
+ int fd;
+
+ fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (fd < 0) {
+ perror("socket");
+ return -1;
+ }
+
+ return fd;
+}
+
+/* Connect to the server in a cgroup from the outside of the cgroup. */
+static int talk_to_cgroup(int *client_fd, int *listen_fd, int *service_fd,
+ struct cgroup_tcp_skb *skel)
+{
+ int err, cp;
+ char buf[5];
+ int port;
+
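+	/* Join the root cgroup before creating the client socket and the test
+	 * cgroup before creating the listener, so only the server side of the
+	 * connection is subject to the attached filters.
+	 */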
+ /* Create client & server socket */
+ err = join_root_cgroup();
+ if (!ASSERT_OK(err, "join_root_cgroup"))
+ return -1;
+ *client_fd = create_client_sock_v6();
+ if (!ASSERT_GE(*client_fd, 0, "client_fd"))
+ return -1;
+ err = join_cgroup(CGROUP_TCP_SKB_PATH);
+ if (!ASSERT_OK(err, "join_cgroup"))
+ return -1;
+ *listen_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
+ if (!ASSERT_GE(*listen_fd, 0, "listen_fd"))
+ return -1;
+ port = get_socket_local_port(*listen_fd);
+ if (!ASSERT_GE(port, 0, "get_socket_local_port"))
+ return -1;
+ skel->bss->g_sock_port = ntohs(port);
+
+ /* Connect client to server */
+ err = connect_fd_to_fd(*client_fd, *listen_fd, 0);
+ if (!ASSERT_OK(err, "connect_fd_to_fd"))
+ return -1;
+ *service_fd = accept(*listen_fd, NULL, NULL);
+ if (!ASSERT_GE(*service_fd, 0, "service_fd"))
+ return -1;
+ err = join_root_cgroup();
+ if (!ASSERT_OK(err, "join_root_cgroup"))
+ return -1;
+ cp = write(*client_fd, "hello", 5);
+ if (!ASSERT_EQ(cp, 5, "write"))
+ return -1;
+ cp = read(*service_fd, buf, 5);
+ if (!ASSERT_EQ(cp, 5, "read"))
+ return -1;
+
+ return 0;
+}
+
+/* Connect to the server out of a cgroup from inside the cgroup. */
+static int talk_to_outside(int *client_fd, int *listen_fd, int *service_fd,
+ struct cgroup_tcp_skb *skel)
+{
+ int err, cp;
+ char buf[5];
+ int port;
+
+ /* Create client & server socket */
+ err = join_root_cgroup();
+ if (!ASSERT_OK(err, "join_root_cgroup"))
+ return -1;
+ *listen_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
+ if (!ASSERT_GE(*listen_fd, 0, "listen_fd"))
+ return -1;
+ err = join_cgroup(CGROUP_TCP_SKB_PATH);
+ if (!ASSERT_OK(err, "join_cgroup"))
+ return -1;
+ *client_fd = create_client_sock_v6();
+ if (!ASSERT_GE(*client_fd, 0, "client_fd"))
+ return -1;
+ err = join_root_cgroup();
+ if (!ASSERT_OK(err, "join_root_cgroup"))
+ return -1;
+ port = get_socket_local_port(*listen_fd);
+ if (!ASSERT_GE(port, 0, "get_socket_local_port"))
+ return -1;
+ skel->bss->g_sock_port = ntohs(port);
+
+ /* Connect client to server */
+ err = connect_fd_to_fd(*client_fd, *listen_fd, 0);
+ if (!ASSERT_OK(err, "connect_fd_to_fd"))
+ return -1;
+ *service_fd = accept(*listen_fd, NULL, NULL);
+ if (!ASSERT_GE(*service_fd, 0, "service_fd"))
+ return -1;
+ cp = write(*client_fd, "hello", 5);
+ if (!ASSERT_EQ(cp, 5, "write"))
+ return -1;
+ cp = read(*service_fd, buf, 5);
+ if (!ASSERT_EQ(cp, 5, "read"))
+ return -1;
+
+ return 0;
+}
+
+static int close_connection(int *closing_fd, int *peer_fd, int *listen_fd,
+ struct cgroup_tcp_skb *skel)
+{
+ __u32 saved_packet_count = 0;
+ int err;
+ int i;
+
+ /* Wait for ACKs to be sent */
+ saved_packet_count = skel->bss->g_packet_count;
+ usleep(100000); /* 0.1s */
+ for (i = 0;
+ skel->bss->g_packet_count != saved_packet_count && i < 10;
+ i++) {
+ saved_packet_count = skel->bss->g_packet_count;
+ usleep(100000); /* 0.1s */
+ }
+ if (!ASSERT_EQ(skel->bss->g_packet_count, saved_packet_count,
+ "packet_count"))
+ return -1;
+
+ skel->bss->g_packet_count = 0;
+ saved_packet_count = 0;
+
+	/* Half-close the connection so that the closing socket still has a
+	 * chance to receive a FIN from the peer.
+	 */
+ err = shutdown(*closing_fd, SHUT_WR);
+ if (!ASSERT_OK(err, "shutdown closing_fd"))
+ return -1;
+
+ /* Wait for FIN and the ACK of the FIN to be observed */
+ for (i = 0;
+ skel->bss->g_packet_count < saved_packet_count + 2 && i < 10;
+ i++)
+ usleep(100000); /* 0.1s */
+ if (!ASSERT_GE(skel->bss->g_packet_count, saved_packet_count + 2,
+ "packet_count"))
+ return -1;
+
+ saved_packet_count = skel->bss->g_packet_count;
+
+ /* Fully shutdown the connection */
+ err = close(*peer_fd);
+ if (!ASSERT_OK(err, "close peer_fd"))
+ return -1;
+ *peer_fd = -1;
+
+ /* Wait for FIN and the ACK of the FIN to be observed */
+ for (i = 0;
+ skel->bss->g_packet_count < saved_packet_count + 2 && i < 10;
+ i++)
+ usleep(100000); /* 0.1s */
+ if (!ASSERT_GE(skel->bss->g_packet_count, saved_packet_count + 2,
+ "packet_count"))
+ return -1;
+
+ err = close(*closing_fd);
+ if (!ASSERT_OK(err, "close closing_fd"))
+ return -1;
+ *closing_fd = -1;
+
+ close(*listen_fd);
+ *listen_fd = -1;
+
+ return 0;
+}
+
+/* This test case includes four scenarios:
+ * 1. Connect to the server from outside the cgroup and close the connection
+ * from outside the cgroup.
+ * 2. Connect to the server from outside the cgroup and close the connection
+ * from inside the cgroup.
+ * 3. Connect to the server from inside the cgroup and close the connection
+ * from outside the cgroup.
+ * 4. Connect to the server from inside the cgroup and close the connection
+ * from inside the cgroup.
+ *
+ * The test case is to verify that cgroup_skb/{egress,ingress} filters
+ * receive expected packets including SYN, SYN/ACK, ACK, FIN, and FIN/ACK.
+ */
+void test_cgroup_tcp_skb(void)
+{
+ struct bpf_link *ingress_link = NULL;
+ struct bpf_link *egress_link = NULL;
+ int client_fd = -1, listen_fd = -1;
+ struct cgroup_tcp_skb *skel;
+ int service_fd = -1;
+ int cgroup_fd = -1;
+ int err;
+
+ skel = cgroup_tcp_skb__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_open_load"))
+ return;
+
+ err = setup_cgroup_environment();
+ if (!ASSERT_OK(err, "setup_cgroup_environment"))
+ goto cleanup;
+
+ cgroup_fd = create_and_get_cgroup(CGROUP_TCP_SKB_PATH);
+ if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd"))
+ goto cleanup;
+
+ /* Scenario 1 */
+ err = install_filters(cgroup_fd, &egress_link, &ingress_link,
+ skel->progs.server_egress,
+ skel->progs.server_ingress,
+ skel);
+ if (!ASSERT_OK(err, "install_filters"))
+ goto cleanup;
+
+ err = talk_to_cgroup(&client_fd, &listen_fd, &service_fd, skel);
+ if (!ASSERT_OK(err, "talk_to_cgroup"))
+ goto cleanup;
+
+ err = close_connection(&client_fd, &service_fd, &listen_fd, skel);
+ if (!ASSERT_OK(err, "close_connection"))
+ goto cleanup;
+
+ ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected");
+ ASSERT_EQ(skel->bss->g_sock_state, CLOSED, "g_sock_state");
+
+ uninstall_filters(&egress_link, &ingress_link);
+
+ /* Scenario 2 */
+ err = install_filters(cgroup_fd, &egress_link, &ingress_link,
+ skel->progs.server_egress_srv,
+ skel->progs.server_ingress_srv,
+			      skel);
+	if (!ASSERT_OK(err, "install_filters"))
+		goto cleanup;
+
+ err = talk_to_cgroup(&client_fd, &listen_fd, &service_fd, skel);
+ if (!ASSERT_OK(err, "talk_to_cgroup"))
+ goto cleanup;
+
+ err = close_connection(&service_fd, &client_fd, &listen_fd, skel);
+ if (!ASSERT_OK(err, "close_connection"))
+ goto cleanup;
+
+ ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected");
+ ASSERT_EQ(skel->bss->g_sock_state, TIME_WAIT, "g_sock_state");
+
+ uninstall_filters(&egress_link, &ingress_link);
+
+ /* Scenario 3 */
+ err = install_filters(cgroup_fd, &egress_link, &ingress_link,
+ skel->progs.client_egress_srv,
+ skel->progs.client_ingress_srv,
+			      skel);
+	if (!ASSERT_OK(err, "install_filters"))
+		goto cleanup;
+
+ err = talk_to_outside(&client_fd, &listen_fd, &service_fd, skel);
+ if (!ASSERT_OK(err, "talk_to_outside"))
+ goto cleanup;
+
+ err = close_connection(&service_fd, &client_fd, &listen_fd, skel);
+ if (!ASSERT_OK(err, "close_connection"))
+ goto cleanup;
+
+ ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected");
+ ASSERT_EQ(skel->bss->g_sock_state, CLOSED, "g_sock_state");
+
+ uninstall_filters(&egress_link, &ingress_link);
+
+ /* Scenario 4 */
+ err = install_filters(cgroup_fd, &egress_link, &ingress_link,
+ skel->progs.client_egress,
+ skel->progs.client_ingress,
+			      skel);
+	if (!ASSERT_OK(err, "install_filters"))
+		goto cleanup;
+
+ err = talk_to_outside(&client_fd, &listen_fd, &service_fd, skel);
+ if (!ASSERT_OK(err, "talk_to_outside"))
+ goto cleanup;
+
+ err = close_connection(&client_fd, &service_fd, &listen_fd, skel);
+ if (!ASSERT_OK(err, "close_connection"))
+ goto cleanup;
+
+ ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected");
+ ASSERT_EQ(skel->bss->g_sock_state, TIME_WAIT, "g_sock_state");
+
+ uninstall_filters(&egress_link, &ingress_link);
+
+cleanup:
+ close(client_fd);
+ close(listen_fd);
+ close(service_fd);
+ close(cgroup_fd);
+ bpf_link__destroy(egress_link);
+ bpf_link__destroy(ingress_link);
+ cleanup_cgroup_environment();
+ cgroup_tcp_skb__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
new file mode 100644
index 000000000000..37c1cc52ed98
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include "connect4_dropper.skel.h"
+
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+
+static int run_test(int cgroup_fd, int server_fd, bool classid)
+{
+ struct connect4_dropper *skel;
+ int fd, err = 0, port;
+
+ skel = connect4_dropper__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return -1;
+
+ port = get_socket_local_port(server_fd);
+ if (!ASSERT_GE(port, 0, "get_socket_local_port"))
+ return -1;
+
+ skel->bss->port = ntohs(port);
+
+ skel->links.connect_v4_dropper =
+ bpf_program__attach_cgroup(skel->progs.connect_v4_dropper,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.connect_v4_dropper, "prog_attach")) {
+ err = -1;
+ goto out;
+ }
+
+ if (classid && !ASSERT_OK(join_classid(), "join_classid")) {
+ err = -1;
+ goto out;
+ }
+
+ errno = 0;
+ fd = connect_to_fd_opts(server_fd, NULL);
+ if (fd >= 0) {
+ log_err("Unexpected success to connect to server");
+ err = -1;
+ close(fd);
+ } else if (errno != EPERM) {
+ log_err("Unexpected errno from connect to server");
+ err = -1;
+ }
+out:
+ connect4_dropper__destroy(skel);
+ return err;
+}
+
+void test_cgroup_v1v2(void)
+{
+ struct network_helper_opts opts = {};
+ int server_fd, client_fd, cgroup_fd;
+
+ /* Step 1: Check base connectivity works without any BPF. */
+ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
+ if (!ASSERT_GE(server_fd, 0, "server_fd"))
+ return;
+ client_fd = connect_to_fd_opts(server_fd, &opts);
+ if (!ASSERT_GE(client_fd, 0, "client_fd")) {
+ close(server_fd);
+ return;
+ }
+ close(client_fd);
+ close(server_fd);
+
+ /* Step 2: Check BPF policy prog attached to cgroups drops connectivity. */
+ cgroup_fd = test__join_cgroup("/connect_dropper");
+ if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd"))
+ return;
+ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
+ if (!ASSERT_GE(server_fd, 0, "server_fd")) {
+ close(cgroup_fd);
+ return;
+ }
+ ASSERT_OK(run_test(cgroup_fd, server_fd, false), "cgroup-v2-only");
+ setup_classid_environment();
+ set_classid();
+ ASSERT_OK(run_test(cgroup_fd, server_fd, true), "cgroup-v1v2");
+ cleanup_classid_environment();
+ close(server_fd);
+ close(cgroup_fd);
+}
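Editor's note: the connect4_dropper BPF object attached above is not part of this hunk. Assuming the usual cgroup/connect4 conventions (return 1 to allow, 0 to reject, which userspace observes as connect() failing with EPERM), a minimal sketch with assumed names might be:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

__u16 port;	/* host byte order; set via skel->bss->port above */

SEC("cgroup/connect4")
int connect_v4_dropper(struct bpf_sock_addr *ctx)
{
	/* ctx->user_port is in network byte order */
	if (ctx->user_port == bpf_htons(port))
		return 0;	/* reject: connect() returns -EPERM */
	return 1;		/* allow */
}

char _license[] SEC("license") = "GPL";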
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c b/tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c
new file mode 100644
index 000000000000..5ad904e9d15d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+
+#include "read_cgroupfs_xattr.skel.h"
+#include "cgroup_read_xattr.skel.h"
+
+#define CGROUP_FS_PARENT "foo/"
+#define CGROUP_FS_CHILD CGROUP_FS_PARENT "bar/"
+#define TMP_FILE "/tmp/selftests_cgroup_xattr"
+
+static const char xattr_value_a[] = "bpf_selftest_value_a";
+static const char xattr_value_b[] = "bpf_selftest_value_b";
+static const char xattr_name[] = "user.bpf_test";
+
+static void test_read_cgroup_xattr(void)
+{
+ int tmp_fd, parent_cgroup_fd = -1, child_cgroup_fd = -1;
+ struct read_cgroupfs_xattr *skel = NULL;
+
+ parent_cgroup_fd = test__join_cgroup(CGROUP_FS_PARENT);
+ if (!ASSERT_OK_FD(parent_cgroup_fd, "create parent cgroup"))
+ return;
+ if (!ASSERT_OK(set_cgroup_xattr(CGROUP_FS_PARENT, xattr_name, xattr_value_a),
+ "set parent xattr"))
+ goto out;
+
+ child_cgroup_fd = test__join_cgroup(CGROUP_FS_CHILD);
+ if (!ASSERT_OK_FD(child_cgroup_fd, "create child cgroup"))
+ goto out;
+ if (!ASSERT_OK(set_cgroup_xattr(CGROUP_FS_CHILD, xattr_name, xattr_value_b),
+ "set child xattr"))
+ goto out;
+
+ skel = read_cgroupfs_xattr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "read_cgroupfs_xattr__open_and_load"))
+ goto out;
+
+ skel->bss->target_pid = sys_gettid();
+
+ if (!ASSERT_OK(read_cgroupfs_xattr__attach(skel), "read_cgroupfs_xattr__attach"))
+ goto out;
+
+ tmp_fd = open(TMP_FILE, O_RDONLY | O_CREAT, 0644);
+ ASSERT_OK_FD(tmp_fd, "open tmp file");
+ close(tmp_fd);
+
+ ASSERT_TRUE(skel->bss->found_value_a, "found_value_a");
+ ASSERT_TRUE(skel->bss->found_value_b, "found_value_b");
+
+out:
+ close(child_cgroup_fd);
+ close(parent_cgroup_fd);
+ read_cgroupfs_xattr__destroy(skel);
+ unlink(TMP_FILE);
+}
+
+void test_cgroup_xattr(void)
+{
+ RUN_TESTS(cgroup_read_xattr);
+
+ if (test__start_subtest("read_cgroupfs_xattr"))
+ test_read_cgroup_xattr();
+}
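Editor's note: set_cgroup_xattr() comes from cgroup_helpers.h and is not shown here. A plausible implementation, sketched under the assumption that it is a thin wrapper over setxattr(2) on the cgroupfs mount (helper name and mount path here are assumptions):

#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

/* Hypothetical helper: attach a user.* xattr to a cgroup directory.
 * The BPF side of the test later reads these values back through the
 * cgroup's kernfs node.
 */
static int set_cgroup_xattr_sketch(const char *relative_path,
				   const char *name, const char *value)
{
	char path[PATH_MAX];

	snprintf(path, sizeof(path), "/sys/fs/cgroup/%s", relative_path);
	return setxattr(path, name, value, strlen(value), 0);
}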
diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c
new file mode 100644
index 000000000000..4b42fbc96efc
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#define _GNU_SOURCE
+#include <cgroup_helpers.h>
+#include <test_progs.h>
+#include <sched.h>
+#include <sys/wait.h>
+
+#include "cgrp_kfunc_failure.skel.h"
+#include "cgrp_kfunc_success.skel.h"
+
+static struct cgrp_kfunc_success *open_load_cgrp_kfunc_skel(void)
+{
+ struct cgrp_kfunc_success *skel;
+ int err;
+
+ skel = cgrp_kfunc_success__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return NULL;
+
+ skel->bss->pid = getpid();
+
+ err = cgrp_kfunc_success__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ return skel;
+
+cleanup:
+ cgrp_kfunc_success__destroy(skel);
+ return NULL;
+}
+
+static int mkdir_rm_test_dir(void)
+{
+ int fd;
+ const char *cgrp_path = "cgrp_kfunc";
+
+ fd = create_and_get_cgroup(cgrp_path);
+ if (!ASSERT_GT(fd, 0, "mkdir_cgrp_fd"))
+ return -1;
+
+ close(fd);
+ remove_cgroup(cgrp_path);
+
+ return 0;
+}
+
+static void run_success_test(const char *prog_name)
+{
+ struct cgrp_kfunc_success *skel;
+ struct bpf_program *prog;
+ struct bpf_link *link = NULL;
+
+ skel = open_load_cgrp_kfunc_skel();
+ if (!ASSERT_OK_PTR(skel, "open_load_skel"))
+ return;
+
+ if (!ASSERT_OK(skel->bss->err, "pre_mkdir_err"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "attached_link"))
+ goto cleanup;
+
+ ASSERT_EQ(skel->bss->invocations, 0, "pre_rmdir_count");
+ if (!ASSERT_OK(mkdir_rm_test_dir(), "cgrp_mkdir"))
+ goto cleanup;
+
+ ASSERT_EQ(skel->bss->invocations, 1, "post_rmdir_count");
+ ASSERT_OK(skel->bss->err, "post_rmdir_err");
+
+cleanup:
+ bpf_link__destroy(link);
+ cgrp_kfunc_success__destroy(skel);
+}
+
+static const char * const success_tests[] = {
+ "test_cgrp_acquire_release_argument",
+ "test_cgrp_acquire_leave_in_map",
+ "test_cgrp_xchg_release",
+ "test_cgrp_get_release",
+ "test_cgrp_get_ancestors",
+ "test_cgrp_from_id",
+};
+
+static void test_cgrp_from_id_ns(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct cgrp_kfunc_success *skel;
+ struct bpf_program *prog;
+ int pid, pipe_fd[2];
+
+ skel = open_load_cgrp_kfunc_skel();
+ if (!ASSERT_OK_PTR(skel, "open_load_skel"))
+ return;
+
+ if (!ASSERT_OK(skel->bss->err, "pre_mkdir_err"))
+ goto cleanup;
+
+ prog = skel->progs.test_cgrp_from_id_ns;
+
+ if (!ASSERT_OK(pipe(pipe_fd), "pipe"))
+ goto cleanup;
+
+ pid = fork();
+ if (!ASSERT_GE(pid, 0, "fork result")) {
+ close(pipe_fd[0]);
+ close(pipe_fd[1]);
+ goto cleanup;
+ }
+
+ if (pid == 0) {
+ int ret = 0;
+
+ close(pipe_fd[0]);
+
+ if (!ASSERT_GE(cgroup_setup_and_join("cgrp_from_id_ns"), 0, "join cgroup"))
+ exit(1);
+
+ if (!ASSERT_OK(unshare(CLONE_NEWCGROUP), "unshare cgns"))
+ exit(1);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts);
+ if (!ASSERT_OK(ret, "test run ret"))
+ exit(1);
+
+ if (!ASSERT_OK(opts.retval, "test run retval"))
+ exit(1);
+
+ if (!ASSERT_EQ(write(pipe_fd[1], &ret, sizeof(ret)), sizeof(ret), "write pipe"))
+ exit(1);
+
+ exit(0);
+ } else {
+ int res;
+
+ close(pipe_fd[1]);
+
+ ASSERT_EQ(read(pipe_fd[0], &res, sizeof(res)), sizeof(res), "read res");
+ ASSERT_EQ(waitpid(pid, NULL, 0), pid, "wait on child");
+
+ remove_cgroup_pid("cgrp_from_id_ns", pid);
+
+ ASSERT_OK(res, "result from run");
+ }
+
+ close(pipe_fd[0]);
+cleanup:
+ cgrp_kfunc_success__destroy(skel);
+}
+
+void test_cgrp_kfunc(void)
+{
+ int i, err;
+
+ err = setup_cgroup_environment();
+ if (!ASSERT_OK(err, "cgrp_env_setup"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
+ if (!test__start_subtest(success_tests[i]))
+ continue;
+
+ run_success_test(success_tests[i]);
+ }
+
+ if (test__start_subtest("test_cgrp_from_id_ns"))
+ test_cgrp_from_id_ns();
+
+ RUN_TESTS(cgrp_kfunc_failure);
+
+cleanup:
+ cleanup_cgroup_environment();
+}
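Editor's note: the success programs listed above live in cgrp_kfunc_success.c. A minimal sketch of the acquire/release pattern they exercise — kfunc signatures assumed to match the kernel's cgroup kfuncs, program and variable names hypothetical:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

int pid, err, invocations;

SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(sketch_acquire_release, struct cgroup *cgrp, const char *path)
{
	struct cgroup *acquired;

	if ((bpf_get_current_pid_tgid() >> 32) != pid)
		return 0;
	__sync_fetch_and_add(&invocations, 1);

	acquired = bpf_cgroup_acquire(cgrp);	/* takes a reference */
	if (!acquired) {
		err = 1;			/* acquire may return NULL */
		return 0;
	}
	bpf_cgroup_release(acquired);		/* must release before exit */
	return 0;
}

char _license[] SEC("license") = "GPL";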
diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
new file mode 100644
index 000000000000..9015e2c2ab12
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates.*/
+
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <test_progs.h>
+#include "cgrp_ls_tp_btf.skel.h"
+#include "cgrp_ls_recursion.skel.h"
+#include "cgrp_ls_attach_cgroup.skel.h"
+#include "cgrp_ls_negative.skel.h"
+#include "cgrp_ls_sleepable.skel.h"
+#include "network_helpers.h"
+#include "cgroup_helpers.h"
+
+struct socket_cookie {
+ __u64 cookie_key;
+ __u64 cookie_value;
+};
+
+static bool is_cgroup1;
+static int target_hid;
+
+#define CGROUP_MODE_SET(skel) \
+{ \
+ skel->bss->is_cgroup1 = is_cgroup1; \
+ skel->bss->target_hid = target_hid; \
+}
+
+static void cgroup_mode_value_init(bool cgroup, int hid)
+{
+ is_cgroup1 = cgroup;
+ target_hid = hid;
+}
+
+static void test_tp_btf(int cgroup_fd)
+{
+ struct cgrp_ls_tp_btf *skel;
+ long val1 = 1, val2 = 0;
+ int err;
+
+ skel = cgrp_ls_tp_btf__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ CGROUP_MODE_SET(skel);
+
+ /* populate a value in map_b */
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val1, BPF_ANY);
+ if (!ASSERT_OK(err, "map_update_elem"))
+ goto out;
+
+ /* check value */
+ err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val2);
+ if (!ASSERT_OK(err, "map_lookup_elem"))
+ goto out;
+ if (!ASSERT_EQ(val2, 1, "map_lookup_elem, invalid val"))
+ goto out;
+
+ /* delete value */
+ err = bpf_map_delete_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd);
+ if (!ASSERT_OK(err, "map_delete_elem"))
+ goto out;
+
+ skel->bss->target_pid = sys_gettid();
+
+ err = cgrp_ls_tp_btf__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ sys_gettid();
+ sys_gettid();
+
+ skel->bss->target_pid = 0;
+
+ /* 3x syscalls: 1x attach and 2x gettid */
+ ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt");
+ ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt");
+ ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt");
+out:
+ cgrp_ls_tp_btf__destroy(skel);
+}
+
+static void test_attach_cgroup(int cgroup_fd)
+{
+ int server_fd = 0, client_fd = 0, err = 0;
+ socklen_t addr_len = sizeof(struct sockaddr_in6);
+ struct cgrp_ls_attach_cgroup *skel;
+ __u32 cookie_expected_value;
+ struct sockaddr_in6 addr;
+ struct socket_cookie val;
+
+ skel = cgrp_ls_attach_cgroup__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->links.set_cookie = bpf_program__attach_cgroup(
+ skel->progs.set_cookie, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.set_cookie, "prog_attach"))
+ goto out;
+
+ skel->links.update_cookie_sockops = bpf_program__attach_cgroup(
+ skel->progs.update_cookie_sockops, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.update_cookie_sockops, "prog_attach"))
+ goto out;
+
+ skel->links.update_cookie_tracing = bpf_program__attach(
+ skel->progs.update_cookie_tracing);
+ if (!ASSERT_OK_PTR(skel->links.update_cookie_tracing, "prog_attach"))
+ goto out;
+
+ server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+ if (!ASSERT_GE(server_fd, 0, "start_server"))
+ goto out;
+
+ client_fd = connect_to_fd(server_fd, 0);
+ if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+ goto close_server_fd;
+
+ err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.socket_cookies),
+ &cgroup_fd, &val);
+ if (!ASSERT_OK(err, "map_lookup(socket_cookies)"))
+ goto close_client_fd;
+
+ err = getsockname(client_fd, (struct sockaddr *)&addr, &addr_len);
+ if (!ASSERT_OK(err, "getsockname"))
+ goto close_client_fd;
+
+ cookie_expected_value = (ntohs(addr.sin6_port) << 8) | 0xFF;
+ ASSERT_EQ(val.cookie_value, cookie_expected_value, "cookie_value");
+
+close_client_fd:
+ close(client_fd);
+close_server_fd:
+ close(server_fd);
+out:
+ cgrp_ls_attach_cgroup__destroy(skel);
+}
+
+static void test_recursion(int cgroup_fd)
+{
+ struct cgrp_ls_recursion *skel;
+ int err;
+
+ skel = cgrp_ls_recursion__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ CGROUP_MODE_SET(skel);
+
+ err = cgrp_ls_recursion__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ /* trigger sys_enter, make sure it does not cause deadlock */
+ sys_gettid();
+
+out:
+ cgrp_ls_recursion__destroy(skel);
+}
+
+static void test_negative(void)
+{
+ struct cgrp_ls_negative *skel;
+
+ skel = cgrp_ls_negative__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "skel_open_and_load")) {
+ cgrp_ls_negative__destroy(skel);
+ return;
+ }
+}
+
+static void test_cgroup_iter_sleepable(int cgroup_fd, __u64 cgroup_id)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+ struct cgrp_ls_sleepable *skel;
+ struct bpf_link *link;
+ int err, iter_fd;
+ char buf[16];
+
+ skel = cgrp_ls_sleepable__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ CGROUP_MODE_SET(skel);
+
+ bpf_program__set_autoload(skel->progs.cgroup_iter, true);
+ err = cgrp_ls_sleepable__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto out;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.cgroup.cgroup_fd = cgroup_fd;
+ linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+ link = bpf_program__attach_iter(skel->progs.cgroup_iter, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
+ goto out;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_GE(iter_fd, 0, "iter_create"))
+ goto out;
+
+ /* trigger the program run */
+ (void)read(iter_fd, buf, sizeof(buf));
+
+ ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");
+
+ close(iter_fd);
+out:
+ cgrp_ls_sleepable__destroy(skel);
+}
+
+static void test_yes_rcu_lock(__u64 cgroup_id)
+{
+ struct cgrp_ls_sleepable *skel;
+ int err;
+
+ skel = cgrp_ls_sleepable__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ CGROUP_MODE_SET(skel);
+ skel->bss->target_pid = sys_gettid();
+
+ bpf_program__set_autoload(skel->progs.yes_rcu_lock, true);
+ err = cgrp_ls_sleepable__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto out;
+
+ err = cgrp_ls_sleepable__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ syscall(SYS_getpgid);
+
+ ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");
+out:
+ cgrp_ls_sleepable__destroy(skel);
+}
+
+static void test_no_rcu_lock(void)
+{
+ struct cgrp_ls_sleepable *skel;
+ int err;
+
+ skel = cgrp_ls_sleepable__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ CGROUP_MODE_SET(skel);
+
+ bpf_program__set_autoload(skel->progs.no_rcu_lock, true);
+ err = cgrp_ls_sleepable__load(skel);
+ ASSERT_ERR(err, "skel_load");
+
+ cgrp_ls_sleepable__destroy(skel);
+}
+
+static void test_cgrp1_no_rcu_lock(void)
+{
+ struct cgrp_ls_sleepable *skel;
+ int err;
+
+ skel = cgrp_ls_sleepable__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ CGROUP_MODE_SET(skel);
+
+ bpf_program__set_autoload(skel->progs.cgrp1_no_rcu_lock, true);
+ err = cgrp_ls_sleepable__load(skel);
+ ASSERT_OK(err, "skel_load");
+
+ cgrp_ls_sleepable__destroy(skel);
+}
+
+static void cgrp2_local_storage(void)
+{
+ __u64 cgroup_id;
+ int cgroup_fd;
+
+ cgroup_fd = test__join_cgroup("/cgrp_local_storage");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /cgrp_local_storage"))
+ return;
+
+ cgroup_mode_value_init(0, -1);
+
+ cgroup_id = get_cgroup_id("/cgrp_local_storage");
+ if (test__start_subtest("tp_btf"))
+ test_tp_btf(cgroup_fd);
+ if (test__start_subtest("attach_cgroup"))
+ test_attach_cgroup(cgroup_fd);
+ if (test__start_subtest("recursion"))
+ test_recursion(cgroup_fd);
+ if (test__start_subtest("negative"))
+ test_negative();
+ if (test__start_subtest("cgroup_iter_sleepable"))
+ test_cgroup_iter_sleepable(cgroup_fd, cgroup_id);
+ if (test__start_subtest("yes_rcu_lock"))
+ test_yes_rcu_lock(cgroup_id);
+ if (test__start_subtest("no_rcu_lock"))
+ test_no_rcu_lock();
+
+ close(cgroup_fd);
+}
+
+static void cgrp1_local_storage(void)
+{
+ int cgrp1_fd, cgrp1_hid, cgrp1_id, err;
+
+ /* Setup cgroup1 hierarchy */
+ err = setup_classid_environment();
+ if (!ASSERT_OK(err, "setup_classid_environment"))
+ return;
+
+ err = join_classid();
+ if (!ASSERT_OK(err, "join_cgroup1"))
+ goto cleanup;
+
+ cgrp1_fd = open_classid();
+ if (!ASSERT_GE(cgrp1_fd, 0, "cgroup1 fd"))
+ goto cleanup;
+
+ cgrp1_id = get_classid_cgroup_id();
+ if (!ASSERT_GE(cgrp1_id, 0, "cgroup1 id"))
+ goto close_fd;
+
+ cgrp1_hid = get_cgroup1_hierarchy_id("net_cls");
+ if (!ASSERT_GE(cgrp1_hid, 0, "cgroup1 hid"))
+ goto close_fd;
+
+ cgroup_mode_value_init(1, cgrp1_hid);
+
+ if (test__start_subtest("cgrp1_tp_btf"))
+ test_tp_btf(cgrp1_fd);
+ if (test__start_subtest("cgrp1_recursion"))
+ test_recursion(cgrp1_fd);
+ if (test__start_subtest("cgrp1_negative"))
+ test_negative();
+ if (test__start_subtest("cgrp1_iter_sleepable"))
+ test_cgroup_iter_sleepable(cgrp1_fd, cgrp1_id);
+ if (test__start_subtest("cgrp1_yes_rcu_lock"))
+ test_yes_rcu_lock(cgrp1_id);
+ if (test__start_subtest("cgrp1_no_rcu_lock"))
+ test_cgrp1_no_rcu_lock();
+
+close_fd:
+ close(cgrp1_fd);
+cleanup:
+ cleanup_classid_environment();
+}
+
+void test_cgrp_local_storage(void)
+{
+ cgrp2_local_storage();
+ cgrp1_local_storage();
+}
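Editor's note: the cgroup local-storage maps manipulated from userspace above (map_b and friends) are declared on the BPF side. A sketch of the map shape and the get-or-create access pattern this test revolves around — map and program names assumed:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} map_b SEC(".maps");

SEC("tp_btf/sys_enter")
int BPF_PROG(sketch_on_enter, struct pt_regs *regs, long id)
{
	struct task_struct *task = bpf_get_current_task_btf();
	long *ptr;

	/* Get (or create) this cgroup's slot in the storage map. */
	ptr = bpf_cgrp_storage_get(&map_b, task->cgroups->dfl_cgrp, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (ptr)
		__sync_fetch_and_add(ptr, 1);
	return 0;
}

char _license[] SEC("license") = "GPL";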
diff --git a/tools/testing/selftests/bpf/prog_tests/check_mtu.c b/tools/testing/selftests/bpf/prog_tests/check_mtu.c
new file mode 100644
index 000000000000..65b4512967e7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/check_mtu.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Jesper Dangaard Brouer */
+
+#include <linux/if_link.h> /* before test_progs.h, avoid bpf_util.h redefines */
+#include <test_progs.h>
+#include "test_check_mtu.skel.h"
+#include "network_helpers.h"
+
+#include <stdlib.h>
+#include <inttypes.h>
+
+#define IFINDEX_LO 1
+
+static __u32 duration; /* Hint: needed for CHECK macro */
+
+static int read_mtu_device_lo(void)
+{
+ const char *filename = "/sys/class/net/lo/mtu";
+ char buf[11] = {};
+ int value, n, fd;
+
+ fd = open(filename, O_RDONLY);
+ if (fd == -1)
+ return -1;
+
+ n = read(fd, buf, sizeof(buf));
+ close(fd);
+
+ if (n == -1)
+ return -2;
+
+ value = strtoimax(buf, NULL, 10);
+ if (errno == ERANGE)
+ return -3;
+
+ return value;
+}
+
+static void test_check_mtu_xdp_attach(void)
+{
+ struct bpf_link_info link_info;
+ __u32 link_info_len = sizeof(link_info);
+ struct test_check_mtu *skel;
+ struct bpf_program *prog;
+ struct bpf_link *link;
+ int err = 0;
+ int fd;
+
+ skel = test_check_mtu__open_and_load();
+ if (CHECK(!skel, "open and load skel", "failed"))
+ return; /* Exit if e.g. helper unknown to kernel */
+
+ prog = skel->progs.xdp_use_helper_basic;
+
+ link = bpf_program__attach_xdp(prog, IFINDEX_LO);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto out;
+ skel->links.xdp_use_helper_basic = link;
+
+ memset(&link_info, 0, sizeof(link_info));
+ fd = bpf_link__fd(link);
+ err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len);
+ if (CHECK(err, "link_info", "failed: %d\n", err))
+ goto out;
+
+ CHECK(link_info.type != BPF_LINK_TYPE_XDP, "link_type",
+ "got %u != exp %u\n", link_info.type, BPF_LINK_TYPE_XDP);
+ CHECK(link_info.xdp.ifindex != IFINDEX_LO, "link_ifindex",
+ "got %u != exp %u\n", link_info.xdp.ifindex, IFINDEX_LO);
+
+ err = bpf_link__detach(link);
+ CHECK(err, "link_detach", "failed %d\n", err);
+
+out:
+ test_check_mtu__destroy(skel);
+}
+
+static void test_check_mtu_run_xdp(struct test_check_mtu *skel,
+ struct bpf_program *prog,
+ __u32 mtu_expect)
+{
+ int retval_expect = XDP_PASS;
+ __u32 mtu_result = 0;
+ char buf[256] = {};
+ int err, prog_fd = bpf_program__fd(prog);
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .repeat = 1,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = buf,
+ .data_size_out = sizeof(buf),
+ );
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, retval_expect, "retval");
+
+ /* Extract MTU that BPF-prog got */
+ mtu_result = skel->bss->global_bpf_mtu_xdp;
+ ASSERT_EQ(mtu_result, mtu_expect, "MTU-compare-user");
+}
+
+static void test_check_mtu_xdp(__u32 mtu, __u32 ifindex)
+{
+ struct test_check_mtu *skel;
+ int err;
+
+ skel = test_check_mtu__open();
+ if (CHECK(!skel, "skel_open", "failed"))
+ return;
+
+ /* Update "constants" in BPF-prog *BEFORE* libbpf load */
+ skel->rodata->GLOBAL_USER_MTU = mtu;
+ skel->rodata->GLOBAL_USER_IFINDEX = ifindex;
+
+ err = test_check_mtu__load(skel);
+ if (CHECK(err, "skel_load", "failed: %d\n", err))
+ goto cleanup;
+
+ test_check_mtu_run_xdp(skel, skel->progs.xdp_use_helper, mtu);
+ test_check_mtu_run_xdp(skel, skel->progs.xdp_exceed_mtu, mtu);
+ test_check_mtu_run_xdp(skel, skel->progs.xdp_minus_delta, mtu);
+ test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len, mtu);
+ test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len_exceed, mtu);
+
+cleanup:
+ test_check_mtu__destroy(skel);
+}
+
+static void test_check_mtu_run_tc(struct test_check_mtu *skel,
+ struct bpf_program *prog,
+ __u32 mtu_expect)
+{
+ int retval_expect = BPF_OK;
+ __u32 mtu_result = 0;
+ char buf[256] = {};
+ int err, prog_fd = bpf_program__fd(prog);
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = buf,
+ .data_size_out = sizeof(buf),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, retval_expect, "retval");
+
+ /* Extract MTU that BPF-prog got */
+ mtu_result = skel->bss->global_bpf_mtu_tc;
+ ASSERT_EQ(mtu_result, mtu_expect, "MTU-compare-user");
+}
+
+static void test_chk_segs_flag(struct test_check_mtu *skel, __u32 mtu)
+{
+ int err, prog_fd = bpf_program__fd(skel->progs.tc_chk_segs_flag);
+ struct __sk_buff skb = {
+ .gso_size = 10,
+ };
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ );
+
+ /* Lower the mtu to test the BPF_MTU_CHK_SEGS */
+ SYS_NOFAIL("ip link set dev lo mtu 10");
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ SYS_NOFAIL("ip link set dev lo mtu %u", mtu);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, BPF_OK, "retval");
+}
+
+static void test_check_mtu_tc(__u32 mtu, __u32 ifindex)
+{
+ struct test_check_mtu *skel;
+ int err;
+
+ skel = test_check_mtu__open();
+ if (CHECK(!skel, "skel_open", "failed"))
+ return;
+
+ /* Update "constants" in BPF-prog *BEFORE* libbpf load */
+ skel->rodata->GLOBAL_USER_MTU = mtu;
+ skel->rodata->GLOBAL_USER_IFINDEX = ifindex;
+
+ err = test_check_mtu__load(skel);
+ if (CHECK(err, "skel_load", "failed: %d\n", err))
+ goto cleanup;
+
+ test_check_mtu_run_tc(skel, skel->progs.tc_use_helper, mtu);
+ test_check_mtu_run_tc(skel, skel->progs.tc_exceed_mtu, mtu);
+ test_check_mtu_run_tc(skel, skel->progs.tc_exceed_mtu_da, mtu);
+ test_check_mtu_run_tc(skel, skel->progs.tc_minus_delta, mtu);
+ test_check_mtu_run_tc(skel, skel->progs.tc_input_len, mtu);
+ test_check_mtu_run_tc(skel, skel->progs.tc_input_len_exceed, mtu);
+ test_chk_segs_flag(skel, mtu);
+cleanup:
+ test_check_mtu__destroy(skel);
+}
+
+void test_ns_check_mtu(void)
+{
+ int mtu_lo;
+
+ if (test__start_subtest("bpf_check_mtu XDP-attach"))
+ test_check_mtu_xdp_attach();
+
+ mtu_lo = read_mtu_device_lo();
+ if (CHECK(mtu_lo < 0, "reading MTU value", "failed (err: %d)\n", mtu_lo))
+ return;
+
+ if (test__start_subtest("bpf_check_mtu XDP-run"))
+ test_check_mtu_xdp(mtu_lo, 0);
+
+ if (test__start_subtest("bpf_check_mtu XDP-run ifindex-lookup"))
+ test_check_mtu_xdp(mtu_lo, IFINDEX_LO);
+
+ if (test__start_subtest("bpf_check_mtu TC-run"))
+ test_check_mtu_tc(mtu_lo, 0);
+
+ if (test__start_subtest("bpf_check_mtu TC-run ifindex-lookup"))
+ test_check_mtu_tc(mtu_lo, IFINDEX_LO);
+}
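Editor's note: all of the XDP/TC programs exercised above are thin wrappers around the bpf_check_mtu() helper. A sketch of the basic usage they share — the program name here is hypothetical:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

__u32 global_bpf_mtu_xdp;	/* read back by userspace as above */

SEC("xdp")
int xdp_use_helper_sketch(struct xdp_md *ctx)
{
	__u32 mtu_len = 0;	/* 0: check ctx length; MTU returned here */

	/* ifindex 0 means "the device ctx arrived on"; non-zero return
	 * means the length exceeds the MTU or the lookup failed.
	 */
	if (bpf_check_mtu(ctx, 0, &mtu_len, 0, 0))
		return XDP_ABORTED;

	global_bpf_mtu_xdp = mtu_len;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";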
diff --git a/tools/testing/selftests/bpf/prog_tests/cls_redirect.c b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c
index 9781d85cb223..7488a7606e6a 100644
--- a/tools/testing/selftests/bpf/prog_tests/cls_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c
@@ -7,11 +7,14 @@
#include <string.h>
#include <linux/pkt_cls.h>
+#include <netinet/tcp.h>
#include <test_progs.h>
+#include "network_helpers.h"
#include "progs/test_cls_redirect.h"
#include "test_cls_redirect.skel.h"
+#include "test_cls_redirect_dynptr.skel.h"
#include "test_cls_redirect_subprogs.skel.h"
#define ENCAP_IP INADDR_LOOPBACK
@@ -19,112 +22,37 @@
static int duration = 0;
-struct addr_port {
- in_port_t port;
- union {
- struct in_addr in_addr;
- struct in6_addr in6_addr;
- };
-};
-
-struct tuple {
- int family;
- struct addr_port src;
- struct addr_port dst;
-};
-
-static int start_server(const struct sockaddr *addr, socklen_t len, int type)
-{
- int fd = socket(addr->sa_family, type, 0);
- if (CHECK_FAIL(fd == -1))
- return -1;
- if (CHECK_FAIL(bind(fd, addr, len) == -1))
- goto err;
- if (type == SOCK_STREAM && CHECK_FAIL(listen(fd, 128) == -1))
- goto err;
-
- return fd;
-
-err:
- close(fd);
- return -1;
-}
-
-static int connect_to_server(const struct sockaddr *addr, socklen_t len,
- int type)
-{
- int fd = socket(addr->sa_family, type, 0);
- if (CHECK_FAIL(fd == -1))
- return -1;
- if (CHECK_FAIL(connect(fd, addr, len)))
- goto err;
-
- return fd;
-
-err:
- close(fd);
- return -1;
-}
-
-static bool fill_addr_port(const struct sockaddr *sa, struct addr_port *ap)
-{
- const struct sockaddr_in6 *in6;
- const struct sockaddr_in *in;
-
- switch (sa->sa_family) {
- case AF_INET:
- in = (const struct sockaddr_in *)sa;
- ap->in_addr = in->sin_addr;
- ap->port = in->sin_port;
- return true;
-
- case AF_INET6:
- in6 = (const struct sockaddr_in6 *)sa;
- ap->in6_addr = in6->sin6_addr;
- ap->port = in6->sin6_port;
- return true;
-
- default:
- return false;
- }
-}
-static bool set_up_conn(const struct sockaddr *addr, socklen_t len, int type,
- int *server, int *conn, struct tuple *tuple)
+static bool set_up_conn(const struct sockaddr_storage *addr, socklen_t len, int type,
+ int *server, int *conn,
+ struct sockaddr_storage *src,
+ struct sockaddr_storage *dst)
{
struct sockaddr_storage ss;
socklen_t slen = sizeof(ss);
- struct sockaddr *sa = (struct sockaddr *)&ss;
- *server = start_server(addr, len, type);
+ *server = start_server_addr(type, addr, len, NULL);
if (*server < 0)
return false;
- if (CHECK_FAIL(getsockname(*server, sa, &slen)))
+ if (CHECK_FAIL(getsockname(*server, (struct sockaddr *)&ss, &slen)))
goto close_server;
- *conn = connect_to_server(sa, slen, type);
+ *conn = connect_to_addr(type, &ss, slen, NULL);
if (*conn < 0)
goto close_server;
/* We want to simulate packets arriving at conn, so we have to
* swap src and dst.
*/
- slen = sizeof(ss);
- if (CHECK_FAIL(getsockname(*conn, sa, &slen)))
+ slen = sizeof(*dst);
+ if (CHECK_FAIL(getsockname(*conn, (struct sockaddr *)dst, &slen)))
goto close_conn;
- if (CHECK_FAIL(!fill_addr_port(sa, &tuple->dst)))
+ slen = sizeof(*src);
+ if (CHECK_FAIL(getpeername(*conn, (struct sockaddr *)src, &slen)))
goto close_conn;
- slen = sizeof(ss);
- if (CHECK_FAIL(getpeername(*conn, sa, &slen)))
- goto close_conn;
-
- if (CHECK_FAIL(!fill_addr_port(sa, &tuple->src)))
- goto close_conn;
-
- tuple->family = ss.ss_family;
return true;
close_conn:
@@ -140,17 +68,16 @@ static socklen_t prepare_addr(struct sockaddr_storage *addr, int family)
{
struct sockaddr_in *addr4;
struct sockaddr_in6 *addr6;
+ memset(addr, 0, sizeof(*addr));
switch (family) {
case AF_INET:
addr4 = (struct sockaddr_in *)addr;
- memset(addr4, 0, sizeof(*addr4));
addr4->sin_family = family;
addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
return sizeof(*addr4);
case AF_INET6:
addr6 = (struct sockaddr_in6 *)addr;
- memset(addr6, 0, sizeof(*addr6));
addr6->sin6_family = family;
addr6->sin6_addr = in6addr_loopback;
return sizeof(*addr6);
@@ -160,7 +87,7 @@ static socklen_t prepare_addr(struct sockaddr_storage *addr, int family)
}
}
-static bool was_decapsulated(struct bpf_prog_test_run_attr *tattr)
+static bool was_decapsulated(struct bpf_test_run_opts *tattr)
{
return tattr->data_size_out < tattr->data_size_in;
}
@@ -272,9 +199,15 @@ static void encap_init(encap_headers_t *encap, uint8_t hop_count, uint8_t proto)
}
static size_t build_input(const struct test_cfg *test, void *const buf,
- const struct tuple *tuple)
+ const struct sockaddr_storage *src,
+ const struct sockaddr_storage *dst)
{
- in_port_t sport = tuple->src.port;
+ struct sockaddr_in6 *src_in6 = (struct sockaddr_in6 *)src;
+ struct sockaddr_in6 *dst_in6 = (struct sockaddr_in6 *)dst;
+ struct sockaddr_in *src_in = (struct sockaddr_in *)src;
+ struct sockaddr_in *dst_in = (struct sockaddr_in *)dst;
+ sa_family_t family = src->ss_family;
+ in_port_t sport, dport;
encap_headers_t encap;
struct iphdr ip;
struct ipv6hdr ipv6;
@@ -284,8 +217,11 @@ static size_t build_input(const struct test_cfg *test, void *const buf,
uint8_t *p = buf;
int proto;
+ sport = (family == AF_INET) ? src_in->sin_port : src_in6->sin6_port;
+ dport = (family == AF_INET) ? dst_in->sin_port : dst_in6->sin6_port;
+
proto = IPPROTO_IPIP;
- if (tuple->family == AF_INET6)
+ if (family == AF_INET6)
proto = IPPROTO_IPV6;
encap_init(&encap, test->hops == ONE_HOP ? 1 : 0, proto);
@@ -300,15 +236,15 @@ static size_t build_input(const struct test_cfg *test, void *const buf,
if (test->type == UDP)
proto = IPPROTO_UDP;
- switch (tuple->family) {
+ switch (family) {
case AF_INET:
ip = (struct iphdr){
.ihl = 5,
.version = 4,
.ttl = IPDEFTTL,
.protocol = proto,
- .saddr = tuple->src.in_addr.s_addr,
- .daddr = tuple->dst.in_addr.s_addr,
+ .saddr = src_in->sin_addr.s_addr,
+ .daddr = dst_in->sin_addr.s_addr,
};
p = mempcpy(p, &ip, sizeof(ip));
break;
@@ -317,8 +253,8 @@ static size_t build_input(const struct test_cfg *test, void *const buf,
.version = 6,
.hop_limit = IPDEFTTL,
.nexthdr = proto,
- .saddr = tuple->src.in6_addr,
- .daddr = tuple->dst.in6_addr,
+ .saddr = src_in6->sin6_addr,
+ .daddr = dst_in6->sin6_addr,
};
p = mempcpy(p, &ipv6, sizeof(ipv6));
break;
@@ -333,18 +269,16 @@ static size_t build_input(const struct test_cfg *test, void *const buf,
case TCP:
tcp = (struct tcphdr){
.source = sport,
- .dest = tuple->dst.port,
+ .dest = dport,
+ .syn = (test->flags == SYN),
+ .ack = (test->flags == ACK),
};
- if (test->flags == SYN)
- tcp.syn = true;
- if (test->flags == ACK)
- tcp.ack = true;
p = mempcpy(p, &tcp, sizeof(tcp));
break;
case UDP:
udp = (struct udphdr){
.source = sport,
- .dest = tuple->dst.port,
+ .dest = dport,
};
p = mempcpy(p, &udp, sizeof(udp));
break;
@@ -366,43 +300,43 @@ static void close_fds(int *fds, int n)
static void test_cls_redirect_common(struct bpf_program *prog)
{
- struct bpf_prog_test_run_attr tattr = {};
+ LIBBPF_OPTS(bpf_test_run_opts, tattr);
int families[] = { AF_INET, AF_INET6 };
struct sockaddr_storage ss;
- struct sockaddr *addr;
socklen_t slen;
- int i, j, err;
+ int i, j, err, prog_fd;
int servers[__NR_KIND][ARRAY_SIZE(families)] = {};
int conns[__NR_KIND][ARRAY_SIZE(families)] = {};
- struct tuple tuples[__NR_KIND][ARRAY_SIZE(families)];
+ struct sockaddr_storage srcs[__NR_KIND][ARRAY_SIZE(families)];
+ struct sockaddr_storage dsts[__NR_KIND][ARRAY_SIZE(families)];
- addr = (struct sockaddr *)&ss;
for (i = 0; i < ARRAY_SIZE(families); i++) {
slen = prepare_addr(&ss, families[i]);
if (CHECK_FAIL(!slen))
goto cleanup;
- if (CHECK_FAIL(!set_up_conn(addr, slen, SOCK_DGRAM,
+ if (CHECK_FAIL(!set_up_conn(&ss, slen, SOCK_DGRAM,
&servers[UDP][i], &conns[UDP][i],
- &tuples[UDP][i])))
+ &srcs[UDP][i], &dsts[UDP][i])))
goto cleanup;
- if (CHECK_FAIL(!set_up_conn(addr, slen, SOCK_STREAM,
+ if (CHECK_FAIL(!set_up_conn(&ss, slen, SOCK_STREAM,
&servers[TCP][i], &conns[TCP][i],
- &tuples[TCP][i])))
+ &srcs[TCP][i], &dsts[TCP][i])))
goto cleanup;
}
- tattr.prog_fd = bpf_program__fd(prog);
+ prog_fd = bpf_program__fd(prog);
for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct test_cfg *test = &tests[i];
for (j = 0; j < ARRAY_SIZE(families); j++) {
- struct tuple *tuple = &tuples[test->type][j];
+ struct sockaddr_storage *src = &srcs[test->type][j];
+ struct sockaddr_storage *dst = &dsts[test->type][j];
char input[256];
char tmp[256];
- test_str(tmp, sizeof(tmp), test, tuple->family);
+ test_str(tmp, sizeof(tmp), test, families[j]);
if (!test__start_subtest(tmp))
continue;
@@ -410,11 +344,11 @@ static void test_cls_redirect_common(struct bpf_program *prog)
tattr.data_size_out = sizeof(tmp);
tattr.data_in = input;
- tattr.data_size_in = build_input(test, input, tuple);
+ tattr.data_size_in = build_input(test, input, src, dst);
if (CHECK_FAIL(!tattr.data_size_in))
continue;
- err = bpf_prog_test_run_xattr(&tattr);
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
if (CHECK_FAIL(err))
continue;
@@ -445,6 +379,28 @@ cleanup:
close_fds((int *)conns, sizeof(conns) / sizeof(conns[0][0]));
}
+static void test_cls_redirect_dynptr(void)
+{
+ struct test_cls_redirect_dynptr *skel;
+ int err;
+
+ skel = test_cls_redirect_dynptr__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP);
+ skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT);
+
+ err = test_cls_redirect_dynptr__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ test_cls_redirect_common(skel->progs.cls_redirect);
+
+cleanup:
+ test_cls_redirect_dynptr__destroy(skel);
+}
+
static void test_cls_redirect_inlined(void)
{
struct test_cls_redirect *skel;
@@ -495,4 +451,6 @@ void test_cls_redirect(void)
test_cls_redirect_inlined();
if (test__start_subtest("cls_redirect_subprogs"))
test_cls_redirect_subprogs();
+ if (test__start_subtest("cls_redirect_dynptr"))
+ test_cls_redirect_dynptr();
}
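Editor's note: the conversion above replaces the removed bpf_prog_test_run_xattr()/struct bpf_prog_test_run_attr pair with the opts-based API. The general shape of the replacement, sketched as a standalone helper (the helper itself is illustrative, not part of the patch):

#include <bpf/libbpf.h>

/* Sketch: run a program once over a packet buffer via the opts API.
 * Outputs land in topts (retval, data_size_out, duration).
 */
static int run_prog_once(struct bpf_program *prog, void *pkt, size_t pkt_len,
			 void *out, size_t out_len, __u32 *retval)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.data_out = out,
		.data_size_out = out_len,
		.repeat = 1,
	);
	int err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts);

	if (!err && retval)
		*retval = topts.retval;
	return err;
}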
diff --git a/tools/testing/selftests/bpf/prog_tests/compute_live_registers.c b/tools/testing/selftests/bpf/prog_tests/compute_live_registers.c
new file mode 100644
index 000000000000..285f20241fe1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/compute_live_registers.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "compute_live_registers.skel.h"
+#include "test_progs.h"
+
+void test_compute_live_registers(void)
+{
+ RUN_TESTS(compute_live_registers);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c
index 9229db2f5ca5..24d553109f8d 100644
--- a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c
+++ b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c
@@ -51,24 +51,25 @@ static int run_test(int cgroup_fd, int server_fd, int family, int type)
bool v4 = family == AF_INET;
__u16 expected_local_port = v4 ? 22222 : 22223;
__u16 expected_peer_port = 60000;
- struct bpf_prog_load_attr attr = {
- .file = v4 ? "./connect_force_port4.o" :
- "./connect_force_port6.o",
- };
struct bpf_program *prog;
struct bpf_object *obj;
- int xlate_fd, fd, err;
+ const char *obj_file = v4 ? "connect_force_port4.bpf.o" : "connect_force_port6.bpf.o";
+ int fd, err;
__u32 duration = 0;
- err = bpf_prog_load_xattr(&attr, &obj, &xlate_fd);
- if (err) {
- log_err("Failed to load BPF object");
+ obj = bpf_object__open_file(obj_file, NULL);
+ if (!ASSERT_OK_PTR(obj, "bpf_obj_open"))
return -1;
+
+ err = bpf_object__load(obj);
+ if (!ASSERT_OK(err, "bpf_obj_load")) {
+ err = -EIO;
+ goto close_bpf_object;
}
- prog = bpf_object__find_program_by_title(obj, v4 ?
- "cgroup/connect4" :
- "cgroup/connect6");
+ prog = bpf_object__find_program_by_name(obj, v4 ?
+ "connect4" :
+ "connect6");
if (CHECK(!prog, "find_prog", "connect prog not found\n")) {
err = -EIO;
goto close_bpf_object;
@@ -82,9 +83,9 @@ static int run_test(int cgroup_fd, int server_fd, int family, int type)
goto close_bpf_object;
}
- prog = bpf_object__find_program_by_title(obj, v4 ?
- "cgroup/getpeername4" :
- "cgroup/getpeername6");
+ prog = bpf_object__find_program_by_name(obj, v4 ?
+ "getpeername4" :
+ "getpeername6");
if (CHECK(!prog, "find_prog", "getpeername prog not found\n")) {
err = -EIO;
goto close_bpf_object;
@@ -98,9 +99,9 @@ static int run_test(int cgroup_fd, int server_fd, int family, int type)
goto close_bpf_object;
}
- prog = bpf_object__find_program_by_title(obj, v4 ?
- "cgroup/getsockname4" :
- "cgroup/getsockname6");
+ prog = bpf_object__find_program_by_name(obj, v4 ?
+ "getsockname4" :
+ "getsockname6");
if (CHECK(!prog, "find_prog", "getsockname prog not found\n")) {
err = -EIO;
goto close_bpf_object;
diff --git a/tools/testing/selftests/bpf/prog_tests/connect_ping.c b/tools/testing/selftests/bpf/prog_tests/connect_ping.c
new file mode 100644
index 000000000000..40fe571f2fe7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/connect_ping.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright 2022 Google LLC.
+ */
+
+#define _GNU_SOURCE
+#include <sys/mount.h>
+
+#include "test_progs.h"
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+
+#include "connect_ping.skel.h"
+
+/* 2001:db8::1 */
+#define BINDADDR_V6 { { { 0x20,0x01,0x0d,0xb8,0,0,0,0,0,0,0,0,0,0,0,1 } } }
+static const struct in6_addr bindaddr_v6 = BINDADDR_V6;
+
+static void subtest(int cgroup_fd, struct connect_ping *skel,
+ int family, int do_bind)
+{
+ struct sockaddr_in sa4 = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
+ };
+ struct sockaddr_in6 sa6 = {
+ .sin6_family = AF_INET6,
+ .sin6_addr = IN6ADDR_LOOPBACK_INIT,
+ };
+ struct sockaddr *sa = NULL;
+ socklen_t sa_len;
+ int protocol = -1;
+ int sock_fd;
+
+ switch (family) {
+ case AF_INET:
+ sa = (struct sockaddr *)&sa4;
+ sa_len = sizeof(sa4);
+ protocol = IPPROTO_ICMP;
+ break;
+ case AF_INET6:
+ sa = (struct sockaddr *)&sa6;
+ sa_len = sizeof(sa6);
+ protocol = IPPROTO_ICMPV6;
+ break;
+ }
+
+ memset(skel->bss, 0, sizeof(*skel->bss));
+ skel->bss->do_bind = do_bind;
+
+ sock_fd = socket(family, SOCK_DGRAM, protocol);
+ if (!ASSERT_GE(sock_fd, 0, "sock-create"))
+ return;
+
+ if (!ASSERT_OK(connect(sock_fd, sa, sa_len), "connect"))
+ goto close_sock;
+
+ if (!ASSERT_EQ(skel->bss->invocations_v4, family == AF_INET ? 1 : 0,
+ "invocations_v4"))
+ goto close_sock;
+ if (!ASSERT_EQ(skel->bss->invocations_v6, family == AF_INET6 ? 1 : 0,
+ "invocations_v6"))
+ goto close_sock;
+ if (!ASSERT_EQ(skel->bss->has_error, 0, "has_error"))
+ goto close_sock;
+
+ if (!ASSERT_OK(getsockname(sock_fd, sa, &sa_len),
+ "getsockname"))
+ goto close_sock;
+
+ switch (family) {
+ case AF_INET:
+ if (!ASSERT_EQ(sa4.sin_family, family, "sin_family"))
+ goto close_sock;
+ if (!ASSERT_EQ(sa4.sin_addr.s_addr,
+ htonl(do_bind ? 0x01010101 : INADDR_LOOPBACK),
+ "sin_addr"))
+ goto close_sock;
+ break;
+ case AF_INET6:
+ if (!ASSERT_EQ(sa6.sin6_family, AF_INET6, "sin6_family"))
+ goto close_sock;
+ if (!ASSERT_EQ(memcmp(&sa6.sin6_addr,
+ do_bind ? &bindaddr_v6 : &in6addr_loopback,
+ sizeof(sa6.sin6_addr)),
+ 0, "sin6_addr"))
+ goto close_sock;
+ break;
+ }
+
+close_sock:
+ close(sock_fd);
+}
+
+void test_connect_ping(void)
+{
+ struct connect_ping *skel;
+ int cgroup_fd;
+
+ if (!ASSERT_OK(unshare(CLONE_NEWNET | CLONE_NEWNS), "unshare"))
+ return;
+
+ /* Overmount sysfs, making the original sysfs mount private first so
+ * the overmount does not propagate to other mount namespaces.
+ */
+ if (!ASSERT_OK(mount("none", "/sys", NULL, MS_PRIVATE, NULL),
+ "remount-private-sys"))
+ return;
+ if (!ASSERT_OK(mount("sysfs", "/sys", "sysfs", 0, NULL),
+ "mount-sys"))
+ return;
+ if (!ASSERT_OK(mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL),
+ "mount-bpf"))
+ goto clean_mount;
+
+ if (!ASSERT_OK(system("ip link set dev lo up"), "lo-up"))
+ goto clean_mount;
+ if (!ASSERT_OK(system("ip addr add 1.1.1.1 dev lo"), "lo-addr-v4"))
+ goto clean_mount;
+ if (!ASSERT_OK(system("ip -6 addr add 2001:db8::1 dev lo"), "lo-addr-v6"))
+ goto clean_mount;
+ if (write_sysctl("/proc/sys/net/ipv4/ping_group_range", "0 0"))
+ goto clean_mount;
+
+ cgroup_fd = test__join_cgroup("/connect_ping");
+ if (!ASSERT_GE(cgroup_fd, 0, "cg-create"))
+ goto clean_mount;
+
+ skel = connect_ping__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel-load"))
+ goto close_cgroup;
+ skel->links.connect_v4_prog =
+ bpf_program__attach_cgroup(skel->progs.connect_v4_prog, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.connect_v4_prog, "cg-attach-v4"))
+ goto skel_destroy;
+ skel->links.connect_v6_prog =
+ bpf_program__attach_cgroup(skel->progs.connect_v6_prog, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.connect_v6_prog, "cg-attach-v6"))
+ goto skel_destroy;
+
+ /* Connect a v4 ping socket to localhost, assert that only v4 is called,
+ * and called exactly once, and that the socket's bound address is
+ * the original loopback address.
+ */
+ if (test__start_subtest("ipv4"))
+ subtest(cgroup_fd, skel, AF_INET, 0);
+
+ /* Connect a v4 ping socket to localhost, assert that only v4 is called,
+ * and called exactly once, and that the socket's bound address is
+ * the address we explicitly bound.
+ */
+ if (test__start_subtest("ipv4-bind"))
+ subtest(cgroup_fd, skel, AF_INET, 1);
+
+ /* Connect a v6 ping socket to localhost, assert that only v6 is called,
+ * and called exactly once, and that the socket's bound address is
+ * the original loopback address.
+ */
+ if (test__start_subtest("ipv6"))
+ subtest(cgroup_fd, skel, AF_INET6, 0);
+
+ /* Connect a v6 ping socket to localhost, assert that only v6 is called,
+ * and called exactly once, and that the socket's bound address is
+ * the address we explicitly bound.
+ */
+ if (test__start_subtest("ipv6-bind"))
+ subtest(cgroup_fd, skel, AF_INET6, 1);
+
+skel_destroy:
+ connect_ping__destroy(skel);
+
+close_cgroup:
+ close(cgroup_fd);
+
+clean_mount:
+ umount2("/sys", MNT_DETACH);
+}
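Editor's note: the connect_ping BPF programs attached above are not in this hunk. Under the usual cgroup/connect4 conventions, the do_bind knob most plausibly drives a bpf_bind() call; a sketch with assumed variable and program names:

#include <string.h>
#include <sys/socket.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

int invocations_v4, has_error, do_bind;

SEC("cgroup/connect4")
int connect_v4_prog(struct bpf_sock_addr *ctx)
{
	struct sockaddr_in sa = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = bpf_htonl(0x01010101),	/* 1.1.1.1 */
	};

	__sync_fetch_and_add(&invocations_v4, 1);

	/* When do_bind is set, rewrite the socket's source address so
	 * getsockname() in userspace observes 1.1.1.1 instead of loopback.
	 */
	if (do_bind && bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
		has_error = 1;

	return 1;	/* allow the connect() to proceed */
}

char _license[] SEC("license") = "GPL";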
diff --git a/tools/testing/selftests/bpf/prog_tests/core_autosize.c b/tools/testing/selftests/bpf/prog_tests/core_autosize.c
index 981c251453d9..f2ce4fd1cdae 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_autosize.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_autosize.c
@@ -53,8 +53,8 @@ void test_core_autosize(void)
char btf_file[] = "/tmp/core_autosize.btf.XXXXXX";
int err, fd = -1, zero = 0;
int char_id, short_id, int_id, long_long_id, void_ptr_id, id;
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
struct test_core_autosize* skel = NULL;
- struct bpf_object_load_attr load_attr = {};
struct bpf_program *prog;
struct bpf_map *bss_map;
struct btf *btf = NULL;
@@ -112,7 +112,7 @@ void test_core_autosize(void)
if (!ASSERT_OK_PTR(f, "btf_fdopen"))
goto cleanup;
- raw_data = btf__get_raw_data(btf, &raw_sz);
+ raw_data = btf__raw_data(btf, &raw_sz);
if (!ASSERT_OK_PTR(raw_data, "raw_data"))
goto cleanup;
written = fwrite(raw_data, 1, raw_sz, f);
@@ -125,9 +125,10 @@ void test_core_autosize(void)
fd = -1;
/* open and load BPF program with custom BTF as the kernel BTF */
- skel = test_core_autosize__open();
+ open_opts.btf_custom_path = btf_file;
+ skel = test_core_autosize__open_opts(&open_opts);
if (!ASSERT_OK_PTR(skel, "skel_open"))
- return;
+ goto cleanup;
/* disable handle_signed() for now */
prog = bpf_object__find_program_by_name(skel->obj, "handle_signed");
@@ -135,9 +136,7 @@ void test_core_autosize(void)
goto cleanup;
bpf_program__set_autoload(prog, false);
- load_attr.obj = skel->obj;
- load_attr.target_btf_path = btf_file;
- err = bpf_object__load_xattr(&load_attr);
+ err = bpf_object__load(skel->obj);
if (!ASSERT_OK(err, "prog_load"))
goto cleanup;
@@ -164,11 +163,11 @@ void test_core_autosize(void)
usleep(1);
- bss_map = bpf_object__find_map_by_name(skel->obj, "test_cor.bss");
+ bss_map = bpf_object__find_map_by_name(skel->obj, ".bss");
if (!ASSERT_OK_PTR(bss_map, "bss_map_find"))
goto cleanup;
- err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, (void *)&out);
+ err = bpf_map__lookup_elem(bss_map, &zero, sizeof(zero), &out, sizeof(out), 0);
if (!ASSERT_OK(err, "bss_lookup"))
goto cleanup;
@@ -204,14 +203,13 @@ void test_core_autosize(void)
skel = NULL;
/* now re-load with handle_signed() enabled, it should fail loading */
- skel = test_core_autosize__open();
+ open_opts.btf_custom_path = btf_file;
+ skel = test_core_autosize__open_opts(&open_opts);
if (!ASSERT_OK_PTR(skel, "skel_open"))
- return;
+ goto cleanup;
- load_attr.obj = skel->obj;
- load_attr.target_btf_path = btf_file;
- err = bpf_object__load_xattr(&load_attr);
- if (!ASSERT_ERR(err, "bad_prog_load"))
+ err = test_core_autosize__load(skel);
+ if (!ASSERT_ERR(err, "skel_load"))
goto cleanup;
cleanup:
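Editor's note: the hunks above migrate from the removed bpf_object__load_xattr()/target_btf_path mechanism to the btf_custom_path open option, which must now be supplied at open time rather than load time. The pattern in isolation, as a sketch (helper name hypothetical):

#include <bpf/libbpf.h>

/* Sketch: point libbpf at a custom BTF file that stands in for kernel
 * BTF during CO-RE relocation; replaces load_attr.target_btf_path.
 */
static struct bpf_object *open_with_custom_btf(const char *obj_path,
					       const char *btf_path)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.btf_custom_path = btf_path,
	);

	return bpf_object__open_file(obj_path, &opts);
}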
diff --git a/tools/testing/selftests/bpf/prog_tests/core_extern.c b/tools/testing/selftests/bpf/prog_tests/core_extern.c
index 1931a158510e..63a51e9f3630 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_extern.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_extern.c
@@ -39,6 +39,7 @@ static struct test_case {
"CONFIG_STR=\"abracad\"\n"
"CONFIG_MISSING=0",
.data = {
+ .unkn_virt_val = 0,
.bpf_syscall = false,
.tristate_val = TRI_MODULE,
.bool_val = true,
@@ -121,7 +122,7 @@ static struct test_case {
void test_core_extern(void)
{
const uint32_t kern_ver = get_kernel_version();
- int err, duration = 0, i, j;
+ int err, i, j;
struct test_core_extern *skel = NULL;
uint64_t *got, *exp;
int n = sizeof(*skel->data) / sizeof(uint64_t);
@@ -136,19 +137,17 @@ void test_core_extern(void)
continue;
skel = test_core_extern__open_opts(&opts);
- if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
err = test_core_extern__load(skel);
if (t->fails) {
- CHECK(!err, "skel_load",
- "shouldn't succeed open/load of skeleton\n");
+ ASSERT_ERR(err, "skel_load_should_fail");
goto cleanup;
- } else if (CHECK(err, "skel_load",
- "failed to open/load skeleton\n")) {
+ } else if (!ASSERT_OK(err, "skel_load")) {
goto cleanup;
}
err = test_core_extern__attach(skel);
- if (CHECK(err, "attach_raw_tp", "failed attach: %d\n", err))
+ if (!ASSERT_OK(err, "attach_raw_tp"))
goto cleanup;
usleep(1);
@@ -158,9 +157,7 @@ void test_core_extern(void)
got = (uint64_t *)skel->data;
exp = (uint64_t *)&t->data;
for (j = 0; j < n; j++) {
- CHECK(got[j] != exp[j], "check_res",
- "result #%d: expected %llx, but got %llx\n",
- j, (__u64)exp[j], (__u64)got[j]);
+ ASSERT_EQ(got[j], exp[j], "result");
}
cleanup:
test_core_extern__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/core_kern.c b/tools/testing/selftests/bpf/prog_tests/core_kern.c
new file mode 100644
index 000000000000..6a5a1c019a5d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/core_kern.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "test_progs.h"
+#include "core_kern.lskel.h"
+
+void test_core_kern_lskel(void)
+{
+ struct core_kern_lskel *skel;
+ int link_fd;
+
+ skel = core_kern_lskel__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ link_fd = core_kern_lskel__core_relo_proto__attach(skel);
+ if (!ASSERT_GT(link_fd, 0, "attach(core_relo_proto)"))
+ goto cleanup;
+
+ /* trigger tracepoints */
+ usleep(1);
+ ASSERT_TRUE(skel->bss->proto_out[0], "bpf_core_type_exists");
+ ASSERT_FALSE(skel->bss->proto_out[1], "!bpf_core_type_exists");
+ ASSERT_TRUE(skel->bss->proto_out[2], "bpf_core_type_exists. nested");
+
+cleanup:
+ core_kern_lskel__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/core_kern_overflow.c b/tools/testing/selftests/bpf/prog_tests/core_kern_overflow.c
new file mode 100644
index 000000000000..04cc145bc26a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/core_kern_overflow.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "test_progs.h"
+#include "core_kern_overflow.lskel.h"
+
+void test_core_kern_overflow_lskel(void)
+{
+ struct core_kern_overflow_lskel *skel;
+
+ skel = core_kern_overflow_lskel__open_and_load();
+ if (!ASSERT_NULL(skel, "open_and_load"))
+ core_kern_overflow_lskel__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/core_read_macros.c b/tools/testing/selftests/bpf/prog_tests/core_read_macros.c
new file mode 100644
index 000000000000..96f5cf3c6fa2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/core_read_macros.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <test_progs.h>
+
+struct callback_head {
+ struct callback_head *next;
+ void (*func)(struct callback_head *head);
+};
+
+/* The ___shuffled flavor is just an illusion for the BPF code; it doesn't
+ * really exist, and user-space must provide data in a memory layout that
+ * matches callback_head. We define the ___shuffled flavor only to make it
+ * easier to work with the skeleton.
+ */
+struct callback_head___shuffled {
+ struct callback_head___shuffled *next;
+ void (*func)(struct callback_head *head);
+};
+
+#include "test_core_read_macros.skel.h"
+
+void test_core_read_macros(void)
+{
+ int duration = 0, err;
+ struct test_core_read_macros *skel;
+ struct test_core_read_macros__bss *bss;
+ struct callback_head u_probe_in;
+ struct callback_head___shuffled u_core_in;
+
+ skel = test_core_read_macros__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ return;
+ bss = skel->bss;
+ bss->my_pid = getpid();
+
+ /* next pointers have to be set from the kernel side */
+ bss->k_probe_in.func = (void *)(long)0x1234;
+ bss->k_core_in.func = (void *)(long)0xabcd;
+
+ u_probe_in.next = &u_probe_in;
+ u_probe_in.func = (void *)(long)0x5678;
+ bss->u_probe_in = &u_probe_in;
+
+ u_core_in.next = &u_core_in;
+ u_core_in.func = (void *)(long)0xdbca;
+ bss->u_core_in = &u_core_in;
+
+ err = test_core_read_macros__attach(skel);
+ if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ goto cleanup;
+
+ /* trigger tracepoint */
+ usleep(1);
+
+ ASSERT_EQ(bss->k_probe_out, 0x1234, "k_probe_out");
+ ASSERT_EQ(bss->k_core_out, 0xabcd, "k_core_out");
+
+ ASSERT_EQ(bss->u_probe_out, 0x5678, "u_probe_out");
+ ASSERT_EQ(bss->u_core_out, 0xdbca, "u_core_out");
+
+cleanup:
+ test_core_read_macros__destroy(skel);
+}
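Editor's note: the BPF counterpart (test_core_read_macros.c) reads the fields set above through the probe-read and CO-RE macro families. A sketch of the kernel-vs-user pair being exercised — struct and variable names here are stand-ins, not the test's exact program:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

struct callback_head___sketch {
	struct callback_head___sketch *next;
	void (*func)(void *head);
};

struct callback_head___sketch k_probe_in;
struct callback_head___sketch *u_probe_in;
unsigned long k_probe_out, u_probe_out;
int my_pid;

SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
	if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
		return 0;

	/* non-CO-RE read of a kernel-side (bss) struct */
	k_probe_out = (unsigned long)BPF_PROBE_READ(&k_probe_in, func);
	/* _USER variant for the pointer that userspace handed us */
	u_probe_out = (unsigned long)BPF_PROBE_READ_USER(u_probe_in, func);
	return 0;
}

char _license[] SEC("license") = "GPL";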
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index 06eb956ff7bb..08963c82f30b 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
#include <test_progs.h>
#include "progs/core_reloc_types.h"
-#include "bpf_testmod/bpf_testmod.h"
+#include "test_kmods/bpf_testmod.h"
+#include <linux/limits.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <bpf/btf.h>
@@ -10,9 +12,9 @@ static int duration = 0;
#define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name)
-#define MODULES_CASE(name, sec_name, tp_name) { \
+#define MODULES_CASE(name, pg_name, tp_name) { \
.case_name = name, \
- .bpf_obj_file = "test_core_reloc_module.o", \
+ .bpf_obj_file = "test_core_reloc_module.bpf.o", \
.btf_src_file = NULL, /* find in kernel module BTFs */ \
.input = "", \
.input_len = 0, \
@@ -28,9 +30,9 @@ static int duration = 0;
.comm_len = sizeof("test_progs"), \
}, \
.output_len = sizeof(struct core_reloc_module_output), \
- .prog_sec_name = sec_name, \
+ .prog_name = pg_name, \
.raw_tp_name = tp_name, \
- .trigger = trigger_module_test_read, \
+ .trigger = __trigger_module_test_read, \
.needs_testmod = true, \
}
@@ -42,8 +44,10 @@ static int duration = 0;
#define FLAVORS_CASE_COMMON(name) \
.case_name = #name, \
- .bpf_obj_file = "test_core_reloc_flavors.o", \
- .btf_src_file = "btf__core_reloc_" #name ".o" \
+ .bpf_obj_file = "test_core_reloc_flavors.bpf.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_flavors" \
#define FLAVORS_CASE(name) { \
FLAVORS_CASE_COMMON(name), \
@@ -65,8 +69,10 @@ static int duration = 0;
#define NESTING_CASE_COMMON(name) \
.case_name = #name, \
- .bpf_obj_file = "test_core_reloc_nesting.o", \
- .btf_src_file = "btf__core_reloc_" #name ".o"
+ .bpf_obj_file = "test_core_reloc_nesting.bpf.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_nesting" \
#define NESTING_CASE(name) { \
NESTING_CASE_COMMON(name), \
@@ -79,10 +85,11 @@ static int duration = 0;
#define NESTING_ERR_CASE(name) { \
NESTING_CASE_COMMON(name), \
.fails = true, \
+ .run_btfgen_fails = true, \
}
#define ARRAYS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
- .a = { [2] = 1 }, \
+ .a = { [2] = 1, [3] = 11 }, \
.b = { [1] = { [2] = { [3] = 2 } } }, \
.c = { [1] = { .c = 3 } }, \
.d = { [0] = { [0] = { .d = 4 } } }, \
@@ -90,8 +97,10 @@ static int duration = 0;
#define ARRAYS_CASE_COMMON(name) \
.case_name = #name, \
- .bpf_obj_file = "test_core_reloc_arrays.o", \
- .btf_src_file = "btf__core_reloc_" #name ".o"
+ .bpf_obj_file = "test_core_reloc_arrays.bpf.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_arrays" \
#define ARRAYS_CASE(name) { \
ARRAYS_CASE_COMMON(name), \
@@ -99,6 +108,7 @@ static int duration = 0;
.input_len = sizeof(struct core_reloc_##name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_arrays_output) { \
.a2 = 1, \
+ .a3 = 12, \
.b123 = 2, \
.c1c = 3, \
.d00d = 4, \
@@ -122,8 +132,10 @@ static int duration = 0;
#define PRIMITIVES_CASE_COMMON(name) \
.case_name = #name, \
- .bpf_obj_file = "test_core_reloc_primitives.o", \
- .btf_src_file = "btf__core_reloc_" #name ".o"
+ .bpf_obj_file = "test_core_reloc_primitives.bpf.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_primitives" \
#define PRIMITIVES_CASE(name) { \
PRIMITIVES_CASE_COMMON(name), \
@@ -140,8 +152,8 @@ static int duration = 0;
#define MODS_CASE(name) { \
.case_name = #name, \
- .bpf_obj_file = "test_core_reloc_mods.o", \
- .btf_src_file = "btf__core_reloc_" #name ".o", \
+ .bpf_obj_file = "test_core_reloc_mods.bpf.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) { \
.a = 1, \
.b = 2, \
@@ -158,12 +170,14 @@ static int duration = 0;
.e = 5, .f = 6, .g = 7, .h = 8, \
}, \
.output_len = sizeof(struct core_reloc_mods_output), \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_mods", \
}
#define PTR_AS_ARR_CASE(name) { \
.case_name = #name, \
- .bpf_obj_file = "test_core_reloc_ptr_as_arr.o", \
- .btf_src_file = "btf__core_reloc_" #name ".o", \
+ .bpf_obj_file = "test_core_reloc_ptr_as_arr.bpf.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
.input = (const char *)&(struct core_reloc_##name []){ \
{ .a = 1 }, \
{ .a = 2 }, \
@@ -174,6 +188,8 @@ static int duration = 0;
.a = 3, \
}, \
.output_len = sizeof(struct core_reloc_ptr_as_arr), \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_ptr_as_arr", \
}
#define INTS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
@@ -189,8 +205,10 @@ static int duration = 0;
#define INTS_CASE_COMMON(name) \
.case_name = #name, \
- .bpf_obj_file = "test_core_reloc_ints.o", \
- .btf_src_file = "btf__core_reloc_" #name ".o"
+ .bpf_obj_file = "test_core_reloc_ints.bpf.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_ints"
#define INTS_CASE(name) { \
INTS_CASE_COMMON(name), \
@@ -207,65 +225,78 @@ static int duration = 0;
#define FIELD_EXISTS_CASE_COMMON(name) \
.case_name = #name, \
- .bpf_obj_file = "test_core_reloc_existence.o", \
- .btf_src_file = "btf__core_reloc_" #name ".o" \
-
-#define FIELD_EXISTS_ERR_CASE(name) { \
- FIELD_EXISTS_CASE_COMMON(name), \
- .fails = true, \
-}
+ .bpf_obj_file = "test_core_reloc_existence.bpf.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_existence"
#define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name) \
.case_name = test_name_prefix#name, \
.bpf_obj_file = objfile, \
- .btf_src_file = "btf__core_reloc_" #name ".o"
+ .btf_src_file = "btf__core_reloc_" #name ".bpf.o"
#define BITFIELDS_CASE(name, ...) { \
- BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
- "direct:", name), \
+ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.bpf.o", \
+ "probed:", name), \
.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
.input_len = sizeof(struct core_reloc_##name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
__VA_ARGS__, \
.output_len = sizeof(struct core_reloc_bitfields_output), \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_bitfields", \
}, { \
- BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
- "probed:", name), \
+ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.bpf.o", \
+ "direct:", name), \
.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
.input_len = sizeof(struct core_reloc_##name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
__VA_ARGS__, \
.output_len = sizeof(struct core_reloc_bitfields_output), \
- .prog_sec_name = "tp_btf/sys_enter", \
+ .prog_name = "test_core_bitfields_direct", \
}
#define BITFIELDS_ERR_CASE(name) { \
- BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
+ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.bpf.o", \
"probed:", name), \
.fails = true, \
+ .run_btfgen_fails = true, \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_bitfields", \
}, { \
- BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
+ BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.bpf.o", \
"direct:", name), \
- .prog_sec_name = "tp_btf/sys_enter", \
.fails = true, \
+ .run_btfgen_fails = true, \
+ .prog_name = "test_core_bitfields_direct", \
}
#define SIZE_CASE_COMMON(name) \
.case_name = #name, \
- .bpf_obj_file = "test_core_reloc_size.o", \
- .btf_src_file = "btf__core_reloc_" #name ".o", \
- .relaxed_core_relocs = true
+ .bpf_obj_file = "test_core_reloc_size.bpf.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_size"
#define SIZE_OUTPUT_DATA(type) \
STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \
.int_sz = sizeof(((type *)0)->int_field), \
+ .int_off = offsetof(type, int_field), \
.struct_sz = sizeof(((type *)0)->struct_field), \
+ .struct_off = offsetof(type, struct_field), \
.union_sz = sizeof(((type *)0)->union_field), \
+ .union_off = offsetof(type, union_field), \
.arr_sz = sizeof(((type *)0)->arr_field), \
- .arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \
+ .arr_off = offsetof(type, arr_field), \
+ .arr_elem_sz = sizeof(((type *)0)->arr_field[1]), \
+ .arr_elem_off = offsetof(type, arr_field[1]), \
.ptr_sz = 8, /* always 8-byte pointer for BPF */ \
+ .ptr_off = offsetof(type, ptr_field), \
.enum_sz = sizeof(((type *)0)->enum_field), \
+ .enum_off = offsetof(type, enum_field), \
+ .float_sz = sizeof(((type *)0)->float_field), \
+ .float_off = offsetof(type, float_field), \
}
#define SIZE_CASE(name) { \
@@ -278,12 +309,15 @@ static int duration = 0;
#define SIZE_ERR_CASE(name) { \
SIZE_CASE_COMMON(name), \
.fails = true, \
+ .run_btfgen_fails = true, \
}
#define TYPE_BASED_CASE_COMMON(name) \
.case_name = #name, \
- .bpf_obj_file = "test_core_reloc_type_based.o", \
- .btf_src_file = "btf__core_reloc_" #name ".o" \
+ .bpf_obj_file = "test_core_reloc_type_based.bpf.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_type_based"
#define TYPE_BASED_CASE(name, ...) { \
TYPE_BASED_CASE_COMMON(name), \
@@ -299,8 +333,10 @@ static int duration = 0;
#define TYPE_ID_CASE_COMMON(name) \
.case_name = #name, \
- .bpf_obj_file = "test_core_reloc_type_id.o", \
- .btf_src_file = "btf__core_reloc_" #name ".o" \
+ .bpf_obj_file = "test_core_reloc_type_id.bpf.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_type_id"
#define TYPE_ID_CASE(name, setup_fn) { \
TYPE_ID_CASE_COMMON(name), \
@@ -316,8 +352,10 @@ static int duration = 0;
#define ENUMVAL_CASE_COMMON(name) \
.case_name = #name, \
- .bpf_obj_file = "test_core_reloc_enumval.o", \
- .btf_src_file = "btf__core_reloc_" #name ".o" \
+ .bpf_obj_file = "test_core_reloc_enumval.bpf.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_enumval"
#define ENUMVAL_CASE(name, ...) { \
ENUMVAL_CASE_COMMON(name), \
@@ -331,6 +369,25 @@ static int duration = 0;
.fails = true, \
}
+#define ENUM64VAL_CASE_COMMON(name) \
+ .case_name = #name, \
+ .bpf_obj_file = "test_core_reloc_enum64val.bpf.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_enum64val"
+
+#define ENUM64VAL_CASE(name, ...) { \
+ ENUM64VAL_CASE_COMMON(name), \
+ .output = STRUCT_TO_CHAR_PTR(core_reloc_enum64val_output) \
+ __VA_ARGS__, \
+ .output_len = sizeof(struct core_reloc_enum64val_output), \
+}
+
+#define ENUM64VAL_ERR_CASE(name) { \
+ ENUM64VAL_CASE_COMMON(name), \
+ .fails = true, \
+}
+
struct core_reloc_test_case;
typedef int (*setup_test_fn)(struct core_reloc_test_case *test);
@@ -345,9 +402,10 @@ struct core_reloc_test_case {
const char *output;
int output_len;
bool fails;
+ bool run_btfgen_fails;
bool needs_testmod;
bool relaxed_core_relocs;
- const char *prog_sec_name;
+ const char *prog_name;
const char *raw_tp_name;
setup_test_fn setup;
trigger_test_fn trigger;
@@ -373,8 +431,7 @@ static int setup_type_id_case_local(struct core_reloc_test_case *test)
const char *name;
int i;
- if (CHECK(IS_ERR(local_btf), "local_btf", "failed: %ld\n", PTR_ERR(local_btf)) ||
- CHECK(IS_ERR(targ_btf), "targ_btf", "failed: %ld\n", PTR_ERR(targ_btf))) {
+ if (!ASSERT_OK_PTR(local_btf, "local_btf") || !ASSERT_OK_PTR(targ_btf, "targ_btf")) {
btf__free(local_btf);
btf__free(targ_btf);
return -EINVAL;
@@ -387,7 +444,7 @@ static int setup_type_id_case_local(struct core_reloc_test_case *test)
exp->local_anon_void_ptr = -1;
exp->local_anon_arr = -1;
- for (i = 1; i <= btf__get_nr_types(local_btf); i++)
+ for (i = 1; i < btf__type_cnt(local_btf); i++)
{
t = btf__type_by_id(local_btf, i);
/* we are interested only in anonymous types */
@@ -439,7 +496,7 @@ static int setup_type_id_case_local(struct core_reloc_test_case *test)
static int setup_type_id_case_success(struct core_reloc_test_case *test) {
struct core_reloc_type_id_output *exp = (void *)test->output;
- struct btf *targ_btf = btf__parse(test->btf_src_file, NULL);
+ struct btf *targ_btf;
int err;
err = setup_type_id_case_local(test);
@@ -480,28 +537,19 @@ static int setup_type_id_case_failure(struct core_reloc_test_case *test)
return 0;
}
-static int trigger_module_test_read(const struct core_reloc_test_case *test)
+static int __trigger_module_test_read(const struct core_reloc_test_case *test)
{
struct core_reloc_module_output *exp = (void *)test->output;
- int fd, err;
-
- fd = open("/sys/kernel/bpf_testmod", O_RDONLY);
- err = -errno;
- if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err))
- return err;
-
- read(fd, NULL, exp->len); /* request expected number of bytes */
- close(fd);
+ trigger_module_test_read(exp->len);
return 0;
}
-
-static struct core_reloc_test_case test_cases[] = {
+static const struct core_reloc_test_case test_cases[] = {
/* validate we can find kernel image and use its BTF for relocs */
{
.case_name = "kernel",
- .bpf_obj_file = "test_core_reloc_kernel.o",
+ .bpf_obj_file = "test_core_reloc_kernel.bpf.o",
.btf_src_file = NULL, /* load from /lib/modules/$(uname -r) */
.input = "",
.input_len = 0,
@@ -509,13 +557,16 @@ static struct core_reloc_test_case test_cases[] = {
.valid = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, },
.comm = "test_progs",
.comm_len = sizeof("test_progs"),
+ .local_task_struct_matches = true,
},
.output_len = sizeof(struct core_reloc_kernel_output),
+ .raw_tp_name = "sys_enter",
+ .prog_name = "test_core_kernel",
},
/* validate we can find kernel module BTF types for relocs/attach */
- MODULES_CASE("module_probed", "raw_tp/bpf_testmod_test_read", "bpf_testmod_test_read"),
- MODULES_CASE("module_direct", "tp_btf/bpf_testmod_test_read", NULL),
+ MODULES_CASE("module_probed", "test_core_module_probed", "bpf_testmod_test_read"),
+ MODULES_CASE("module_direct", "test_core_module_direct", NULL),
/* validate BPF program can use multiple flavors to match against
* single target BTF type
@@ -550,9 +601,9 @@ static struct core_reloc_test_case test_cases[] = {
ARRAYS_ERR_CASE(arrays___err_too_small),
ARRAYS_ERR_CASE(arrays___err_too_shallow),
ARRAYS_ERR_CASE(arrays___err_non_array),
- ARRAYS_ERR_CASE(arrays___err_wrong_val_type1),
- ARRAYS_ERR_CASE(arrays___err_wrong_val_type2),
+ ARRAYS_ERR_CASE(arrays___err_wrong_val_type),
ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr),
+ ARRAYS_ERR_CASE(arrays___err_bad_signed_arr_elem_sz),
/* enum/ptr/int handling scenarios */
PRIMITIVES_CASE(primitives),
@@ -581,8 +632,8 @@ static struct core_reloc_test_case test_cases[] = {
/* validate edge cases of capturing relocations */
{
.case_name = "misc",
- .bpf_obj_file = "test_core_reloc_misc.o",
- .btf_src_file = "btf__core_reloc_misc.o",
+ .bpf_obj_file = "test_core_reloc_misc.bpf.o",
+ .btf_src_file = "btf__core_reloc_misc.bpf.o",
.input = (const char *)&(struct core_reloc_misc_extensible[]){
{ .a = 1 },
{ .a = 2 }, /* not read */
@@ -595,6 +646,8 @@ static struct core_reloc_test_case test_cases[] = {
.c = 0, /* BUG in clang, should be 3 */
},
.output_len = sizeof(struct core_reloc_misc_output),
+ .raw_tp_name = "sys_enter",
+ .prog_name = "test_core_misc",
},
/* validate field existence checks */
@@ -642,13 +695,25 @@ static struct core_reloc_test_case test_cases[] = {
},
.output_len = sizeof(struct core_reloc_existence_output),
},
-
- FIELD_EXISTS_ERR_CASE(existence__err_int_sz),
- FIELD_EXISTS_ERR_CASE(existence__err_int_type),
- FIELD_EXISTS_ERR_CASE(existence__err_int_kind),
- FIELD_EXISTS_ERR_CASE(existence__err_arr_kind),
- FIELD_EXISTS_ERR_CASE(existence__err_arr_value_type),
- FIELD_EXISTS_ERR_CASE(existence__err_struct_type),
+ {
+ FIELD_EXISTS_CASE_COMMON(existence___wrong_field_defs),
+ .input = STRUCT_TO_CHAR_PTR(core_reloc_existence___wrong_field_defs) {
+ },
+ .input_len = sizeof(struct core_reloc_existence___wrong_field_defs),
+ .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
+ .a_exists = 0,
+ .b_exists = 0,
+ .c_exists = 0,
+ .arr_exists = 0,
+ .s_exists = 0,
+ .a_value = 0xff000001u,
+ .b_value = 0xff000002u,
+ .c_value = 0xff000003u,
+ .arr_value = 0xff000004u,
+ .s_value = 0xff000005u,
+ },
+ .output_len = sizeof(struct core_reloc_existence_output),
+ },
/* bitfield relocation checks */
BITFIELDS_CASE(bitfields, {
@@ -684,14 +749,16 @@ static struct core_reloc_test_case test_cases[] = {
}),
BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),
- /* size relocation checks */
+ /* field size and offset relocation checks */
SIZE_CASE(size),
SIZE_CASE(size___diff_sz),
+ SIZE_CASE(size___diff_offs),
SIZE_ERR_CASE(size___err_ambiguous),
- /* validate type existence and size relocations */
+ /* validate type existence, match, and size relocations */
TYPE_BASED_CASE(type_based, {
.struct_exists = 1,
+ .complex_struct_exists = 1,
.union_exists = 1,
.enum_exists = 1,
.typedef_named_struct_exists = 1,
@@ -700,8 +767,24 @@ static struct core_reloc_test_case test_cases[] = {
.typedef_int_exists = 1,
.typedef_enum_exists = 1,
.typedef_void_ptr_exists = 1,
+ .typedef_restrict_ptr_exists = 1,
.typedef_func_proto_exists = 1,
.typedef_arr_exists = 1,
+
+ .struct_matches = 1,
+ .complex_struct_matches = 1,
+ .union_matches = 1,
+ .enum_matches = 1,
+ .typedef_named_struct_matches = 1,
+ .typedef_anon_struct_matches = 1,
+ .typedef_struct_ptr_matches = 1,
+ .typedef_int_matches = 1,
+ .typedef_enum_matches = 1,
+ .typedef_void_ptr_matches = 1,
+ .typedef_restrict_ptr_matches = 1,
+ .typedef_func_proto_matches = 1,
+ .typedef_arr_matches = 1,
+
.struct_sz = sizeof(struct a_struct),
.union_sz = sizeof(union a_union),
.enum_sz = sizeof(enum an_enum),
@@ -717,6 +800,45 @@ static struct core_reloc_test_case test_cases[] = {
TYPE_BASED_CASE(type_based___all_missing, {
/* all zeros */
}),
+ TYPE_BASED_CASE(type_based___diff, {
+ .struct_exists = 1,
+ .complex_struct_exists = 1,
+ .union_exists = 1,
+ .enum_exists = 1,
+ .typedef_named_struct_exists = 1,
+ .typedef_anon_struct_exists = 1,
+ .typedef_struct_ptr_exists = 1,
+ .typedef_int_exists = 1,
+ .typedef_enum_exists = 1,
+ .typedef_void_ptr_exists = 1,
+ .typedef_func_proto_exists = 1,
+ .typedef_arr_exists = 1,
+
+ .struct_matches = 1,
+ .complex_struct_matches = 1,
+ .union_matches = 1,
+ .enum_matches = 1,
+ .typedef_named_struct_matches = 1,
+ .typedef_anon_struct_matches = 1,
+ .typedef_struct_ptr_matches = 1,
+ .typedef_int_matches = 0,
+ .typedef_enum_matches = 1,
+ .typedef_void_ptr_matches = 1,
+ .typedef_func_proto_matches = 0,
+ .typedef_arr_matches = 0,
+
+ .struct_sz = sizeof(struct a_struct___diff),
+ .union_sz = sizeof(union a_union___diff),
+ .enum_sz = sizeof(enum an_enum___diff),
+ .typedef_named_struct_sz = sizeof(named_struct_typedef___diff),
+ .typedef_anon_struct_sz = sizeof(anon_struct_typedef___diff),
+ .typedef_struct_ptr_sz = sizeof(struct_ptr_typedef___diff),
+ .typedef_int_sz = sizeof(int_typedef___diff),
+ .typedef_enum_sz = sizeof(enum_typedef___diff),
+ .typedef_void_ptr_sz = sizeof(void_ptr_typedef___diff),
+ .typedef_func_proto_sz = sizeof(func_proto_typedef___diff),
+ .typedef_arr_sz = sizeof(arr_typedef___diff),
+ }),
TYPE_BASED_CASE(type_based___diff_sz, {
.struct_exists = 1,
.union_exists = 1,
@@ -729,6 +851,19 @@ static struct core_reloc_test_case test_cases[] = {
.typedef_void_ptr_exists = 1,
.typedef_func_proto_exists = 1,
.typedef_arr_exists = 1,
+
+ .struct_matches = 0,
+ .union_matches = 0,
+ .enum_matches = 0,
+ .typedef_named_struct_matches = 0,
+ .typedef_anon_struct_matches = 0,
+ .typedef_struct_ptr_matches = 1,
+ .typedef_int_matches = 0,
+ .typedef_enum_matches = 0,
+ .typedef_void_ptr_matches = 1,
+ .typedef_func_proto_matches = 0,
+ .typedef_arr_matches = 0,
+
.struct_sz = sizeof(struct a_struct___diff_sz),
.union_sz = sizeof(union a_union___diff_sz),
.enum_sz = sizeof(enum an_enum___diff_sz),
@@ -743,10 +878,12 @@ static struct core_reloc_test_case test_cases[] = {
}),
TYPE_BASED_CASE(type_based___incompat, {
.enum_exists = 1,
+ .enum_matches = 1,
.enum_sz = sizeof(enum an_enum),
}),
TYPE_BASED_CASE(type_based___fn_wrong_args, {
.struct_exists = 1,
+ .struct_matches = 1,
.struct_sz = sizeof(struct a_struct),
}),
@@ -792,6 +929,45 @@ static struct core_reloc_test_case test_cases[] = {
.anon_val2 = 0x222,
}),
ENUMVAL_ERR_CASE(enumval___err_missing),
+
+ /* 64bit enumerator value existence and value relocations */
+ ENUM64VAL_CASE(enum64val, {
+ .unsigned_val1_exists = true,
+ .unsigned_val2_exists = true,
+ .unsigned_val3_exists = true,
+ .signed_val1_exists = true,
+ .signed_val2_exists = true,
+ .signed_val3_exists = true,
+ .unsigned_val1 = 0x1ffffffffULL,
+ .unsigned_val2 = 0x2,
+ .signed_val1 = 0x1ffffffffLL,
+ .signed_val2 = -2,
+ }),
+ ENUM64VAL_CASE(enum64val___diff, {
+ .unsigned_val1_exists = true,
+ .unsigned_val2_exists = true,
+ .unsigned_val3_exists = true,
+ .signed_val1_exists = true,
+ .signed_val2_exists = true,
+ .signed_val3_exists = true,
+ .unsigned_val1 = 0x101ffffffffULL,
+ .unsigned_val2 = 0x202ffffffffULL,
+ .signed_val1 = -101,
+ .signed_val2 = -202,
+ }),
+ ENUM64VAL_CASE(enum64val___val3_missing, {
+ .unsigned_val1_exists = true,
+ .unsigned_val2_exists = true,
+ .unsigned_val3_exists = false,
+ .signed_val1_exists = true,
+ .signed_val2_exists = true,
+ .signed_val3_exists = false,
+ .unsigned_val1 = 0x111ffffffffULL,
+ .unsigned_val2 = 0x222,
+ .signed_val1 = 0x111ffffffffLL,
+ .signed_val2 = -222,
+ }),
+ ENUM64VAL_ERR_CASE(enum64val___err_missing),
};
struct data {
@@ -807,13 +983,27 @@ static size_t roundup_page(size_t sz)
return (sz + page_size - 1) / page_size * page_size;
}
-void test_core_reloc(void)
+static int run_btfgen(const char *src_btf, const char *dst_btf, const char *objpath)
+{
+ char command[4096];
+ int n;
+
+ n = snprintf(command, sizeof(command),
+ "./bpftool gen min_core_btf %s %s %s",
+ src_btf, dst_btf, objpath);
+ if (n < 0 || n >= sizeof(command))
+ return -1;
+
+ return system(command);
+}
+
+static void run_core_reloc_tests(bool use_btfgen)
{
const size_t mmap_sz = roundup_page(sizeof(struct data));
- struct bpf_object_load_attr load_attr = {};
- struct core_reloc_test_case *test_case;
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
+ struct core_reloc_test_case *test_case, test_case_copy;
const char *tp_name, *probe_name;
- int err, i, equal;
+ int err, i, equal, fd;
struct bpf_link *link = NULL;
struct bpf_map *data_map;
struct bpf_program *prog;
@@ -822,10 +1012,14 @@ void test_core_reloc(void)
struct data *data;
void *mmap_data = NULL;
- my_pid_tgid = getpid() | ((uint64_t)syscall(SYS_gettid) << 32);
+ my_pid_tgid = getpid() | ((uint64_t)sys_gettid() << 32);
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
- test_case = &test_cases[i];
+ char btf_file[] = "/tmp/core_reloc.btf.XXXXXX";
+
+ test_case_copy = test_cases[i];
+ test_case = &test_case_copy;
+
if (!test__start_subtest(test_case->case_name))
continue;
@@ -834,40 +1028,58 @@ void test_core_reloc(void)
continue;
}
+ /* generate a "minimal" BTF file and use it as source */
+ if (use_btfgen) {
+
+ if (!test_case->btf_src_file || test_case->run_btfgen_fails) {
+ test__skip();
+ continue;
+ }
+
+ fd = mkstemp(btf_file);
+ if (!ASSERT_GE(fd, 0, "btf_tmp"))
+ continue;
+ close(fd); /* we only need the path */
+ err = run_btfgen(test_case->btf_src_file, btf_file,
+ test_case->bpf_obj_file);
+ if (!ASSERT_OK(err, "run_btfgen"))
+ continue;
+
+ test_case->btf_src_file = btf_file;
+ }
+
if (test_case->setup) {
err = test_case->setup(test_case);
if (CHECK(err, "test_setup", "test #%d setup failed: %d\n", i, err))
continue;
}
- obj = bpf_object__open_file(test_case->bpf_obj_file, NULL);
- if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n",
- test_case->bpf_obj_file, PTR_ERR(obj)))
- continue;
-
- probe_name = "raw_tracepoint/sys_enter";
- tp_name = "sys_enter";
- if (test_case->prog_sec_name) {
- probe_name = test_case->prog_sec_name;
- tp_name = test_case->raw_tp_name; /* NULL for tp_btf */
+ if (test_case->btf_src_file) {
+ err = access(test_case->btf_src_file, R_OK);
+ if (!ASSERT_OK(err, "btf_src_file"))
+ continue;
}
- prog = bpf_object__find_program_by_title(obj, probe_name);
+ open_opts.btf_custom_path = test_case->btf_src_file;
+ obj = bpf_object__open_file(test_case->bpf_obj_file, &open_opts);
+ if (!ASSERT_OK_PTR(obj, "obj_open"))
+ goto cleanup;
+
+ probe_name = test_case->prog_name;
+ tp_name = test_case->raw_tp_name; /* NULL for tp_btf */
+ prog = bpf_object__find_program_by_name(obj, probe_name);
if (CHECK(!prog, "find_probe",
"prog '%s' not found\n", probe_name))
goto cleanup;
- load_attr.obj = obj;
- load_attr.log_level = 0;
- load_attr.target_btf_path = test_case->btf_src_file;
- err = bpf_object__load_xattr(&load_attr);
+ err = bpf_object__load(obj);
if (err) {
if (!test_case->fails)
- CHECK(false, "obj_load", "failed to load prog '%s': %d\n", probe_name, err);
+ ASSERT_OK(err, "obj_load");
goto cleanup;
}
- data_map = bpf_object__find_map_by_name(obj, "test_cor.bss");
+ data_map = bpf_object__find_map_by_name(obj, ".bss");
if (CHECK(!data_map, "find_data_map", "data map not found\n"))
goto cleanup;
@@ -881,12 +1093,12 @@ void test_core_reloc(void)
data = mmap_data;
memset(mmap_data, 0, sizeof(*data));
- memcpy(data->in, test_case->input, test_case->input_len);
+ if (test_case->input_len)
+ memcpy(data->in, test_case->input, test_case->input_len);
data->my_pid_tgid = my_pid_tgid;
link = bpf_program__attach_raw_tracepoint(prog, tp_name);
- if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
- PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto cleanup;
/* trigger test run */
@@ -902,10 +1114,8 @@ void test_core_reloc(void)
goto cleanup;
}
- if (test_case->fails) {
- CHECK(false, "obj_load_fail", "should fail to load prog '%s'\n", probe_name);
+ if (!ASSERT_FALSE(test_case->fails, "obj_load_should_fail"))
goto cleanup;
- }
equal = memcmp(data->out, test_case->output,
test_case->output_len) == 0;
@@ -929,10 +1139,20 @@ cleanup:
CHECK_FAIL(munmap(mmap_data, mmap_sz));
mmap_data = NULL;
}
- if (!IS_ERR_OR_NULL(link)) {
- bpf_link__destroy(link);
- link = NULL;
- }
+ if (use_btfgen)
+ remove(test_case->btf_src_file);
+ bpf_link__destroy(link);
+ link = NULL;
bpf_object__close(obj);
}
}
+
+void test_core_reloc(void)
+{
+ run_core_reloc_tests(false);
+}
+
+void test_core_reloc_btfgen(void)
+{
+ run_core_reloc_tests(true);
+}
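
The btfgen variant above shells out to "bpftool gen min_core_btf" and then
points libbpf at the stripped BTF via open_opts.btf_custom_path. A minimal
sketch of that open flow, assuming a libbpf version that provides
bpf_object_open_opts.btf_custom_path (the helper name is illustrative):

    #include <bpf/libbpf.h>

    /* Open a BPF object so that CO-RE relocations resolve against a
     * caller-supplied BTF file instead of the running kernel's BTF.
     */
    static struct bpf_object *open_with_custom_btf(const char *obj_path,
                                                   const char *btf_path)
    {
            LIBBPF_OPTS(bpf_object_open_opts, opts,
                    .btf_custom_path = btf_path,
            );

            return bpf_object__open_file(obj_path, &opts);
    }
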
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc_raw.c b/tools/testing/selftests/bpf/prog_tests/core_reloc_raw.c
new file mode 100644
index 000000000000..a18d3680fb16
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc_raw.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Test cases that can't load programs using libbpf and need direct
+ * BPF syscall access
+ */
+
+#include <sys/syscall.h>
+#include <bpf/libbpf.h>
+#include <bpf/btf.h>
+
+#include "test_progs.h"
+#include "test_btf.h"
+#include "bpf/libbpf_internal.h"
+
+static char log[16 * 1024];
+
+/* Check that verifier rejects BPF program containing relocation
+ * pointing to non-existent BTF type.
+ */
+static void test_bad_local_id(void)
+{
+ struct test_btf {
+ struct btf_header hdr;
+ __u32 types[15];
+ char strings[128];
+ } raw_btf = {
+ .hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_off = 0,
+ .type_len = sizeof(raw_btf.types),
+ .str_off = offsetof(struct test_btf, strings) -
+ offsetof(struct test_btf, types),
+ .str_len = sizeof(raw_btf.strings),
+ },
+ .types = {
+ BTF_PTR_ENC(0), /* [1] void* */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [2] int */
+ BTF_FUNC_PROTO_ENC(2, 1), /* [3] int (*)(void*) */
+ BTF_FUNC_PROTO_ARG_ENC(8, 1),
+ BTF_FUNC_ENC(8, 3) /* [4] FUNC 'foo' type_id=3 */
+ },
+ .strings = "\0int\0 0\0foo\0"
+ };
+ __u32 log_level = 1 | 2 | 4;
+ LIBBPF_OPTS(bpf_btf_load_opts, opts,
+ .log_buf = log,
+ .log_size = sizeof(log),
+ .log_level = log_level,
+ );
+ struct bpf_insn insns[] = {
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ struct bpf_func_info funcs[] = {
+ {
+ .insn_off = 0,
+ .type_id = 4,
+ }
+ };
+ struct bpf_core_relo relos[] = {
+ {
+ .insn_off = 0, /* patch first instruction (r0 = 0) */
+ .type_id = 100500, /* !!! this type id does not exist */
+ .access_str_off = 6, /* offset of "0" */
+ .kind = BPF_CORE_TYPE_ID_LOCAL,
+ }
+ };
+ union bpf_attr attr;
+ int saved_errno;
+ int prog_fd = -1;
+ int btf_fd = -1;
+
+ btf_fd = bpf_btf_load(&raw_btf, sizeof(raw_btf), &opts);
+ saved_errno = errno;
+ if (btf_fd < 0 || env.verbosity > VERBOSE_NORMAL) {
+ printf("-------- BTF load log start --------\n");
+ printf("%s", log);
+ printf("-------- BTF load log end ----------\n");
+ }
+ if (btf_fd < 0) {
+ PRINT_FAIL("bpf_btf_load() failed, errno=%d\n", saved_errno);
+ return;
+ }
+
+ log[0] = 0;
+ memset(&attr, 0, sizeof(attr));
+ attr.prog_btf_fd = btf_fd;
+ attr.prog_type = BPF_TRACE_RAW_TP;
+ attr.license = (__u64)"GPL";
+ attr.insns = (__u64)&insns;
+ attr.insn_cnt = sizeof(insns) / sizeof(*insns);
+ attr.log_buf = (__u64)log;
+ attr.log_size = sizeof(log);
+ attr.log_level = log_level;
+ attr.func_info = (__u64)funcs;
+ attr.func_info_cnt = sizeof(funcs) / sizeof(*funcs);
+ attr.func_info_rec_size = sizeof(*funcs);
+ attr.core_relos = (__u64)relos;
+ attr.core_relo_cnt = sizeof(relos) / sizeof(*relos);
+ attr.core_relo_rec_size = sizeof(*relos);
+ prog_fd = sys_bpf_prog_load(&attr, sizeof(attr), 1);
+ saved_errno = errno;
+ if (prog_fd < 0 || env.verbosity > VERBOSE_NORMAL) {
+ printf("-------- program load log start --------\n");
+ printf("%s", log);
+ printf("-------- program load log end ----------\n");
+ }
+ if (prog_fd >= 0) {
+ PRINT_FAIL("sys_bpf_prog_load() expected to fail\n");
+ goto out;
+ }
+ ASSERT_HAS_SUBSTR(log, "relo #0: bad type id 100500", "program load log");
+
+out:
+ close(prog_fd);
+ close(btf_fd);
+}
+
+void test_core_reloc_raw(void)
+{
+ if (test__start_subtest("bad_local_id"))
+ test_bad_local_id();
+}
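
The raw BTF above locates every name, including the relocation's access
string, by byte offset into the NUL-separated .strings blob. A standalone
sketch (illustrative, not part of the patch) of how those offsets resolve:

    #include <stdio.h>

    int main(void)
    {
            /* Same blob as raw_btf.strings above; offset 0 must be "". */
            static const char strings[] = "\0int\0 0\0foo\0";

            printf("%s\n", strings + 1); /* "int": name of INT type [2]     */
            printf("%s\n", strings + 6); /* "0":   relo access_str_off = 6  */
            printf("%s\n", strings + 8); /* "foo": FUNC name, name_off = 8  */
            return 0;
    }
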
diff --git a/tools/testing/selftests/bpf/prog_tests/core_retro.c b/tools/testing/selftests/bpf/prog_tests/core_retro.c
index 6acb0e94d4d7..4a2c256c8db6 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_retro.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_retro.c
@@ -6,31 +6,32 @@
void test_core_retro(void)
{
- int err, zero = 0, res, duration = 0, my_pid = getpid();
+ int err, zero = 0, res, my_pid = getpid();
struct test_core_retro *skel;
/* load program */
skel = test_core_retro__open_and_load();
- if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
goto out_close;
- err = bpf_map_update_elem(bpf_map__fd(skel->maps.exp_tgid_map), &zero, &my_pid, 0);
- if (CHECK(err, "map_update", "failed to set expected PID: %d\n", errno))
+ err = bpf_map__update_elem(skel->maps.exp_tgid_map, &zero, sizeof(zero),
+ &my_pid, sizeof(my_pid), 0);
+ if (!ASSERT_OK(err, "map_update"))
goto out_close;
/* attach probe */
err = test_core_retro__attach(skel);
- if (CHECK(err, "attach_kprobe", "err %d\n", err))
+ if (!ASSERT_OK(err, "attach_kprobe"))
goto out_close;
/* trigger */
usleep(1);
- err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res);
- if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno))
+ err = bpf_map__lookup_elem(skel->maps.results, &zero, sizeof(zero), &res, sizeof(res), 0);
+ if (!ASSERT_OK(err, "map_lookup"))
goto out_close;
- CHECK(res != my_pid, "pid_check", "got %d != exp %d\n", res, my_pid);
+ ASSERT_EQ(res, my_pid, "pid_check");
out_close:
test_core_retro__destroy(skel);
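
The conversion above replaces the fd-based bpf_map_update_elem() and
bpf_map_lookup_elem() calls with the sized bpf_map__update_elem() and
bpf_map__lookup_elem() accessors, which validate key/value sizes against
the map definition. The same migration in isolation, assuming a libbpf
release (roughly 0.8 or later) that provides these accessors; the helper
name is illustrative:

    #include <bpf/libbpf.h>

    static int set_expected_tgid(struct bpf_map *map, int pid)
    {
            int zero = 0;

            /* Old style, through the raw map fd, with unchecked sizes:
             *   bpf_map_update_elem(bpf_map__fd(map), &zero, &pid, 0);
             * New style: libbpf rejects mismatched key/value sizes.
             */
            return bpf_map__update_elem(map, &zero, sizeof(zero),
                                        &pid, sizeof(pid), 0);
    }
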
diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c
new file mode 100644
index 000000000000..6c45330a5ca3
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "cpumask_failure.skel.h"
+#include "cpumask_success.skel.h"
+
+static const char * const cpumask_success_testcases[] = {
+ "test_alloc_free_cpumask",
+ "test_set_clear_cpu",
+ "test_setall_clear_cpu",
+ "test_first_firstzero_cpu",
+ "test_firstand_nocpu",
+ "test_test_and_set_clear",
+ "test_and_or_xor",
+ "test_intersects_subset",
+ "test_copy_any_anyand",
+ "test_insert_leave",
+ "test_insert_remove_release",
+ "test_global_mask_rcu",
+ "test_global_mask_array_one_rcu",
+ "test_global_mask_array_rcu",
+ "test_global_mask_array_l2_rcu",
+ "test_global_mask_nested_rcu",
+ "test_global_mask_nested_deep_rcu",
+ "test_global_mask_nested_deep_array_rcu",
+ "test_cpumask_weight",
+ "test_refcount_null_tracking",
+ "test_populate_reject_small_mask",
+ "test_populate_reject_unaligned",
+ "test_populate",
+};
+
+static void verify_success(const char *prog_name)
+{
+ struct cpumask_success *skel;
+ struct bpf_program *prog;
+ struct bpf_link *link = NULL;
+ pid_t child_pid;
+ int status, err;
+
+ skel = cpumask_success__open();
+ if (!ASSERT_OK_PTR(skel, "cpumask_success__open"))
+ return;
+
+ skel->bss->pid = getpid();
+ skel->bss->nr_cpus = libbpf_num_possible_cpus();
+
+ err = cpumask_success__load(skel);
+ if (!ASSERT_OK(err, "cpumask_success__load"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
+ goto cleanup;
+
+ child_pid = fork();
+ if (!ASSERT_GT(child_pid, -1, "child_pid"))
+ goto cleanup;
+ if (child_pid == 0)
+ _exit(0);
+ waitpid(child_pid, &status, 0);
+ ASSERT_OK(skel->bss->err, "post_wait_err");
+
+cleanup:
+ bpf_link__destroy(link);
+ cpumask_success__destroy(skel);
+}
+
+void test_cpumask(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cpumask_success_testcases); i++) {
+ if (!test__start_subtest(cpumask_success_testcases[i]))
+ continue;
+
+ verify_success(cpumask_success_testcases[i]);
+ }
+
+ RUN_TESTS(cpumask_failure);
+}
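
RUN_TESTS(cpumask_failure) hands the skeleton to the test_loader harness,
which loads each program and compares the verifier outcome against
annotations in the BPF source. A hypothetical excerpt showing the shape of
such a program (body and expected message are illustrative, not taken from
this patch):

    #include <vmlinux.h>
    #include <bpf/bpf_tracing.h>
    #include "bpf_misc.h"        /* __failure/__msg annotations */
    #include "cpumask_common.h"  /* declares the bpf_cpumask_create() kfunc */

    char _license[] SEC("license") = "GPL";

    SEC("tp_btf/task_newtask")
    __failure __msg("Unreleased reference")
    int BPF_PROG(test_alloc_leak, struct task_struct *task, u64 clone_flags)
    {
            /* Acquire a cpumask and deliberately leak it; the verifier
             * must reject the program with an unreleased-reference error.
             */
            bpf_cpumask_create();
            return 0;
    }
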
diff --git a/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
new file mode 100644
index 000000000000..42bd07f7218d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include <linux/if_alg.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "crypto_sanity.skel.h"
+#include "crypto_basic.skel.h"
+
+#define NS_TEST "crypto_sanity_ns"
+#define IPV6_IFACE_ADDR "face::1"
+static const unsigned char crypto_key[] = "testtest12345678";
+static const char plain_text[] = "stringtoencrypt0";
+static int opfd = -1, tfmfd = -1;
+static const char algo[] = "ecb(aes)";
+static int init_afalg(void)
+{
+ struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ .salg_type = "skcipher",
+ .salg_name = "ecb(aes)"
+ };
+
+ tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
+ if (tfmfd == -1)
+ return errno;
+ if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) == -1)
+ return errno;
+ if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, crypto_key, 16) == -1)
+ return errno;
+ opfd = accept(tfmfd, NULL, 0);
+ if (opfd == -1)
+ return errno;
+ return 0;
+}
+
+static void deinit_afalg(void)
+{
+ if (tfmfd != -1)
+ close(tfmfd);
+ if (opfd != -1)
+ close(opfd);
+}
+
+static void do_crypt_afalg(const void *src, void *dst, int size, bool encrypt)
+{
+ struct msghdr msg = {};
+ struct cmsghdr *cmsg;
+ char cbuf[CMSG_SPACE(4)] = {0};
+ struct iovec iov;
+
+ msg.msg_control = cbuf;
+ msg.msg_controllen = sizeof(cbuf);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_ALG;
+ cmsg->cmsg_type = ALG_SET_OP;
+ cmsg->cmsg_len = CMSG_LEN(4);
+ *(__u32 *)CMSG_DATA(cmsg) = encrypt ? ALG_OP_ENCRYPT : ALG_OP_DECRYPT;
+
+ iov.iov_base = (char *)src;
+ iov.iov_len = size;
+
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ sendmsg(opfd, &msg, 0);
+ read(opfd, dst, size);
+}
+
+void test_crypto_basic(void)
+{
+ RUN_TESTS(crypto_basic);
+}
+
+void test_crypto_sanity(void)
+{
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_hook, .attach_point = BPF_TC_EGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, tc_attach_enc);
+ LIBBPF_OPTS(bpf_tc_opts, tc_attach_dec);
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct nstoken *nstoken = NULL;
+ struct crypto_sanity *skel;
+ char afalg_plain[16] = {0};
+ char afalg_dst[16] = {0};
+ struct sockaddr_in6 addr;
+ int sockfd, err, pfd;
+ socklen_t addrlen;
+ u16 udp_test_port;
+
+ skel = crypto_sanity__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel open"))
+ return;
+
+ SYS(fail, "ip netns add %s", NS_TEST);
+ SYS(fail, "ip -net %s -6 addr add %s/128 dev lo nodad", NS_TEST, IPV6_IFACE_ADDR);
+ SYS(fail, "ip -net %s link set dev lo up", NS_TEST);
+
+ nstoken = open_netns(NS_TEST);
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto fail;
+
+ err = init_afalg();
+ if (!ASSERT_OK(err, "AF_ALG init fail"))
+ goto fail;
+
+ qdisc_hook.ifindex = if_nametoindex("lo");
+ if (!ASSERT_GT(qdisc_hook.ifindex, 0, "if_nametoindex lo"))
+ goto fail;
+
+ skel->bss->key_len = 16;
+ skel->bss->authsize = 0;
+ udp_test_port = skel->data->udp_test_port;
+ memcpy(skel->bss->key, crypto_key, sizeof(crypto_key));
+ snprintf(skel->bss->algo, 128, "%s", algo);
+ pfd = bpf_program__fd(skel->progs.skb_crypto_setup);
+ if (!ASSERT_GT(pfd, 0, "skb_crypto_setup fd"))
+ goto fail;
+
+ err = bpf_prog_test_run_opts(pfd, &opts);
+ if (!ASSERT_OK(err, "skb_crypto_setup") ||
+ !ASSERT_OK(opts.retval, "skb_crypto_setup retval"))
+ goto fail;
+
+ if (!ASSERT_OK(skel->bss->status, "skb_crypto_setup status"))
+ goto fail;
+
+ err = bpf_tc_hook_create(&qdisc_hook);
+ if (!ASSERT_OK(err, "create qdisc hook"))
+ goto fail;
+
+ addrlen = sizeof(addr);
+ err = make_sockaddr(AF_INET6, IPV6_IFACE_ADDR, udp_test_port,
+ (void *)&addr, &addrlen);
+ if (!ASSERT_OK(err, "make_sockaddr"))
+ goto fail;
+
+ tc_attach_enc.prog_fd = bpf_program__fd(skel->progs.encrypt_sanity);
+ err = bpf_tc_attach(&qdisc_hook, &tc_attach_enc);
+ if (!ASSERT_OK(err, "attach encrypt filter"))
+ goto fail;
+
+ sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
+ if (!ASSERT_NEQ(sockfd, -1, "encrypt socket"))
+ goto fail;
+ err = sendto(sockfd, plain_text, sizeof(plain_text), 0, (void *)&addr, addrlen);
+ close(sockfd);
+ if (!ASSERT_EQ(err, sizeof(plain_text), "encrypt send"))
+ goto fail;
+
+ do_crypt_afalg(plain_text, afalg_dst, sizeof(afalg_dst), true);
+
+ if (!ASSERT_OK(skel->bss->status, "encrypt status"))
+ goto fail;
+ if (!ASSERT_STRNEQ(skel->bss->dst, afalg_dst, sizeof(afalg_dst), "encrypt AF_ALG"))
+ goto fail;
+
+ tc_attach_enc.flags = tc_attach_enc.prog_fd = tc_attach_enc.prog_id = 0;
+ err = bpf_tc_detach(&qdisc_hook, &tc_attach_enc);
+ if (!ASSERT_OK(err, "bpf_tc_detach encrypt"))
+ goto fail;
+
+ tc_attach_dec.prog_fd = bpf_program__fd(skel->progs.decrypt_sanity);
+ err = bpf_tc_attach(&qdisc_hook, &tc_attach_dec);
+ if (!ASSERT_OK(err, "attach decrypt filter"))
+ goto fail;
+
+ sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
+ if (!ASSERT_NEQ(sockfd, -1, "decrypt socket"))
+ goto fail;
+ err = sendto(sockfd, afalg_dst, sizeof(afalg_dst), 0, (void *)&addr, addrlen);
+ close(sockfd);
+ if (!ASSERT_EQ(err, sizeof(afalg_dst), "decrypt send"))
+ goto fail;
+
+ do_crypt_afalg(afalg_dst, afalg_plain, sizeof(afalg_plain), false);
+
+ if (!ASSERT_OK(skel->bss->status, "decrypt status"))
+ goto fail;
+ if (!ASSERT_STRNEQ(skel->bss->dst, afalg_plain, sizeof(afalg_plain), "decrypt AF_ALG"))
+ goto fail;
+
+ tc_attach_dec.flags = tc_attach_dec.prog_fd = tc_attach_dec.prog_id = 0;
+ err = bpf_tc_detach(&qdisc_hook, &tc_attach_dec);
+ ASSERT_OK(err, "bpf_tc_detach decrypt");
+
+fail:
+ close_netns(nstoken);
+ deinit_afalg();
+ SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
+ crypto_sanity__destroy(skel);
+}
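
One caveat in do_crypt_afalg() above: the sendmsg() and read() results are
ignored, so short I/O would only surface later as a ciphertext mismatch. A
hardened variant under the same AF_ALG setup (a sketch, not part of the
patch; reuses the file-scope opfd):

    static int do_crypt_afalg_checked(const void *src, void *dst, int size,
                                      bool encrypt)
    {
            char cbuf[CMSG_SPACE(4)] = {0};
            struct msghdr msg = {};
            struct cmsghdr *cmsg;
            struct iovec iov;

            msg.msg_control = cbuf;
            msg.msg_controllen = sizeof(cbuf);

            cmsg = CMSG_FIRSTHDR(&msg);
            cmsg->cmsg_level = SOL_ALG;
            cmsg->cmsg_type = ALG_SET_OP;
            cmsg->cmsg_len = CMSG_LEN(4);
            *(__u32 *)CMSG_DATA(cmsg) = encrypt ? ALG_OP_ENCRYPT : ALG_OP_DECRYPT;

            iov.iov_base = (char *)src;
            iov.iov_len = size;
            msg.msg_iov = &iov;
            msg.msg_iovlen = 1;

            /* Propagate short writes/reads instead of silently dropping them. */
            if (sendmsg(opfd, &msg, 0) != size)
                    return -EIO;
            if (read(opfd, dst, size) != size)
                    return -EIO;
            return 0;
    }
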
diff --git a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
new file mode 100644
index 000000000000..dd75ccb03770
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
@@ -0,0 +1,821 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+#include <regex.h>
+#include <test_progs.h>
+
+#include "bpf/btf.h"
+#include "bpf_util.h"
+#include "linux/filter.h"
+#include "linux/kernel.h"
+#include "disasm_helpers.h"
+
+#define MAX_PROG_TEXT_SZ (32 * 1024)
+
+/* The code in this file serves the sole purpose of executing test cases
+ * specified in the test_cases array. Each test case specifies a program
+ * type, context field offset, and disassembly patterns that correspond
+ * to read and write instructions generated by
+ * verifier.c:convert_ctx_access() for accessing that field.
+ *
+ * For each test case, up to three programs are created:
+ * - One that uses BPF_LDX_MEM to read the context field.
+ * - One that uses BPF_STX_MEM to write to the context field.
+ * - One that uses BPF_ST_MEM to write to the context field.
+ *
+ * The disassembly of each program is then compared with the pattern
+ * specified in the test case.
+ */
+struct test_case {
+ char *name;
+ enum bpf_prog_type prog_type;
+ enum bpf_attach_type expected_attach_type;
+ int field_offset;
+ int field_sz;
+ /* Program generated for BPF_ST_MEM uses value 42 by default;
+ * this field allows specifying a custom value.
+ */
+ struct {
+ bool use;
+ int value;
+ } st_value;
+ /* Pattern for BPF_LDX_MEM(field_sz, dst, ctx, field_offset) */
+ char *read;
+ /* Pattern for BPF_STX_MEM(field_sz, ctx, src, field_offset) and
+ * BPF_ST_MEM (field_sz, ctx, src, field_offset)
+ */
+ char *write;
+ /* Pattern for BPF_ST_MEM(field_sz, ctx, src, field_offset),
+ * takes priority over `write`.
+ */
+ char *write_st;
+ /* Pattern for BPF_STX_MEM (field_sz, ctx, src, field_offset),
+ * takes priority over `write`.
+ */
+ char *write_stx;
+};
+
+#define N(_prog_type, type, field, name_extra...) \
+ .name = #_prog_type "." #field name_extra, \
+ .prog_type = BPF_PROG_TYPE_##_prog_type, \
+ .field_offset = offsetof(type, field), \
+ .field_sz = sizeof(typeof(((type *)NULL)->field))
+
+static struct test_case test_cases[] = {
+/* Sign extension on s390 changes the pattern */
+#if defined(__x86_64__) || defined(__aarch64__)
+ {
+ N(SCHED_CLS, struct __sk_buff, tstamp),
+ .read = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);"
+ "if w11 & 0x4 goto pc+1;"
+ "goto pc+4;"
+ "if w11 & 0x3 goto pc+1;"
+ "goto pc+2;"
+ "$dst = 0;"
+ "goto pc+1;"
+ "$dst = *(u64 *)($ctx + sk_buff::tstamp);",
+ .write = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);"
+ "if w11 & 0x4 goto pc+1;"
+ "goto pc+2;"
+ "w11 &= -4;"
+ "*(u8 *)($ctx + sk_buff::__mono_tc_offset) = r11;"
+ "*(u64 *)($ctx + sk_buff::tstamp) = $src;",
+ },
+#endif
+ {
+ N(SCHED_CLS, struct __sk_buff, priority),
+ .read = "$dst = *(u32 *)($ctx + sk_buff::priority);",
+ .write = "*(u32 *)($ctx + sk_buff::priority) = $src;",
+ },
+ {
+ N(SCHED_CLS, struct __sk_buff, mark),
+ .read = "$dst = *(u32 *)($ctx + sk_buff::mark);",
+ .write = "*(u32 *)($ctx + sk_buff::mark) = $src;",
+ },
+ {
+ N(SCHED_CLS, struct __sk_buff, cb[0]),
+ .read = "$dst = *(u32 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::data));",
+ .write = "*(u32 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::data)) = $src;",
+ },
+ {
+ N(SCHED_CLS, struct __sk_buff, tc_classid),
+ .read = "$dst = *(u16 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::tc_classid));",
+ .write = "*(u16 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::tc_classid)) = $src;",
+ },
+ {
+ N(SCHED_CLS, struct __sk_buff, tc_index),
+ .read = "$dst = *(u16 *)($ctx + sk_buff::tc_index);",
+ .write = "*(u16 *)($ctx + sk_buff::tc_index) = $src;",
+ },
+ {
+ N(SCHED_CLS, struct __sk_buff, queue_mapping),
+ .read = "$dst = *(u16 *)($ctx + sk_buff::queue_mapping);",
+ .write_stx = "if $src >= 0xffff goto pc+1;"
+ "*(u16 *)($ctx + sk_buff::queue_mapping) = $src;",
+ .write_st = "*(u16 *)($ctx + sk_buff::queue_mapping) = $src;",
+ },
+ {
+ /* This is a corner case in filter.c:bpf_convert_ctx_access() */
+ N(SCHED_CLS, struct __sk_buff, queue_mapping, ".ushrt_max"),
+ .st_value = { true, USHRT_MAX },
+ .write_st = "goto pc+0;",
+ },
+ {
+ N(CGROUP_SOCK, struct bpf_sock, bound_dev_if),
+ .read = "$dst = *(u32 *)($ctx + sock_common::skc_bound_dev_if);",
+ .write = "*(u32 *)($ctx + sock_common::skc_bound_dev_if) = $src;",
+ },
+ {
+ N(CGROUP_SOCK, struct bpf_sock, mark),
+ .read = "$dst = *(u32 *)($ctx + sock::sk_mark);",
+ .write = "*(u32 *)($ctx + sock::sk_mark) = $src;",
+ },
+ {
+ N(CGROUP_SOCK, struct bpf_sock, priority),
+ .read = "$dst = *(u32 *)($ctx + sock::sk_priority);",
+ .write = "*(u32 *)($ctx + sock::sk_priority) = $src;",
+ },
+ {
+ N(SOCK_OPS, struct bpf_sock_ops, replylong[0]),
+ .read = "$dst = *(u32 *)($ctx + bpf_sock_ops_kern::replylong);",
+ .write = "*(u32 *)($ctx + bpf_sock_ops_kern::replylong) = $src;",
+ },
+ {
+ N(CGROUP_SYSCTL, struct bpf_sysctl, file_pos),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ .read = "$dst = *(u64 *)($ctx + bpf_sysctl_kern::ppos);"
+ "$dst = *(u32 *)($dst +0);",
+ .write = "*(u64 *)($ctx + bpf_sysctl_kern::tmp_reg) = r9;"
+ "r9 = *(u64 *)($ctx + bpf_sysctl_kern::ppos);"
+ "*(u32 *)(r9 +0) = $src;"
+ "r9 = *(u64 *)($ctx + bpf_sysctl_kern::tmp_reg);",
+#else
+ .read = "$dst = *(u64 *)($ctx + bpf_sysctl_kern::ppos);"
+ "$dst = *(u32 *)($dst +4);",
+ .write = "*(u64 *)($ctx + bpf_sysctl_kern::tmp_reg) = r9;"
+ "r9 = *(u64 *)($ctx + bpf_sysctl_kern::ppos);"
+ "*(u32 *)(r9 +4) = $src;"
+ "r9 = *(u64 *)($ctx + bpf_sysctl_kern::tmp_reg);",
+#endif
+ },
+ {
+ N(CGROUP_SOCKOPT, struct bpf_sockopt, sk),
+ .read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::sk);",
+ .expected_attach_type = BPF_CGROUP_GETSOCKOPT,
+ },
+ {
+ N(CGROUP_SOCKOPT, struct bpf_sockopt, level),
+ .read = "$dst = *(u32 *)($ctx + bpf_sockopt_kern::level);",
+ .write = "*(u32 *)($ctx + bpf_sockopt_kern::level) = $src;",
+ .expected_attach_type = BPF_CGROUP_SETSOCKOPT,
+ },
+ {
+ N(CGROUP_SOCKOPT, struct bpf_sockopt, optname),
+ .read = "$dst = *(u32 *)($ctx + bpf_sockopt_kern::optname);",
+ .write = "*(u32 *)($ctx + bpf_sockopt_kern::optname) = $src;",
+ .expected_attach_type = BPF_CGROUP_SETSOCKOPT,
+ },
+ {
+ N(CGROUP_SOCKOPT, struct bpf_sockopt, optlen),
+ .read = "$dst = *(u32 *)($ctx + bpf_sockopt_kern::optlen);",
+ .write = "*(u32 *)($ctx + bpf_sockopt_kern::optlen) = $src;",
+ .expected_attach_type = BPF_CGROUP_SETSOCKOPT,
+ },
+ {
+ N(CGROUP_SOCKOPT, struct bpf_sockopt, retval),
+ .read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::current_task);"
+ "$dst = *(u64 *)($dst + task_struct::bpf_ctx);"
+ "$dst = *(u32 *)($dst + bpf_cg_run_ctx::retval);",
+ .write = "*(u64 *)($ctx + bpf_sockopt_kern::tmp_reg) = r9;"
+ "r9 = *(u64 *)($ctx + bpf_sockopt_kern::current_task);"
+ "r9 = *(u64 *)(r9 + task_struct::bpf_ctx);"
+ "*(u32 *)(r9 + bpf_cg_run_ctx::retval) = $src;"
+ "r9 = *(u64 *)($ctx + bpf_sockopt_kern::tmp_reg);",
+ .expected_attach_type = BPF_CGROUP_GETSOCKOPT,
+ },
+ {
+ N(CGROUP_SOCKOPT, struct bpf_sockopt, optval),
+ .read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::optval);",
+ .expected_attach_type = BPF_CGROUP_GETSOCKOPT,
+ },
+ {
+ N(CGROUP_SOCKOPT, struct bpf_sockopt, optval_end),
+ .read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::optval_end);",
+ .expected_attach_type = BPF_CGROUP_GETSOCKOPT,
+ },
+};
+
+#undef N
+
+static regex_t *ident_regex;
+static regex_t *field_regex;
+
+static char *skip_space(char *str)
+{
+ while (*str && isspace(*str))
+ ++str;
+ return str;
+}
+
+static char *skip_space_and_semi(char *str)
+{
+ while (*str && (isspace(*str) || *str == ';'))
+ ++str;
+ return str;
+}
+
+static char *match_str(char *str, char *prefix)
+{
+ while (*str && *prefix && *str == *prefix) {
+ ++str;
+ ++prefix;
+ }
+ if (*prefix)
+ return NULL;
+ return str;
+}
+
+static char *match_number(char *str, int num)
+{
+ char *next;
+ int snum = strtol(str, &next, 10);
+
+ if (next - str == 0 || num != snum)
+ return NULL;
+
+ return next;
+}
+
+static int find_field_offset_aux(struct btf *btf, int btf_id, char *field_name, int off)
+{
+ const struct btf_type *type = btf__type_by_id(btf, btf_id);
+ const struct btf_member *m;
+ __u16 mnum;
+ int i;
+
+ if (!type) {
+ PRINT_FAIL("Can't find btf_type for id %d\n", btf_id);
+ return -1;
+ }
+
+ if (!btf_is_struct(type) && !btf_is_union(type)) {
+ PRINT_FAIL("BTF id %d is not struct or union\n", btf_id);
+ return -1;
+ }
+
+ m = btf_members(type);
+ mnum = btf_vlen(type);
+
+ for (i = 0; i < mnum; ++i, ++m) {
+ const char *mname = btf__name_by_offset(btf, m->name_off);
+
+ if (strcmp(mname, "") == 0) {
+ int msize = find_field_offset_aux(btf, m->type, field_name,
+ off + m->offset);
+ if (msize >= 0)
+ return msize;
+ }
+
+ if (strcmp(mname, field_name))
+ continue;
+
+ return (off + m->offset) / 8;
+ }
+
+ return -1;
+}
+
+static int find_field_offset(struct btf *btf, char *pattern, regmatch_t *matches)
+{
+ int type_sz = matches[1].rm_eo - matches[1].rm_so;
+ int field_sz = matches[2].rm_eo - matches[2].rm_so;
+ char *type = pattern + matches[1].rm_so;
+ char *field = pattern + matches[2].rm_so;
+ char field_str[128] = {};
+ char type_str[128] = {};
+ int btf_id, field_offset;
+
+ if (type_sz >= sizeof(type_str)) {
+ PRINT_FAIL("Malformed pattern: type ident is too long: %d\n", type_sz);
+ return -1;
+ }
+
+ if (field_sz >= sizeof(field_str)) {
+ PRINT_FAIL("Malformed pattern: field ident is too long: %d\n", field_sz);
+ return -1;
+ }
+
+ strncpy(type_str, type, type_sz);
+ strncpy(field_str, field, field_sz);
+ btf_id = btf__find_by_name(btf, type_str);
+ if (btf_id < 0) {
+ PRINT_FAIL("No BTF info for type %s\n", type_str);
+ return -1;
+ }
+
+ field_offset = find_field_offset_aux(btf, btf_id, field_str, 0);
+ if (field_offset < 0) {
+ PRINT_FAIL("No BTF info for field %s::%s\n", type_str, field_str);
+ return -1;
+ }
+
+ return field_offset;
+}
+
+static regex_t *compile_regex(char *pat)
+{
+ regex_t *re;
+ int err;
+
+ re = malloc(sizeof(regex_t));
+ if (!re) {
+ PRINT_FAIL("Can't alloc regex\n");
+ return NULL;
+ }
+
+ err = regcomp(re, pat, REG_EXTENDED);
+ if (err) {
+ char errbuf[512];
+
+ regerror(err, re, errbuf, sizeof(errbuf));
+ PRINT_FAIL("Can't compile regex: %s\n", errbuf);
+ free(re);
+ return NULL;
+ }
+
+ return re;
+}
+
+static void free_regex(regex_t *re)
+{
+ if (!re)
+ return;
+
+ regfree(re);
+ free(re);
+}
+
+static u32 max_line_len(char *str)
+{
+ u32 max_line = 0;
+ char *next = str;
+
+ while (next) {
+ next = strchr(str, '\n');
+ if (next) {
+ max_line = max_t(u32, max_line, (next - str));
+ str = next + 1;
+ } else {
+ max_line = max_t(u32, max_line, strlen(str));
+ }
+ }
+
+ return min(max_line, 60u);
+}
+
+/* Print strings `pattern_origin` and `text_origin` side by side,
+ * assuming `pattern_pos` and `text_pos` designate the locations within the
+ * corresponding origin strings where the match diverges.
+ * The output should look like:
+ *
+ * Can't match disassembly(left) with pattern(right):
+ * r2 = *(u64 *)(r1 +0) ; $dst = *(u64 *)($ctx + bpf_sockopt_kern::sk1)
+ * ^ ^
+ * r0 = 0 ;
+ * exit ;
+ */
+static void print_match_error(FILE *out,
+ char *pattern_origin, char *text_origin,
+ char *pattern_pos, char *text_pos)
+{
+ char *pattern = pattern_origin;
+ char *text = text_origin;
+ int middle = max_line_len(text) + 2;
+
+ fprintf(out, "Can't match disassembly(left) with pattern(right):\n");
+ while (*pattern || *text) {
+ int column = 0;
+ int mark1 = -1;
+ int mark2 = -1;
+
+ /* Print one line from text */
+ while (*text && *text != '\n') {
+ if (text == text_pos)
+ mark1 = column;
+ fputc(*text, out);
+ ++text;
+ ++column;
+ }
+ if (text == text_pos)
+ mark1 = column;
+
+ /* Pad to the middle */
+ while (column < middle) {
+ fputc(' ', out);
+ ++column;
+ }
+ fputs("; ", out);
+ column += 3;
+
+ /* Print one line from pattern, pattern lines are terminated by ';' */
+ while (*pattern && *pattern != ';') {
+ if (pattern == pattern_pos)
+ mark2 = column;
+ fputc(*pattern, out);
+ ++pattern;
+ ++column;
+ }
+ if (pattern == pattern_pos)
+ mark2 = column;
+
+ fputc('\n', out);
+ if (*pattern)
+ ++pattern;
+ if (*text)
+ ++text;
+
+ /* If pattern and text diverge at this line, print an
+ * additional line with '^' marks, highlighting
+ * the positions where the match fails.
+ */
+ if (mark1 > 0 || mark2 > 0) {
+ for (column = 0; column <= max(mark1, mark2); ++column) {
+ if (column == mark1 || column == mark2)
+ fputc('^', out);
+ else
+ fputc(' ', out);
+ }
+ fputc('\n', out);
+ }
+ }
+}
+
+/* Test if `text` matches `pattern`. Pattern consists of the following elements:
+ *
+ * - Field offset references:
+ *
+ * <type>::<field>
+ *
+ * When such a reference is encountered, BTF is used to compute the
+ * numerical value of the offset of <field> in <type>. The `text` is
+ * expected to contain the matching numerical value.
+ *
+ * - Field groups:
+ *
+ * $(<type>::<field> [+ <type>::<field>]*)
+ *
+ * Allows specifying an offset that is a sum of multiple field offsets.
+ * The `text` is expected to contain the matching numerical value.
+ *
+ * - Variable references, e.g. `$src`, `$dst`, `$ctx`.
+ * These are substitutions specified in `reg_map` array.
+ * If a substring of pattern is equal to `reg_map[i][0]` the `text` is
+ * expected to contain `reg_map[i][1]` in the matching position.
+ *
+ * - Whitespace is ignored; ';' counts as whitespace for `pattern`.
+ *
+ * - For any other characters, `pattern` and `text` should match one-to-one.
+ *
+ * Example of a pattern:
+ *
+ * __________ fields group ________________
+ * ' '
+ * *(u16 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::tc_classid)) = $src;
+ * ^^^^ '______________________'
+ * variable reference field offset reference
+ */
+static bool match_pattern(struct btf *btf, char *pattern, char *text, char *reg_map[][2])
+{
+ char *pattern_origin = pattern;
+ char *text_origin = text;
+ regmatch_t matches[3];
+
+_continue:
+ while (*pattern) {
+ if (!*text)
+ goto err;
+
+ /* Skip whitespace */
+ if (isspace(*pattern) || *pattern == ';') {
+ if (!isspace(*text) && text != text_origin && isalnum(text[-1]))
+ goto err;
+ pattern = skip_space_and_semi(pattern);
+ text = skip_space(text);
+ continue;
+ }
+
+ /* Check for variable references */
+ for (int i = 0; reg_map[i][0]; ++i) {
+ char *pattern_next, *text_next;
+
+ pattern_next = match_str(pattern, reg_map[i][0]);
+ if (!pattern_next)
+ continue;
+
+ text_next = match_str(text, reg_map[i][1]);
+ if (!text_next)
+ goto err;
+
+ pattern = pattern_next;
+ text = text_next;
+ goto _continue;
+ }
+
+ /* Match field group:
+ * $(sk_buff::cb + qdisc_skb_cb::tc_classid)
+ */
+ if (strncmp(pattern, "$(", 2) == 0) {
+ char *group_start = pattern, *text_next;
+ int acc_offset = 0;
+
+ pattern += 2;
+
+ for (;;) {
+ int field_offset;
+
+ pattern = skip_space(pattern);
+ if (!*pattern) {
+ PRINT_FAIL("Unexpected end of pattern\n");
+ goto err;
+ }
+
+ if (*pattern == ')') {
+ ++pattern;
+ break;
+ }
+
+ if (*pattern == '+') {
+ ++pattern;
+ continue;
+ }
+
+ if (regexec(field_regex, pattern, 3, matches, 0) != 0) {
+ PRINT_FAIL("Field reference expected\n");
+ goto err;
+ }
+
+ field_offset = find_field_offset(btf, pattern, matches);
+ if (field_offset < 0)
+ goto err;
+
+ pattern += matches[0].rm_eo;
+ acc_offset += field_offset;
+ }
+
+ text_next = match_number(text, acc_offset);
+ if (!text_next) {
+ PRINT_FAIL("No match for group offset %.*s (%d)\n",
+ (int)(pattern - group_start),
+ group_start,
+ acc_offset);
+ goto err;
+ }
+ text = text_next;
+ }
+
+ /* Match field reference:
+ * sk_buff::cb
+ */
+ if (regexec(field_regex, pattern, 3, matches, 0) == 0) {
+ int field_offset;
+ char *text_next;
+
+ field_offset = find_field_offset(btf, pattern, matches);
+ if (field_offset < 0)
+ goto err;
+
+ text_next = match_number(text, field_offset);
+ if (!text_next) {
+ PRINT_FAIL("No match for field offset %.*s (%d)\n",
+ (int)matches[0].rm_eo, pattern, field_offset);
+ goto err;
+ }
+
+ pattern += matches[0].rm_eo;
+ text = text_next;
+ continue;
+ }
+
+ /* If pattern points to an identifier not followed by '::',
+ * skip the identifier to avoid n^2 application of the
+ * field reference rule.
+ */
+ if (regexec(ident_regex, pattern, 1, matches, 0) == 0) {
+ if (strncmp(pattern, text, matches[0].rm_eo) != 0)
+ goto err;
+
+ pattern += matches[0].rm_eo;
+ text += matches[0].rm_eo;
+ continue;
+ }
+
+ /* Match literally */
+ if (*pattern != *text)
+ goto err;
+
+ ++pattern;
+ ++text;
+ }
+
+ return true;
+
+err:
+ test__fail();
+ print_match_error(stdout, pattern_origin, text_origin, pattern, text);
+ return false;
+}
+
+struct prog_info {
+ char *prog_kind;
+ enum bpf_prog_type prog_type;
+ enum bpf_attach_type expected_attach_type;
+ struct bpf_insn *prog;
+ u32 prog_len;
+};
+
+static void match_program(struct btf *btf,
+ struct prog_info *pinfo,
+ char *pattern,
+ char *reg_map[][2],
+ bool skip_first_insn)
+{
+ struct bpf_insn *buf = NULL, *insn, *insn_end;
+ int err = 0, prog_fd = 0;
+ FILE *prog_out = NULL;
+ char insn_buf[64];
+ char *text = NULL;
+ __u32 cnt = 0;
+
+ text = calloc(MAX_PROG_TEXT_SZ, 1);
+ if (!text) {
+ PRINT_FAIL("Can't allocate %d bytes\n", MAX_PROG_TEXT_SZ);
+ goto out;
+ }
+
+ // TODO: log level
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+ opts.log_buf = text;
+ opts.log_size = MAX_PROG_TEXT_SZ;
+ opts.log_level = 1 | 2 | 4;
+ opts.expected_attach_type = pinfo->expected_attach_type;
+
+ prog_fd = bpf_prog_load(pinfo->prog_type, NULL, "GPL",
+ pinfo->prog, pinfo->prog_len, &opts);
+ if (prog_fd < 0) {
+ PRINT_FAIL("Can't load program, errno %d (%s), verifier log:\n%s\n",
+ errno, strerror(errno), text);
+ goto out;
+ }
+
+ memset(text, 0, MAX_PROG_TEXT_SZ);
+
+ err = get_xlated_program(prog_fd, &buf, &cnt);
+ if (err) {
+ PRINT_FAIL("Can't load back BPF program\n");
+ goto out;
+ }
+
+ prog_out = fmemopen(text, MAX_PROG_TEXT_SZ - 1, "w");
+ if (!prog_out) {
+ PRINT_FAIL("Can't open memory stream\n");
+ goto out;
+ }
+ insn_end = buf + cnt;
+ insn = buf + (skip_first_insn ? 1 : 0);
+ while (insn < insn_end) {
+ insn = disasm_insn(insn, insn_buf, sizeof(insn_buf));
+ fprintf(prog_out, "%s\n", insn_buf);
+ }
+ fclose(prog_out);
+
+ ASSERT_TRUE(match_pattern(btf, pattern, text, reg_map),
+ pinfo->prog_kind);
+
+out:
+ if (prog_fd)
+ close(prog_fd);
+ free(buf);
+ free(text);
+}
+
+static void run_one_testcase(struct btf *btf, struct test_case *test)
+{
+ struct prog_info pinfo = {};
+ int bpf_sz;
+
+ if (!test__start_subtest(test->name))
+ return;
+
+ switch (test->field_sz) {
+ case 8:
+ bpf_sz = BPF_DW;
+ break;
+ case 4:
+ bpf_sz = BPF_W;
+ break;
+ case 2:
+ bpf_sz = BPF_H;
+ break;
+ case 1:
+ bpf_sz = BPF_B;
+ break;
+ default:
+ PRINT_FAIL("Unexpected field size: %d, want 8,4,2 or 1\n", test->field_sz);
+ return;
+ }
+
+ pinfo.prog_type = test->prog_type;
+ pinfo.expected_attach_type = test->expected_attach_type;
+
+ if (test->read) {
+ struct bpf_insn ldx_prog[] = {
+ BPF_LDX_MEM(bpf_sz, BPF_REG_2, BPF_REG_1, test->field_offset),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ char *reg_map[][2] = {
+ { "$ctx", "r1" },
+ { "$dst", "r2" },
+ {}
+ };
+
+ pinfo.prog_kind = "LDX";
+ pinfo.prog = ldx_prog;
+ pinfo.prog_len = ARRAY_SIZE(ldx_prog);
+ match_program(btf, &pinfo, test->read, reg_map, false);
+ }
+
+ if (test->write || test->write_st || test->write_stx) {
+ struct bpf_insn stx_prog[] = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(bpf_sz, BPF_REG_1, BPF_REG_2, test->field_offset),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ char *stx_reg_map[][2] = {
+ { "$ctx", "r1" },
+ { "$src", "r2" },
+ {}
+ };
+ struct bpf_insn st_prog[] = {
+ BPF_ST_MEM(bpf_sz, BPF_REG_1, test->field_offset,
+ test->st_value.use ? test->st_value.value : 42),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ char *st_reg_map[][2] = {
+ { "$ctx", "r1" },
+ { "$src", "42" },
+ {}
+ };
+
+ if (test->write || test->write_stx) {
+ char *pattern = test->write_stx ? test->write_stx : test->write;
+
+ pinfo.prog_kind = "STX";
+ pinfo.prog = stx_prog;
+ pinfo.prog_len = ARRAY_SIZE(stx_prog);
+ match_program(btf, &pinfo, pattern, stx_reg_map, true);
+ }
+
+ if (test->write || test->write_st) {
+ char *pattern = test->write_st ? test->write_st : test->write;
+
+ pinfo.prog_kind = "ST";
+ pinfo.prog = st_prog;
+ pinfo.prog_len = ARRAY_SIZE(st_prog);
+ match_program(btf, &pinfo, pattern, st_reg_map, false);
+ }
+ }
+
+ test__end_subtest();
+}
+
+void test_ctx_rewrite(void)
+{
+ struct btf *btf;
+ int i;
+
+ field_regex = compile_regex("^([[:alpha:]_][[:alnum:]_]+)::([[:alpha:]_][[:alnum:]_]+)");
+ ident_regex = compile_regex("^[[:alpha:]_][[:alnum:]_]+");
+ if (!field_regex || !ident_regex)
+ return;
+
+ btf = btf__load_vmlinux_btf();
+ if (!btf) {
+ PRINT_FAIL("Can't load vmlinux BTF, errno %d (%s)\n", errno, strerror(errno));
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); ++i)
+ run_one_testcase(btf, &test_cases[i]);
+
+out:
+ btf__free(btf);
+ free_regex(field_regex);
+ free_regex(ident_regex);
+}
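
For reference, the N() macro fills in every field except the patterns; the
"mark" entry above expands roughly as follows (a sketch derived from the
macro definition, not verified preprocessor output):

    {
            .name = "SCHED_CLS.mark",
            .prog_type = BPF_PROG_TYPE_SCHED_CLS,
            .field_offset = offsetof(struct __sk_buff, mark),
            .field_sz = sizeof(typeof(((struct __sk_buff *)NULL)->mark)),
            .read = "$dst = *(u32 *)($ctx + sk_buff::mark);",
            .write = "*(u32 *)($ctx + sk_buff::mark) = $src;",
    },
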
diff --git a/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c b/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c
new file mode 100644
index 000000000000..b2dfc5954aea
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include <test_progs.h>
+#include "test_custom_sec_handlers.skel.h"
+
+#define COOKIE_ABC1 1
+#define COOKIE_ABC2 2
+#define COOKIE_CUSTOM 3
+#define COOKIE_FALLBACK 4
+#define COOKIE_KPROBE 5
+
+static int custom_setup_prog(struct bpf_program *prog, long cookie)
+{
+ if (cookie == COOKIE_ABC1)
+ bpf_program__set_autoload(prog, false);
+
+ return 0;
+}
+
+static int custom_prepare_load_prog(struct bpf_program *prog,
+ struct bpf_prog_load_opts *opts, long cookie)
+{
+ if (cookie == COOKIE_FALLBACK)
+ opts->prog_flags |= BPF_F_SLEEPABLE;
+ else if (cookie == COOKIE_ABC1)
+ ASSERT_FALSE(true, "unexpected preload for abc");
+
+ return 0;
+}
+
+static int custom_attach_prog(const struct bpf_program *prog, long cookie,
+ struct bpf_link **link)
+{
+ switch (cookie) {
+ case COOKIE_ABC2:
+ *link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
+ return libbpf_get_error(*link);
+ case COOKIE_CUSTOM:
+ *link = bpf_program__attach_tracepoint(prog, "syscalls", "sys_enter_nanosleep");
+ return libbpf_get_error(*link);
+ case COOKIE_KPROBE:
+ case COOKIE_FALLBACK:
+ /* no auto-attach for SEC("xyz") and SEC("kprobe") */
+ *link = NULL;
+ return 0;
+ default:
+ ASSERT_FALSE(true, "unexpected cookie");
+ return -EINVAL;
+ }
+}
+
+static int abc1_id;
+static int abc2_id;
+static int custom_id;
+static int fallback_id;
+static int kprobe_id;
+
+__attribute__((constructor))
+static void register_sec_handlers(void)
+{
+ LIBBPF_OPTS(libbpf_prog_handler_opts, abc1_opts,
+ .cookie = COOKIE_ABC1,
+ .prog_setup_fn = custom_setup_prog,
+ .prog_prepare_load_fn = custom_prepare_load_prog,
+ .prog_attach_fn = NULL,
+ );
+ LIBBPF_OPTS(libbpf_prog_handler_opts, abc2_opts,
+ .cookie = COOKIE_ABC2,
+ .prog_setup_fn = custom_setup_prog,
+ .prog_prepare_load_fn = custom_prepare_load_prog,
+ .prog_attach_fn = custom_attach_prog,
+ );
+ LIBBPF_OPTS(libbpf_prog_handler_opts, custom_opts,
+ .cookie = COOKIE_CUSTOM,
+ .prog_setup_fn = NULL,
+ .prog_prepare_load_fn = NULL,
+ .prog_attach_fn = custom_attach_prog,
+ );
+
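+	/* "abc" matches SEC("abc") exactly, "abc/" requires a suffix
+	 * (SEC("abc/something")), and "custom+" matches both SEC("custom")
+	 * and SEC("custom/something")
+	 */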
+ abc1_id = libbpf_register_prog_handler("abc", BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc1_opts);
+ abc2_id = libbpf_register_prog_handler("abc/", BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc2_opts);
+ custom_id = libbpf_register_prog_handler("custom+", BPF_PROG_TYPE_TRACEPOINT, 0, &custom_opts);
+}
+
+__attribute__((destructor))
+static void unregister_sec_handlers(void)
+{
+ libbpf_unregister_prog_handler(abc1_id);
+ libbpf_unregister_prog_handler(abc2_id);
+ libbpf_unregister_prog_handler(custom_id);
+}
+
+void test_custom_sec_handlers(void)
+{
+ LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
+ .prog_setup_fn = custom_setup_prog,
+ .prog_prepare_load_fn = custom_prepare_load_prog,
+ .prog_attach_fn = custom_attach_prog,
+ );
+ struct test_custom_sec_handlers* skel;
+ int err;
+
+ ASSERT_GT(abc1_id, 0, "abc1_id");
+ ASSERT_GT(abc2_id, 0, "abc2_id");
+ ASSERT_GT(custom_id, 0, "custom_id");
+
+	/* override libbpf's handling of SEC("kprobe/...") but also allow pure
+ * SEC("kprobe") due to "kprobe+" specifier. Register it as
+ * TRACEPOINT, just for fun.
+ */
+ opts.cookie = COOKIE_KPROBE;
+ kprobe_id = libbpf_register_prog_handler("kprobe+", BPF_PROG_TYPE_TRACEPOINT, 0, &opts);
+ /* fallback treats everything as BPF_PROG_TYPE_SYSCALL program to test
+ * setting custom BPF_F_SLEEPABLE bit in preload handler
+ */
+ opts.cookie = COOKIE_FALLBACK;
+ fallback_id = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_SYSCALL, 0, &opts);
+
+	if (!ASSERT_GT(fallback_id, 0, "fallback_id") || !ASSERT_GT(kprobe_id, 0, "kprobe_id")) {
+ if (fallback_id > 0)
+ libbpf_unregister_prog_handler(fallback_id);
+ if (kprobe_id > 0)
+ libbpf_unregister_prog_handler(kprobe_id);
+ return;
+ }
+
+ /* open skeleton and validate assumptions */
+ skel = test_custom_sec_handlers__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__type(skel->progs.abc1), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc1_type");
+ ASSERT_FALSE(bpf_program__autoload(skel->progs.abc1), "abc1_autoload");
+
+ ASSERT_EQ(bpf_program__type(skel->progs.abc2), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc2_type");
+ ASSERT_EQ(bpf_program__type(skel->progs.custom1), BPF_PROG_TYPE_TRACEPOINT, "custom1_type");
+ ASSERT_EQ(bpf_program__type(skel->progs.custom2), BPF_PROG_TYPE_TRACEPOINT, "custom2_type");
+ ASSERT_EQ(bpf_program__type(skel->progs.kprobe1), BPF_PROG_TYPE_TRACEPOINT, "kprobe1_type");
+ ASSERT_EQ(bpf_program__type(skel->progs.xyz), BPF_PROG_TYPE_SYSCALL, "xyz_type");
+
+ skel->rodata->my_pid = getpid();
+
+ /* now attempt to load everything */
+ err = test_custom_sec_handlers__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ /* now try to auto-attach everything */
+ err = test_custom_sec_handlers__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+	skel->links.xyz = bpf_program__attach(skel->progs.xyz);
+ ASSERT_EQ(errno, EOPNOTSUPP, "xyz_attach_err");
+ ASSERT_ERR_PTR(skel->links.xyz, "xyz_attach");
+
+ /* trigger programs */
+ usleep(1);
+
+ /* SEC("abc") is set to not auto-loaded */
+ ASSERT_FALSE(skel->bss->abc1_called, "abc1_called");
+ ASSERT_TRUE(skel->bss->abc2_called, "abc2_called");
+ ASSERT_TRUE(skel->bss->custom1_called, "custom1_called");
+ ASSERT_TRUE(skel->bss->custom2_called, "custom2_called");
+ /* SEC("kprobe") shouldn't be auto-attached */
+ ASSERT_FALSE(skel->bss->kprobe1_called, "kprobe1_called");
+ /* SEC("xyz") shouldn't be auto-attached */
+ ASSERT_FALSE(skel->bss->xyz_called, "xyz_called");
+
+cleanup:
+ test_custom_sec_handlers__destroy(skel);
+
+ ASSERT_OK(libbpf_unregister_prog_handler(fallback_id), "unregister_fallback");
+ ASSERT_OK(libbpf_unregister_prog_handler(kprobe_id), "unregister_kprobe");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/d_path.c b/tools/testing/selftests/bpf/prog_tests/d_path.c
index 0a577a248d34..ccc768592e66 100644
--- a/tools/testing/selftests/bpf/prog_tests/d_path.c
+++ b/tools/testing/selftests/bpf/prog_tests/d_path.c
@@ -9,6 +9,19 @@
#define MAX_FILES 7
#include "test_d_path.skel.h"
+#include "test_d_path_check_rdonly_mem.skel.h"
+#include "test_d_path_check_types.skel.h"
+
+/* sys_close_range has not been around for a long time, so make
+ * sure we can call it on systems with older glibc
+ */
+#ifndef __NR_close_range
+#ifdef __alpha__
+#define __NR_close_range 546
+#else
+#define __NR_close_range 436
+#endif
+#endif
static int duration;
@@ -88,7 +101,11 @@ static int trigger_fstat_events(pid_t pid)
fstat(indicatorfd, &fileStat);
out_close:
- /* triggers filp_close */
+ /* sys_close no longer triggers filp_close, but we can
+ * call sys_close_range instead which still does
+ */
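+/* close_range(first, last, 0) closes every fd in [first, last] */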
+#define close(fd) syscall(__NR_close_range, fd, fd, 0)
+
close(pipefd[0]);
close(pipefd[1]);
close(sockfd);
@@ -96,10 +113,12 @@ out_close:
close(devfd);
close(localfd);
close(indicatorfd);
+
+#undef close
return ret;
}
-void test_d_path(void)
+static void test_d_path_basic(void)
{
struct test_d_path__bss *bss;
struct test_d_path *skel;
@@ -155,3 +174,35 @@ void test_d_path(void)
cleanup:
test_d_path__destroy(skel);
}
+
+static void test_d_path_check_rdonly_mem(void)
+{
+ struct test_d_path_check_rdonly_mem *skel;
+
+ skel = test_d_path_check_rdonly_mem__open_and_load();
+ ASSERT_ERR_PTR(skel, "unexpected_load_overwriting_rdonly_mem");
+
+ test_d_path_check_rdonly_mem__destroy(skel);
+}
+
+static void test_d_path_check_types(void)
+{
+ struct test_d_path_check_types *skel;
+
+ skel = test_d_path_check_types__open_and_load();
+ ASSERT_ERR_PTR(skel, "unexpected_load_passing_wrong_type");
+
+ test_d_path_check_types__destroy(skel);
+}
+
+void test_d_path(void)
+{
+ if (test__start_subtest("basic"))
+ test_d_path_basic();
+
+ if (test__start_subtest("check_rdonly_mem"))
+ test_d_path_check_rdonly_mem();
+
+ if (test__start_subtest("check_alloc_mem"))
+ test_d_path_check_types();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
new file mode 100644
index 000000000000..d79f398ec6b7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <net/if.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "decap_sanity.skel.h"
+
+#define NS_TEST "decap_sanity_ns"
+#define IPV6_IFACE_ADDR "face::1"
+#define UDP_TEST_PORT 7777
+
+void test_decap_sanity(void)
+{
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_hook, .attach_point = BPF_TC_EGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, tc_attach);
+ struct nstoken *nstoken = NULL;
+ struct decap_sanity *skel;
+ struct sockaddr_in6 addr;
+ socklen_t addrlen;
+ char buf[128] = {};
+ int sockfd, err;
+
+ skel = decap_sanity__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel open_and_load"))
+ return;
+
+ SYS(fail, "ip netns add %s", NS_TEST);
+ SYS(fail, "ip -net %s -6 addr add %s/128 dev lo nodad", NS_TEST, IPV6_IFACE_ADDR);
+ SYS(fail, "ip -net %s link set dev lo up", NS_TEST);
+
+ nstoken = open_netns(NS_TEST);
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto fail;
+
+ qdisc_hook.ifindex = if_nametoindex("lo");
+ if (!ASSERT_GT(qdisc_hook.ifindex, 0, "if_nametoindex lo"))
+ goto fail;
+
+ err = bpf_tc_hook_create(&qdisc_hook);
+ if (!ASSERT_OK(err, "create qdisc hook"))
+ goto fail;
+
+ tc_attach.prog_fd = bpf_program__fd(skel->progs.decap_sanity);
+ err = bpf_tc_attach(&qdisc_hook, &tc_attach);
+ if (!ASSERT_OK(err, "attach filter"))
+ goto fail;
+
+ addrlen = sizeof(addr);
+ err = make_sockaddr(AF_INET6, IPV6_IFACE_ADDR, UDP_TEST_PORT,
+ (void *)&addr, &addrlen);
+ if (!ASSERT_OK(err, "make_sockaddr"))
+ goto fail;
+ sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
+ if (!ASSERT_NEQ(sockfd, -1, "socket"))
+ goto fail;
+ err = sendto(sockfd, buf, sizeof(buf), 0, (void *)&addr, addrlen);
+ close(sockfd);
+ if (!ASSERT_EQ(err, sizeof(buf), "send"))
+ goto fail;
+
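+	/* after the egress prog decapsulates the packet, the skb should
+	 * go from CHECKSUM_PARTIAL to CHECKSUM_NONE without a stale
+	 * csum_start left behind, which is what these flags record
+	 */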
+ ASSERT_TRUE(skel->bss->init_csum_partial, "init_csum_partial");
+ ASSERT_TRUE(skel->bss->final_csum_none, "final_csum_none");
+ ASSERT_FALSE(skel->bss->broken_csum_start, "broken_csum_start");
+
+fail:
+ if (nstoken) {
+ bpf_tc_hook_destroy(&qdisc_hook);
+ close_netns(nstoken);
+ }
+ SYS_NOFAIL("ip netns del " NS_TEST);
+ decap_sanity__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/deny_namespace.c b/tools/testing/selftests/bpf/prog_tests/deny_namespace.c
new file mode 100644
index 000000000000..1bc6241b755b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/deny_namespace.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <test_progs.h>
+#include "test_deny_namespace.skel.h"
+#include <sched.h>
+#include "cap_helpers.h"
+#include <stdio.h>
+
+static int wait_for_pid(pid_t pid)
+{
+ int status, ret;
+
+again:
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ if (errno == EINTR)
+ goto again;
+
+ return -1;
+ }
+
+ if (!WIFEXITED(status))
+ return -1;
+
+ return WEXITSTATUS(status);
+}
+
+/* negative return value -> some internal error
+ * positive return value -> userns creation failed
+ * 0 -> userns creation succeeded
+ */
+static int create_user_ns(void)
+{
+ pid_t pid;
+
+ pid = fork();
+ if (pid < 0)
+ return -1;
+
+ if (pid == 0) {
+ if (unshare(CLONE_NEWUSER))
+ _exit(EXIT_FAILURE);
+ _exit(EXIT_SUCCESS);
+ }
+
+ return wait_for_pid(pid);
+}
+
+static void test_userns_create_bpf(void)
+{
+ __u32 cap_mask = 1ULL << CAP_SYS_ADMIN;
+ __u64 old_caps = 0;
+
+ cap_enable_effective(cap_mask, &old_caps);
+
+ ASSERT_OK(create_user_ns(), "priv new user ns");
+
+ cap_disable_effective(cap_mask, &old_caps);
+
+ ASSERT_EQ(create_user_ns(), EPERM, "unpriv new user ns");
+
+ if (cap_mask & old_caps)
+ cap_enable_effective(cap_mask, NULL);
+}
+
+static void test_unpriv_userns_create_no_bpf(void)
+{
+ __u32 cap_mask = 1ULL << CAP_SYS_ADMIN;
+ __u64 old_caps = 0;
+
+ cap_disable_effective(cap_mask, &old_caps);
+
+ ASSERT_OK(create_user_ns(), "no-bpf unpriv new user ns");
+
+ if (cap_mask & old_caps)
+ cap_enable_effective(cap_mask, NULL);
+}
+
+void test_deny_namespace(void)
+{
+ struct test_deny_namespace *skel = NULL;
+ int err;
+
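+	/* must run before the skeleton's LSM prog is attached below, so
+	 * unprivileged userns creation is still permitted
+	 */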
+ if (test__start_subtest("unpriv_userns_create_no_bpf"))
+ test_unpriv_userns_create_no_bpf();
+
+ skel = test_deny_namespace__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel load"))
+ goto close_prog;
+
+ err = test_deny_namespace__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto close_prog;
+
+ if (test__start_subtest("userns_create_bpf"))
+ test_userns_create_bpf();
+
+ test_deny_namespace__detach(skel);
+
+close_prog:
+ test_deny_namespace__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c b/tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c
new file mode 100644
index 000000000000..6c2b0c3dbcd8
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Google */
+
+#include <test_progs.h>
+#include <bpf/libbpf.h>
+#include <bpf/btf.h>
+#include "dmabuf_iter.skel.h"
+
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/udmabuf.h>
+
+static int udmabuf = -1;
+static const char udmabuf_test_buffer_name[DMA_BUF_NAME_LEN] = "udmabuf_test_buffer_for_iter";
+static size_t udmabuf_test_buffer_size;
+static int sysheap_dmabuf = -1;
+static const char sysheap_test_buffer_name[DMA_BUF_NAME_LEN] = "sysheap_test_buffer_for_iter";
+static size_t sysheap_test_buffer_size;
+
+static int create_udmabuf(void)
+{
+ struct udmabuf_create create;
+ int dev_udmabuf, memfd, local_udmabuf;
+
+ udmabuf_test_buffer_size = 10 * getpagesize();
+
+ if (!ASSERT_LE(sizeof(udmabuf_test_buffer_name), DMA_BUF_NAME_LEN, "NAMETOOLONG"))
+ return -1;
+
+ memfd = memfd_create("memfd_test", MFD_ALLOW_SEALING);
+ if (!ASSERT_OK_FD(memfd, "memfd_create"))
+ return -1;
+
+ if (!ASSERT_OK(ftruncate(memfd, udmabuf_test_buffer_size), "ftruncate"))
+ goto close_memfd;
+
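+	/* udmabuf only accepts memfds sealed with F_SEAL_SHRINK */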
+ if (!ASSERT_OK(fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK), "seal"))
+ goto close_memfd;
+
+ dev_udmabuf = open("/dev/udmabuf", O_RDONLY);
+ if (!ASSERT_OK_FD(dev_udmabuf, "open udmabuf"))
+ goto close_memfd;
+
+ memset(&create, 0, sizeof(create));
+ create.memfd = memfd;
+ create.flags = UDMABUF_FLAGS_CLOEXEC;
+ create.offset = 0;
+ create.size = udmabuf_test_buffer_size;
+
+ local_udmabuf = ioctl(dev_udmabuf, UDMABUF_CREATE, &create);
+ close(dev_udmabuf);
+ if (!ASSERT_OK_FD(local_udmabuf, "udmabuf_create"))
+ goto close_memfd;
+
+ if (!ASSERT_OK(ioctl(local_udmabuf, DMA_BUF_SET_NAME_B, udmabuf_test_buffer_name), "name"))
+ goto close_udmabuf;
+
+ return local_udmabuf;
+
+close_udmabuf:
+ close(local_udmabuf);
+close_memfd:
+ close(memfd);
+ return -1;
+}
+
+static int create_sys_heap_dmabuf(void)
+{
+ sysheap_test_buffer_size = 20 * getpagesize();
+
+ struct dma_heap_allocation_data data = {
+ .len = sysheap_test_buffer_size,
+ .fd = 0,
+ .fd_flags = O_RDWR | O_CLOEXEC,
+ .heap_flags = 0,
+ };
+ int heap_fd, ret;
+
+ if (!ASSERT_LE(sizeof(sysheap_test_buffer_name), DMA_BUF_NAME_LEN, "NAMETOOLONG"))
+ return -1;
+
+ heap_fd = open("/dev/dma_heap/system", O_RDONLY);
+ if (!ASSERT_OK_FD(heap_fd, "open dma heap"))
+ return -1;
+
+ ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
+ close(heap_fd);
+ if (!ASSERT_OK(ret, "syheap alloc"))
+ return -1;
+
+ if (!ASSERT_OK(ioctl(data.fd, DMA_BUF_SET_NAME_B, sysheap_test_buffer_name), "name"))
+ goto close_sysheap_dmabuf;
+
+ return data.fd;
+
+close_sysheap_dmabuf:
+ close(data.fd);
+ return -1;
+}
+
+static int create_test_buffers(void)
+{
+ udmabuf = create_udmabuf();
+ sysheap_dmabuf = create_sys_heap_dmabuf();
+
+ if (udmabuf < 0 || sysheap_dmabuf < 0)
+ return -1;
+
+ return 0;
+}
+
+static void destroy_test_buffers(void)
+{
+ close(udmabuf);
+ udmabuf = -1;
+
+ close(sysheap_dmabuf);
+ sysheap_dmabuf = -1;
+}
+
+enum Fields { INODE, SIZE, NAME, EXPORTER, FIELD_COUNT };
+struct DmabufInfo {
+ unsigned long inode;
+ unsigned long size;
+ char name[DMA_BUF_NAME_LEN];
+ char exporter[32];
+};
+
+static bool check_dmabuf_info(const struct DmabufInfo *bufinfo,
+ unsigned long size,
+ const char *name, const char *exporter)
+{
+ return size == bufinfo->size &&
+ !strcmp(name, bufinfo->name) &&
+ !strcmp(exporter, bufinfo->exporter);
+}
+
+static void subtest_dmabuf_iter_check_no_infinite_reads(struct dmabuf_iter *skel)
+{
+ int iter_fd;
+ char buf[256];
+
+ iter_fd = bpf_iter_create(bpf_link__fd(skel->links.dmabuf_collector));
+ if (!ASSERT_OK_FD(iter_fd, "iter_create"))
+ return;
+
+ while (read(iter_fd, buf, sizeof(buf)) > 0)
+ ; /* Read out all contents */
+
+ /* Next reads should return 0 */
+ ASSERT_EQ(read(iter_fd, buf, sizeof(buf)), 0, "read");
+
+ close(iter_fd);
+}
+
+static void subtest_dmabuf_iter_check_default_iter(struct dmabuf_iter *skel)
+{
+ bool found_test_sysheap_dmabuf = false;
+ bool found_test_udmabuf = false;
+ struct DmabufInfo bufinfo;
+ size_t linesize = 0;
+ char *line = NULL;
+ FILE *iter_file;
+ int iter_fd, f = INODE;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(skel->links.dmabuf_collector));
+ if (!ASSERT_OK_FD(iter_fd, "iter_create"))
+ return;
+
+ iter_file = fdopen(iter_fd, "r");
+ if (!ASSERT_OK_PTR(iter_file, "fdopen"))
+ goto close_iter_fd;
+
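+	/* the default iterator prints one field per line, cycling
+	 * inode -> size -> name -> exporter for every dmabuf
+	 */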
+ while (getline(&line, &linesize, iter_file) != -1) {
+ if (f % FIELD_COUNT == INODE) {
+ ASSERT_EQ(sscanf(line, "%ld", &bufinfo.inode), 1,
+ "read inode");
+ } else if (f % FIELD_COUNT == SIZE) {
+ ASSERT_EQ(sscanf(line, "%ld", &bufinfo.size), 1,
+ "read size");
+ } else if (f % FIELD_COUNT == NAME) {
+ ASSERT_EQ(sscanf(line, "%s", bufinfo.name), 1,
+ "read name");
+ } else if (f % FIELD_COUNT == EXPORTER) {
+ ASSERT_EQ(sscanf(line, "%31s", bufinfo.exporter), 1,
+ "read exporter");
+
+ if (check_dmabuf_info(&bufinfo,
+ sysheap_test_buffer_size,
+ sysheap_test_buffer_name,
+ "system"))
+ found_test_sysheap_dmabuf = true;
+ else if (check_dmabuf_info(&bufinfo,
+ udmabuf_test_buffer_size,
+ udmabuf_test_buffer_name,
+ "udmabuf"))
+ found_test_udmabuf = true;
+ }
+ ++f;
+ }
+
+ ASSERT_EQ(f % FIELD_COUNT, INODE, "number of fields");
+
+ ASSERT_TRUE(found_test_sysheap_dmabuf, "found_test_sysheap_dmabuf");
+ ASSERT_TRUE(found_test_udmabuf, "found_test_udmabuf");
+
+ free(line);
+ fclose(iter_file);
+close_iter_fd:
+ close(iter_fd);
+}
+
+static void subtest_dmabuf_iter_check_open_coded(struct dmabuf_iter *skel, int map_fd)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ char key[DMA_BUF_NAME_LEN];
+ int err, fd;
+ bool found;
+
+ /* No need to attach it, just run it directly */
+ fd = bpf_program__fd(skel->progs.iter_dmabuf_for_each);
+
+ err = bpf_prog_test_run_opts(fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ if (!ASSERT_OK(bpf_map_get_next_key(map_fd, NULL, key), "get next key"))
+ return;
+
+ do {
+ ASSERT_OK(bpf_map_lookup_elem(map_fd, key, &found), "lookup");
+ ASSERT_TRUE(found, "found test buffer");
+ } while (bpf_map_get_next_key(map_fd, key, key));
+}
+
+void test_dmabuf_iter(void)
+{
+ struct dmabuf_iter *skel = NULL;
+ int map_fd;
+ const bool f = false;
+
+ skel = dmabuf_iter__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "dmabuf_iter__open_and_load"))
+ return;
+
+ map_fd = bpf_map__fd(skel->maps.testbuf_hash);
+ if (!ASSERT_OK_FD(map_fd, "map_fd"))
+ goto destroy_skel;
+
+ if (!ASSERT_OK(bpf_map_update_elem(map_fd, udmabuf_test_buffer_name, &f, BPF_ANY),
+ "insert udmabuf"))
+ goto destroy_skel;
+ if (!ASSERT_OK(bpf_map_update_elem(map_fd, sysheap_test_buffer_name, &f, BPF_ANY),
+ "insert sysheap buffer"))
+ goto destroy_skel;
+
+ if (!ASSERT_OK(create_test_buffers(), "create_test_buffers"))
+ goto destroy;
+
+ if (!ASSERT_OK(dmabuf_iter__attach(skel), "skel_attach"))
+ goto destroy;
+
+ if (test__start_subtest("no_infinite_reads"))
+ subtest_dmabuf_iter_check_no_infinite_reads(skel);
+ if (test__start_subtest("default_iter"))
+ subtest_dmabuf_iter_check_default_iter(skel);
+ if (test__start_subtest("open_coded"))
+ subtest_dmabuf_iter_check_open_coded(skel, map_fd);
+
+destroy:
+ destroy_test_buffers();
+destroy_skel:
+ dmabuf_iter__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
new file mode 100644
index 000000000000..d3d94596ab79
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
+#include <test_progs.h>
+#include "dummy_st_ops_success.skel.h"
+#include "dummy_st_ops_fail.skel.h"
+#include "trace_dummy_st_ops.skel.h"
+
+/* Need to keep consistent with definition in include/linux/bpf.h */
+struct bpf_dummy_ops_state {
+ int val;
+};
+
+static void test_dummy_st_ops_attach(void)
+{
+ struct dummy_st_ops_success *skel;
+ struct bpf_link *link;
+
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.dummy_1);
+ ASSERT_EQ(libbpf_get_error(link), -EOPNOTSUPP, "dummy_st_ops_attach");
+
+ dummy_st_ops_success__destroy(skel);
+}
+
+static void test_dummy_init_ret_value(void)
+{
+ __u64 args[1] = {0};
+ LIBBPF_OPTS(bpf_test_run_opts, attr,
+ .ctx_in = args,
+ .ctx_size_in = sizeof(args),
+ );
+ struct dummy_st_ops_success *skel;
+ int fd, err;
+
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
+ return;
+
+ fd = bpf_program__fd(skel->progs.test_1);
+ err = bpf_prog_test_run_opts(fd, &attr);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(attr.retval, 0xf2f3f4f5, "test_ret");
+
+ dummy_st_ops_success__destroy(skel);
+}
+
+static void test_dummy_init_ptr_arg(void)
+{
+ int exp_retval = 0xbeef;
+ struct bpf_dummy_ops_state in_state = {
+ .val = exp_retval,
+ };
+ __u64 args[1] = {(unsigned long)&in_state};
+ LIBBPF_OPTS(bpf_test_run_opts, attr,
+ .ctx_in = args,
+ .ctx_size_in = sizeof(args),
+ );
+ struct trace_dummy_st_ops *trace_skel;
+ struct dummy_st_ops_success *skel;
+ int fd, err;
+
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
+ return;
+
+ fd = bpf_program__fd(skel->progs.test_1);
+
+ trace_skel = trace_dummy_st_ops__open();
+ if (!ASSERT_OK_PTR(trace_skel, "trace_dummy_st_ops__open"))
+ goto done;
+
+ err = bpf_program__set_attach_target(trace_skel->progs.fentry_test_1,
+ fd, "test_1");
+ if (!ASSERT_OK(err, "set_attach_target(fentry_test_1)"))
+ goto done;
+
+ err = trace_dummy_st_ops__load(trace_skel);
+ if (!ASSERT_OK(err, "load(trace_skel)"))
+ goto done;
+
+ err = trace_dummy_st_ops__attach(trace_skel);
+ if (!ASSERT_OK(err, "attach(trace_skel)"))
+ goto done;
+
+ err = bpf_prog_test_run_opts(fd, &attr);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(in_state.val, 0x5a, "test_ptr_ret");
+ ASSERT_EQ(attr.retval, exp_retval, "test_ret");
+ ASSERT_EQ(trace_skel->bss->val, exp_retval, "fentry_val");
+
+done:
+ dummy_st_ops_success__destroy(skel);
+ trace_dummy_st_ops__destroy(trace_skel);
+}
+
+static void test_dummy_multiple_args(void)
+{
+ struct bpf_dummy_ops_state st = { 7 };
+ __u64 args[5] = {(__u64)&st, -100, 0x8a5f, 'c', 0x1234567887654321ULL};
+ LIBBPF_OPTS(bpf_test_run_opts, attr,
+ .ctx_in = args,
+ .ctx_size_in = sizeof(args),
+ );
+ struct dummy_st_ops_success *skel;
+ int fd, err;
+ size_t i;
+ char name[8];
+
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
+ return;
+
+ fd = bpf_program__fd(skel->progs.test_2);
+ err = bpf_prog_test_run_opts(fd, &attr);
+ ASSERT_OK(err, "test_run");
+ args[0] = 7;
+ for (i = 0; i < ARRAY_SIZE(args); i++) {
+ snprintf(name, sizeof(name), "arg %zu", i);
+ ASSERT_EQ(skel->bss->test_2_args[i], args[i], name);
+ }
+
+ dummy_st_ops_success__destroy(skel);
+}
+
+static void test_dummy_sleepable(void)
+{
+ struct bpf_dummy_ops_state st;
+ __u64 args[1] = {(__u64)&st};
+ LIBBPF_OPTS(bpf_test_run_opts, attr,
+ .ctx_in = args,
+ .ctx_size_in = sizeof(args),
+ );
+ struct dummy_st_ops_success *skel;
+ int fd, err;
+
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
+ return;
+
+ fd = bpf_program__fd(skel->progs.test_sleepable);
+ err = bpf_prog_test_run_opts(fd, &attr);
+ ASSERT_OK(err, "test_run");
+
+ dummy_st_ops_success__destroy(skel);
+}
+
+/* dummy_st_ops.test_sleepable() parameter is not marked as nullable,
+ * thus bpf_prog_test_run_opts() below should be rejected as it tries
+ * to pass NULL for this parameter.
+ */
+static void test_dummy_sleepable_reject_null(void)
+{
+ __u64 args[1] = {0};
+ LIBBPF_OPTS(bpf_test_run_opts, attr,
+ .ctx_in = args,
+ .ctx_size_in = sizeof(args),
+ );
+ struct dummy_st_ops_success *skel;
+ int fd, err;
+
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
+ return;
+
+ fd = bpf_program__fd(skel->progs.test_sleepable);
+ err = bpf_prog_test_run_opts(fd, &attr);
+ ASSERT_EQ(err, -EINVAL, "test_run");
+
+ dummy_st_ops_success__destroy(skel);
+}
+
+void test_dummy_st_ops(void)
+{
+ if (test__start_subtest("dummy_st_ops_attach"))
+ test_dummy_st_ops_attach();
+ if (test__start_subtest("dummy_init_ret_value"))
+ test_dummy_init_ret_value();
+ if (test__start_subtest("dummy_init_ptr_arg"))
+ test_dummy_init_ptr_arg();
+ if (test__start_subtest("dummy_multiple_args"))
+ test_dummy_multiple_args();
+ if (test__start_subtest("dummy_sleepable"))
+ test_dummy_sleepable();
+ if (test__start_subtest("dummy_sleepable_reject_null"))
+ test_dummy_sleepable_reject_null();
+
+ RUN_TESTS(dummy_st_ops_fail);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c
new file mode 100644
index 000000000000..b9f86cb91e81
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "dynptr_fail.skel.h"
+#include "dynptr_success.skel.h"
+
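+/* how each success case is triggered: attach + usleep() for
+ * syscall progs, bpf_prog_test_run_opts() for skb/xdp progs
+ */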
+enum test_setup_type {
+ SETUP_SYSCALL_SLEEP,
+ SETUP_SKB_PROG,
+ SETUP_SKB_PROG_TP,
+ SETUP_XDP_PROG,
+};
+
+static struct {
+ const char *prog_name;
+ enum test_setup_type type;
+} success_tests[] = {
+ {"test_read_write", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_data", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_copy", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_copy_xdp", SETUP_XDP_PROG},
+ {"test_dynptr_memset_zero", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_memset_notzero", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_memset_zero_offset", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_memset_zero_adjusted", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_memset_overflow", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_memset_overflow_offset", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_memset_readonly", SETUP_SKB_PROG},
+ {"test_dynptr_memset_xdp_chunks", SETUP_XDP_PROG},
+ {"test_ringbuf", SETUP_SYSCALL_SLEEP},
+ {"test_skb_readonly", SETUP_SKB_PROG},
+ {"test_dynptr_skb_data", SETUP_SKB_PROG},
+ {"test_dynptr_skb_meta_data", SETUP_SKB_PROG},
+ {"test_dynptr_skb_meta_flags", SETUP_SKB_PROG},
+ {"test_adjust", SETUP_SYSCALL_SLEEP},
+ {"test_adjust_err", SETUP_SYSCALL_SLEEP},
+ {"test_zero_size_dynptr", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_is_null", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_is_rdonly", SETUP_SKB_PROG},
+ {"test_dynptr_clone", SETUP_SKB_PROG},
+ {"test_dynptr_skb_no_buff", SETUP_SKB_PROG},
+ {"test_dynptr_skb_strcmp", SETUP_SKB_PROG},
+ {"test_dynptr_skb_tp_btf", SETUP_SKB_PROG_TP},
+ {"test_probe_read_user_dynptr", SETUP_XDP_PROG},
+ {"test_probe_read_kernel_dynptr", SETUP_XDP_PROG},
+ {"test_probe_read_user_str_dynptr", SETUP_XDP_PROG},
+ {"test_probe_read_kernel_str_dynptr", SETUP_XDP_PROG},
+ {"test_copy_from_user_dynptr", SETUP_SYSCALL_SLEEP},
+ {"test_copy_from_user_str_dynptr", SETUP_SYSCALL_SLEEP},
+ {"test_copy_from_user_task_dynptr", SETUP_SYSCALL_SLEEP},
+ {"test_copy_from_user_task_str_dynptr", SETUP_SYSCALL_SLEEP},
+};
+
+#define PAGE_SIZE_64K 65536
+
+static void verify_success(const char *prog_name, enum test_setup_type setup_type)
+{
+ char user_data[384] = {[0 ... 382] = 'a', '\0'};
+ struct dynptr_success *skel;
+ struct bpf_program *prog;
+ struct bpf_link *link;
+ int err;
+
+ skel = dynptr_success__open();
+ if (!ASSERT_OK_PTR(skel, "dynptr_success__open"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ bpf_program__set_autoload(prog, true);
+
+ err = dynptr_success__load(skel);
+ if (!ASSERT_OK(err, "dynptr_success__load"))
+ goto cleanup;
+
+ skel->bss->user_ptr = user_data;
+ skel->data->test_len[0] = sizeof(user_data);
+ memcpy(skel->bss->expected_str, user_data, sizeof(user_data));
+
+ switch (setup_type) {
+ case SETUP_SYSCALL_SLEEP:
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
+ goto cleanup;
+
+ usleep(1);
+
+ bpf_link__destroy(link);
+ break;
+ case SETUP_SKB_PROG:
+ {
+ int prog_fd;
+ char buf[64];
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = buf,
+ .data_size_out = sizeof(buf),
+ .repeat = 1,
+ );
+
+ prog_fd = bpf_program__fd(prog);
+ if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
+ goto cleanup;
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+ if (!ASSERT_OK(err, "test_run"))
+ goto cleanup;
+
+ break;
+ }
+ case SETUP_SKB_PROG_TP:
+ {
+ struct __sk_buff skb = {};
+ struct bpf_object *obj;
+ int aux_prog_fd;
+
+ /* Just use its test_run to trigger kfree_skb tracepoint */
+ err = bpf_prog_test_load("./test_pkt_access.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &aux_prog_fd);
+ if (!ASSERT_OK(err, "prog_load sched cls"))
+ goto cleanup;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ );
+
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
+ goto cleanup;
+
+ err = bpf_prog_test_run_opts(aux_prog_fd, &topts);
+ bpf_link__destroy(link);
+
+ if (!ASSERT_OK(err, "test_run"))
+ goto cleanup;
+
+ break;
+ }
+ case SETUP_XDP_PROG:
+ {
+ char data[90000];
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &data,
+ .repeat = 1,
+ );
+
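+		/* use a data size larger than one page so the xdp buffer is
+		 * non-linear; 64K-page systems need the full 90000 bytes
+		 */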
+ if (getpagesize() == PAGE_SIZE_64K)
+ opts.data_size_in = sizeof(data);
+ else
+ opts.data_size_in = 5000;
+
+ prog_fd = bpf_program__fd(prog);
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+
+ if (!ASSERT_OK(err, "test_run"))
+ goto cleanup;
+
+ break;
+ }
+ }
+
+ ASSERT_EQ(skel->bss->err, 0, "err");
+
+cleanup:
+ dynptr_success__destroy(skel);
+}
+
+void test_dynptr(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
+ if (!test__start_subtest(success_tests[i].prog_name))
+ continue;
+
+ verify_success(success_tests[i].prog_name, success_tests[i].type);
+ }
+
+ RUN_TESTS(dynptr_fail);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/empty_skb.c b/tools/testing/selftests/bpf/prog_tests/empty_skb.c
new file mode 100644
index 000000000000..438583e1f2d1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/empty_skb.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <net/if.h>
+#include "empty_skb.skel.h"
+
+void test_empty_skb(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, tattr);
+ struct empty_skb *bpf_obj = NULL;
+ struct nstoken *tok = NULL;
+ struct bpf_program *prog;
+ char eth_hlen_pp[15];
+ char eth_hlen[14];
+ int veth_ifindex;
+ int ipip_ifindex;
+ int err;
+ int i;
+
+ struct {
+ const char *msg;
+ const void *data_in;
+ __u32 data_size_in;
+ int *ifindex;
+ int err;
+ int ret;
+ int lwt_egress_ret; /* expected retval at lwt/egress */
+ bool success_on_tc;
+ } tests[] = {
+ /* Empty packets are always rejected. */
+
+ {
+ /* BPF_PROG_RUN ETH_HLEN size check */
+ .msg = "veth empty ingress packet",
+ .data_in = NULL,
+ .data_size_in = 0,
+ .ifindex = &veth_ifindex,
+ .err = -EINVAL,
+ },
+ {
+ /* BPF_PROG_RUN ETH_HLEN size check */
+ .msg = "ipip empty ingress packet",
+ .data_in = NULL,
+ .data_size_in = 0,
+ .ifindex = &ipip_ifindex,
+ .err = -EINVAL,
+ },
+
+ /* ETH_HLEN-sized packets:
+	 * - cannot be redirected at LWT_XMIT
+ * - can be redirected at TC to non-tunneling dest
+ */
+
+ {
+ /* __bpf_redirect_common */
+ .msg = "veth ETH_HLEN packet ingress",
+ .data_in = eth_hlen,
+ .data_size_in = sizeof(eth_hlen),
+ .ifindex = &veth_ifindex,
+ .ret = -ERANGE,
+ .lwt_egress_ret = -ERANGE,
+ .success_on_tc = true,
+ },
+ {
+ /* __bpf_redirect_no_mac
+ *
+ * lwt: skb->len=0 <= skb_network_offset=0
+ * tc: skb->len=14 <= skb_network_offset=14
+ */
+ .msg = "ipip ETH_HLEN packet ingress",
+ .data_in = eth_hlen,
+ .data_size_in = sizeof(eth_hlen),
+ .ifindex = &ipip_ifindex,
+ .ret = -ERANGE,
+ .lwt_egress_ret = -ERANGE,
+ },
+
+ /* ETH_HLEN+1-sized packet should be redirected. */
+
+ {
+ .msg = "veth ETH_HLEN+1 packet ingress",
+ .data_in = eth_hlen_pp,
+ .data_size_in = sizeof(eth_hlen_pp),
+ .ifindex = &veth_ifindex,
+ .lwt_egress_ret = 1, /* veth_xmit NET_XMIT_DROP */
+ },
+ {
+ .msg = "ipip ETH_HLEN+1 packet ingress",
+ .data_in = eth_hlen_pp,
+ .data_size_in = sizeof(eth_hlen_pp),
+ .ifindex = &ipip_ifindex,
+ },
+ };
+
+ SYS(out, "ip netns add empty_skb");
+ tok = open_netns("empty_skb");
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+ SYS(out, "ip link add veth0 type veth peer veth1");
+ SYS(out, "ip link set dev veth0 up");
+ SYS(out, "ip link set dev veth1 up");
+ SYS(out, "ip addr add 10.0.0.1/8 dev veth0");
+ SYS(out, "ip addr add 10.0.0.2/8 dev veth1");
+ veth_ifindex = if_nametoindex("veth0");
+
+ SYS(out, "ip link add ipip0 type ipip local 10.0.0.1 remote 10.0.0.2");
+ SYS(out, "ip link set ipip0 up");
+ SYS(out, "ip addr add 192.168.1.1/16 dev ipip0");
+ ipip_ifindex = if_nametoindex("ipip0");
+
+ bpf_obj = empty_skb__open_and_load();
+ if (!ASSERT_OK_PTR(bpf_obj, "open skeleton"))
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ bpf_object__for_each_program(prog, bpf_obj->obj) {
+ bool at_egress = strstr(bpf_program__name(prog), "egress") != NULL;
+ bool at_tc = !strncmp(bpf_program__section_name(prog), "tc", 2);
+ int expected_ret;
+ char buf[128];
+
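+			/* only lwt egress progs use lwt_egress_ret; everything
+			 * else, including tc egress, uses the common ret
+			 */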
+ expected_ret = at_egress && !at_tc ? tests[i].lwt_egress_ret : tests[i].ret;
+
+ tattr.data_in = tests[i].data_in;
+ tattr.data_size_in = tests[i].data_size_in;
+
+ tattr.data_size_out = 0;
+ bpf_obj->bss->ifindex = *tests[i].ifindex;
+ bpf_obj->bss->ret = 0;
+ err = bpf_prog_test_run_opts(bpf_program__fd(prog), &tattr);
+ sprintf(buf, "err: %s [%s]", tests[i].msg, bpf_program__name(prog));
+
+ if (at_tc && tests[i].success_on_tc)
+ ASSERT_GE(err, 0, buf);
+ else
+ ASSERT_EQ(err, tests[i].err, buf);
+ sprintf(buf, "ret: %s [%s]", tests[i].msg, bpf_program__name(prog));
+ if (at_tc && tests[i].success_on_tc)
+ ASSERT_GE(bpf_obj->bss->ret, 0, buf);
+ else
+ ASSERT_EQ(bpf_obj->bss->ret, expected_ret, buf);
+ }
+ }
+
+out:
+ if (bpf_obj)
+ empty_skb__destroy(bpf_obj);
+ if (tok)
+ close_netns(tok);
+ SYS_NOFAIL("ip netns del empty_skb");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/enable_stats.c b/tools/testing/selftests/bpf/prog_tests/enable_stats.c
index 2cb2085917e7..75f85d0fe74a 100644
--- a/tools/testing/selftests/bpf/prog_tests/enable_stats.c
+++ b/tools/testing/selftests/bpf/prog_tests/enable_stats.c
@@ -28,7 +28,7 @@ void test_enable_stats(void)
prog_fd = bpf_program__fd(skel->progs.test_enable_stats);
memset(&info, 0, info_len);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err, "get_prog_info",
"failed to get bpf_prog_info for fd %d\n", prog_fd))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/exceptions.c b/tools/testing/selftests/bpf/prog_tests/exceptions.c
new file mode 100644
index 000000000000..516f4a13013c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/exceptions.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "exceptions.skel.h"
+#include "exceptions_ext.skel.h"
+#include "exceptions_fail.skel.h"
+#include "exceptions_assert.skel.h"
+
+static char log_buf[1024 * 1024];
+
+static void test_exceptions_failure(void)
+{
+ RUN_TESTS(exceptions_fail);
+}
+
+static void test_exceptions_success(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, ropts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct exceptions_ext *eskel = NULL;
+ struct exceptions *skel;
+ int ret;
+
+ skel = exceptions__open();
+ if (!ASSERT_OK_PTR(skel, "exceptions__open"))
+ return;
+
+ ret = exceptions__load(skel);
+ if (!ASSERT_OK(ret, "exceptions__load"))
+ goto done;
+
+ if (!ASSERT_OK(bpf_map_update_elem(bpf_map__fd(skel->maps.jmp_table), &(int){0},
+ &(int){bpf_program__fd(skel->progs.exception_tail_call_target)}, BPF_ANY),
+ "bpf_map_update_elem jmp_table"))
+ goto done;
+
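+/* run one success prog as its own subtest; when the subtest is
+ * deselected, skip over the run via the per-prog label
+ */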
+#define RUN_SUCCESS(_prog, return_val) \
+ if (!test__start_subtest(#_prog)) goto _prog##_##return_val; \
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs._prog), &ropts); \
+ ASSERT_OK(ret, #_prog " prog run ret"); \
+ ASSERT_EQ(ropts.retval, return_val, #_prog " prog run retval"); \
+ _prog##_##return_val:
+
+ RUN_SUCCESS(exception_throw_always_1, 64);
+ RUN_SUCCESS(exception_throw_always_2, 32);
+ RUN_SUCCESS(exception_throw_unwind_1, 16);
+ RUN_SUCCESS(exception_throw_unwind_2, 32);
+ RUN_SUCCESS(exception_throw_default, 0);
+ RUN_SUCCESS(exception_throw_default_value, 5);
+ RUN_SUCCESS(exception_tail_call, 24);
+ RUN_SUCCESS(exception_ext, 0);
+ RUN_SUCCESS(exception_ext_mod_cb_runtime, 35);
+ RUN_SUCCESS(exception_throw_subprog, 1);
+ RUN_SUCCESS(exception_assert_nz_gfunc, 1);
+ RUN_SUCCESS(exception_assert_zero_gfunc, 1);
+ RUN_SUCCESS(exception_assert_neg_gfunc, 1);
+ RUN_SUCCESS(exception_assert_pos_gfunc, 1);
+ RUN_SUCCESS(exception_assert_negeq_gfunc, 1);
+ RUN_SUCCESS(exception_assert_poseq_gfunc, 1);
+ RUN_SUCCESS(exception_assert_nz_gfunc_with, 1);
+ RUN_SUCCESS(exception_assert_zero_gfunc_with, 1);
+ RUN_SUCCESS(exception_assert_neg_gfunc_with, 1);
+ RUN_SUCCESS(exception_assert_pos_gfunc_with, 1);
+ RUN_SUCCESS(exception_assert_negeq_gfunc_with, 1);
+ RUN_SUCCESS(exception_assert_poseq_gfunc_with, 1);
+ RUN_SUCCESS(exception_bad_assert_nz_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_zero_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_neg_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_pos_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_negeq_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_poseq_gfunc, 0);
+ RUN_SUCCESS(exception_bad_assert_nz_gfunc_with, 100);
+ RUN_SUCCESS(exception_bad_assert_zero_gfunc_with, 105);
+ RUN_SUCCESS(exception_bad_assert_neg_gfunc_with, 200);
+ RUN_SUCCESS(exception_bad_assert_pos_gfunc_with, 0);
+ RUN_SUCCESS(exception_bad_assert_negeq_gfunc_with, 101);
+ RUN_SUCCESS(exception_bad_assert_poseq_gfunc_with, 99);
+ RUN_SUCCESS(exception_assert_range, 1);
+ RUN_SUCCESS(exception_assert_range_with, 1);
+ RUN_SUCCESS(exception_bad_assert_range, 0);
+ RUN_SUCCESS(exception_bad_assert_range_with, 10);
+
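+/* (re)open exceptions_ext with a verifier log buffer, let expr pick
+ * and configure the prog under test, then check the expected load and
+ * attach outcome; on an expected load failure, msg must appear in the
+ * verifier log
+ */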
+#define RUN_EXT(load_ret, attach_err, expr, msg, after_link) \
+ { \
+ LIBBPF_OPTS(bpf_object_open_opts, o, .kernel_log_buf = log_buf, \
+ .kernel_log_size = sizeof(log_buf), \
+ .kernel_log_level = 2); \
+ exceptions_ext__destroy(eskel); \
+ eskel = exceptions_ext__open_opts(&o); \
+ struct bpf_program *prog = NULL; \
+ struct bpf_link *link = NULL; \
+ if (!ASSERT_OK_PTR(eskel, "exceptions_ext__open")) \
+ goto done; \
+ (expr); \
+ ASSERT_OK_PTR(bpf_program__name(prog), bpf_program__name(prog)); \
+ if (!ASSERT_EQ(exceptions_ext__load(eskel), load_ret, \
+ "exceptions_ext__load")) { \
+ printf("%s\n", log_buf); \
+ goto done; \
+ } \
+ if (load_ret != 0) { \
+ if (!ASSERT_OK_PTR(strstr(log_buf, msg), "strstr")) { \
+ printf("%s\n", log_buf); \
+ goto done; \
+ } \
+ } \
+ if (!load_ret && attach_err) { \
+ if (!ASSERT_ERR_PTR(link = bpf_program__attach(prog), "attach err")) \
+ goto done; \
+ } else if (!load_ret) { \
+ if (!ASSERT_OK_PTR(link = bpf_program__attach(prog), "attach ok")) \
+ goto done; \
+ (void)(after_link); \
+ bpf_link__destroy(link); \
+ } \
+ }
+
+ if (test__start_subtest("non-throwing fentry -> exception_cb"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.pfentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod"), "set_attach_target"))
+ goto done;
+ }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+
+ if (test__start_subtest("throwing fentry -> exception_cb"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.throwing_fentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod"), "set_attach_target"))
+ goto done;
+ }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+
+ if (test__start_subtest("non-throwing fexit -> exception_cb"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.pfexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod"), "set_attach_target"))
+ goto done;
+ }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+
+ if (test__start_subtest("throwing fexit -> exception_cb"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.throwing_fexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod"), "set_attach_target"))
+ goto done;
+ }), "FENTRY/FEXIT programs cannot attach to exception callback", 0);
+
+ if (test__start_subtest("throwing extension (with custom cb) -> exception_cb"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.throwing_exception_cb_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod"), "set_attach_target"))
+ goto done;
+ }), "Extension programs cannot attach to exception callback", 0);
+
+ if (test__start_subtest("throwing extension -> global func in exception_cb"))
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_exception_cb_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
+ "exception_cb_mod_global"), "set_attach_target"))
+ goto done;
+ }), "", ({ RUN_SUCCESS(exception_ext_mod_cb_runtime, 131); }));
+
+ if (test__start_subtest("throwing extension (with custom cb) -> global func in exception_cb"))
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_ext),
+ "exception_ext_global"), "set_attach_target"))
+ goto done;
+ }), "", ({ RUN_SUCCESS(exception_ext, 128); }));
+
+ if (test__start_subtest("non-throwing fentry -> non-throwing subprog"))
+ /* non-throwing fentry -> non-throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.pfentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing fentry -> non-throwing subprog"))
+ /* throwing fentry -> non-throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_fentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("non-throwing fentry -> throwing subprog"))
+ /* non-throwing fentry -> throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.pfentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing fentry -> throwing subprog"))
+ /* throwing fentry -> throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_fentry;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("non-throwing fexit -> non-throwing subprog"))
+ /* non-throwing fexit -> non-throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.pfexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing fexit -> non-throwing subprog"))
+ /* throwing fexit -> non-throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_fexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("non-throwing fexit -> throwing subprog"))
+ /* non-throwing fexit -> throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.pfexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing fexit -> throwing subprog"))
+ /* throwing fexit -> throwing subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_fexit;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ /* fmod_ret not allowed for subprog - Check so we remember to handle its
+ * throwing specification compatibility with target when supported.
+ */
+ if (test__start_subtest("non-throwing fmod_ret -> non-throwing subprog"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.pfmod_ret;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "can't modify return codes of BPF program", 0);
+
+ /* fmod_ret not allowed for subprog - Check so we remember to handle its
+ * throwing specification compatibility with target when supported.
+ */
+ if (test__start_subtest("non-throwing fmod_ret -> non-throwing global subprog"))
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.pfmod_ret;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "global_subprog"), "set_attach_target"))
+ goto done;
+ }), "can't modify return codes of BPF program", 0);
+
+ if (test__start_subtest("non-throwing extension -> non-throwing subprog"))
+ /* non-throwing extension -> non-throwing subprog : BAD (!global) */
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "subprog"), "set_attach_target"))
+ goto done;
+ }), "subprog() is not a global function", 0);
+
+ if (test__start_subtest("non-throwing extension -> throwing subprog"))
+ /* non-throwing extension -> throwing subprog : BAD (!global) */
+ RUN_EXT(-EINVAL, true, ({
+ prog = eskel->progs.extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_subprog"), "set_attach_target"))
+ goto done;
+ }), "throwing_subprog() is not a global function", 0);
+
+ if (test__start_subtest("non-throwing extension -> non-throwing subprog"))
+ /* non-throwing extension -> non-throwing global subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "global_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("non-throwing extension -> throwing global subprog"))
+ /* non-throwing extension -> throwing global subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_global_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing extension -> throwing global subprog"))
+ /* throwing extension -> throwing global subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "throwing_global_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing extension -> non-throwing global subprog"))
+ /* throwing extension -> non-throwing global subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "global_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("non-throwing extension -> main subprog"))
+ /* non-throwing extension -> main subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "exception_throw_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+ if (test__start_subtest("throwing extension -> main subprog"))
+ /* throwing extension -> main subprog : OK */
+ RUN_EXT(0, false, ({
+ prog = eskel->progs.throwing_extension;
+ bpf_program__set_autoload(prog, true);
+ if (!ASSERT_OK(bpf_program__set_attach_target(prog,
+ bpf_program__fd(skel->progs.exception_throw_subprog),
+ "exception_throw_subprog"), "set_attach_target"))
+ goto done;
+ }), "", 0);
+
+done:
+ exceptions_ext__destroy(eskel);
+ exceptions__destroy(skel);
+}
+
+static void test_exceptions_assertions(void)
+{
+ RUN_TESTS(exceptions_assert);
+}
+
+void test_exceptions(void)
+{
+ test_exceptions_success();
+ test_exceptions_failure();
+ test_exceptions_assertions();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/exhandler.c b/tools/testing/selftests/bpf/prog_tests/exhandler.c
new file mode 100644
index 000000000000..118bb182ee20
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/exhandler.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021, Oracle and/or its affiliates. */
+
+#include <test_progs.h>
+
+/* Test that verifies exception handling is working. fork()
+ * triggers task_newtask tracepoint; that new task will have a
+ * NULL pointer task_works, and the associated task->task_works->func
+ * should not be NULL if task_works itself is non-NULL.
+ *
+ * So to verify exception handling we want to see a NULL task_works
+ * and task_works->func; if we see this we can conclude that the
+ * exception handler ran when we attempted to dereference task->task_works
+ * and zeroed the destination register.
+ */
+#include "exhandler_kern.skel.h"
+
+void test_exhandler(void)
+{
+ int err = 0, duration = 0, status;
+ struct exhandler_kern *skel;
+ pid_t cpid;
+
+ skel = exhandler_kern__open_and_load();
+ if (CHECK(!skel, "skel_load", "skeleton failed: %d\n", err))
+ goto cleanup;
+
+ skel->bss->test_pid = getpid();
+
+ err = exhandler_kern__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto cleanup;
+ cpid = fork();
+ if (!ASSERT_GT(cpid, -1, "fork failed"))
+ goto cleanup;
+ if (cpid == 0)
+ _exit(0);
+ waitpid(cpid, &status, 0);
+
+ ASSERT_NEQ(skel->bss->exception_triggered, 0, "verify exceptions occurred");
+cleanup:
+ exhandler_kern__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fd_array.c b/tools/testing/selftests/bpf/prog_tests/fd_array.c
new file mode 100644
index 000000000000..c534b4d5f9da
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fd_array.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include <linux/btf.h>
+#include <bpf/bpf.h>
+
+#include "../test_btf.h"
+
+static inline int new_map(void)
+{
+ const char *name = NULL;
+ __u32 max_entries = 1;
+ __u32 value_size = 8;
+ __u32 key_size = 4;
+
+ return bpf_map_create(BPF_MAP_TYPE_ARRAY, name,
+ key_size, value_size,
+ max_entries, NULL);
+}
+
+static int new_btf(void)
+{
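+	/* minimal raw BTF: header, two integer types, and a 4-byte string
+	 * section whose leading '\0' is the mandatory empty string
+	 */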
+ struct btf_blob {
+ struct btf_header btf_hdr;
+ __u32 types[8];
+ __u32 str;
+ } raw_btf = {
+ .btf_hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_len = sizeof(raw_btf.types),
+ .str_off = offsetof(struct btf_blob, str) - offsetof(struct btf_blob, types),
+ .str_len = sizeof(raw_btf.str),
+ },
+ .types = {
+ /* long */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [1] */
+ /* unsigned long */
+ BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
+ },
+ };
+
+ return bpf_btf_load(&raw_btf, sizeof(raw_btf), NULL);
+}
+
+#define Close(FD) do { \
+ if ((FD) >= 0) { \
+ close(FD); \
+ FD = -1; \
+ } \
+} while (0)
+
+static bool map_exists(__u32 id)
+{
+ int fd;
+
+ fd = bpf_map_get_fd_by_id(id);
+ if (fd >= 0) {
+ close(fd);
+ return true;
+ }
+ return false;
+}
+
+static bool btf_exists(__u32 id)
+{
+ int fd;
+
+ fd = bpf_btf_get_fd_by_id(id);
+ if (fd >= 0) {
+ close(fd);
+ return true;
+ }
+ return false;
+}
+
+static inline int bpf_prog_get_map_ids(int prog_fd, __u32 *nr_map_ids, __u32 *map_ids)
+{
+ __u32 len = sizeof(struct bpf_prog_info);
+ struct bpf_prog_info info;
+ int err;
+
+ memset(&info, 0, len);
+ info.nr_map_ids = *nr_map_ids;
+ info.map_ids = ptr_to_u64(map_ids);
+
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
+ return -1;
+
+ *nr_map_ids = info.nr_map_ids;
+
+ return 0;
+}
+
+static int __load_test_prog(int map_fd, const int *fd_array, int fd_array_cnt)
+{
+ /* A trivial program which uses one map */
+ struct bpf_insn insns[] = {
+ BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+
+ opts.fd_array = fd_array;
+ opts.fd_array_cnt = fd_array_cnt;
+
+ return bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, ARRAY_SIZE(insns), &opts);
+}
+
+static int load_test_prog(const int *fd_array, int fd_array_cnt)
+{
+ int map_fd;
+ int ret;
+
+ map_fd = new_map();
+ if (!ASSERT_GE(map_fd, 0, "new_map"))
+ return map_fd;
+
+ ret = __load_test_prog(map_fd, fd_array, fd_array_cnt);
+ close(map_fd);
+ return ret;
+}
+
+static bool check_expected_map_ids(int prog_fd, int expected, __u32 *map_ids, __u32 *nr_map_ids)
+{
+ int err;
+
+ err = bpf_prog_get_map_ids(prog_fd, nr_map_ids, map_ids);
+ if (!ASSERT_OK(err, "bpf_prog_get_map_ids"))
+ return false;
+ if (!ASSERT_EQ(*nr_map_ids, expected, "unexpected nr_map_ids"))
+ return false;
+
+ return true;
+}
+
+/*
+ * Load a program that uses one map. No fd_array maps are present.
+ * On return, only one map is expected to be bound to the program.
+ */
+static void check_fd_array_cnt__no_fd_array(void)
+{
+ __u32 map_ids[16];
+ __u32 nr_map_ids;
+ int prog_fd = -1;
+
+ prog_fd = load_test_prog(NULL, 0);
+ if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
+ return;
+ nr_map_ids = ARRAY_SIZE(map_ids);
+ check_expected_map_ids(prog_fd, 1, map_ids, &nr_map_ids);
+ close(prog_fd);
+}
+
+/*
+ * Load a program that uses one map, and pass two extra, distinct maps in
+ * fd_array with fd_array_cnt=2. On return, three maps are expected to be bound
+ * to the program.
+ */
+static void check_fd_array_cnt__fd_array_ok(void)
+{
+ int extra_fds[2] = { -1, -1 };
+ __u32 map_ids[16];
+ __u32 nr_map_ids;
+ int prog_fd = -1;
+
+ extra_fds[0] = new_map();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
+ goto cleanup;
+ extra_fds[1] = new_map();
+ if (!ASSERT_GE(extra_fds[1], 0, "new_map"))
+ goto cleanup;
+ prog_fd = load_test_prog(extra_fds, 2);
+ if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
+ goto cleanup;
+ nr_map_ids = ARRAY_SIZE(map_ids);
+ if (!check_expected_map_ids(prog_fd, 3, map_ids, &nr_map_ids))
+ goto cleanup;
+
+ /* maps should still exist when original file descriptors are closed */
+ Close(extra_fds[0]);
+ Close(extra_fds[1]);
+ if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map_ids[0] should exist"))
+ goto cleanup;
+ if (!ASSERT_EQ(map_exists(map_ids[1]), true, "map_ids[1] should exist"))
+ goto cleanup;
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+ Close(extra_fds[1]);
+ Close(extra_fds[0]);
+ Close(prog_fd);
+}
+
+/*
+ * Load a program with a few extra maps duplicated in the fd_array.
+ * After the load, each map should be referenced only once.
+ */
+static void check_fd_array_cnt__duplicated_maps(void)
+{
+ int extra_fds[4] = { -1, -1, -1, -1 };
+ __u32 map_ids[16];
+ __u32 nr_map_ids;
+ int prog_fd = -1;
+
+ extra_fds[0] = extra_fds[2] = new_map();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
+ goto cleanup;
+ extra_fds[1] = extra_fds[3] = new_map();
+ if (!ASSERT_GE(extra_fds[1], 0, "new_map"))
+ goto cleanup;
+ prog_fd = load_test_prog(extra_fds, 4);
+ if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
+ goto cleanup;
+ nr_map_ids = ARRAY_SIZE(map_ids);
+ if (!check_expected_map_ids(prog_fd, 3, map_ids, &nr_map_ids))
+ goto cleanup;
+
+ /* maps should still exist when original file descriptors are closed */
+ Close(extra_fds[0]);
+ Close(extra_fds[1]);
+ if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map should exist"))
+ goto cleanup;
+ if (!ASSERT_EQ(map_exists(map_ids[1]), true, "map should exist"))
+ goto cleanup;
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+ Close(extra_fds[1]);
+ Close(extra_fds[0]);
+ Close(prog_fd);
+}
+
+/*
+ * Check that maps which are referenced by a program and also passed in
+ * fd_array are referenced only once.
+ */
+static void check_fd_array_cnt__referenced_maps_in_fd_array(void)
+{
+ int extra_fds[1] = { -1 };
+ __u32 map_ids[16];
+ __u32 nr_map_ids;
+ int prog_fd = -1;
+
+ extra_fds[0] = new_map();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
+ goto cleanup;
+ prog_fd = __load_test_prog(extra_fds[0], extra_fds, 1);
+ if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
+ goto cleanup;
+ nr_map_ids = ARRAY_SIZE(map_ids);
+ if (!check_expected_map_ids(prog_fd, 1, map_ids, &nr_map_ids))
+ goto cleanup;
+
+ /* map should still exist when original file descriptor is closed */
+ Close(extra_fds[0]);
+ if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map should exist"))
+ goto cleanup;
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+ Close(extra_fds[0]);
+ Close(prog_fd);
+}
+
+static int get_btf_id_by_fd(int btf_fd, __u32 *id)
+{
+ struct bpf_btf_info info;
+ __u32 info_len = sizeof(info);
+ int err;
+
+ memset(&info, 0, info_len);
+ err = bpf_btf_get_info_by_fd(btf_fd, &info, &info_len);
+ if (err)
+ return err;
+ if (id)
+ *id = info.id;
+ return 0;
+}
+
+/*
+ * Check that fd_array operates properly for btfs. Namely, to check that
+ * passing a btf fd in fd_array increases its reference count, do the
+ * following:
+ * 1) Create a new btf, it's referenced only by a file descriptor, so refcnt=1
+ * 2) Load a BPF prog with fd_array[0] = btf_fd; now btf's refcnt=2
+ * 3) Close the btf_fd, now refcnt=1
+ * Wait and check that BTF still exists.
+ */
+static void check_fd_array_cnt__referenced_btfs(void)
+{
+ int extra_fds[1] = { -1 };
+ int prog_fd = -1;
+ __u32 btf_id;
+ int tries;
+ int err;
+
+ extra_fds[0] = new_btf();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_btf"))
+ goto cleanup;
+ prog_fd = load_test_prog(extra_fds, 1);
+ if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
+ goto cleanup;
+
+ /* btf should still exist when original file descriptor is closed */
+ err = get_btf_id_by_fd(extra_fds[0], &btf_id);
+ if (!ASSERT_EQ(err, 0, "get_btf_id_by_fd"))
+ goto cleanup;
+
+ Close(extra_fds[0]);
+
+ if (!ASSERT_GE(kern_sync_rcu(), 0, "kern_sync_rcu 1"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(btf_exists(btf_id), true, "btf should exist"))
+ goto cleanup;
+
+ Close(prog_fd);
+
+	/* The program is freed by a workqueue, so there is no reliable
+	 * way to sync; just wait a bit (max ~1 second).
+	 */
+ for (tries = 100; tries >= 0; tries--) {
+ usleep(1000);
+
+ if (!btf_exists(btf_id))
+ break;
+
+ if (tries)
+ continue;
+
+ PRINT_FAIL("btf should have been freed");
+ }
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+ Close(extra_fds[0]);
+ Close(prog_fd);
+}
+
+/*
+ * Test that a program with trash in fd_array can't be loaded:
+ * only map and BTF file descriptors should be accepted.
+ */
+static void check_fd_array_cnt__fd_array_with_trash(void)
+{
+ int extra_fds[3] = { -1, -1, -1 };
+ int prog_fd = -1;
+
+ extra_fds[0] = new_map();
+ if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
+ goto cleanup;
+ extra_fds[1] = new_btf();
+ if (!ASSERT_GE(extra_fds[1], 0, "new_btf"))
+ goto cleanup;
+
+ /* trash 1: not a file descriptor */
+ extra_fds[2] = 0xbeef;
+ prog_fd = load_test_prog(extra_fds, 3);
+ if (!ASSERT_EQ(prog_fd, -EBADF, "prog should have been rejected with -EBADF"))
+ goto cleanup;
+
+ /* trash 2: not a map or btf */
+ extra_fds[2] = socket(AF_INET, SOCK_STREAM, 0);
+ if (!ASSERT_GE(extra_fds[2], 0, "socket"))
+ goto cleanup;
+
+ prog_fd = load_test_prog(extra_fds, 3);
+ if (!ASSERT_EQ(prog_fd, -EINVAL, "prog should have been rejected with -EINVAL"))
+ goto cleanup;
+
+ /* Validate that the prog is ok if trash is removed */
+ Close(extra_fds[2]);
+ extra_fds[2] = new_btf();
+ if (!ASSERT_GE(extra_fds[2], 0, "new_btf"))
+ goto cleanup;
+
+ prog_fd = load_test_prog(extra_fds, 3);
+ if (!ASSERT_GE(prog_fd, 0, "prog should have been loaded"))
+ goto cleanup;
+
+ /* some fds might be invalid, so ignore return codes */
+cleanup:
+	Close(prog_fd);
+	Close(extra_fds[2]);
+ Close(extra_fds[1]);
+ Close(extra_fds[0]);
+}
+
+/*
+ * Test that a program with a too-large fd_array can't be loaded.
+ */
+static void check_fd_array_cnt__fd_array_too_big(void)
+{
+ int extra_fds[65];
+ int prog_fd = -1;
+ int i;
+
+ for (i = 0; i < 65; i++) {
+ extra_fds[i] = new_map();
+ if (!ASSERT_GE(extra_fds[i], 0, "new_map"))
+ goto cleanup_fds;
+ }
+
+ prog_fd = load_test_prog(extra_fds, 65);
+ ASSERT_EQ(prog_fd, -E2BIG, "prog should have been rejected with -E2BIG");
+
+cleanup_fds:
+ while (i > 0)
+ Close(extra_fds[--i]);
+}
+
+void test_fd_array_cnt(void)
+{
+ if (test__start_subtest("no-fd-array"))
+ check_fd_array_cnt__no_fd_array();
+
+ if (test__start_subtest("fd-array-ok"))
+ check_fd_array_cnt__fd_array_ok();
+
+ if (test__start_subtest("fd-array-dup-input"))
+ check_fd_array_cnt__duplicated_maps();
+
+ if (test__start_subtest("fd-array-ref-maps-in-array"))
+ check_fd_array_cnt__referenced_maps_in_fd_array();
+
+ if (test__start_subtest("fd-array-ref-btfs"))
+ check_fd_array_cnt__referenced_btfs();
+
+ if (test__start_subtest("fd-array-trash-input"))
+ check_fd_array_cnt__fd_array_with_trash();
+
+ if (test__start_subtest("fd-array-2big"))
+ check_fd_array_cnt__fd_array_too_big();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fd_htab_lookup.c b/tools/testing/selftests/bpf/prog_tests/fd_htab_lookup.c
new file mode 100644
index 000000000000..ca46fdd6e1ae
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fd_htab_lookup.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025. Huawei Technologies Co., Ltd */
+#define _GNU_SOURCE
+#include <stdbool.h>
+#include <test_progs.h>
+#include "fd_htab_lookup.skel.h"
+
+struct htab_op_ctx {
+	int fd;			/* fd of the outer fd-based htab */
+	int loop;		/* iterations per worker thread */
+	unsigned int entries;	/* number of keys in the outer htab */
+	bool stop;		/* set by a failing thread to stop the others */
+};
+
+#define ERR_TO_RETVAL(where, err) ((void *)(long)(((where) << 12) | (-err)))
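+/* Worked example of the encoding above: a thread failing at call site 2
+ * with -ENOENT returns ERR_TO_RETVAL(2, -ENOENT), i.e.
+ * (void *)((2 << 12) | 2) == (void *)0x2002, so a non-NULL value seen by
+ * pthread_join() both flags the failure and pinpoints where it happened.
+ */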
+
+static void *htab_lookup_fn(void *arg)
+{
+ struct htab_op_ctx *ctx = arg;
+ int i = 0;
+
+ while (i++ < ctx->loop && !ctx->stop) {
+ unsigned int j;
+
+ for (j = 0; j < ctx->entries; j++) {
+ unsigned int key = j, zero = 0, value;
+ int inner_fd, err;
+
+ err = bpf_map_lookup_elem(ctx->fd, &key, &value);
+ if (err) {
+ ctx->stop = true;
+ return ERR_TO_RETVAL(1, err);
+ }
+
+ inner_fd = bpf_map_get_fd_by_id(value);
+ if (inner_fd < 0) {
+ /* The old map has been freed */
+ if (inner_fd == -ENOENT)
+ continue;
+ ctx->stop = true;
+ return ERR_TO_RETVAL(2, inner_fd);
+ }
+
+ err = bpf_map_lookup_elem(inner_fd, &zero, &value);
+ if (err) {
+ close(inner_fd);
+ ctx->stop = true;
+ return ERR_TO_RETVAL(3, err);
+ }
+ close(inner_fd);
+
+ if (value != key) {
+ ctx->stop = true;
+ return ERR_TO_RETVAL(4, -EINVAL);
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static void *htab_update_fn(void *arg)
+{
+ struct htab_op_ctx *ctx = arg;
+ int i = 0;
+
+ while (i++ < ctx->loop && !ctx->stop) {
+ unsigned int j;
+
+ for (j = 0; j < ctx->entries; j++) {
+ unsigned int key = j, zero = 0;
+ int inner_fd, err;
+
+ inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL);
+ if (inner_fd < 0) {
+ ctx->stop = true;
+ return ERR_TO_RETVAL(1, inner_fd);
+ }
+
+ err = bpf_map_update_elem(inner_fd, &zero, &key, 0);
+ if (err) {
+ close(inner_fd);
+ ctx->stop = true;
+ return ERR_TO_RETVAL(2, err);
+ }
+
+ err = bpf_map_update_elem(ctx->fd, &key, &inner_fd, BPF_EXIST);
+ if (err) {
+ close(inner_fd);
+ ctx->stop = true;
+ return ERR_TO_RETVAL(3, err);
+ }
+ close(inner_fd);
+ }
+ }
+
+ return NULL;
+}
+
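+/* Seed the outer htab with the invariant the lookup threads verify:
+ * outer key j maps to an inner one-element array whose value is j.
+ */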
+static int setup_htab(int fd, unsigned int entries)
+{
+ unsigned int i;
+
+ for (i = 0; i < entries; i++) {
+ unsigned int key = i, zero = 0;
+ int inner_fd, err;
+
+ inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL);
+ if (!ASSERT_OK_FD(inner_fd, "new array"))
+ return -1;
+
+ err = bpf_map_update_elem(inner_fd, &zero, &key, 0);
+ if (!ASSERT_OK(err, "init array")) {
+ close(inner_fd);
+ return -1;
+ }
+
+ err = bpf_map_update_elem(fd, &key, &inner_fd, 0);
+ if (!ASSERT_OK(err, "init outer")) {
+ close(inner_fd);
+ return -1;
+ }
+ close(inner_fd);
+ }
+
+ return 0;
+}
+
+static int get_int_from_env(const char *name, int dft)
+{
+ const char *value;
+
+ value = getenv(name);
+ if (!value)
+ return dft;
+
+ return atoi(value);
+}
+
+void test_fd_htab_lookup(void)
+{
+ unsigned int i, wr_nr = 8, rd_nr = 16;
+ pthread_t tids[wr_nr + rd_nr];
+ struct fd_htab_lookup *skel;
+ struct htab_op_ctx ctx;
+ int err;
+
+ skel = fd_htab_lookup__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "fd_htab_lookup__open_and_load"))
+ return;
+
+ ctx.fd = bpf_map__fd(skel->maps.outer_map);
+ ctx.loop = get_int_from_env("FD_HTAB_LOOP_NR", 5);
+ ctx.stop = false;
+ ctx.entries = 8;
+
+ err = setup_htab(ctx.fd, ctx.entries);
+ if (err)
+ goto destroy;
+
+ memset(tids, 0, sizeof(tids));
+ for (i = 0; i < wr_nr; i++) {
+ err = pthread_create(&tids[i], NULL, htab_update_fn, &ctx);
+ if (!ASSERT_OK(err, "pthread_create")) {
+ ctx.stop = true;
+ goto reap;
+ }
+ }
+ for (i = 0; i < rd_nr; i++) {
+ err = pthread_create(&tids[i + wr_nr], NULL, htab_lookup_fn, &ctx);
+ if (!ASSERT_OK(err, "pthread_create")) {
+ ctx.stop = true;
+ goto reap;
+ }
+ }
+
+reap:
+ for (i = 0; i < wr_nr + rd_nr; i++) {
+ void *ret = NULL;
+ char desc[32];
+
+ if (!tids[i])
+ continue;
+
+ snprintf(desc, sizeof(desc), "thread %u", i + 1);
+ err = pthread_join(tids[i], &ret);
+ ASSERT_OK(err, desc);
+ ASSERT_EQ(ret, NULL, desc);
+ }
+destroy:
+ fd_htab_lookup__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
index 109d0345a2be..5ef1804e44df 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
@@ -1,49 +1,56 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
-#include "fentry_test.skel.h"
-#include "fexit_test.skel.h"
+#include "fentry_test.lskel.h"
+#include "fexit_test.lskel.h"
void test_fentry_fexit(void)
{
- struct fentry_test *fentry_skel = NULL;
- struct fexit_test *fexit_skel = NULL;
+ struct fentry_test_lskel *fentry_skel = NULL;
+ struct fexit_test_lskel *fexit_skel = NULL;
__u64 *fentry_res, *fexit_res;
- __u32 duration = 0, retval;
int err, prog_fd, i;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
- fentry_skel = fentry_test__open_and_load();
- if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n"))
+ fentry_skel = fentry_test_lskel__open();
+ if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load"))
goto close_prog;
- fexit_skel = fexit_test__open_and_load();
- if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n"))
+
+ fentry_skel->keyring_id = KEY_SPEC_SESSION_KEYRING;
+ err = fentry_test_lskel__load(fentry_skel);
+ if (!ASSERT_OK(err, "fentry_skel_load"))
+ goto close_prog;
+
+ fexit_skel = fexit_test_lskel__open();
+ if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load"))
+ goto close_prog;
+
+ fexit_skel->keyring_id = KEY_SPEC_SESSION_KEYRING;
+ err = fexit_test_lskel__load(fexit_skel);
+ if (!ASSERT_OK(err, "fexit_skel_load"))
goto close_prog;
- err = fentry_test__attach(fentry_skel);
- if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err))
+ err = fentry_test_lskel__attach(fentry_skel);
+ if (!ASSERT_OK(err, "fentry_attach"))
goto close_prog;
- err = fexit_test__attach(fexit_skel);
- if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
+ err = fexit_test_lskel__attach(fexit_skel);
+ if (!ASSERT_OK(err, "fexit_attach"))
goto close_prog;
- prog_fd = bpf_program__fd(fexit_skel->progs.test1);
- err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv6",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
+ prog_fd = fexit_skel->progs.test1.prog_fd;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "ipv6 test_run");
+ ASSERT_OK(topts.retval, "ipv6 test retval");
fentry_res = (__u64 *)fentry_skel->bss;
fexit_res = (__u64 *)fexit_skel->bss;
printf("%lld\n", fentry_skel->bss->test1_result);
for (i = 0; i < 8; i++) {
- CHECK(fentry_res[i] != 1, "result",
- "fentry_test%d failed err %lld\n", i + 1, fentry_res[i]);
- CHECK(fexit_res[i] != 1, "result",
- "fexit_test%d failed err %lld\n", i + 1, fexit_res[i]);
+ ASSERT_EQ(fentry_res[i], 1, "fentry result");
+ ASSERT_EQ(fexit_res[i], 1, "fexit result");
}
close_prog:
- fentry_test__destroy(fentry_skel);
- fexit_test__destroy(fexit_skel);
+ fentry_test_lskel__destroy(fentry_skel);
+ fexit_test_lskel__destroy(fexit_skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
index 04ebbf1cb390..ec882328eb59 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
@@ -1,37 +1,98 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
-#include "fentry_test.skel.h"
+#include "fentry_test.lskel.h"
+#include "fentry_many_args.skel.h"
-void test_fentry_test(void)
+static int fentry_test_common(struct fentry_test_lskel *fentry_skel)
{
- struct fentry_test *fentry_skel = NULL;
int err, prog_fd, i;
- __u32 duration = 0, retval;
+ int link_fd;
__u64 *result;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
- fentry_skel = fentry_test__open_and_load();
- if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n"))
- goto cleanup;
+ err = fentry_test_lskel__attach(fentry_skel);
+ if (!ASSERT_OK(err, "fentry_attach"))
+ return err;
- err = fentry_test__attach(fentry_skel);
- if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err))
- goto cleanup;
+ /* Check that already linked program can't be attached again. */
+ link_fd = fentry_test_lskel__test1__attach(fentry_skel);
+ if (!ASSERT_LT(link_fd, 0, "fentry_attach_link"))
+ return -1;
- prog_fd = bpf_program__fd(fentry_skel->progs.test1);
- err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "test_run",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
+ prog_fd = fentry_skel->progs.test1.prog_fd;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
result = (__u64 *)fentry_skel->bss;
- for (i = 0; i < 6; i++) {
- if (CHECK(result[i] != 1, "result",
- "fentry_test%d failed err %lld\n", i + 1, result[i]))
- goto cleanup;
+ for (i = 0; i < sizeof(*fentry_skel->bss) / sizeof(__u64); i++) {
+ if (!ASSERT_EQ(result[i], 1, "fentry_result"))
+ return -1;
}
+ fentry_test_lskel__detach(fentry_skel);
+
+ /* zero results for re-attach test */
+ memset(fentry_skel->bss, 0, sizeof(*fentry_skel->bss));
+ return 0;
+}
+
+static void fentry_test(void)
+{
+ struct fentry_test_lskel *fentry_skel = NULL;
+ int err;
+
+ fentry_skel = fentry_test_lskel__open();
+ if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_open"))
+ goto cleanup;
+
+ fentry_skel->keyring_id = KEY_SPEC_SESSION_KEYRING;
+ err = fentry_test_lskel__load(fentry_skel);
+ if (!ASSERT_OK(err, "fentry_skel_load"))
+ goto cleanup;
+
+ err = fentry_test_common(fentry_skel);
+ if (!ASSERT_OK(err, "fentry_first_attach"))
+ goto cleanup;
+
+ err = fentry_test_common(fentry_skel);
+ ASSERT_OK(err, "fentry_second_attach");
+
cleanup:
- fentry_test__destroy(fentry_skel);
+ fentry_test_lskel__destroy(fentry_skel);
+}
+
+static void fentry_many_args(void)
+{
+ struct fentry_many_args *fentry_skel = NULL;
+ int err;
+
+ fentry_skel = fentry_many_args__open_and_load();
+ if (!ASSERT_OK_PTR(fentry_skel, "fentry_many_args_skel_load"))
+ goto cleanup;
+
+ err = fentry_many_args__attach(fentry_skel);
+ if (!ASSERT_OK(err, "fentry_many_args_attach"))
+ goto cleanup;
+
+ ASSERT_OK(trigger_module_test_read(1), "trigger_read");
+
+ ASSERT_EQ(fentry_skel->bss->test1_result, 1,
+ "fentry_many_args_result1");
+ ASSERT_EQ(fentry_skel->bss->test2_result, 1,
+ "fentry_many_args_result2");
+ ASSERT_EQ(fentry_skel->bss->test3_result, 1,
+ "fentry_many_args_result3");
+
+cleanup:
+ fentry_many_args__destroy(fentry_skel);
+}
+
+void test_fentry_test(void)
+{
+ if (test__start_subtest("fentry"))
+ fentry_test();
+ if (test__start_subtest("fentry_many_args"))
+ fentry_many_args();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
index 5c0448910426..f29fc789c14b 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -3,6 +3,9 @@
#include <test_progs.h>
#include <network_helpers.h>
#include <bpf/btf.h>
+#include "bind4_prog.skel.h"
+#include "freplace_progmap.skel.h"
+#include "xdp_dummy.skel.h"
typedef int (*test_cb)(struct bpf_object *obj);
@@ -58,42 +61,89 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
test_cb cb)
{
struct bpf_object *obj = NULL, *tgt_obj;
- struct bpf_program **prog = NULL;
+ __u32 tgt_prog_id, info_len;
+ struct bpf_prog_info prog_info = {};
+ struct bpf_program **prog = NULL, *p;
struct bpf_link **link = NULL;
- __u32 duration = 0, retval;
int err, tgt_fd, i;
-
- err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
+ struct btf *btf;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v6,
+ .data_size_in = sizeof(pkt_v6),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
&tgt_obj, &tgt_fd);
- if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
- target_obj_file, err, errno))
+ if (!ASSERT_OK(err, "tgt_prog_load"))
return;
- DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
- .attach_prog_fd = tgt_fd,
- );
+
+ info_len = sizeof(prog_info);
+ err = bpf_prog_get_info_by_fd(tgt_fd, &prog_info, &info_len);
+ if (!ASSERT_OK(err, "tgt_fd_get_info"))
+ goto close_prog;
+
+ tgt_prog_id = prog_info.id;
+ btf = bpf_object__btf(tgt_obj);
link = calloc(sizeof(struct bpf_link *), prog_cnt);
+ if (!ASSERT_OK_PTR(link, "link_ptr"))
+ goto close_prog;
+
prog = calloc(sizeof(struct bpf_program *), prog_cnt);
- if (CHECK(!link || !prog, "alloc_memory", "failed to alloc memory"))
+ if (!ASSERT_OK_PTR(prog, "prog_ptr"))
goto close_prog;
- obj = bpf_object__open_file(obj_file, &opts);
- if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
- "failed to open %s: %ld\n", obj_file,
- PTR_ERR(obj)))
+ obj = bpf_object__open_file(obj_file, NULL);
+ if (!ASSERT_OK_PTR(obj, "obj_open"))
goto close_prog;
+ bpf_object__for_each_program(p, obj) {
+ err = bpf_program__set_attach_target(p, tgt_fd, NULL);
+ ASSERT_OK(err, "set_attach_target");
+ }
+
err = bpf_object__load(obj);
- if (CHECK(err, "obj_load", "err %d\n", err))
+ if (!ASSERT_OK(err, "obj_load"))
goto close_prog;
for (i = 0; i < prog_cnt; i++) {
- prog[i] = bpf_object__find_program_by_title(obj, prog_name[i]);
- if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name[i]))
+ struct bpf_link_info link_info;
+ struct bpf_program *pos;
+ const char *pos_sec_name;
+ char *tgt_name;
+ __s32 btf_id;
+
+ tgt_name = strstr(prog_name[i], "/");
+ if (!ASSERT_OK_PTR(tgt_name, "tgt_name"))
goto close_prog;
+ btf_id = btf__find_by_name_kind(btf, tgt_name + 1, BTF_KIND_FUNC);
+
+ prog[i] = NULL;
+ bpf_object__for_each_program(pos, obj) {
+ pos_sec_name = bpf_program__section_name(pos);
+ if (pos_sec_name && !strcmp(pos_sec_name, prog_name[i])) {
+ prog[i] = pos;
+ break;
+ }
+ }
+ if (!ASSERT_OK_PTR(prog[i], prog_name[i]))
+ goto close_prog;
+
link[i] = bpf_program__attach_trace(prog[i]);
- if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
+ if (!ASSERT_OK_PTR(link[i], "attach_trace"))
goto close_prog;
+
+ info_len = sizeof(link_info);
+ memset(&link_info, 0, sizeof(link_info));
+ err = bpf_link_get_info_by_fd(bpf_link__fd(link[i]),
+ &link_info, &info_len);
+ ASSERT_OK(err, "link_fd_get_info");
+ ASSERT_EQ(link_info.tracing.attach_type,
+ bpf_program__expected_attach_type(prog[i]),
+ "link_attach_type");
+ ASSERT_EQ(link_info.tracing.target_obj_id, tgt_prog_id, "link_tgt_obj_id");
+ ASSERT_EQ(link_info.tracing.target_btf_id, btf_id, "link_tgt_btf_id");
}
if (cb) {
@@ -105,21 +155,17 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
if (!run_prog)
goto close_prog;
- err = bpf_prog_test_run(tgt_fd, 1, &pkt_v6, sizeof(pkt_v6),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv6",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
+ err = bpf_prog_test_run_opts(tgt_fd, &topts);
+ ASSERT_OK(err, "prog_run");
+ ASSERT_EQ(topts.retval, 0, "prog_run_ret");
if (check_data_map(obj, prog_cnt, false))
goto close_prog;
close_prog:
for (i = 0; i < prog_cnt; i++)
- if (!IS_ERR_OR_NULL(link[i]))
- bpf_link__destroy(link[i]);
- if (!IS_ERR_OR_NULL(obj))
- bpf_object__close(obj);
+ bpf_link__destroy(link[i]);
+ bpf_object__close(obj);
bpf_object__close(tgt_obj);
free(link);
free(prog);
@@ -130,8 +176,8 @@ static void test_target_no_callees(void)
const char *prog_name[] = {
"fexit/test_pkt_md_access",
};
- test_fexit_bpf2bpf_common("./fexit_bpf2bpf_simple.o",
- "./test_pkt_md_access.o",
+ test_fexit_bpf2bpf_common("./fexit_bpf2bpf_simple.bpf.o",
+ "./test_pkt_md_access.bpf.o",
ARRAY_SIZE(prog_name),
prog_name, true, NULL);
}
@@ -144,8 +190,8 @@ static void test_target_yes_callees(void)
"fexit/test_pkt_access_subprog2",
"fexit/test_pkt_access_subprog3",
};
- test_fexit_bpf2bpf_common("./fexit_bpf2bpf.o",
- "./test_pkt_access.o",
+ test_fexit_bpf2bpf_common("./fexit_bpf2bpf.bpf.o",
+ "./test_pkt_access.bpf.o",
ARRAY_SIZE(prog_name),
prog_name, true, NULL);
}
@@ -162,8 +208,8 @@ static void test_func_replace(void)
"freplace/get_constant",
"freplace/test_pkt_write_access_subprog",
};
- test_fexit_bpf2bpf_common("./fexit_bpf2bpf.o",
- "./test_pkt_access.o",
+ test_fexit_bpf2bpf_common("./fexit_bpf2bpf.bpf.o",
+ "./test_pkt_access.bpf.o",
ARRAY_SIZE(prog_name),
prog_name, true, NULL);
}
@@ -173,42 +219,44 @@ static void test_func_replace_verify(void)
const char *prog_name[] = {
"freplace/do_bind",
};
- test_fexit_bpf2bpf_common("./freplace_connect4.o",
- "./connect4_prog.o",
+ test_fexit_bpf2bpf_common("./freplace_connect4.bpf.o",
+ "./connect4_prog.bpf.o",
ARRAY_SIZE(prog_name),
prog_name, false, NULL);
}
static int test_second_attach(struct bpf_object *obj)
{
- const char *prog_name = "freplace/get_constant";
- const char *tgt_name = prog_name + 9; /* cut off freplace/ */
- const char *tgt_obj_file = "./test_pkt_access.o";
+ const char *prog_name = "security_new_get_constant";
+ const char *tgt_name = "get_constant";
+ const char *tgt_obj_file = "./test_pkt_access.bpf.o";
struct bpf_program *prog = NULL;
struct bpf_object *tgt_obj;
- __u32 duration = 0, retval;
struct bpf_link *link;
int err = 0, tgt_fd;
-
- prog = bpf_object__find_program_by_title(obj, prog_name);
- if (CHECK(!prog, "find_prog", "prog %s not found\n", prog_name))
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v6,
+ .data_size_in = sizeof(pkt_v6),
+ .repeat = 1,
+ );
+
+ prog = bpf_object__find_program_by_name(obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "find_prog"))
return -ENOENT;
- err = bpf_prog_load(tgt_obj_file, BPF_PROG_TYPE_UNSPEC,
+ err = bpf_prog_test_load(tgt_obj_file, BPF_PROG_TYPE_UNSPEC,
&tgt_obj, &tgt_fd);
- if (CHECK(err, "second_prog_load", "file %s err %d errno %d\n",
- tgt_obj_file, err, errno))
+ if (!ASSERT_OK(err, "second_prog_load"))
return err;
link = bpf_program__attach_freplace(prog, tgt_fd, tgt_name);
- if (CHECK(IS_ERR(link), "second_link", "failed to attach second link prog_fd %d tgt_fd %d\n", bpf_program__fd(prog), tgt_fd))
+ if (!ASSERT_OK_PTR(link, "second_link"))
goto out;
- err = bpf_prog_test_run(tgt_fd, 1, &pkt_v6, sizeof(pkt_v6),
- NULL, NULL, &retval, &duration);
- if (CHECK(err || retval, "ipv6",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration))
+ err = bpf_prog_test_run_opts(tgt_fd, &topts);
+ if (!ASSERT_OK(err, "ipv6 test_run"))
+ goto out;
+ if (!ASSERT_OK(topts.retval, "ipv6 retval"))
goto out;
err = check_data_map(obj, 1, true);
@@ -226,8 +274,8 @@ static void test_func_replace_multi(void)
const char *prog_name[] = {
"freplace/get_constant",
};
- test_fexit_bpf2bpf_common("./freplace_get_constant.o",
- "./test_pkt_access.o",
+ test_fexit_bpf2bpf_common("./freplace_get_constant.bpf.o",
+ "./test_pkt_access.bpf.o",
ARRAY_SIZE(prog_name),
prog_name, true, test_second_attach);
}
@@ -235,45 +283,47 @@ static void test_func_replace_multi(void)
static void test_fmod_ret_freplace(void)
{
struct bpf_object *freplace_obj = NULL, *pkt_obj, *fmod_obj = NULL;
- const char *freplace_name = "./freplace_get_constant.o";
- const char *fmod_ret_name = "./fmod_ret_freplace.o";
+ const char *freplace_name = "./freplace_get_constant.bpf.o";
+ const char *fmod_ret_name = "./fmod_ret_freplace.bpf.o";
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
- const char *tgt_name = "./test_pkt_access.o";
+ const char *tgt_name = "./test_pkt_access.bpf.o";
struct bpf_link *freplace_link = NULL;
struct bpf_program *prog;
__u32 duration = 0;
- int err, pkt_fd;
+ int err, pkt_fd, attach_prog_fd;
- err = bpf_prog_load(tgt_name, BPF_PROG_TYPE_UNSPEC,
+ err = bpf_prog_test_load(tgt_name, BPF_PROG_TYPE_UNSPEC,
&pkt_obj, &pkt_fd);
/* the target prog should load fine */
if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
tgt_name, err, errno))
return;
- opts.attach_prog_fd = pkt_fd;
- freplace_obj = bpf_object__open_file(freplace_name, &opts);
- if (CHECK(IS_ERR_OR_NULL(freplace_obj), "freplace_obj_open",
- "failed to open %s: %ld\n", freplace_name,
- PTR_ERR(freplace_obj)))
+ freplace_obj = bpf_object__open_file(freplace_name, NULL);
+ if (!ASSERT_OK_PTR(freplace_obj, "freplace_obj_open"))
goto out;
+ prog = bpf_object__next_program(freplace_obj, NULL);
+ err = bpf_program__set_attach_target(prog, pkt_fd, NULL);
+ ASSERT_OK(err, "freplace__set_attach_target");
+
err = bpf_object__load(freplace_obj);
if (CHECK(err, "freplace_obj_load", "err %d\n", err))
goto out;
- prog = bpf_program__next(NULL, freplace_obj);
freplace_link = bpf_program__attach_trace(prog);
- if (CHECK(IS_ERR(freplace_link), "freplace_attach_trace", "failed to link\n"))
+ if (!ASSERT_OK_PTR(freplace_link, "freplace_attach_trace"))
goto out;
- opts.attach_prog_fd = bpf_program__fd(prog);
- fmod_obj = bpf_object__open_file(fmod_ret_name, &opts);
- if (CHECK(IS_ERR_OR_NULL(fmod_obj), "fmod_obj_open",
- "failed to open %s: %ld\n", fmod_ret_name,
- PTR_ERR(fmod_obj)))
+ fmod_obj = bpf_object__open_file(fmod_ret_name, NULL);
+ if (!ASSERT_OK_PTR(fmod_obj, "fmod_obj_open"))
goto out;
+ attach_prog_fd = bpf_program__fd(prog);
+ prog = bpf_object__next_program(fmod_obj, NULL);
+ err = bpf_program__set_attach_target(prog, attach_prog_fd, NULL);
+ ASSERT_OK(err, "fmod_ret_set_attach_target");
+
err = bpf_object__load(fmod_obj);
if (CHECK(!err, "fmod_obj_load", "loading fmod_ret should fail\n"))
goto out;
@@ -291,66 +341,235 @@ static void test_func_sockmap_update(void)
const char *prog_name[] = {
"freplace/cls_redirect",
};
- test_fexit_bpf2bpf_common("./freplace_cls_redirect.o",
- "./test_cls_redirect.o",
+ test_fexit_bpf2bpf_common("./freplace_cls_redirect.bpf.o",
+ "./test_cls_redirect.bpf.o",
ARRAY_SIZE(prog_name),
prog_name, false, NULL);
}
static void test_obj_load_failure_common(const char *obj_file,
- const char *target_obj_file)
-
+ const char *target_obj_file,
+ const char *exp_msg)
{
/*
* standalone test that asserts failure to load freplace prog
* because of invalid return code.
*/
struct bpf_object *obj = NULL, *pkt_obj;
+ struct bpf_program *prog;
+ char log_buf[64 * 1024];
int err, pkt_fd;
__u32 duration = 0;
- err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
+ err = bpf_prog_test_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
&pkt_obj, &pkt_fd);
/* the target prog should load fine */
if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
target_obj_file, err, errno))
return;
- DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
- .attach_prog_fd = pkt_fd,
- );
-
- obj = bpf_object__open_file(obj_file, &opts);
- if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
- "failed to open %s: %ld\n", obj_file,
- PTR_ERR(obj)))
+
+ obj = bpf_object__open_file(obj_file, NULL);
+ if (!ASSERT_OK_PTR(obj, "obj_open"))
goto close_prog;
+ prog = bpf_object__next_program(obj, NULL);
+ err = bpf_program__set_attach_target(prog, pkt_fd, NULL);
+ ASSERT_OK(err, "set_attach_target");
+
+ log_buf[0] = '\0';
+ if (exp_msg)
+ bpf_program__set_log_buf(prog, log_buf, sizeof(log_buf));
+ if (env.verbosity > VERBOSE_NONE)
+ bpf_program__set_log_level(prog, 2);
+
/* It should fail to load the program */
err = bpf_object__load(obj);
+	if (env.verbosity > VERBOSE_NONE && exp_msg) /* we took over the log buffer */
+ printf("VERIFIER LOG:\n================\n%s\n================\n", log_buf);
if (CHECK(!err, "bpf_obj_load should fail", "err %d\n", err))
goto close_prog;
+ if (exp_msg)
+ ASSERT_HAS_SUBSTR(log_buf, exp_msg, "fail_msg");
close_prog:
- if (!IS_ERR_OR_NULL(obj))
- bpf_object__close(obj);
+ bpf_object__close(obj);
bpf_object__close(pkt_obj);
}
static void test_func_replace_return_code(void)
{
/* test invalid return code in the replaced program */
- test_obj_load_failure_common("./freplace_connect_v4_prog.o",
- "./connect4_prog.o");
+ test_obj_load_failure_common("./freplace_connect_v4_prog.bpf.o",
+ "./connect4_prog.bpf.o", NULL);
}
static void test_func_map_prog_compatibility(void)
{
/* test with spin lock map value in the replaced program */
- test_obj_load_failure_common("./freplace_attach_probe.o",
- "./test_attach_probe.o");
+ test_obj_load_failure_common("./freplace_attach_probe.bpf.o",
+ "./test_attach_probe.bpf.o", NULL);
+}
+
+static void test_func_replace_unreliable(void)
+{
+ /* freplace'ing unreliable main prog should fail with error
+ * "Cannot replace static functions"
+ */
+ test_obj_load_failure_common("freplace_unreliable_prog.bpf.o",
+ "./verifier_btf_unreliable_prog.bpf.o",
+ "Cannot replace static functions");
+}
+
+static void test_func_replace_global_func(void)
+{
+ const char *prog_name[] = {
+ "freplace/test_pkt_access",
+ };
+
+ test_fexit_bpf2bpf_common("./freplace_global_func.bpf.o",
+ "./test_pkt_access.bpf.o",
+ ARRAY_SIZE(prog_name),
+ prog_name, false, NULL);
+}
+
+static int find_prog_btf_id(const char *name, __u32 attach_prog_fd)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ struct btf *btf;
+ int ret;
+
+ ret = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
+ if (ret)
+ return ret;
+
+ if (!info.btf_id)
+ return -EINVAL;
+
+ btf = btf__load_from_kernel_by_id(info.btf_id);
+ ret = libbpf_get_error(btf);
+ if (ret)
+ return ret;
+
+ ret = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
+ btf__free(btf);
+ return ret;
+}
+
+static int load_fentry(int attach_prog_fd, int attach_btf_id)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .expected_attach_type = BPF_TRACE_FENTRY,
+ .attach_prog_fd = attach_prog_fd,
+ .attach_btf_id = attach_btf_id,
+ );
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+
+ return bpf_prog_load(BPF_PROG_TYPE_TRACING,
+ "bind4_fentry",
+ "GPL",
+ insns,
+ ARRAY_SIZE(insns),
+ &opts);
+}
+
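+/* Note on the pair used above: attach_prog_fd names the target BPF program
+ * and attach_btf_id the BTF FUNC inside that program's BTF (resolved by
+ * find_prog_btf_id()), so the fentry attaches to another BPF program rather
+ * than to a kernel function.
+ */
+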
+static void test_fentry_to_cgroup_bpf(void)
+{
+ struct bind4_prog *skel = NULL;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int cgroup_fd = -1;
+ int fentry_fd = -1;
+ int btf_id;
+
+ cgroup_fd = test__join_cgroup("/fentry_to_cgroup_bpf");
+ if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd"))
+ return;
+
+ skel = bind4_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ goto cleanup;
+
+ skel->links.bind_v4_prog = bpf_program__attach_cgroup(skel->progs.bind_v4_prog, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.bind_v4_prog, "bpf_program__attach_cgroup"))
+ goto cleanup;
+
+ btf_id = find_prog_btf_id("bind_v4_prog", bpf_program__fd(skel->progs.bind_v4_prog));
+ if (!ASSERT_GE(btf_id, 0, "find_prog_btf_id"))
+ goto cleanup;
+
+ fentry_fd = load_fentry(bpf_program__fd(skel->progs.bind_v4_prog), btf_id);
+ if (!ASSERT_GE(fentry_fd, 0, "load_fentry"))
+ goto cleanup;
+
+ /* Make sure bpf_prog_get_info_by_fd works correctly when attaching
+ * to another BPF program.
+ */
+
+ ASSERT_OK(bpf_prog_get_info_by_fd(fentry_fd, &info, &info_len),
+ "bpf_prog_get_info_by_fd");
+
+ ASSERT_EQ(info.btf_id, 0, "info.btf_id");
+ ASSERT_EQ(info.attach_btf_id, btf_id, "info.attach_btf_id");
+ ASSERT_GT(info.attach_btf_obj_id, 0, "info.attach_btf_obj_id");
+
+cleanup:
+ if (cgroup_fd >= 0)
+ close(cgroup_fd);
+ if (fentry_fd >= 0)
+ close(fentry_fd);
+ bind4_prog__destroy(skel);
+}
+
+static void test_func_replace_progmap(void)
+{
+ struct bpf_cpumap_val value = { .qsize = 1 };
+ struct freplace_progmap *skel = NULL;
+ struct xdp_dummy *tgt_skel = NULL;
+ __u32 key = 0;
+ int err;
+
+ skel = freplace_progmap__open();
+ if (!ASSERT_OK_PTR(skel, "prog_open"))
+ return;
+
+ tgt_skel = xdp_dummy__open_and_load();
+ if (!ASSERT_OK_PTR(tgt_skel, "tgt_prog_load"))
+ goto out;
+
+ err = bpf_program__set_attach_target(skel->progs.xdp_cpumap_prog,
+ bpf_program__fd(tgt_skel->progs.xdp_dummy_prog),
+ "xdp_dummy_prog");
+ if (!ASSERT_OK(err, "set_attach_target"))
+ goto out;
+
+ err = freplace_progmap__load(skel);
+ if (!ASSERT_OK(err, "obj_load"))
+ goto out;
+
+ /* Prior to fixing the kernel, loading the PROG_TYPE_EXT 'redirect'
+ * program above will cause the map owner type of 'cpumap' to be set to
+ * PROG_TYPE_EXT. This in turn will cause the bpf_map_update_elem()
+ * below to fail, because the program we are inserting into the map is
+ * of PROG_TYPE_XDP. After fixing the kernel, the initial ownership will
+ * be correctly resolved to the *target* of the PROG_TYPE_EXT program
+ * (i.e., PROG_TYPE_XDP) and the map update will succeed.
+ */
+ value.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_drop_prog);
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.cpu_map),
+ &key, &value, 0);
+ ASSERT_OK(err, "map_update");
+
+out:
+ xdp_dummy__destroy(tgt_skel);
+ freplace_progmap__destroy(skel);
}
-void test_fexit_bpf2bpf(void)
+/* NOTE: affect other tests, must run in serial mode */
+void serial_test_fexit_bpf2bpf(void)
{
if (test__start_subtest("target_no_callees"))
test_target_no_callees();
@@ -366,8 +585,16 @@ void test_fexit_bpf2bpf(void)
test_func_replace_return_code();
if (test__start_subtest("func_map_prog_compatibility"))
test_func_map_prog_compatibility();
+ if (test__start_subtest("func_replace_unreliable"))
+ test_func_replace_unreliable();
if (test__start_subtest("func_replace_multi"))
test_func_replace_multi();
if (test__start_subtest("fmod_ret_freplace"))
test_fmod_ret_freplace();
+ if (test__start_subtest("func_replace_global_func"))
+ test_func_replace_global_func();
+ if (test__start_subtest("fentry_to_cgroup_bpf"))
+ test_fentry_to_cgroup_bpf();
+ if (test__start_subtest("func_replace_progmap"))
+ test_func_replace_progmap();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
new file mode 100644
index 000000000000..552a0875ca6d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <test_progs.h>
+#include <time.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include "fexit_sleep.lskel.h"
+
+static int do_sleep(void *skel)
+{
+ struct fexit_sleep_lskel *fexit_skel = skel;
+ struct timespec ts1 = { .tv_nsec = 1 };
+ struct timespec ts2 = { .tv_sec = 10 };
+
+ fexit_skel->bss->pid = getpid();
+ (void)syscall(__NR_nanosleep, &ts1, NULL);
+ (void)syscall(__NR_nanosleep, &ts2, NULL);
+ return 0;
+}
+
+#define STACK_SIZE (1024 * 1024)
+
+void test_fexit_sleep(void)
+{
+ struct fexit_sleep_lskel *fexit_skel = NULL;
+ int wstatus, duration = 0;
+ pid_t cpid;
+ char *child_stack = NULL;
+ int err, fexit_cnt;
+
+ fexit_skel = fexit_sleep_lskel__open_and_load();
+ if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n"))
+ goto cleanup;
+
+ err = fexit_sleep_lskel__attach(fexit_skel);
+ if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
+ goto cleanup;
+
+ child_stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE |
+ MAP_ANONYMOUS | MAP_STACK, -1, 0);
+ if (!ASSERT_NEQ(child_stack, MAP_FAILED, "mmap"))
+ goto cleanup;
+
+ cpid = clone(do_sleep, child_stack + STACK_SIZE, CLONE_FILES | SIGCHLD, fexit_skel);
+ if (CHECK(cpid == -1, "clone", "%s\n", strerror(errno)))
+ goto cleanup;
+
+ /* wait until first sys_nanosleep ends and second sys_nanosleep starts */
+ while (READ_ONCE(fexit_skel->bss->fentry_cnt) != 2);
+ fexit_cnt = READ_ONCE(fexit_skel->bss->fexit_cnt);
+ if (CHECK(fexit_cnt != 1, "fexit_cnt", "%d", fexit_cnt))
+ goto cleanup;
+
+ /* close progs and detach them. That will trigger two nop5->jmp5 rewrites
+ * in the trampolines to skip nanosleep_fexit prog.
+ * The nanosleep_fentry prog will get detached first.
+ * The nanosleep_fexit prog will get detached second.
+	 * Detaching will trigger freeing of both progs' JITed images.
+	 * There will be two dying bpf_tramp_image-s, but only the initial
+	 * bpf_tramp_image (with both _fentry and _fexit progs) will be stuck
+	 * waiting for percpu_ref_kill to confirm. The other one
+	 * will be freed quickly.
+ */
+ close(fexit_skel->progs.nanosleep_fentry.prog_fd);
+ close(fexit_skel->progs.nanosleep_fexit.prog_fd);
+ fexit_sleep_lskel__detach(fexit_skel);
+
+ /* kill the thread to unwind sys_nanosleep stack through the trampoline */
+ kill(cpid, 9);
+
+ if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", "%s\n", strerror(errno)))
+ goto cleanup;
+ if (CHECK(WEXITSTATUS(wstatus) != 0, "exitstatus", "failed"))
+ goto cleanup;
+
+ /* The bypassed nanosleep_fexit prog shouldn't have executed.
+	 * Unlike the progs, the maps were not freed and are still directly accessible.
+ */
+ fexit_cnt = READ_ONCE(fexit_skel->bss->fexit_cnt);
+ if (CHECK(fexit_cnt != 1, "fexit_cnt", "%d", fexit_cnt))
+ goto cleanup;
+
+cleanup:
+ munmap(child_stack, STACK_SIZE);
+ fexit_sleep_lskel__destroy(fexit_skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
index 3b9dbf7433f0..14c91b6f1e83 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
@@ -1,76 +1,60 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
+#include "bpf_util.h"
-/* x86-64 fits 55 JITed and 43 interpreted progs into half page */
-#define CNT 40
-
-void test_fexit_stress(void)
+void serial_test_fexit_stress(void)
{
- char test_skb[128] = {};
- int fexit_fd[CNT] = {};
- int link_fd[CNT] = {};
- __u32 duration = 0;
- char error[4096];
- __u32 prog_ret;
- int err, i, filter_fd;
+ int bpf_max_tramp_links, err, i;
+ int *fd, *fexit_fd, *link_fd;
+
+ bpf_max_tramp_links = get_bpf_max_tramp_links();
+ if (!ASSERT_GE(bpf_max_tramp_links, 1, "bpf_max_tramp_links"))
+ return;
+ fd = calloc(bpf_max_tramp_links * 2, sizeof(*fd));
+ if (!ASSERT_OK_PTR(fd, "fd"))
+ return;
+ fexit_fd = fd;
+ link_fd = fd + bpf_max_tramp_links;
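+	/* fd[] is split in half: the first bpf_max_tramp_links entries hold
+	 * prog fds, the second half the corresponding link fds.
+	 */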
const struct bpf_insn trace_program[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
- struct bpf_load_program_attr load_attr = {
- .prog_type = BPF_PROG_TYPE_TRACING,
- .license = "GPL",
- .insns = trace_program,
- .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn),
+ LIBBPF_OPTS(bpf_prog_load_opts, trace_opts,
.expected_attach_type = BPF_TRACE_FEXIT,
- };
+ );
- const struct bpf_insn skb_program[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
-
- struct bpf_load_program_attr skb_load_attr = {
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .license = "GPL",
- .insns = skb_program,
- .insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn),
- };
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1",
- load_attr.expected_attach_type);
- if (CHECK(err <= 0, "find_vmlinux_btf_id", "failed: %d\n", err))
+ trace_opts.expected_attach_type);
+ if (!ASSERT_GT(err, 0, "find_vmlinux_btf_id"))
goto out;
- load_attr.attach_btf_id = err;
-
- for (i = 0; i < CNT; i++) {
- fexit_fd[i] = bpf_load_program_xattr(&load_attr, error, sizeof(error));
- if (CHECK(fexit_fd[i] < 0, "fexit loaded",
- "failed: %d errno %d\n", fexit_fd[i], errno))
+ trace_opts.attach_btf_id = err;
+
+ for (i = 0; i < bpf_max_tramp_links; i++) {
+ fexit_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
+ trace_program,
+ ARRAY_SIZE(trace_program),
+ &trace_opts);
+ if (!ASSERT_GE(fexit_fd[i], 0, "fexit load"))
goto out;
- link_fd[i] = bpf_raw_tracepoint_open(NULL, fexit_fd[i]);
- if (CHECK(link_fd[i] < 0, "fexit attach failed",
- "prog %d failed: %d err %d\n", i, link_fd[i], errno))
+ link_fd[i] = bpf_link_create(fexit_fd[i], 0, BPF_TRACE_FEXIT, NULL);
+ if (!ASSERT_GE(link_fd[i], 0, "fexit attach"))
goto out;
}
- filter_fd = bpf_load_program_xattr(&skb_load_attr, error, sizeof(error));
- if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n",
- filter_fd, errno))
- goto out;
+ err = bpf_prog_test_run_opts(fexit_fd[0], &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
- err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0,
- 0, &prog_ret, 0);
- close(filter_fd);
- CHECK_FAIL(err);
out:
- for (i = 0; i < CNT; i++) {
- if (link_fd[i])
+ for (i = 0; i < bpf_max_tramp_links; i++) {
+ if (link_fd[i] > 0)
close(link_fd[i]);
- if (fexit_fd[i])
+ if (fexit_fd[i] > 0)
close(fexit_fd[i]);
}
+ free(fd);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
index 78d7a2765c27..94eed753560c 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
@@ -1,37 +1,98 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
-#include "fexit_test.skel.h"
+#include "fexit_test.lskel.h"
+#include "fexit_many_args.skel.h"
-void test_fexit_test(void)
+static int fexit_test_common(struct fexit_test_lskel *fexit_skel)
{
- struct fexit_test *fexit_skel = NULL;
int err, prog_fd, i;
- __u32 duration = 0, retval;
+ int link_fd;
__u64 *result;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
- fexit_skel = fexit_test__open_and_load();
- if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n"))
- goto cleanup;
+ err = fexit_test_lskel__attach(fexit_skel);
+ if (!ASSERT_OK(err, "fexit_attach"))
+ return err;
- err = fexit_test__attach(fexit_skel);
- if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
- goto cleanup;
+ /* Check that already linked program can't be attached again. */
+ link_fd = fexit_test_lskel__test1__attach(fexit_skel);
+ if (!ASSERT_LT(link_fd, 0, "fexit_attach_link"))
+ return -1;
- prog_fd = bpf_program__fd(fexit_skel->progs.test1);
- err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "test_run",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
+ prog_fd = fexit_skel->progs.test1.prog_fd;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
result = (__u64 *)fexit_skel->bss;
- for (i = 0; i < 6; i++) {
- if (CHECK(result[i] != 1, "result",
- "fexit_test%d failed err %lld\n", i + 1, result[i]))
- goto cleanup;
+ for (i = 0; i < sizeof(*fexit_skel->bss) / sizeof(__u64); i++) {
+ if (!ASSERT_EQ(result[i], 1, "fexit_result"))
+ return -1;
}
+ fexit_test_lskel__detach(fexit_skel);
+
+ /* zero results for re-attach test */
+ memset(fexit_skel->bss, 0, sizeof(*fexit_skel->bss));
+ return 0;
+}
+
+static void fexit_test(void)
+{
+ struct fexit_test_lskel *fexit_skel = NULL;
+ int err;
+
+ fexit_skel = fexit_test_lskel__open();
+ if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_open"))
+ goto cleanup;
+
+ fexit_skel->keyring_id = KEY_SPEC_SESSION_KEYRING;
+ err = fexit_test_lskel__load(fexit_skel);
+ if (!ASSERT_OK(err, "fexit_skel_load"))
+ goto cleanup;
+
+ err = fexit_test_common(fexit_skel);
+ if (!ASSERT_OK(err, "fexit_first_attach"))
+ goto cleanup;
+
+ err = fexit_test_common(fexit_skel);
+ ASSERT_OK(err, "fexit_second_attach");
+
cleanup:
- fexit_test__destroy(fexit_skel);
+ fexit_test_lskel__destroy(fexit_skel);
+}
+
+static void fexit_many_args(void)
+{
+ struct fexit_many_args *fexit_skel = NULL;
+ int err;
+
+ fexit_skel = fexit_many_args__open_and_load();
+ if (!ASSERT_OK_PTR(fexit_skel, "fexit_many_args_skel_load"))
+ goto cleanup;
+
+ err = fexit_many_args__attach(fexit_skel);
+ if (!ASSERT_OK(err, "fexit_many_args_attach"))
+ goto cleanup;
+
+ ASSERT_OK(trigger_module_test_read(1), "trigger_read");
+
+ ASSERT_EQ(fexit_skel->bss->test1_result, 1,
+ "fexit_many_args_result1");
+ ASSERT_EQ(fexit_skel->bss->test2_result, 1,
+ "fexit_many_args_result2");
+ ASSERT_EQ(fexit_skel->bss->test3_result, 1,
+ "fexit_many_args_result3");
+
+cleanup:
+ fexit_many_args__destroy(fexit_skel);
+}
+
+void test_fexit_test(void)
+{
+ if (test__start_subtest("fexit"))
+ fexit_test();
+ if (test__start_subtest("fexit_many_args"))
+ fexit_many_args();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
new file mode 100644
index 000000000000..bd7658958004
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/rtnetlink.h>
+#include <sys/types.h>
+#include <net/if.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "fib_lookup.skel.h"
+
+#define NS_TEST "fib_lookup_ns"
+#define IPV6_IFACE_ADDR "face::face"
+#define IPV6_IFACE_ADDR_SEC "cafe::cafe"
+#define IPV6_ADDR_DST "face::3"
+#define IPV6_NUD_FAILED_ADDR "face::1"
+#define IPV6_NUD_STALE_ADDR "face::2"
+#define IPV4_IFACE_ADDR "10.0.0.254"
+#define IPV4_IFACE_ADDR_SEC "10.1.0.254"
+#define IPV4_ADDR_DST "10.2.0.254"
+#define IPV4_NUD_FAILED_ADDR "10.0.0.1"
+#define IPV4_NUD_STALE_ADDR "10.0.0.2"
+#define IPV4_TBID_ADDR "172.0.0.254"
+#define IPV4_TBID_NET "172.0.0.0"
+#define IPV4_TBID_DST "172.0.0.2"
+#define IPV6_TBID_ADDR "fd00::FFFF"
+#define IPV6_TBID_NET "fd00::"
+#define IPV6_TBID_DST "fd00::2"
+#define MARK_NO_POLICY 33
+#define MARK 42
+#define MARK_TABLE "200"
+#define IPV4_REMOTE_DST "1.2.3.4"
+#define IPV4_LOCAL "10.4.0.3"
+#define IPV4_GW1 "10.4.0.1"
+#define IPV4_GW2 "10.4.0.2"
+#define IPV6_REMOTE_DST "be:ef::b0:10"
+#define IPV6_LOCAL "fd01::3"
+#define IPV6_GW1 "fd01::1"
+#define IPV6_GW2 "fd01::2"
+#define DMAC "11:11:11:11:11:11"
+#define DMAC_INIT { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, }
+#define DMAC2 "01:01:01:01:01:01"
+#define DMAC_INIT2 { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, }
+
+struct fib_lookup_test {
+ const char *desc;
+ const char *daddr;
+ int expected_ret;
+ const char *expected_src;
+ const char *expected_dst;
+ int lookup_flags;
+ __u32 tbid;
+ __u8 dmac[6];
+ __u32 mark;
+};
+
+static const struct fib_lookup_test tests[] = {
+ { .desc = "IPv6 failed neigh",
+ .daddr = IPV6_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_NO_NEIGH, },
+ { .desc = "IPv6 stale neigh",
+ .daddr = IPV6_NUD_STALE_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .dmac = DMAC_INIT, },
+ { .desc = "IPv6 skip neigh",
+ .daddr = IPV6_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .lookup_flags = BPF_FIB_LOOKUP_SKIP_NEIGH, },
+ { .desc = "IPv4 failed neigh",
+ .daddr = IPV4_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_NO_NEIGH, },
+ { .desc = "IPv4 stale neigh",
+ .daddr = IPV4_NUD_STALE_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .dmac = DMAC_INIT, },
+ { .desc = "IPv4 skip neigh",
+ .daddr = IPV4_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .lookup_flags = BPF_FIB_LOOKUP_SKIP_NEIGH, },
+ { .desc = "IPv4 TBID lookup failure",
+ .daddr = IPV4_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_NOT_FWDED,
+ .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID,
+ .tbid = RT_TABLE_MAIN, },
+ { .desc = "IPv4 TBID lookup success",
+ .daddr = IPV4_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID, .tbid = 100,
+ .dmac = DMAC_INIT2, },
+ { .desc = "IPv6 TBID lookup failure",
+ .daddr = IPV6_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_NOT_FWDED,
+ .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID,
+ .tbid = RT_TABLE_MAIN, },
+ { .desc = "IPv6 TBID lookup success",
+ .daddr = IPV6_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID, .tbid = 100,
+ .dmac = DMAC_INIT2, },
+ { .desc = "IPv4 set src addr from netdev",
+ .daddr = IPV4_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_src = IPV4_IFACE_ADDR,
+ .lookup_flags = BPF_FIB_LOOKUP_SRC | BPF_FIB_LOOKUP_SKIP_NEIGH, },
+ { .desc = "IPv6 set src addr from netdev",
+ .daddr = IPV6_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_src = IPV6_IFACE_ADDR,
+ .lookup_flags = BPF_FIB_LOOKUP_SRC | BPF_FIB_LOOKUP_SKIP_NEIGH, },
+ { .desc = "IPv4 set prefsrc addr from route",
+ .daddr = IPV4_ADDR_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_src = IPV4_IFACE_ADDR_SEC,
+ .lookup_flags = BPF_FIB_LOOKUP_SRC | BPF_FIB_LOOKUP_SKIP_NEIGH, },
+ { .desc = "IPv6 set prefsrc addr route",
+ .daddr = IPV6_ADDR_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_src = IPV6_IFACE_ADDR_SEC,
+ .lookup_flags = BPF_FIB_LOOKUP_SRC | BPF_FIB_LOOKUP_SKIP_NEIGH, },
+ /* policy routing */
+ { .desc = "IPv4 policy routing, default",
+ .daddr = IPV4_REMOTE_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_dst = IPV4_GW1,
+ .lookup_flags = BPF_FIB_LOOKUP_MARK | BPF_FIB_LOOKUP_SKIP_NEIGH, },
+ { .desc = "IPv4 policy routing, mark doesn't point to a policy",
+ .daddr = IPV4_REMOTE_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_dst = IPV4_GW1,
+ .lookup_flags = BPF_FIB_LOOKUP_MARK | BPF_FIB_LOOKUP_SKIP_NEIGH,
+ .mark = MARK_NO_POLICY, },
+ { .desc = "IPv4 policy routing, mark points to a policy",
+ .daddr = IPV4_REMOTE_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_dst = IPV4_GW2,
+ .lookup_flags = BPF_FIB_LOOKUP_MARK | BPF_FIB_LOOKUP_SKIP_NEIGH,
+ .mark = MARK, },
+ { .desc = "IPv4 policy routing, mark points to a policy, but no flag",
+ .daddr = IPV4_REMOTE_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_dst = IPV4_GW1,
+ .lookup_flags = BPF_FIB_LOOKUP_SKIP_NEIGH,
+ .mark = MARK, },
+ { .desc = "IPv6 policy routing, default",
+ .daddr = IPV6_REMOTE_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_dst = IPV6_GW1,
+ .lookup_flags = BPF_FIB_LOOKUP_MARK | BPF_FIB_LOOKUP_SKIP_NEIGH, },
+ { .desc = "IPv6 policy routing, mark doesn't point to a policy",
+ .daddr = IPV6_REMOTE_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_dst = IPV6_GW1,
+ .lookup_flags = BPF_FIB_LOOKUP_MARK | BPF_FIB_LOOKUP_SKIP_NEIGH,
+ .mark = MARK_NO_POLICY, },
+ { .desc = "IPv6 policy routing, mark points to a policy",
+ .daddr = IPV6_REMOTE_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_dst = IPV6_GW2,
+ .lookup_flags = BPF_FIB_LOOKUP_MARK | BPF_FIB_LOOKUP_SKIP_NEIGH,
+ .mark = MARK, },
+ { .desc = "IPv6 policy routing, mark points to a policy, but no flag",
+ .daddr = IPV6_REMOTE_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
+ .expected_dst = IPV6_GW1,
+ .lookup_flags = BPF_FIB_LOOKUP_SKIP_NEIGH,
+ .mark = MARK, },
+};
+
+static int setup_netns(void)
+{
+ int err;
+
+ SYS(fail, "ip link add veth1 type veth peer name veth2");
+ SYS(fail, "ip link set dev veth1 up");
+ SYS(fail, "ip link set dev veth2 up");
+
+ err = write_sysctl("/proc/sys/net/ipv4/neigh/veth1/gc_stale_time", "900");
+ if (!ASSERT_OK(err, "write_sysctl(net.ipv4.neigh.veth1.gc_stale_time)"))
+ goto fail;
+
+ err = write_sysctl("/proc/sys/net/ipv6/neigh/veth1/gc_stale_time", "900");
+ if (!ASSERT_OK(err, "write_sysctl(net.ipv6.neigh.veth1.gc_stale_time)"))
+ goto fail;
+
+ SYS(fail, "ip addr add %s/64 dev veth1 nodad", IPV6_IFACE_ADDR);
+ SYS(fail, "ip neigh add %s dev veth1 nud failed", IPV6_NUD_FAILED_ADDR);
+ SYS(fail, "ip neigh add %s dev veth1 lladdr %s nud stale", IPV6_NUD_STALE_ADDR, DMAC);
+
+ SYS(fail, "ip addr add %s/24 dev veth1", IPV4_IFACE_ADDR);
+ SYS(fail, "ip neigh add %s dev veth1 nud failed", IPV4_NUD_FAILED_ADDR);
+ SYS(fail, "ip neigh add %s dev veth1 lladdr %s nud stale", IPV4_NUD_STALE_ADDR, DMAC);
+
+ /* Setup for prefsrc IP addr selection */
+ SYS(fail, "ip addr add %s/24 dev veth1", IPV4_IFACE_ADDR_SEC);
+ SYS(fail, "ip route add %s/32 dev veth1 src %s", IPV4_ADDR_DST, IPV4_IFACE_ADDR_SEC);
+
+ SYS(fail, "ip addr add %s/64 dev veth1 nodad", IPV6_IFACE_ADDR_SEC);
+ SYS(fail, "ip route add %s/128 dev veth1 src %s", IPV6_ADDR_DST, IPV6_IFACE_ADDR_SEC);
+
+ /* Setup for tbid lookup tests */
+ SYS(fail, "ip addr add %s/24 dev veth2", IPV4_TBID_ADDR);
+ SYS(fail, "ip route del %s/24 dev veth2", IPV4_TBID_NET);
+ SYS(fail, "ip route add table 100 %s/24 dev veth2", IPV4_TBID_NET);
+ SYS(fail, "ip neigh add %s dev veth2 lladdr %s nud stale", IPV4_TBID_DST, DMAC2);
+
+ SYS(fail, "ip addr add %s/64 dev veth2", IPV6_TBID_ADDR);
+ SYS(fail, "ip -6 route del %s/64 dev veth2", IPV6_TBID_NET);
+ SYS(fail, "ip -6 route add table 100 %s/64 dev veth2", IPV6_TBID_NET);
+ SYS(fail, "ip neigh add %s dev veth2 lladdr %s nud stale", IPV6_TBID_DST, DMAC2);
+
+ err = write_sysctl("/proc/sys/net/ipv4/conf/veth1/forwarding", "1");
+ if (!ASSERT_OK(err, "write_sysctl(net.ipv4.conf.veth1.forwarding)"))
+ goto fail;
+
+ err = write_sysctl("/proc/sys/net/ipv6/conf/veth1/forwarding", "1");
+ if (!ASSERT_OK(err, "write_sysctl(net.ipv6.conf.veth1.forwarding)"))
+ goto fail;
+
+ /* Setup for policy routing tests */
+ SYS(fail, "ip addr add %s/24 dev veth1", IPV4_LOCAL);
+ SYS(fail, "ip addr add %s/64 dev veth1 nodad", IPV6_LOCAL);
+ SYS(fail, "ip route add %s/32 via %s", IPV4_REMOTE_DST, IPV4_GW1);
+ SYS(fail, "ip route add %s/32 via %s table %s", IPV4_REMOTE_DST, IPV4_GW2, MARK_TABLE);
+ SYS(fail, "ip -6 route add %s/128 via %s", IPV6_REMOTE_DST, IPV6_GW1);
+ SYS(fail, "ip -6 route add %s/128 via %s table %s", IPV6_REMOTE_DST, IPV6_GW2, MARK_TABLE);
+ SYS(fail, "ip rule add prio 2 fwmark %d lookup %s", MARK, MARK_TABLE);
+ SYS(fail, "ip -6 rule add prio 2 fwmark %d lookup %s", MARK, MARK_TABLE);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static int set_lookup_params(struct bpf_fib_lookup *params,
+ const struct fib_lookup_test *test,
+ int ifindex)
+{
+ int ret;
+
+ memset(params, 0, sizeof(*params));
+
+ params->l4_protocol = IPPROTO_TCP;
+ params->ifindex = ifindex;
+ params->tbid = test->tbid;
+ params->mark = test->mark;
+
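+	/* Trying the daddr as IPv6 first doubles as family detection: a
+	 * successful inet_pton(AF_INET6, ...) both parses the address and
+	 * selects AF_INET6.
+	 */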
+ if (inet_pton(AF_INET6, test->daddr, params->ipv6_dst) == 1) {
+ params->family = AF_INET6;
+ if (!(test->lookup_flags & BPF_FIB_LOOKUP_SRC)) {
+ ret = inet_pton(AF_INET6, IPV6_IFACE_ADDR, params->ipv6_src);
+ if (!ASSERT_EQ(ret, 1, "inet_pton(IPV6_IFACE_ADDR)"))
+ return -1;
+ }
+
+ return 0;
+ }
+
+ ret = inet_pton(AF_INET, test->daddr, &params->ipv4_dst);
+ if (!ASSERT_EQ(ret, 1, "convert IP[46] address"))
+ return -1;
+ params->family = AF_INET;
+
+ if (!(test->lookup_flags & BPF_FIB_LOOKUP_SRC)) {
+ ret = inet_pton(AF_INET, IPV4_IFACE_ADDR, &params->ipv4_src);
+ if (!ASSERT_EQ(ret, 1, "inet_pton(IPV4_IFACE_ADDR)"))
+ return -1;
+ }
+
+ return 0;
+}
+
+static void mac_str(char *b, const __u8 *mac)
+{
+ sprintf(b, "%02X:%02X:%02X:%02X:%02X:%02X",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+}
+
+static void assert_ip_address(int family, void *addr, const char *expected_str)
+{
+ char str[INET6_ADDRSTRLEN];
+ u8 expected_addr[16];
+ int addr_len = 0;
+ int ret;
+
+ switch (family) {
+ case AF_INET6:
+ ret = inet_pton(AF_INET6, expected_str, expected_addr);
+ ASSERT_EQ(ret, 1, "inet_pton(AF_INET6, expected_str)");
+ addr_len = 16;
+ break;
+ case AF_INET:
+ ret = inet_pton(AF_INET, expected_str, expected_addr);
+ ASSERT_EQ(ret, 1, "inet_pton(AF_INET, expected_str)");
+ addr_len = 4;
+ break;
+ default:
+ PRINT_FAIL("invalid address family: %d", family);
+ break;
+ }
+
+ if (memcmp(addr, expected_addr, addr_len)) {
+ inet_ntop(family, addr, str, sizeof(str));
+ PRINT_FAIL("expected %s actual %s ", expected_str, str);
+ }
+}
+
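+/* struct bpf_fib_lookup keeps ipv4_src/ipv6_src (and the dst pair) in the
+ * same union, so passing ipv6_src here covers both address families.
+ */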
+static void assert_src_ip(struct bpf_fib_lookup *params, const char *expected)
+{
+ assert_ip_address(params->family, params->ipv6_src, expected);
+}
+
+static void assert_dst_ip(struct bpf_fib_lookup *params, const char *expected)
+{
+ assert_ip_address(params->family, params->ipv6_dst, expected);
+}
+
+void test_fib_lookup(void)
+{
+ struct bpf_fib_lookup *fib_params;
+ struct nstoken *nstoken = NULL;
+ struct __sk_buff skb = { };
+ struct fib_lookup *skel;
+ int prog_fd, err, ret, i;
+
+	/* The test does not use skb->data, so use pkt_v6 for
+	 * both the v4 and v6 tests.
+	 */
+ LIBBPF_OPTS(bpf_test_run_opts, run_opts,
+ .data_in = &pkt_v6,
+ .data_size_in = sizeof(pkt_v6),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ );
+
+ skel = fib_lookup__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel open_and_load"))
+ return;
+ prog_fd = bpf_program__fd(skel->progs.fib_lookup);
+
+ SYS(fail, "ip netns add %s", NS_TEST);
+
+ nstoken = open_netns(NS_TEST);
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto fail;
+
+ if (setup_netns())
+ goto fail;
+
+ skb.ifindex = if_nametoindex("veth1");
+ if (!ASSERT_NEQ(skb.ifindex, 0, "if_nametoindex(veth1)"))
+ goto fail;
+
+ fib_params = &skel->bss->fib_params;
+
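+	/* fib_params lives in the skeleton's bss: userspace fills in the
+	 * inputs, the BPF program runs the lookup on it, and the results
+	 * are checked back here.
+	 */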
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ printf("Testing %s ", tests[i].desc);
+
+ if (set_lookup_params(fib_params, &tests[i], skb.ifindex))
+ continue;
+
+ skel->bss->fib_lookup_ret = -1;
+ skel->bss->lookup_flags = tests[i].lookup_flags;
+
+ err = bpf_prog_test_run_opts(prog_fd, &run_opts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ continue;
+
+ ASSERT_EQ(skel->bss->fib_lookup_ret, tests[i].expected_ret,
+ "fib_lookup_ret");
+
+ if (tests[i].expected_src)
+ assert_src_ip(fib_params, tests[i].expected_src);
+
+ if (tests[i].expected_dst)
+ assert_dst_ip(fib_params, tests[i].expected_dst);
+
+ ret = memcmp(tests[i].dmac, fib_params->dmac, sizeof(tests[i].dmac));
+ if (!ASSERT_EQ(ret, 0, "dmac not match")) {
+ char expected[18], actual[18];
+
+ mac_str(expected, tests[i].dmac);
+ mac_str(actual, fib_params->dmac);
+ printf("dmac expected %s actual %s ", expected, actual);
+ }
+
+		/* Ensure tbid is zeroed out after the fib lookup. */
+ if (tests[i].lookup_flags & BPF_FIB_LOOKUP_DIRECT) {
+ if (!ASSERT_EQ(skel->bss->fib_params.tbid, 0,
+ "expected fib_params.tbid to be zero"))
+ goto fail;
+ }
+ }
+
+fail:
+ if (nstoken)
+ close_netns(nstoken);
+ SYS_NOFAIL("ip netns del " NS_TEST);
+ fib_lookup__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/file_reader.c b/tools/testing/selftests/bpf/prog_tests/file_reader.c
new file mode 100644
index 000000000000..5cde32b35da4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/file_reader.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "file_reader.skel.h"
+#include "file_reader_fail.skel.h"
+#include <dlfcn.h>
+#include <sys/mman.h>
+
+const char *user_ptr = "hello world";
+char file_contents[256000];
+
+void *get_executable_base_addr(void)
+{
+ Dl_info info;
+
+ if (!dladdr((void *)&get_executable_base_addr, &info)) {
+ fprintf(stderr, "dladdr failed\n");
+ return NULL;
+ }
+
+ return info.dli_fbase;
+}
+
+static int initialize_file_contents(void)
+{
+ int fd, page_sz = sysconf(_SC_PAGESIZE);
+ ssize_t n = 0, cur, off;
+ void *addr;
+
+ fd = open("/proc/self/exe", O_RDONLY);
+	if (!ASSERT_OK_FD(fd, "Open /proc/self/exe"))
+ return 1;
+
+ do {
+ cur = read(fd, file_contents + n, sizeof(file_contents) - n);
+ if (!ASSERT_GT(cur, 0, "read success"))
+ break;
+ n += cur;
+ } while (n < sizeof(file_contents));
+
+ close(fd);
+
+	if (!ASSERT_EQ(n, sizeof(file_contents), "Read /proc/self/exe"))
+ return 1;
+
+ addr = get_executable_base_addr();
+ if (!ASSERT_NEQ(addr, NULL, "get executable address"))
+ return 1;
+
+ /* page-align base file address */
+ addr = (void *)((unsigned long)addr & ~(page_sz - 1));
+
+ /*
+ * Page out range 0..512K, use 0..256K for positive tests and
+ * 256K..512K for negative tests expecting page faults
+ */
+ for (off = 0; off < sizeof(file_contents) * 2; off += page_sz) {
+ if (!ASSERT_OK(madvise(addr + off, page_sz, MADV_PAGEOUT),
+ "madvise pageout"))
+ return errno;
+ }
+
+ return 0;
+}
+
+static void run_test(const char *prog_name)
+{
+ struct file_reader *skel;
+ struct bpf_program *prog;
+ int err, fd;
+
+ err = initialize_file_contents();
+ if (!ASSERT_OK(err, "initialize file contents"))
+ return;
+
+ skel = file_reader__open();
+ if (!ASSERT_OK_PTR(skel, "file_reader__open"))
+ return;
+
+ bpf_object__for_each_program(prog, skel->obj) {
+ bpf_program__set_autoload(prog, strcmp(bpf_program__name(prog), prog_name) == 0);
+ }
+
+ memcpy(skel->bss->user_buf, file_contents, sizeof(file_contents));
+ skel->bss->pid = getpid();
+
+ err = file_reader__load(skel);
+ if (!ASSERT_OK(err, "file_reader__load"))
+ goto cleanup;
+
+ err = file_reader__attach(skel);
+ if (!ASSERT_OK(err, "file_reader__attach"))
+ goto cleanup;
+
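+	/* Open and close the binary to trigger the attached on-open program. */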
+ fd = open("/proc/self/exe", O_RDONLY);
+ if (fd >= 0)
+ close(fd);
+
+ ASSERT_EQ(skel->bss->err, 0, "err");
+ ASSERT_EQ(skel->bss->run_success, 1, "run_success");
+cleanup:
+ file_reader__destroy(skel);
+}
+
+void test_file_reader(void)
+{
+ if (test__start_subtest("on_open_expect_fault"))
+ run_test("on_open_expect_fault");
+
+ if (test__start_subtest("on_open_validate_file_read"))
+ run_test("on_open_validate_file_read");
+
+ if (test__start_subtest("negative"))
+ RUN_TESTS(file_reader_fail);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
new file mode 100644
index 000000000000..e40114620751
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
@@ -0,0 +1,652 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */
+
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/limits.h>
+#include <test_progs.h>
+#include "trace_helpers.h"
+#include "test_fill_link_info.skel.h"
+#include "bpf/libbpf_internal.h"
+
+#define TP_CAT "sched"
+#define TP_NAME "sched_switch"
+
+static const char *kmulti_syms[] = {
+ "bpf_fentry_test2",
+ "bpf_fentry_test1",
+ "bpf_fentry_test3",
+};
+#define KMULTI_CNT ARRAY_SIZE(kmulti_syms)
+static __u64 kmulti_addrs[KMULTI_CNT];
+static __u64 kmulti_cookies[] = { 3, 1, 2 };
+
+#define KPROBE_FUNC "bpf_fentry_test1"
+static __u64 kprobe_addr;
+
+#define UPROBE_FILE "/proc/self/exe"
+static ssize_t uprobe_offset;
+/* uprobe attach point */
+static noinline void uprobe_func(void)
+{
+ asm volatile ("");
+}
+
+#define PERF_EVENT_COOKIE 0xdeadbeef
+
+static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr,
+ ssize_t offset, ssize_t entry_offset)
+{
+ ssize_t ref_ctr_offset = entry_offset /* ref_ctr_offset for uprobes */;
+ struct bpf_link_info info;
+ __u32 len = sizeof(info);
+ char buf[PATH_MAX];
+ int err;
+
+ memset(&info, 0, sizeof(info));
+ buf[0] = '\0';
+
+again:
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ if (!ASSERT_OK(err, "get_link_info"))
+ return -1;
+
+ if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "link_type"))
+ return -1;
+ if (!ASSERT_EQ(info.perf_event.type, type, "perf_type_match"))
+ return -1;
+
+ switch (info.perf_event.type) {
+ case BPF_PERF_EVENT_KPROBE:
+ case BPF_PERF_EVENT_KRETPROBE:
+ ASSERT_EQ(info.perf_event.kprobe.offset, offset, "kprobe_offset");
+
+		/* addr is 0 when kptr_restrict blocks kallsyms or MAX_SYMS is reached */
+ if (addr)
+ ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset,
+ "kprobe_addr");
+
+ ASSERT_EQ(info.perf_event.kprobe.cookie, PERF_EVENT_COOKIE, "kprobe_cookie");
+
+ ASSERT_EQ(info.perf_event.kprobe.name_len, strlen(KPROBE_FUNC) + 1,
+ "name_len");
+ if (!info.perf_event.kprobe.func_name) {
+ info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
+ info.perf_event.kprobe.name_len = sizeof(buf);
+ goto again;
+ }
+
+ err = strncmp(u64_to_ptr(info.perf_event.kprobe.func_name), KPROBE_FUNC,
+ strlen(KPROBE_FUNC));
+ ASSERT_EQ(err, 0, "cmp_kprobe_func_name");
+ break;
+ case BPF_PERF_EVENT_TRACEPOINT:
+ ASSERT_EQ(info.perf_event.tracepoint.name_len, strlen(TP_NAME) + 1,
+ "name_len");
+ if (!info.perf_event.tracepoint.tp_name) {
+ info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
+ info.perf_event.tracepoint.name_len = sizeof(buf);
+ goto again;
+ }
+
+ ASSERT_EQ(info.perf_event.tracepoint.cookie, PERF_EVENT_COOKIE, "tracepoint_cookie");
+
+ err = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME,
+ strlen(TP_NAME));
+ ASSERT_EQ(err, 0, "cmp_tp_name");
+ break;
+ case BPF_PERF_EVENT_UPROBE:
+ case BPF_PERF_EVENT_URETPROBE:
+ ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset");
+ ASSERT_EQ(info.perf_event.uprobe.ref_ctr_offset, ref_ctr_offset, "uprobe_ref_ctr_offset");
+
+ ASSERT_EQ(info.perf_event.uprobe.name_len, strlen(UPROBE_FILE) + 1,
+ "name_len");
+ if (!info.perf_event.uprobe.file_name) {
+ info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
+ info.perf_event.uprobe.name_len = sizeof(buf);
+ goto again;
+ }
+
+ ASSERT_EQ(info.perf_event.uprobe.cookie, PERF_EVENT_COOKIE, "uprobe_cookie");
+
+ err = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE,
+ strlen(UPROBE_FILE));
+ ASSERT_EQ(err, 0, "cmp_file_name");
+ break;
+ case BPF_PERF_EVENT_EVENT:
+ ASSERT_EQ(info.perf_event.event.type, PERF_TYPE_SOFTWARE, "event_type");
+ ASSERT_EQ(info.perf_event.event.config, PERF_COUNT_SW_PAGE_FAULTS, "event_config");
+ ASSERT_EQ(info.perf_event.event.cookie, PERF_EVENT_COOKIE, "event_cookie");
+ break;
+ default:
+ err = -1;
+ break;
+ }
+ return err;
+}
+
+static void kprobe_fill_invalid_user_buffer(int fd)
+{
+ struct bpf_link_info info;
+ __u32 len = sizeof(info);
+ int err;
+
+ memset(&info, 0, sizeof(info));
+
+ info.perf_event.kprobe.func_name = 0x1; /* invalid address */
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EINVAL, "invalid_buff_and_len");
+
+ info.perf_event.kprobe.name_len = 64;
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EFAULT, "invalid_buff");
+
+ info.perf_event.kprobe.func_name = 0;
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EINVAL, "invalid_len");
+
+ ASSERT_EQ(info.perf_event.kprobe.addr, 0, "func_addr");
+ ASSERT_EQ(info.perf_event.kprobe.offset, 0, "func_offset");
+ ASSERT_EQ(info.perf_event.type, 0, "type");
+}
+
+static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
+ enum bpf_perf_event_type type,
+ bool invalid)
+{
+ DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
+ .attach_mode = PROBE_ATTACH_MODE_LINK,
+ .retprobe = type == BPF_PERF_EVENT_KRETPROBE,
+ .bpf_cookie = PERF_EVENT_COOKIE,
+ );
+ ssize_t entry_offset = 0;
+ struct bpf_link *link;
+ int link_fd, err;
+
+ link = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run, KPROBE_FUNC, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_kprobe"))
+ return;
+
+ link_fd = bpf_link__fd(link);
+ if (!invalid) {
+ /* See also arch_adjust_kprobe_addr(). */
+ if (skel->kconfig->CONFIG_X86_KERNEL_IBT)
+ entry_offset = 4;
+ if (skel->kconfig->CONFIG_PPC64 &&
+ skel->kconfig->CONFIG_KPROBES_ON_FTRACE &&
+ !skel->kconfig->CONFIG_PPC_FTRACE_OUT_OF_LINE)
+ entry_offset = 4;
+ err = verify_perf_link_info(link_fd, type, kprobe_addr, 0, entry_offset);
+ ASSERT_OK(err, "verify_perf_link_info");
+ } else {
+ kprobe_fill_invalid_user_buffer(link_fd);
+ }
+ bpf_link__destroy(link);
+}
+
+static void test_tp_fill_link_info(struct test_fill_link_info *skel)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts,
+ .bpf_cookie = PERF_EVENT_COOKIE,
+ );
+ struct bpf_link *link;
+ int link_fd, err;
+
+ link = bpf_program__attach_tracepoint_opts(skel->progs.tp_run, TP_CAT, TP_NAME, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_tp"))
+ return;
+
+ link_fd = bpf_link__fd(link);
+ err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_TRACEPOINT, 0, 0, 0);
+ ASSERT_OK(err, "verify_perf_link_info");
+ bpf_link__destroy(link);
+}
+
+static void test_event_fill_link_info(struct test_fill_link_info *skel)
+{
+ DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts,
+ .bpf_cookie = PERF_EVENT_COOKIE,
+ );
+ struct bpf_link *link;
+ int link_fd, err, pfd;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_PAGE_FAULTS,
+ .freq = 1,
+ .sample_freq = 1,
+ .size = sizeof(struct perf_event_attr),
+ };
+
+ pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu 0 */,
+ -1 /* group id */, 0 /* flags */);
+ if (!ASSERT_GE(pfd, 0, "perf_event_open"))
+ return;
+
+ link = bpf_program__attach_perf_event_opts(skel->progs.event_run, pfd, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_event"))
+ goto error;
+
+ link_fd = bpf_link__fd(link);
+ err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_EVENT, 0, 0, 0);
+ ASSERT_OK(err, "verify_perf_link_info");
+ bpf_link__destroy(link);
+
+error:
+ close(pfd);
+}
+
+static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
+ enum bpf_perf_event_type type)
+{
+ DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts,
+ .retprobe = type == BPF_PERF_EVENT_URETPROBE,
+ .bpf_cookie = PERF_EVENT_COOKIE,
+ );
+ const char *sema[1] = {
+ "uprobe_link_info_sema_1",
+ };
+ __u64 *ref_ctr_offset;
+ struct bpf_link *link;
+ int link_fd, err;
+
+ err = elf_resolve_syms_offsets("/proc/self/exe", 1, sema,
+ (unsigned long **) &ref_ctr_offset, STT_OBJECT);
+ if (!ASSERT_OK(err, "elf_resolve_syms_offsets_object"))
+ return;
+
+ opts.ref_ctr_offset = *ref_ctr_offset;
+ link = bpf_program__attach_uprobe_opts(skel->progs.uprobe_run,
+ 0, /* self pid */
+ UPROBE_FILE, uprobe_offset,
+ &opts);
+ if (!ASSERT_OK_PTR(link, "attach_uprobe"))
+ goto out;
+
+ link_fd = bpf_link__fd(link);
+ err = verify_perf_link_info(link_fd, type, 0, uprobe_offset, *ref_ctr_offset);
+ ASSERT_OK(err, "verify_perf_link_info");
+ bpf_link__destroy(link);
+out:
+ free(ref_ctr_offset);
+}
+
+static int verify_kmulti_link_info(int fd, bool retprobe, bool has_cookies)
+{
+ __u64 addrs[KMULTI_CNT], cookies[KMULTI_CNT];
+ struct bpf_link_info info;
+ __u32 len = sizeof(info);
+ int flags, i, err;
+
+ memset(&info, 0, sizeof(info));
+
+again:
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ if (!ASSERT_OK(err, "get_link_info"))
+ return -1;
+
+ if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_KPROBE_MULTI, "kmulti_type"))
+ return -1;
+
+ ASSERT_EQ(info.kprobe_multi.count, KMULTI_CNT, "func_cnt");
+ flags = info.kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN;
+ if (!retprobe)
+ ASSERT_EQ(flags, 0, "kmulti_flags");
+ else
+ ASSERT_NEQ(flags, 0, "kretmulti_flags");
+
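+	/* The first call passes no buffers, so the kernel only reports the
+	 * count; retry with addrs/cookies buffers to fetch the values.
+	 */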
+ if (!info.kprobe_multi.addrs) {
+ info.kprobe_multi.addrs = ptr_to_u64(addrs);
+ info.kprobe_multi.cookies = ptr_to_u64(cookies);
+ goto again;
+ }
+ for (i = 0; i < KMULTI_CNT; i++) {
+ ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
+ ASSERT_EQ(cookies[i], has_cookies ? kmulti_cookies[i] : 0,
+ "kmulti_cookies_value");
+ }
+ return 0;
+}
+
+static void verify_kmulti_invalid_user_buffer(int fd)
+{
+ __u64 addrs[KMULTI_CNT], cookies[KMULTI_CNT];
+ struct bpf_link_info info;
+ __u32 len = sizeof(info);
+ int err, i;
+
+ memset(&info, 0, sizeof(info));
+
+ info.kprobe_multi.count = KMULTI_CNT;
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EINVAL, "no_addr");
+
+ info.kprobe_multi.addrs = ptr_to_u64(addrs);
+ info.kprobe_multi.count = 0;
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EINVAL, "no_cnt");
+
+ for (i = 0; i < KMULTI_CNT; i++)
+ addrs[i] = 0;
+ info.kprobe_multi.count = KMULTI_CNT - 1;
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -ENOSPC, "smaller_cnt");
+ for (i = 0; i < KMULTI_CNT - 1; i++)
+ ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
+ ASSERT_EQ(addrs[i], 0, "kmulti_addrs");
+
+ for (i = 0; i < KMULTI_CNT; i++)
+ addrs[i] = 0;
+ info.kprobe_multi.count = KMULTI_CNT + 1;
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, 0, "bigger_cnt");
+ for (i = 0; i < KMULTI_CNT; i++)
+ ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
+
+ info.kprobe_multi.count = KMULTI_CNT;
+ info.kprobe_multi.addrs = 0x1; /* invalid addr */
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EFAULT, "invalid_buff_addrs");
+
+ info.kprobe_multi.count = KMULTI_CNT;
+ info.kprobe_multi.addrs = ptr_to_u64(addrs);
+ info.kprobe_multi.cookies = 0x1; /* invalid addr */
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EFAULT, "invalid_buff_cookies");
+
+ /* cookies && !count */
+ info.kprobe_multi.count = 0;
+ info.kprobe_multi.addrs = ptr_to_u64(NULL);
+ info.kprobe_multi.cookies = ptr_to_u64(cookies);
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EINVAL, "invalid_cookies_count");
+}
+
+static int symbols_cmp_r(const void *a, const void *b)
+{
+ const char **str_a = (const char **) a;
+ const char **str_b = (const char **) b;
+
+ return strcmp(*str_a, *str_b);
+}
+
+static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
+ bool retprobe, bool cookies,
+ bool invalid)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ struct bpf_link *link;
+ int link_fd, err;
+
+ opts.syms = kmulti_syms;
+ opts.cookies = cookies ? kmulti_cookies : NULL;
+ opts.cnt = KMULTI_CNT;
+ opts.retprobe = retprobe;
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run, NULL, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_kprobe_multi"))
+ return;
+
+ link_fd = bpf_link__fd(link);
+ if (!invalid) {
+ err = verify_kmulti_link_info(link_fd, retprobe, cookies);
+ ASSERT_OK(err, "verify_kmulti_link_info");
+ } else {
+ verify_kmulti_invalid_user_buffer(link_fd);
+ }
+ bpf_link__destroy(link);
+}
+
+#define SEC(name) __attribute__((section(name), used))
+
+static short uprobe_link_info_sema_1 SEC(".probes");
+static short uprobe_link_info_sema_2 SEC(".probes");
+static short uprobe_link_info_sema_3 SEC(".probes");
+
+noinline void uprobe_link_info_func_1(void)
+{
+ asm volatile ("");
+ uprobe_link_info_sema_1++;
+}
+
+noinline void uprobe_link_info_func_2(void)
+{
+ asm volatile ("");
+ uprobe_link_info_sema_2++;
+}
+
+noinline void uprobe_link_info_func_3(void)
+{
+ asm volatile ("");
+ uprobe_link_info_sema_3++;
+}
+
+static int
+verify_umulti_link_info(int fd, bool retprobe, __u64 *offsets,
+ __u64 *cookies, __u64 *ref_ctr_offsets)
+{
+ char path[PATH_MAX], path_buf[PATH_MAX];
+ struct bpf_link_info info;
+ __u32 len = sizeof(info);
+ __u64 ref_ctr_offsets_buf[3];
+ __u64 offsets_buf[3];
+ __u64 cookies_buf[3];
+ int i, err, bit;
+ __u32 count = 0;
+
+ memset(path, 0, sizeof(path));
+ err = readlink("/proc/self/exe", path, sizeof(path));
+ if (!ASSERT_NEQ(err, -1, "readlink"))
+ return -1;
+
+ memset(&info, 0, sizeof(info));
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_link_get_info_by_fd"))
+ return -1;
+
+ ASSERT_EQ(info.uprobe_multi.count, 3, "info.uprobe_multi.count");
+ ASSERT_EQ(info.uprobe_multi.path_size, strlen(path) + 1,
+ "info.uprobe_multi.path_size");
+
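+	/* Walk all 2^3 combinations of requesting the optional offsets,
+	 * cookies and ref_ctr_offsets output buffers.
+	 */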
+ for (bit = 0; bit < 8; bit++) {
+ memset(&info, 0, sizeof(info));
+ info.uprobe_multi.path = ptr_to_u64(path_buf);
+ info.uprobe_multi.path_size = sizeof(path_buf);
+ info.uprobe_multi.count = count;
+
+ if (bit & 0x1)
+ info.uprobe_multi.offsets = ptr_to_u64(offsets_buf);
+ if (bit & 0x2)
+ info.uprobe_multi.cookies = ptr_to_u64(cookies_buf);
+ if (bit & 0x4)
+ info.uprobe_multi.ref_ctr_offsets = ptr_to_u64(ref_ctr_offsets_buf);
+
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_link_get_info_by_fd"))
+ return -1;
+
+ if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_UPROBE_MULTI, "info.type"))
+ return -1;
+
+ ASSERT_EQ(info.uprobe_multi.pid, getpid(), "info.uprobe_multi.pid");
+ ASSERT_EQ(info.uprobe_multi.count, 3, "info.uprobe_multi.count");
+ ASSERT_EQ(info.uprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN,
+ retprobe, "info.uprobe_multi.flags.retprobe");
+ ASSERT_EQ(info.uprobe_multi.path_size, strlen(path) + 1, "info.uprobe_multi.path_size");
+ ASSERT_STREQ(path_buf, path, "info.uprobe_multi.path");
+
+ for (i = 0; i < info.uprobe_multi.count; i++) {
+ if (info.uprobe_multi.offsets)
+ ASSERT_EQ(offsets_buf[i], offsets[i], "info.uprobe_multi.offsets");
+ if (info.uprobe_multi.cookies)
+ ASSERT_EQ(cookies_buf[i], cookies[i], "info.uprobe_multi.cookies");
+ if (info.uprobe_multi.ref_ctr_offsets) {
+ ASSERT_EQ(ref_ctr_offsets_buf[i], ref_ctr_offsets[i],
+ "info.uprobe_multi.ref_ctr_offsets");
+ }
+ }
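+		/* The first iteration passes count == 0 so the kernel reports
+		 * it; reuse the reported count for the remaining iterations.
+		 */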
+ count = count ?: info.uprobe_multi.count;
+ }
+
+ return 0;
+}
+
+static void verify_umulti_invalid_user_buffer(int fd)
+{
+ struct bpf_link_info info;
+ __u32 len = sizeof(info);
+ __u64 buf[3];
+ int err;
+
+	/* path_size set without a path buffer */
+ memset(&info, 0, sizeof(info));
+ info.uprobe_multi.path_size = 3;
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EINVAL, "failed_upath_size");
+
+ /* path defined, but small */
+ memset(&info, 0, sizeof(info));
+ info.uprobe_multi.path = ptr_to_u64(buf);
+ info.uprobe_multi.path_size = 3;
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_LT(err, 0, "failed_upath_small");
+
+ /* path has wrong pointer */
+ memset(&info, 0, sizeof(info));
+ info.uprobe_multi.path_size = PATH_MAX;
+ info.uprobe_multi.path = 123;
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EFAULT, "failed_bad_path_ptr");
+
+ /* count zero, with offsets */
+ memset(&info, 0, sizeof(info));
+ info.uprobe_multi.offsets = ptr_to_u64(buf);
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EINVAL, "failed_count");
+
+ /* offsets not big enough */
+ memset(&info, 0, sizeof(info));
+ info.uprobe_multi.offsets = ptr_to_u64(buf);
+ info.uprobe_multi.count = 2;
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -ENOSPC, "failed_small_count");
+
+ /* offsets has wrong pointer */
+ memset(&info, 0, sizeof(info));
+ info.uprobe_multi.offsets = 123;
+ info.uprobe_multi.count = 3;
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EFAULT, "failed_wrong_offsets");
+}
+
+static void test_uprobe_multi_fill_link_info(struct test_fill_link_info *skel,
+ bool retprobe, bool invalid)
+{
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
+ .retprobe = retprobe,
+ );
+ const char *syms[3] = {
+ "uprobe_link_info_func_1",
+ "uprobe_link_info_func_2",
+ "uprobe_link_info_func_3",
+ };
+ __u64 cookies[3] = {
+ 0xdead,
+ 0xbeef,
+ 0xcafe,
+ };
+ const char *sema[3] = {
+ "uprobe_link_info_sema_1",
+ "uprobe_link_info_sema_2",
+ "uprobe_link_info_sema_3",
+ };
+ __u64 *offsets = NULL, *ref_ctr_offsets;
+ struct bpf_link *link;
+ int link_fd, err;
+
+ err = elf_resolve_syms_offsets("/proc/self/exe", 3, sema,
+ (unsigned long **) &ref_ctr_offsets, STT_OBJECT);
+ if (!ASSERT_OK(err, "elf_resolve_syms_offsets_object"))
+ return;
+
+ err = elf_resolve_syms_offsets("/proc/self/exe", 3, syms,
+ (unsigned long **) &offsets, STT_FUNC);
+ if (!ASSERT_OK(err, "elf_resolve_syms_offsets_func"))
+ goto out;
+
+ opts.syms = syms;
+ opts.cookies = &cookies[0];
+ opts.ref_ctr_offsets = (unsigned long *) &ref_ctr_offsets[0];
+ opts.cnt = ARRAY_SIZE(syms);
+
+ link = bpf_program__attach_uprobe_multi(skel->progs.umulti_run, 0,
+ "/proc/self/exe", NULL, &opts);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
+ goto out;
+
+ link_fd = bpf_link__fd(link);
+ if (invalid)
+ verify_umulti_invalid_user_buffer(link_fd);
+ else
+ verify_umulti_link_info(link_fd, retprobe, offsets, cookies, ref_ctr_offsets);
+
+ bpf_link__destroy(link);
+out:
+ free(ref_ctr_offsets);
+ free(offsets);
+}
+
+void test_fill_link_info(void)
+{
+ struct test_fill_link_info *skel;
+ int i;
+
+ skel = test_fill_link_info__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ /* load kallsyms to compare the addr */
+ if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
+ goto cleanup;
+
+ kprobe_addr = ksym_get_addr(KPROBE_FUNC);
+ if (test__start_subtest("kprobe_link_info"))
+ test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, false);
+ if (test__start_subtest("kretprobe_link_info"))
+ test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KRETPROBE, false);
+ if (test__start_subtest("kprobe_invalid_ubuff"))
+ test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, true);
+ if (test__start_subtest("tracepoint_link_info"))
+ test_tp_fill_link_info(skel);
+ if (test__start_subtest("event_link_info"))
+ test_event_fill_link_info(skel);
+
+ uprobe_offset = get_uprobe_offset(&uprobe_func);
+ if (test__start_subtest("uprobe_link_info"))
+ test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_UPROBE);
+ if (test__start_subtest("uretprobe_link_info"))
+ test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_URETPROBE);
+
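+	/* Sort the symbols so the order used at attach time matches the
+	 * sorted order in which the kernel reports kprobe_multi addresses.
+	 */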
+ qsort(kmulti_syms, KMULTI_CNT, sizeof(kmulti_syms[0]), symbols_cmp_r);
+ for (i = 0; i < KMULTI_CNT; i++)
+ kmulti_addrs[i] = ksym_get_addr(kmulti_syms[i]);
+ if (test__start_subtest("kprobe_multi_link_info")) {
+ test_kprobe_multi_fill_link_info(skel, false, false, false);
+ test_kprobe_multi_fill_link_info(skel, false, true, false);
+ }
+ if (test__start_subtest("kretprobe_multi_link_info")) {
+ test_kprobe_multi_fill_link_info(skel, true, false, false);
+ test_kprobe_multi_fill_link_info(skel, true, true, false);
+ }
+ if (test__start_subtest("kprobe_multi_invalid_ubuff"))
+ test_kprobe_multi_fill_link_info(skel, true, true, true);
+
+ if (test__start_subtest("uprobe_multi_link_info"))
+ test_uprobe_multi_fill_link_info(skel, false, false);
+ if (test__start_subtest("uretprobe_multi_link_info"))
+ test_uprobe_multi_fill_link_info(skel, true, false);
+ if (test__start_subtest("uprobe_multi_invalid"))
+ test_uprobe_multi_fill_link_info(skel, false, true);
+
+cleanup:
+ test_fill_link_info__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/find_vma.c b/tools/testing/selftests/bpf/prog_tests/find_vma.c
new file mode 100644
index 000000000000..f7619e0ade10
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/find_vma.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include "find_vma.skel.h"
+#include "find_vma_fail1.skel.h"
+#include "find_vma_fail2.skel.h"
+
+static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret, bool need_test)
+{
+ if (need_test) {
+ ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec");
+ ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret");
+ ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret");
+ ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs");
+ }
+
+ skel->bss->found_vm_exec = 0;
+ skel->data->find_addr_ret = -1;
+ skel->data->find_zero_ret = -1;
+ skel->bss->d_iname[0] = 0;
+}
+
+static int open_pe(void)
+{
+ struct perf_event_attr attr = {0};
+ int pfd;
+
+ /* create perf event */
+ attr.size = sizeof(attr);
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.config = PERF_COUNT_SW_CPU_CLOCK;
+ attr.freq = 1;
+ attr.sample_freq = 1000;
+ pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
+
+ return pfd >= 0 ? pfd : -errno;
+}
+
+static bool find_vma_pe_condition(struct find_vma *skel)
+{
+ return skel->bss->found_vm_exec == 0 ||
+ skel->data->find_addr_ret != 0 ||
+ skel->data->find_zero_ret == -1 ||
+ strcmp(skel->bss->d_iname, "test_progs") != 0;
+}
+
+static void test_find_vma_pe(struct find_vma *skel)
+{
+ struct bpf_link *link = NULL;
+ volatile int j = 0;
+ int pfd, i;
+ const int one_bn = 1000000000;
+
+ pfd = open_pe();
+ if (pfd < 0) {
+ if (pfd == -ENOENT || pfd == -EOPNOTSUPP) {
+			printf("%s:SKIP:no PERF_COUNT_SW_CPU_CLOCK\n", __func__);
+ test__skip();
+ goto cleanup;
+ }
+ if (!ASSERT_GE(pfd, 0, "perf_event_open"))
+ goto cleanup;
+ }
+
+ link = bpf_program__attach_perf_event(skel->progs.handle_pe, pfd);
+ if (!ASSERT_OK_PTR(link, "attach_perf_event"))
+ goto cleanup;
+
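+	/* Spin until the sampling perf event has fired and the BPF program
+	 * filled in the results, bounded by one_bn iterations.
+	 */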
+ for (i = 0; i < one_bn && find_vma_pe_condition(skel); ++i)
+ ++j;
+
+ test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */, i == one_bn);
+cleanup:
+ bpf_link__destroy(link);
+ close(pfd);
+}
+
+static void test_find_vma_kprobe(struct find_vma *skel)
+{
+ int err;
+
+ err = find_vma__attach(skel);
+	if (!ASSERT_OK(err, "find_vma__attach"))
+ return;
+
+ getpgid(skel->bss->target_pid);
+ test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */, true);
+}
+
+static void test_illegal_write_vma(void)
+{
+ struct find_vma_fail1 *skel;
+
+ skel = find_vma_fail1__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "find_vma_fail1__open_and_load"))
+ find_vma_fail1__destroy(skel);
+}
+
+static void test_illegal_write_task(void)
+{
+ struct find_vma_fail2 *skel;
+
+ skel = find_vma_fail2__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "find_vma_fail2__open_and_load"))
+ find_vma_fail2__destroy(skel);
+}
+
+void serial_test_find_vma(void)
+{
+ struct find_vma *skel;
+
+ skel = find_vma__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "find_vma__open_and_load"))
+ return;
+
+ skel->bss->target_pid = getpid();
+ skel->bss->addr = (__u64)(uintptr_t)test_find_vma_pe;
+
+ test_find_vma_pe(skel);
+ test_find_vma_kprobe(skel);
+
+ find_vma__destroy(skel);
+ test_illegal_write_vma();
+ test_illegal_write_task();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index cd6dc80edf18..08bae13248c4 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -1,43 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
#include <test_progs.h>
#include <network_helpers.h>
-#include <error.h>
-#include <linux/if.h>
#include <linux/if_tun.h>
#include <sys/uio.h>
#include "bpf_flow.skel.h"
+#define TEST_NS "flow_dissector_ns"
+#define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */
+#define TEST_NAME_MAX_LEN 64
+
#ifndef IP_MF
#define IP_MF 0x2000
#endif
-#define CHECK_FLOW_KEYS(desc, got, expected) \
- CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \
- desc, \
- "nhoff=%u/%u " \
- "thoff=%u/%u " \
- "addr_proto=0x%x/0x%x " \
- "is_frag=%u/%u " \
- "is_first_frag=%u/%u " \
- "is_encap=%u/%u " \
- "ip_proto=0x%x/0x%x " \
- "n_proto=0x%x/0x%x " \
- "flow_label=0x%x/0x%x " \
- "sport=%u/%u " \
- "dport=%u/%u\n", \
- got.nhoff, expected.nhoff, \
- got.thoff, expected.thoff, \
- got.addr_proto, expected.addr_proto, \
- got.is_frag, expected.is_frag, \
- got.is_first_frag, expected.is_first_frag, \
- got.is_encap, expected.is_encap, \
- got.ip_proto, expected.ip_proto, \
- got.n_proto, expected.n_proto, \
- got.flow_label, expected.flow_label, \
- got.sport, expected.sport, \
- got.dport, expected.dport)
-
struct ipv4_pkt {
struct ethhdr eth;
struct iphdr iph;
@@ -87,6 +64,19 @@ struct dvlan_ipv6_pkt {
struct tcphdr tcp;
} __packed;
+struct gre_base_hdr {
+ __be16 flags;
+ __be16 protocol;
+};
+
+struct gre_minimal_pkt {
+ struct ethhdr eth;
+ struct iphdr iph;
+ struct gre_base_hdr gre_hdr;
+ struct iphdr iph_inner;
+ struct tcphdr tcp;
+} __packed;
+
struct test {
const char *name;
union {
@@ -96,14 +86,15 @@ struct test {
struct ipv6_pkt ipv6;
struct ipv6_frag_pkt ipv6_frag;
struct dvlan_ipv6_pkt dvlan_ipv6;
+ struct gre_minimal_pkt gre_minimal;
} pkt;
struct bpf_flow_keys keys;
__u32 flags;
+ __u32 retval;
};
#define VLAN_HLEN 4
-static __u32 duration;
struct test tests[] = {
{
.name = "ipv4",
@@ -125,6 +116,7 @@ struct test tests[] = {
.sport = 80,
.dport = 8080,
},
+ .retval = BPF_OK,
},
{
.name = "ipv6",
@@ -145,6 +137,7 @@ struct test tests[] = {
.sport = 80,
.dport = 8080,
},
+ .retval = BPF_OK,
},
{
.name = "802.1q-ipv4",
@@ -167,6 +160,7 @@ struct test tests[] = {
.sport = 80,
.dport = 8080,
},
+ .retval = BPF_OK,
},
{
.name = "802.1ad-ipv6",
@@ -190,6 +184,7 @@ struct test tests[] = {
.sport = 80,
.dport = 8080,
},
+ .retval = BPF_OK,
},
{
.name = "ipv4-frag",
@@ -216,6 +211,7 @@ struct test tests[] = {
.dport = 8080,
},
.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
+ .retval = BPF_OK,
},
{
.name = "ipv4-no-frag",
@@ -238,6 +234,7 @@ struct test tests[] = {
.is_frag = true,
.is_first_frag = true,
},
+ .retval = BPF_OK,
},
{
.name = "ipv6-frag",
@@ -264,6 +261,7 @@ struct test tests[] = {
.dport = 8080,
},
.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
+ .retval = BPF_OK,
},
{
.name = "ipv6-no-frag",
@@ -286,6 +284,7 @@ struct test tests[] = {
.is_frag = true,
.is_first_frag = true,
},
+ .retval = BPF_OK,
},
{
.name = "ipv6-flow-label",
@@ -308,6 +307,7 @@ struct test tests[] = {
.dport = 8080,
.flow_label = __bpf_constant_htonl(0xbeeef),
},
+ .retval = BPF_OK,
},
{
.name = "ipv6-no-flow-label",
@@ -330,6 +330,31 @@ struct test tests[] = {
.flow_label = __bpf_constant_htonl(0xbeeef),
},
.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
+ .retval = BPF_OK,
+ },
+ {
+ .name = "ipv6-empty-flow-label",
+ .pkt.ipv6 = {
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
+ .iph.nexthdr = IPPROTO_TCP,
+ .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
+ .iph.flow_lbl = { 0x00, 0x00, 0x00 },
+ .tcp.doff = 5,
+ .tcp.source = 80,
+ .tcp.dest = 8080,
+ },
+ .keys = {
+ .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
+ .nhoff = ETH_HLEN,
+ .thoff = ETH_HLEN + sizeof(struct ipv6hdr),
+ .addr_proto = ETH_P_IPV6,
+ .ip_proto = IPPROTO_TCP,
+ .n_proto = __bpf_constant_htons(ETH_P_IPV6),
+ .sport = 80,
+ .dport = 8080,
+ },
+ .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
+ .retval = BPF_OK,
},
{
.name = "ipip-encap",
@@ -341,8 +366,8 @@ struct test tests[] = {
.iph_inner.ihl = 5,
.iph_inner.protocol = IPPROTO_TCP,
.iph_inner.tot_len =
- __bpf_constant_htons(MAGIC_BYTES) -
- sizeof(struct iphdr),
+ __bpf_constant_htons(MAGIC_BYTES -
+ sizeof(struct iphdr)),
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
@@ -358,6 +383,7 @@ struct test tests[] = {
.sport = 80,
.dport = 8080,
},
+ .retval = BPF_OK,
},
{
.name = "ipip-no-encap",
@@ -369,8 +395,8 @@ struct test tests[] = {
.iph_inner.ihl = 5,
.iph_inner.protocol = IPPROTO_TCP,
.iph_inner.tot_len =
- __bpf_constant_htons(MAGIC_BYTES) -
- sizeof(struct iphdr),
+ __bpf_constant_htons(MAGIC_BYTES -
+ sizeof(struct iphdr)),
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
@@ -385,9 +411,158 @@ struct test tests[] = {
.is_encap = true,
},
.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
+ .retval = BPF_OK,
+ },
+ {
+ .name = "ipip-encap-dissector-continue",
+ .pkt.ipip = {
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
+ .iph.ihl = 5,
+ .iph.protocol = IPPROTO_IPIP,
+ .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+ .iph.saddr = __bpf_constant_htonl(FLOW_CONTINUE_SADDR),
+ .iph_inner.ihl = 5,
+ .iph_inner.protocol = IPPROTO_TCP,
+ .iph_inner.tot_len =
+ __bpf_constant_htons(MAGIC_BYTES -
+ sizeof(struct iphdr)),
+ .tcp.doff = 5,
+ .tcp.source = 99,
+ .tcp.dest = 9090,
+ },
+ .retval = BPF_FLOW_DISSECTOR_CONTINUE,
+ },
+ {
+ .name = "ip-gre",
+ .pkt.gre_minimal = {
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
+ .iph.ihl = 5,
+ .iph.protocol = IPPROTO_GRE,
+ .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+ .gre_hdr = {
+ .flags = 0,
+ .protocol = __bpf_constant_htons(ETH_P_IP),
+ },
+ .iph_inner.ihl = 5,
+ .iph_inner.protocol = IPPROTO_TCP,
+ .iph_inner.tot_len =
+ __bpf_constant_htons(MAGIC_BYTES -
+ sizeof(struct iphdr)),
+ .tcp.doff = 5,
+ .tcp.source = 80,
+ .tcp.dest = 8080,
+ },
+ .keys = {
+ .nhoff = ETH_HLEN,
+ .thoff = ETH_HLEN + sizeof(struct iphdr) * 2 +
+ sizeof(struct gre_base_hdr),
+ .addr_proto = ETH_P_IP,
+ .ip_proto = IPPROTO_TCP,
+ .n_proto = __bpf_constant_htons(ETH_P_IP),
+ .is_encap = true,
+ .sport = 80,
+ .dport = 8080,
+ },
+ .retval = BPF_OK,
+ },
+ {
+ .name = "ip-gre-no-encap",
+ .pkt.ipip = {
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
+ .iph.ihl = 5,
+ .iph.protocol = IPPROTO_GRE,
+ .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+ .iph_inner.ihl = 5,
+ .iph_inner.protocol = IPPROTO_TCP,
+ .iph_inner.tot_len =
+ __bpf_constant_htons(MAGIC_BYTES -
+ sizeof(struct iphdr)),
+ .tcp.doff = 5,
+ .tcp.source = 80,
+ .tcp.dest = 8080,
+ },
+ .keys = {
+ .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
+ .nhoff = ETH_HLEN,
+ .thoff = ETH_HLEN + sizeof(struct iphdr)
+ + sizeof(struct gre_base_hdr),
+ .addr_proto = ETH_P_IP,
+ .ip_proto = IPPROTO_GRE,
+ .n_proto = __bpf_constant_htons(ETH_P_IP),
+ .is_encap = true,
+ },
+ .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
+ .retval = BPF_OK,
},
};
+void serial_test_flow_dissector_namespace(void)
+{
+ struct bpf_flow *skel;
+ struct nstoken *ns;
+ int err, prog_fd;
+
+ skel = bpf_flow__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs._dissect);
+ if (!ASSERT_OK_FD(prog_fd, "get dissector fd"))
+ goto out_destroy_skel;
+
+ /* We must be able to attach a flow dissector to root namespace */
+ err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
+ if (!ASSERT_OK(err, "attach on root namespace ok"))
+ goto out_destroy_skel;
+
+ err = make_netns(TEST_NS);
+ if (!ASSERT_OK(err, "create non-root net namespace"))
+ goto out_destroy_skel;
+
+ /* We must not be able to additionally attach a flow dissector to a
+ * non-root net namespace
+ */
+ ns = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(ns, "enter non-root net namespace"))
+ goto out_clean_ns;
+ err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
+ if (!ASSERT_ERR(err,
+ "refuse new flow dissector in non-root net namespace"))
+ bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
+ else
+ ASSERT_EQ(errno, EEXIST,
+ "refused because of already attached prog");
+ close_netns(ns);
+
+ /* If no flow dissector is attached to the root namespace, we must
+ * be able to attach one to a non-root net namespace
+ */
+ bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
+ ns = open_netns(TEST_NS);
+ ASSERT_OK_PTR(ns, "enter non-root net namespace");
+ err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
+ close_netns(ns);
+ ASSERT_OK(err, "accept new flow dissector in non-root net namespace");
+
+ /* If a flow dissector is attached to non-root net namespace, attaching
+ * a flow dissector to root namespace must fail
+ */
+ err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
+ if (!ASSERT_ERR(err, "refuse new flow dissector on root namespace"))
+ bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
+ else
+ ASSERT_EQ(errno, EEXIST,
+ "refused because of already attached prog");
+
+ ns = open_netns(TEST_NS);
+ bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
+ close_netns(ns);
+out_clean_ns:
+ remove_netns(TEST_NS);
+out_destroy_skel:
+ bpf_flow__destroy(skel);
+}
+
static int create_tap(const char *ifname)
{
struct ifreq ifr = {
@@ -457,10 +632,10 @@ static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
if (map_fd < 0)
return -1;
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "flow_dissector/%i", i);
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (!prog)
return -1;
@@ -475,22 +650,27 @@ static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
return 0;
}
-static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
+static void run_tests_skb_less(int tap_fd, struct bpf_map *keys,
+ char *test_suffix)
{
+ char test_name[TEST_NAME_MAX_LEN];
int i, err, keys_fd;
keys_fd = bpf_map__fd(keys);
- if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
+ if (!ASSERT_OK_FD(keys_fd, "bpf_map__fd"))
return;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
/* Keep in sync with 'flags' from eth_get_headlen. */
__u32 eth_get_headlen_flags =
BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
- struct bpf_prog_test_run_attr tattr = {};
struct bpf_flow_keys flow_keys = {};
__u32 key = (__u32)(tests[i].keys.sport) << 16 |
tests[i].keys.dport;
+ snprintf(test_name, TEST_NAME_MAX_LEN, "%s-%s", tests[i].name,
+ test_suffix);
+ if (!test__start_subtest(test_name))
+ continue;
/* For skb-less case we can't pass input flags; run
* only the tests that have a matching set of flags.
@@ -500,120 +680,175 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
continue;
err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
- CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
+ if (!ASSERT_EQ(err, sizeof(tests[i].pkt), "tx_tap"))
+ continue;
+
+ /* check the stored flow_keys only if BPF_OK expected */
+ if (tests[i].retval != BPF_OK)
+ continue;
err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
- CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
+ continue;
- CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err);
- CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
+ ASSERT_MEMEQ(&flow_keys, &tests[i].keys,
+ sizeof(struct bpf_flow_keys),
+ "returned flow keys");
err = bpf_map_delete_elem(keys_fd, &key);
- CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err);
+ ASSERT_OK(err, "bpf_map_delete_elem");
}
}
-static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
+void test_flow_dissector_skb_less_direct_attach(void)
{
- int err, prog_fd;
+ int err, prog_fd, tap_fd;
+ struct bpf_flow *skel;
+ struct netns_obj *ns;
- prog_fd = bpf_program__fd(skel->progs._dissect);
- if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
+	ns = netns_new("flow_dissector_skb_less_direct_attach_ns", true);
+ if (!ASSERT_OK_PTR(ns, "create and open netns"))
return;
+ skel = bpf_flow__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
+ goto out_clean_ns;
+
+ err = init_prog_array(skel->obj, skel->maps.jmp_table);
+ if (!ASSERT_OK(err, "init_prog_array"))
+ goto out_destroy_skel;
+
+ prog_fd = bpf_program__fd(skel->progs._dissect);
+ if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
+ goto out_destroy_skel;
+
err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
- if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno))
- return;
+ if (!ASSERT_OK(err, "bpf_prog_attach"))
+ goto out_destroy_skel;
- run_tests_skb_less(tap_fd, skel->maps.last_dissection);
+ tap_fd = create_tap("tap0");
+ if (!ASSERT_OK_FD(tap_fd, "create_tap"))
+ goto out_destroy_skel;
+ err = ifup("tap0");
+ if (!ASSERT_OK(err, "ifup"))
+ goto out_close_tap;
+
+ run_tests_skb_less(tap_fd, skel->maps.last_dissection,
+ "non-skb-direct-attach");
err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
- CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
+ ASSERT_OK(err, "bpf_prog_detach2");
+
+out_close_tap:
+ close(tap_fd);
+out_destroy_skel:
+ bpf_flow__destroy(skel);
+out_clean_ns:
+ netns_free(ns);
}
-static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
+void test_flow_dissector_skb_less_indirect_attach(void)
{
+ int err, net_fd, tap_fd;
+ struct bpf_flow *skel;
struct bpf_link *link;
- int err, net_fd;
+ struct netns_obj *ns;
- net_fd = open("/proc/self/ns/net", O_RDONLY);
- if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno))
+ ns = netns_new("flow_dissector_skb_less_indirect_attach_ns", true);
+ if (!ASSERT_OK_PTR(ns, "create and open netns"))
return;
+ skel = bpf_flow__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
+ goto out_clean_ns;
+
+ net_fd = open("/proc/self/ns/net", O_RDONLY);
+	if (!ASSERT_OK_FD(net_fd, "open(/proc/self/ns/net)"))
+ goto out_destroy_skel;
+
+ err = init_prog_array(skel->obj, skel->maps.jmp_table);
+ if (!ASSERT_OK(err, "init_prog_array"))
+ goto out_destroy_skel;
+
+ tap_fd = create_tap("tap0");
+ if (!ASSERT_OK_FD(tap_fd, "create_tap"))
+ goto out_close_ns;
+ err = ifup("tap0");
+ if (!ASSERT_OK(err, "ifup"))
+ goto out_close_tap;
+
link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
- if (CHECK(IS_ERR(link), "attach_netns", "err %ld\n", PTR_ERR(link)))
- goto out_close;
+ if (!ASSERT_OK_PTR(link, "attach_netns"))
+ goto out_close_tap;
- run_tests_skb_less(tap_fd, skel->maps.last_dissection);
+ run_tests_skb_less(tap_fd, skel->maps.last_dissection,
+ "non-skb-indirect-attach");
err = bpf_link__destroy(link);
- CHECK(err, "bpf_link__destroy", "err %d\n", err);
-out_close:
+ ASSERT_OK(err, "bpf_link__destroy");
+
+out_close_tap:
+ close(tap_fd);
+out_close_ns:
close(net_fd);
+out_destroy_skel:
+ bpf_flow__destroy(skel);
+out_clean_ns:
+ netns_free(ns);
}
-void test_flow_dissector(void)
+void test_flow_dissector_skb(void)
{
- int i, err, prog_fd, keys_fd = -1, tap_fd;
+ char test_name[TEST_NAME_MAX_LEN];
struct bpf_flow *skel;
+ int i, err, prog_fd;
skel = bpf_flow__open_and_load();
- if (CHECK(!skel, "skel", "failed to open/load skeleton\n"))
+ if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
return;
- prog_fd = bpf_program__fd(skel->progs._dissect);
- if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
- goto out_destroy_skel;
- keys_fd = bpf_map__fd(skel->maps.last_dissection);
- if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
- goto out_destroy_skel;
err = init_prog_array(skel->obj, skel->maps.jmp_table);
- if (CHECK(err, "init_prog_array", "err %d\n", err))
+ if (!ASSERT_OK(err, "init_prog_array"))
+ goto out_destroy_skel;
+
+ prog_fd = bpf_program__fd(skel->progs._dissect);
+ if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
goto out_destroy_skel;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct bpf_flow_keys flow_keys;
- struct bpf_prog_test_run_attr tattr = {
- .prog_fd = prog_fd,
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &tests[i].pkt,
.data_size_in = sizeof(tests[i].pkt),
.data_out = &flow_keys,
- };
+ );
static struct bpf_flow_keys ctx = {};
+ snprintf(test_name, TEST_NAME_MAX_LEN, "%s-skb", tests[i].name);
+ if (!test__start_subtest(test_name))
+ continue;
+
if (tests[i].flags) {
- tattr.ctx_in = &ctx;
- tattr.ctx_size_in = sizeof(ctx);
+ topts.ctx_in = &ctx;
+ topts.ctx_size_in = sizeof(ctx);
ctx.flags = tests[i].flags;
}
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
- err || tattr.retval != 1,
- tests[i].name,
- "err %d errno %d retval %d duration %d size %u/%zu\n",
- err, errno, tattr.retval, tattr.duration,
- tattr.data_size_out, sizeof(flow_keys));
- CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
- }
-
- /* Do the same tests but for skb-less flow dissector.
- * We use a known path in the net/tun driver that calls
- * eth_get_headlen and we manually export bpf_flow_keys
- * via BPF map in this case.
- */
-
- tap_fd = create_tap("tap0");
- CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
- err = ifup("tap0");
- CHECK(err, "ifup", "err %d errno %d\n", err, errno);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, tests[i].retval, "test_run retval");
- /* Test direct prog attachment */
- test_skb_less_prog_attach(skel, tap_fd);
- /* Test indirect prog attachment via link */
- test_skb_less_link_create(skel, tap_fd);
+ /* check the resulting flow_keys only if BPF_OK returned */
+ if (topts.retval != BPF_OK)
+ continue;
+ ASSERT_EQ(topts.data_size_out, sizeof(flow_keys),
+ "test_run data_size_out");
+ ASSERT_MEMEQ(&flow_keys, &tests[i].keys,
+ sizeof(struct bpf_flow_keys),
+ "returned flow keys");
+ }
- close(tap_fd);
out_destroy_skel:
bpf_flow__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_classification.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_classification.c
new file mode 100644
index 000000000000..80b153d3ddec
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_classification.c
@@ -0,0 +1,797 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <bpf/bpf.h>
+#include <linux/bpf.h>
+#include <bpf/libbpf.h>
+#include <arpa/inet.h>
+#include <asm/byteorder.h>
+#include <netinet/udp.h>
+#include <poll.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "bpf_util.h"
+#include "bpf_flow.skel.h"
+
+#define CFG_PORT_INNER 8000
+#define CFG_PORT_GUE 6080
+#define SUBTEST_NAME_MAX_LEN 32
+#define TEST_NAME_MAX_LEN (32 + SUBTEST_NAME_MAX_LEN)
+#define MAX_SOURCE_PORTS 3
+#define TEST_PACKETS_COUNT 10
+#define TEST_PACKET_LEN 100
+#define TEST_PACKET_PATTERN 'a'
+#define TEST_IPV4 "192.168.0.1/32"
+#define TEST_IPV6 "100::a/128"
+#define TEST_TUNNEL_REMOTE "127.0.0.2"
+#define TEST_TUNNEL_LOCAL "127.0.0.1"
+
+#define INIT_ADDR4(addr4, port) \
+ { \
+ .sin_family = AF_INET, \
+ .sin_port = __constant_htons(port), \
+ .sin_addr.s_addr = __constant_htonl(addr4), \
+ }
+
+#define INIT_ADDR6(addr6, port) \
+ { \
+ .sin6_family = AF_INET6, \
+ .sin6_port = __constant_htons(port), \
+ .sin6_addr = addr6, \
+ }
+#define TEST_IN4_SRC_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK + 2, 0)
+#define TEST_IN4_DST_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK, CFG_PORT_INNER)
+#define TEST_OUT4_SRC_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK + 1, 0)
+#define TEST_OUT4_DST_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK, 0)
+
+#define TEST_IN6_SRC_ADDR_DEFAULT INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, 0)
+#define TEST_IN6_DST_ADDR_DEFAULT \
+ INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, CFG_PORT_INNER)
+#define TEST_OUT6_SRC_ADDR_DEFAULT INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, 0)
+#define TEST_OUT6_DST_ADDR_DEFAULT INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, 0)
+
+#define TEST_IN4_SRC_ADDR_DISSECT_CONTINUE INIT_ADDR4(INADDR_LOOPBACK + 126, 0)
+#define TEST_IN4_SRC_ADDR_IPIP INIT_ADDR4((in_addr_t)0x01010101, 0)
+#define TEST_IN4_DST_ADDR_IPIP INIT_ADDR4((in_addr_t)0xC0A80001, CFG_PORT_INNER)
+
+struct grehdr {
+ uint16_t unused;
+ uint16_t protocol;
+} __packed;
+
+struct guehdr {
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 hlen : 5, control : 1, version : 2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 version : 2, control : 1, hlen : 5;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 proto_ctype;
+ __be16 flags;
+ };
+ __be32 word;
+ };
+};
+
+static char buf[ETH_DATA_LEN];
+
+struct test_configuration {
+ char name[SUBTEST_NAME_MAX_LEN];
+ int (*test_setup)(void);
+ void (*test_teardown)(void);
+ int source_ports[MAX_SOURCE_PORTS];
+ int cfg_l3_inner;
+ struct sockaddr_in in_saddr4;
+ struct sockaddr_in in_daddr4;
+ struct sockaddr_in6 in_saddr6;
+ struct sockaddr_in6 in_daddr6;
+ int cfg_l3_outer;
+ struct sockaddr_in out_saddr4;
+ struct sockaddr_in out_daddr4;
+ struct sockaddr_in6 out_saddr6;
+ struct sockaddr_in6 out_daddr6;
+ int cfg_encap_proto;
+ uint8_t cfg_dsfield_inner;
+ uint8_t cfg_dsfield_outer;
+ int cfg_l3_extra;
+ struct sockaddr_in extra_saddr4;
+ struct sockaddr_in extra_daddr4;
+ struct sockaddr_in6 extra_saddr6;
+ struct sockaddr_in6 extra_daddr6;
+};
+
+static unsigned long util_gettime(void)
+{
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+ return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
+}
+
+static void build_ipv4_header(void *header, uint8_t proto, uint32_t src,
+ uint32_t dst, int payload_len, uint8_t tos)
+{
+ struct iphdr *iph = header;
+
+ iph->ihl = 5;
+ iph->version = 4;
+ iph->tos = tos;
+ iph->ttl = 8;
+ iph->tot_len = htons(sizeof(*iph) + payload_len);
+ iph->id = htons(1337);
+ iph->protocol = proto;
+ iph->saddr = src;
+ iph->daddr = dst;
+ iph->check = build_ip_csum((void *)iph);
+}
+
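+/* The dsfield (traffic class) occupies bits 4..11 of the first 16-bit word
+ * of the IPv6 header (version:4, tc:8, flow-label high nibble:4), hence the
+ * 0xF00F mask below.
+ */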
+static void ipv6_set_dsfield(struct ipv6hdr *ip6h, uint8_t dsfield)
+{
+ uint16_t val, *ptr = (uint16_t *)ip6h;
+
+ val = ntohs(*ptr);
+ val &= 0xF00F;
+ val |= ((uint16_t)dsfield) << 4;
+ *ptr = htons(val);
+}
+
+static void build_ipv6_header(void *header, uint8_t proto,
+ const struct sockaddr_in6 *src,
+ const struct sockaddr_in6 *dst, int payload_len,
+ uint8_t dsfield)
+{
+ struct ipv6hdr *ip6h = header;
+
+ ip6h->version = 6;
+ ip6h->payload_len = htons(payload_len);
+ ip6h->nexthdr = proto;
+ ip6h->hop_limit = 8;
+ ipv6_set_dsfield(ip6h, dsfield);
+
+ memcpy(&ip6h->saddr, &src->sin6_addr, sizeof(ip6h->saddr));
+ memcpy(&ip6h->daddr, &dst->sin6_addr, sizeof(ip6h->daddr));
+}
+
+static void build_udp_header(void *header, int payload_len, uint16_t sport,
+ uint16_t dport, int family)
+{
+ struct udphdr *udph = header;
+ int len = sizeof(*udph) + payload_len;
+
+ udph->source = htons(sport);
+ udph->dest = htons(dport);
+ udph->len = htons(len);
+ udph->check = 0;
+ if (family == AF_INET)
+ udph->check = build_udp_v4_csum(header - sizeof(struct iphdr),
+ udph);
+ else
+ udph->check = build_udp_v6_csum(header - sizeof(struct ipv6hdr),
+ udph);
+}
+
+static void build_gue_header(void *header, uint8_t proto)
+{
+ struct guehdr *gueh = header;
+
+ gueh->proto_ctype = proto;
+}
+
+static void build_gre_header(void *header, uint16_t proto)
+{
+ struct grehdr *greh = header;
+
+ greh->protocol = htons(proto);
+}
+
+static int l3_length(int family)
+{
+ if (family == AF_INET)
+ return sizeof(struct iphdr);
+ else
+ return sizeof(struct ipv6hdr);
+}
+
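+/* Assemble [extra L3][outer L3][GRE/GUE][inner L3][UDP][payload] in buf and
+ * return the total packet length.
+ */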
+static int build_packet(const struct test_configuration *test, uint16_t sport)
+{
+ int ol3_len = 0, ol4_len = 0, il3_len = 0, il4_len = 0;
+ int el3_len = 0, packet_len;
+
+ memset(buf, 0, ETH_DATA_LEN);
+
+ if (test->cfg_l3_extra)
+ el3_len = l3_length(test->cfg_l3_extra);
+
+ /* calculate header offsets */
+ if (test->cfg_encap_proto) {
+ ol3_len = l3_length(test->cfg_l3_outer);
+
+ if (test->cfg_encap_proto == IPPROTO_GRE)
+ ol4_len = sizeof(struct grehdr);
+ else if (test->cfg_encap_proto == IPPROTO_UDP)
+ ol4_len = sizeof(struct udphdr) + sizeof(struct guehdr);
+ }
+
+ il3_len = l3_length(test->cfg_l3_inner);
+ il4_len = sizeof(struct udphdr);
+
+ packet_len = el3_len + ol3_len + ol4_len + il3_len + il4_len +
+ TEST_PACKET_LEN;
+ if (!ASSERT_LE(packet_len, sizeof(buf), "check packet size"))
+ return -1;
+
+ /*
+ * Fill packet from inside out, to calculate correct checksums.
+ * But create ip before udp headers, as udp uses ip for pseudo-sum.
+ */
+ memset(buf + el3_len + ol3_len + ol4_len + il3_len + il4_len,
+ TEST_PACKET_PATTERN, TEST_PACKET_LEN);
+
+ /* add zero byte for udp csum padding */
+ buf[el3_len + ol3_len + ol4_len + il3_len + il4_len + TEST_PACKET_LEN] =
+ 0;
+
+ switch (test->cfg_l3_inner) {
+ case PF_INET:
+ build_ipv4_header(buf + el3_len + ol3_len + ol4_len,
+ IPPROTO_UDP, test->in_saddr4.sin_addr.s_addr,
+ test->in_daddr4.sin_addr.s_addr,
+ il4_len + TEST_PACKET_LEN,
+ test->cfg_dsfield_inner);
+ break;
+ case PF_INET6:
+ build_ipv6_header(buf + el3_len + ol3_len + ol4_len,
+ IPPROTO_UDP, &test->in_saddr6,
+ &test->in_daddr6, il4_len + TEST_PACKET_LEN,
+ test->cfg_dsfield_inner);
+ break;
+ }
+
+ build_udp_header(buf + el3_len + ol3_len + ol4_len + il3_len,
+ TEST_PACKET_LEN, sport, CFG_PORT_INNER,
+ test->cfg_l3_inner);
+
+ if (!test->cfg_encap_proto)
+ return il3_len + il4_len + TEST_PACKET_LEN;
+
+ switch (test->cfg_l3_outer) {
+ case PF_INET:
+ build_ipv4_header(buf + el3_len, test->cfg_encap_proto,
+ test->out_saddr4.sin_addr.s_addr,
+ test->out_daddr4.sin_addr.s_addr,
+ ol4_len + il3_len + il4_len + TEST_PACKET_LEN,
+ test->cfg_dsfield_outer);
+ break;
+ case PF_INET6:
+ build_ipv6_header(buf + el3_len, test->cfg_encap_proto,
+ &test->out_saddr6, &test->out_daddr6,
+ ol4_len + il3_len + il4_len + TEST_PACKET_LEN,
+ test->cfg_dsfield_outer);
+ break;
+ }
+
+ switch (test->cfg_encap_proto) {
+ case IPPROTO_UDP:
+ build_gue_header(buf + el3_len + ol3_len + ol4_len -
+ sizeof(struct guehdr),
+ test->cfg_l3_inner == PF_INET ? IPPROTO_IPIP :
+ IPPROTO_IPV6);
+ build_udp_header(buf + el3_len + ol3_len,
+ sizeof(struct guehdr) + il3_len + il4_len +
+ TEST_PACKET_LEN,
+ sport, CFG_PORT_GUE, test->cfg_l3_outer);
+ break;
+ case IPPROTO_GRE:
+ build_gre_header(buf + el3_len + ol3_len,
+ test->cfg_l3_inner == PF_INET ? ETH_P_IP :
+ ETH_P_IPV6);
+ break;
+ }
+
+ switch (test->cfg_l3_extra) {
+ case PF_INET:
+ build_ipv4_header(buf,
+ test->cfg_l3_outer == PF_INET ? IPPROTO_IPIP :
+ IPPROTO_IPV6,
+ test->extra_saddr4.sin_addr.s_addr,
+ test->extra_daddr4.sin_addr.s_addr,
+ ol3_len + ol4_len + il3_len + il4_len +
+ TEST_PACKET_LEN,
+ 0);
+ break;
+ case PF_INET6:
+ build_ipv6_header(buf,
+ test->cfg_l3_outer == PF_INET ? IPPROTO_IPIP :
+ IPPROTO_IPV6,
+ &test->extra_saddr6, &test->extra_daddr6,
+ ol3_len + ol4_len + il3_len + il4_len +
+ TEST_PACKET_LEN,
+ 0);
+ break;
+ }
+
+ return el3_len + ol3_len + ol4_len + il3_len + il4_len +
+ TEST_PACKET_LEN;
+}
+
+/* sender transmits the fully built packet (IP headers included) over a raw socket */
+static int setup_tx(const struct test_configuration *test)
+{
+ int family, fd, ret;
+
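+	/* the raw socket family must match the outermost L3 header that
+	 * build_packet() emits
+	 */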
+ if (test->cfg_l3_extra)
+ family = test->cfg_l3_extra;
+ else if (test->cfg_l3_outer)
+ family = test->cfg_l3_outer;
+ else
+ family = test->cfg_l3_inner;
+
+ fd = socket(family, SOCK_RAW, IPPROTO_RAW);
+ if (!ASSERT_OK_FD(fd, "setup tx socket"))
+ return fd;
+
+ if (test->cfg_l3_extra) {
+ if (test->cfg_l3_extra == PF_INET)
+ ret = connect(fd, (void *)&test->extra_daddr4,
+ sizeof(test->extra_daddr4));
+ else
+ ret = connect(fd, (void *)&test->extra_daddr6,
+ sizeof(test->extra_daddr6));
+ if (!ASSERT_OK(ret, "connect")) {
+ close(fd);
+ return ret;
+ }
+ } else if (test->cfg_l3_outer) {
+		/* encapsulated: connect to the outer destination */
+ if (test->cfg_l3_outer == PF_INET)
+ ret = connect(fd, (void *)&test->out_daddr4,
+ sizeof(test->out_daddr4));
+ else
+ ret = connect(fd, (void *)&test->out_daddr6,
+ sizeof(test->out_daddr6));
+ if (!ASSERT_OK(ret, "connect")) {
+ close(fd);
+ return ret;
+ }
+ } else {
+		/* no encapsulation: connect to the inner (loopback) destination */
+ if (test->cfg_l3_inner == PF_INET)
+ ret = connect(fd, (void *)&test->in_daddr4,
+ sizeof(test->in_daddr4));
+ else
+ ret = connect(fd, (void *)&test->in_daddr6,
+ sizeof(test->in_daddr6));
+ if (!ASSERT_OK(ret, "connect")) {
+ close(fd);
+ return ret;
+ }
+ }
+
+ return fd;
+}
+
+/* receiver reads unencapsulated UDP */
+static int setup_rx(const struct test_configuration *test)
+{
+ int fd, ret;
+
+ fd = socket(test->cfg_l3_inner, SOCK_DGRAM, 0);
+ if (!ASSERT_OK_FD(fd, "socket rx"))
+ return fd;
+
+ if (test->cfg_l3_inner == PF_INET)
+ ret = bind(fd, (void *)&test->in_daddr4,
+ sizeof(test->in_daddr4));
+ else
+ ret = bind(fd, (void *)&test->in_daddr6,
+ sizeof(test->in_daddr6));
+ if (!ASSERT_OK(ret, "bind rx")) {
+ close(fd);
+ return ret;
+ }
+
+ return fd;
+}
+
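+/* returns 0 only if the whole packet was written */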
+static int do_tx(int fd, const char *pkt, int len)
+{
+ int ret;
+
+ ret = write(fd, pkt, len);
+ return ret != len;
+}
+
+static int do_poll(int fd, short events, int timeout)
+{
+ struct pollfd pfd;
+ int ret;
+
+ pfd.fd = fd;
+ pfd.events = events;
+
+ ret = poll(&pfd, 1, timeout);
+ return ret;
+}
+
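+/* Each recv() consumes a whole datagram even though only one byte is
+ * read, so the number of successful reads equals the number of packets
+ * received.
+ */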
+static int do_rx(int fd)
+{
+ char rbuf;
+ int ret, num = 0;
+
+ while (1) {
+ ret = recv(fd, &rbuf, 1, MSG_DONTWAIT);
+ if (ret == -1 && errno == EAGAIN)
+ break;
+ if (ret < 0)
+ return -1;
+ if (!ASSERT_EQ(rbuf, TEST_PACKET_PATTERN, "check pkt pattern"))
+ return -1;
+ num++;
+ }
+
+ return num;
+}
+
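+/* Send TEST_PACKETS_COUNT packets from the given source port and return
+ * how many of them arrived on the plain UDP rx socket.
+ */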
+static int run_test(const struct test_configuration *test,
+ int source_port_index)
+{
+ int fdt = -1, fdr = -1, len, tx = 0, rx = 0, err;
+ unsigned long tstop, tcur;
+
+ fdr = setup_rx(test);
+ fdt = setup_tx(test);
+ if (!ASSERT_OK_FD(fdr, "setup rx") || !ASSERT_OK_FD(fdt, "setup tx")) {
+ err = -1;
+ goto out_close_sockets;
+ }
+
+ len = build_packet(test,
+ (uint16_t)test->source_ports[source_port_index]);
+	if (!ASSERT_GT(len, 0, "build test packet")) {
+		err = -1;
+		goto out_close_sockets;
+	}
+
+ tcur = util_gettime();
+ tstop = tcur;
+
+ while (tx < TEST_PACKETS_COUNT) {
+ if (!ASSERT_OK(do_tx(fdt, buf, len), "do_tx"))
+ break;
+ tx++;
+ err = do_rx(fdr);
+ if (!ASSERT_GE(err, 0, "do_rx"))
+ break;
+ rx += err;
+ }
+
+ /* read straggler packets, if any */
+ if (rx < tx) {
+ tstop = util_gettime() + 100;
+ while (rx < tx) {
+ tcur = util_gettime();
+ if (tcur >= tstop)
+ break;
+
+ err = do_poll(fdr, POLLIN, tstop - tcur);
+ if (err < 0)
+ break;
+ err = do_rx(fdr);
+ if (err >= 0)
+ rx += err;
+ }
+ }
+
+out_close_sockets:
+ close(fdt);
+ close(fdr);
+ return rx;
+}
+
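+/* Attach the main dissector program and populate its tail-call jump
+ * table with the flow_dissector_<N> subprograms from the skeleton.
+ */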
+static int attach_and_configure_program(struct bpf_flow *skel)
+{
+ struct bpf_map *prog_array = skel->maps.jmp_table;
+ int main_prog_fd, sub_prog_fd, map_fd, i, err;
+ struct bpf_program *prog;
+ char prog_name[32];
+
+ main_prog_fd = bpf_program__fd(skel->progs._dissect);
+ if (main_prog_fd < 0)
+ return main_prog_fd;
+
+ err = bpf_prog_attach(main_prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
+ if (err)
+ return err;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (map_fd < 0)
+ return map_fd;
+
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!prog)
+ return -1;
+
+ sub_prog_fd = bpf_program__fd(prog);
+ if (sub_prog_fd < 0)
+ return -1;
+
+ err = bpf_map_update_elem(map_fd, &i, &sub_prog_fd, BPF_ANY);
+ if (err)
+ return -1;
+ }
+
+ return main_prog_fd;
+}
+
+static void detach_program(struct bpf_flow *skel, int prog_fd)
+{
+ bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
+}
+
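+/* Install a tc flower filter on lo that drops inner UDP packets from the
+ * given source port(s); matching encapsulated traffic relies on the BPF
+ * flow dissector locating the inner headers.
+ */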
+static int set_port_drop(int pf, bool multi_port)
+{
+ char dst_port[16];
+
+ snprintf(dst_port, sizeof(dst_port), "%d", CFG_PORT_INNER);
+
+ SYS(fail, "tc qdisc add dev lo ingress");
+ SYS(fail_delete_qdisc, "tc filter add %s %s %s %s %s %s %s %s %s %s %s %s",
+ "dev lo",
+ "parent FFFF:",
+ "protocol", pf == PF_INET6 ? "ipv6" : "ip",
+ "pref 1337",
+ "flower",
+ "ip_proto udp",
+ "src_port", multi_port ? "8-10" : "9",
+ "dst_port", dst_port,
+ "action drop");
+ return 0;
+
+fail_delete_qdisc:
+ SYS_NOFAIL("tc qdisc del dev lo ingress");
+fail:
+ return 1;
+}
+
+static void remove_filter(void)
+{
+ SYS_NOFAIL("tc filter del dev lo ingress");
+ SYS_NOFAIL("tc qdisc del dev lo ingress");
+}
+
+static int ipv4_setup(void)
+{
+ return set_port_drop(PF_INET, false);
+}
+
+static int ipv6_setup(void)
+{
+ return set_port_drop(PF_INET6, false);
+}
+
+static int port_range_setup(void)
+{
+ return set_port_drop(PF_INET, true);
+}
+
+static int set_addresses(void)
+{
+ SYS(out, "ip -4 addr add %s dev lo", TEST_IPV4);
+ SYS(out_remove_ipv4, "ip -6 addr add %s dev lo", TEST_IPV6);
+ return 0;
+out_remove_ipv4:
+ SYS_NOFAIL("ip -4 addr del %s dev lo", TEST_IPV4);
+out:
+ return -1;
+}
+
+static void unset_addresses(void)
+{
+ SYS_NOFAIL("ip -4 addr del %s dev lo", TEST_IPV4);
+ SYS_NOFAIL("ip -6 addr del %s dev lo", TEST_IPV6);
+}
+
+static int ipip_setup(void)
+{
+ if (!ASSERT_OK(set_addresses(), "configure addresses"))
+ return -1;
+ if (!ASSERT_OK(set_port_drop(PF_INET, false), "set filter"))
+ goto out_unset_addresses;
+ SYS(out_remove_filter,
+ "ip link add ipip_test type ipip remote %s local %s dev lo",
+ TEST_TUNNEL_REMOTE, TEST_TUNNEL_LOCAL);
+ SYS(out_clean_netif, "ip link set ipip_test up");
+ return 0;
+
+out_clean_netif:
+ SYS_NOFAIL("ip link del ipip_test");
+out_remove_filter:
+ remove_filter();
+out_unset_addresses:
+ unset_addresses();
+ return -1;
+}
+
+static void ipip_shutdown(void)
+{
+ SYS_NOFAIL("ip link del ipip_test");
+ remove_filter();
+ unset_addresses();
+}
+
+static int gre_setup(void)
+{
+ if (!ASSERT_OK(set_addresses(), "configure addresses"))
+ return -1;
+ if (!ASSERT_OK(set_port_drop(PF_INET, false), "set filter"))
+ goto out_unset_addresses;
+ SYS(out_remove_filter,
+ "ip link add gre_test type gre remote %s local %s dev lo",
+ TEST_TUNNEL_REMOTE, TEST_TUNNEL_LOCAL);
+ SYS(out_clean_netif, "ip link set gre_test up");
+ return 0;
+
+out_clean_netif:
+	SYS_NOFAIL("ip link del gre_test");
+out_remove_filter:
+ remove_filter();
+out_unset_addresses:
+ unset_addresses();
+ return -1;
+}
+
+static void gre_shutdown(void)
+{
+ SYS_NOFAIL("ip link del gre_test");
+ remove_filter();
+ unset_addresses();
+}
+
+static const struct test_configuration tests_input[] = {
+ {
+ .name = "ipv4",
+ .test_setup = ipv4_setup,
+ .test_teardown = remove_filter,
+ .source_ports = { 8, 9, 10 },
+ .cfg_l3_inner = PF_INET,
+ .in_saddr4 = TEST_IN4_SRC_ADDR_DEFAULT,
+ .in_daddr4 = TEST_IN4_DST_ADDR_DEFAULT
+ },
+ {
+ .name = "ipv4_continue_dissect",
+ .test_setup = ipv4_setup,
+ .test_teardown = remove_filter,
+ .source_ports = { 8, 9, 10 },
+ .cfg_l3_inner = PF_INET,
+ .in_saddr4 = TEST_IN4_SRC_ADDR_DISSECT_CONTINUE,
+		.in_daddr4 = TEST_IN4_DST_ADDR_DEFAULT
+	},
+ {
+ .name = "ipip",
+ .test_setup = ipip_setup,
+ .test_teardown = ipip_shutdown,
+ .source_ports = { 8, 9, 10 },
+ .cfg_l3_inner = PF_INET,
+ .in_saddr4 = TEST_IN4_SRC_ADDR_IPIP,
+ .in_daddr4 = TEST_IN4_DST_ADDR_IPIP,
+ .out_saddr4 = TEST_OUT4_SRC_ADDR_DEFAULT,
+ .out_daddr4 = TEST_OUT4_DST_ADDR_DEFAULT,
+ .cfg_l3_outer = PF_INET,
+ .cfg_encap_proto = IPPROTO_IPIP,
+	},
+ {
+ .name = "gre",
+ .test_setup = gre_setup,
+ .test_teardown = gre_shutdown,
+ .source_ports = { 8, 9, 10 },
+ .cfg_l3_inner = PF_INET,
+ .in_saddr4 = TEST_IN4_SRC_ADDR_IPIP,
+ .in_daddr4 = TEST_IN4_DST_ADDR_IPIP,
+ .out_saddr4 = TEST_OUT4_SRC_ADDR_DEFAULT,
+ .out_daddr4 = TEST_OUT4_DST_ADDR_DEFAULT,
+ .cfg_l3_outer = PF_INET,
+ .cfg_encap_proto = IPPROTO_GRE,
+ },
+ {
+ .name = "port_range",
+ .test_setup = port_range_setup,
+ .test_teardown = remove_filter,
+ .source_ports = { 7, 9, 11 },
+ .cfg_l3_inner = PF_INET,
+ .in_saddr4 = TEST_IN4_SRC_ADDR_DEFAULT,
+		.in_daddr4 = TEST_IN4_DST_ADDR_DEFAULT
+	},
+ {
+ .name = "ipv6",
+ .test_setup = ipv6_setup,
+ .test_teardown = remove_filter,
+ .source_ports = { 8, 9, 10 },
+ .cfg_l3_inner = PF_INET6,
+ .in_saddr6 = TEST_IN6_SRC_ADDR_DEFAULT,
+ .in_daddr6 = TEST_IN6_DST_ADDR_DEFAULT
+ },
+};
+
+struct test_ctx {
+ struct bpf_flow *skel;
+ struct netns_obj *ns;
+ int prog_fd;
+};
+
+static int test_global_init(struct test_ctx *ctx)
+{
+ int err;
+
+ ctx->skel = bpf_flow__open_and_load();
+ if (!ASSERT_OK_PTR(ctx->skel, "open and load flow_dissector"))
+ return -1;
+
+ ctx->ns = netns_new("flow_dissector_classification", true);
+ if (!ASSERT_OK_PTR(ctx->ns, "switch ns"))
+ goto out_destroy_skel;
+
+ err = write_sysctl("/proc/sys/net/ipv4/conf/default/rp_filter", "0");
+ err |= write_sysctl("/proc/sys/net/ipv4/conf/all/rp_filter", "0");
+ err |= write_sysctl("/proc/sys/net/ipv4/conf/lo/rp_filter", "0");
+ if (!ASSERT_OK(err, "configure net tunables"))
+ goto out_clean_ns;
+
+ ctx->prog_fd = attach_and_configure_program(ctx->skel);
+ if (!ASSERT_OK_FD(ctx->prog_fd, "attach and configure program"))
+ goto out_clean_ns;
+ return 0;
+out_clean_ns:
+ netns_free(ctx->ns);
+out_destroy_skel:
+ bpf_flow__destroy(ctx->skel);
+ return -1;
+}
+
+static void test_global_shutdown(struct test_ctx *ctx)
+{
+ detach_program(ctx->skel, ctx->prog_fd);
+ netns_free(ctx->ns);
+ bpf_flow__destroy(ctx->skel);
+}
+
+void test_flow_dissector_classification(void)
+{
+ struct test_ctx ctx;
+ const struct test_configuration *test;
+ int i;
+
+ if (test_global_init(&ctx))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(tests_input); i++) {
+ if (!test__start_subtest(tests_input[i].name))
+ continue;
+ test = &tests_input[i];
+		/* All tests are expected to have one working rx port first,
+		 * then a blocked rx port, and finally another working rx port
+		 */
+ if (test->test_setup &&
+ !ASSERT_OK(test->test_setup(), "init filter"))
+ continue;
+
+ ASSERT_EQ(run_test(test, 0), TEST_PACKETS_COUNT,
+ "test first port");
+ ASSERT_EQ(run_test(test, 1), 0, "test second port");
+ ASSERT_EQ(run_test(test, 2), TEST_PACKETS_COUNT,
+ "test third port");
+ if (test->test_teardown)
+ test->test_teardown();
+ }
+ test_global_shutdown(&ctx);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c
index 0e8a4d2f023d..c7a47b57ac91 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c
@@ -2,10 +2,9 @@
#include <test_progs.h>
#include <network_helpers.h>
-void test_flow_dissector_load_bytes(void)
+void serial_test_flow_dissector_load_bytes(void)
{
struct bpf_flow_keys flow_keys;
- __u32 duration = 0, retval, size;
struct bpf_insn prog[] = {
// BPF_REG_1 - 1st argument: context
// BPF_REG_2 - 2nd argument: offset, start at first byte
@@ -27,22 +26,25 @@ void test_flow_dissector_load_bytes(void)
BPF_EXIT_INSN(),
};
int fd, err;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = &flow_keys,
+ .data_size_out = sizeof(flow_keys),
+ .repeat = 1,
+ );
/* make sure bpf_skb_load_bytes is not allowed from skb-less context
*/
- fd = bpf_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog,
+ fd = bpf_test_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog,
ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
- CHECK(fd < 0,
- "flow_dissector-bpf_skb_load_bytes-load",
- "fd %d errno %d\n",
- fd, errno);
+	if (!ASSERT_GE(fd, 0, "bpf_test_load_program good fd"))
+		return;
- err = bpf_prog_test_run(fd, 1, &pkt_v4, sizeof(pkt_v4),
- &flow_keys, &size, &retval, &duration);
- CHECK(size != sizeof(flow_keys) || err || retval != 1,
- "flow_dissector-bpf_skb_load_bytes",
- "err %d errno %d retval %d duration %d size %u/%zu\n",
- err, errno, retval, duration, size, sizeof(flow_keys));
+ err = bpf_prog_test_run_opts(fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.data_size_out, sizeof(flow_keys),
+ "test_run data_size_out");
+ ASSERT_EQ(topts.retval, BPF_OK, "test_run retval");
 	if (fd >= 0)
 		close(fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
index 172c586b6996..9333f7346d15 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
@@ -47,9 +47,9 @@ static int load_prog(enum bpf_prog_type type)
};
int fd;
- fd = bpf_load_program(type, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
+ fd = bpf_test_load_program(type, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
if (CHECK_FAIL(fd < 0))
- perror("bpf_load_program");
+ perror("bpf_test_load_program");
return fd;
}
@@ -60,9 +60,9 @@ static __u32 query_prog_id(int prog)
__u32 info_len = sizeof(info);
int err;
- err = bpf_obj_get_info_by_fd(prog, &info, &info_len);
+ err = bpf_prog_get_info_by_fd(prog, &info, &info_len);
if (CHECK_FAIL(err || info_len != sizeof(info))) {
- perror("bpf_obj_get_info_by_fd");
+ perror("bpf_prog_get_info_by_fd");
return 0;
}
@@ -134,9 +134,9 @@ static void test_link_create_link_create(int netns, int prog1, int prog2)
/* Expect failure creating link when another link exists */
errno = 0;
link2 = bpf_link_create(prog2, netns, BPF_FLOW_DISSECTOR, &opts);
- if (CHECK_FAIL(link2 != -1 || errno != E2BIG))
+ if (CHECK_FAIL(link2 >= 0 || errno != E2BIG))
		perror("bpf_link_create(prog2) expected E2BIG");
- if (link2 != -1)
+ if (link2 >= 0)
close(link2);
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
@@ -159,9 +159,9 @@ static void test_prog_attach_link_create(int netns, int prog1, int prog2)
/* Expect failure creating link when prog attached */
errno = 0;
link = bpf_link_create(prog2, netns, BPF_FLOW_DISSECTOR, &opts);
- if (CHECK_FAIL(link != -1 || errno != EEXIST))
+ if (CHECK_FAIL(link >= 0 || errno != EEXIST))
perror("bpf_link_create(prog2) expected EEXIST");
- if (link != -1)
+ if (link >= 0)
close(link);
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
@@ -497,7 +497,7 @@ static void test_link_get_info(int netns, int prog1, int prog2)
}
info_len = sizeof(info);
- err = bpf_obj_get_info_by_fd(link, &info, &info_len);
+ err = bpf_link_get_info_by_fd(link, &info, &info_len);
if (CHECK_FAIL(err)) {
		perror("bpf_link_get_info_by_fd");
goto out_unlink;
@@ -521,7 +521,7 @@ static void test_link_get_info(int netns, int prog1, int prog2)
link_id = info.id;
info_len = sizeof(info);
- err = bpf_obj_get_info_by_fd(link, &info, &info_len);
+ err = bpf_link_get_info_by_fd(link, &info, &info_len);
if (CHECK_FAIL(err)) {
		perror("bpf_link_get_info_by_fd");
goto out_unlink;
@@ -546,7 +546,7 @@ static void test_link_get_info(int netns, int prog1, int prog2)
netns = -1;
info_len = sizeof(info);
- err = bpf_obj_get_info_by_fd(link, &info, &info_len);
+ err = bpf_link_get_info_by_fd(link, &info, &info_len);
if (CHECK_FAIL(err)) {
		perror("bpf_link_get_info_by_fd");
goto out_unlink;
@@ -623,12 +623,12 @@ static void run_tests(int netns)
}
out_close:
for (i = 0; i < ARRAY_SIZE(progs); i++) {
- if (progs[i] != -1)
+ if (progs[i] >= 0)
CHECK_FAIL(close(progs[i]));
}
}
-void test_flow_dissector_reattach(void)
+void serial_test_flow_dissector_reattach(void)
{
int err, new_net, saved_net;
diff --git a/tools/testing/selftests/bpf/prog_tests/for_each.c b/tools/testing/selftests/bpf/prog_tests/for_each.c
new file mode 100644
index 000000000000..5fea3209566e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/for_each.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "for_each_hash_map_elem.skel.h"
+#include "for_each_array_map_elem.skel.h"
+#include "for_each_map_elem_write_key.skel.h"
+#include "for_each_multi_maps.skel.h"
+#include "for_each_hash_modify.skel.h"
+
+static unsigned int duration;
+
+static void test_hash_map(void)
+{
+ int i, err, max_entries;
+ struct for_each_hash_map_elem *skel;
+ __u64 *percpu_valbuf = NULL;
+ size_t percpu_val_sz;
+ __u32 key, num_cpus;
+ __u64 val;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ skel = for_each_hash_map_elem__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load"))
+ return;
+
+ max_entries = bpf_map__max_entries(skel->maps.hashmap);
+ for (i = 0; i < max_entries; i++) {
+ key = i;
+ val = i + 1;
+ err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
+ &val, sizeof(val), BPF_ANY);
+ if (!ASSERT_OK(err, "map_update"))
+ goto out;
+ }
+
+ num_cpus = bpf_num_possible_cpus();
+ percpu_val_sz = sizeof(__u64) * num_cpus;
+ percpu_valbuf = malloc(percpu_val_sz);
+ if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
+ goto out;
+
+ key = 1;
+ for (i = 0; i < num_cpus; i++)
+ percpu_valbuf[i] = i + 1;
+ err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
+ percpu_valbuf, percpu_val_sz, BPF_ANY);
+ if (!ASSERT_OK(err, "percpu_map_update"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
+ duration = topts.duration;
+ if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
+ err, errno, topts.retval))
+ goto out;
+
+ ASSERT_EQ(skel->bss->hashmap_output, 4, "hashmap_output");
+ ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");
+
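+	/* the BPF program is expected to have deleted element 1 during
+	 * iteration, so this lookup must fail
+	 */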
+ key = 1;
+ err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0);
+ ASSERT_ERR(err, "hashmap_lookup");
+
+ ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
+ ASSERT_LT(skel->bss->cpu, num_cpus, "num_cpus");
+ ASSERT_EQ(skel->bss->percpu_map_elems, 1, "percpu_map_elems");
+ ASSERT_EQ(skel->bss->percpu_key, 1, "percpu_key");
+ ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val");
+ ASSERT_EQ(skel->bss->percpu_output, 100, "percpu_output");
+out:
+ free(percpu_valbuf);
+ for_each_hash_map_elem__destroy(skel);
+}
+
+static void test_array_map(void)
+{
+ __u32 key, num_cpus, max_entries;
+ int i, err;
+ struct for_each_array_map_elem *skel;
+ __u64 *percpu_valbuf = NULL;
+ size_t percpu_val_sz;
+ __u64 val, expected_total;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ skel = for_each_array_map_elem__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load"))
+ return;
+
+ expected_total = 0;
+ max_entries = bpf_map__max_entries(skel->maps.arraymap);
+ for (i = 0; i < max_entries; i++) {
+ key = i;
+ val = i + 1;
+ /* skip the last iteration for expected total */
+ if (i != max_entries - 1)
+ expected_total += val;
+ err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
+ &val, sizeof(val), BPF_ANY);
+ if (!ASSERT_OK(err, "map_update"))
+ goto out;
+ }
+
+ num_cpus = bpf_num_possible_cpus();
+ percpu_val_sz = sizeof(__u64) * num_cpus;
+ percpu_valbuf = malloc(percpu_val_sz);
+ if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
+ goto out;
+
+ key = 0;
+ for (i = 0; i < num_cpus; i++)
+ percpu_valbuf[i] = i + 1;
+ err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
+ percpu_valbuf, percpu_val_sz, BPF_ANY);
+ if (!ASSERT_OK(err, "percpu_map_update"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
+ duration = topts.duration;
+ if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
+ err, errno, topts.retval))
+ goto out;
+
+ ASSERT_EQ(skel->bss->arraymap_output, expected_total, "array_output");
+ ASSERT_EQ(skel->bss->cpu + 1, skel->bss->percpu_val, "percpu_val");
+
+out:
+ free(percpu_valbuf);
+ for_each_array_map_elem__destroy(skel);
+}
+
+static void test_write_map_key(void)
+{
+ struct for_each_map_elem_write_key *skel;
+
+ skel = for_each_map_elem_write_key__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "for_each_map_elem_write_key__open_and_load"))
+ for_each_map_elem_write_key__destroy(skel);
+}
+
+static void test_multi_maps(void)
+{
+ struct for_each_multi_maps *skel;
+ __u64 val, array_total, hash_total;
+ __u32 key, max_entries;
+ int i, err;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ skel = for_each_multi_maps__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "for_each_multi_maps__open_and_load"))
+ return;
+
+ array_total = 0;
+ max_entries = bpf_map__max_entries(skel->maps.arraymap);
+ for (i = 0; i < max_entries; i++) {
+ key = i;
+ val = i + 1;
+ array_total += val;
+ err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
+ &val, sizeof(val), BPF_ANY);
+ if (!ASSERT_OK(err, "array_map_update"))
+ goto out;
+ }
+
+ hash_total = 0;
+ max_entries = bpf_map__max_entries(skel->maps.hashmap);
+ for (i = 0; i < max_entries; i++) {
+ key = i + 100;
+ val = i + 1;
+ hash_total += val;
+ err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
+ &val, sizeof(val), BPF_ANY);
+ if (!ASSERT_OK(err, "hash_map_update"))
+ goto out;
+ }
+
+ skel->bss->data_output = 0;
+ skel->bss->use_array = 1;
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+ ASSERT_OK(topts.retval, "retval");
+ ASSERT_EQ(skel->bss->data_output, array_total, "array output");
+
+ skel->bss->data_output = 0;
+ skel->bss->use_array = 0;
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+ ASSERT_OK(topts.retval, "retval");
+ ASSERT_EQ(skel->bss->data_output, hash_total, "hash output");
+
+out:
+ for_each_multi_maps__destroy(skel);
+}
+
+static void test_hash_modify(void)
+{
+ struct for_each_hash_modify *skel;
+ int max_entries, i, err;
+ __u64 key, val;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1
+ );
+
+ skel = for_each_hash_modify__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "for_each_hash_modify__open_and_load"))
+ return;
+
+ max_entries = bpf_map__max_entries(skel->maps.hashmap);
+ for (i = 0; i < max_entries; i++) {
+ key = i;
+ val = i;
+ err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
+ &val, sizeof(val), BPF_ANY);
+ if (!ASSERT_OK(err, "map_update"))
+ goto out;
+ }
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+ ASSERT_OK(topts.retval, "retval");
+
+out:
+ for_each_hash_modify__destroy(skel);
+}
+
+void test_for_each(void)
+{
+ if (test__start_subtest("hash_map"))
+ test_hash_map();
+ if (test__start_subtest("array_map"))
+ test_array_map();
+ if (test__start_subtest("write_map_key"))
+ test_write_map_key();
+ if (test__start_subtest("multi_maps"))
+ test_multi_maps();
+ if (test__start_subtest("hash_modify"))
+ test_hash_modify();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/free_timer.c b/tools/testing/selftests/bpf/prog_tests/free_timer.c
new file mode 100644
index 000000000000..0de8facca4c5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/free_timer.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025. Huawei Technologies Co., Ltd */
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <test_progs.h>
+
+#include "free_timer.skel.h"
+
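+/* Race starting a bpf_timer on one CPU against overwriting (and thereby
+ * freeing) the same timer on another CPU, to exercise concurrent free.
+ */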
+struct run_ctx {
+ struct bpf_program *start_prog;
+ struct bpf_program *overwrite_prog;
+ pthread_barrier_t notify;
+ int loop;
+ bool start;
+ bool stop;
+};
+
+static void start_threads(struct run_ctx *ctx)
+{
+	/* pairs with the acquire load in wait_for_start() */
+	__atomic_store_n(&ctx->start, true, __ATOMIC_RELEASE);
+}
+
+static void stop_threads(struct run_ctx *ctx)
+{
+ ctx->stop = true;
+ /* Guarantee the order between ->stop and ->start */
+ __atomic_store_n(&ctx->start, true, __ATOMIC_RELEASE);
+}
+
+static int wait_for_start(struct run_ctx *ctx)
+{
+ while (!__atomic_load_n(&ctx->start, __ATOMIC_ACQUIRE))
+ usleep(10);
+
+ return ctx->stop;
+}
+
+static void *overwrite_timer_fn(void *arg)
+{
+ struct run_ctx *ctx = arg;
+ int loop, fd, err;
+ cpu_set_t cpuset;
+ long ret = 0;
+
+ /* Pin on CPU 0 */
+ CPU_ZERO(&cpuset);
+ CPU_SET(0, &cpuset);
+ pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
+
+	/* Has the thread been asked to stop? */
+ err = wait_for_start(ctx);
+ if (err)
+ return NULL;
+
+ fd = bpf_program__fd(ctx->overwrite_prog);
+ loop = ctx->loop;
+ while (loop-- > 0) {
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+
+ /* Wait for start thread to complete */
+ pthread_barrier_wait(&ctx->notify);
+
+ /* Overwrite timers */
+ err = bpf_prog_test_run_opts(fd, &opts);
+ if (err)
+ ret |= 1;
+ else if (opts.retval)
+ ret |= 2;
+
+ /* Notify start thread to start timers */
+ pthread_barrier_wait(&ctx->notify);
+ }
+
+ return (void *)ret;
+}
+
+static void *start_timer_fn(void *arg)
+{
+ struct run_ctx *ctx = arg;
+ int loop, fd, err;
+ cpu_set_t cpuset;
+ long ret = 0;
+
+ /* Pin on CPU 1 */
+ CPU_ZERO(&cpuset);
+ CPU_SET(1, &cpuset);
+ pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
+
+	/* Has the thread been asked to stop? */
+ err = wait_for_start(ctx);
+ if (err)
+ return NULL;
+
+ fd = bpf_program__fd(ctx->start_prog);
+ loop = ctx->loop;
+ while (loop-- > 0) {
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+
+ /* Run the prog to start timer */
+ err = bpf_prog_test_run_opts(fd, &opts);
+ if (err)
+ ret |= 4;
+ else if (opts.retval)
+ ret |= 8;
+
+ /* Notify overwrite thread to do overwrite */
+ pthread_barrier_wait(&ctx->notify);
+
+ /* Wait for overwrite thread to complete */
+ pthread_barrier_wait(&ctx->notify);
+ }
+
+ return (void *)ret;
+}
+
+void test_free_timer(void)
+{
+ struct free_timer *skel;
+ struct bpf_program *prog;
+ struct run_ctx ctx;
+ pthread_t tid[2];
+ void *ret;
+ int err;
+
+ skel = free_timer__open_and_load();
+ if (!skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
+ if (!ASSERT_OK_PTR(skel, "open_load"))
+ return;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ prog = bpf_object__find_program_by_name(skel->obj, "start_timer");
+ if (!ASSERT_OK_PTR(prog, "find start prog"))
+ goto out;
+ ctx.start_prog = prog;
+
+ prog = bpf_object__find_program_by_name(skel->obj, "overwrite_timer");
+ if (!ASSERT_OK_PTR(prog, "find overwrite prog"))
+ goto out;
+ ctx.overwrite_prog = prog;
+
+ pthread_barrier_init(&ctx.notify, NULL, 2);
+ ctx.loop = 10;
+
+ err = pthread_create(&tid[0], NULL, start_timer_fn, &ctx);
+ if (!ASSERT_OK(err, "create start_timer"))
+ goto out;
+
+ err = pthread_create(&tid[1], NULL, overwrite_timer_fn, &ctx);
+ if (!ASSERT_OK(err, "create overwrite_timer")) {
+ stop_threads(&ctx);
+ goto out;
+ }
+
+ start_threads(&ctx);
+
+ ret = NULL;
+ err = pthread_join(tid[0], &ret);
+ ASSERT_EQ(err | (long)ret, 0, "start_timer");
+ ret = NULL;
+ err = pthread_join(tid[1], &ret);
+ ASSERT_EQ(err | (long)ret, 0, "overwrite_timer");
+out:
+ free_timer__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c b/tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c
new file mode 100644
index 000000000000..43a26ec69a8e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/xattr.h>
+#include <linux/fsverity.h>
+#include <unistd.h>
+#include <test_progs.h>
+#include "test_get_xattr.skel.h"
+#include "test_set_remove_xattr.skel.h"
+#include "test_fsverity.skel.h"
+
+static const char testfile[] = "/tmp/test_progs_fs_kfuncs";
+
+static void test_get_xattr(const char *name, const char *value, bool allow_access)
+{
+ struct test_get_xattr *skel = NULL;
+ int fd = -1, err;
+ int v[32];
+
+ fd = open(testfile, O_CREAT | O_RDONLY, 0644);
+ if (!ASSERT_GE(fd, 0, "create_file"))
+ return;
+
+ close(fd);
+ fd = -1;
+
+ err = setxattr(testfile, name, value, strlen(value) + 1, 0);
+ if (err && errno == EOPNOTSUPP) {
+ printf("%s:SKIP:local fs doesn't support xattr (%d)\n"
+		       "To run this test, make sure the /tmp filesystem supports xattr.\n",
+ __func__, errno);
+ test__skip();
+ goto out;
+ }
+
+ if (!ASSERT_OK(err, "setxattr"))
+ goto out;
+
+ skel = test_get_xattr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_get_xattr__open_and_load"))
+ goto out;
+
+ skel->bss->monitored_pid = getpid();
+	err = test_get_xattr__attach(skel);
+	if (!ASSERT_OK(err, "test_get_xattr__attach"))
+		goto out;
+
+	fd = open(testfile, O_RDONLY, 0644);
+	if (!ASSERT_GE(fd, 0, "open_file"))
+		goto out;
+
+ /* Trigger security_inode_getxattr */
+ err = getxattr(testfile, name, v, sizeof(v));
+
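+	/* When access is allowed, the BPF LSM programs are expected to
+	 * find the xattr and reject the read with -EINVAL; otherwise they
+	 * stay passive and getxattr() succeeds.
+	 */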
+ if (allow_access) {
+ ASSERT_EQ(err, -1, "getxattr_return");
+ ASSERT_EQ(errno, EINVAL, "getxattr_errno");
+ ASSERT_EQ(skel->bss->found_xattr_from_file, 1, "found_xattr_from_file");
+ ASSERT_EQ(skel->bss->found_xattr_from_dentry, 1, "found_xattr_from_dentry");
+ } else {
+ ASSERT_EQ(err, strlen(value) + 1, "getxattr_return");
+ ASSERT_EQ(skel->bss->found_xattr_from_file, 0, "found_xattr_from_file");
+ ASSERT_EQ(skel->bss->found_xattr_from_dentry, 0, "found_xattr_from_dentry");
+ }
+
+out:
+ close(fd);
+ test_get_xattr__destroy(skel);
+ remove(testfile);
+}
+
+/* xattr value we will set to security.bpf.foo */
+static const char value_foo[] = "hello";
+
+static void read_and_validate_foo(struct test_set_remove_xattr *skel)
+{
+ char value_out[32];
+ int err;
+
+ err = getxattr(testfile, skel->rodata->xattr_foo, value_out, sizeof(value_out));
+ ASSERT_EQ(err, sizeof(value_foo), "getxattr size foo");
+ ASSERT_EQ(strncmp(value_out, value_foo, sizeof(value_foo)), 0, "strncmp value_foo");
+}
+
+static void set_foo(struct test_set_remove_xattr *skel)
+{
+ ASSERT_OK(setxattr(testfile, skel->rodata->xattr_foo, value_foo, strlen(value_foo) + 1, 0),
+ "setxattr foo");
+}
+
+static void validate_bar_match(struct test_set_remove_xattr *skel)
+{
+ char value_out[32];
+ int err;
+
+ err = getxattr(testfile, skel->rodata->xattr_bar, value_out, sizeof(value_out));
+ ASSERT_EQ(err, sizeof(skel->data->value_bar), "getxattr size bar");
+ ASSERT_EQ(strncmp(value_out, skel->data->value_bar, sizeof(skel->data->value_bar)), 0,
+ "strncmp value_bar");
+}
+
+static void validate_bar_removed(struct test_set_remove_xattr *skel)
+{
+ char value_out[32];
+ int err;
+
+ err = getxattr(testfile, skel->rodata->xattr_bar, value_out, sizeof(value_out));
+ ASSERT_LT(err, 0, "getxattr size bar should fail");
+}
+
+static void test_set_remove_xattr(void)
+{
+ struct test_set_remove_xattr *skel = NULL;
+ int fd = -1, err;
+
+ fd = open(testfile, O_CREAT | O_RDONLY, 0644);
+ if (!ASSERT_GE(fd, 0, "create_file"))
+ return;
+
+ close(fd);
+ fd = -1;
+
+ skel = test_set_remove_xattr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_set_remove_xattr__open_and_load"))
+ return;
+
+ /* Set security.bpf.foo to "hello" */
+ err = setxattr(testfile, skel->rodata->xattr_foo, value_foo, strlen(value_foo) + 1, 0);
+ if (err && errno == EOPNOTSUPP) {
+ printf("%s:SKIP:local fs doesn't support xattr (%d)\n"
+		       "To run this test, make sure the /tmp filesystem supports xattr.\n",
+ __func__, errno);
+ test__skip();
+ goto out;
+ }
+
+ if (!ASSERT_OK(err, "setxattr"))
+ goto out;
+
+ skel->bss->monitored_pid = getpid();
+ err = test_set_remove_xattr__attach(skel);
+ if (!ASSERT_OK(err, "test_set_remove_xattr__attach"))
+ goto out;
+
+ /* First, test not _locked version of the kfuncs with getxattr. */
+
+ /* Read security.bpf.foo and trigger test_inode_getxattr. This
+ * bpf program will set security.bpf.bar to "world".
+ */
+ read_and_validate_foo(skel);
+ validate_bar_match(skel);
+
+ /* Read security.bpf.foo and trigger test_inode_getxattr again.
+ * This will remove xattr security.bpf.bar.
+ */
+ read_and_validate_foo(skel);
+ validate_bar_removed(skel);
+
+ ASSERT_TRUE(skel->bss->set_security_bpf_bar_success, "set_security_bpf_bar_success");
+ ASSERT_TRUE(skel->bss->remove_security_bpf_bar_success, "remove_security_bpf_bar_success");
+ ASSERT_TRUE(skel->bss->set_security_selinux_fail, "set_security_selinux_fail");
+ ASSERT_TRUE(skel->bss->remove_security_selinux_fail, "remove_security_selinux_fail");
+
+ /* Second, test _locked version of the kfuncs, with setxattr */
+
+ /* Set security.bpf.foo and trigger test_inode_setxattr. This
+ * bpf program will set security.bpf.bar to "world".
+ */
+ set_foo(skel);
+ validate_bar_match(skel);
+
+ /* Set security.bpf.foo and trigger test_inode_setxattr again.
+ * This will remove xattr security.bpf.bar.
+ */
+ set_foo(skel);
+ validate_bar_removed(skel);
+
+ ASSERT_TRUE(skel->bss->locked_set_security_bpf_bar_success,
+ "locked_set_security_bpf_bar_success");
+ ASSERT_TRUE(skel->bss->locked_remove_security_bpf_bar_success,
+ "locked_remove_security_bpf_bar_success");
+ ASSERT_TRUE(skel->bss->locked_set_security_selinux_fail,
+ "locked_set_security_selinux_fail");
+ ASSERT_TRUE(skel->bss->locked_remove_security_selinux_fail,
+ "locked_remove_security_selinux_fail");
+
+out:
+ close(fd);
+ test_set_remove_xattr__destroy(skel);
+ remove(testfile);
+}
+
+#ifndef SHA256_DIGEST_SIZE
+#define SHA256_DIGEST_SIZE 32
+#endif
+
+static void test_fsverity(void)
+{
+ struct fsverity_enable_arg arg = {0};
+ struct test_fsverity *skel = NULL;
+ struct fsverity_digest *d;
+ int fd, err;
+ char buffer[4096];
+
+ fd = open(testfile, O_CREAT | O_RDWR, 0644);
+ if (!ASSERT_GE(fd, 0, "create_file"))
+ return;
+
+ /* Write random buffer, so the file is not empty */
+ err = write(fd, buffer, 4096);
+ if (!ASSERT_EQ(err, 4096, "write_file"))
+ goto out;
+ close(fd);
+
+ /* Reopen read-only, otherwise FS_IOC_ENABLE_VERITY will fail */
+ fd = open(testfile, O_RDONLY, 0644);
+ if (!ASSERT_GE(fd, 0, "open_file1"))
+ return;
+
+ /* Enable fsverity for the file.
+ * If the file system doesn't support verity, this will fail. Skip
+ * the test in such case.
+ */
+ arg.version = 1;
+ arg.hash_algorithm = FS_VERITY_HASH_ALG_SHA256;
+ arg.block_size = 4096;
+ err = ioctl(fd, FS_IOC_ENABLE_VERITY, &arg);
+ if (err) {
+ printf("%s:SKIP:local fs doesn't support fsverity (%d)\n"
+		       "To run this test, enable CONFIG_FS_VERITY and fs-verity support on the filesystem.\n",
+ __func__, errno);
+ test__skip();
+ goto out;
+ }
+
+ skel = test_fsverity__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_fsverity__open_and_load"))
+ goto out;
+
+ /* Get fsverity_digest from ioctl */
+ d = (struct fsverity_digest *)skel->bss->expected_digest;
+ d->digest_algorithm = FS_VERITY_HASH_ALG_SHA256;
+ d->digest_size = SHA256_DIGEST_SIZE;
+ err = ioctl(fd, FS_IOC_MEASURE_VERITY, skel->bss->expected_digest);
+ if (!ASSERT_OK(err, "ioctl_FS_IOC_MEASURE_VERITY"))
+ goto out;
+
+ skel->bss->monitored_pid = getpid();
+ err = test_fsverity__attach(skel);
+ if (!ASSERT_OK(err, "test_fsverity__attach"))
+ goto out;
+
+ /* Reopen the file to trigger the program */
+ close(fd);
+ fd = open(testfile, O_RDONLY);
+ if (!ASSERT_GE(fd, 0, "open_file2"))
+ goto out;
+
+ ASSERT_EQ(skel->bss->got_fsverity, 1, "got_fsverity");
+ ASSERT_EQ(skel->bss->digest_matches, 1, "digest_matches");
+out:
+ close(fd);
+ test_fsverity__destroy(skel);
+ remove(testfile);
+}
+
+void test_fs_kfuncs(void)
+{
+ /* Matches xattr_names in progs/test_get_xattr.c */
+ if (test__start_subtest("user_xattr"))
+ test_get_xattr("user.kfuncs", "hello", true);
+
+ if (test__start_subtest("security_bpf_xattr"))
+ test_get_xattr("security.bpf.xxx", "hello", true);
+
+ if (test__start_subtest("security_bpf_xattr_error"))
+ test_get_xattr("security.bpf", "hello", false);
+
+ if (test__start_subtest("security_selinux_xattr_error"))
+ test_get_xattr("security.selinux", "hello", false);
+
+ if (test__start_subtest("set_remove_xattr"))
+ test_set_remove_xattr();
+
+ if (test__start_subtest("fsverity"))
+ test_fsverity();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
new file mode 100644
index 000000000000..0394a1156d99
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include "get_branch_snapshot.skel.h"
+
+static int *pfd_array;
+static int cpu_cnt;
+
+static bool is_hypervisor(void)
+{
+ char *line = NULL;
+ bool ret = false;
+	size_t len = 0;
+ FILE *fp;
+
+ fp = fopen("/proc/cpuinfo", "r");
+ if (!fp)
+ return false;
+
+ while (getline(&line, &len, fp) != -1) {
+ if (!strncmp(line, "flags", 5)) {
+ if (strstr(line, "hypervisor") != NULL)
+ ret = true;
+ break;
+ }
+ }
+
+ free(line);
+ fclose(fp);
+ return ret;
+}
+
+static int create_perf_events(void)
+{
+ struct perf_event_attr attr = {0};
+ int cpu;
+
+ /* create perf event */
+ attr.size = sizeof(attr);
+ attr.type = PERF_TYPE_HARDWARE;
+ attr.config = PERF_COUNT_HW_CPU_CYCLES;
+ attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
+ attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL |
+ PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;
+
+ cpu_cnt = libbpf_num_possible_cpus();
+ pfd_array = malloc(sizeof(int) * cpu_cnt);
+ if (!pfd_array) {
+ cpu_cnt = 0;
+ return 1;
+ }
+
+ for (cpu = 0; cpu < cpu_cnt; cpu++) {
+ pfd_array[cpu] = syscall(__NR_perf_event_open, &attr,
+ -1, cpu, -1, PERF_FLAG_FD_CLOEXEC);
+ if (pfd_array[cpu] < 0)
+ break;
+ }
+
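+	/* succeed as long as at least CPU 0 accepted the LBR event */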
+ return cpu == 0;
+}
+
+static void close_perf_events(void)
+{
+ int cpu, fd;
+
+ for (cpu = 0; cpu < cpu_cnt; cpu++) {
+ fd = pfd_array[cpu];
+ if (fd < 0)
+ break;
+ close(fd);
+ }
+ free(pfd_array);
+}
+
+void serial_test_get_branch_snapshot(void)
+{
+ struct get_branch_snapshot *skel = NULL;
+ int err;
+
+ /* Skip the test before we fix LBR snapshot for hypervisor. */
+ if (is_hypervisor()) {
+ test__skip();
+ return;
+ }
+
+ if (create_perf_events()) {
+ test__skip(); /* system doesn't support LBR */
+ goto cleanup;
+ }
+
+ skel = get_branch_snapshot__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "get_branch_snapshot__open_and_load"))
+ goto cleanup;
+
+ err = kallsyms_find("bpf_testmod_loop_test", &skel->bss->address_low);
+ if (!ASSERT_OK(err, "kallsyms_find"))
+ goto cleanup;
+
+ /* Just a guess for the end of this function, as module functions
+ * in /proc/kallsyms could come in any order.
+ */
+ skel->bss->address_high = skel->bss->address_low + 128;
+
+ err = get_branch_snapshot__attach(skel);
+ if (!ASSERT_OK(err, "get_branch_snapshot__attach"))
+ goto cleanup;
+
+ trigger_module_test_read(100);
+
+ if (skel->bss->total_entries < 16) {
+ /* too few entries for the hit/waste test */
+ test__skip();
+ goto cleanup;
+ }
+
+ ASSERT_GT(skel->bss->test1_hits, 6, "find_looptest_in_lbr");
+
+ /* Given we stop LBR in software, we will waste a few entries.
+	 * But we should waste as few entries as possible; we observe
+	 * about 7 on x86_64 systems.
+ * Add a check for < 10 so that we get heads-up when something
+ * changes and wastes too many entries.
+ */
+ ASSERT_LT(skel->bss->wasted_entries, 10, "check_wasted_entries");
+
+cleanup:
+ get_branch_snapshot__destroy(skel);
+ close_perf_events();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c
new file mode 100644
index 000000000000..64a9c95d4acf
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "get_func_args_test.skel.h"
+
+void test_get_func_args_test(void)
+{
+ struct get_func_args_test *skel = NULL;
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ skel = get_func_args_test__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "get_func_args_test__open_and_load"))
+ return;
+
+ err = get_func_args_test__attach(skel);
+ if (!ASSERT_OK(err, "get_func_args_test__attach"))
+ goto cleanup;
+
+ /* This runs bpf_fentry_test* functions and triggers
+ * fentry/fexit programs.
+ */
+ prog_fd = bpf_program__fd(skel->progs.test1);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ /* This runs bpf_modify_return_test function and triggers
+ * fmod_ret_test and fexit_test programs.
+ */
+ prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+
+ ASSERT_EQ(topts.retval >> 16, 1, "test_run");
+ ASSERT_EQ(topts.retval & 0xffff, 1234 + 29, "test_run");
+
+ ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");
+ ASSERT_EQ(skel->bss->test2_result, 1, "test2_result");
+ ASSERT_EQ(skel->bss->test3_result, 1, "test3_result");
+ ASSERT_EQ(skel->bss->test4_result, 1, "test4_result");
+
+cleanup:
+ get_func_args_test__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
new file mode 100644
index 000000000000..c40242dfa8fb
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "get_func_ip_test.skel.h"
+#include "get_func_ip_uprobe_test.skel.h"
+
+static noinline void uprobe_trigger(void)
+{
+}
+
+static void test_function_entry(void)
+{
+ struct get_func_ip_test *skel = NULL;
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ skel = get_func_ip_test__open();
+ if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open"))
+ return;
+
+ err = get_func_ip_test__load(skel);
+ if (!ASSERT_OK(err, "get_func_ip_test__load"))
+ goto cleanup;
+
+ err = get_func_ip_test__attach(skel);
+ if (!ASSERT_OK(err, "get_func_ip_test__attach"))
+ goto cleanup;
+
+ skel->bss->uprobe_trigger = (unsigned long) uprobe_trigger;
+
+ prog_fd = bpf_program__fd(skel->progs.test1);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ prog_fd = bpf_program__fd(skel->progs.test5);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+ ASSERT_OK(err, "test_run");
+
+ uprobe_trigger();
+
+ ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");
+ ASSERT_EQ(skel->bss->test2_result, 1, "test2_result");
+ ASSERT_EQ(skel->bss->test3_result, 1, "test3_result");
+ ASSERT_EQ(skel->bss->test4_result, 1, "test4_result");
+ ASSERT_EQ(skel->bss->test5_result, 1, "test5_result");
+ ASSERT_EQ(skel->bss->test7_result, 1, "test7_result");
+ ASSERT_EQ(skel->bss->test8_result, 1, "test8_result");
+
+cleanup:
+ get_func_ip_test__destroy(skel);
+}
+
+#ifdef __x86_64__
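+/* Hand-written trigger whose nop body gives the uprobe test a stable
+ * instruction boundary to attach to inside the function.
+ */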
+extern void uprobe_trigger_body(void);
+asm(
+".globl uprobe_trigger_body\n"
+".type uprobe_trigger_body, @function\n"
+"uprobe_trigger_body:\n"
+" nop\n"
+" ret\n"
+);
+
+static void test_function_body_kprobe(void)
+{
+ struct get_func_ip_test *skel = NULL;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ LIBBPF_OPTS(bpf_kprobe_opts, kopts);
+ struct bpf_link *link6 = NULL;
+ int err, prog_fd;
+
+ skel = get_func_ip_test__open();
+ if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open"))
+ return;
+
+ /* test6 is x86_64 specific and is disabled by default,
+ * enable it for body test.
+ */
+ bpf_program__set_autoload(skel->progs.test6, true);
+
+ err = get_func_ip_test__load(skel);
+ if (!ASSERT_OK(err, "get_func_ip_test__load"))
+ goto cleanup;
+
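+	/* probe past the function entry: the 5-byte fentry nop, preceded
+	 * by a 4-byte endbr when IBT is enabled
+	 */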
+ kopts.offset = skel->kconfig->CONFIG_X86_KERNEL_IBT ? 9 : 5;
+
+ link6 = bpf_program__attach_kprobe_opts(skel->progs.test6, "bpf_fentry_test6", &kopts);
+ if (!ASSERT_OK_PTR(link6, "link6"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.test1);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ ASSERT_EQ(skel->bss->test6_result, 1, "test6_result");
+
+cleanup:
+ bpf_link__destroy(link6);
+ get_func_ip_test__destroy(skel);
+}
+
+static void test_function_body_uprobe(void)
+{
+ struct get_func_ip_uprobe_test *skel = NULL;
+ int err;
+
+ skel = get_func_ip_uprobe_test__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "get_func_ip_uprobe_test__open_and_load"))
+ return;
+
+ err = get_func_ip_uprobe_test__attach(skel);
+ if (!ASSERT_OK(err, "get_func_ip_test__attach"))
+ goto cleanup;
+
+ skel->bss->uprobe_trigger_body = (unsigned long) uprobe_trigger_body;
+
+ uprobe_trigger_body();
+
+ ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");
+
+cleanup:
+ get_func_ip_uprobe_test__destroy(skel);
+}
+
+static void test_function_body(void)
+{
+ test_function_body_kprobe();
+ test_function_body_uprobe();
+}
+#else
+#define test_function_body()
+#endif
+
+void test_get_func_ip_test(void)
+{
+ test_function_entry();
+ test_function_body();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
index 925722217edf..858e0575f502 100644
--- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
@@ -24,12 +24,15 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
{
bool good_kern_stack = false, good_user_stack = false;
const char *nonjit_func = "___bpf_prog_run";
- struct get_stack_trace_t *e = data;
+ /* perfbuf-submitted data is 4-byte aligned, but we need 8-byte
+ * alignment, so copy data into a local variable, for simplicity
+ */
+ struct get_stack_trace_t e;
int i, num_stack;
- static __u64 cnt;
struct ksym *ks;
- cnt++;
+ memset(&e, 0, sizeof(e));
+ memcpy(&e, data, size <= sizeof(e) ? size : sizeof(e));
if (size < sizeof(struct get_stack_trace_t)) {
__u64 *raw_data = data;
@@ -57,19 +60,19 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
good_user_stack = true;
}
} else {
- num_stack = e->kern_stack_size / sizeof(__u64);
+ num_stack = e.kern_stack_size / sizeof(__u64);
if (env.jit_enabled) {
good_kern_stack = num_stack > 0;
} else {
for (i = 0; i < num_stack; i++) {
- ks = ksym_search(e->kern_stack[i]);
+ ks = ksym_search(e.kern_stack[i]);
if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
good_kern_stack = true;
break;
}
}
}
- if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
+ if (e.user_stack_size > 0 && e.user_stack_buildid_size > 0)
good_user_stack = true;
}
@@ -81,11 +84,10 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
void test_get_stack_raw_tp(void)
{
- const char *file = "./test_get_stack_rawtp.o";
- const char *file_err = "./test_get_stack_rawtp_err.o";
- const char *prog_name = "raw_tracepoint/sys_enter";
+ const char *file = "./test_get_stack_rawtp.bpf.o";
+ const char *file_err = "./test_get_stack_rawtp_err.bpf.o";
+ const char *prog_name = "bpf_prog1";
int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
- struct perf_buffer_opts pb_opts = {};
struct perf_buffer *pb = NULL;
struct bpf_link *link = NULL;
struct timespec tv = {0, 10};
@@ -94,15 +96,15 @@ void test_get_stack_raw_tp(void)
struct bpf_map *map;
cpu_set_t cpu_set;
- err = bpf_prog_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ err = bpf_prog_test_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
goto close_prog;
@@ -121,12 +123,12 @@ void test_get_stack_raw_tp(void)
goto close_prog;
link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
- if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto close_prog;
- pb_opts.sample_cb = get_stack_print_output;
- pb = perf_buffer__new(bpf_map__fd(map), 8, &pb_opts);
- if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
+ pb = perf_buffer__new(bpf_map__fd(map), 8, get_stack_print_output,
+ NULL, NULL, NULL);
+ if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto close_prog;
/* trigger some syscall action */
@@ -141,9 +143,7 @@ void test_get_stack_raw_tp(void)
}
close_prog:
- if (!IS_ERR_OR_NULL(link))
- bpf_link__destroy(link);
- if (!IS_ERR_OR_NULL(pb))
- perf_buffer__free(pb);
+ bpf_link__destroy(link);
+ perf_buffer__free(pb);
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
index d884b2ed5bc5..2715c68301f5 100644
--- a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
@@ -27,7 +27,7 @@ void test_get_stackid_cannot_attach(void)
return;
/* override program type */
- bpf_program__set_perf_event(skel->progs.oncpu);
+ bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT);
err = test_stacktrace_build_id__load(skel);
if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
@@ -48,8 +48,7 @@ void test_get_stackid_cannot_attach(void)
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
- CHECK(!IS_ERR(skel->links.oncpu), "attach_perf_event_no_callchain",
- "should have failed\n");
+ ASSERT_ERR_PTR(skel->links.oncpu, "attach_perf_event_no_callchain");
close(pmu_fd);
/* add PERF_SAMPLE_CALLCHAIN, attach should succeed */
@@ -65,8 +64,8 @@ void test_get_stackid_cannot_attach(void)
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
- CHECK(IS_ERR(skel->links.oncpu), "attach_perf_event_callchain",
- "err: %ld\n", PTR_ERR(skel->links.oncpu));
+ ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event_callchain");
+ bpf_link__destroy(skel->links.oncpu);
close(pmu_fd);
/* add exclude_callchain_kernel, attach should fail */
@@ -82,8 +81,7 @@ void test_get_stackid_cannot_attach(void)
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
- CHECK(!IS_ERR(skel->links.oncpu), "attach_perf_event_exclude_callchain_kernel",
- "should have failed\n");
+ ASSERT_ERR_PTR(skel->links.oncpu, "attach_perf_event_exclude_callchain_kernel");
close(pmu_fd);
cleanup:
diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c
index 9efa7e50eab2..fadfb64e2a71 100644
--- a/tools/testing/selftests/bpf/prog_tests/global_data.c
+++ b/tools/testing/selftests/bpf/prog_tests/global_data.c
@@ -29,7 +29,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
{ "relocate .rodata reference", 10, ~0 },
};
- for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);
CHECK(err || num != tests[i].num, tests[i].name,
"err %d result %llx expected %llx\n",
@@ -58,7 +58,7 @@ static void test_global_data_string(struct bpf_object *obj, __u32 duration)
{ "relocate .bss reference", 4, "\0\0hello" },
};
- for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, str);
CHECK(err || memcmp(str, tests[i].str, sizeof(str)),
tests[i].name, "err %d result \'%s\' expected \'%s\'\n",
@@ -92,7 +92,7 @@ static void test_global_data_struct(struct bpf_object *obj, __u32 duration)
{ "relocate .data reference", 3, { 41, 0xeeeeefef, 0x2111111111111111ULL, } },
};
- for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, &val);
CHECK(err || memcmp(&val, &tests[i].val, sizeof(val)),
tests[i].name, "err %d result { %u, %u, %llu } expected { %u, %u, %llu }\n",
@@ -103,18 +103,25 @@ static void test_global_data_struct(struct bpf_object *obj, __u32 duration)
static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration)
{
int err = -ENOMEM, map_fd, zero = 0;
- struct bpf_map *map;
+ struct bpf_map *map, *map2;
__u8 *buff;
map = bpf_object__find_map_by_name(obj, "test_glo.rodata");
- if (CHECK_FAIL(!map || !bpf_map__is_internal(map)))
+ if (!ASSERT_OK_PTR(map, "map"))
+ return;
+ if (!ASSERT_TRUE(bpf_map__is_internal(map), "is_internal"))
+ return;
+
+ /* ensure we can lookup internal maps by their ELF names */
+ map2 = bpf_object__find_map_by_name(obj, ".rodata");
+ if (!ASSERT_EQ(map, map2, "same_maps"))
return;
map_fd = bpf_map__fd(map);
if (CHECK_FAIL(map_fd < 0))
return;
- buff = malloc(bpf_map__def(map)->value_size);
+ buff = malloc(bpf_map__value_size(map));
if (buff)
err = bpf_map_update_elem(map_fd, &zero, buff, 0);
free(buff);
@@ -124,25 +131,27 @@ static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration)
void test_global_data(void)
{
- const char *file = "./test_global_data.o";
- __u32 duration = 0, retval;
+ const char *file = "./test_global_data.bpf.o";
struct bpf_object *obj;
int err, prog_fd;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
- if (CHECK(err, "load program", "error %d loading %s\n", err, file))
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (!ASSERT_OK(err, "load program"))
return;
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "pass global data run",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "pass global data run err");
+ ASSERT_OK(topts.retval, "pass global data run retval");
- test_global_data_number(obj, duration);
- test_global_data_string(obj, duration);
- test_global_data_struct(obj, duration);
- test_global_data_rdonly(obj, duration);
+ test_global_data_number(obj, topts.duration);
+ test_global_data_string(obj, topts.duration);
+ test_global_data_struct(obj, topts.duration);
+ test_global_data_rdonly(obj, topts.duration);
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/global_data_init.c b/tools/testing/selftests/bpf/prog_tests/global_data_init.c
index ee46b11f1f9a..8466332d7406 100644
--- a/tools/testing/selftests/bpf/prog_tests/global_data_init.c
+++ b/tools/testing/selftests/bpf/prog_tests/global_data_init.c
@@ -3,7 +3,7 @@
void test_global_data_init(void)
{
- const char *file = "./test_global_data.o";
+ const char *file = "./test_global_data.bpf.o";
int err = -ENOMEM, map_fd, zero = 0;
__u8 *buff = NULL, *newval = NULL;
struct bpf_object *obj;
@@ -16,11 +16,11 @@ void test_global_data_init(void)
if (CHECK_FAIL(err))
return;
- map = bpf_object__find_map_by_name(obj, "test_glo.rodata");
+ map = bpf_object__find_map_by_name(obj, ".rodata");
if (CHECK_FAIL(!map || !bpf_map__is_internal(map)))
goto out;
- sz = bpf_map__def(map)->value_size;
+ sz = bpf_map__value_size(map);
newval = malloc(sz);
if (CHECK_FAIL(!newval))
goto out;
diff --git a/tools/testing/selftests/bpf/prog_tests/global_func_args.c b/tools/testing/selftests/bpf/prog_tests/global_func_args.c
new file mode 100644
index 000000000000..d997099f62d0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/global_func_args.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "test_progs.h"
+#include "network_helpers.h"
+
+static __u32 duration;
+
+static void test_global_func_args0(struct bpf_object *obj)
+{
+ int err, i, map_fd, actual_value;
+ const char *map_name = "values";
+
+ map_fd = bpf_find_map(__func__, obj, map_name);
+ if (CHECK(map_fd < 0, "bpf_find_map", "cannot find BPF map %s: %s\n",
+ map_name, strerror(errno)))
+ return;
+
+ struct {
+ const char *descr;
+ int expected_value;
+ } tests[] = {
+ {"passing NULL pointer", 0},
+ {"returning value", 1},
+ {"reading local variable", 100 },
+ {"writing local variable", 101 },
+ {"reading global variable", 42 },
+ {"writing global variable", 43 },
+ {"writing to pointer-to-pointer", 1 },
+ };
+
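+	/* each expected value should have been written by the BPF program
+	 * into the "values" array map at the corresponding test index
+	 */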
+ for (i = 0; i < ARRAY_SIZE(tests); ++i) {
+ const int expected_value = tests[i].expected_value;
+
+ err = bpf_map_lookup_elem(map_fd, &i, &actual_value);
+
+ CHECK(err || actual_value != expected_value, tests[i].descr,
+ "err %d result %d expected %d\n", err, actual_value, expected_value);
+ }
+}
+
+void test_global_func_args(void)
+{
+ const char *file = "./test_global_func_args.bpf.o";
+ struct bpf_object *obj;
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
+ if (CHECK(err, "load program", "error %d loading %s\n", err, file))
+ return;
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_OK(topts.retval, "test_run retval");
+
+ test_global_func_args0(obj);
+
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/global_func_dead_code.c b/tools/testing/selftests/bpf/prog_tests/global_func_dead_code.c
new file mode 100644
index 000000000000..65309894b27a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/global_func_dead_code.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "verifier_global_subprogs.skel.h"
+#include "freplace_dead_global_func.skel.h"
+
+void test_global_func_dead_code(void)
+{
+ struct verifier_global_subprogs *tgt_skel = NULL;
+ struct freplace_dead_global_func *skel = NULL;
+ char log_buf[4096];
+ int err, tgt_fd;
+
+ /* first, try to load target with good global subprog */
+ tgt_skel = verifier_global_subprogs__open();
+ if (!ASSERT_OK_PTR(tgt_skel, "tgt_skel_good_open"))
+ return;
+
+ bpf_program__set_autoload(tgt_skel->progs.chained_global_func_calls_success, true);
+
+ err = verifier_global_subprogs__load(tgt_skel);
+ if (!ASSERT_OK(err, "tgt_skel_good_load"))
+ goto out;
+
+ tgt_fd = bpf_program__fd(tgt_skel->progs.chained_global_func_calls_success);
+
+ /* Attach to good non-eliminated subprog */
+ skel = freplace_dead_global_func__open();
+ if (!ASSERT_OK_PTR(skel, "skel_good_open"))
+ goto out;
+
+ err = bpf_program__set_attach_target(skel->progs.freplace_prog, tgt_fd, "global_good");
+ ASSERT_OK(err, "attach_target_good");
+
+ err = freplace_dead_global_func__load(skel);
+ if (!ASSERT_OK(err, "skel_good_load"))
+ goto out;
+
+ freplace_dead_global_func__destroy(skel);
+
+ /* Try attaching to dead code-eliminated subprog */
+ skel = freplace_dead_global_func__open();
+ if (!ASSERT_OK_PTR(skel, "skel_dead_open"))
+ goto out;
+
+ bpf_program__set_log_buf(skel->progs.freplace_prog, log_buf, sizeof(log_buf));
+ err = bpf_program__set_attach_target(skel->progs.freplace_prog, tgt_fd, "global_dead");
+ ASSERT_OK(err, "attach_target_dead");
+
+ err = freplace_dead_global_func__load(skel);
+ if (!ASSERT_ERR(err, "skel_dead_load"))
+ goto out;
+
+ ASSERT_HAS_SUBSTR(log_buf, "Subprog global_dead doesn't exist", "dead_subprog_missing_msg");
+
+out:
+ verifier_global_subprogs__destroy(tgt_skel);
+ freplace_dead_global_func__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/global_map_resize.c b/tools/testing/selftests/bpf/prog_tests/global_map_resize.c
new file mode 100644
index 000000000000..56b5baef35c8
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/global_map_resize.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#include <errno.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include "test_global_map_resize.skel.h"
+#include "test_progs.h"
+
+static void run_prog_bss_array_sum(void)
+{
+ (void)syscall(__NR_getpid);
+}
+
+static void run_prog_data_array_sum(void)
+{
+ (void)syscall(__NR_getuid);
+}
+
+static void global_map_resize_bss_subtest(void)
+{
+ int err;
+ struct test_global_map_resize *skel;
+ struct bpf_map *map;
+ const __u32 desired_sz = sizeof(skel->bss->sum) + sysconf(_SC_PAGE_SIZE) * 2;
+ size_t array_len, actual_sz, new_sz;
+
+ skel = test_global_map_resize__open();
+ if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
+ goto teardown;
+
+	/* set some initial value before resizing;
+	 * this non-zero value is expected to be
+	 * preserved across the resize
+	 */
+ skel->bss->array[0] = 1;
+
+ /* resize map value and verify the new size */
+ map = skel->maps.bss;
+ err = bpf_map__set_value_size(map, desired_sz);
+ if (!ASSERT_OK(err, "bpf_map__set_value_size"))
+ goto teardown;
+ if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))
+ goto teardown;
+
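+	/* also grow the per-CPU array datasec to one element per possible CPU */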
+ new_sz = sizeof(skel->data_percpu_arr->percpu_arr[0]) * libbpf_num_possible_cpus();
+ err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz);
+ ASSERT_OK(err, "percpu_arr_resize");
+
+ /* set the expected number of elements based on the resized array */
+ array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->bss->array[0]);
+ if (!ASSERT_GT(array_len, 1, "array_len"))
+ goto teardown;
+
+ skel->bss = bpf_map__initial_value(skel->maps.bss, &actual_sz);
+ if (!ASSERT_OK_PTR(skel->bss, "bpf_map__initial_value (ptr)"))
+ goto teardown;
+ if (!ASSERT_EQ(actual_sz, desired_sz, "bpf_map__initial_value (size)"))
+ goto teardown;
+
+ /* fill the newly resized array with ones,
+ * skipping the first element which was previously set
+ */
+ for (int i = 1; i < array_len; i++)
+ skel->bss->array[i] = 1;
+
+ /* set global const values before loading */
+ skel->rodata->pid = getpid();
+ skel->rodata->bss_array_len = array_len;
+ skel->rodata->data_array_len = 1;
+
+ err = test_global_map_resize__load(skel);
+ if (!ASSERT_OK(err, "test_global_map_resize__load"))
+ goto teardown;
+ err = test_global_map_resize__attach(skel);
+ if (!ASSERT_OK(err, "test_global_map_resize__attach"))
+ goto teardown;
+
+ /* run the bpf program which will sum the contents of the array.
+	 * since the array was filled with ones, verify the sum equals array_len
+ */
+ run_prog_bss_array_sum();
+ if (!ASSERT_EQ(skel->bss->sum, array_len, "sum"))
+ goto teardown;
+
+teardown:
+ test_global_map_resize__destroy(skel);
+}
+
+static void global_map_resize_data_subtest(void)
+{
+ struct test_global_map_resize *skel;
+ struct bpf_map *map;
+ const __u32 desired_sz = sysconf(_SC_PAGE_SIZE) * 2;
+ size_t array_len, actual_sz, new_sz;
+ int err;
+
+ skel = test_global_map_resize__open();
+ if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
+ goto teardown;
+
+	/* set some initial value before resizing;
+	 * this non-zero value is expected to be
+	 * preserved across the resize
+	 */
+ skel->data_custom->my_array[0] = 1;
+
+ /* resize map value and verify the new size */
+ map = skel->maps.data_custom;
+ err = bpf_map__set_value_size(map, desired_sz);
+ if (!ASSERT_OK(err, "bpf_map__set_value_size"))
+ goto teardown;
+ if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))
+ goto teardown;
+
+ new_sz = sizeof(skel->data_percpu_arr->percpu_arr[0]) * libbpf_num_possible_cpus();
+ err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz);
+ ASSERT_OK(err, "percpu_arr_resize");
+
+ /* set the expected number of elements based on the resized array */
+ array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->data_custom->my_array[0]);
+ if (!ASSERT_GT(array_len, 1, "array_len"))
+ goto teardown;
+
+ skel->data_custom = bpf_map__initial_value(skel->maps.data_custom, &actual_sz);
+ if (!ASSERT_OK_PTR(skel->data_custom, "bpf_map__initial_value (ptr)"))
+ goto teardown;
+ if (!ASSERT_EQ(actual_sz, desired_sz, "bpf_map__initial_value (size)"))
+ goto teardown;
+
+ /* fill the newly resized array with ones,
+ * skipping the first element which was previously set
+ */
+ for (int i = 1; i < array_len; i++)
+ skel->data_custom->my_array[i] = 1;
+
+ /* set global const values before loading */
+ skel->rodata->pid = getpid();
+ skel->rodata->bss_array_len = 1;
+ skel->rodata->data_array_len = array_len;
+
+ err = test_global_map_resize__load(skel);
+ if (!ASSERT_OK(err, "test_global_map_resize__load"))
+ goto teardown;
+ err = test_global_map_resize__attach(skel);
+ if (!ASSERT_OK(err, "test_global_map_resize__attach"))
+ goto teardown;
+
+ /* run the bpf program which will sum the contents of the array.
+	 * since the array was filled with ones, verify the sum equals array_len
+ */
+ run_prog_data_array_sum();
+ if (!ASSERT_EQ(skel->bss->sum, array_len, "sum"))
+ goto teardown;
+
+teardown:
+ test_global_map_resize__destroy(skel);
+}
+
+static void global_map_resize_invalid_subtest(void)
+{
+ int err;
+ struct test_global_map_resize *skel;
+ struct bpf_map *map;
+ __u32 element_sz, desired_sz;
+
+ skel = test_global_map_resize__open();
+ if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
+ return;
+
+ /* attempt to resize a global datasec map to size
+ * which does NOT align with array
+ */
+ map = skel->maps.data_custom;
+ if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.custom initial btf"))
+ goto teardown;
+	/* set the desired size to a fraction of the element size beyond an aligned size */
+ element_sz = sizeof(skel->data_custom->my_array[0]);
+ desired_sz = element_sz + element_sz / 2;
+ /* confirm desired size does NOT align with array */
+ if (!ASSERT_NEQ(desired_sz % element_sz, 0, "my_array alignment"))
+ goto teardown;
+ err = bpf_map__set_value_size(map, desired_sz);
+ /* confirm resize is OK but BTF info is cleared */
+ if (!ASSERT_OK(err, ".data.custom bpf_map__set_value_size") ||
+ !ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.custom clear btf key") ||
+ !ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.custom clear btf val"))
+ goto teardown;
+
+ /* attempt to resize a global datasec map whose only var is NOT an array */
+ map = skel->maps.data_non_array;
+ if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.non_array initial btf"))
+ goto teardown;
+ /* set desired size to arbitrary value */
+ desired_sz = 1024;
+ err = bpf_map__set_value_size(map, desired_sz);
+ /* confirm resize is OK but BTF info is cleared */
+ if (!ASSERT_OK(err, ".data.non_array bpf_map__set_value_size") ||
+ !ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.non_array clear btf key") ||
+ !ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.non_array clear btf val"))
+ goto teardown;
+
+ /* attempt to resize a global datasec map
+ * whose last var is NOT an array
+ */
+ map = skel->maps.data_array_not_last;
+ if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.array_not_last initial btf"))
+ goto teardown;
+ /* set desired size to a multiple of element size */
+ element_sz = sizeof(skel->data_array_not_last->my_array_first[0]);
+ desired_sz = element_sz * 8;
+ /* confirm desired size aligns with array */
+ if (!ASSERT_EQ(desired_sz % element_sz, 0, "my_array_first alignment"))
+ goto teardown;
+ err = bpf_map__set_value_size(map, desired_sz);
+ /* confirm resize is OK but BTF info is cleared */
+ if (!ASSERT_OK(err, ".data.array_not_last bpf_map__set_value_size") ||
+ !ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.array_not_last clear btf key") ||
+ !ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.array_not_last clear btf val"))
+ goto teardown;
+
+teardown:
+ test_global_map_resize__destroy(skel);
+}
+
+void test_global_map_resize(void)
+{
+ if (test__start_subtest("global_map_resize_bss"))
+ global_map_resize_bss_subtest();
+
+ if (test__start_subtest("global_map_resize_data"))
+ global_map_resize_data_subtest();
+
+ if (test__start_subtest("global_map_resize_invalid"))
+ global_map_resize_invalid_subtest();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/hashmap.c b/tools/testing/selftests/bpf/prog_tests/hashmap.c
index 428d488830c6..d358a223fd2d 100644
--- a/tools/testing/selftests/bpf/prog_tests/hashmap.c
+++ b/tools/testing/selftests/bpf/prog_tests/hashmap.c
@@ -7,17 +7,18 @@
*/
#include "test_progs.h"
#include "bpf/hashmap.h"
+#include <stddef.h>
static int duration = 0;
-static size_t hash_fn(const void *k, void *ctx)
+static size_t hash_fn(long k, void *ctx)
{
- return (long)k;
+ return k;
}
-static bool equal_fn(const void *a, const void *b, void *ctx)
+static bool equal_fn(long a, long b, void *ctx)
{
- return (long)a == (long)b;
+ return a == b;
}
static inline size_t next_pow_2(size_t n)
@@ -48,13 +49,12 @@ static void test_hashmap_generic(void)
struct hashmap *map;
map = hashmap__new(hash_fn, equal_fn, NULL);
- if (CHECK(IS_ERR(map), "hashmap__new",
- "failed to create map: %ld\n", PTR_ERR(map)))
+ if (!ASSERT_OK_PTR(map, "hashmap__new"))
return;
for (i = 0; i < ELEM_CNT; i++) {
- const void *oldk, *k = (const void *)(long)i;
- void *oldv, *v = (void *)(long)(1024 + i);
+ long oldk, k = i;
+ long oldv, v = 1024 + i;
err = hashmap__update(map, k, v, &oldk, &oldv);
if (CHECK(err != -ENOENT, "hashmap__update",
@@ -65,20 +65,18 @@ static void test_hashmap_generic(void)
err = hashmap__add(map, k, v);
} else {
err = hashmap__set(map, k, v, &oldk, &oldv);
- if (CHECK(oldk != NULL || oldv != NULL, "check_kv",
- "unexpected k/v: %p=%p\n", oldk, oldv))
+ if (CHECK(oldk != 0 || oldv != 0, "check_kv",
+ "unexpected k/v: %ld=%ld\n", oldk, oldv))
goto cleanup;
}
- if (CHECK(err, "elem_add", "failed to add k/v %ld = %ld: %d\n",
- (long)k, (long)v, err))
+ if (CHECK(err, "elem_add", "failed to add k/v %ld = %ld: %d\n", k, v, err))
goto cleanup;
if (CHECK(!hashmap__find(map, k, &oldv), "elem_find",
- "failed to find key %ld\n", (long)k))
+ "failed to find key %ld\n", k))
goto cleanup;
- if (CHECK(oldv != v, "elem_val",
- "found value is wrong: %ld\n", (long)oldv))
+ if (CHECK(oldv != v, "elem_val", "found value is wrong: %ld\n", oldv))
goto cleanup;
}
@@ -92,8 +90,8 @@ static void test_hashmap_generic(void)
found_msk = 0;
hashmap__for_each_entry(map, entry, bkt) {
- long k = (long)entry->key;
- long v = (long)entry->value;
+ long k = entry->key;
+ long v = entry->value;
found_msk |= 1ULL << k;
if (CHECK(v - k != 1024, "check_kv",
@@ -105,8 +103,8 @@ static void test_hashmap_generic(void)
goto cleanup;
for (i = 0; i < ELEM_CNT; i++) {
- const void *oldk, *k = (const void *)(long)i;
- void *oldv, *v = (void *)(long)(256 + i);
+ long oldk, k = i;
+ long oldv, v = 256 + i;
err = hashmap__add(map, k, v);
if (CHECK(err != -EEXIST, "hashmap__add",
@@ -120,13 +118,13 @@ static void test_hashmap_generic(void)
if (CHECK(err, "elem_upd",
"failed to update k/v %ld = %ld: %d\n",
- (long)k, (long)v, err))
+ k, v, err))
goto cleanup;
if (CHECK(!hashmap__find(map, k, &oldv), "elem_find",
- "failed to find key %ld\n", (long)k))
+ "failed to find key %ld\n", k))
goto cleanup;
if (CHECK(oldv != v, "elem_val",
- "found value is wrong: %ld\n", (long)oldv))
+ "found value is wrong: %ld\n", oldv))
goto cleanup;
}
@@ -140,8 +138,8 @@ static void test_hashmap_generic(void)
found_msk = 0;
hashmap__for_each_entry_safe(map, entry, tmp, bkt) {
- long k = (long)entry->key;
- long v = (long)entry->value;
+ long k = entry->key;
+ long v = entry->value;
found_msk |= 1ULL << k;
if (CHECK(v - k != 256, "elem_check",
@@ -153,7 +151,7 @@ static void test_hashmap_generic(void)
goto cleanup;
found_cnt = 0;
- hashmap__for_each_key_entry(map, entry, (void *)0) {
+ hashmap__for_each_key_entry(map, entry, 0) {
found_cnt++;
}
if (CHECK(!found_cnt, "found_cnt",
@@ -162,27 +160,25 @@ static void test_hashmap_generic(void)
found_msk = 0;
found_cnt = 0;
- hashmap__for_each_key_entry_safe(map, entry, tmp, (void *)0) {
- const void *oldk, *k;
- void *oldv, *v;
+ hashmap__for_each_key_entry_safe(map, entry, tmp, 0) {
+ long oldk, k;
+ long oldv, v;
k = entry->key;
v = entry->value;
found_cnt++;
- found_msk |= 1ULL << (long)k;
+ found_msk |= 1ULL << k;
if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del",
- "failed to delete k/v %ld = %ld\n",
- (long)k, (long)v))
+ "failed to delete k/v %ld = %ld\n", k, v))
goto cleanup;
if (CHECK(oldk != k || oldv != v, "check_old",
"invalid deleted k/v: expected %ld = %ld, got %ld = %ld\n",
- (long)k, (long)v, (long)oldk, (long)oldv))
+ k, v, oldk, oldv))
goto cleanup;
if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del",
- "unexpectedly deleted k/v %ld = %ld\n",
- (long)oldk, (long)oldv))
+ "unexpectedly deleted k/v %ld = %ld\n", oldk, oldv))
goto cleanup;
}
@@ -199,26 +195,24 @@ static void test_hashmap_generic(void)
goto cleanup;
hashmap__for_each_entry_safe(map, entry, tmp, bkt) {
- const void *oldk, *k;
- void *oldv, *v;
+ long oldk, k;
+ long oldv, v;
k = entry->key;
v = entry->value;
found_cnt++;
- found_msk |= 1ULL << (long)k;
+ found_msk |= 1ULL << k;
if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del",
- "failed to delete k/v %ld = %ld\n",
- (long)k, (long)v))
+ "failed to delete k/v %ld = %ld\n", k, v))
goto cleanup;
if (CHECK(oldk != k || oldv != v, "elem_check",
"invalid old k/v: expect %ld = %ld, got %ld = %ld\n",
- (long)k, (long)v, (long)oldk, (long)oldv))
+ k, v, oldk, oldv))
goto cleanup;
if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del",
- "unexpectedly deleted k/v %ld = %ld\n",
- (long)k, (long)v))
+ "unexpectedly deleted k/v %ld = %ld\n", k, v))
goto cleanup;
}
@@ -236,7 +230,7 @@ static void test_hashmap_generic(void)
hashmap__for_each_entry(map, entry, bkt) {
CHECK(false, "elem_exists",
"unexpected map entries left: %ld = %ld\n",
- (long)entry->key, (long)entry->value);
+ entry->key, entry->value);
goto cleanup;
}
@@ -244,22 +238,107 @@ static void test_hashmap_generic(void)
hashmap__for_each_entry(map, entry, bkt) {
CHECK(false, "elem_exists",
"unexpected map entries left: %ld = %ld\n",
- (long)entry->key, (long)entry->value);
+ entry->key, entry->value);
+ goto cleanup;
+ }
+
+cleanup:
+ hashmap__free(map);
+}
+
+static size_t str_hash_fn(long a, void *ctx)
+{
+ return str_hash((char *)a);
+}
+
+static bool str_equal_fn(long a, long b, void *ctx)
+{
+ return strcmp((char *)a, (char *)b) == 0;
+}
+
+/* Verify that hashmap interface works with pointer keys and values */
+static void test_hashmap_ptr_iface(void)
+{
+ const char *key, *value, *old_key, *old_value;
+ struct hashmap_entry *cur;
+ struct hashmap *map;
+ int err, i, bkt;
+
+ map = hashmap__new(str_hash_fn, str_equal_fn, NULL);
+ if (CHECK(!map, "hashmap__new", "can't allocate hashmap\n"))
goto cleanup;
+
+#define CHECK_STR(fn, var, expected) \
+ CHECK(strcmp(var, (expected)), (fn), \
+ "wrong value of " #var ": '%s' instead of '%s'\n", var, (expected))
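+/* e.g. CHECK_STR("hashmap__find", value, "blueberry") fails the subtest and
+ * prints both strings whenever strcmp(value, "blueberry") != 0
+ */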
+
+ err = hashmap__insert(map, "a", "apricot", HASHMAP_ADD, NULL, NULL);
+ if (CHECK(err, "hashmap__insert", "unexpected error: %d\n", err))
+ goto cleanup;
+
+ err = hashmap__insert(map, "a", "apple", HASHMAP_SET, &old_key, &old_value);
+ if (CHECK(err, "hashmap__insert", "unexpected error: %d\n", err))
+ goto cleanup;
+ CHECK_STR("hashmap__update", old_key, "a");
+ CHECK_STR("hashmap__update", old_value, "apricot");
+
+ err = hashmap__add(map, "b", "banana");
+ if (CHECK(err, "hashmap__add", "unexpected error: %d\n", err))
+ goto cleanup;
+
+ err = hashmap__set(map, "b", "breadfruit", &old_key, &old_value);
+ if (CHECK(err, "hashmap__set", "unexpected error: %d\n", err))
+ goto cleanup;
+ CHECK_STR("hashmap__set", old_key, "b");
+ CHECK_STR("hashmap__set", old_value, "banana");
+
+ err = hashmap__update(map, "b", "blueberry", &old_key, &old_value);
+ if (CHECK(err, "hashmap__update", "unexpected error: %d\n", err))
+ goto cleanup;
+ CHECK_STR("hashmap__update", old_key, "b");
+ CHECK_STR("hashmap__update", old_value, "breadfruit");
+
+ err = hashmap__append(map, "c", "cherry");
+ if (CHECK(err, "hashmap__append", "unexpected error: %d\n", err))
+ goto cleanup;
+
+ if (CHECK(!hashmap__delete(map, "c", &old_key, &old_value),
+ "hashmap__delete", "expected to have entry for 'c'\n"))
+ goto cleanup;
+ CHECK_STR("hashmap__delete", old_key, "c");
+ CHECK_STR("hashmap__delete", old_value, "cherry");
+
+ CHECK(!hashmap__find(map, "b", &value), "hashmap__find", "can't find value for 'b'\n");
+ CHECK_STR("hashmap__find", value, "blueberry");
+
+ if (CHECK(!hashmap__delete(map, "b", NULL, NULL),
+ "hashmap__delete", "expected to have entry for 'b'\n"))
+ goto cleanup;
+
+ i = 0;
+ hashmap__for_each_entry(map, cur, bkt) {
+ if (CHECK(i != 0, "hashmap__for_each_entry", "too many entries"))
+ goto cleanup;
+ key = cur->pkey;
+ value = cur->pvalue;
+ CHECK_STR("entry", key, "a");
+ CHECK_STR("entry", value, "apple");
+ i++;
}
+#undef CHECK_STR
cleanup:
hashmap__free(map);
}
-static size_t collision_hash_fn(const void *k, void *ctx)
+static size_t collision_hash_fn(long k, void *ctx)
{
return 0;
}
static void test_hashmap_multimap(void)
{
- void *k1 = (void *)0, *k2 = (void *)1;
+ long k1 = 0, k2 = 1;
struct hashmap_entry *entry;
struct hashmap *map;
long found_msk;
@@ -267,31 +346,30 @@ static void test_hashmap_multimap(void)
/* force collisions */
map = hashmap__new(collision_hash_fn, equal_fn, NULL);
- if (CHECK(IS_ERR(map), "hashmap__new",
- "failed to create map: %ld\n", PTR_ERR(map)))
+ if (!ASSERT_OK_PTR(map, "hashmap__new"))
return;
/* set up multimap:
* [0] -> 1, 2, 4;
* [1] -> 8, 16, 32;
*/
- err = hashmap__append(map, k1, (void *)1);
+ err = hashmap__append(map, k1, 1);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
- err = hashmap__append(map, k1, (void *)2);
+ err = hashmap__append(map, k1, 2);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
- err = hashmap__append(map, k1, (void *)4);
+ err = hashmap__append(map, k1, 4);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
- err = hashmap__append(map, k2, (void *)8);
+ err = hashmap__append(map, k2, 8);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
- err = hashmap__append(map, k2, (void *)16);
+ err = hashmap__append(map, k2, 16);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
- err = hashmap__append(map, k2, (void *)32);
+ err = hashmap__append(map, k2, 32);
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
@@ -302,7 +380,7 @@ static void test_hashmap_multimap(void)
/* verify global iteration still works and sees all values */
found_msk = 0;
hashmap__for_each_entry(map, entry, bkt) {
- found_msk |= (long)entry->value;
+ found_msk |= entry->value;
}
if (CHECK(found_msk != (1 << 6) - 1, "found_msk",
"not all keys iterated: %lx\n", found_msk))
@@ -311,7 +389,7 @@ static void test_hashmap_multimap(void)
/* iterate values for key 1 */
found_msk = 0;
hashmap__for_each_key_entry(map, entry, k1) {
- found_msk |= (long)entry->value;
+ found_msk |= entry->value;
}
if (CHECK(found_msk != (1 | 2 | 4), "found_msk",
"invalid k1 values: %lx\n", found_msk))
@@ -320,7 +398,7 @@ static void test_hashmap_multimap(void)
/* iterate values for key 2 */
found_msk = 0;
hashmap__for_each_key_entry(map, entry, k2) {
- found_msk |= (long)entry->value;
+ found_msk |= entry->value;
}
if (CHECK(found_msk != (8 | 16 | 32), "found_msk",
"invalid k2 values: %lx\n", found_msk))
@@ -335,12 +413,11 @@ static void test_hashmap_empty()
struct hashmap_entry *entry;
int bkt;
struct hashmap *map;
- void *k = (void *)0;
+ long k = 0;
/* force collisions */
map = hashmap__new(hash_fn, equal_fn, NULL);
- if (CHECK(IS_ERR(map), "hashmap__new",
- "failed to create map: %ld\n", PTR_ERR(map)))
+ if (!ASSERT_OK_PTR(map, "hashmap__new"))
goto cleanup;
if (CHECK(hashmap__size(map) != 0, "hashmap__size",
@@ -377,4 +454,6 @@ void test_hashmap()
test_hashmap_multimap();
if (test__start_subtest("empty"))
test_hashmap_empty();
+ if (test__start_subtest("ptr_iface"))
+ test_hashmap_ptr_iface();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/helper_restricted.c b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c
new file mode 100644
index 000000000000..0354f9b82c65
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "test_helper_restricted.skel.h"
+
+void test_helper_restricted(void)
+{
+ int prog_i = 0, prog_cnt;
+
+ do {
+ struct test_helper_restricted *test;
+ int err;
+
+ test = test_helper_restricted__open();
+ if (!ASSERT_OK_PTR(test, "open"))
+ return;
+
+ prog_cnt = test->skeleton->prog_cnt;
+
+ for (int j = 0; j < prog_cnt; ++j) {
+ struct bpf_program *prog = *test->skeleton->progs[j].prog;
+
+ bpf_program__set_autoload(prog, true);
+ }
+
+ err = test_helper_restricted__load(test);
+ ASSERT_ERR(err, "load_should_fail");
+
+ test_helper_restricted__destroy(test);
+ } while (++prog_i < prog_cnt);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/htab_reuse.c b/tools/testing/selftests/bpf/prog_tests/htab_reuse.c
new file mode 100644
index 000000000000..a742dd994d60
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/htab_reuse.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdbool.h>
+#include <test_progs.h>
+#include "htab_reuse.skel.h"
+
+struct htab_op_ctx {
+ int fd;
+ int loop;
+ bool stop;
+};
+
+struct htab_val {
+ unsigned int lock;
+ unsigned int data;
+};
+
+static void *htab_lookup_fn(void *arg)
+{
+ struct htab_op_ctx *ctx = arg;
+ int i = 0;
+
+ while (i++ < ctx->loop && !ctx->stop) {
+ struct htab_val value;
+ unsigned int key;
+
+ /* Use BPF_F_LOCK to use spin-lock in map value. */
+ key = 7;
+ bpf_map_lookup_elem_flags(ctx->fd, &key, &value, BPF_F_LOCK);
+ }
+
+ return NULL;
+}
+
+static void *htab_update_fn(void *arg)
+{
+ struct htab_op_ctx *ctx = arg;
+ int i = 0;
+
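+	/* insert and immediately delete two keys in a loop so that concurrent
+	 * lookups race with htab elements being freed and reused
+	 */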
+ while (i++ < ctx->loop && !ctx->stop) {
+ struct htab_val value;
+ unsigned int key;
+
+ key = 7;
+ value.lock = 0;
+ value.data = key;
+ bpf_map_update_elem(ctx->fd, &key, &value, BPF_F_LOCK);
+ bpf_map_delete_elem(ctx->fd, &key);
+
+ key = 24;
+ value.lock = 0;
+ value.data = key;
+ bpf_map_update_elem(ctx->fd, &key, &value, BPF_F_LOCK);
+ bpf_map_delete_elem(ctx->fd, &key);
+ }
+
+ return NULL;
+}
+
+void test_htab_reuse(void)
+{
+ unsigned int i, wr_nr = 1, rd_nr = 4;
+ pthread_t tids[wr_nr + rd_nr];
+ struct htab_reuse *skel;
+ struct htab_op_ctx ctx;
+ int err;
+
+ skel = htab_reuse__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "htab_reuse__open_and_load"))
+ return;
+
+ ctx.fd = bpf_map__fd(skel->maps.htab);
+ ctx.loop = 500;
+ ctx.stop = false;
+
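+	/* one updater thread races with four lookup threads on the same htab */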
+ memset(tids, 0, sizeof(tids));
+ for (i = 0; i < wr_nr; i++) {
+ err = pthread_create(&tids[i], NULL, htab_update_fn, &ctx);
+ if (!ASSERT_OK(err, "pthread_create")) {
+ ctx.stop = true;
+ goto reap;
+ }
+ }
+ for (i = 0; i < rd_nr; i++) {
+ err = pthread_create(&tids[i + wr_nr], NULL, htab_lookup_fn, &ctx);
+ if (!ASSERT_OK(err, "pthread_create")) {
+ ctx.stop = true;
+ goto reap;
+ }
+ }
+
+reap:
+ for (i = 0; i < wr_nr + rd_nr; i++) {
+ if (!tids[i])
+ continue;
+ pthread_join(tids[i], NULL);
+ }
+ htab_reuse__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/htab_update.c b/tools/testing/selftests/bpf/prog_tests/htab_update.c
new file mode 100644
index 000000000000..d0b405eb2966
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/htab_update.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022. Huawei Technologies Co., Ltd */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdbool.h>
+#include <test_progs.h>
+#include "htab_update.skel.h"
+
+struct htab_update_ctx {
+ int fd;
+ int loop;
+ bool stop;
+};
+
+static void test_reenter_update(void)
+{
+ struct htab_update *skel;
+ void *value = NULL;
+ unsigned int key, value_size;
+ int err;
+
+ skel = htab_update__open();
+ if (!ASSERT_OK_PTR(skel, "htab_update__open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.bpf_obj_free_fields, true);
+ err = htab_update__load(skel);
+	if (!ASSERT_OK(err, "htab_update__load"))
+ goto out;
+
+ skel->bss->pid = getpid();
+ err = htab_update__attach(skel);
+ if (!ASSERT_OK(err, "htab_update__attach"))
+ goto out;
+
+ value_size = bpf_map__value_size(skel->maps.htab);
+
+ value = calloc(1, value_size);
+ if (!ASSERT_OK_PTR(value, "calloc value"))
+ goto out;
+ /*
+	 * First update: plain insert. This should NOT trigger the reentrancy
+ * path, because there is no old element to free yet.
+ */
+ key = 0;
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.htab), &key, value, BPF_ANY);
+ if (!ASSERT_OK(err, "first update (insert)"))
+ goto out;
+
+ /*
+	 * Second update: replace the existing element with the same key and
+	 * trigger the reentrancy of bpf_map_update_elem():
+	 * check_and_free_fields() calls bpf_obj_free_fields() on the old
+	 * value, which is where the fentry program runs and performs a nested
+	 * bpf_map_update_elem(), triggering -EDEADLK.
+ */
+ memset(value, 0, value_size);
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.htab), &key, value, BPF_ANY);
+ if (!ASSERT_OK(err, "second update (replace)"))
+ goto out;
+
+ ASSERT_EQ(skel->bss->update_err, -EDEADLK, "no reentrancy");
+out:
+ htab_update__destroy(skel);
+}
+
+static void *htab_update_thread(void *arg)
+{
+ struct htab_update_ctx *ctx = arg;
+ cpu_set_t cpus;
+ int i;
+
+ /* Pinned on CPU 0 */
+ CPU_ZERO(&cpus);
+ CPU_SET(0, &cpus);
+ pthread_setaffinity_np(pthread_self(), sizeof(cpus), &cpus);
+
+ i = 0;
+ while (i++ < ctx->loop && !ctx->stop) {
+ unsigned int key = 0, value = 0;
+ int err;
+
+ err = bpf_map_update_elem(ctx->fd, &key, &value, 0);
+ if (err) {
+ ctx->stop = true;
+ return (void *)(long)err;
+ }
+ }
+
+ return NULL;
+}
+
+static void test_concurrent_update(void)
+{
+ struct htab_update_ctx ctx;
+ struct htab_update *skel;
+ unsigned int i, nr;
+ pthread_t *tids;
+ int err;
+
+ skel = htab_update__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "htab_update__open_and_load"))
+ return;
+
+ ctx.fd = bpf_map__fd(skel->maps.htab);
+ ctx.loop = 1000;
+ ctx.stop = false;
+
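+	/* four threads, all pinned to CPU 0 by htab_update_thread(), update
+	 * the same key concurrently; any update error is propagated via the
+	 * thread's return value
+	 */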
+ nr = 4;
+ tids = calloc(nr, sizeof(*tids));
+ if (!ASSERT_NEQ(tids, NULL, "no mem"))
+ goto out;
+
+ for (i = 0; i < nr; i++) {
+ err = pthread_create(&tids[i], NULL, htab_update_thread, &ctx);
+ if (!ASSERT_OK(err, "pthread_create")) {
+ unsigned int j;
+
+ ctx.stop = true;
+ for (j = 0; j < i; j++)
+ pthread_join(tids[j], NULL);
+ goto out;
+ }
+ }
+
+ for (i = 0; i < nr; i++) {
+ void *thread_err = NULL;
+
+ pthread_join(tids[i], &thread_err);
+ ASSERT_EQ(thread_err, NULL, "update error");
+ }
+
+out:
+ if (tids)
+ free(tids);
+ htab_update__destroy(skel);
+}
+
+void test_htab_update(void)
+{
+ if (test__start_subtest("reenter_update"))
+ test_reenter_update();
+ if (test__start_subtest("concurrent_update"))
+ test_concurrent_update();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/inner_array_lookup.c b/tools/testing/selftests/bpf/prog_tests/inner_array_lookup.c
new file mode 100644
index 000000000000..9ab4cd195108
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/inner_array_lookup.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <test_progs.h>
+
+#include "inner_array_lookup.skel.h"
+
+void test_inner_array_lookup(void)
+{
+ int map1_fd, err;
+ int key = 3;
+ int val = 1;
+ struct inner_array_lookup *skel;
+
+ skel = inner_array_lookup__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_load_skeleton"))
+ return;
+
+ err = inner_array_lookup__attach(skel);
+ if (!ASSERT_OK(err, "skeleton_attach"))
+ goto cleanup;
+
+ map1_fd = bpf_map__fd(skel->maps.inner_map1);
+ bpf_map_update_elem(map1_fd, &key, &val, 0);
+
+ /* Probe should have set the element at index 3 to 2 */
+ bpf_map_lookup_elem(map1_fd, &key, &val);
+ ASSERT_EQ(val, 2, "value_is_2");
+
+cleanup:
+ inner_array_lookup__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c
new file mode 100644
index 000000000000..4ddb8a5fece8
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <net/if.h>
+#include <linux/netfilter.h>
+#include <network_helpers.h>
+#include "ip_check_defrag.skel.h"
+#include "ip_check_defrag_frags.h"
+
+/*
+ * This selftest spins up a client and an echo server, each in their own
+ * network namespace. The client will send a fragmented message to the server.
+ * The prog attached to the server will shoot down any fragments. Thus, if
+ * the server is able to correctly echo back the message to the client, we will
+ * have verified that netfilter is reassembling packets for us.
+ *
+ * Topology:
+ * =========
+ * NS0 | NS1
+ * |
+ * client | server
+ * ---------- | ----------
+ * | veth0 | --------- | veth1 |
+ * ---------- peer ----------
+ * |
+ * | with bpf
+ */
+
+#define NS0 "defrag_ns0"
+#define NS1 "defrag_ns1"
+#define VETH0 "veth0"
+#define VETH1 "veth1"
+#define VETH0_ADDR "172.16.1.100"
+#define VETH0_ADDR6 "fc00::100"
+/* The following constants must stay in sync with `generate_udp_fragments.py` */
+#define VETH1_ADDR "172.16.1.200"
+#define VETH1_ADDR6 "fc00::200"
+#define CLIENT_PORT 48878
+#define SERVER_PORT 48879
+#define MAGIC_MESSAGE "THIS IS THE ORIGINAL MESSAGE, PLEASE REASSEMBLE ME"
+
+static int setup_topology(bool ipv6)
+{
+ bool up;
+ int i;
+
+ SYS(fail, "ip netns add " NS0);
+ SYS(fail, "ip netns add " NS1);
+ SYS(fail, "ip link add " VETH0 " netns " NS0 " type veth peer name " VETH1 " netns " NS1);
+ if (ipv6) {
+ SYS(fail, "ip -6 -net " NS0 " addr add " VETH0_ADDR6 "/64 dev " VETH0 " nodad");
+ SYS(fail, "ip -6 -net " NS1 " addr add " VETH1_ADDR6 "/64 dev " VETH1 " nodad");
+ } else {
+ SYS(fail, "ip -net " NS0 " addr add " VETH0_ADDR "/24 dev " VETH0);
+ SYS(fail, "ip -net " NS1 " addr add " VETH1_ADDR "/24 dev " VETH1);
+ }
+ SYS(fail, "ip -net " NS0 " link set dev " VETH0 " up");
+ SYS(fail, "ip -net " NS1 " link set dev " VETH1 " up");
+
+ /* Wait for up to 5s for links to come up */
+ for (i = 0; i < 5; ++i) {
+ if (ipv6)
+ up = !SYS_NOFAIL("ip netns exec " NS0 " ping -6 -c 1 -W 1 " VETH1_ADDR6);
+ else
+ up = !SYS_NOFAIL("ip netns exec " NS0 " ping -c 1 -W 1 " VETH1_ADDR);
+
+ if (up)
+ break;
+ }
+
+ return 0;
+fail:
+ return -1;
+}
+
+static void cleanup_topology(void)
+{
+ SYS_NOFAIL("test -f /var/run/netns/" NS0 " && ip netns delete " NS0);
+ SYS_NOFAIL("test -f /var/run/netns/" NS1 " && ip netns delete " NS1);
+}
+
+static int attach(struct ip_check_defrag *skel, bool ipv6)
+{
+ LIBBPF_OPTS(bpf_netfilter_opts, opts,
+ .pf = ipv6 ? NFPROTO_IPV6 : NFPROTO_IPV4,
+ .priority = 42,
+ .flags = BPF_F_NETFILTER_IP_DEFRAG);
+ struct nstoken *nstoken;
+ int err = -1;
+
+ nstoken = open_netns(NS1);
+ if (!ASSERT_OK_PTR(nstoken, "setns"))
+ goto out;
+
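+	/* BPF_F_NETFILTER_IP_DEFRAG asks the kernel to reassemble IP fragments
+	 * before the netfilter program is invoked
+	 */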
+ skel->links.defrag = bpf_program__attach_netfilter(skel->progs.defrag, &opts);
+ if (!ASSERT_OK_PTR(skel->links.defrag, "program attach"))
+ goto out;
+
+ err = 0;
+out:
+ close_netns(nstoken);
+ return err;
+}
+
+static int send_frags(int client)
+{
+ struct sockaddr_storage saddr;
+ struct sockaddr *saddr_p;
+ socklen_t saddr_len;
+ int err;
+
+ saddr_p = (struct sockaddr *)&saddr;
+ err = make_sockaddr(AF_INET, VETH1_ADDR, SERVER_PORT, &saddr, &saddr_len);
+ if (!ASSERT_OK(err, "make_sockaddr"))
+ return -1;
+
+ err = sendto(client, frag_0, sizeof(frag_0), 0, saddr_p, saddr_len);
+ if (!ASSERT_GE(err, 0, "sendto frag_0"))
+ return -1;
+
+ err = sendto(client, frag_1, sizeof(frag_1), 0, saddr_p, saddr_len);
+ if (!ASSERT_GE(err, 0, "sendto frag_1"))
+ return -1;
+
+ err = sendto(client, frag_2, sizeof(frag_2), 0, saddr_p, saddr_len);
+ if (!ASSERT_GE(err, 0, "sendto frag_2"))
+ return -1;
+
+ return 0;
+}
+
+static int send_frags6(int client)
+{
+ struct sockaddr_storage saddr;
+ struct sockaddr *saddr_p;
+ socklen_t saddr_len;
+ int err;
+
+ saddr_p = (struct sockaddr *)&saddr;
+	/* raw IPv6 sockets interpret sin6_port as the protocol number, so it
+	 * must be 0 here (or match the socket's protocol)
+	 */
+ err = make_sockaddr(AF_INET6, VETH1_ADDR6, 0, &saddr, &saddr_len);
+ if (!ASSERT_OK(err, "make_sockaddr"))
+ return -1;
+
+ err = sendto(client, frag6_0, sizeof(frag6_0), 0, saddr_p, saddr_len);
+ if (!ASSERT_GE(err, 0, "sendto frag6_0"))
+ return -1;
+
+ err = sendto(client, frag6_1, sizeof(frag6_1), 0, saddr_p, saddr_len);
+ if (!ASSERT_GE(err, 0, "sendto frag6_1"))
+ return -1;
+
+ err = sendto(client, frag6_2, sizeof(frag6_2), 0, saddr_p, saddr_len);
+ if (!ASSERT_GE(err, 0, "sendto frag6_2"))
+ return -1;
+
+ return 0;
+}
+
+void test_bpf_ip_check_defrag_ok(bool ipv6)
+{
+ int family = ipv6 ? AF_INET6 : AF_INET;
+ struct network_helper_opts rx_opts = {
+ .timeout_ms = 1000,
+ };
+ struct network_helper_opts tx_ops = {
+ .timeout_ms = 1000,
+ .proto = IPPROTO_RAW,
+ };
+ struct sockaddr_storage caddr;
+ struct ip_check_defrag *skel;
+ struct nstoken *nstoken;
+ int client_tx_fd = -1;
+ int client_rx_fd = -1;
+ socklen_t caddr_len;
+ int srv_fd = -1;
+ char buf[1024];
+ int len, err;
+
+ skel = ip_check_defrag__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ if (!ASSERT_OK(setup_topology(ipv6), "setup_topology"))
+ goto out;
+
+ if (!ASSERT_OK(attach(skel, ipv6), "attach"))
+ goto out;
+
+ /* Start server in ns1 */
+ nstoken = open_netns(NS1);
+ if (!ASSERT_OK_PTR(nstoken, "setns ns1"))
+ goto out;
+ srv_fd = start_server(family, SOCK_DGRAM, NULL, SERVER_PORT, 0);
+ close_netns(nstoken);
+ if (!ASSERT_GE(srv_fd, 0, "start_server"))
+ goto out;
+
+ /* Open tx raw socket in ns0 */
+ nstoken = open_netns(NS0);
+ if (!ASSERT_OK_PTR(nstoken, "setns ns0"))
+ goto out;
+ client_tx_fd = client_socket(family, SOCK_RAW, &tx_ops);
+ close_netns(nstoken);
+ if (!ASSERT_GE(client_tx_fd, 0, "client_socket"))
+ goto out;
+
+ /* Open rx socket in ns0 */
+ nstoken = open_netns(NS0);
+ if (!ASSERT_OK_PTR(nstoken, "setns ns0"))
+ goto out;
+ client_rx_fd = client_socket(family, SOCK_DGRAM, &rx_opts);
+ close_netns(nstoken);
+ if (!ASSERT_GE(client_rx_fd, 0, "client_socket"))
+ goto out;
+
+ /* Bind rx socket to a premeditated port */
+ memset(&caddr, 0, sizeof(caddr));
+ nstoken = open_netns(NS0);
+ if (!ASSERT_OK_PTR(nstoken, "setns ns0"))
+ goto out;
+ if (ipv6) {
+ struct sockaddr_in6 *c = (struct sockaddr_in6 *)&caddr;
+
+ c->sin6_family = AF_INET6;
+ inet_pton(AF_INET6, VETH0_ADDR6, &c->sin6_addr);
+ c->sin6_port = htons(CLIENT_PORT);
+ err = bind(client_rx_fd, (struct sockaddr *)c, sizeof(*c));
+ } else {
+ struct sockaddr_in *c = (struct sockaddr_in *)&caddr;
+
+ c->sin_family = AF_INET;
+ inet_pton(AF_INET, VETH0_ADDR, &c->sin_addr);
+ c->sin_port = htons(CLIENT_PORT);
+ err = bind(client_rx_fd, (struct sockaddr *)c, sizeof(*c));
+ }
+ close_netns(nstoken);
+ if (!ASSERT_OK(err, "bind"))
+ goto out;
+
+ /* Send message in fragments */
+ if (ipv6) {
+ if (!ASSERT_OK(send_frags6(client_tx_fd), "send_frags6"))
+ goto out;
+ } else {
+ if (!ASSERT_OK(send_frags(client_tx_fd), "send_frags"))
+ goto out;
+ }
+
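+	/* the prog counts fragments it shoots down; with in-kernel defrag
+	 * enabled it should never see a fragment, so the counter must stay 0
+	 */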
+ if (!ASSERT_EQ(skel->bss->shootdowns, 0, "shootdowns"))
+ goto out;
+
+ /* Receive reassembled msg on server and echo back to client */
+ caddr_len = sizeof(caddr);
+ len = recvfrom(srv_fd, buf, sizeof(buf), 0, (struct sockaddr *)&caddr, &caddr_len);
+ if (!ASSERT_GE(len, 0, "server recvfrom"))
+ goto out;
+ len = sendto(srv_fd, buf, len, 0, (struct sockaddr *)&caddr, caddr_len);
+ if (!ASSERT_GE(len, 0, "server sendto"))
+ goto out;
+
+	/* Expect reassembled message to be echoed back */
+ len = recvfrom(client_rx_fd, buf, sizeof(buf), 0, NULL, NULL);
+ if (!ASSERT_EQ(len, sizeof(MAGIC_MESSAGE) - 1, "client short read"))
+ goto out;
+
+out:
+ if (client_rx_fd != -1)
+ close(client_rx_fd);
+ if (client_tx_fd != -1)
+ close(client_tx_fd);
+ if (srv_fd != -1)
+ close(srv_fd);
+ cleanup_topology();
+ ip_check_defrag__destroy(skel);
+}
+
+void test_bpf_ip_check_defrag(void)
+{
+ if (test__start_subtest("v4"))
+ test_bpf_ip_check_defrag_ok(false);
+ if (test__start_subtest("v6"))
+ test_bpf_ip_check_defrag_ok(true);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/iters.c b/tools/testing/selftests/bpf/prog_tests/iters.c
new file mode 100644
index 000000000000..3cea71f9c500
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/iters.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <sys/syscall.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <malloc.h>
+#include <stdlib.h>
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+
+#include "iters.skel.h"
+#include "iters_state_safety.skel.h"
+#include "iters_looping.skel.h"
+#include "iters_num.skel.h"
+#include "iters_testmod.skel.h"
+#include "iters_testmod_seq.skel.h"
+#include "iters_task_vma.skel.h"
+#include "iters_task.skel.h"
+#include "iters_css_task.skel.h"
+#include "iters_css.skel.h"
+#include "iters_task_failure.skel.h"
+
+static void subtest_num_iters(void)
+{
+ struct iters_num *skel;
+ int err;
+
+ skel = iters_num__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ err = iters_num__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ usleep(1);
+ iters_num__detach(skel);
+
+#define VALIDATE_CASE(case_name) \
+ ASSERT_EQ(skel->bss->res_##case_name, \
+ skel->rodata->exp_##case_name, \
+ #case_name)
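+/* e.g. VALIDATE_CASE(simple_sum) expands to
+ *   ASSERT_EQ(skel->bss->res_simple_sum, skel->rodata->exp_simple_sum, "simple_sum")
+ */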
+
+ VALIDATE_CASE(empty_zero);
+ VALIDATE_CASE(empty_int_min);
+ VALIDATE_CASE(empty_int_max);
+ VALIDATE_CASE(empty_minus_one);
+
+ VALIDATE_CASE(simple_sum);
+ VALIDATE_CASE(neg_sum);
+ VALIDATE_CASE(very_neg_sum);
+ VALIDATE_CASE(neg_pos_sum);
+
+ VALIDATE_CASE(invalid_range);
+ VALIDATE_CASE(max_range);
+ VALIDATE_CASE(e2big_range);
+
+ VALIDATE_CASE(succ_elem_cnt);
+ VALIDATE_CASE(overfetched_elem_cnt);
+ VALIDATE_CASE(fail_elem_cnt);
+
+#undef VALIDATE_CASE
+
+cleanup:
+ iters_num__destroy(skel);
+}
+
+static void subtest_testmod_seq_iters(void)
+{
+ struct iters_testmod_seq *skel;
+ int err;
+
+ if (!env.has_testmod) {
+ test__skip();
+ return;
+ }
+
+ skel = iters_testmod_seq__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ err = iters_testmod_seq__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ usleep(1);
+ iters_testmod_seq__detach(skel);
+
+#define VALIDATE_CASE(case_name) \
+ ASSERT_EQ(skel->bss->res_##case_name, \
+ skel->rodata->exp_##case_name, \
+ #case_name)
+
+ VALIDATE_CASE(empty);
+ VALIDATE_CASE(full);
+ VALIDATE_CASE(truncated);
+
+#undef VALIDATE_CASE
+
+cleanup:
+ iters_testmod_seq__destroy(skel);
+}
+
+static void subtest_task_vma_iters(void)
+{
+ unsigned long start, end, bpf_iter_start, bpf_iter_end;
+ struct iters_task_vma *skel;
+ char rest_of_line[1000];
+ unsigned int seen;
+ FILE *f = NULL;
+ int err;
+
+ skel = iters_task_vma__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ skel->bss->target_pid = getpid();
+
+ err = iters_task_vma__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
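+	/* getpgid() triggers the attached prog, which records this task's
+	 * VMA ranges into vm_ranges[]
+	 */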
+ getpgid(skel->bss->target_pid);
+ iters_task_vma__detach(skel);
+
+ if (!ASSERT_GT(skel->bss->vmas_seen, 0, "vmas_seen_gt_zero"))
+ goto cleanup;
+
+ f = fopen("/proc/self/maps", "r");
+ if (!ASSERT_OK_PTR(f, "proc_maps_fopen"))
+ goto cleanup;
+
+ seen = 0;
+ while (fscanf(f, "%lx-%lx %[^\n]\n", &start, &end, rest_of_line) == 3) {
+ /* [vsyscall] vma isn't _really_ part of task->mm vmas.
+ * /proc/PID/maps returns it when out of vmas - see get_gate_vma
+ * calls in fs/proc/task_mmu.c
+ */
+ if (strstr(rest_of_line, "[vsyscall]"))
+ continue;
+
+ bpf_iter_start = skel->bss->vm_ranges[seen].vm_start;
+ bpf_iter_end = skel->bss->vm_ranges[seen].vm_end;
+
+ ASSERT_EQ(bpf_iter_start, start, "vma->vm_start match");
+ ASSERT_EQ(bpf_iter_end, end, "vma->vm_end match");
+ seen++;
+ }
+
+ if (!ASSERT_EQ(skel->bss->vmas_seen, seen, "vmas_seen_eq"))
+ goto cleanup;
+
+cleanup:
+ if (f)
+ fclose(f);
+ iters_task_vma__destroy(skel);
+}
+
+static pthread_mutex_t do_nothing_mutex;
+
+static void *do_nothing_wait(void *arg)
+{
+ pthread_mutex_lock(&do_nothing_mutex);
+ pthread_mutex_unlock(&do_nothing_mutex);
+
+ pthread_exit(arg);
+}
+
+#define thread_num 2
+
+static void subtest_task_iters(void)
+{
+ struct iters_task *skel = NULL;
+ pthread_t thread_ids[thread_num];
+ void *ret;
+ int err;
+
+ skel = iters_task__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ goto cleanup;
+ skel->bss->target_pid = getpid();
+ err = iters_task__attach(skel);
+ if (!ASSERT_OK(err, "iters_task__attach"))
+ goto cleanup;
+ pthread_mutex_lock(&do_nothing_mutex);
+ for (int i = 0; i < thread_num; i++)
+ ASSERT_OK(pthread_create(&thread_ids[i], NULL, &do_nothing_wait, NULL),
+ "pthread_create");
+
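+	/* getpgid() triggers the attached task-iterator prog */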
+ syscall(SYS_getpgid);
+ iters_task__detach(skel);
+ ASSERT_EQ(skel->bss->procs_cnt, 1, "procs_cnt");
+ ASSERT_EQ(skel->bss->threads_cnt, thread_num + 2, "threads_cnt");
+ ASSERT_EQ(skel->bss->proc_threads_cnt, thread_num + 2, "proc_threads_cnt");
+ ASSERT_EQ(skel->bss->invalid_cnt, 0, "invalid_cnt");
+ pthread_mutex_unlock(&do_nothing_mutex);
+ for (int i = 0; i < thread_num; i++)
+ ASSERT_OK(pthread_join(thread_ids[i], &ret), "pthread_join");
+cleanup:
+ iters_task__destroy(skel);
+}
+
+extern int stack_mprotect(void);
+
+static void subtest_css_task_iters(void)
+{
+ struct iters_css_task *skel = NULL;
+ int err, cg_fd, cg_id;
+ const char *cgrp_path = "/cg1";
+
+ err = setup_cgroup_environment();
+ if (!ASSERT_OK(err, "setup_cgroup_environment"))
+ goto cleanup;
+ cg_fd = create_and_get_cgroup(cgrp_path);
+ if (!ASSERT_GE(cg_fd, 0, "create_and_get_cgroup"))
+ goto cleanup;
+ cg_id = get_cgroup_id(cgrp_path);
+ err = join_cgroup(cgrp_path);
+ if (!ASSERT_OK(err, "join_cgroup"))
+ goto cleanup;
+
+ skel = iters_css_task__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ goto cleanup;
+
+ skel->bss->target_pid = getpid();
+ skel->bss->cg_id = cg_id;
+ err = iters_css_task__attach(skel);
+	if (!ASSERT_OK(err, "iters_css_task__attach"))
+ goto cleanup;
+ err = stack_mprotect();
+ if (!ASSERT_EQ(err, -1, "stack_mprotect") ||
+ !ASSERT_EQ(errno, EPERM, "stack_mprotect"))
+ goto cleanup;
+ iters_css_task__detach(skel);
+ ASSERT_EQ(skel->bss->css_task_cnt, 1, "css_task_cnt");
+
+cleanup:
+ cleanup_cgroup_environment();
+ iters_css_task__destroy(skel);
+}
+
+static void subtest_css_iters(void)
+{
+ struct iters_css *skel = NULL;
+ struct {
+ const char *path;
+ int fd;
+ } cgs[] = {
+ { "/cg1" },
+ { "/cg1/cg2" },
+ { "/cg1/cg2/cg3" },
+ { "/cg1/cg2/cg3/cg4" },
+ };
+ int err, cg_nr = ARRAY_SIZE(cgs);
+ int i;
+
+ err = setup_cgroup_environment();
+ if (!ASSERT_OK(err, "setup_cgroup_environment"))
+ goto cleanup;
+ for (i = 0; i < cg_nr; i++) {
+ cgs[i].fd = create_and_get_cgroup(cgs[i].path);
+ if (!ASSERT_GE(cgs[i].fd, 0, "create_and_get_cgroup"))
+ goto cleanup;
+ }
+
+ skel = iters_css__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ goto cleanup;
+
+ skel->bss->target_pid = getpid();
+ skel->bss->root_cg_id = get_cgroup_id(cgs[0].path);
+ skel->bss->leaf_cg_id = get_cgroup_id(cgs[cg_nr - 1].path);
+ err = iters_css__attach(skel);
+
+	if (!ASSERT_OK(err, "iters_css__attach"))
+ goto cleanup;
+
+ syscall(SYS_getpgid);
+ ASSERT_EQ(skel->bss->pre_order_cnt, cg_nr, "pre_order_cnt");
+ ASSERT_EQ(skel->bss->first_cg_id, get_cgroup_id(cgs[0].path), "first_cg_id");
+
+ ASSERT_EQ(skel->bss->post_order_cnt, cg_nr, "post_order_cnt");
+ ASSERT_EQ(skel->bss->last_cg_id, get_cgroup_id(cgs[0].path), "last_cg_id");
+ ASSERT_EQ(skel->bss->tree_high, cg_nr - 1, "tree_high");
+ iters_css__detach(skel);
+cleanup:
+ cleanup_cgroup_environment();
+ iters_css__destroy(skel);
+}
+
+void test_iters(void)
+{
+ RUN_TESTS(iters_state_safety);
+ RUN_TESTS(iters_looping);
+ RUN_TESTS(iters);
+ RUN_TESTS(iters_css_task);
+
+ if (env.has_testmod) {
+ RUN_TESTS(iters_testmod);
+ RUN_TESTS(iters_testmod_seq);
+ }
+
+ if (test__start_subtest("num"))
+ subtest_num_iters();
+ if (test__start_subtest("testmod_seq"))
+ subtest_testmod_seq_iters();
+ if (test__start_subtest("task_vma"))
+ subtest_task_vma_iters();
+ if (test__start_subtest("task"))
+ subtest_task_iters();
+ if (test__start_subtest("css_task"))
+ subtest_css_task_iters();
+ if (test__start_subtest("css"))
+ subtest_css_iters();
+ RUN_TESTS(iters_task_failure);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c b/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c
new file mode 100644
index 000000000000..3add34df5767
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "jeq_infer_not_null_fail.skel.h"
+
+void test_jeq_infer_not_null(void)
+{
+ RUN_TESTS(jeq_infer_not_null_fail);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/jit_probe_mem.c b/tools/testing/selftests/bpf/prog_tests/jit_probe_mem.c
new file mode 100644
index 000000000000..5639428607e6
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/jit_probe_mem.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "jit_probe_mem.skel.h"
+
+void test_jit_probe_mem(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct jit_probe_mem *skel;
+ int ret;
+
+ skel = jit_probe_mem__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "jit_probe_mem__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_jit_probe_mem), &opts);
+ ASSERT_OK(ret, "jit_probe_mem ret");
+ ASSERT_OK(opts.retval, "jit_probe_mem opts.retval");
+ ASSERT_EQ(skel->data->total_sum, 192, "jit_probe_mem total_sum");
+
+ jit_probe_mem__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/kernel_flag.c b/tools/testing/selftests/bpf/prog_tests/kernel_flag.c
new file mode 100644
index 000000000000..97b00c7efe94
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kernel_flag.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Microsoft */
+#include <test_progs.h>
+#include "kfunc_call_test.skel.h"
+#include "kfunc_call_test.lskel.h"
+#include "test_kernel_flag.skel.h"
+
+void test_kernel_flag(void)
+{
+ struct test_kernel_flag *lsm_skel;
+ struct kfunc_call_test *skel = NULL;
+ struct kfunc_call_test_lskel *lskel = NULL;
+ int ret;
+
+ lsm_skel = test_kernel_flag__open_and_load();
+ if (!ASSERT_OK_PTR(lsm_skel, "lsm_skel"))
+ return;
+
+ lsm_skel->bss->monitored_tid = sys_gettid();
+
+ ret = test_kernel_flag__attach(lsm_skel);
+ if (!ASSERT_OK(ret, "test_kernel_flag__attach"))
+ goto close_prog;
+
+ /* Test with skel. This should pass the gatekeeper */
+ skel = kfunc_call_test__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ goto close_prog;
+
+ /* Test with lskel. This should fail due to blocking kernel-based bpf() invocations */
+ lskel = kfunc_call_test_lskel__open_and_load();
+ if (!ASSERT_ERR_PTR(lskel, "lskel"))
+ goto close_prog;
+
+close_prog:
+ if (skel)
+ kfunc_call_test__destroy(skel);
+ if (lskel)
+ kfunc_call_test_lskel__destroy(lskel);
+
+ lsm_skel->bss->monitored_tid = 0;
+ test_kernel_flag__destroy(lsm_skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
index 42c3a3103c26..34f8822fd221 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
#include <test_progs.h>
#include <network_helpers.h>
+#include "kfree_skb.skel.h"
struct meta {
int ifindex;
@@ -35,7 +37,7 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size)
"cb32_0 %x != %x\n",
meta->cb32_0, cb.cb32[0]))
return;
- if (CHECK(pkt_v6->eth.h_proto != 0xdd86, "check_eth",
+ if (CHECK(pkt_v6->eth.h_proto != htons(ETH_P_IPV6), "check_eth",
"h_proto %x\n", pkt_v6->eth.h_proto))
return;
if (CHECK(pkt_v6->iph.nexthdr != 6, "check_ip",
@@ -48,83 +50,60 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size)
*(bool *)ctx = true;
}
-void test_kfree_skb(void)
+/* TODO: fix kernel panic caused by this test in parallel mode */
+void serial_test_kfree_skb(void)
{
struct __sk_buff skb = {};
- struct bpf_prog_test_run_attr tattr = {
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v6,
.data_size_in = sizeof(pkt_v6),
.ctx_in = &skb,
.ctx_size_in = sizeof(skb),
- };
- struct bpf_prog_load_attr attr = {
- .file = "./kfree_skb.o",
- };
-
- struct bpf_link *link = NULL, *link_fentry = NULL, *link_fexit = NULL;
- struct bpf_map *perf_buf_map, *global_data;
- struct bpf_program *prog, *fentry, *fexit;
- struct bpf_object *obj, *obj2 = NULL;
- struct perf_buffer_opts pb_opts = {};
+ );
+ struct kfree_skb *skel = NULL;
+ struct bpf_link *link;
+ struct bpf_object *obj;
struct perf_buffer *pb = NULL;
- int err, kfree_skb_fd;
+ int err, prog_fd;
bool passed = false;
__u32 duration = 0;
const int zero = 0;
bool test_ok[2];
- err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
- &obj, &tattr.prog_fd);
+ err = bpf_prog_test_load("./test_pkt_access.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &prog_fd);
if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
return;
- err = bpf_prog_load_xattr(&attr, &obj2, &kfree_skb_fd);
- if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
+ skel = kfree_skb__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kfree_skb_skel"))
goto close_prog;
- prog = bpf_object__find_program_by_title(obj2, "tp_btf/kfree_skb");
- if (CHECK(!prog, "find_prog", "prog kfree_skb not found\n"))
- goto close_prog;
- fentry = bpf_object__find_program_by_title(obj2, "fentry/eth_type_trans");
- if (CHECK(!fentry, "find_prog", "prog eth_type_trans not found\n"))
- goto close_prog;
- fexit = bpf_object__find_program_by_title(obj2, "fexit/eth_type_trans");
- if (CHECK(!fexit, "find_prog", "prog eth_type_trans not found\n"))
- goto close_prog;
-
- global_data = bpf_object__find_map_by_name(obj2, "kfree_sk.bss");
- if (CHECK(!global_data, "find global data", "not found\n"))
+ link = bpf_program__attach_raw_tracepoint(skel->progs.trace_kfree_skb, NULL);
+ if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto close_prog;
+ skel->links.trace_kfree_skb = link;
- link = bpf_program__attach_raw_tracepoint(prog, NULL);
- if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
- goto close_prog;
- link_fentry = bpf_program__attach_trace(fentry);
- if (CHECK(IS_ERR(link_fentry), "attach fentry", "err %ld\n",
- PTR_ERR(link_fentry)))
- goto close_prog;
- link_fexit = bpf_program__attach_trace(fexit);
- if (CHECK(IS_ERR(link_fexit), "attach fexit", "err %ld\n",
- PTR_ERR(link_fexit)))
+ link = bpf_program__attach_trace(skel->progs.fentry_eth_type_trans);
+ if (!ASSERT_OK_PTR(link, "attach fentry"))
goto close_prog;
+ skel->links.fentry_eth_type_trans = link;
- perf_buf_map = bpf_object__find_map_by_name(obj2, "perf_buf_map");
- if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n"))
+ link = bpf_program__attach_trace(skel->progs.fexit_eth_type_trans);
+ if (!ASSERT_OK_PTR(link, "attach fexit"))
goto close_prog;
+ skel->links.fexit_eth_type_trans = link;
/* set up perf buffer */
- pb_opts.sample_cb = on_sample;
- pb_opts.ctx = &passed;
- pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
- if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
+ pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1,
+ on_sample, NULL, &passed, NULL);
+ if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto close_prog;
memcpy(skb.cb, &cb, sizeof(cb));
- err = bpf_prog_test_run_xattr(&tattr);
- duration = tattr.duration;
- CHECK(err || tattr.retval, "ipv6",
- "err %d errno %d retval %d duration %d\n",
- err, errno, tattr.retval, duration);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "ipv6 test_run");
+ ASSERT_OK(topts.retval, "ipv6 test_run retval");
/* read perf buffer */
err = perf_buffer__poll(pb, 100);
@@ -134,9 +113,9 @@ void test_kfree_skb(void)
/* make sure kfree_skb program was triggered
* and it sent expected skb into ring buffer
*/
- CHECK_FAIL(!passed);
+ ASSERT_TRUE(passed, "passed");
- err = bpf_map_lookup_elem(bpf_map__fd(global_data), &zero, test_ok);
+ err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.bss), &zero, test_ok);
if (CHECK(err, "get_result",
"failed to get output data: %d\n", err))
goto close_prog;
@@ -144,12 +123,6 @@ void test_kfree_skb(void)
CHECK_FAIL(!test_ok[0] || !test_ok[1]);
close_prog:
perf_buffer__free(pb);
- if (!IS_ERR_OR_NULL(link))
- bpf_link__destroy(link);
- if (!IS_ERR_OR_NULL(link_fentry))
- bpf_link__destroy(link_fentry);
- if (!IS_ERR_OR_NULL(link_fexit))
- bpf_link__destroy(link_fexit);
bpf_object__close(obj);
- bpf_object__close(obj2);
+ kfree_skb__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
new file mode 100644
index 000000000000..f79c8e53cb3e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "kfunc_call_fail.skel.h"
+#include "kfunc_call_test.skel.h"
+#include "kfunc_call_test.lskel.h"
+#include "kfunc_call_test_subprog.skel.h"
+#include "kfunc_call_test_subprog.lskel.h"
+#include "kfunc_call_destructive.skel.h"
+
+#include "cap_helpers.h"
+
+static size_t log_buf_sz = 1048576; /* 1 MB */
+static char obj_log_buf[1048576];
+
+enum kfunc_test_type {
+ tc_test = 0,
+ syscall_test,
+ syscall_null_ctx_test,
+};
+
+struct kfunc_test_params {
+ const char *prog_name;
+ unsigned long lskel_prog_desc_offset;
+ int retval;
+ enum kfunc_test_type test_type;
+ const char *expected_err_msg;
+};
+
+#define __BPF_TEST_SUCCESS(name, __retval, type) \
+ { \
+ .prog_name = #name, \
+ .lskel_prog_desc_offset = offsetof(struct kfunc_call_test_lskel, progs.name), \
+ .retval = __retval, \
+ .test_type = type, \
+ .expected_err_msg = NULL, \
+ }
+
+#define __BPF_TEST_FAIL(name, __retval, type, error_msg) \
+ { \
+ .prog_name = #name, \
+ .lskel_prog_desc_offset = 0 /* unused when test is failing */, \
+ .retval = __retval, \
+ .test_type = type, \
+ .expected_err_msg = error_msg, \
+ }
+
+#define TC_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, tc_test)
+#define SYSCALL_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_test)
+#define SYSCALL_NULL_CTX_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_null_ctx_test)
+
+#define TC_FAIL(name, retval, error_msg) __BPF_TEST_FAIL(name, retval, tc_test, error_msg)
+#define SYSCALL_NULL_CTX_FAIL(name, retval, error_msg) \
+ __BPF_TEST_FAIL(name, retval, syscall_null_ctx_test, error_msg)
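+
+/* e.g. TC_TEST(kfunc_call_test1, 12) expands to an entry with
+ * .prog_name = "kfunc_call_test1", .retval = 12, .test_type = tc_test and
+ * the light skeleton program descriptor offset computed at compile time
+ * with offsetof()
+ */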
+
+static struct kfunc_test_params kfunc_tests[] = {
+	/* failure cases:
+	 * if retval is 0 -> the program will fail to load and the error
+	 *                   message is the verifier error
+	 * if retval is not 0 -> the program can be loaded but running it will
+	 *                       give the provided return value; the error
+	 *                       message is then the one from a successful load
+	 */
+ SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_fail, -EINVAL, "processed 4 insns"),
+ SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_null_fail, -EINVAL, "processed 4 insns"),
+ TC_FAIL(kfunc_call_test_get_mem_fail_rdonly, 0, "R0 cannot write into rdonly_mem"),
+ TC_FAIL(kfunc_call_test_get_mem_fail_use_after_free, 0, "invalid mem access 'scalar'"),
+ TC_FAIL(kfunc_call_test_get_mem_fail_oob, 0, "min value is outside of the allowed memory range"),
+ TC_FAIL(kfunc_call_test_get_mem_fail_not_const, 0, "is not a const"),
+ TC_FAIL(kfunc_call_test_mem_acquire_fail, 0, "acquire kernel function does not return PTR_TO_BTF_ID"),
+ TC_FAIL(kfunc_call_test_pointer_arg_type_mismatch, 0, "arg#0 expected pointer to ctx, but got scalar"),
+
+ /* success cases */
+ TC_TEST(kfunc_call_test1, 12),
+ TC_TEST(kfunc_call_test2, 3),
+ TC_TEST(kfunc_call_test4, -1234),
+ TC_TEST(kfunc_call_test_ref_btf_id, 0),
+ TC_TEST(kfunc_call_test_get_mem, 42),
+ SYSCALL_TEST(kfunc_syscall_test, 0),
+ SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0),
+ TC_TEST(kfunc_call_test_static_unused_arg, 0),
+ TC_TEST(kfunc_call_ctx, 0),
+};
+
+struct syscall_test_args {
+ __u8 data[16];
+ size_t size;
+};
+
+static void verify_success(struct kfunc_test_params *param)
+{
+ struct kfunc_call_test_lskel *lskel = NULL;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct bpf_prog_desc *lskel_prog;
+ struct kfunc_call_test *skel;
+ struct bpf_program *prog;
+ int prog_fd, err;
+ struct syscall_test_args args = {
+ .size = 10,
+ };
+
+ switch (param->test_type) {
+ case syscall_test:
+ topts.ctx_in = &args;
+ topts.ctx_size_in = sizeof(args);
+ /* fallthrough */
+ case syscall_null_ctx_test:
+ break;
+ case tc_test:
+ topts.data_in = &pkt_v4;
+ topts.data_size_in = sizeof(pkt_v4);
+ topts.repeat = 1;
+ break;
+ }
+
+ /* first test with normal libbpf */
+ skel = kfunc_call_test__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, param->prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(prog);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, param->prog_name))
+ goto cleanup;
+
+ if (!ASSERT_EQ(topts.retval, param->retval, "retval"))
+ goto cleanup;
+
+ /* second test with light skeletons */
+ lskel = kfunc_call_test_lskel__open_and_load();
+ if (!ASSERT_OK_PTR(lskel, "lskel"))
+ goto cleanup;
+
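+	/* locate this test's bpf_prog_desc inside the light skeleton via the
+	 * offset recorded in the test table, so the same table drives both
+	 * the regular and the light skeleton runs
+	 */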
+ lskel_prog = (struct bpf_prog_desc *)((char *)lskel + param->lskel_prog_desc_offset);
+
+ prog_fd = lskel_prog->prog_fd;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, param->prog_name))
+ goto cleanup;
+
+ ASSERT_EQ(topts.retval, param->retval, "retval");
+
+cleanup:
+ kfunc_call_test__destroy(skel);
+ if (lskel)
+ kfunc_call_test_lskel__destroy(lskel);
+}
+
+static void verify_fail(struct kfunc_test_params *param)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct bpf_program *prog;
+ struct kfunc_call_fail *skel;
+ int prog_fd, err;
+ struct syscall_test_args args = {
+ .size = 10,
+ };
+
+ opts.kernel_log_buf = obj_log_buf;
+ opts.kernel_log_size = log_buf_sz;
+ opts.kernel_log_level = 1;
+
+ switch (param->test_type) {
+ case syscall_test:
+ topts.ctx_in = &args;
+ topts.ctx_size_in = sizeof(args);
+ /* fallthrough */
+ case syscall_null_ctx_test:
+ break;
+ case tc_test:
+ topts.data_in = &pkt_v4;
+ topts.data_size_in = sizeof(pkt_v4);
+ topts.repeat = 1;
+ break;
+ }
+
+ skel = kfunc_call_fail__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "kfunc_call_fail__open_opts"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(skel->obj, param->prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ bpf_program__set_autoload(prog, true);
+
+ err = kfunc_call_fail__load(skel);
+ if (!param->retval) {
+		/* the verifier is supposed to complain and refuse to load */
+ if (!ASSERT_ERR(err, "unexpected load success"))
+ goto out_err;
+
+ } else {
+		/* the program loads successfully but must fail at run time */
+ if (!ASSERT_OK(err, "unexpected load error"))
+ goto out_err;
+
+ prog_fd = bpf_program__fd(prog);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_EQ(err, param->retval, param->prog_name))
+ goto out_err;
+ }
+
+out_err:
+ if (!ASSERT_OK_PTR(strstr(obj_log_buf, param->expected_err_msg), "expected_err_msg")) {
+ fprintf(stderr, "Expected err_msg: %s\n", param->expected_err_msg);
+ fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
+ }
+
+cleanup:
+ kfunc_call_fail__destroy(skel);
+}
+
+static void test_main(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kfunc_tests); i++) {
+ if (!test__start_subtest(kfunc_tests[i].prog_name))
+ continue;
+
+ if (!kfunc_tests[i].expected_err_msg)
+ verify_success(&kfunc_tests[i]);
+ else
+ verify_fail(&kfunc_tests[i]);
+ }
+}
+
+static void test_subprog(void)
+{
+ struct kfunc_call_test_subprog *skel;
+ int prog_fd, err;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ skel = kfunc_call_test_subprog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.kfunc_call_test1);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "bpf_prog_test_run(test1)");
+ ASSERT_EQ(topts.retval, 10, "test1-retval");
+ ASSERT_NEQ(skel->data->active_res, -1, "active_res");
+ ASSERT_EQ(skel->data->sk_state_res, BPF_TCP_CLOSE, "sk_state_res");
+
+ kfunc_call_test_subprog__destroy(skel);
+}
+
+static void test_subprog_lskel(void)
+{
+ struct kfunc_call_test_subprog_lskel *skel;
+ int prog_fd, err;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ skel = kfunc_call_test_subprog_lskel__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ return;
+
+ prog_fd = skel->progs.kfunc_call_test1.prog_fd;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "bpf_prog_test_run(test1)");
+ ASSERT_EQ(topts.retval, 10, "test1-retval");
+ ASSERT_NEQ(skel->data->active_res, -1, "active_res");
+ ASSERT_EQ(skel->data->sk_state_res, BPF_TCP_CLOSE, "sk_state_res");
+
+ kfunc_call_test_subprog_lskel__destroy(skel);
+}
+
+static int test_destructive_open_and_load(void)
+{
+ struct kfunc_call_destructive *skel;
+ int err;
+
+ skel = kfunc_call_destructive__open();
+ if (!ASSERT_OK_PTR(skel, "prog_open"))
+ return -1;
+
+ err = kfunc_call_destructive__load(skel);
+
+ kfunc_call_destructive__destroy(skel);
+
+ return err;
+}
+
+static void test_destructive(void)
+{
+ __u64 save_caps = 0;
+
+ ASSERT_OK(test_destructive_open_and_load(), "successful_load");
+
+ if (!ASSERT_OK(cap_disable_effective(1ULL << CAP_SYS_BOOT, &save_caps), "drop_caps"))
+ return;
+
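+	/* -13 is -EACCES: loading a destructive kfunc without CAP_SYS_BOOT
+	 * must be refused by the verifier
+	 */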
+ ASSERT_EQ(test_destructive_open_and_load(), -13, "no_caps_failure");
+
+ cap_enable_effective(save_caps, NULL);
+}
+
+void test_kfunc_call(void)
+{
+ test_main();
+
+ if (test__start_subtest("subprog"))
+ test_subprog();
+
+ if (test__start_subtest("subprog_lskel"))
+ test_subprog_lskel();
+
+ if (test__start_subtest("destructive"))
+ test_destructive();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c
new file mode 100644
index 000000000000..8cd298b78e44
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2022 Facebook
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include <test_progs.h>
+#include "test_kfunc_dynptr_param.skel.h"
+
+static struct {
+ const char *prog_name;
+ int expected_runtime_err;
+} kfunc_dynptr_tests[] = {
+ {"dynptr_data_null", -EBADMSG},
+};
+
+static bool kfunc_not_supported;
+
+static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt,
+ va_list args)
+{
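+	/* strcmp() returns non-zero when the strings differ, so both checks
+	 * bail out early unless this is the missing-ksym report for
+	 * bpf_verify_pkcs7_signature
+	 */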
+ if (strcmp(fmt, "libbpf: extern (func ksym) '%s': not found in kernel or module BTFs\n"))
+ return 0;
+
+ if (strcmp(va_arg(args, char *), "bpf_verify_pkcs7_signature"))
+ return 0;
+
+ kfunc_not_supported = true;
+ return 0;
+}
+
+static bool has_pkcs7_kfunc_support(void)
+{
+ struct test_kfunc_dynptr_param *skel;
+ libbpf_print_fn_t old_print_cb;
+ int err;
+
+ skel = test_kfunc_dynptr_param__open();
+ if (!ASSERT_OK_PTR(skel, "test_kfunc_dynptr_param__open"))
+ return false;
+
+ kfunc_not_supported = false;
+
+ old_print_cb = libbpf_set_print(libbpf_print_cb);
+ err = test_kfunc_dynptr_param__load(skel);
+ libbpf_set_print(old_print_cb);
+
+ if (err < 0 && kfunc_not_supported) {
+ fprintf(stderr,
+ "%s:SKIP:bpf_verify_pkcs7_signature() kfunc not supported\n",
+ __func__);
+ test_kfunc_dynptr_param__destroy(skel);
+ return false;
+ }
+
+ test_kfunc_dynptr_param__destroy(skel);
+
+ return true;
+}
+
+static void verify_success(const char *prog_name, int expected_runtime_err)
+{
+ struct test_kfunc_dynptr_param *skel;
+ struct bpf_program *prog;
+ struct bpf_link *link;
+ __u32 next_id;
+ int err;
+
+ skel = test_kfunc_dynptr_param__open();
+ if (!ASSERT_OK_PTR(skel, "test_kfunc_dynptr_param__open"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ err = test_kfunc_dynptr_param__load(skel);
+
+ if (!ASSERT_OK(err, "test_kfunc_dynptr_param__load"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
+ goto cleanup;
+
+ err = bpf_prog_get_next_id(0, &next_id);
+
+ bpf_link__destroy(link);
+
+ if (!ASSERT_OK(err, "bpf_prog_get_next_id"))
+ goto cleanup;
+
+ ASSERT_EQ(skel->bss->err, expected_runtime_err, "err");
+
+cleanup:
+ test_kfunc_dynptr_param__destroy(skel);
+}
+
+void test_kfunc_dynptr_param(void)
+{
+ int i;
+
+ if (!has_pkcs7_kfunc_support())
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(kfunc_dynptr_tests); i++) {
+ if (!test__start_subtest(kfunc_dynptr_tests[i].prog_name))
+ continue;
+
+ verify_success(kfunc_dynptr_tests[i].prog_name,
+ kfunc_dynptr_tests[i].expected_runtime_err);
+ }
+ RUN_TESTS(test_kfunc_dynptr_param);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_module_order.c b/tools/testing/selftests/bpf/prog_tests/kfunc_module_order.c
new file mode 100644
index 000000000000..48c0560d398e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_module_order.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <testing_helpers.h>
+
+#include "kfunc_module_order.skel.h"
+
+static int test_run_prog(const struct bpf_program *prog,
+ struct bpf_test_run_opts *opts)
+{
+ int err;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(prog), opts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ return err;
+
+ if (!ASSERT_EQ((int)opts->retval, 0, bpf_program__name(prog)))
+ return -EINVAL;
+
+ return 0;
+}
+
+void test_kfunc_module_order(void)
+{
+ struct kfunc_module_order *skel;
+ char pkt_data[64] = {};
+ int err = 0;
+
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, test_opts, .data_in = pkt_data,
+ .data_size_in = sizeof(pkt_data));
+
+ err = load_module("bpf_test_modorder_x.ko",
+ env_verbosity > VERBOSE_NONE);
+ if (!ASSERT_OK(err, "load bpf_test_modorder_x.ko"))
+ return;
+
+ err = load_module("bpf_test_modorder_y.ko",
+ env_verbosity > VERBOSE_NONE);
+ if (!ASSERT_OK(err, "load bpf_test_modorder_y.ko"))
+ goto exit_modx;
+
+ skel = kfunc_module_order__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kfunc_module_order__open_and_load()")) {
+ err = -EINVAL;
+ goto exit_mods;
+ }
+
+ test_run_prog(skel->progs.call_kfunc_xy, &test_opts);
+ test_run_prog(skel->progs.call_kfunc_yx, &test_opts);
+
+ kfunc_module_order__destroy(skel);
+exit_mods:
+ unload_module("bpf_test_modorder_y", env_verbosity > VERBOSE_NONE);
+exit_modx:
+ unload_module("bpf_test_modorder_x", env_verbosity > VERBOSE_NONE);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c b/tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c
new file mode 100644
index 000000000000..c8f4dcaac7c7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2024 Meta Platforms, Inc */
+
+#include <test_progs.h>
+#include "test_kfunc_param_nullable.skel.h"
+
+void test_kfunc_param_nullable(void)
+{
+ RUN_TESTS(test_kfunc_param_nullable);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
new file mode 100644
index 000000000000..6e35e13c2022
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google */
+
+#include <test_progs.h>
+#include <bpf/libbpf.h>
+#include <bpf/btf.h>
+#include "kmem_cache_iter.skel.h"
+
+#define SLAB_NAME_MAX 32
+
+struct kmem_cache_result {
+ char name[SLAB_NAME_MAX];
+ long obj_size;
+};
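+
+/* entries of the slab_result array map are expected to be filled by the
+ * iterator program in the same order that /proc/slabinfo lists the caches,
+ * so they can be compared index by index below
+ */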
+
+static void subtest_kmem_cache_iter_check_task_struct(struct kmem_cache_iter *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .flags = 0, /* Run it with the current task */
+ );
+ int prog_fd = bpf_program__fd(skel->progs.check_task_struct);
+
+	/* Get a task_struct and check if it's from a slab cache */
+ ASSERT_OK(bpf_prog_test_run_opts(prog_fd, &opts), "prog_test_run");
+
+	/* The BPF program should have set the 'found' variable */
+ ASSERT_EQ(skel->bss->task_struct_found, 1, "task_struct_found");
+}
+
+static void subtest_kmem_cache_iter_check_slabinfo(struct kmem_cache_iter *skel)
+{
+ FILE *fp;
+ int map_fd;
+ char name[SLAB_NAME_MAX];
+ unsigned long objsize;
+ char rest_of_line[1000];
+ struct kmem_cache_result r;
+ int seen = 0;
+
+ fp = fopen("/proc/slabinfo", "r");
+ if (fp == NULL) {
+ /* CONFIG_SLUB_DEBUG is not enabled */
+ return;
+ }
+
+ map_fd = bpf_map__fd(skel->maps.slab_result);
+
+ /* Ignore first two lines for header */
+ fscanf(fp, "slabinfo - version: %*d.%*d\n");
+ fscanf(fp, "# %*s %*s %*s %*s %*s %*s : %[^\n]\n", rest_of_line);
+
+	/* Compare name and objsize only - other fields can change frequently */
+ while (fscanf(fp, "%s %*u %*u %lu %*u %*u : %[^\n]\n",
+ name, &objsize, rest_of_line) == 3) {
+ int ret = bpf_map_lookup_elem(map_fd, &seen, &r);
+
+ if (!ASSERT_OK(ret, "kmem_cache_lookup"))
+ break;
+
+ ASSERT_STRNEQ(r.name, name, sizeof(r.name) - 1,
+ "kmem_cache_name");
+ ASSERT_EQ(r.obj_size, objsize, "kmem_cache_objsize");
+
+ seen++;
+ }
+
+ ASSERT_EQ(skel->bss->kmem_cache_seen, seen, "kmem_cache_seen_eq");
+
+ fclose(fp);
+}
+
+static void subtest_kmem_cache_iter_open_coded(struct kmem_cache_iter *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, fd;
+
+ /* No need to attach it, just run it directly */
+ fd = bpf_program__fd(skel->progs.open_coded_iter);
+
+ err = bpf_prog_test_run_opts(fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+	/* It should be the same as what we've seen from the explicit iterator */
+ ASSERT_EQ(skel->bss->open_coded_seen, skel->bss->kmem_cache_seen, "open_code_seen_eq");
+}
+
+void test_kmem_cache_iter(void)
+{
+ struct kmem_cache_iter *skel = NULL;
+ char buf[256];
+ int iter_fd;
+
+ skel = kmem_cache_iter__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kmem_cache_iter__open_and_load"))
+ return;
+
+ if (!ASSERT_OK(kmem_cache_iter__attach(skel), "skel_attach"))
+ goto destroy;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(skel->links.slab_info_collector));
+ if (!ASSERT_GE(iter_fd, 0, "iter_create"))
+ goto destroy;
+
+ memset(buf, 0, sizeof(buf));
+ while (read(iter_fd, buf, sizeof(buf)) > 0) {
+ /* Read out all contents */
+ printf("%s", buf);
+ }
+
+ /* Next reads should return 0 */
+ ASSERT_EQ(read(iter_fd, buf, sizeof(buf)), 0, "read");
+
+ if (test__start_subtest("check_task_struct"))
+ subtest_kmem_cache_iter_check_task_struct(skel);
+ if (test__start_subtest("check_slabinfo"))
+ subtest_kmem_cache_iter_check_slabinfo(skel);
+ if (test__start_subtest("open_coded_iter"))
+ subtest_kmem_cache_iter_open_coded(skel);
+
+ close(iter_fd);
+
+destroy:
+ kmem_cache_iter__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
new file mode 100644
index 000000000000..6cfaa978bc9a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -0,0 +1,609 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "kprobe_multi.skel.h"
+#include "trace_helpers.h"
+#include "kprobe_multi_empty.skel.h"
+#include "kprobe_multi_override.skel.h"
+#include "kprobe_multi_session.skel.h"
+#include "kprobe_multi_session_cookie.skel.h"
+#include "kprobe_multi_verifier.skel.h"
+#include "kprobe_write_ctx.skel.h"
+#include "bpf/libbpf_internal.h"
+#include "bpf/hashmap.h"
+
+static void kprobe_multi_test_run(struct kprobe_multi *skel, bool test_return)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ prog_fd = bpf_program__fd(skel->progs.trigger);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result");
+ ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result");
+ ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result");
+ ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result");
+ ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result");
+ ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result");
+ ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result");
+ ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result");
+
+ if (test_return) {
+ ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result");
+ ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result");
+ ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result");
+ ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result");
+ ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result");
+ ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result");
+ ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result");
+ ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result");
+ }
+}
+
+static void test_skel_api(void)
+{
+ struct kprobe_multi *skel = NULL;
+ int err;
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_multi__open_and_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+ err = kprobe_multi__attach(skel);
+ if (!ASSERT_OK(err, "kprobe_multi__attach"))
+ goto cleanup;
+
+ kprobe_multi_test_run(skel, true);
+
+cleanup:
+ kprobe_multi__destroy(skel);
+}
+
+static void test_link_api(struct bpf_link_create_opts *opts)
+{
+ int prog_fd, link1_fd = -1, link2_fd = -1;
+ struct kprobe_multi *skel = NULL;
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+ prog_fd = bpf_program__fd(skel->progs.test_kprobe);
+ link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts);
+ if (!ASSERT_GE(link1_fd, 0, "link_fd"))
+ goto cleanup;
+
+ opts->kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
+ prog_fd = bpf_program__fd(skel->progs.test_kretprobe);
+ link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts);
+ if (!ASSERT_GE(link2_fd, 0, "link_fd"))
+ goto cleanup;
+
+ kprobe_multi_test_run(skel, true);
+
+cleanup:
+ if (link1_fd != -1)
+ close(link1_fd);
+ if (link2_fd != -1)
+ close(link2_fd);
+ kprobe_multi__destroy(skel);
+}
+
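+/* resolve a kernel symbol address from kallsyms; note that on failure the
+ * ASSERT makes this macro return from the *calling* function
+ */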
+#define GET_ADDR(__sym, __addr) ({ \
+ __addr = ksym_get_addr(__sym); \
+ if (!ASSERT_NEQ(__addr, 0, "kallsyms load failed for " #__sym)) \
+ return; \
+})
+
+static void test_link_api_addrs(void)
+{
+ LIBBPF_OPTS(bpf_link_create_opts, opts);
+ unsigned long long addrs[8];
+
+ GET_ADDR("bpf_fentry_test1", addrs[0]);
+ GET_ADDR("bpf_fentry_test2", addrs[1]);
+ GET_ADDR("bpf_fentry_test3", addrs[2]);
+ GET_ADDR("bpf_fentry_test4", addrs[3]);
+ GET_ADDR("bpf_fentry_test5", addrs[4]);
+ GET_ADDR("bpf_fentry_test6", addrs[5]);
+ GET_ADDR("bpf_fentry_test7", addrs[6]);
+ GET_ADDR("bpf_fentry_test8", addrs[7]);
+
+ opts.kprobe_multi.addrs = (const unsigned long*) addrs;
+ opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
+ test_link_api(&opts);
+}
+
+static void test_link_api_syms(void)
+{
+ LIBBPF_OPTS(bpf_link_create_opts, opts);
+ const char *syms[8] = {
+ "bpf_fentry_test1",
+ "bpf_fentry_test2",
+ "bpf_fentry_test3",
+ "bpf_fentry_test4",
+ "bpf_fentry_test5",
+ "bpf_fentry_test6",
+ "bpf_fentry_test7",
+ "bpf_fentry_test8",
+ };
+
+ opts.kprobe_multi.syms = syms;
+ opts.kprobe_multi.cnt = ARRAY_SIZE(syms);
+ test_link_api(&opts);
+}
+
+static void
+test_attach_api(const char *pattern, struct bpf_kprobe_multi_opts *opts)
+{
+ struct bpf_link *link1 = NULL, *link2 = NULL;
+ struct kprobe_multi *skel = NULL;
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+ link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ pattern, opts);
+ if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
+ goto cleanup;
+
+ if (opts) {
+ opts->retprobe = true;
+ link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe_manual,
+ pattern, opts);
+ if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts"))
+ goto cleanup;
+ }
+
+ kprobe_multi_test_run(skel, !!opts);
+
+cleanup:
+ bpf_link__destroy(link2);
+ bpf_link__destroy(link1);
+ kprobe_multi__destroy(skel);
+}
+
+static void test_attach_api_pattern(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+
+ test_attach_api("bpf_fentry_test*", &opts);
+ test_attach_api("bpf_fentry_test?", NULL);
+}
+
+static void test_attach_api_addrs(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ unsigned long long addrs[8];
+
+ GET_ADDR("bpf_fentry_test1", addrs[0]);
+ GET_ADDR("bpf_fentry_test2", addrs[1]);
+ GET_ADDR("bpf_fentry_test3", addrs[2]);
+ GET_ADDR("bpf_fentry_test4", addrs[3]);
+ GET_ADDR("bpf_fentry_test5", addrs[4]);
+ GET_ADDR("bpf_fentry_test6", addrs[5]);
+ GET_ADDR("bpf_fentry_test7", addrs[6]);
+ GET_ADDR("bpf_fentry_test8", addrs[7]);
+
+ opts.addrs = (const unsigned long *) addrs;
+ opts.cnt = ARRAY_SIZE(addrs);
+ test_attach_api(NULL, &opts);
+}
+
+static void test_attach_api_syms(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ const char *syms[8] = {
+ "bpf_fentry_test1",
+ "bpf_fentry_test2",
+ "bpf_fentry_test3",
+ "bpf_fentry_test4",
+ "bpf_fentry_test5",
+ "bpf_fentry_test6",
+ "bpf_fentry_test7",
+ "bpf_fentry_test8",
+ };
+
+ opts.syms = syms;
+ opts.cnt = ARRAY_SIZE(syms);
+ test_attach_api(NULL, &opts);
+}
+
+static void test_attach_api_fails(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ struct kprobe_multi *skel = NULL;
+ struct bpf_link *link = NULL;
+ unsigned long long addrs[2];
+ const char *syms[2] = {
+ "bpf_fentry_test1",
+ "bpf_fentry_test2",
+ };
+ __u64 cookies[2];
+ int saved_error;
+
+ addrs[0] = ksym_get_addr("bpf_fentry_test1");
+ addrs[1] = ksym_get_addr("bpf_fentry_test2");
+
+ if (!ASSERT_FALSE(!addrs[0] || !addrs[1], "ksym_get_addr"))
+ goto cleanup;
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+
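+	/* on failure bpf_program__attach_kprobe_multi_opts() returns NULL and
+	 * sets errno; it is saved immediately below because the ASSERT_*()
+	 * helpers may clobber errno
+	 */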
+ /* fail_1 - pattern and opts NULL */
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ NULL, NULL);
+ saved_error = -errno;
+ if (!ASSERT_ERR_PTR(link, "fail_1"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(saved_error, -EINVAL, "fail_1_error"))
+ goto cleanup;
+
+ /* fail_2 - both addrs and syms set */
+ opts.addrs = (const unsigned long *) addrs;
+ opts.syms = syms;
+ opts.cnt = ARRAY_SIZE(syms);
+ opts.cookies = NULL;
+
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ NULL, &opts);
+ saved_error = -errno;
+ if (!ASSERT_ERR_PTR(link, "fail_2"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(saved_error, -EINVAL, "fail_2_error"))
+ goto cleanup;
+
+ /* fail_3 - pattern and addrs set */
+ opts.addrs = (const unsigned long *) addrs;
+ opts.syms = NULL;
+ opts.cnt = ARRAY_SIZE(syms);
+ opts.cookies = NULL;
+
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ "ksys_*", &opts);
+ saved_error = -errno;
+ if (!ASSERT_ERR_PTR(link, "fail_3"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(saved_error, -EINVAL, "fail_3_error"))
+ goto cleanup;
+
+ /* fail_4 - pattern and cnt set */
+ opts.addrs = NULL;
+ opts.syms = NULL;
+ opts.cnt = ARRAY_SIZE(syms);
+ opts.cookies = NULL;
+
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ "ksys_*", &opts);
+ saved_error = -errno;
+ if (!ASSERT_ERR_PTR(link, "fail_4"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(saved_error, -EINVAL, "fail_4_error"))
+ goto cleanup;
+
+ /* fail_5 - pattern and cookies */
+ opts.addrs = NULL;
+ opts.syms = NULL;
+ opts.cnt = 0;
+ opts.cookies = cookies;
+
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ "ksys_*", &opts);
+ saved_error = -errno;
+ if (!ASSERT_ERR_PTR(link, "fail_5"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(saved_error, -EINVAL, "fail_5_error"))
+ goto cleanup;
+
+ /* fail_6 - abnormal cnt */
+ opts.addrs = (const unsigned long *) addrs;
+ opts.syms = NULL;
+ opts.cnt = INT_MAX;
+ opts.cookies = NULL;
+
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ NULL, &opts);
+ saved_error = -errno;
+ if (!ASSERT_ERR_PTR(link, "fail_6"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(saved_error, -E2BIG, "fail_6_error"))
+ goto cleanup;
+
+cleanup:
+ bpf_link__destroy(link);
+ kprobe_multi__destroy(skel);
+}
+
+static void test_session_skel_api(void)
+{
+ struct kprobe_multi_session *skel = NULL;
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct bpf_link *link = NULL;
+ int i, err, prog_fd;
+
+ skel = kprobe_multi_session__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_multi_session__open_and_load"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ err = kprobe_multi_session__attach(skel);
+	if (!ASSERT_OK(err, "kprobe_multi_session__attach"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.trigger);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+	/* bpf_fentry_test1-4 trigger the return probe as well, so the result is 2 */
+ for (i = 0; i < 4; i++)
+ ASSERT_EQ(skel->bss->kprobe_session_result[i], 2, "kprobe_session_result");
+
+	/* bpf_fentry_test5-8 trigger only the entry probe, so the result is 1 */
+ for (i = 4; i < 8; i++)
+ ASSERT_EQ(skel->bss->kprobe_session_result[i], 1, "kprobe_session_result");
+
+cleanup:
+ bpf_link__destroy(link);
+ kprobe_multi_session__destroy(skel);
+}
+
+static void test_session_cookie_skel_api(void)
+{
+ struct kprobe_multi_session_cookie *skel = NULL;
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct bpf_link *link = NULL;
+ int err, prog_fd;
+
+ skel = kprobe_multi_session_cookie__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ err = kprobe_multi_session_cookie__attach(skel);
+	if (!ASSERT_OK(err, "kprobe_multi_session_cookie__attach"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.trigger);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ ASSERT_EQ(skel->bss->test_kprobe_1_result, 1, "test_kprobe_1_result");
+ ASSERT_EQ(skel->bss->test_kprobe_2_result, 2, "test_kprobe_2_result");
+ ASSERT_EQ(skel->bss->test_kprobe_3_result, 3, "test_kprobe_3_result");
+
+cleanup:
+ bpf_link__destroy(link);
+ kprobe_multi_session_cookie__destroy(skel);
+}
+
+static void test_unique_match(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ struct kprobe_multi *skel = NULL;
+ struct bpf_link *link = NULL;
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_multi__open_and_load"))
+ return;
+
+ opts.unique_match = true;
+ skel->bss->pid = getpid();
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ "bpf_fentry_test*", &opts);
+ if (!ASSERT_ERR_PTR(link, "bpf_program__attach_kprobe_multi_opts"))
+ bpf_link__destroy(link);
+
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
+ "bpf_fentry_test8*", &opts);
+ if (ASSERT_OK_PTR(link, "bpf_program__attach_kprobe_multi_opts"))
+ bpf_link__destroy(link);
+
+ kprobe_multi__destroy(skel);
+}
+
+static void do_bench_test(struct kprobe_multi_empty *skel, struct bpf_kprobe_multi_opts *opts)
+{
+ long attach_start_ns, attach_end_ns;
+ long detach_start_ns, detach_end_ns;
+ double attach_delta, detach_delta;
+ struct bpf_link *link = NULL;
+
+ attach_start_ns = get_time_ns();
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_empty,
+ NULL, opts);
+ attach_end_ns = get_time_ns();
+
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_kprobe_multi_opts"))
+ return;
+
+ detach_start_ns = get_time_ns();
+ bpf_link__destroy(link);
+ detach_end_ns = get_time_ns();
+
+ attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
+ detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;
+
+ printf("%s: found %lu functions\n", __func__, opts->cnt);
+ printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
+ printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
+}
+
+static void test_kprobe_multi_bench_attach(bool kernel)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ struct kprobe_multi_empty *skel = NULL;
+ char **syms = NULL;
+ size_t cnt = 0;
+
+ if (!ASSERT_OK(bpf_get_ksyms(&syms, &cnt, kernel), "bpf_get_ksyms"))
+ return;
+
+ skel = kprobe_multi_empty__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load"))
+ goto cleanup;
+
+ opts.syms = (const char **) syms;
+ opts.cnt = cnt;
+
+ do_bench_test(skel, &opts);
+
+cleanup:
+ kprobe_multi_empty__destroy(skel);
+ if (syms)
+ free(syms);
+}
+
+static void test_kprobe_multi_bench_attach_addr(bool kernel)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ struct kprobe_multi_empty *skel = NULL;
+ unsigned long *addrs = NULL;
+ size_t cnt = 0;
+ int err;
+
+ err = bpf_get_addrs(&addrs, &cnt, kernel);
+ if (err == -ENOENT) {
+ test__skip();
+ return;
+ }
+
+ if (!ASSERT_OK(err, "bpf_get_addrs"))
+ return;
+
+ skel = kprobe_multi_empty__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load"))
+ goto cleanup;
+
+ opts.addrs = addrs;
+ opts.cnt = cnt;
+
+ do_bench_test(skel, &opts);
+
+cleanup:
+ kprobe_multi_empty__destroy(skel);
+ free(addrs);
+}
+
+static void test_attach_override(void)
+{
+ struct kprobe_multi_override *skel = NULL;
+ struct bpf_link *link = NULL;
+
+ skel = kprobe_multi_override__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load"))
+ goto cleanup;
+
+	/* The test_override program calls bpf_override_return(), so it should
+	 * fail to attach to the bpf_fentry_test1 function, which is not on the
+	 * error injection list.
+	 */
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_override,
+ "bpf_fentry_test1", NULL);
+ if (!ASSERT_ERR_PTR(link, "override_attached_bpf_fentry_test1")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+	/* The should_fail_bio function is on the error injection list, so the
+	 * attach should succeed.
+	 */
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_override,
+ "should_fail_bio", NULL);
+ if (!ASSERT_OK_PTR(link, "override_attached_should_fail_bio"))
+ goto cleanup;
+
+ bpf_link__destroy(link);
+
+cleanup:
+ kprobe_multi_override__destroy(skel);
+}
+
+#ifdef __x86_64__
+static void test_attach_write_ctx(void)
+{
+ struct kprobe_write_ctx *skel = NULL;
+ struct bpf_link *link = NULL;
+
+ skel = kprobe_write_ctx__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_write_ctx__open_and_load"))
+ return;
+
+ link = bpf_program__attach_kprobe_opts(skel->progs.kprobe_multi_write_ctx,
+ "bpf_fentry_test1", NULL);
+ if (!ASSERT_ERR_PTR(link, "bpf_program__attach_kprobe_opts"))
+ bpf_link__destroy(link);
+
+ kprobe_write_ctx__destroy(skel);
+}
+#else
+static void test_attach_write_ctx(void)
+{
+ test__skip();
+}
+#endif
+
+void serial_test_kprobe_multi_bench_attach(void)
+{
+ if (test__start_subtest("kernel"))
+ test_kprobe_multi_bench_attach(true);
+ if (test__start_subtest("modules"))
+ test_kprobe_multi_bench_attach(false);
+ if (test__start_subtest("kernel"))
+ test_kprobe_multi_bench_attach_addr(true);
+ if (test__start_subtest("modules"))
+ test_kprobe_multi_bench_attach_addr(false);
+}
+
+void test_kprobe_multi_test(void)
+{
+ if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
+ return;
+
+ if (test__start_subtest("skel_api"))
+ test_skel_api();
+ if (test__start_subtest("link_api_addrs"))
+		test_link_api_addrs();
+	if (test__start_subtest("link_api_syms"))
+		test_link_api_syms();
+ if (test__start_subtest("attach_api_pattern"))
+ test_attach_api_pattern();
+ if (test__start_subtest("attach_api_addrs"))
+ test_attach_api_addrs();
+ if (test__start_subtest("attach_api_syms"))
+ test_attach_api_syms();
+ if (test__start_subtest("attach_api_fails"))
+ test_attach_api_fails();
+ if (test__start_subtest("attach_override"))
+ test_attach_override();
+ if (test__start_subtest("session"))
+ test_session_skel_api();
+ if (test__start_subtest("session_cookie"))
+ test_session_cookie_skel_api();
+ if (test__start_subtest("unique_match"))
+ test_unique_match();
+ if (test__start_subtest("attach_write_ctx"))
+ test_attach_write_ctx();
+ RUN_TESTS(kprobe_multi_verifier);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c
new file mode 100644
index 000000000000..9d03528f05db
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "kprobe_multi.skel.h"
+#include "trace_helpers.h"
+#include "bpf/libbpf_internal.h"
+
+static struct ksyms *ksyms;
+
+static void kprobe_multi_testmod_check(struct kprobe_multi *skel)
+{
+ ASSERT_EQ(skel->bss->kprobe_testmod_test1_result, 1, "kprobe_test1_result");
+ ASSERT_EQ(skel->bss->kprobe_testmod_test2_result, 1, "kprobe_test2_result");
+ ASSERT_EQ(skel->bss->kprobe_testmod_test3_result, 1, "kprobe_test3_result");
+
+ ASSERT_EQ(skel->bss->kretprobe_testmod_test1_result, 1, "kretprobe_test1_result");
+ ASSERT_EQ(skel->bss->kretprobe_testmod_test2_result, 1, "kretprobe_test2_result");
+ ASSERT_EQ(skel->bss->kretprobe_testmod_test3_result, 1, "kretprobe_test3_result");
+}
+
+static void test_testmod_attach_api(struct bpf_kprobe_multi_opts *opts)
+{
+ struct kprobe_multi *skel = NULL;
+
+ skel = kprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ skel->links.test_kprobe_testmod = bpf_program__attach_kprobe_multi_opts(
+ skel->progs.test_kprobe_testmod,
+ NULL, opts);
+ if (!skel->links.test_kprobe_testmod)
+ goto cleanup;
+
+ opts->retprobe = true;
+ skel->links.test_kretprobe_testmod = bpf_program__attach_kprobe_multi_opts(
+ skel->progs.test_kretprobe_testmod,
+ NULL, opts);
+ if (!skel->links.test_kretprobe_testmod)
+ goto cleanup;
+
+ ASSERT_OK(trigger_module_test_read(1), "trigger_read");
+ kprobe_multi_testmod_check(skel);
+
+cleanup:
+ kprobe_multi__destroy(skel);
+}
+
+static void test_testmod_attach_api_addrs(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ unsigned long long addrs[3];
+
+ addrs[0] = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test1");
+ ASSERT_NEQ(addrs[0], 0, "ksym_get_addr_local");
+ addrs[1] = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test2");
+ ASSERT_NEQ(addrs[1], 0, "ksym_get_addr_local");
+ addrs[2] = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test3");
+ ASSERT_NEQ(addrs[2], 0, "ksym_get_addr_local");
+
+ opts.addrs = (const unsigned long *) addrs;
+ opts.cnt = ARRAY_SIZE(addrs);
+
+ test_testmod_attach_api(&opts);
+}
+
+static void test_testmod_attach_api_syms(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ const char *syms[3] = {
+ "bpf_testmod_fentry_test1",
+ "bpf_testmod_fentry_test2",
+ "bpf_testmod_fentry_test3",
+ };
+
+ opts.syms = syms;
+ opts.cnt = ARRAY_SIZE(syms);
+ test_testmod_attach_api(&opts);
+}
+
+void serial_test_kprobe_multi_testmod_test(void)
+{
+ ksyms = load_kallsyms_local();
+ if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local"))
+ return;
+
+ if (test__start_subtest("testmod_attach_api_syms"))
+ test_testmod_attach_api_syms();
+
+ if (test__start_subtest("testmod_attach_api_addrs"))
+ test_testmod_attach_api_addrs();
+
+ free_kallsyms_local(ksyms);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c b/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c
new file mode 100644
index 000000000000..7def158da9eb
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <test_progs.h>
+
+#include "linux/filter.h"
+#include "kptr_xchg_inline.skel.h"
+
+void test_kptr_xchg_inline(void)
+{
+ struct kptr_xchg_inline *skel;
+ struct bpf_insn *insn = NULL;
+ struct bpf_insn exp;
+ unsigned int cnt;
+ int err;
+
+#if !(defined(__x86_64__) || defined(__aarch64__) || \
+ (defined(__riscv) && __riscv_xlen == 64))
+ test__skip();
+ return;
+#endif
+
+ skel = kptr_xchg_inline__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_load"))
+ return;
+
+ err = get_xlated_program(bpf_program__fd(skel->progs.kptr_xchg_inline), &insn, &cnt);
+ if (!ASSERT_OK(err, "prog insn"))
+ goto out;
+
+ /* The original instructions are:
+ * r1 = map[id:xxx][0]+0
+ * r2 = 0
+ * call bpf_kptr_xchg#yyy
+ *
+ * call bpf_kptr_xchg#yyy will be inlined as:
+ * r0 = r2
+ * r0 = atomic64_xchg((u64 *)(r1 +0), r0)
+ */
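+	/* the ld_imm64 loading the map value pointer takes two instructions
+	 * and "r2 = 0" one more, so the inlined sequence is expected to start
+	 * at insn[3]
+	 */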
+ if (!ASSERT_GT(cnt, 5, "insn cnt"))
+ goto out;
+
+ exp = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
+ if (!ASSERT_OK(memcmp(&insn[3], &exp, sizeof(exp)), "mov"))
+ goto out;
+
+ exp = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
+ if (!ASSERT_OK(memcmp(&insn[4], &exp, sizeof(exp)), "xchg"))
+ goto out;
+out:
+ free(insn);
+ kptr_xchg_inline__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms.c b/tools/testing/selftests/bpf/prog_tests/ksyms.c
index b295969b263b..dc7aab532fb1 100644
--- a/tools/testing/selftests/bpf/prog_tests/ksyms.c
+++ b/tools/testing/selftests/bpf/prog_tests/ksyms.c
@@ -5,8 +5,6 @@
#include "test_ksyms.skel.h"
#include <sys/stat.h>
-static int duration;
-
void test_ksyms(void)
{
const char *btf_path = "/sys/kernel/btf/vmlinux";
@@ -18,43 +16,37 @@ void test_ksyms(void)
int err;
err = kallsyms_find("bpf_link_fops", &link_fops_addr);
- if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
+ if (!ASSERT_NEQ(err, -EINVAL, "bpf_link_fops: kallsyms_fopen"))
return;
- if (CHECK(err == -ENOENT, "ksym_find", "symbol 'bpf_link_fops' not found\n"))
+ if (!ASSERT_NEQ(err, -ENOENT, "bpf_link_fops: ksym_find"))
return;
err = kallsyms_find("__per_cpu_start", &per_cpu_start_addr);
- if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
+ if (!ASSERT_NEQ(err, -EINVAL, "__per_cpu_start: kallsyms_fopen"))
return;
- if (CHECK(err == -ENOENT, "ksym_find", "symbol 'per_cpu_start' not found\n"))
+ if (!ASSERT_NEQ(err, -ENOENT, "__per_cpu_start: ksym_find"))
return;
- if (CHECK(stat(btf_path, &st), "stat_btf", "err %d\n", errno))
+ if (!ASSERT_OK(stat(btf_path, &st), "stat_btf"))
return;
btf_size = st.st_size;
skel = test_ksyms__open_and_load();
- if (CHECK(!skel, "skel_open", "failed to open and load skeleton\n"))
+ if (!ASSERT_OK_PTR(skel, "test_ksyms__open_and_load"))
return;
err = test_ksyms__attach(skel);
- if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ if (!ASSERT_OK(err, "test_ksyms__attach"))
goto cleanup;
/* trigger tracepoint */
usleep(1);
data = skel->data;
- CHECK(data->out__bpf_link_fops != link_fops_addr, "bpf_link_fops",
- "got 0x%llx, exp 0x%llx\n",
- data->out__bpf_link_fops, link_fops_addr);
- CHECK(data->out__bpf_link_fops1 != 0, "bpf_link_fops1",
- "got %llu, exp %llu\n", data->out__bpf_link_fops1, (__u64)0);
- CHECK(data->out__btf_size != btf_size, "btf_size",
- "got %llu, exp %llu\n", data->out__btf_size, btf_size);
- CHECK(data->out__per_cpu_start != per_cpu_start_addr, "__per_cpu_start",
- "got %llu, exp %llu\n", data->out__per_cpu_start,
- per_cpu_start_addr);
+ ASSERT_EQ(data->out__bpf_link_fops, link_fops_addr, "bpf_link_fops");
+ ASSERT_EQ(data->out__bpf_link_fops1, 0, "bpf_link_fops1");
+ ASSERT_EQ(data->out__btf_size, btf_size, "btf_size");
+ ASSERT_EQ(data->out__per_cpu_start, per_cpu_start_addr, "__per_cpu_start");
cleanup:
test_ksyms__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
index b58b775d19f3..1d7a2f1e0731 100644
--- a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
@@ -6,6 +6,9 @@
#include <bpf/btf.h>
#include "test_ksyms_btf.skel.h"
#include "test_ksyms_btf_null_check.skel.h"
+#include "test_ksyms_weak.skel.h"
+#include "test_ksyms_weak.lskel.h"
+#include "test_ksyms_btf_write_check.skel.h"
static int duration;
@@ -81,14 +84,81 @@ static void test_null_check(void)
test_ksyms_btf_null_check__destroy(skel);
}
+static void test_weak_syms(void)
+{
+ struct test_ksyms_weak *skel;
+ struct test_ksyms_weak__data *data;
+ int err;
+
+ skel = test_ksyms_weak__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_ksyms_weak__open_and_load"))
+ return;
+
+ err = test_ksyms_weak__attach(skel);
+ if (!ASSERT_OK(err, "test_ksyms_weak__attach"))
+ goto cleanup;
+
+ /* trigger tracepoint */
+ usleep(1);
+
+ data = skel->data;
+ ASSERT_EQ(data->out__existing_typed, 0, "existing typed ksym");
+ ASSERT_NEQ(data->out__existing_typeless, -1, "existing typeless ksym");
+ ASSERT_EQ(data->out__non_existent_typeless, 0, "nonexistent typeless ksym");
+ ASSERT_EQ(data->out__non_existent_typed, 0, "nonexistent typed ksym");
+
+cleanup:
+ test_ksyms_weak__destroy(skel);
+}
+
+static void test_weak_syms_lskel(void)
+{
+ struct test_ksyms_weak_lskel *skel;
+ struct test_ksyms_weak_lskel__data *data;
+ int err;
+
+ skel = test_ksyms_weak_lskel__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_ksyms_weak_lskel__open_and_load"))
+ return;
+
+ err = test_ksyms_weak_lskel__attach(skel);
+ if (!ASSERT_OK(err, "test_ksyms_weak_lskel__attach"))
+ goto cleanup;
+
+ /* trigger tracepoint */
+ usleep(1);
+
+ data = skel->data;
+ ASSERT_EQ(data->out__existing_typed, 0, "existing typed ksym");
+ ASSERT_NEQ(data->out__existing_typeless, -1, "existing typeless ksym");
+ ASSERT_EQ(data->out__non_existent_typeless, 0, "nonexistent typeless ksym");
+ ASSERT_EQ(data->out__non_existent_typed, 0, "nonexistent typed ksym");
+
+cleanup:
+ test_ksyms_weak_lskel__destroy(skel);
+}
+
+static void test_write_check(bool test_handler1)
+{
+ struct test_ksyms_btf_write_check *skel;
+
+ skel = test_ksyms_btf_write_check__open();
+ if (!ASSERT_OK_PTR(skel, "test_ksyms_btf_write_check__open"))
+ return;
+ bpf_program__set_autoload(test_handler1 ? skel->progs.handler2 : skel->progs.handler1, false);
+ ASSERT_ERR(test_ksyms_btf_write_check__load(skel),
+ "unexpected load of a prog writing to ksym memory\n");
+
+ test_ksyms_btf_write_check__destroy(skel);
+}
+
void test_ksyms_btf(void)
{
int percpu_datasec;
struct btf *btf;
btf = libbpf_find_kernel_btf();
- if (CHECK(IS_ERR(btf), "btf_exists", "failed to load kernel BTF: %ld\n",
- PTR_ERR(btf)))
+ if (!ASSERT_OK_PTR(btf, "btf_exists"))
return;
percpu_datasec = btf__find_by_name_kind(btf, ".data..percpu",
@@ -106,4 +176,16 @@ void test_ksyms_btf(void)
if (test__start_subtest("null_check"))
test_null_check();
+
+ if (test__start_subtest("weak_ksyms"))
+ test_weak_syms();
+
+ if (test__start_subtest("weak_ksyms_lskel"))
+ test_weak_syms_lskel();
+
+ if (test__start_subtest("write_check1"))
+ test_write_check(true);
+
+ if (test__start_subtest("write_check2"))
+ test_write_check(false);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c
new file mode 100644
index 000000000000..a1ebac70ec29
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "test_ksyms_module.lskel.h"
+#include "test_ksyms_module.skel.h"
+
+static void test_ksyms_module_lskel(void)
+{
+ struct test_ksyms_module_lskel *skel;
+ int err;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ if (!env.has_testmod) {
+ test__skip();
+ return;
+ }
+
+ skel = test_ksyms_module_lskel__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_ksyms_module_lskel__open_and_load"))
+ return;
+ err = bpf_prog_test_run_opts(skel->progs.load.prog_fd, &topts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run"))
+ goto cleanup;
+ ASSERT_EQ(topts.retval, 0, "retval");
+ ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym");
+cleanup:
+ test_ksyms_module_lskel__destroy(skel);
+}
+
+static void test_ksyms_module_libbpf(void)
+{
+ struct test_ksyms_module *skel;
+ int err;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ if (!env.has_testmod) {
+ test__skip();
+ return;
+ }
+
+ skel = test_ksyms_module__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open"))
+ return;
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.load), &topts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run"))
+ goto cleanup;
+ ASSERT_EQ(topts.retval, 0, "retval");
+ ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym");
+cleanup:
+ test_ksyms_module__destroy(skel);
+}
+
+void test_ksyms_module(void)
+{
+ if (test__start_subtest("lskel"))
+ test_ksyms_module_lskel();
+ if (test__start_subtest("libbpf"))
+ test_ksyms_module_libbpf();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
index 8073105548ff..1eab286b14fe 100644
--- a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
+++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
@@ -23,14 +23,18 @@ static void test_l4lb(const char *file)
__u8 flags;
} real_def = {.dst = MAGIC_VAL};
__u32 ch_key = 11, real_num = 3;
- __u32 duration, retval, size;
int err, i, prog_fd, map_fd;
__u64 bytes = 0, pkts = 0;
struct bpf_object *obj;
char buf[128];
u32 *magic = (u32 *)buf;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_out = buf,
+ .data_size_out = sizeof(buf),
+ .repeat = NUM_ITER,
+ );
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
@@ -49,19 +53,24 @@ static void test_l4lb(const char *file)
goto out;
bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
- *magic != MAGIC_VAL, "ipv4",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
+ topts.data_in = &pkt_v4;
+ topts.data_size_in = sizeof(pkt_v4);
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
- *magic != MAGIC_VAL, "ipv6",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 7 /*TC_ACT_REDIRECT*/, "ipv4 test_run retval");
+ ASSERT_EQ(topts.data_size_out, 54, "ipv4 test_run data_size_out");
+ ASSERT_EQ(*magic, MAGIC_VAL, "ipv4 magic");
+
+ topts.data_in = &pkt_v6;
+ topts.data_size_in = sizeof(pkt_v6);
+ topts.data_size_out = sizeof(buf); /* reset out size */
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 7 /*TC_ACT_REDIRECT*/, "ipv6 test_run retval");
+ ASSERT_EQ(topts.data_size_out, 74, "ipv6 test_run data_size_out");
+ ASSERT_EQ(*magic, MAGIC_VAL, "ipv6 magic");
map_fd = bpf_find_map(__func__, obj, "stats");
if (map_fd < 0)
@@ -81,7 +90,9 @@ out:
void test_l4lb_all(void)
{
if (test__start_subtest("l4lb_inline"))
- test_l4lb("test_l4lb.o");
+ test_l4lb("test_l4lb.bpf.o");
if (test__start_subtest("l4lb_noinline"))
- test_l4lb("test_l4lb_noinline.o");
+ test_l4lb("test_l4lb_noinline.bpf.o");
+ if (test__start_subtest("l4lb_noinline_dynptr"))
+ test_l4lb("test_l4lb_noinline_dynptr.bpf.o");
}
diff --git a/tools/testing/selftests/bpf/prog_tests/legacy_printk.c b/tools/testing/selftests/bpf/prog_tests/legacy_printk.c
new file mode 100644
index 000000000000..ec6e45f2a644
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/legacy_printk.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include "test_legacy_printk.skel.h"
+
+static int execute_one_variant(bool legacy)
+{
+ struct test_legacy_printk *skel;
+ int err, zero = 0, my_pid = getpid(), res, map_fd;
+
+ skel = test_legacy_printk__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return -errno;
+
+ bpf_program__set_autoload(skel->progs.handle_legacy, legacy);
+ bpf_program__set_autoload(skel->progs.handle_modern, !legacy);
+
+ err = test_legacy_printk__load(skel);
+ /* no ASSERT_OK, we expect one of two variants can fail here */
+ if (err)
+ goto err_out;
+
+ if (legacy) {
+ map_fd = bpf_map__fd(skel->maps.my_pid_map);
+ err = bpf_map_update_elem(map_fd, &zero, &my_pid, BPF_ANY);
+ if (!ASSERT_OK(err, "my_pid_map_update"))
+ goto err_out;
+ err = bpf_map_lookup_elem(map_fd, &zero, &res);
+ } else {
+ skel->bss->my_pid_var = my_pid;
+ }
+
+ err = test_legacy_printk__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto err_out;
+
+ usleep(1); /* trigger */
+
+ if (legacy) {
+ map_fd = bpf_map__fd(skel->maps.res_map);
+ err = bpf_map_lookup_elem(map_fd, &zero, &res);
+ if (!ASSERT_OK(err, "res_map_lookup"))
+ goto err_out;
+ } else {
+ res = skel->bss->res_var;
+ }
+
+ if (!ASSERT_GT(res, 0, "res")) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+err_out:
+ test_legacy_printk__destroy(skel);
+ return err;
+}
+
+void test_legacy_printk(void)
+{
+ /* legacy variant should work everywhere */
+ ASSERT_OK(execute_one_variant(true /* legacy */), "legacy_case");
+
+ /* execute modern variant, can fail the load on old kernels */
+ execute_one_variant(false);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c
new file mode 100644
index 000000000000..a3f238f51d05
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include <test_progs.h>
+
+#include "test_libbpf_get_fd_by_id_opts.skel.h"
+
+void test_libbpf_get_fd_by_id_opts(void)
+{
+ struct test_libbpf_get_fd_by_id_opts *skel;
+ struct bpf_map_info info_m = {};
+ __u32 len = sizeof(info_m), value;
+ int ret, zero = 0, fd = -1;
+ LIBBPF_OPTS(bpf_get_fd_by_id_opts, fd_opts_rdonly,
+ .open_flags = BPF_F_RDONLY,
+ );
+
+ skel = test_libbpf_get_fd_by_id_opts__open_and_load();
+ if (!ASSERT_OK_PTR(skel,
+ "test_libbpf_get_fd_by_id_opts__open_and_load"))
+ return;
+
+ ret = test_libbpf_get_fd_by_id_opts__attach(skel);
+ if (!ASSERT_OK(ret, "test_libbpf_get_fd_by_id_opts__attach"))
+ goto close_prog;
+
+ ret = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.data_input),
+ &info_m, &len);
+ if (!ASSERT_OK(ret, "bpf_map_get_info_by_fd"))
+ goto close_prog;
+
+ fd = bpf_map_get_fd_by_id(info_m.id);
+ if (!ASSERT_LT(fd, 0, "bpf_map_get_fd_by_id"))
+ goto close_prog;
+
+ fd = bpf_map_get_fd_by_id_opts(info_m.id, NULL);
+ if (!ASSERT_LT(fd, 0, "bpf_map_get_fd_by_id_opts"))
+ goto close_prog;
+
+ fd = bpf_map_get_fd_by_id_opts(info_m.id, &fd_opts_rdonly);
+ if (!ASSERT_GE(fd, 0, "bpf_map_get_fd_by_id_opts"))
+ goto close_prog;
+
+ /* Map lookup should work with read-only fd. */
+ ret = bpf_map_lookup_elem(fd, &zero, &value);
+ if (!ASSERT_OK(ret, "bpf_map_lookup_elem"))
+ goto close_prog;
+
+ if (!ASSERT_EQ(value, 0, "map value mismatch"))
+ goto close_prog;
+
+ /* Map update should not work with read-only fd. */
+ ret = bpf_map_update_elem(fd, &zero, &len, BPF_ANY);
+ if (!ASSERT_LT(ret, 0, "bpf_map_update_elem"))
+ goto close_prog;
+
+ /* Map update should work with read-write fd. */
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.data_input), &zero,
+ &len, BPF_ANY);
+ if (!ASSERT_OK(ret, "bpf_map_update_elem"))
+ goto close_prog;
+
+ /* Prog get fd with opts set should not work (no kernel support). */
+ ret = bpf_prog_get_fd_by_id_opts(0, &fd_opts_rdonly);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_prog_get_fd_by_id_opts"))
+ goto close_prog;
+
+ /* Link get fd with opts set should not work (no kernel support). */
+ ret = bpf_link_get_fd_by_id_opts(0, &fd_opts_rdonly);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_link_get_fd_by_id_opts"))
+ goto close_prog;
+
+ /* BTF get fd with opts set should not work (no kernel support). */
+ ret = bpf_btf_get_fd_by_id_opts(0, &fd_opts_rdonly);
+ ASSERT_EQ(ret, -EINVAL, "bpf_btf_get_fd_by_id_opts");
+
+close_prog:
+ if (fd >= 0)
+ close(fd);
+
+ test_libbpf_get_fd_by_id_opts__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c b/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c
new file mode 100644
index 000000000000..4ed46ed58a7b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+void test_libbpf_probe_prog_types(void)
+{
+ struct btf *btf;
+ const struct btf_type *t;
+ const struct btf_enum *e;
+ int i, n, id;
+
+ btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
+ if (!ASSERT_OK_PTR(btf, "btf_parse"))
+ return;
+
+ /* find enum bpf_prog_type and enumerate each value */
+ id = btf__find_by_name_kind(btf, "bpf_prog_type", BTF_KIND_ENUM);
+ if (!ASSERT_GT(id, 0, "bpf_prog_type_id"))
+ goto cleanup;
+ t = btf__type_by_id(btf, id);
+ if (!ASSERT_OK_PTR(t, "bpf_prog_type_enum"))
+ goto cleanup;
+
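+	/* Every prog type enumerated from vmlinux BTF (except UNSPEC and
+	 * the MAX marker skipped below) is expected to probe as supported.
+	 */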
+ for (e = btf_enum(t), i = 0, n = btf_vlen(t); i < n; e++, i++) {
+ const char *prog_type_name = btf__str_by_offset(btf, e->name_off);
+ enum bpf_prog_type prog_type = (enum bpf_prog_type)e->val;
+ int res;
+
+ if (prog_type == BPF_PROG_TYPE_UNSPEC)
+ continue;
+ if (strcmp(prog_type_name, "__MAX_BPF_PROG_TYPE") == 0)
+ continue;
+
+ if (!test__start_subtest(prog_type_name))
+ continue;
+
+ res = libbpf_probe_bpf_prog_type(prog_type, NULL);
+ ASSERT_EQ(res, 1, prog_type_name);
+ }
+
+cleanup:
+ btf__free(btf);
+}
+
+void test_libbpf_probe_map_types(void)
+{
+ struct btf *btf;
+ const struct btf_type *t;
+ const struct btf_enum *e;
+ int i, n, id;
+
+ btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
+ if (!ASSERT_OK_PTR(btf, "btf_parse"))
+ return;
+
+ /* find enum bpf_map_type and enumerate each value */
+ id = btf__find_by_name_kind(btf, "bpf_map_type", BTF_KIND_ENUM);
+ if (!ASSERT_GT(id, 0, "bpf_map_type_id"))
+ goto cleanup;
+ t = btf__type_by_id(btf, id);
+ if (!ASSERT_OK_PTR(t, "bpf_map_type_enum"))
+ goto cleanup;
+
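+	/* Every map type enumerated from vmlinux BTF (except UNSPEC and
+	 * the MAX marker skipped below) is expected to probe as supported.
+	 */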
+ for (e = btf_enum(t), i = 0, n = btf_vlen(t); i < n; e++, i++) {
+ const char *map_type_name = btf__str_by_offset(btf, e->name_off);
+ enum bpf_map_type map_type = (enum bpf_map_type)e->val;
+ int res;
+
+ if (map_type == BPF_MAP_TYPE_UNSPEC)
+ continue;
+ if (strcmp(map_type_name, "__MAX_BPF_MAP_TYPE") == 0)
+ continue;
+
+ if (!test__start_subtest(map_type_name))
+ continue;
+
+ res = libbpf_probe_bpf_map_type(map_type, NULL);
+ ASSERT_EQ(res, 1, map_type_name);
+ }
+
+cleanup:
+ btf__free(btf);
+}
+
+void test_libbpf_probe_helpers(void)
+{
+#define CASE(prog, helper, supp) { \
+ .prog_type_name = "BPF_PROG_TYPE_" # prog, \
+ .helper_name = "bpf_" # helper, \
+ .prog_type = BPF_PROG_TYPE_ ## prog, \
+ .helper_id = BPF_FUNC_ ## helper, \
+ .supported = supp, \
+}
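+	/* Table-driven probes: each case pairs a prog type with a helper
+	 * and the result libbpf_probe_bpf_helper() is expected to return.
+	 */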
+ const struct case_def {
+ const char *prog_type_name;
+ const char *helper_name;
+ enum bpf_prog_type prog_type;
+ enum bpf_func_id helper_id;
+ bool supported;
+ } cases[] = {
+ CASE(KPROBE, unspec, false),
+ CASE(KPROBE, map_lookup_elem, true),
+ CASE(KPROBE, loop, true),
+
+ CASE(KPROBE, ktime_get_coarse_ns, false),
+ CASE(SOCKET_FILTER, ktime_get_coarse_ns, true),
+
+ CASE(KPROBE, sys_bpf, false),
+ CASE(SYSCALL, sys_bpf, true),
+ };
+ size_t case_cnt = ARRAY_SIZE(cases), i;
+ char buf[128];
+
+ for (i = 0; i < case_cnt; i++) {
+ const struct case_def *d = &cases[i];
+ int res;
+
+ snprintf(buf, sizeof(buf), "%s+%s", d->prog_type_name, d->helper_name);
+
+ if (!test__start_subtest(buf))
+ continue;
+
+ res = libbpf_probe_bpf_helper(d->prog_type, d->helper_id, NULL);
+ ASSERT_EQ(res, d->supported, buf);
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
new file mode 100644
index 000000000000..62ea855ec4d0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <ctype.h>
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+/*
+ * Utility function uppercasing an entire string.
+ */
+static void uppercase(char *s)
+{
+ for (; *s != '\0'; s++)
+ *s = toupper(*s);
+}
+
+/*
+ * Test case to check that all bpf_attach_type variants are covered by
+ * libbpf_bpf_attach_type_str.
+ */
+static void test_libbpf_bpf_attach_type_str(void)
+{
+ struct btf *btf;
+ const struct btf_type *t;
+ const struct btf_enum *e;
+ int i, n, id;
+
+ btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
+ if (!ASSERT_OK_PTR(btf, "btf_parse"))
+ return;
+
+ /* find enum bpf_attach_type and enumerate each value */
+ id = btf__find_by_name_kind(btf, "bpf_attach_type", BTF_KIND_ENUM);
+ if (!ASSERT_GT(id, 0, "bpf_attach_type_id"))
+ goto cleanup;
+ t = btf__type_by_id(btf, id);
+ e = btf_enum(t);
+ n = btf_vlen(t);
+ for (i = 0; i < n; e++, i++) {
+ enum bpf_attach_type attach_type = (enum bpf_attach_type)e->val;
+ const char *attach_type_name;
+ const char *attach_type_str;
+ char buf[256];
+
+ if (attach_type == __MAX_BPF_ATTACH_TYPE)
+ continue;
+
+ attach_type_name = btf__str_by_offset(btf, e->name_off);
+ attach_type_str = libbpf_bpf_attach_type_str(attach_type);
+ ASSERT_OK_PTR(attach_type_str, attach_type_name);
+
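+		/* Rebuild the expected enumerator name from the libbpf
+		 * string and check that it round-trips to the BTF name.
+		 */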
+ snprintf(buf, sizeof(buf), "BPF_%s", attach_type_str);
+ uppercase(buf);
+
+ ASSERT_STREQ(buf, attach_type_name, "exp_str_value");
+ }
+
+cleanup:
+ btf__free(btf);
+}
+
+/*
+ * Test case to check that all bpf_link_type variants are covered by
+ * libbpf_bpf_link_type_str.
+ */
+static void test_libbpf_bpf_link_type_str(void)
+{
+ struct btf *btf;
+ const struct btf_type *t;
+ const struct btf_enum *e;
+ int i, n, id;
+
+ btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
+ if (!ASSERT_OK_PTR(btf, "btf_parse"))
+ return;
+
+ /* find enum bpf_link_type and enumerate each value */
+ id = btf__find_by_name_kind(btf, "bpf_link_type", BTF_KIND_ENUM);
+ if (!ASSERT_GT(id, 0, "bpf_link_type_id"))
+ goto cleanup;
+ t = btf__type_by_id(btf, id);
+ e = btf_enum(t);
+ n = btf_vlen(t);
+ for (i = 0; i < n; e++, i++) {
+ enum bpf_link_type link_type = (enum bpf_link_type)e->val;
+ const char *link_type_name;
+ const char *link_type_str;
+ char buf[256];
+
+ if (link_type == __MAX_BPF_LINK_TYPE)
+ continue;
+
+ link_type_name = btf__str_by_offset(btf, e->name_off);
+ link_type_str = libbpf_bpf_link_type_str(link_type);
+ ASSERT_OK_PTR(link_type_str, link_type_name);
+
+ snprintf(buf, sizeof(buf), "BPF_LINK_TYPE_%s", link_type_str);
+ uppercase(buf);
+
+ ASSERT_STREQ(buf, link_type_name, "exp_str_value");
+ }
+
+cleanup:
+ btf__free(btf);
+}
+
+/*
+ * Test case to check that all bpf_map_type variants are covered by
+ * libbpf_bpf_map_type_str.
+ */
+static void test_libbpf_bpf_map_type_str(void)
+{
+ struct btf *btf;
+ const struct btf_type *t;
+ const struct btf_enum *e;
+ int i, n, id;
+
+ btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
+ if (!ASSERT_OK_PTR(btf, "btf_parse"))
+ return;
+
+ /* find enum bpf_map_type and enumerate each value */
+ id = btf__find_by_name_kind(btf, "bpf_map_type", BTF_KIND_ENUM);
+ if (!ASSERT_GT(id, 0, "bpf_map_type_id"))
+ goto cleanup;
+ t = btf__type_by_id(btf, id);
+ e = btf_enum(t);
+ n = btf_vlen(t);
+ for (i = 0; i < n; e++, i++) {
+ enum bpf_map_type map_type = (enum bpf_map_type)e->val;
+ const char *map_type_name;
+ const char *map_type_str;
+ char buf[256];
+
+ if (map_type == __MAX_BPF_MAP_TYPE)
+ continue;
+
+ map_type_name = btf__str_by_offset(btf, e->name_off);
+ map_type_str = libbpf_bpf_map_type_str(map_type);
+ ASSERT_OK_PTR(map_type_str, map_type_name);
+
+ snprintf(buf, sizeof(buf), "BPF_MAP_TYPE_%s", map_type_str);
+ uppercase(buf);
+
+		/* Special case: BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED shares
+		 * its enum value (map_type) with BPF_MAP_TYPE_CGROUP_STORAGE,
+		 * and libbpf_bpf_map_type_str() returns the latter for it.
+		 * Likewise for BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED
+		 * and BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, so skip the
+		 * _DEPRECATED names here.
+		 */
+ if (strcmp(map_type_name, "BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED") == 0)
+ continue;
+ if (strcmp(map_type_name, "BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED") == 0)
+ continue;
+
+ ASSERT_STREQ(buf, map_type_name, "exp_str_value");
+ }
+
+cleanup:
+ btf__free(btf);
+}
+
+/*
+ * Test case to check that all bpf_prog_type variants are covered by
+ * libbpf_bpf_prog_type_str.
+ */
+static void test_libbpf_bpf_prog_type_str(void)
+{
+ struct btf *btf;
+ const struct btf_type *t;
+ const struct btf_enum *e;
+ int i, n, id;
+
+ btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
+ if (!ASSERT_OK_PTR(btf, "btf_parse"))
+ return;
+
+ /* find enum bpf_prog_type and enumerate each value */
+ id = btf__find_by_name_kind(btf, "bpf_prog_type", BTF_KIND_ENUM);
+ if (!ASSERT_GT(id, 0, "bpf_prog_type_id"))
+ goto cleanup;
+ t = btf__type_by_id(btf, id);
+ e = btf_enum(t);
+ n = btf_vlen(t);
+ for (i = 0; i < n; e++, i++) {
+ enum bpf_prog_type prog_type = (enum bpf_prog_type)e->val;
+ const char *prog_type_name;
+ const char *prog_type_str;
+ char buf[256];
+
+ if (prog_type == __MAX_BPF_PROG_TYPE)
+ continue;
+
+ prog_type_name = btf__str_by_offset(btf, e->name_off);
+ prog_type_str = libbpf_bpf_prog_type_str(prog_type);
+ ASSERT_OK_PTR(prog_type_str, prog_type_name);
+
+ snprintf(buf, sizeof(buf), "BPF_PROG_TYPE_%s", prog_type_str);
+ uppercase(buf);
+
+ ASSERT_STREQ(buf, prog_type_name, "exp_str_value");
+ }
+
+cleanup:
+ btf__free(btf);
+}
+
+/*
+ * Run all libbpf str conversion tests.
+ */
+void test_libbpf_str(void)
+{
+ if (test__start_subtest("bpf_attach_type_str"))
+ test_libbpf_bpf_attach_type_str();
+
+ if (test__start_subtest("bpf_link_type_str"))
+ test_libbpf_bpf_link_type_str();
+
+ if (test__start_subtest("bpf_map_type_str"))
+ test_libbpf_bpf_map_type_str();
+
+ if (test__start_subtest("bpf_prog_type_str"))
+ test_libbpf_bpf_prog_type_str();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/link_pinning.c b/tools/testing/selftests/bpf/prog_tests/link_pinning.c
index a743288cf384..6fc97c45f71e 100644
--- a/tools/testing/selftests/bpf/prog_tests/link_pinning.c
+++ b/tools/testing/selftests/bpf/prog_tests/link_pinning.c
@@ -17,7 +17,7 @@ void test_link_pinning_subtest(struct bpf_program *prog,
int err, i;
link = bpf_program__attach(prog);
- if (CHECK(IS_ERR(link), "link_attach", "err: %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
bss->in = 1;
@@ -51,7 +51,7 @@ void test_link_pinning_subtest(struct bpf_program *prog,
/* re-open link from BPFFS */
link = bpf_link__open(link_pin_path);
- if (CHECK(IS_ERR(link), "link_open", "err: %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "link_open"))
goto cleanup;
CHECK(strcmp(link_pin_path, bpf_link__pin_path(link)), "pin_path2",
@@ -84,8 +84,7 @@ void test_link_pinning_subtest(struct bpf_program *prog,
CHECK(i == 10000, "link_attached", "got to iteration #%d\n", i);
cleanup:
- if (!IS_ERR(link))
- bpf_link__destroy(link);
+ bpf_link__destroy(link);
}
void test_link_pinning(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
new file mode 100644
index 000000000000..fa639b021f7e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+#include <sys/syscall.h>
+#include "linked_funcs.skel.h"
+
+void test_linked_funcs(void)
+{
+ int err;
+ struct linked_funcs *skel;
+
+ skel = linked_funcs__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+	/* handler1 and handler2 are marked SEC("?raw_tp/sys_enter"), so
+	 * they are not autoloaded by default; enable them explicitly.
+	 */
+ bpf_program__set_autoload(skel->progs.handler1, true);
+ bpf_program__set_autoload(skel->progs.handler2, true);
+
+ skel->rodata->my_tid = sys_gettid();
+ skel->bss->syscall_id = SYS_getpgid;
+
+ err = linked_funcs__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ err = linked_funcs__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger */
+ syscall(SYS_getpgid);
+
+ ASSERT_EQ(skel->bss->output_val1, 2000 + 2000, "output_val1");
+ ASSERT_EQ(skel->bss->output_ctx1, SYS_getpgid, "output_ctx1");
+ ASSERT_EQ(skel->bss->output_weak1, 42, "output_weak1");
+
+ ASSERT_EQ(skel->bss->output_val2, 2 * 1000 + 2 * (2 * 1000), "output_val2");
+ ASSERT_EQ(skel->bss->output_ctx2, SYS_getpgid, "output_ctx2");
+ /* output_weak2 should never be updated */
+ ASSERT_EQ(skel->bss->output_weak2, 0, "output_weak2");
+
+cleanup:
+ linked_funcs__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
new file mode 100644
index 000000000000..14c5a7ef0e87
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
@@ -0,0 +1,813 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/btf.h>
+#include <test_btf.h>
+#include <linux/btf.h>
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "linked_list.skel.h"
+#include "linked_list_fail.skel.h"
+#include "linked_list_peek.skel.h"
+
+static char log_buf[1024 * 1024];
+
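+/* Expected verifier failures: each entry names a non-autoload program in
+ * the linked_list_fail skeleton and a substring that must appear in its
+ * load log.
+ */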
+static struct {
+ const char *prog_name;
+ const char *err_msg;
+} linked_list_fail_tests[] = {
+#define TEST(test, off) \
+ { #test "_missing_lock_push_front", \
+ "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
+ { #test "_missing_lock_push_back", \
+ "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
+ { #test "_missing_lock_pop_front", \
+ "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
+ { #test "_missing_lock_pop_back", \
+ "bpf_spin_lock at off=" #off " must be held for bpf_list_head" },
+ TEST(kptr, 40)
+ TEST(global, 16)
+ TEST(map, 0)
+ TEST(inner_map, 0)
+#undef TEST
+#define TEST(test, op) \
+ { #test "_kptr_incorrect_lock_" #op, \
+ "held lock and object are not in the same allocation\n" \
+ "bpf_spin_lock at off=40 must be held for bpf_list_head" }, \
+ { #test "_global_incorrect_lock_" #op, \
+ "held lock and object are not in the same allocation\n" \
+ "bpf_spin_lock at off=16 must be held for bpf_list_head" }, \
+ { #test "_map_incorrect_lock_" #op, \
+ "held lock and object are not in the same allocation\n" \
+ "bpf_spin_lock at off=0 must be held for bpf_list_head" }, \
+ { #test "_inner_map_incorrect_lock_" #op, \
+ "held lock and object are not in the same allocation\n" \
+ "bpf_spin_lock at off=0 must be held for bpf_list_head" },
+ TEST(kptr, push_front)
+ TEST(kptr, push_back)
+ TEST(kptr, pop_front)
+ TEST(kptr, pop_back)
+ TEST(global, push_front)
+ TEST(global, push_back)
+ TEST(global, pop_front)
+ TEST(global, pop_back)
+ TEST(map, push_front)
+ TEST(map, push_back)
+ TEST(map, pop_front)
+ TEST(map, pop_back)
+ TEST(inner_map, push_front)
+ TEST(inner_map, push_back)
+ TEST(inner_map, pop_front)
+ TEST(inner_map, pop_back)
+#undef TEST
+ { "map_compat_kprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
+ { "map_compat_kretprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
+ { "map_compat_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
+ { "map_compat_perf", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
+ { "map_compat_raw_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
+ { "map_compat_raw_tp_w", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
+ { "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
+ { "obj_new_no_composite", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
+ { "obj_new_no_struct", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
+ { "obj_drop_non_zero_off", "R1 must have zero offset when passed to release func" },
+ { "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" },
+ { "obj_new_acq", "Unreleased reference id=" },
+ { "use_after_drop", "invalid mem access 'scalar'" },
+ { "ptr_walk_scalar", "type=rdonly_untrusted_mem expected=percpu_ptr_" },
+ { "direct_read_lock", "direct access to bpf_spin_lock is disallowed" },
+ { "direct_write_lock", "direct access to bpf_spin_lock is disallowed" },
+ { "direct_read_head", "direct access to bpf_list_head is disallowed" },
+ { "direct_write_head", "direct access to bpf_list_head is disallowed" },
+ { "direct_read_node", "direct access to bpf_list_node is disallowed" },
+ { "direct_write_node", "direct access to bpf_list_node is disallowed" },
+ { "use_after_unlock_push_front", "invalid mem access 'scalar'" },
+ { "use_after_unlock_push_back", "invalid mem access 'scalar'" },
+ { "double_push_front", "arg#1 expected pointer to allocated object" },
+ { "double_push_back", "arg#1 expected pointer to allocated object" },
+ { "no_node_value_type", "bpf_list_node not found at offset=0" },
+ { "incorrect_value_type",
+ "operation on bpf_list_head expects arg#1 bpf_list_node at offset=48 in struct foo, "
+ "but arg is at offset=0 in struct bar" },
+ { "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
+ { "incorrect_node_off1", "bpf_list_node not found at offset=49" },
+ { "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=48 in struct foo" },
+ { "no_head_type", "bpf_list_head not found at offset=0" },
+ { "incorrect_head_var_off1", "R1 doesn't have constant offset" },
+ { "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
+ { "incorrect_head_off1", "bpf_list_head not found at offset=25" },
+ { "incorrect_head_off2", "bpf_list_head not found at offset=1" },
+ { "pop_front_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
+ { "pop_back_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
+};
+
+static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
+ .kernel_log_size = sizeof(log_buf),
+ .kernel_log_level = 1);
+ struct linked_list_fail *skel;
+ struct bpf_program *prog;
+ int ret;
+
+ skel = linked_list_fail__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "linked_list_fail__open_opts"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto end;
+
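+	/* Fail programs are declared non-autoload; enable just the one
+	 * under test so only its verifier log ends up in log_buf.
+	 */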
+ bpf_program__set_autoload(prog, true);
+
+ ret = linked_list_fail__load(skel);
+ if (!ASSERT_ERR(ret, "linked_list_fail__load must fail"))
+ goto end;
+
+ if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
+ fprintf(stderr, "Expected: %s\n", err_msg);
+ fprintf(stderr, "Verifier: %s\n", log_buf);
+ }
+
+end:
+ linked_list_fail__destroy(skel);
+}
+
+static void clear_fields(struct bpf_map *map)
+{
+ char buf[24];
+ int key = 0;
+
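+	/* Updating the element in place makes the kernel run its
+	 * check_and_free_fields() path, releasing any list nodes the
+	 * previous test run left in the map value.
+	 */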
+ memset(buf, 0xff, sizeof(buf));
+ ASSERT_OK(bpf_map__update_elem(map, &key, sizeof(key), buf, sizeof(buf), 0), "check_and_free_fields");
+}
+
+enum {
+ TEST_ALL,
+ PUSH_POP,
+ PUSH_POP_MULT,
+ LIST_IN_LIST,
+};
+
+static void test_linked_list_success(int mode, bool leave_in_map)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct linked_list *skel;
+ int ret;
+
+ skel = linked_list__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "linked_list__open_and_load"))
+ return;
+
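+	/* TEST_ALL runs all three stages in order; the other modes jump
+	 * to their stage and exit at its end.
+	 */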
+ if (mode == LIST_IN_LIST)
+ goto lil;
+ if (mode == PUSH_POP_MULT)
+ goto ppm;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop), &opts);
+ ASSERT_OK(ret, "map_list_push_pop");
+ ASSERT_OK(opts.retval, "map_list_push_pop retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.array_map);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop), &opts);
+ ASSERT_OK(ret, "inner_map_list_push_pop");
+ ASSERT_OK(opts.retval, "inner_map_list_push_pop retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.inner_map);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop), &opts);
+ ASSERT_OK(ret, "global_list_push_pop");
+ ASSERT_OK(opts.retval, "global_list_push_pop retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.bss_A);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_nested), &opts);
+ ASSERT_OK(ret, "global_list_push_pop_nested");
+ ASSERT_OK(opts.retval, "global_list_push_pop_nested retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.bss_A);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_array_push_pop), &opts);
+ ASSERT_OK(ret, "global_list_array_push_pop");
+ ASSERT_OK(opts.retval, "global_list_array_push_pop retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.bss_A);
+
+ if (mode == PUSH_POP)
+ goto end;
+
+ppm:
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop_multiple), &opts);
+ ASSERT_OK(ret, "map_list_push_pop_multiple");
+ ASSERT_OK(opts.retval, "map_list_push_pop_multiple retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.array_map);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop_multiple), &opts);
+ ASSERT_OK(ret, "inner_map_list_push_pop_multiple");
+ ASSERT_OK(opts.retval, "inner_map_list_push_pop_multiple retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.inner_map);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_multiple), &opts);
+ ASSERT_OK(ret, "global_list_push_pop_multiple");
+ ASSERT_OK(opts.retval, "global_list_push_pop_multiple retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.bss_A);
+
+ if (mode == PUSH_POP_MULT)
+ goto end;
+
+lil:
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_in_list), &opts);
+ ASSERT_OK(ret, "map_list_in_list");
+ ASSERT_OK(opts.retval, "map_list_in_list retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.array_map);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_in_list), &opts);
+ ASSERT_OK(ret, "inner_map_list_in_list");
+ ASSERT_OK(opts.retval, "inner_map_list_in_list retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.inner_map);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_in_list), &opts);
+ ASSERT_OK(ret, "global_list_in_list");
+ ASSERT_OK(opts.retval, "global_list_in_list retval");
+ if (!leave_in_map)
+ clear_fields(skel->maps.bss_A);
+end:
+ linked_list__destroy(skel);
+}
+
+#define SPIN_LOCK 2
+#define LIST_HEAD 3
+#define LIST_NODE 4
+
+static struct btf *init_btf(void)
+{
+ int id, lid, hid, nid;
+ struct btf *btf;
+
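+	/* Add types in a fixed order so the returned IDs line up with the
+	 * SPIN_LOCK, LIST_HEAD and LIST_NODE constants above.
+	 */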
+ btf = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf, "btf__new_empty"))
+ return NULL;
+ id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
+ if (!ASSERT_EQ(id, 1, "btf__add_int"))
+ goto end;
+ lid = btf__add_struct(btf, "bpf_spin_lock", 4);
+ if (!ASSERT_EQ(lid, SPIN_LOCK, "btf__add_struct bpf_spin_lock"))
+ goto end;
+ hid = btf__add_struct(btf, "bpf_list_head", 16);
+ if (!ASSERT_EQ(hid, LIST_HEAD, "btf__add_struct bpf_list_head"))
+ goto end;
+ nid = btf__add_struct(btf, "bpf_list_node", 24);
+ if (!ASSERT_EQ(nid, LIST_NODE, "btf__add_struct bpf_list_node"))
+ goto end;
+ return btf;
+end:
+ btf__free(btf);
+ return NULL;
+}
+
+static void list_and_rb_node_same_struct(bool refcount_field)
+{
+ int bpf_rb_node_btf_id, bpf_refcount_btf_id = 0, foo_btf_id;
+ struct btf *btf;
+ int id, err;
+
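+	/* A struct carrying both a bpf_list_node and a bpf_rb_node is only
+	 * accepted when it also has a bpf_refcount field; otherwise the
+	 * kernel rejects the BTF with -EINVAL.
+	 */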
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ return;
+
+ bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 32);
+ if (!ASSERT_GT(bpf_rb_node_btf_id, 0, "btf__add_struct bpf_rb_node"))
+ return;
+
+ if (refcount_field) {
+ bpf_refcount_btf_id = btf__add_struct(btf, "bpf_refcount", 4);
+ if (!ASSERT_GT(bpf_refcount_btf_id, 0, "btf__add_struct bpf_refcount"))
+ return;
+ }
+
+ id = btf__add_struct(btf, "bar", refcount_field ? 60 : 56);
+ if (!ASSERT_GT(id, 0, "btf__add_struct bar"))
+ return;
+ err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::a"))
+ return;
+ err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 192, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::c"))
+ return;
+ if (refcount_field) {
+ err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 448, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::ref"))
+ return;
+ }
+
+ foo_btf_id = btf__add_struct(btf, "foo", 20);
+ if (!ASSERT_GT(foo_btf_id, 0, "btf__add_struct foo"))
+ return;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ return;
+ err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ return;
+ id = btf__add_decl_tag(btf, "contains:bar:a", foo_btf_id, 0);
+ if (!ASSERT_GT(id, 0, "btf__add_decl_tag contains:bar:a"))
+ return;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, refcount_field ? 0 : -EINVAL, "check btf");
+ btf__free(btf);
+}
+
+static void test_btf(void)
+{
+ struct btf *btf = NULL;
+ int id, err;
+
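+	/* Each subtest uses a while/break pair purely as a scoped block:
+	 * the body runs at most once and break doubles as early exit.
+	 */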
+ while (test__start_subtest("btf: too many locks")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 24);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_struct foo::a"))
+ break;
+ err = btf__add_field(btf, "b", SPIN_LOCK, 32, 0);
+ if (!ASSERT_OK(err, "btf__add_struct foo::a"))
+ break;
+ err = btf__add_field(btf, "c", LIST_HEAD, 64, 0);
+ if (!ASSERT_OK(err, "btf__add_struct foo::a"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -E2BIG, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: missing lock")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 16);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_struct foo::a"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:baz:a", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:baz:a"))
+ break;
+ id = btf__add_struct(btf, "baz", 16);
+ if (!ASSERT_EQ(id, 7, "btf__add_struct baz"))
+ break;
+ err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field baz::a"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -EINVAL, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: bad offset")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 36);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -EEXIST, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: missing contains:")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 24);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -EINVAL, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: missing struct")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 24);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bar:bar", 5, 1);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:bar"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -ENOENT, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: missing node")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 24);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:foo:c", 5, 1);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:c"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ btf__free(btf);
+ ASSERT_EQ(err, -ENOENT, "check btf");
+ break;
+ }
+
+ while (test__start_subtest("btf: node incorrect type")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 20);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
+ break;
+ id = btf__add_struct(btf, "bar", 4);
+ if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
+ break;
+ err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::a"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -EINVAL, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: multiple bpf_list_node with name b")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 52);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 256, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::c"))
+ break;
+ err = btf__add_field(btf, "d", SPIN_LOCK, 384, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::d"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -EINVAL, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: owning | owned AA cycle")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 44);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -ELOOP, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: owning | owned ABA cycle")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 44);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
+ break;
+ id = btf__add_struct(btf, "bar", 44);
+ if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:foo:b", 7, 0);
+ if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:foo:b"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -ELOOP, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: owning -> owned")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 28);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
+ break;
+ id = btf__add_struct(btf, "bar", 24);
+ if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
+ break;
+ err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::a"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, 0, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: owning -> owning | owned -> owned")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 28);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
+ break;
+ id = btf__add_struct(btf, "bar", 44);
+ if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
+ if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
+ break;
+ id = btf__add_struct(btf, "baz", 24);
+ if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
+ break;
+ err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field baz:a"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, 0, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: owning | owned -> owning | owned -> owned")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 44);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
+ break;
+ id = btf__add_struct(btf, "bar", 44);
+ if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar:a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar:b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar:c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
+ if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
+ break;
+ id = btf__add_struct(btf, "baz", 24);
+ if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
+ break;
+ err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field baz:a"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -ELOOP, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: owning -> owning | owned -> owning | owned -> owned")) {
+ btf = init_btf();
+ if (!ASSERT_OK_PTR(btf, "init_btf"))
+ break;
+ id = btf__add_struct(btf, "foo", 20);
+ if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::a"))
+ break;
+ err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field foo::b"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
+ if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
+ break;
+ id = btf__add_struct(btf, "bar", 44);
+ if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:baz:b", 7, 0);
+ if (!ASSERT_EQ(id, 8, "btf__add_decl_tag"))
+ break;
+ id = btf__add_struct(btf, "baz", 44);
+ if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
+ break;
+ err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::a"))
+ break;
+ err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::b"))
+ break;
+ err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
+ if (!ASSERT_OK(err, "btf__add_field bar::c"))
+ break;
+ id = btf__add_decl_tag(btf, "contains:bam:a", 9, 0);
+ if (!ASSERT_EQ(id, 10, "btf__add_decl_tag contains:bam:a"))
+ break;
+ id = btf__add_struct(btf, "bam", 24);
+ if (!ASSERT_EQ(id, 11, "btf__add_struct bam"))
+ break;
+ err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
+ if (!ASSERT_OK(err, "btf__add_field bam::a"))
+ break;
+
+ err = btf__load_into_kernel(btf);
+ ASSERT_EQ(err, -ELOOP, "check btf");
+ btf__free(btf);
+ break;
+ }
+
+ while (test__start_subtest("btf: list_node and rb_node in same struct")) {
+ list_and_rb_node_same_struct(true);
+ break;
+ }
+
+ while (test__start_subtest("btf: list_node and rb_node in same struct, no bpf_refcount")) {
+ list_and_rb_node_same_struct(false);
+ break;
+ }
+}
+
+void test_linked_list(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(linked_list_fail_tests); i++) {
+ if (!test__start_subtest(linked_list_fail_tests[i].prog_name))
+ continue;
+ test_linked_list_fail_prog(linked_list_fail_tests[i].prog_name,
+ linked_list_fail_tests[i].err_msg);
+ }
+ test_btf();
+ test_linked_list_success(PUSH_POP, false);
+ test_linked_list_success(PUSH_POP, true);
+ test_linked_list_success(PUSH_POP_MULT, false);
+ test_linked_list_success(PUSH_POP_MULT, true);
+ test_linked_list_success(LIST_IN_LIST, false);
+ test_linked_list_success(LIST_IN_LIST, true);
+ test_linked_list_success(TEST_ALL, false);
+}
+
+void test_linked_list_peek(void)
+{
+ RUN_TESTS(linked_list_peek);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_maps.c b/tools/testing/selftests/bpf/prog_tests/linked_maps.c
new file mode 100644
index 000000000000..85dcaaaf2775
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/linked_maps.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+#include <sys/syscall.h>
+#include "linked_maps.skel.h"
+
+void test_linked_maps(void)
+{
+ int err;
+ struct linked_maps *skel;
+
+ skel = linked_maps__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ err = linked_maps__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger */
+ syscall(SYS_getpgid);
+
+ ASSERT_EQ(skel->bss->output_first1, 2000, "output_first1");
+ ASSERT_EQ(skel->bss->output_second1, 2, "output_second1");
+ ASSERT_EQ(skel->bss->output_weak1, 2, "output_weak1");
+
+cleanup:
+ linked_maps__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_vars.c b/tools/testing/selftests/bpf/prog_tests/linked_vars.c
new file mode 100644
index 000000000000..267166abe4c1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/linked_vars.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+#include <sys/syscall.h>
+#include "linked_vars.skel.h"
+
+void test_linked_vars(void)
+{
+ int err;
+ struct linked_vars *skel;
+
+ skel = linked_vars__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->bss->input_bss1 = 1000;
+ skel->bss->input_bss2 = 2000;
+ skel->bss->input_bss_weak = 3000;
+
+ err = linked_vars__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ err = linked_vars__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger */
+ syscall(SYS_getpgid);
+
+ ASSERT_EQ(skel->bss->output_bss1, 1000 + 2000 + 3000, "output_bss1");
+ ASSERT_EQ(skel->bss->output_bss2, 1000 + 2000 + 3000, "output_bss2");
+ /* 10 comes from "winner" input_data_weak in first obj file */
+	ASSERT_EQ(skel->bss->output_data1, 1 + 2 + 10, "output_data1");
+	ASSERT_EQ(skel->bss->output_data2, 1 + 2 + 10, "output_data2");
+	/* 100 comes from "winner" input_rodata_weak in first obj file */
+	ASSERT_EQ(skel->bss->output_rodata1, 11 + 22 + 100, "output_rodata1");
+	ASSERT_EQ(skel->bss->output_rodata2, 11 + 22 + 100, "output_rodata2");
+
+cleanup:
+ linked_vars__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c b/tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c
new file mode 100644
index 000000000000..72aa5376c30e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "testing_helpers.h"
+#include "livepatch_trampoline.skel.h"
+
+static int load_livepatch(void)
+{
+ char path[4096];
+
+ /* CI will set KBUILD_OUTPUT */
+ snprintf(path, sizeof(path), "%s/samples/livepatch/livepatch-sample.ko",
+ getenv("KBUILD_OUTPUT") ? : "../../../..");
+
+ return load_module(path, env_verbosity > VERBOSE_NONE);
+}
+
+static void unload_livepatch(void)
+{
+ /* Disable the livepatch before unloading the module */
+ system("echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled");
+
+ unload_module("livepatch_sample", env_verbosity > VERBOSE_NONE);
+}
+
+static void read_proc_cmdline(void)
+{
+ char buf[4096];
+ int fd, ret;
+
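+	/* The livepatch sample replaces cmdline_proc_show(), so reading
+	 * /proc/cmdline must return the fixed "this has been live patched"
+	 * string while the patch is enabled.
+	 */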
+ fd = open("/proc/cmdline", O_RDONLY);
+ if (!ASSERT_OK_FD(fd, "open /proc/cmdline"))
+ return;
+
+ ret = read(fd, buf, sizeof(buf));
+ if (!ASSERT_GT(ret, 0, "read /proc/cmdline"))
+ goto out;
+
+ ASSERT_OK(strncmp(buf, "this has been live patched", 26), "strncmp");
+
+out:
+ close(fd);
+}
+
+static void __test_livepatch_trampoline(bool fexit_first)
+{
+ struct livepatch_trampoline *skel = NULL;
+ int err;
+
+ skel = livepatch_trampoline__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ goto out;
+
+ skel->bss->my_pid = getpid();
+
+ if (!fexit_first) {
+ /* fentry program is loaded first by default */
+ err = livepatch_trampoline__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+ } else {
+ /* Manually load fexit program first. */
+ skel->links.fexit_cmdline = bpf_program__attach(skel->progs.fexit_cmdline);
+ if (!ASSERT_OK_PTR(skel->links.fexit_cmdline, "attach_fexit"))
+ goto out;
+
+ skel->links.fentry_cmdline = bpf_program__attach(skel->progs.fentry_cmdline);
+ if (!ASSERT_OK_PTR(skel->links.fentry_cmdline, "attach_fentry"))
+ goto out;
+ }
+
+ read_proc_cmdline();
+
+ ASSERT_EQ(skel->bss->fentry_hit, 1, "fentry_hit");
+ ASSERT_EQ(skel->bss->fexit_hit, 1, "fexit_hit");
+out:
+ livepatch_trampoline__destroy(skel);
+}
+
+void test_livepatch_trampoline(void)
+{
+ int retry_cnt = 0;
+
+retry:
+ if (load_livepatch()) {
+ if (retry_cnt) {
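+			/* Retry also failed: record the failure and bail. */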
+ ASSERT_OK(1, "load_livepatch");
+ goto out;
+ }
+ /*
+ * Something else (previous run of the same test?) loaded
+ * the KLP module. Unload the KLP module and retry.
+ */
+ unload_livepatch();
+ retry_cnt++;
+ goto retry;
+ }
+
+ if (test__start_subtest("fentry_first"))
+ __test_livepatch_trampoline(false);
+
+ if (test__start_subtest("fexit_first"))
+ __test_livepatch_trampoline(true);
+out:
+ unload_livepatch();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c b/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c
index 5a2a689dbb68..581c0eb0a0a1 100644
--- a/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c
+++ b/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c
@@ -27,8 +27,8 @@ void test_load_bytes_relative(void)
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
- err = bpf_prog_load("./load_bytes_relative.o", BPF_PROG_TYPE_CGROUP_SKB,
- &obj, &prog_fd);
+ err = bpf_prog_test_load("./load_bytes_relative.bpf.o", BPF_PROG_TYPE_CGROUP_SKB,
+ &obj, &prog_fd);
if (CHECK_FAIL(err))
goto close_server_fd;
diff --git a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
new file mode 100644
index 000000000000..827e713f6cf1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "local_kptr_stash.skel.h"
+#include "local_kptr_stash_fail.skel.h"
+static void test_local_kptr_stash_simple(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct local_kptr_stash *skel;
+ int ret;
+
+ skel = local_kptr_stash__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_rb_nodes), &opts);
+ ASSERT_OK(ret, "local_kptr_stash_add_nodes run");
+ ASSERT_OK(opts.retval, "local_kptr_stash_add_nodes retval");
+
+ local_kptr_stash__destroy(skel);
+}
+
+static void test_local_kptr_stash_plain(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct local_kptr_stash *skel;
+ int ret;
+
+ skel = local_kptr_stash__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_plain), &opts);
+ ASSERT_OK(ret, "local_kptr_stash_add_plain run");
+ ASSERT_OK(opts.retval, "local_kptr_stash_add_plain retval");
+
+ local_kptr_stash__destroy(skel);
+}
+
+static void test_local_kptr_stash_local_with_root(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct local_kptr_stash *skel;
+ int ret;
+
+ skel = local_kptr_stash__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_local_with_root), &opts);
+ ASSERT_OK(ret, "local_kptr_stash_add_local_with_root run");
+ ASSERT_OK(opts.retval, "local_kptr_stash_add_local_with_root retval");
+
+ local_kptr_stash__destroy(skel);
+}
+
+static void test_local_kptr_stash_unstash(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct local_kptr_stash *skel;
+ int ret;
+
+ skel = local_kptr_stash__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_rb_nodes), &opts);
+ ASSERT_OK(ret, "local_kptr_stash_add_nodes run");
+ ASSERT_OK(opts.retval, "local_kptr_stash_add_nodes retval");
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.unstash_rb_node), &opts);
+ ASSERT_OK(ret, "local_kptr_stash_add_nodes run");
+ ASSERT_EQ(opts.retval, 42, "local_kptr_stash_add_nodes retval");
+
+ local_kptr_stash__destroy(skel);
+}
+
+static void test_refcount_acquire_without_unstash(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct local_kptr_stash *skel;
+ int ret;
+
+ skel = local_kptr_stash__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.refcount_acquire_without_unstash),
+ &opts);
+ ASSERT_OK(ret, "refcount_acquire_without_unstash run");
+ ASSERT_EQ(opts.retval, 2, "refcount_acquire_without_unstash retval");
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_refcounted_node), &opts);
+ ASSERT_OK(ret, "stash_refcounted_node run");
+ ASSERT_OK(opts.retval, "stash_refcounted_node retval");
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.refcount_acquire_without_unstash),
+ &opts);
+ ASSERT_OK(ret, "refcount_acquire_without_unstash (2) run");
+ ASSERT_EQ(opts.retval, 42, "refcount_acquire_without_unstash (2) retval");
+
+ local_kptr_stash__destroy(skel);
+}
+
+static void test_local_kptr_stash_fail(void)
+{
+ RUN_TESTS(local_kptr_stash_fail);
+}
+
+void test_local_kptr_stash(void)
+{
+ if (test__start_subtest("local_kptr_stash_simple"))
+ test_local_kptr_stash_simple();
+ if (test__start_subtest("local_kptr_stash_plain"))
+ test_local_kptr_stash_plain();
+ if (test__start_subtest("local_kptr_stash_local_with_root"))
+ test_local_kptr_stash_local_with_root();
+ if (test__start_subtest("local_kptr_stash_unstash"))
+ test_local_kptr_stash_unstash();
+ if (test__start_subtest("refcount_acquire_without_unstash"))
+ test_refcount_acquire_without_unstash();
+ if (test__start_subtest("local_kptr_stash_fail"))
+ test_local_kptr_stash_fail();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c
new file mode 100644
index 000000000000..d6f14a232002
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "test_log_buf.skel.h"
+#include "bpf_util.h"
+
+#if !defined(__clang__)
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+
+static size_t libbpf_log_pos;
+static char libbpf_log_buf[1024 * 1024];
+static bool libbpf_log_error;
+
+static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt, va_list args)
+{
+ int emitted_cnt;
+ size_t left_cnt;
+
+ left_cnt = sizeof(libbpf_log_buf) - libbpf_log_pos;
+ emitted_cnt = vsnprintf(libbpf_log_buf + libbpf_log_pos, left_cnt, fmt, args);
+
+ if (emitted_cnt < 0 || emitted_cnt + 1 > left_cnt) {
+ libbpf_log_error = true;
+ return 0;
+ }
+
+ libbpf_log_pos += emitted_cnt;
+ return 0;
+}
+
+static void obj_load_log_buf(void)
+{
+ libbpf_print_fn_t old_print_cb = libbpf_set_print(libbpf_print_cb);
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ const size_t log_buf_sz = 1024 * 1024;
+	struct test_log_buf *skel;
+ char *obj_log_buf, *good_log_buf, *bad_log_buf;
+ int err;
+
+ obj_log_buf = malloc(3 * log_buf_sz);
+ if (!ASSERT_OK_PTR(obj_log_buf, "obj_log_buf"))
+ return;
+
+ good_log_buf = obj_log_buf + log_buf_sz;
+ bad_log_buf = obj_log_buf + 2 * log_buf_sz;
+ obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0';
+
+ opts.kernel_log_buf = obj_log_buf;
+ opts.kernel_log_size = log_buf_sz;
+ opts.kernel_log_level = 4; /* for BTF this will turn into 1 */
+
+ /* In the first round every prog has its own log_buf, so libbpf logs
+ * don't have program failure logs
+ */
+ skel = test_log_buf__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ /* set very verbose level for good_prog so we always get detailed logs */
+ bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz);
+ bpf_program__set_log_level(skel->progs.good_prog, 2);
+
+ bpf_program__set_log_buf(skel->progs.bad_prog, bad_log_buf, log_buf_sz);
+ /* log_level 0 with custom log_buf means that verbose logs are not
+ * requested if program load is successful, but libbpf should retry
+ * with log_level 1 on error and put program's verbose load log into
+ * custom log_buf
+ */
+ bpf_program__set_log_level(skel->progs.bad_prog, 0);
+
+ err = test_log_buf__load(skel);
+ if (!ASSERT_ERR(err, "unexpected_load_success"))
+ goto cleanup;
+
+ ASSERT_FALSE(libbpf_log_error, "libbpf_log_error");
+
+ /* there should be no prog loading log because we specified per-prog log buf */
+ ASSERT_NULL(strstr(libbpf_log_buf, "-- BEGIN PROG LOAD LOG --"), "unexp_libbpf_log");
+ ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"),
+ "libbpf_log_not_empty");
+ ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty");
+ ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx() R10=fp0"),
+ "good_log_verbose");
+ ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"),
+ "bad_log_not_empty");
+
+ if (env.verbosity > VERBOSE_NONE) {
+ printf("LIBBPF LOG: \n=================\n%s=================\n", libbpf_log_buf);
+ printf("OBJ LOG: \n=================\n%s=================\n", obj_log_buf);
+ printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf);
+ printf("BAD_PROG LOG:\n=================\n%s=================\n", bad_log_buf);
+ }
+
+ /* reset everything */
+ test_log_buf__destroy(skel);
+ obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0';
+ libbpf_log_buf[0] = '\0';
+ libbpf_log_pos = 0;
+ libbpf_log_error = false;
+
+ /* In the second round we let bad_prog's failure be logged through print callback */
+ opts.kernel_log_buf = NULL; /* let everything through into print callback */
+ opts.kernel_log_size = 0;
+ opts.kernel_log_level = 1;
+
+ skel = test_log_buf__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ /* set normal verbose level for good_prog to check log_level is taken into account */
+ bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz);
+ bpf_program__set_log_level(skel->progs.good_prog, 1);
+
+ err = test_log_buf__load(skel);
+ if (!ASSERT_ERR(err, "unexpected_load_success"))
+ goto cleanup;
+
+ ASSERT_FALSE(libbpf_log_error, "libbpf_log_error");
+
+ /* this time prog loading error should be logged through print callback */
+ ASSERT_OK_PTR(strstr(libbpf_log_buf, "libbpf: prog 'bad_prog': -- BEGIN PROG LOAD LOG --"),
+ "libbpf_log_correct");
+ ASSERT_STREQ(obj_log_buf, "", "obj_log__empty");
+ ASSERT_STREQ(good_log_buf, "processed 4 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n",
+ "good_log_ok");
+ ASSERT_STREQ(bad_log_buf, "", "bad_log_empty");
+
+ if (env.verbosity > VERBOSE_NONE) {
+ printf("LIBBPF LOG: \n=================\n%s=================\n", libbpf_log_buf);
+ printf("OBJ LOG: \n=================\n%s=================\n", obj_log_buf);
+ printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf);
+ printf("BAD_PROG LOG:\n=================\n%s=================\n", bad_log_buf);
+ }
+
+cleanup:
+ free(obj_log_buf);
+ test_log_buf__destroy(skel);
+ libbpf_set_print(old_print_cb);
+}
+
+static void bpf_prog_load_log_buf(void)
+{
+ const struct bpf_insn good_prog_insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ const size_t good_prog_insn_cnt = ARRAY_SIZE(good_prog_insns);
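+	/* bad_prog exits without initializing R0, so the verifier must
+	 * reject it with "R0 !read_ok".
+	 */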
+ const struct bpf_insn bad_prog_insns[] = {
+ BPF_EXIT_INSN(),
+ };
+ size_t bad_prog_insn_cnt = ARRAY_SIZE(bad_prog_insns);
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+ const size_t log_buf_sz = 1024 * 1024;
+ char *log_buf;
+ int fd = -1;
+
+ log_buf = malloc(log_buf_sz);
+ if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
+ return;
+ opts.log_buf = log_buf;
+ opts.log_size = log_buf_sz;
+
+ /* with log_level == 0 log_buf should stay empty for good prog */
+ log_buf[0] = '\0';
+ opts.log_level = 0;
+ fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
+ good_prog_insns, good_prog_insn_cnt, &opts);
+ ASSERT_STREQ(log_buf, "", "good_log_0");
+ ASSERT_GE(fd, 0, "good_fd1");
+ if (fd >= 0)
+ close(fd);
+
+ /* log_level == 2 should always fill log_buf, even for good prog */
+ log_buf[0] = '\0';
+ opts.log_level = 2;
+ fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
+ good_prog_insns, good_prog_insn_cnt, &opts);
+ ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx() R10=fp0"), "good_log_2");
+ ASSERT_GE(fd, 0, "good_fd2");
+ if (fd >= 0)
+ close(fd);
+
+ /* log_level == 0 should fill log_buf for bad prog */
+ log_buf[0] = '\0';
+ opts.log_level = 0;
+ fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "bad_prog", "GPL",
+ bad_prog_insns, bad_prog_insn_cnt, &opts);
+ ASSERT_OK_PTR(strstr(log_buf, "R0 !read_ok"), "bad_log_0");
+ ASSERT_LT(fd, 0, "bad_fd");
+ if (fd >= 0)
+ close(fd);
+
+ free(log_buf);
+}
+
+static void bpf_btf_load_log_buf(void)
+{
+ LIBBPF_OPTS(bpf_btf_load_opts, opts);
+ const size_t log_buf_sz = 1024 * 1024;
+ const void *raw_btf_data;
+ __u32 raw_btf_size;
+ struct btf *btf;
+ char *log_buf = NULL;
+ int fd = -1;
+
+ btf = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf, "empty_btf"))
+ return;
+
+ ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type");
+
+ raw_btf_data = btf__raw_data(btf, &raw_btf_size);
+ if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good"))
+ goto cleanup;
+
+ log_buf = malloc(log_buf_sz);
+ if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
+ goto cleanup;
+ opts.log_buf = log_buf;
+ opts.log_size = log_buf_sz;
+
+ /* with log_level == 0 log_buf should stay empty for good BTF */
+ log_buf[0] = '\0';
+ opts.log_level = 0;
+ fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
+ ASSERT_STREQ(log_buf, "", "good_log_0");
+ ASSERT_GE(fd, 0, "good_fd1");
+ if (fd >= 0)
+ close(fd);
+ fd = -1;
+
+ /* log_level == 2 should always fill log_buf, even for good BTF */
+ log_buf[0] = '\0';
+ opts.log_level = 2;
+ fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
+ printf("LOG_BUF: %s\n", log_buf);
+ ASSERT_OK_PTR(strstr(log_buf, "magic: 0xeb9f"), "good_log_2");
+ ASSERT_GE(fd, 0, "good_fd2");
+ if (fd >= 0)
+ close(fd);
+ fd = -1;
+
+	/* make the BTF bad: add a pointer to a nonexistent type */
+ ASSERT_GT(btf__add_ptr(btf, 100), 0, "bad_ptr_type");
+
+ raw_btf_data = btf__raw_data(btf, &raw_btf_size);
+ if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_bad"))
+ goto cleanup;
+
+ /* log_level == 0 should fill log_buf for bad BTF */
+ log_buf[0] = '\0';
+ opts.log_level = 0;
+ fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
+ printf("LOG_BUF: %s\n", log_buf);
+ ASSERT_OK_PTR(strstr(log_buf, "[2] PTR (anon) type_id=100 Invalid type_id"), "bad_log_0");
+ ASSERT_LT(fd, 0, "bad_fd");
+ if (fd >= 0)
+ close(fd);
+ fd = -1;
+
+cleanup:
+ free(log_buf);
+ btf__free(btf);
+}
+
+void test_log_buf(void)
+{
+ if (test__start_subtest("obj_load_log_buf"))
+ obj_load_log_buf();
+ if (test__start_subtest("bpf_prog_load_log_buf"))
+ bpf_prog_load_log_buf();
+ if (test__start_subtest("bpf_btf_load_log_buf"))
+ bpf_btf_load_log_buf();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
new file mode 100644
index 000000000000..90a98e23be61
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "test_log_fixup.skel.h"
+
+enum trunc_type {
+ TRUNC_NONE,
+ TRUNC_PARTIAL,
+ TRUNC_FULL,
+};
+
+static void bad_core_relo(size_t log_buf_size, enum trunc_type trunc_type)
+{
+ char log_buf[8 * 1024];
+ struct test_log_fixup* skel;
+ int err;
+
+ skel = test_log_fixup__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.bad_relo, true);
+ memset(log_buf, 0, sizeof(log_buf));
+ bpf_program__set_log_buf(skel->progs.bad_relo, log_buf, log_buf_size ?: sizeof(log_buf));
+ bpf_program__set_log_level(skel->progs.bad_relo, 1 | 8); /* BPF_LOG_FIXED to force truncation */
+
+ err = test_log_fixup__load(skel);
+ if (!ASSERT_ERR(err, "load_fail"))
+ goto cleanup;
+
+ ASSERT_HAS_SUBSTR(log_buf,
+ "0: <invalid CO-RE relocation>\n"
+ "failed to resolve CO-RE relocation <byte_sz> ",
+ "log_buf_part1");
+
+ switch (trunc_type) {
+ case TRUNC_NONE:
+ ASSERT_HAS_SUBSTR(log_buf,
+ "struct task_struct___bad.fake_field (0:1 @ offset 4)\n",
+ "log_buf_part2");
+ ASSERT_HAS_SUBSTR(log_buf,
+ "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n",
+ "log_buf_end");
+ break;
+ case TRUNC_PARTIAL:
+		/* we should get libbpf's full patched-in message */
+ ASSERT_HAS_SUBSTR(log_buf,
+ "struct task_struct___bad.fake_field (0:1 @ offset 4)\n",
+ "log_buf_part2");
+		/* we shouldn't get the tail end of the BPF verifier log */
+ ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"),
+ "log_buf_end");
+ break;
+ case TRUNC_FULL:
+		/* we shouldn't get the second part of libbpf's patched-in message */
+ ASSERT_NULL(strstr(log_buf, "struct task_struct___bad.fake_field (0:1 @ offset 4)\n"),
+ "log_buf_part2");
+		/* we shouldn't get the tail end of the BPF verifier log */
+ ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"),
+ "log_buf_end");
+ break;
+ }
+
+ if (env.verbosity > VERBOSE_NONE)
+ printf("LOG: \n=================\n%s=================\n", log_buf);
+cleanup:
+ test_log_fixup__destroy(skel);
+}
+
+static void bad_core_relo_subprog(void)
+{
+ char log_buf[8 * 1024];
+ struct test_log_fixup* skel;
+ int err;
+
+ skel = test_log_fixup__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.bad_relo_subprog, true);
+ bpf_program__set_log_buf(skel->progs.bad_relo_subprog, log_buf, sizeof(log_buf));
+
+ err = test_log_fixup__load(skel);
+ if (!ASSERT_ERR(err, "load_fail"))
+ goto cleanup;
+
+ ASSERT_HAS_SUBSTR(log_buf,
+ ": <invalid CO-RE relocation>\n"
+ "failed to resolve CO-RE relocation <byte_off> ",
+ "log_buf");
+ ASSERT_HAS_SUBSTR(log_buf,
+ "struct task_struct___bad.fake_field_subprog (0:2 @ offset 8)\n",
+ "log_buf");
+
+ if (env.verbosity > VERBOSE_NONE)
+ printf("LOG: \n=================\n%s=================\n", log_buf);
+
+cleanup:
+ test_log_fixup__destroy(skel);
+}
+
+static void missing_map(void)
+{
+ char log_buf[8 * 1024];
+ struct test_log_fixup* skel;
+ int err;
+
+ skel = test_log_fixup__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_map__set_autocreate(skel->maps.missing_map, false);
+
+ bpf_program__set_autoload(skel->progs.use_missing_map, true);
+ bpf_program__set_log_buf(skel->progs.use_missing_map, log_buf, sizeof(log_buf));
+
+ err = test_log_fixup__load(skel);
+ if (!ASSERT_ERR(err, "load_fail"))
+ goto cleanup;
+
+ ASSERT_TRUE(bpf_map__autocreate(skel->maps.existing_map), "existing_map_autocreate");
+ ASSERT_FALSE(bpf_map__autocreate(skel->maps.missing_map), "missing_map_autocreate");
+
+ ASSERT_HAS_SUBSTR(log_buf,
+ ": <invalid BPF map reference>\n"
+ "BPF map 'missing_map' is referenced but wasn't created\n",
+ "log_buf");
+
+ if (env.verbosity > VERBOSE_NONE)
+ printf("LOG: \n=================\n%s=================\n", log_buf);
+
+cleanup:
+ test_log_fixup__destroy(skel);
+}
+
+static void missing_kfunc(void)
+{
+ char log_buf[8 * 1024];
+ struct test_log_fixup* skel;
+ int err;
+
+ skel = test_log_fixup__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.use_missing_kfunc, true);
+ bpf_program__set_log_buf(skel->progs.use_missing_kfunc, log_buf, sizeof(log_buf));
+
+ err = test_log_fixup__load(skel);
+ if (!ASSERT_ERR(err, "load_fail"))
+ goto cleanup;
+
+ ASSERT_HAS_SUBSTR(log_buf,
+ "0: <invalid kfunc call>\n"
+ "kfunc 'bpf_nonexistent_kfunc' is referenced but wasn't resolved\n",
+ "log_buf");
+
+ if (env.verbosity > VERBOSE_NONE)
+ printf("LOG: \n=================\n%s=================\n", log_buf);
+
+cleanup:
+ test_log_fixup__destroy(skel);
+}
+
+void test_log_fixup(void)
+{
+ if (test__start_subtest("bad_core_relo_trunc_none"))
+ bad_core_relo(0, TRUNC_NONE /* full buf */);
+ if (test__start_subtest("bad_core_relo_trunc_partial"))
+ bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */);
+ if (test__start_subtest("bad_core_relo_trunc_full"))
+ bad_core_relo(240, TRUNC_FULL /* truncate also libbpf's message patch */);
+ if (test__start_subtest("bad_core_relo_subprog"))
+ bad_core_relo_subprog();
+ if (test__start_subtest("missing_map"))
+ missing_map();
+ if (test__start_subtest("missing_kfunc"))
+ missing_kfunc();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c b/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c
new file mode 100644
index 000000000000..a767bb4a271c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <test_progs.h>
+#include "test_lookup_and_delete.skel.h"
+
+#define START_VALUE 1234
+#define NEW_VALUE 4321
+#define MAX_ENTRIES 2
+
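+/* 'duration' is required by the CHECK() macro used in these tests */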
+static int duration;
+static int nr_cpus;
+
+static int fill_values(int map_fd)
+{
+ __u64 key, value = START_VALUE;
+ int err;
+
+ for (key = 1; key < MAX_ENTRIES + 1; key++) {
+ err = bpf_map_update_elem(map_fd, &key, &value, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
+ return -1;
+ }
+
+ return 0;
+}
+
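+/* Per-CPU maps take an array of values, one entry per possible CPU */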
+static int fill_values_percpu(int map_fd)
+{
+ __u64 key, value[nr_cpus];
+ int i, err;
+
+ for (i = 0; i < nr_cpus; i++)
+ value[i] = START_VALUE;
+
+ for (key = 1; key < MAX_ENTRIES + 1; key++) {
+ err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
+ return -1;
+ }
+
+ return 0;
+}
+
+static struct test_lookup_and_delete *setup_prog(enum bpf_map_type map_type,
+ int *map_fd)
+{
+ struct test_lookup_and_delete *skel;
+ int err;
+
+ skel = test_lookup_and_delete__open();
+ if (!ASSERT_OK_PTR(skel, "test_lookup_and_delete__open"))
+ return NULL;
+
+ err = bpf_map__set_type(skel->maps.hash_map, map_type);
+ if (!ASSERT_OK(err, "bpf_map__set_type"))
+ goto cleanup;
+
+ err = bpf_map__set_max_entries(skel->maps.hash_map, MAX_ENTRIES);
+ if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
+ goto cleanup;
+
+ err = test_lookup_and_delete__load(skel);
+ if (!ASSERT_OK(err, "test_lookup_and_delete__load"))
+ goto cleanup;
+
+ *map_fd = bpf_map__fd(skel->maps.hash_map);
+ if (!ASSERT_GE(*map_fd, 0, "bpf_map__fd"))
+ goto cleanup;
+
+ return skel;
+
+cleanup:
+ test_lookup_and_delete__destroy(skel);
+ return NULL;
+}
+
+/* Trigger the BPF program that updates the map with the given key and value */
+static int trigger_tp(struct test_lookup_and_delete *skel, __u64 key,
+ __u64 value)
+{
+ int err;
+
+ skel->bss->set_pid = getpid();
+ skel->bss->set_key = key;
+ skel->bss->set_value = value;
+
+ err = test_lookup_and_delete__attach(skel);
+ if (!ASSERT_OK(err, "test_lookup_and_delete__attach"))
+ return -1;
+
+ syscall(__NR_getpgid);
+
+ test_lookup_and_delete__detach(skel);
+
+ return 0;
+}
+
+static void test_lookup_and_delete_hash(void)
+{
+ struct test_lookup_and_delete *skel;
+ __u64 key, value;
+ int map_fd, err;
+
+ /* Setup program and fill the map. */
+ skel = setup_prog(BPF_MAP_TYPE_HASH, &map_fd);
+ if (!ASSERT_OK_PTR(skel, "setup_prog"))
+ return;
+
+ err = fill_values(map_fd);
+ if (!ASSERT_OK(err, "fill_values"))
+ goto cleanup;
+
+ /* Lookup and delete element. */
+ key = 1;
+ err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+ &key, sizeof(key), &value, sizeof(value), 0);
+ if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
+ goto cleanup;
+
+ /* Fetched value should match the initially set value. */
+ if (CHECK(value != START_VALUE, "bpf_map_lookup_and_delete_elem",
+ "unexpected value=%lld\n", value))
+ goto cleanup;
+
+	/* Check that the entry no longer exists. */
+ err = bpf_map_lookup_elem(map_fd, &key, &value);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+cleanup:
+ test_lookup_and_delete__destroy(skel);
+}
+
+static void test_lookup_and_delete_percpu_hash(void)
+{
+ struct test_lookup_and_delete *skel;
+ __u64 key, val, value[nr_cpus];
+ int map_fd, err, i;
+
+ /* Setup program and fill the map. */
+ skel = setup_prog(BPF_MAP_TYPE_PERCPU_HASH, &map_fd);
+ if (!ASSERT_OK_PTR(skel, "setup_prog"))
+ return;
+
+ err = fill_values_percpu(map_fd);
+ if (!ASSERT_OK(err, "fill_values_percpu"))
+ goto cleanup;
+
+ /* Lookup and delete element. */
+ key = 1;
+ err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+ &key, sizeof(key), value, sizeof(value), 0);
+ if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
+ goto cleanup;
+
+ for (i = 0; i < nr_cpus; i++) {
+ val = value[i];
+
+ /* Fetched value should match the initially set value. */
+ if (CHECK(val != START_VALUE, "map value",
+ "unexpected for cpu %d: %lld\n", i, val))
+ goto cleanup;
+ }
+
+	/* Check that the entry no longer exists. */
+ err = bpf_map_lookup_elem(map_fd, &key, value);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+cleanup:
+ test_lookup_and_delete__destroy(skel);
+}
+
+static void test_lookup_and_delete_lru_hash(void)
+{
+ struct test_lookup_and_delete *skel;
+ __u64 key, value;
+ int map_fd, err;
+
+ /* Setup program and fill the LRU map. */
+ skel = setup_prog(BPF_MAP_TYPE_LRU_HASH, &map_fd);
+ if (!ASSERT_OK_PTR(skel, "setup_prog"))
+ return;
+
+ err = fill_values(map_fd);
+ if (!ASSERT_OK(err, "fill_values"))
+ goto cleanup;
+
+	/* Insert a new element at key=3; it should reuse an LRU element. */
+ key = 3;
+ err = trigger_tp(skel, key, NEW_VALUE);
+ if (!ASSERT_OK(err, "trigger_tp"))
+ goto cleanup;
+
+ /* Lookup and delete element 3. */
+ err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+ &key, sizeof(key), &value, sizeof(value), 0);
+ if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
+ goto cleanup;
+
+ /* Value should match the new value. */
+ if (CHECK(value != NEW_VALUE, "bpf_map_lookup_and_delete_elem",
+ "unexpected value=%lld\n", value))
+ goto cleanup;
+
+	/* Check that entries 3 and 1 no longer exist. */
+ err = bpf_map_lookup_elem(map_fd, &key, &value);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+ key = 1;
+ err = bpf_map_lookup_elem(map_fd, &key, &value);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+cleanup:
+ test_lookup_and_delete__destroy(skel);
+}
+
+static void test_lookup_and_delete_lru_percpu_hash(void)
+{
+ struct test_lookup_and_delete *skel;
+ __u64 key, val, value[nr_cpus];
+ int map_fd, err, i, cpucnt = 0;
+
+ /* Setup program and fill the LRU map. */
+ skel = setup_prog(BPF_MAP_TYPE_LRU_PERCPU_HASH, &map_fd);
+ if (!ASSERT_OK_PTR(skel, "setup_prog"))
+ return;
+
+ err = fill_values_percpu(map_fd);
+ if (!ASSERT_OK(err, "fill_values_percpu"))
+ goto cleanup;
+
+	/* Insert a new element at key=3; it should reuse LRU element 1. */
+ key = 3;
+ err = trigger_tp(skel, key, NEW_VALUE);
+ if (!ASSERT_OK(err, "trigger_tp"))
+ goto cleanup;
+
+ /* Clean value. */
+ for (i = 0; i < nr_cpus; i++)
+ value[i] = 0;
+
+ /* Lookup and delete element 3. */
+ err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+ &key, sizeof(key), value, sizeof(value), 0);
+ if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
+ goto cleanup;
+
+ /* Check if only one CPU has set the value. */
+ for (i = 0; i < nr_cpus; i++) {
+ val = value[i];
+ if (val) {
+ if (CHECK(val != NEW_VALUE, "map value",
+ "unexpected for cpu %d: %lld\n", i, val))
+ goto cleanup;
+ cpucnt++;
+ }
+ }
+ if (CHECK(cpucnt != 1, "map value", "set for %d CPUs instead of 1!\n",
+ cpucnt))
+ goto cleanup;
+
+	/* Check that entries 3 and 1 no longer exist. */
+ err = bpf_map_lookup_elem(map_fd, &key, &value);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+ key = 1;
+ err = bpf_map_lookup_elem(map_fd, &key, &value);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+cleanup:
+ test_lookup_and_delete__destroy(skel);
+}
+
+void test_lookup_and_delete(void)
+{
+ nr_cpus = bpf_num_possible_cpus();
+
+ if (test__start_subtest("lookup_and_delete"))
+ test_lookup_and_delete_hash();
+ if (test__start_subtest("lookup_and_delete_percpu"))
+ test_lookup_and_delete_percpu_hash();
+ if (test__start_subtest("lookup_and_delete_lru"))
+ test_lookup_and_delete_lru_hash();
+ if (test__start_subtest("lookup_and_delete_lru_percpu"))
+ test_lookup_and_delete_lru_percpu_hash();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/lookup_key.c b/tools/testing/selftests/bpf/prog_tests/lookup_key.c
new file mode 100644
index 000000000000..68025e88f352
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lookup_key.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include <linux/keyctl.h>
+#include <test_progs.h>
+
+#include "test_lookup_key.skel.h"
+
+#define KEY_LOOKUP_CREATE 0x01
+#define KEY_LOOKUP_PARTIAL 0x02
+
+static bool kfunc_not_supported;
+
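+/* Intercept libbpf's warning about unresolved ksyms so that the test can be
+ * skipped (rather than failed) on kernels without the key-lookup kfuncs.
+ */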
+static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt,
+ va_list args)
+{
+ char *func;
+
+ if (strcmp(fmt, "libbpf: extern (func ksym) '%s': not found in kernel or module BTFs\n"))
+ return 0;
+
+ func = va_arg(args, char *);
+
+ if (strcmp(func, "bpf_lookup_user_key") && strcmp(func, "bpf_key_put") &&
+ strcmp(func, "bpf_lookup_system_key"))
+ return 0;
+
+ kfunc_not_supported = true;
+ return 0;
+}
+
+void test_lookup_key(void)
+{
+ libbpf_print_fn_t old_print_cb;
+ struct test_lookup_key *skel;
+ __u32 next_id;
+ int ret;
+
+ skel = test_lookup_key__open();
+ if (!ASSERT_OK_PTR(skel, "test_lookup_key__open"))
+ return;
+
+ old_print_cb = libbpf_set_print(libbpf_print_cb);
+ ret = test_lookup_key__load(skel);
+ libbpf_set_print(old_print_cb);
+
+ if (ret < 0 && kfunc_not_supported) {
+ printf("%s:SKIP:bpf_lookup_*_key(), bpf_key_put() kfuncs not supported\n",
+ __func__);
+ test__skip();
+ goto close_prog;
+ }
+
+ if (!ASSERT_OK(ret, "test_lookup_key__load"))
+ goto close_prog;
+
+ ret = test_lookup_key__attach(skel);
+ if (!ASSERT_OK(ret, "test_lookup_key__attach"))
+ goto close_prog;
+
+ skel->bss->monitored_pid = getpid();
+ skel->bss->key_serial = KEY_SPEC_THREAD_KEYRING;
+
+	/* The thread-specific keyring does not exist, so this lookup fails. */
+ skel->bss->flags = 0;
+
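+	/* bpf_prog_get_next_id() is a convenient bpf() syscall that fires the LSM
+	 * hook the test prog attaches to; its return value therefore reflects the
+	 * prog's key-lookup outcome.
+	 */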
+ ret = bpf_prog_get_next_id(0, &next_id);
+ if (!ASSERT_LT(ret, 0, "bpf_prog_get_next_id"))
+ goto close_prog;
+
+	/* Force creation of the thread-specific keyring; this lookup succeeds. */
+ skel->bss->flags = KEY_LOOKUP_CREATE;
+
+ ret = bpf_prog_get_next_id(0, &next_id);
+ if (!ASSERT_OK(ret, "bpf_prog_get_next_id"))
+ goto close_prog;
+
+ /* Pass both lookup flags for parameter validation. */
+ skel->bss->flags = KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL;
+
+ ret = bpf_prog_get_next_id(0, &next_id);
+ if (!ASSERT_OK(ret, "bpf_prog_get_next_id"))
+ goto close_prog;
+
+ /* Pass invalid flags. */
+ skel->bss->flags = UINT64_MAX;
+
+ ret = bpf_prog_get_next_id(0, &next_id);
+ if (!ASSERT_LT(ret, 0, "bpf_prog_get_next_id"))
+ goto close_prog;
+
+ skel->bss->key_serial = 0;
+ skel->bss->key_id = 1;
+
+ ret = bpf_prog_get_next_id(0, &next_id);
+ if (!ASSERT_OK(ret, "bpf_prog_get_next_id"))
+ goto close_prog;
+
+ skel->bss->key_id = UINT32_MAX;
+
+ ret = bpf_prog_get_next_id(0, &next_id);
+ ASSERT_LT(ret, 0, "bpf_prog_get_next_id");
+
+close_prog:
+ skel->bss->monitored_pid = 0;
+ test_lookup_key__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/lru_bug.c b/tools/testing/selftests/bpf/prog_tests/lru_bug.c
new file mode 100644
index 000000000000..3c7822390827
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lru_bug.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+#include "lru_bug.skel.h"
+
+void test_lru_bug(void)
+{
+ struct lru_bug *skel;
+ int ret;
+
+ skel = lru_bug__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "lru_bug__open_and_load"))
+ return;
+ ret = lru_bug__attach(skel);
+ if (!ASSERT_OK(ret, "lru_bug__attach"))
+ goto end;
+ usleep(1);
+ ASSERT_OK(skel->data->result, "prealloc_lru_pop doesn't call check_and_init_map_value");
+end:
+ lru_bug__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
new file mode 100644
index 000000000000..6df25de8f080
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "lsm_cgroup.skel.h"
+#include "lsm_cgroup_nonvoid.skel.h"
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+
+static struct btf *btf;
+
+static __u32 query_prog_cnt(int cgroup_fd, const char *attach_func)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, p);
+ int cnt = 0;
+ int i;
+
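+	/* first query with no buffers, just to learn prog_cnt */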
+ ASSERT_OK(bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p), "prog_query");
+
+ if (!attach_func)
+ return p.prog_cnt;
+
+ /* When attach_func is provided, count the number of progs that
+ * attach to the given symbol.
+ */
+
+ if (!btf)
+ btf = btf__load_vmlinux_btf();
+ if (!ASSERT_OK(libbpf_get_error(btf), "btf_vmlinux"))
+ return -1;
+
+ p.prog_ids = malloc(sizeof(u32) * p.prog_cnt);
+ p.prog_attach_flags = malloc(sizeof(u32) * p.prog_cnt);
+ ASSERT_OK(bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p), "prog_query");
+
+ for (i = 0; i < p.prog_cnt; i++) {
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int fd;
+
+ fd = bpf_prog_get_fd_by_id(p.prog_ids[i]);
+ ASSERT_GE(fd, 0, "prog_get_fd_by_id");
+ ASSERT_OK(bpf_prog_get_info_by_fd(fd, &info, &info_len),
+ "prog_info_by_fd");
+ close(fd);
+
+ if (info.attach_btf_id ==
+ btf__find_by_name_kind(btf, attach_func, BTF_KIND_FUNC))
+ cnt++;
+ }
+
+ free(p.prog_ids);
+ free(p.prog_attach_flags);
+
+ return cnt;
+}
+
+static void test_lsm_cgroup_functional(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, attach_opts);
+ DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
+ int cgroup_fd = -1, cgroup_fd2 = -1, cgroup_fd3 = -1;
+ int listen_fd, client_fd, accepted_fd;
+ struct lsm_cgroup *skel = NULL;
+ int post_create_prog_fd2 = -1;
+ int post_create_prog_fd = -1;
+ int bind_link_fd2 = -1;
+ int bind_prog_fd2 = -1;
+ int alloc_prog_fd = -1;
+ int bind_prog_fd = -1;
+ int bind_link_fd = -1;
+ int clone_prog_fd = -1;
+ int err, fd, prio;
+ socklen_t socklen;
+
+ cgroup_fd3 = test__join_cgroup("/sock_policy_empty");
+ if (!ASSERT_GE(cgroup_fd3, 0, "create empty cgroup"))
+ goto close_cgroup;
+
+ cgroup_fd2 = test__join_cgroup("/sock_policy_reuse");
+ if (!ASSERT_GE(cgroup_fd2, 0, "create cgroup for reuse"))
+ goto close_cgroup;
+
+ cgroup_fd = test__join_cgroup("/sock_policy");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
+ goto close_cgroup;
+
+ skel = lsm_cgroup__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ goto close_cgroup;
+
+ post_create_prog_fd = bpf_program__fd(skel->progs.socket_post_create);
+ post_create_prog_fd2 = bpf_program__fd(skel->progs.socket_post_create2);
+ bind_prog_fd = bpf_program__fd(skel->progs.socket_bind);
+ bind_prog_fd2 = bpf_program__fd(skel->progs.socket_bind2);
+ alloc_prog_fd = bpf_program__fd(skel->progs.socket_alloc);
+ clone_prog_fd = bpf_program__fd(skel->progs.socket_clone);
+
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_sk_alloc_security"), 0, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 0, "total prog count");
+ err = bpf_prog_attach(alloc_prog_fd, cgroup_fd, BPF_LSM_CGROUP, 0);
+ if (err == -ENOTSUPP) {
+ test__skip();
+ goto close_cgroup;
+ }
+ if (!ASSERT_OK(err, "attach alloc_prog_fd"))
+ goto detach_cgroup;
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_sk_alloc_security"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 1, "total prog count");
+
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_inet_csk_clone"), 0, "prog count");
+ err = bpf_prog_attach(clone_prog_fd, cgroup_fd, BPF_LSM_CGROUP, 0);
+ if (!ASSERT_OK(err, "attach clone_prog_fd"))
+ goto detach_cgroup;
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_inet_csk_clone"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 2, "total prog count");
+
+ /* Make sure replacing works. */
+
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 0, "prog count");
+ err = bpf_prog_attach(post_create_prog_fd, cgroup_fd,
+ BPF_LSM_CGROUP, 0);
+ if (!ASSERT_OK(err, "attach post_create_prog_fd"))
+ goto detach_cgroup;
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 3, "total prog count");
+
+ attach_opts.replace_prog_fd = post_create_prog_fd;
+ err = bpf_prog_attach_opts(post_create_prog_fd2, cgroup_fd,
+ BPF_LSM_CGROUP, &attach_opts);
+ if (!ASSERT_OK(err, "prog replace post_create_prog_fd"))
+ goto detach_cgroup;
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 3, "total prog count");
+
+ /* Try the same attach/replace via link API. */
+
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 0, "prog count");
+ bind_link_fd = bpf_link_create(bind_prog_fd, cgroup_fd,
+ BPF_LSM_CGROUP, NULL);
+ if (!ASSERT_GE(bind_link_fd, 0, "link create bind_prog_fd"))
+ goto detach_cgroup;
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
+
+ update_opts.old_prog_fd = bind_prog_fd;
+ update_opts.flags = BPF_F_REPLACE;
+
+ err = bpf_link_update(bind_link_fd, bind_prog_fd2, &update_opts);
+ if (!ASSERT_OK(err, "link update bind_prog_fd"))
+ goto detach_cgroup;
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
+
+	/* Attach another instance of the bind program to another cgroup.
+ * This should trigger the reuse of the trampoline shim (two
+ * programs attaching to the same btf_id).
+ */
+
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd2, "bpf_lsm_socket_bind"), 0, "prog count");
+ bind_link_fd2 = bpf_link_create(bind_prog_fd2, cgroup_fd2,
+ BPF_LSM_CGROUP, NULL);
+ if (!ASSERT_GE(bind_link_fd2, 0, "link create bind_prog_fd2"))
+ goto detach_cgroup;
+ ASSERT_EQ(query_prog_cnt(cgroup_fd2, "bpf_lsm_socket_bind"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd2, NULL), 1, "total prog count");
+
+ fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (!(skel->kconfig->CONFIG_SECURITY_APPARMOR
+ || skel->kconfig->CONFIG_SECURITY_SELINUX
+ || skel->kconfig->CONFIG_SECURITY_SMACK))
+ /* AF_UNIX is prohibited. */
+ ASSERT_LT(fd, 0, "socket(AF_UNIX)");
+ close(fd);
+
+ /* AF_INET6 gets default policy (sk_priority). */
+
+ fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (!ASSERT_GE(fd, 0, "socket(SOCK_STREAM)"))
+ goto detach_cgroup;
+
+ prio = 0;
+ socklen = sizeof(prio);
+ ASSERT_GE(getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
+ "getsockopt");
+ ASSERT_EQ(prio, 123, "sk_priority");
+
+ close(fd);
+
+ /* TX-only AF_PACKET is allowed. */
+
+ ASSERT_LT(socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)), 0,
+ "socket(AF_PACKET, ..., ETH_P_ALL)");
+
+ fd = socket(AF_PACKET, SOCK_RAW, 0);
+ ASSERT_GE(fd, 0, "socket(AF_PACKET, ..., 0)");
+
+	/* TX-only AF_PACKET cannot be rebound. */
+
+ struct sockaddr_ll sa = {
+ .sll_family = AF_PACKET,
+ .sll_protocol = htons(ETH_P_ALL),
+ };
+ ASSERT_LT(bind(fd, (struct sockaddr *)&sa, sizeof(sa)), 0,
+ "bind(ETH_P_ALL)");
+
+ close(fd);
+
+ /* Trigger passive open. */
+
+ listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+ ASSERT_GE(listen_fd, 0, "start_server");
+ client_fd = connect_to_fd(listen_fd, 0);
+ ASSERT_GE(client_fd, 0, "connect_to_fd");
+ accepted_fd = accept(listen_fd, NULL, NULL);
+ ASSERT_GE(accepted_fd, 0, "accept");
+
+ prio = 0;
+ socklen = sizeof(prio);
+ ASSERT_GE(getsockopt(accepted_fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
+ "getsockopt");
+ ASSERT_EQ(prio, 234, "sk_priority");
+
+ /* These are replaced and never called. */
+ ASSERT_EQ(skel->bss->called_socket_post_create, 0, "called_create");
+ ASSERT_EQ(skel->bss->called_socket_bind, 0, "called_bind");
+
+ /* AF_INET6+SOCK_STREAM
+ * AF_PACKET+SOCK_RAW
+	 * AF_UNIX+SOCK_RAW if non-BPF LSMs are already installed
+ * listen_fd
+ * client_fd
+ * accepted_fd
+ */
+ if (skel->kconfig->CONFIG_SECURITY_APPARMOR
+ || skel->kconfig->CONFIG_SECURITY_SELINUX
+ || skel->kconfig->CONFIG_SECURITY_SMACK)
+		/* AF_UNIX+SOCK_RAW if non-BPF LSMs are already installed */
+ ASSERT_EQ(skel->bss->called_socket_post_create2, 6, "called_create2");
+ else
+ ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2");
+
+ /* start_server
+ * bind(ETH_P_ALL)
+ */
+ ASSERT_EQ(skel->bss->called_socket_bind2, 2, "called_bind2");
+ /* Single accept(). */
+ ASSERT_EQ(skel->bss->called_socket_clone, 1, "called_clone");
+
+ /* AF_UNIX+SOCK_STREAM (failed)
+ * AF_INET6+SOCK_STREAM
+ * AF_PACKET+SOCK_RAW (failed)
+ * AF_PACKET+SOCK_RAW
+ * listen_fd
+ * client_fd
+ * accepted_fd
+ */
+ ASSERT_EQ(skel->bss->called_socket_alloc, 7, "called_alloc");
+
+ close(listen_fd);
+ close(client_fd);
+ close(accepted_fd);
+
+ /* Make sure other cgroup doesn't trigger the programs. */
+
+ if (!ASSERT_OK(join_cgroup("/sock_policy_empty"), "join root cgroup"))
+ goto detach_cgroup;
+
+ fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (!ASSERT_GE(fd, 0, "socket(SOCK_STREAM)"))
+ goto detach_cgroup;
+
+ prio = 0;
+ socklen = sizeof(prio);
+ ASSERT_GE(getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
+ "getsockopt");
+ ASSERT_EQ(prio, 0, "sk_priority");
+
+ close(fd);
+
+detach_cgroup:
+ ASSERT_GE(bpf_prog_detach2(post_create_prog_fd2, cgroup_fd,
+ BPF_LSM_CGROUP), 0, "detach_create");
+ close(bind_link_fd);
+ /* Don't close bind_link_fd2, exercise cgroup release cleanup. */
+ ASSERT_GE(bpf_prog_detach2(alloc_prog_fd, cgroup_fd,
+ BPF_LSM_CGROUP), 0, "detach_alloc");
+ ASSERT_GE(bpf_prog_detach2(clone_prog_fd, cgroup_fd,
+ BPF_LSM_CGROUP), 0, "detach_clone");
+
+close_cgroup:
+ close(cgroup_fd);
+ close(cgroup_fd2);
+ close(cgroup_fd3);
+ lsm_cgroup__destroy(skel);
+}
+
+static void test_lsm_cgroup_nonvoid(void)
+{
+ struct lsm_cgroup_nonvoid *skel = NULL;
+
+ skel = lsm_cgroup_nonvoid__open_and_load();
+ ASSERT_NULL(skel, "open succeeds");
+ lsm_cgroup_nonvoid__destroy(skel);
+}
+
+void test_lsm_cgroup(void)
+{
+ if (test__start_subtest("functional"))
+ test_lsm_cgroup_functional();
+ if (test__start_subtest("nonvoid"))
+ test_lsm_cgroup_nonvoid();
+ btf__free(btf);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
new file mode 100644
index 000000000000..ccec0fcdabc1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LWT_HELPERS_H
+#define __LWT_HELPERS_H
+
+#include <time.h>
+#include <net/if.h>
+#include <linux/icmp.h>
+
+#include "test_progs.h"
+
+#define log_err(MSG, ...) \
+ fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
+ __FILE__, __LINE__, strerror(errno), ##__VA_ARGS__)
+
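+/* Run a subtest inside its own freshly created netns, then tear the netns down */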
+#define RUN_TEST(name) \
+ ({ \
+ if (test__start_subtest(#name)) \
+ if (ASSERT_OK(netns_create(), "netns_create")) { \
+ struct nstoken *token = open_netns(NETNS); \
+ if (ASSERT_OK_PTR(token, "setns")) { \
+ test_ ## name(); \
+ close_netns(token); \
+ } \
+ netns_delete(); \
+ } \
+ })
+
+static inline int netns_create(void)
+{
+ return system("ip netns add " NETNS);
+}
+
+static inline int netns_delete(void)
+{
+ return system("ip netns del " NETNS ">/dev/null 2>&1");
+}
+
+#define ICMP_PAYLOAD_SIZE 100
+
+/* Match an ICMP packet with payload len ICMP_PAYLOAD_SIZE */
+static int __expect_icmp_ipv4(char *buf, ssize_t len)
+{
+ struct iphdr *ip = (struct iphdr *)buf;
+ struct icmphdr *icmp = (struct icmphdr *)(ip + 1);
+ ssize_t min_header_len = sizeof(*ip) + sizeof(*icmp);
+
+ if (len < min_header_len)
+ return -1;
+
+ if (ip->protocol != IPPROTO_ICMP)
+ return -1;
+
+ if (icmp->type != ICMP_ECHO)
+ return -1;
+
+ return len == ICMP_PAYLOAD_SIZE + min_header_len;
+}
+
+typedef int (*filter_t) (char *, ssize_t);
+
+/* wait_for_packet - wait for a packet that matches the filter
+ *
+ * @fd: tun fd/packet socket to read packet
+ * @filter: filter function, returning 1 if matches
+ * @timeout: timeout to wait for the packet
+ *
+ * Returns 1 if a matching packet is read, 0 if timeout expired, -1 on error.
+ */
+static int wait_for_packet(int fd, filter_t filter, struct timeval *timeout)
+{
+ char buf[4096];
+ int max_retry = 5; /* in case we read some spurious packets */
+ fd_set fds;
+
+ FD_ZERO(&fds);
+ while (max_retry--) {
+		/* Linux modifies the timeout arg, so make a copy */
+ struct timeval copied_timeout = *timeout;
+ ssize_t ret = -1;
+
+ FD_SET(fd, &fds);
+
+ ret = select(1 + fd, &fds, NULL, NULL, &copied_timeout);
+ if (ret <= 0) {
+ if (errno == EINTR)
+ continue;
+ else if (errno == EAGAIN || ret == 0)
+ return 0;
+
+ log_err("select failed");
+ return -1;
+ }
+
+ ret = read(fd, buf, sizeof(buf));
+
+ if (ret <= 0) {
+ log_err("read(dev): %ld", ret);
+ return -1;
+ }
+
+ if (filter && filter(buf, ret) > 0)
+ return 1;
+ }
+
+ return 0;
+}
+
+#endif /* __LWT_HELPERS_H */
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_ip_encap.c b/tools/testing/selftests/bpf/prog_tests/lwt_ip_encap.c
new file mode 100644
index 000000000000..b6391af5f6f9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_ip_encap.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <netinet/in.h>
+
+#include "network_helpers.h"
+#include "test_progs.h"
+
+#define BPF_FILE "test_lwt_ip_encap.bpf.o"
+
+#define NETNS_NAME_SIZE 32
+#define NETNS_BASE "ns-lwt-ip-encap"
+
+#define IP4_ADDR_1 "172.16.1.100"
+#define IP4_ADDR_2 "172.16.2.100"
+#define IP4_ADDR_3 "172.16.3.100"
+#define IP4_ADDR_4 "172.16.4.100"
+#define IP4_ADDR_5 "172.16.5.100"
+#define IP4_ADDR_6 "172.16.6.100"
+#define IP4_ADDR_7 "172.16.7.100"
+#define IP4_ADDR_8 "172.16.8.100"
+#define IP4_ADDR_GRE "172.16.16.100"
+
+#define IP4_ADDR_SRC IP4_ADDR_1
+#define IP4_ADDR_DST IP4_ADDR_4
+
+#define IP6_ADDR_1 "fb01::1"
+#define IP6_ADDR_2 "fb02::1"
+#define IP6_ADDR_3 "fb03::1"
+#define IP6_ADDR_4 "fb04::1"
+#define IP6_ADDR_5 "fb05::1"
+#define IP6_ADDR_6 "fb06::1"
+#define IP6_ADDR_7 "fb07::1"
+#define IP6_ADDR_8 "fb08::1"
+#define IP6_ADDR_GRE "fb10::1"
+
+#define IP6_ADDR_SRC IP6_ADDR_1
+#define IP6_ADDR_DST IP6_ADDR_4
+
+/* Setup/topology:
+ *
+ * NS1 NS2 NS3
+ * veth1 <---> veth2 veth3 <---> veth4 (the top route)
+ * veth5 <---> veth6 veth7 <---> veth8 (the bottom route)
+ *
+ * Each vethN gets IP[4|6]_ADDR_N address.
+ *
+ * IP*_ADDR_SRC = IP*_ADDR_1
+ * IP*_ADDR_DST = IP*_ADDR_4
+ *
+ * All tests send pings from IP*_ADDR_SRC to IP*_ADDR_DST.
+ *
+ * By default, routes are configured to allow packets to go
+ * IP*_ADDR_1 <=> IP*_ADDR_2 <=> IP*_ADDR_3 <=> IP*_ADDR_4 (the top route).
+ *
+ * A GRE device is installed in NS3 with IP*_ADDR_GRE, and
+ * NS1/NS2 are configured to route packets to IP*_ADDR_GRE via IP*_ADDR_8
+ * (the bottom route).
+ *
+ * Tests:
+ *
+ * 1. Routes NS2->IP*_ADDR_DST are brought down, so the only way a ping
+ * from IP*_ADDR_SRC to IP*_ADDR_DST can work is via IP*_ADDR_GRE.
+ *
+ * 2a. In an egress test, a bpf LWT_XMIT program is installed on veth1
+ * that encaps the packets with an IP/GRE header to route to IP*_ADDR_GRE.
+ *
+ * ping: SRC->[encap at veth1:egress]->GRE:decap->DST
+ * ping replies go DST->SRC directly
+ *
+ * 2b. In an ingress test, a bpf LWT_IN program is installed on veth2
+ * that encaps the packets with an IP/GRE header to route to IP*_ADDR_GRE.
+ *
+ * ping: SRC->[encap at veth2:ingress]->GRE:decap->DST
+ * ping replies go DST->SRC directly
+ */
+
+static int create_ns(char *name, size_t name_sz)
+{
+ if (!name)
+ goto fail;
+
+ if (!ASSERT_OK(append_tid(name, name_sz), "append TID"))
+ goto fail;
+
+ SYS(fail, "ip netns add %s", name);
+
+ /* rp_filter gets confused by what these tests are doing, so disable it */
+ SYS(fail, "ip netns exec %s sysctl -wq net.ipv4.conf.all.rp_filter=0", name);
+ SYS(fail, "ip netns exec %s sysctl -wq net.ipv4.conf.default.rp_filter=0", name);
+ /* Disable IPv6 DAD because it sometimes takes too long and fails tests */
+ SYS(fail, "ip netns exec %s sysctl -wq net.ipv6.conf.all.accept_dad=0", name);
+ SYS(fail, "ip netns exec %s sysctl -wq net.ipv6.conf.default.accept_dad=0", name);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static int set_top_addr(const char *ns1, const char *ns2, const char *ns3)
+{
+ SYS(fail, "ip -n %s a add %s/24 dev veth1", ns1, IP4_ADDR_1);
+ SYS(fail, "ip -n %s a add %s/24 dev veth2", ns2, IP4_ADDR_2);
+ SYS(fail, "ip -n %s a add %s/24 dev veth3", ns2, IP4_ADDR_3);
+ SYS(fail, "ip -n %s a add %s/24 dev veth4", ns3, IP4_ADDR_4);
+ SYS(fail, "ip -n %s -6 a add %s/128 dev veth1", ns1, IP6_ADDR_1);
+ SYS(fail, "ip -n %s -6 a add %s/128 dev veth2", ns2, IP6_ADDR_2);
+ SYS(fail, "ip -n %s -6 a add %s/128 dev veth3", ns2, IP6_ADDR_3);
+ SYS(fail, "ip -n %s -6 a add %s/128 dev veth4", ns3, IP6_ADDR_4);
+
+ SYS(fail, "ip -n %s link set dev veth1 up", ns1);
+ SYS(fail, "ip -n %s link set dev veth2 up", ns2);
+ SYS(fail, "ip -n %s link set dev veth3 up", ns2);
+ SYS(fail, "ip -n %s link set dev veth4 up", ns3);
+
+ return 0;
+fail:
+ return 1;
+}
+
+static int set_bottom_addr(const char *ns1, const char *ns2, const char *ns3)
+{
+ SYS(fail, "ip -n %s a add %s/24 dev veth5", ns1, IP4_ADDR_5);
+ SYS(fail, "ip -n %s a add %s/24 dev veth6", ns2, IP4_ADDR_6);
+ SYS(fail, "ip -n %s a add %s/24 dev veth7", ns2, IP4_ADDR_7);
+ SYS(fail, "ip -n %s a add %s/24 dev veth8", ns3, IP4_ADDR_8);
+ SYS(fail, "ip -n %s -6 a add %s/128 dev veth5", ns1, IP6_ADDR_5);
+ SYS(fail, "ip -n %s -6 a add %s/128 dev veth6", ns2, IP6_ADDR_6);
+ SYS(fail, "ip -n %s -6 a add %s/128 dev veth7", ns2, IP6_ADDR_7);
+ SYS(fail, "ip -n %s -6 a add %s/128 dev veth8", ns3, IP6_ADDR_8);
+
+ SYS(fail, "ip -n %s link set dev veth5 up", ns1);
+ SYS(fail, "ip -n %s link set dev veth6 up", ns2);
+ SYS(fail, "ip -n %s link set dev veth7 up", ns2);
+ SYS(fail, "ip -n %s link set dev veth8 up", ns3);
+
+ return 0;
+fail:
+ return 1;
+}
+
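+/* Move the veth devices in NS1 and NS2 into VRF "red" (routing table 1001) */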
+static int configure_vrf(const char *ns1, const char *ns2)
+{
+ if (!ns1 || !ns2)
+ goto fail;
+
+ SYS(fail, "ip -n %s link add red type vrf table 1001", ns1);
+ SYS(fail, "ip -n %s link set red up", ns1);
+ SYS(fail, "ip -n %s route add table 1001 unreachable default metric 8192", ns1);
+ SYS(fail, "ip -n %s -6 route add table 1001 unreachable default metric 8192", ns1);
+ SYS(fail, "ip -n %s link set veth1 vrf red", ns1);
+ SYS(fail, "ip -n %s link set veth5 vrf red", ns1);
+
+ SYS(fail, "ip -n %s link add red type vrf table 1001", ns2);
+ SYS(fail, "ip -n %s link set red up", ns2);
+ SYS(fail, "ip -n %s route add table 1001 unreachable default metric 8192", ns2);
+ SYS(fail, "ip -n %s -6 route add table 1001 unreachable default metric 8192", ns2);
+ SYS(fail, "ip -n %s link set veth2 vrf red", ns2);
+ SYS(fail, "ip -n %s link set veth3 vrf red", ns2);
+ SYS(fail, "ip -n %s link set veth6 vrf red", ns2);
+ SYS(fail, "ip -n %s link set veth7 vrf red", ns2);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static int configure_ns1(const char *ns1, const char *vrf)
+{
+ struct nstoken *nstoken = NULL;
+
+ if (!ns1 || !vrf)
+ goto fail;
+
+ nstoken = open_netns(ns1);
+ if (!ASSERT_OK_PTR(nstoken, "open ns1"))
+ goto fail;
+
+ /* Top route */
+ SYS(fail, "ip route add %s/32 dev veth1 %s", IP4_ADDR_2, vrf);
+ SYS(fail, "ip route add default dev veth1 via %s %s", IP4_ADDR_2, vrf);
+ SYS(fail, "ip -6 route add %s/128 dev veth1 %s", IP6_ADDR_2, vrf);
+ SYS(fail, "ip -6 route add default dev veth1 via %s %s", IP6_ADDR_2, vrf);
+ /* Bottom route */
+ SYS(fail, "ip route add %s/32 dev veth5 %s", IP4_ADDR_6, vrf);
+ SYS(fail, "ip route add %s/32 dev veth5 via %s %s", IP4_ADDR_7, IP4_ADDR_6, vrf);
+ SYS(fail, "ip route add %s/32 dev veth5 via %s %s", IP4_ADDR_8, IP4_ADDR_6, vrf);
+ SYS(fail, "ip -6 route add %s/128 dev veth5 %s", IP6_ADDR_6, vrf);
+ SYS(fail, "ip -6 route add %s/128 dev veth5 via %s %s", IP6_ADDR_7, IP6_ADDR_6, vrf);
+ SYS(fail, "ip -6 route add %s/128 dev veth5 via %s %s", IP6_ADDR_8, IP6_ADDR_6, vrf);
+
+ close_netns(nstoken);
+ return 0;
+fail:
+ close_netns(nstoken);
+ return -1;
+}
+
+static int configure_ns2(const char *ns2, const char *vrf)
+{
+ struct nstoken *nstoken = NULL;
+
+ if (!ns2 || !vrf)
+ goto fail;
+
+ nstoken = open_netns(ns2);
+ if (!ASSERT_OK_PTR(nstoken, "open ns2"))
+ goto fail;
+
+ SYS(fail, "ip netns exec %s sysctl -wq net.ipv4.ip_forward=1", ns2);
+ SYS(fail, "ip netns exec %s sysctl -wq net.ipv6.conf.all.forwarding=1", ns2);
+
+ /* Top route */
+ SYS(fail, "ip route add %s/32 dev veth2 %s", IP4_ADDR_1, vrf);
+ SYS(fail, "ip route add %s/32 dev veth3 %s", IP4_ADDR_4, vrf);
+ SYS(fail, "ip -6 route add %s/128 dev veth2 %s", IP6_ADDR_1, vrf);
+ SYS(fail, "ip -6 route add %s/128 dev veth3 %s", IP6_ADDR_4, vrf);
+ /* Bottom route */
+ SYS(fail, "ip route add %s/32 dev veth6 %s", IP4_ADDR_5, vrf);
+ SYS(fail, "ip route add %s/32 dev veth7 %s", IP4_ADDR_8, vrf);
+ SYS(fail, "ip -6 route add %s/128 dev veth6 %s", IP6_ADDR_5, vrf);
+ SYS(fail, "ip -6 route add %s/128 dev veth7 %s", IP6_ADDR_8, vrf);
+
+ close_netns(nstoken);
+ return 0;
+fail:
+ close_netns(nstoken);
+ return -1;
+}
+
+static int configure_ns3(const char *ns3)
+{
+ struct nstoken *nstoken = NULL;
+
+ if (!ns3)
+ goto fail;
+
+ nstoken = open_netns(ns3);
+ if (!ASSERT_OK_PTR(nstoken, "open ns3"))
+ goto fail;
+
+ /* Top route */
+ SYS(fail, "ip route add %s/32 dev veth4", IP4_ADDR_3);
+ SYS(fail, "ip route add %s/32 dev veth4 via %s", IP4_ADDR_1, IP4_ADDR_3);
+ SYS(fail, "ip route add %s/32 dev veth4 via %s", IP4_ADDR_2, IP4_ADDR_3);
+ SYS(fail, "ip -6 route add %s/128 dev veth4", IP6_ADDR_3);
+ SYS(fail, "ip -6 route add %s/128 dev veth4 via %s", IP6_ADDR_1, IP6_ADDR_3);
+ SYS(fail, "ip -6 route add %s/128 dev veth4 via %s", IP6_ADDR_2, IP6_ADDR_3);
+ /* Bottom route */
+ SYS(fail, "ip route add %s/32 dev veth8", IP4_ADDR_7);
+ SYS(fail, "ip route add %s/32 dev veth8 via %s", IP4_ADDR_5, IP4_ADDR_7);
+ SYS(fail, "ip route add %s/32 dev veth8 via %s", IP4_ADDR_6, IP4_ADDR_7);
+ SYS(fail, "ip -6 route add %s/128 dev veth8", IP6_ADDR_7);
+ SYS(fail, "ip -6 route add %s/128 dev veth8 via %s", IP6_ADDR_5, IP6_ADDR_7);
+ SYS(fail, "ip -6 route add %s/128 dev veth8 via %s", IP6_ADDR_6, IP6_ADDR_7);
+
+ /* Configure IPv4 GRE device */
+ SYS(fail, "ip tunnel add gre_dev mode gre remote %s local %s ttl 255",
+ IP4_ADDR_1, IP4_ADDR_GRE);
+ SYS(fail, "ip link set gre_dev up");
+ SYS(fail, "ip a add %s dev gre_dev", IP4_ADDR_GRE);
+
+ /* Configure IPv6 GRE device */
+ SYS(fail, "ip tunnel add gre6_dev mode ip6gre remote %s local %s ttl 255",
+ IP6_ADDR_1, IP6_ADDR_GRE);
+ SYS(fail, "ip link set gre6_dev up");
+ SYS(fail, "ip a add %s dev gre6_dev", IP6_ADDR_GRE);
+
+ close_netns(nstoken);
+ return 0;
+fail:
+ close_netns(nstoken);
+ return -1;
+}
+
+static int setup_network(char *ns1, char *ns2, char *ns3, const char *vrf)
+{
+ if (!ns1 || !ns2 || !ns3 || !vrf)
+ goto fail;
+
+ SYS(fail, "ip -n %s link add veth1 type veth peer name veth2 netns %s", ns1, ns2);
+ SYS(fail, "ip -n %s link add veth3 type veth peer name veth4 netns %s", ns2, ns3);
+ SYS(fail, "ip -n %s link add veth5 type veth peer name veth6 netns %s", ns1, ns2);
+ SYS(fail, "ip -n %s link add veth7 type veth peer name veth8 netns %s", ns2, ns3);
+
+ if (vrf[0]) {
+ if (!ASSERT_OK(configure_vrf(ns1, ns2), "configure vrf"))
+ goto fail;
+ }
+ if (!ASSERT_OK(set_top_addr(ns1, ns2, ns3), "set top addresses"))
+ goto fail;
+
+ if (!ASSERT_OK(set_bottom_addr(ns1, ns2, ns3), "set bottom addresses"))
+ goto fail;
+
+ if (!ASSERT_OK(configure_ns1(ns1, vrf), "configure ns1 routes"))
+ goto fail;
+
+ if (!ASSERT_OK(configure_ns2(ns2, vrf), "configure ns2 routes"))
+ goto fail;
+
+ if (!ASSERT_OK(configure_ns3(ns3), "configure ns3 routes"))
+ goto fail;
+
+ /* Link bottom route to the GRE tunnels */
+ SYS(fail, "ip -n %s route add %s/32 dev veth5 via %s %s",
+ ns1, IP4_ADDR_GRE, IP4_ADDR_6, vrf);
+ SYS(fail, "ip -n %s route add %s/32 dev veth7 via %s %s",
+ ns2, IP4_ADDR_GRE, IP4_ADDR_8, vrf);
+ SYS(fail, "ip -n %s -6 route add %s/128 dev veth5 via %s %s",
+ ns1, IP6_ADDR_GRE, IP6_ADDR_6, vrf);
+ SYS(fail, "ip -n %s -6 route add %s/128 dev veth7 via %s %s",
+ ns2, IP6_ADDR_GRE, IP6_ADDR_8, vrf);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static int remove_routes_to_gredev(const char *ns1, const char *ns2, const char *vrf)
+{
+ SYS(fail, "ip -n %s route del %s dev veth5 %s", ns1, IP4_ADDR_GRE, vrf);
+ SYS(fail, "ip -n %s route del %s dev veth7 %s", ns2, IP4_ADDR_GRE, vrf);
+ SYS(fail, "ip -n %s -6 route del %s/128 dev veth5 %s", ns1, IP6_ADDR_GRE, vrf);
+ SYS(fail, "ip -n %s -6 route del %s/128 dev veth7 %s", ns2, IP6_ADDR_GRE, vrf);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static int add_unreachable_routes_to_gredev(const char *ns1, const char *ns2, const char *vrf)
+{
+ SYS(fail, "ip -n %s route add unreachable %s/32 %s", ns1, IP4_ADDR_GRE, vrf);
+ SYS(fail, "ip -n %s route add unreachable %s/32 %s", ns2, IP4_ADDR_GRE, vrf);
+ SYS(fail, "ip -n %s -6 route add unreachable %s/128 %s", ns1, IP6_ADDR_GRE, vrf);
+ SYS(fail, "ip -n %s -6 route add unreachable %s/128 %s", ns2, IP6_ADDR_GRE, vrf);
+
+ return 0;
+fail:
+ return -1;
+}
+
+#define GSO_SIZE 5000
+#define GSO_TCP_PORT 9000
+/* This tests the fix from commit ea0371f78799 ("net: fix GSO in bpf_lwt_push_ip_encap") */
+static int test_gso_fix(const char *ns1, const char *ns3, int family)
+{
+ const char *ip_addr = family == AF_INET ? IP4_ADDR_DST : IP6_ADDR_DST;
+ char gso_packet[GSO_SIZE] = {};
+ struct nstoken *nstoken = NULL;
+ int sfd, cfd, afd;
+ ssize_t bytes;
+ int ret = -1;
+
+ if (!ns1 || !ns3)
+ return ret;
+
+ nstoken = open_netns(ns3);
+ if (!ASSERT_OK_PTR(nstoken, "open ns3"))
+ return ret;
+
+ sfd = start_server_str(family, SOCK_STREAM, ip_addr, GSO_TCP_PORT, NULL);
+ if (!ASSERT_OK_FD(sfd, "start server"))
+ goto close_netns;
+
+ close_netns(nstoken);
+
+ nstoken = open_netns(ns1);
+ if (!ASSERT_OK_PTR(nstoken, "open ns1"))
+ goto close_server;
+
+ cfd = connect_to_addr_str(family, SOCK_STREAM, ip_addr, GSO_TCP_PORT, NULL);
+ if (!ASSERT_OK_FD(cfd, "connect to server"))
+ goto close_server;
+
+ close_netns(nstoken);
+ nstoken = NULL;
+
+ afd = accept(sfd, NULL, NULL);
+ if (!ASSERT_OK_FD(afd, "accept"))
+ goto close_client;
+
+ /* Send a packet larger than MTU */
+ bytes = send(cfd, gso_packet, GSO_SIZE, 0);
+ if (!ASSERT_EQ(bytes, GSO_SIZE, "send packet"))
+ goto close_accept;
+
+ /* Verify we received all expected bytes */
+ bytes = read(afd, gso_packet, GSO_SIZE);
+ if (!ASSERT_EQ(bytes, GSO_SIZE, "receive packet"))
+ goto close_accept;
+
+ ret = 0;
+
+close_accept:
+ close(afd);
+close_client:
+ close(cfd);
+close_server:
+ close(sfd);
+close_netns:
+ close_netns(nstoken);
+
+ return ret;
+}
+
+static int check_ping_ok(const char *ns1)
+{
+ SYS(fail, "ip netns exec %s ping -c 1 -W1 -I veth1 %s > /dev/null", ns1, IP4_ADDR_DST);
+ SYS(fail, "ip netns exec %s ping6 -c 1 -W1 -I veth1 %s > /dev/null", ns1, IP6_ADDR_DST);
+ return 0;
+fail:
+ return -1;
+}
+
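+/* Both pings must fail: SYS_NOFAIL() returns the command's exit status, so a
+ * zero status means an unexpected successful ping.
+ */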
+static int check_ping_fails(const char *ns1)
+{
+ int ret;
+
+ ret = SYS_NOFAIL("ip netns exec %s ping -c 1 -W1 -I veth1 %s", ns1, IP4_ADDR_DST);
+ if (!ret)
+ return -1;
+
+ ret = SYS_NOFAIL("ip netns exec %s ping6 -c 1 -W1 -I veth1 %s", ns1, IP6_ADDR_DST);
+ if (!ret)
+ return -1;
+
+ return 0;
+}
+
+#define EGRESS true
+#define INGRESS false
+#define IPV4_ENCAP true
+#define IPV6_ENCAP false
+static void lwt_ip_encap(bool ipv4_encap, bool egress, const char *vrf)
+{
+ char ns1[NETNS_NAME_SIZE] = NETNS_BASE "-1-";
+ char ns2[NETNS_NAME_SIZE] = NETNS_BASE "-2-";
+ char ns3[NETNS_NAME_SIZE] = NETNS_BASE "-3-";
+ char *sec = ipv4_encap ? "encap_gre" : "encap_gre6";
+
+ if (!vrf)
+ return;
+
+ if (!ASSERT_OK(create_ns(ns1, NETNS_NAME_SIZE), "create ns1"))
+ goto out;
+ if (!ASSERT_OK(create_ns(ns2, NETNS_NAME_SIZE), "create ns2"))
+ goto out;
+ if (!ASSERT_OK(create_ns(ns3, NETNS_NAME_SIZE), "create ns3"))
+ goto out;
+
+ if (!ASSERT_OK(setup_network(ns1, ns2, ns3, vrf), "setup network"))
+ goto out;
+
+ /* By default, pings work */
+ if (!ASSERT_OK(check_ping_ok(ns1), "ping OK"))
+ goto out;
+
+	/* Remove NS2->DST routes: ping fails */
+ SYS(out, "ip -n %s route del %s/32 dev veth3 %s", ns2, IP4_ADDR_DST, vrf);
+ SYS(out, "ip -n %s -6 route del %s/128 dev veth3 %s", ns2, IP6_ADDR_DST, vrf);
+ if (!ASSERT_OK(check_ping_fails(ns1), "ping expected fail"))
+ goto out;
+
+	/* Install replacement routes (LWT/eBPF): pings succeed */
+ if (egress) {
+ SYS(out, "ip -n %s route add %s encap bpf xmit obj %s sec %s dev veth1 %s",
+ ns1, IP4_ADDR_DST, BPF_FILE, sec, vrf);
+ SYS(out, "ip -n %s -6 route add %s encap bpf xmit obj %s sec %s dev veth1 %s",
+ ns1, IP6_ADDR_DST, BPF_FILE, sec, vrf);
+ } else {
+ SYS(out, "ip -n %s route add %s encap bpf in obj %s sec %s dev veth2 %s",
+ ns2, IP4_ADDR_DST, BPF_FILE, sec, vrf);
+ SYS(out, "ip -n %s -6 route add %s encap bpf in obj %s sec %s dev veth2 %s",
+ ns2, IP6_ADDR_DST, BPF_FILE, sec, vrf);
+ }
+
+ if (!ASSERT_OK(check_ping_ok(ns1), "ping OK"))
+ goto out;
+
+ /* Skip GSO tests with VRF: VRF routing needs properly assigned
+ * source IP/device, which is easy to do with ping but hard with TCP.
+ */
+ if (egress && !vrf[0]) {
+ if (!ASSERT_OK(test_gso_fix(ns1, ns3, AF_INET), "test GSO"))
+ goto out;
+ }
+
+ /* Negative test: remove routes to GRE devices: ping fails */
+ if (!ASSERT_OK(remove_routes_to_gredev(ns1, ns2, vrf), "remove routes to gredev"))
+ goto out;
+ if (!ASSERT_OK(check_ping_fails(ns1), "ping expected fail"))
+ goto out;
+
+	/* Another negative test: unreachable routes to gredev also make ping fail */
+ if (!ASSERT_OK(add_unreachable_routes_to_gredev(ns1, ns2, vrf),
+ "add unreachable routes"))
+ goto out;
+ ASSERT_OK(check_ping_fails(ns1), "ping expected fail");
+
+out:
+ SYS_NOFAIL("ip netns del %s", ns1);
+ SYS_NOFAIL("ip netns del %s", ns2);
+ SYS_NOFAIL("ip netns del %s", ns3);
+}
+
+void test_lwt_ip_encap_vrf_ipv6(void)
+{
+ if (test__start_subtest("egress"))
+ lwt_ip_encap(IPV6_ENCAP, EGRESS, "vrf red");
+
+ if (test__start_subtest("ingress"))
+ lwt_ip_encap(IPV6_ENCAP, INGRESS, "vrf red");
+}
+
+void test_lwt_ip_encap_vrf_ipv4(void)
+{
+ if (test__start_subtest("egress"))
+ lwt_ip_encap(IPV4_ENCAP, EGRESS, "vrf red");
+
+ if (test__start_subtest("ingress"))
+ lwt_ip_encap(IPV4_ENCAP, INGRESS, "vrf red");
+}
+
+void test_lwt_ip_encap_ipv6(void)
+{
+ if (test__start_subtest("egress"))
+ lwt_ip_encap(IPV6_ENCAP, EGRESS, "");
+
+ if (test__start_subtest("ingress"))
+ lwt_ip_encap(IPV6_ENCAP, INGRESS, "");
+}
+
+void test_lwt_ip_encap_ipv4(void)
+{
+ if (test__start_subtest("egress"))
+ lwt_ip_encap(IPV4_ENCAP, EGRESS, "");
+
+ if (test__start_subtest("ingress"))
+ lwt_ip_encap(IPV4_ENCAP, INGRESS, "");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
new file mode 100644
index 000000000000..b6e8d822e8e9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * Test suite of lwt_xmit BPF programs that redirect packets.
+ * The tests check not only that these programs work as expected under normal
+ * conditions, but also that they handle abnormal situations gracefully.
+ *
+ * WARNING
+ * -------
+ * This test suite may crash the kernel, thus should be run in a VM.
+ *
+ * Setup:
+ * ---------
+ * All tests are performed in a single netns. Two lwt encap routes are set up for
+ * each subtest:
+ *
+ * ip route add 10.0.0.0/24 encap bpf xmit <obj> sec "<ingress_sec>" dev link_err
+ * ip route add 20.0.0.0/24 encap bpf xmit <obj> sec "<egress_sec>" dev link_err
+ *
+ * Here <obj> is statically defined to test_lwt_redirect.bpf.o, and each section
+ * of this object holds a program entry to test. The BPF object is built from
+ * progs/test_lwt_redirect.c. A generated BPF skeleton is not used, since
+ * libbpf does not yet support attaching lwt programs.
+ *
+ * For testing, ping commands are run in the test netns:
+ *
+ * ping 10.0.0.<ifindex> -c 1 -w 1 -s 100
+ * ping 20.0.0.<ifindex> -c 1 -w 1 -s 100
+ *
+ * Scenarios:
+ * --------------------------------
+ * 1. Redirect to a running tap/tun device
+ * 2. Redirect to a down tap/tun device
+ * 3. Redirect to a vlan device with lower layer down
+ *
+ * In case 1, ping packets should be received by a packet socket on the
+ * target device when redirected to ingress, and by the tun/tap fd when
+ * redirected to egress.
+ *
+ * Cases 2 and 3 are considered successful as long as they do not crash
+ * the kernel, i.e. no regression is introduced.
+ *
+ * Cases 1 and 2 use a tap device to test redirection to a device that
+ * requires a MAC header, and a tun device to test the case where no MAC
+ * header is added.
+ */
+#include <sys/socket.h>
+#include <net/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/if_tun.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdlib.h>
+
+#define NETNS "ns_lwt_redirect"
+#include "lwt_helpers.h"
+#include "test_progs.h"
+#include "network_helpers.h"
+
+#define BPF_OBJECT "test_lwt_redirect.bpf.o"
+#define INGRESS_SEC(need_mac) ((need_mac) ? "redir_ingress" : "redir_ingress_nomac")
+#define EGRESS_SEC(need_mac) ((need_mac) ? "redir_egress" : "redir_egress_nomac")
+#define LOCAL_SRC "10.0.0.1"
+#define CIDR_TO_INGRESS "10.0.0.0/24"
+#define CIDR_TO_EGRESS "20.0.0.0/24"
+
+/* Ping to trigger the redirect toward the given dev; the last byte of the
+ * destination IP is the target device index.
+ *
+ * Note: the ping command inside BPF-CI is the busybox version, so it lacks
+ * some features, such as the -m option to set the packet mark.
+ */
+static void ping_dev(const char *dev, bool is_ingress)
+{
+ int link_index = if_nametoindex(dev);
+ char ip[256];
+
+ if (!ASSERT_GE(link_index, 0, "if_nametoindex"))
+ return;
+
+ if (is_ingress)
+ snprintf(ip, sizeof(ip), "10.0.0.%d", link_index);
+ else
+ snprintf(ip, sizeof(ip), "20.0.0.%d", link_index);
+
+ /* We won't get a reply. Don't fail here */
+ SYS_NOFAIL("ping %s -c1 -W1 -s %d",
+ ip, ICMP_PAYLOAD_SIZE);
+}
+
+static int new_packet_sock(const char *ifname)
+{
+ int err = 0;
+ int ignore_outgoing = 1;
+ int ifindex = -1;
+ int s = -1;
+
+ s = socket(AF_PACKET, SOCK_RAW, 0);
+ if (!ASSERT_GE(s, 0, "socket(AF_PACKET)"))
+ return -1;
+
+ ifindex = if_nametoindex(ifname);
+ if (!ASSERT_GE(ifindex, 0, "if_nametoindex")) {
+ close(s);
+ return -1;
+ }
+
+ struct sockaddr_ll addr = {
+ .sll_family = AF_PACKET,
+ .sll_protocol = htons(ETH_P_IP),
+ .sll_ifindex = ifindex,
+ };
+
+ err = bind(s, (struct sockaddr *)&addr, sizeof(addr));
+ if (!ASSERT_OK(err, "bind(AF_PACKET)")) {
+ close(s);
+ return -1;
+ }
+
+	/* Use a packet socket that captures only ingress traffic, so we can
+	 * detect a regression that mistakenly redirects the packet to the
+	 * egress path instead.
+	 */
+ err = setsockopt(s, SOL_PACKET, PACKET_IGNORE_OUTGOING,
+ &ignore_outgoing, sizeof(ignore_outgoing));
+ if (!ASSERT_OK(err, "setsockopt(PACKET_IGNORE_OUTGOING)")) {
+ close(s);
+ return -1;
+ }
+
+ err = fcntl(s, F_SETFL, O_NONBLOCK);
+ if (!ASSERT_OK(err, "fcntl(O_NONBLOCK)")) {
+ close(s);
+ return -1;
+ }
+
+ return s;
+}
+
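+/* Filter for tap traffic: strip the Ethernet header, then match the ICMP echo */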
+static int expect_icmp(char *buf, ssize_t len)
+{
+ struct ethhdr *eth = (struct ethhdr *)buf;
+
+ if (len < (ssize_t)sizeof(*eth))
+ return -1;
+
+ if (eth->h_proto == htons(ETH_P_IP))
+ return __expect_icmp_ipv4((char *)(eth + 1), len - sizeof(*eth));
+
+ return -1;
+}
+
+static int expect_icmp_nomac(char *buf, ssize_t len)
+{
+ return __expect_icmp_ipv4(buf, len);
+}
+
+static void send_and_capture_test_packets(const char *test_name, int tap_fd,
+ const char *target_dev, bool need_mac)
+{
+ int psock = -1;
+ struct timeval timeo = {
+ .tv_sec = 0,
+ .tv_usec = 250000,
+ };
+ int ret = -1;
+
+ filter_t filter = need_mac ? expect_icmp : expect_icmp_nomac;
+
+ ping_dev(target_dev, false);
+
+ ret = wait_for_packet(tap_fd, filter, &timeo);
+ if (!ASSERT_EQ(ret, 1, "wait_for_epacket")) {
+ log_err("%s egress test fails", test_name);
+ goto out;
+ }
+
+ psock = new_packet_sock(target_dev);
+ ping_dev(target_dev, true);
+
+ ret = wait_for_packet(psock, filter, &timeo);
+ if (!ASSERT_EQ(ret, 1, "wait_for_ipacket")) {
+ log_err("%s ingress test fails", test_name);
+ goto out;
+ }
+
+out:
+ if (psock >= 0)
+ close(psock);
+}
+
+static int setup_redirect_target(const char *target_dev, bool need_mac)
+{
+ int target_index = -1;
+ int tap_fd = -1;
+
+ tap_fd = open_tuntap(target_dev, need_mac);
+ if (!ASSERT_GE(tap_fd, 0, "open_tuntap"))
+ goto fail;
+
+ target_index = if_nametoindex(target_dev);
+ if (!ASSERT_GE(target_index, 0, "if_nametoindex"))
+ goto fail;
+
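+	/* disable IPv6 to keep unrelated IPv6 traffic out of the packet captures */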
+ SYS(fail, "sysctl -w net.ipv6.conf.all.disable_ipv6=1");
+ SYS(fail, "ip link add link_err type dummy");
+ SYS(fail, "ip link set lo up");
+ SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32");
+ SYS(fail, "ip link set link_err up");
+ SYS(fail, "ip link set %s up", target_dev);
+
+ SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec %s",
+ CIDR_TO_INGRESS, BPF_OBJECT, INGRESS_SEC(need_mac));
+
+ SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec %s",
+ CIDR_TO_EGRESS, BPF_OBJECT, EGRESS_SEC(need_mac));
+
+ return tap_fd;
+
+fail:
+ if (tap_fd >= 0)
+ close(tap_fd);
+ return -1;
+}
+
+static void test_lwt_redirect_normal(void)
+{
+ const char *target_dev = "tap0";
+ int tap_fd = -1;
+ bool need_mac = true;
+
+ tap_fd = setup_redirect_target(target_dev, need_mac);
+ if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target"))
+ return;
+
+ send_and_capture_test_packets(__func__, tap_fd, target_dev, need_mac);
+ close(tap_fd);
+}
+
+static void test_lwt_redirect_normal_nomac(void)
+{
+ const char *target_dev = "tun0";
+ int tap_fd = -1;
+ bool need_mac = false;
+
+ tap_fd = setup_redirect_target(target_dev, need_mac);
+ if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target"))
+ return;
+
+ send_and_capture_test_packets(__func__, tap_fd, target_dev, need_mac);
+ close(tap_fd);
+}
+
+/* This test aims to guard against future regressions. As long as the kernel
+ * does not panic, the test is considered a success.
+ */
+static void __test_lwt_redirect_dev_down(bool need_mac)
+{
+ const char *target_dev = "tap0";
+ int tap_fd = -1;
+
+ tap_fd = setup_redirect_target(target_dev, need_mac);
+ if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target"))
+ return;
+
+ SYS(out, "ip link set %s down", target_dev);
+ ping_dev(target_dev, true);
+ ping_dev(target_dev, false);
+
+out:
+ close(tap_fd);
+}
+
+static void test_lwt_redirect_dev_down(void)
+{
+ __test_lwt_redirect_dev_down(true);
+}
+
+static void test_lwt_redirect_dev_down_nomac(void)
+{
+ __test_lwt_redirect_dev_down(false);
+}
+
+/* This test aims to guard against future regressions. As long as the kernel
+ * does not panic, the test is considered a success.
+ */
+static void test_lwt_redirect_dev_carrier_down(void)
+{
+ const char *lower_dev = "tap0";
+ const char *vlan_dev = "vlan100";
+ int tap_fd = -1;
+
+ tap_fd = setup_redirect_target(lower_dev, true);
+ if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target"))
+ return;
+
+ SYS(out, "ip link add vlan100 link %s type vlan id 100", lower_dev);
+ SYS(out, "ip link set %s up", vlan_dev);
+ SYS(out, "ip link set %s down", lower_dev);
+ ping_dev(vlan_dev, true);
+ ping_dev(vlan_dev, false);
+
+out:
+ close(tap_fd);
+}
+
+static void *test_lwt_redirect_run(void *arg)
+{
+ netns_delete();
+ RUN_TEST(lwt_redirect_normal);
+ RUN_TEST(lwt_redirect_normal_nomac);
+ RUN_TEST(lwt_redirect_dev_down);
+ RUN_TEST(lwt_redirect_dev_down_nomac);
+ RUN_TEST(lwt_redirect_dev_carrier_down);
+ return NULL;
+}
+
+void test_lwt_redirect(void)
+{
+ pthread_t test_thread;
+ int err;
+
+ /* Run the tests in their own thread to isolate the namespace changes
+ * so they do not affect the environment of other tests.
+ * (specifically needed because of unshare(CLONE_NEWNS) in open_netns())
+ */
+ err = pthread_create(&test_thread, NULL, &test_lwt_redirect_run, NULL);
+ if (ASSERT_OK(err, "pthread_create"))
+ ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
new file mode 100644
index 000000000000..6c50c0f63f43
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * Test suite of lwt BPF programs that reroute packets.
+ * The tests focus not only on whether these programs work as expected in the
+ * normal case, but also on whether they handle abnormal situations
+ * gracefully. This test suite currently only covers the lwt_xmit hook;
+ * lwt_in tests have not been implemented.
+ *
+ * WARNING
+ * -------
+ * This test suite can crash the kernel, thus should be run in a VM.
+ *
+ * Setup:
+ * ---------
+ * All tests are performed in a single netns. An lwt encap route is set up
+ * for each subtest:
+ *
+ * ip route add 10.0.0.0/24 encap bpf xmit <obj> sec "<section_N>" dev link_err
+ *
+ * Here <obj> is statically defined to test_lwt_reroute.bpf.o, and it contains
+ * a single test program entry. This program sets the packet mark to the last
+ * byte of the IPv4 daddr. For example, a packet going to 1.2.3.4 will receive
+ * an skb mark of 4. A packet is only marked once, and IP x.x.x.0 is skipped
+ * to avoid a route loop. We do not use a generated BPF skeleton since
+ * attachment of lwt programs is not yet supported by libbpf.
+ *
+ * The test program brings up a tun device, and sets up the following
+ * routes:
+ *
+ * ip rule add pref 100 from all fwmark <tun_index> lookup 100
+ * ip route add table 100 default dev tun0
+ *
+ * For normal testing, a ping command is running in the test netns:
+ *
+ * ping 10.0.0.<tun_index> -c 1 -w 1 -s 100
+ *
+ * For abnormal testing, fq is used as the qdisc of the tun device. A UDP
+ * socket then tries to overflow the fq queue and trigger a qdisc drop error.
+ *
+ * Scenarios:
+ * --------------------------------
+ * 1. Reroute to a running tun device
+ * 2. Reroute to a device whose qdisc drops the packet
+ *
+ * For case 1, ping packets should be received by the tun device.
+ *
+ * For case 2, UDP packets are forced to overflow the fq limit. As long as
+ * the kernel does not crash, the test is considered successful.
+ */
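+
+/* For reference, the rough shape of the lwt_xmit program described above
+ * (a sketch only; names are illustrative and the actual source of truth is
+ * test_lwt_reroute.bpf.o):
+ *
+ *	SEC("lwt_xmit")
+ *	int test_lwt_reroute(struct __sk_buff *skb)
+ *	{
+ *		// parse the IPv4 header, take the last byte of the daddr
+ *		if (skb->mark || last_byte == 0)
+ *			return BPF_OK;          // mark once, skip x.x.x.0
+ *		skb->mark = last_byte;
+ *		return BPF_LWT_REROUTE;         // re-run the route lookup
+ *	}
+ */
+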
+#define NETNS "ns_lwt_reroute"
+#include <netinet/in.h>
+#include "lwt_helpers.h"
+#include "network_helpers.h"
+#include <linux/net_tstamp.h>
+
+#define BPF_OBJECT "test_lwt_reroute.bpf.o"
+#define LOCAL_SRC "10.0.0.1"
+#define TEST_CIDR "10.0.0.0/24"
+#define XMIT_HOOK "xmit"
+#define XMIT_SECTION "lwt_xmit"
+#define NSEC_PER_SEC 1000000000ULL
+
+/* send a ping to be rerouted to the target device */
+static void ping_once(const char *ip)
+{
+ /* We won't get a reply. Don't fail here */
+ SYS_NOFAIL("ping %s -c1 -W1 -s %d",
+ ip, ICMP_PAYLOAD_SIZE);
+}
+
+/* Send snd_target UDP packets to overflow the fq queue and trigger a qdisc
+ * drop error. This is done via the TX timestamp (SO_TXTIME), which forces fq
+ * to buffer the delayed packets.
+ */
+static int overflow_fq(int snd_target, const char *target_ip)
+{
+ struct sockaddr_in addr = {
+ .sin_family = AF_INET,
+ .sin_port = htons(1234),
+ };
+
+	char data_buf[8]; /* only the packet count matters, so a small buffer will do */
+ char control_buf[CMSG_SPACE(sizeof(uint64_t))];
+ struct iovec iov = {
+ .iov_base = data_buf,
+ .iov_len = sizeof(data_buf),
+ };
+ int err = -1;
+ int s = -1;
+ struct sock_txtime txtime_on = {
+ .clockid = CLOCK_MONOTONIC,
+ .flags = 0,
+ };
+ struct msghdr msg = {
+ .msg_name = &addr,
+ .msg_namelen = sizeof(addr),
+ .msg_control = control_buf,
+ .msg_controllen = sizeof(control_buf),
+ .msg_iovlen = 1,
+ .msg_iov = &iov,
+ };
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+
+ memset(data_buf, 0, sizeof(data_buf));
+
+ s = socket(AF_INET, SOCK_DGRAM, 0);
+ if (!ASSERT_GE(s, 0, "socket"))
+ goto out;
+
+ err = setsockopt(s, SOL_SOCKET, SO_TXTIME, &txtime_on, sizeof(txtime_on));
+ if (!ASSERT_OK(err, "setsockopt(SO_TXTIME)"))
+ goto out;
+
+ err = inet_pton(AF_INET, target_ip, &addr.sin_addr);
+ if (!ASSERT_EQ(err, 1, "inet_pton"))
+ goto out;
+
+ while (snd_target > 0) {
+ struct timespec now;
+
+ memset(control_buf, 0, sizeof(control_buf));
+ cmsg->cmsg_type = SCM_TXTIME;
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(uint64_t));
+
+ err = clock_gettime(CLOCK_MONOTONIC, &now);
+ if (!ASSERT_OK(err, "clock_gettime(CLOCK_MONOTONIC)")) {
+ err = -1;
+ goto out;
+ }
+
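+		/* Schedule the packet one second in the future so fq has to
+		 * buffer it, letting us exceed the queue limit.
+		 */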
+		*(uint64_t *)CMSG_DATA(cmsg) = (now.tv_sec + 1) * NSEC_PER_SEC +
+			       now.tv_nsec;
+
+		/* We intentionally send more than the fq limit, so ignore
+		 * the error here.
+		 */
+ sendmsg(s, &msg, MSG_NOSIGNAL);
+ snd_target--;
+ }
+
+ /* no kernel crash so far is considered success */
+ err = 0;
+
+out:
+ if (s >= 0)
+ close(s);
+
+ return err;
+}
+
+static int setup(const char *tun_dev)
+{
+ int target_index = -1;
+ int tap_fd = -1;
+
+ tap_fd = open_tuntap(tun_dev, false);
+	if (!ASSERT_GE(tap_fd, 0, "open_tuntap"))
+ return -1;
+
+ target_index = if_nametoindex(tun_dev);
+	if (!ASSERT_NEQ(target_index, 0, "if_nametoindex"))
+		goto fail;
+
+ SYS(fail, "ip link add link_err type dummy");
+ SYS(fail, "ip link set lo up");
+ SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32");
+ SYS(fail, "ip link set link_err up");
+ SYS(fail, "ip link set %s up", tun_dev);
+
+ SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec lwt_xmit",
+ TEST_CIDR, BPF_OBJECT);
+
+ SYS(fail, "ip rule add pref 100 from all fwmark %d lookup 100",
+ target_index);
+	SYS(fail, "ip route add table 100 default dev %s", tun_dev);
+
+ return tap_fd;
+
+fail:
+ if (tap_fd >= 0)
+ close(tap_fd);
+ return -1;
+}
+
+static void test_lwt_reroute_normal_xmit(void)
+{
+ const char *tun_dev = "tun0";
+ int tun_fd = -1;
+ int ifindex = -1;
+ char ip[256];
+ struct timeval timeo = {
+ .tv_sec = 0,
+ .tv_usec = 250000,
+ };
+
+ tun_fd = setup(tun_dev);
+ if (!ASSERT_GE(tun_fd, 0, "setup_reroute"))
+ return;
+
+ ifindex = if_nametoindex(tun_dev);
+	if (!ASSERT_NEQ(ifindex, 0, "if_nametoindex"))
+ return;
+
+	snprintf(ip, sizeof(ip), "10.0.0.%d", ifindex);
+
+ /* ping packets should be received by the tun device */
+ ping_once(ip);
+
+ if (!ASSERT_EQ(wait_for_packet(tun_fd, __expect_icmp_ipv4, &timeo), 1,
+ "wait_for_packet"))
+ log_err("%s xmit", __func__);
+}
+
+/*
+ * Test the failure case when the skb is dropped at the qdisc. This is a
+ * regression prevention at the xmit hook only.
+ */
+static void test_lwt_reroute_qdisc_dropped(void)
+{
+ const char *tun_dev = "tun0";
+ int tun_fd = -1;
+ int ifindex = -1;
+ char ip[256];
+
+ tun_fd = setup(tun_dev);
+ if (!ASSERT_GE(tun_fd, 0, "setup_reroute"))
+ goto fail;
+
+ SYS(fail, "tc qdisc replace dev %s root fq limit 5 flow_limit 5", tun_dev);
+
+ ifindex = if_nametoindex(tun_dev);
+	if (!ASSERT_NEQ(ifindex, 0, "if_nametoindex"))
+		goto fail;
+
+	snprintf(ip, sizeof(ip), "10.0.0.%d", ifindex);
+ ASSERT_EQ(overflow_fq(10, ip), 0, "overflow_fq");
+
+fail:
+ if (tun_fd >= 0)
+ close(tun_fd);
+}
+
+static void *test_lwt_reroute_run(void *arg)
+{
+ netns_delete();
+ RUN_TEST(lwt_reroute_normal_xmit);
+ RUN_TEST(lwt_reroute_qdisc_dropped);
+ return NULL;
+}
+
+void test_lwt_reroute(void)
+{
+ pthread_t test_thread;
+ int err;
+
+ /* Run the tests in their own thread to isolate the namespace changes
+ * so they do not affect the environment of other tests.
+ * (specifically needed because of unshare(CLONE_NEWNS) in open_netns())
+ */
+ err = pthread_create(&test_thread, NULL, &test_lwt_reroute_run, NULL);
+ if (ASSERT_OK(err, "pthread_create"))
+ ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_seg6local.c b/tools/testing/selftests/bpf/prog_tests/lwt_seg6local.c
new file mode 100644
index 000000000000..3bc730b7c7fa
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_seg6local.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Connects 6 network namespaces through veths.
+ * Each NS may have different IPv6 global scope addresses:
+ *
+ * NS1 NS2 NS3 NS4 NS5 NS6
+ * lo veth1 <-> veth2 veth3 <-> veth4 veth5 <-> veth6 lo veth7 <-> veth8 veth9 <-> veth10 lo
+ * fb00 ::1 ::12 ::21 ::34 ::43 ::56 ::65 ::78 ::87 ::910 ::109 ::6
+ * fd00 ::4
+ * fc42 ::1
+ *
+ * All IPv6 packets going to fb00::/16 through NS2 will be encapsulated in an
+ * IPv6 header with a Segment Routing Header, with segments:
+ * fd00::1 -> fd00::2 -> fd00::3 -> fd00::4
+ *
+ * 3 fd00::/16 IPv6 addresses are bound to seg6local End.BPF actions:
+ * - fd00::1: add a TLV, change the flags, and apply an End.X action to fc42::1
+ * - fd00::2: remove the TLV, change the flags, add a tag
+ * - fd00::3: apply an End.T action to fd00::4, through routing table 117
+ *
+ * fd00::4 is a simple Segment Routing node decapsulating the inner IPv6 packet.
+ * Each End.BPF action will validate the operations applied on the SRH by the
+ * previous BPF program in the chain, otherwise the packet is dropped.
+ *
+ * A UDP datagram is sent from fb00::1 to fb00::6. The test succeeds if this
+ * datagram can be read on NS6 when binding to fb00::6.
+ */
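+
+/* For reference, an End.BPF action program has this rough shape (a sketch
+ * only; the real programs live in test_lwt_seg6local.bpf.o):
+ *
+ *	SEC("add_egr_x")
+ *	int __add_egr_x(struct __sk_buff *skb)
+ *	{
+ *		// validate/modify the SRH with bpf_lwt_seg6_adjust_srh()
+ *		// and bpf_lwt_seg6_store_bytes(), then e.g. apply End.X:
+ *		if (bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_X,
+ *					&next_hop, sizeof(next_hop)))
+ *			return BPF_DROP;
+ *		return BPF_REDIRECT;
+ *	}
+ */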
+
+#include "network_helpers.h"
+#include "test_progs.h"
+
+#define NETNS_BASE "lwt-seg6local-"
+#define BPF_FILE "test_lwt_seg6local.bpf.o"
+
+static void cleanup(void)
+{
+ int ns;
+
+ for (ns = 1; ns < 7; ns++)
+ SYS_NOFAIL("ip netns del %s%d", NETNS_BASE, ns);
+}
+
+static int setup(void)
+{
+ int ns;
+
+ for (ns = 1; ns < 7; ns++)
+ SYS(fail, "ip netns add %s%d", NETNS_BASE, ns);
+
+ SYS(fail, "ip -n %s6 link set dev lo up", NETNS_BASE);
+
+ for (ns = 1; ns < 6; ns++) {
+ int local_id = ns * 2 - 1;
+ int peer_id = ns * 2;
+ int next_ns = ns + 1;
+
+ SYS(fail, "ip -n %s%d link add veth%d type veth peer name veth%d netns %s%d",
+ NETNS_BASE, ns, local_id, peer_id, NETNS_BASE, next_ns);
+
+ SYS(fail, "ip -n %s%d link set dev veth%d up", NETNS_BASE, ns, local_id);
+ SYS(fail, "ip -n %s%d link set dev veth%d up", NETNS_BASE, next_ns, peer_id);
+
+ /* All link scope addresses to veths */
+ SYS(fail, "ip -n %s%d -6 addr add fb00::%d%d/16 dev veth%d scope link",
+ NETNS_BASE, ns, local_id, peer_id, local_id);
+ SYS(fail, "ip -n %s%d -6 addr add fb00::%d%d/16 dev veth%d scope link",
+ NETNS_BASE, next_ns, peer_id, local_id, peer_id);
+ }
+
+ SYS(fail, "ip -n %s5 -6 route add fb00::109 table 117 dev veth9 scope link", NETNS_BASE);
+
+ SYS(fail, "ip -n %s1 -6 addr add fb00::1/16 dev lo", NETNS_BASE);
+ SYS(fail, "ip -n %s1 -6 route add fb00::6 dev veth1 via fb00::21", NETNS_BASE);
+
+ SYS(fail, "ip -n %s2 -6 route add fb00::6 encap bpf in obj %s sec encap_srh dev veth2",
+ NETNS_BASE, BPF_FILE);
+ SYS(fail, "ip -n %s2 -6 route add fd00::1 dev veth3 via fb00::43 scope link", NETNS_BASE);
+
+ SYS(fail, "ip -n %s3 -6 route add fc42::1 dev veth5 via fb00::65", NETNS_BASE);
+ SYS(fail,
+ "ip -n %s3 -6 route add fd00::1 encap seg6local action End.BPF endpoint obj %s sec add_egr_x dev veth4",
+ NETNS_BASE, BPF_FILE);
+
+ SYS(fail,
+ "ip -n %s4 -6 route add fd00::2 encap seg6local action End.BPF endpoint obj %s sec pop_egr dev veth6",
+ NETNS_BASE, BPF_FILE);
+ SYS(fail, "ip -n %s4 -6 addr add fc42::1 dev lo", NETNS_BASE);
+ SYS(fail, "ip -n %s4 -6 route add fd00::3 dev veth7 via fb00::87", NETNS_BASE);
+
+ SYS(fail, "ip -n %s5 -6 route add fd00::4 table 117 dev veth9 via fb00::109", NETNS_BASE);
+ SYS(fail,
+ "ip -n %s5 -6 route add fd00::3 encap seg6local action End.BPF endpoint obj %s sec inspect_t dev veth8",
+ NETNS_BASE, BPF_FILE);
+
+ SYS(fail, "ip -n %s6 -6 addr add fb00::6/16 dev lo", NETNS_BASE);
+ SYS(fail, "ip -n %s6 -6 addr add fd00::4/16 dev lo", NETNS_BASE);
+
+ for (ns = 1; ns < 6; ns++)
+ SYS(fail, "ip netns exec %s%d sysctl -wq net.ipv6.conf.all.forwarding=1",
+ NETNS_BASE, ns);
+
+ SYS(fail, "ip netns exec %s6 sysctl -wq net.ipv6.conf.all.seg6_enabled=1", NETNS_BASE);
+ SYS(fail, "ip netns exec %s6 sysctl -wq net.ipv6.conf.lo.seg6_enabled=1", NETNS_BASE);
+ SYS(fail, "ip netns exec %s6 sysctl -wq net.ipv6.conf.veth10.seg6_enabled=1", NETNS_BASE);
+
+ return 0;
+fail:
+ return -1;
+}
+
+#define SERVER_PORT 7330
+#define CLIENT_PORT 2121
+void test_lwt_seg6local(void)
+{
+ struct sockaddr_in6 server_addr = {};
+ const char *ns1 = NETNS_BASE "1";
+ const char *ns6 = NETNS_BASE "6";
+ struct nstoken *nstoken = NULL;
+	const char foobar[] = "foobar";
+ ssize_t bytes;
+ int sfd, cfd;
+ char buf[7];
+
+ if (!ASSERT_OK(setup(), "setup"))
+ goto out;
+
+ nstoken = open_netns(ns6);
+ if (!ASSERT_OK_PTR(nstoken, "open ns6"))
+ goto out;
+
+ sfd = start_server_str(AF_INET6, SOCK_DGRAM, "fb00::6", SERVER_PORT, NULL);
+ if (!ASSERT_OK_FD(sfd, "start server"))
+ goto close_netns;
+
+ close_netns(nstoken);
+
+ nstoken = open_netns(ns1);
+ if (!ASSERT_OK_PTR(nstoken, "open ns1"))
+ goto close_server;
+
+ cfd = start_server_str(AF_INET6, SOCK_DGRAM, "fb00::1", CLIENT_PORT, NULL);
+ if (!ASSERT_OK_FD(cfd, "start client"))
+ goto close_server;
+
+ close_netns(nstoken);
+ nstoken = NULL;
+
+	/* Send a test datagram through the SRv6 chain */
+ server_addr.sin6_family = AF_INET6;
+ server_addr.sin6_port = htons(SERVER_PORT);
+ if (!ASSERT_EQ(inet_pton(AF_INET6, "fb00::6", &server_addr.sin6_addr), 1,
+ "build target addr"))
+ goto close_client;
+
+ bytes = sendto(cfd, foobar, sizeof(foobar), 0,
+ (struct sockaddr *)&server_addr, sizeof(server_addr));
+ if (!ASSERT_EQ(bytes, sizeof(foobar), "send packet"))
+ goto close_client;
+
+ /* Verify we received all expected bytes */
+ bytes = read(sfd, buf, sizeof(buf));
+ if (!ASSERT_EQ(bytes, sizeof(buf), "receive packet"))
+ goto close_client;
+ ASSERT_STREQ(buf, foobar, "check udp packet");
+
+close_client:
+ close(cfd);
+close_server:
+ close(sfd);
+close_netns:
+ close_netns(nstoken);
+
+out:
+ cleanup();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/map_btf.c b/tools/testing/selftests/bpf/prog_tests/map_btf.c
new file mode 100644
index 000000000000..2c4ef6037573
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_btf.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <test_progs.h>
+
+#include "normal_map_btf.skel.h"
+#include "map_in_map_btf.skel.h"
+
+static void do_test_normal_map_btf(void)
+{
+ struct normal_map_btf *skel;
+ int i, err, new_fd = -1;
+ int map_fd_arr[64];
+
+ skel = normal_map_btf__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_load"))
+ return;
+
+ err = normal_map_btf__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto out;
+
+ skel->bss->pid = getpid();
+ usleep(1);
+ ASSERT_TRUE(skel->bss->done, "done");
+
+	/* Use percpu_array maps to slow bpf_map_free_deferred() down.
+	 * The memory allocation may fail, so don't check the returned fds.
+	 */
+ for (i = 0; i < ARRAY_SIZE(map_fd_arr); i++)
+ map_fd_arr[i] = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, NULL, 4, 4, 256, NULL);
+
+ /* Close array fd later */
+ new_fd = dup(bpf_map__fd(skel->maps.array));
+out:
+ normal_map_btf__destroy(skel);
+ if (new_fd < 0)
+ return;
+	/* Use kern_sync_rcu() to wait for the start of the freeing of the bpf
+	 * program, and use an assumed delay to wait for the release of the map
+	 * BTF held by other maps (e.g., bss). After that, the array map holds
+	 * the last reference on the map BTF.
+	 */
+ kern_sync_rcu();
+ usleep(4000);
+	/* Closing these fds spawns multiple kworkers, delaying the invocation
+	 * of bpf_map_free_deferred() for the array map.
+	 */
+ for (i = 0; i < ARRAY_SIZE(map_fd_arr); i++) {
+ if (map_fd_arr[i] < 0)
+ continue;
+ close(map_fd_arr[i]);
+ }
+ close(new_fd);
+}
+
+static void do_test_map_in_map_btf(void)
+{
+ int err, zero = 0, new_fd = -1;
+ struct map_in_map_btf *skel;
+
+ skel = map_in_map_btf__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_load"))
+ return;
+
+ err = map_in_map_btf__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto out;
+
+ skel->bss->pid = getpid();
+ usleep(1);
+ ASSERT_TRUE(skel->bss->done, "done");
+
+ /* Close inner_array fd later */
+ new_fd = dup(bpf_map__fd(skel->maps.inner_array));
+ /* Defer the free of inner_array */
+ err = bpf_map__delete_elem(skel->maps.outer_array, &zero, sizeof(zero), 0);
+ ASSERT_OK(err, "delete inner map");
+out:
+ map_in_map_btf__destroy(skel);
+ if (new_fd < 0)
+ return;
+	/* Use kern_sync_rcu() to wait for the start of the freeing of the bpf
+	 * program, and use an assumed delay to wait for the freeing of the
+	 * outer map and the release of the map BTF. After that, the inner map
+	 * holds the last reference on the map BTF.
+	 */
+ kern_sync_rcu();
+ usleep(10000);
+ close(new_fd);
+}
+
+void test_map_btf(void)
+{
+ if (test__start_subtest("array_btf"))
+ do_test_normal_map_btf();
+ if (test__start_subtest("inner_array_btf"))
+ do_test_map_in_map_btf();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/map_excl.c b/tools/testing/selftests/bpf/prog_tests/map_excl.c
new file mode 100644
index 000000000000..6bdc6d6de0da
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_excl.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Google LLC. */
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "map_excl.skel.h"
+
+static void test_map_excl_allowed(void)
+{
+	struct map_excl *skel = map_excl__open();
+	int err;
+
+	if (!ASSERT_OK_PTR(skel, "map_excl__open"))
+		return;
+
+ err = bpf_map__set_exclusive_program(skel->maps.excl_map, skel->progs.should_have_access);
+ if (!ASSERT_OK(err, "bpf_map__set_exclusive_program"))
+ goto out;
+
+ bpf_program__set_autoload(skel->progs.should_have_access, true);
+ bpf_program__set_autoload(skel->progs.should_not_have_access, false);
+
+ err = map_excl__load(skel);
+ ASSERT_OK(err, "map_excl__load");
+out:
+ map_excl__destroy(skel);
+}
+
+static void test_map_excl_denied(void)
+{
+	struct map_excl *skel = map_excl__open();
+	int err;
+
+	if (!ASSERT_OK_PTR(skel, "map_excl__open"))
+		return;
+
+ err = bpf_map__set_exclusive_program(skel->maps.excl_map, skel->progs.should_have_access);
+	if (!ASSERT_OK(err, "bpf_map__set_exclusive_program"))
+ goto out;
+
+ bpf_program__set_autoload(skel->progs.should_have_access, false);
+ bpf_program__set_autoload(skel->progs.should_not_have_access, true);
+
+ err = map_excl__load(skel);
+	ASSERT_EQ(err, -EACCES, "exclusive map access not denied");
+out:
+	map_excl__destroy(skel);
+}
+
+void test_map_excl(void)
+{
+ if (test__start_subtest("map_excl_allowed"))
+ test_map_excl_allowed();
+ if (test__start_subtest("map_excl_denied"))
+ test_map_excl_denied();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/map_in_map.c b/tools/testing/selftests/bpf/prog_tests/map_in_map.c
new file mode 100644
index 000000000000..286a9fb469e2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_in_map.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "access_map_in_map.skel.h"
+#include "update_map_in_htab.skel.h"
+
+struct thread_ctx {
+ pthread_barrier_t barrier;
+ int outer_map_fd;
+ int start, abort;
+ int loop, err;
+};
+
+static int wait_for_start_or_abort(struct thread_ctx *ctx)
+{
+ while (!ctx->start && !ctx->abort)
+ usleep(1);
+ return ctx->abort ? -1 : 0;
+}
+
+static void *update_map_fn(void *data)
+{
+ struct thread_ctx *ctx = data;
+ int loop = ctx->loop, err = 0;
+
+ if (wait_for_start_or_abort(ctx) < 0)
+ return NULL;
+ pthread_barrier_wait(&ctx->barrier);
+
+ while (loop-- > 0) {
+ int fd, zero = 0;
+
+ fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL);
+ if (fd < 0) {
+ err |= 1;
+ pthread_barrier_wait(&ctx->barrier);
+ continue;
+ }
+
+ /* Remove the old inner map */
+ if (bpf_map_update_elem(ctx->outer_map_fd, &zero, &fd, 0) < 0)
+ err |= 2;
+ close(fd);
+ pthread_barrier_wait(&ctx->barrier);
+ }
+
+ ctx->err = err;
+
+ return NULL;
+}
+
+static void *access_map_fn(void *data)
+{
+ struct thread_ctx *ctx = data;
+ int loop = ctx->loop;
+
+ if (wait_for_start_or_abort(ctx) < 0)
+ return NULL;
+ pthread_barrier_wait(&ctx->barrier);
+
+ while (loop-- > 0) {
+ /* Access the old inner map */
+ syscall(SYS_getpgid);
+ pthread_barrier_wait(&ctx->barrier);
+ }
+
+ return NULL;
+}
+
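+/* Race an outer-map update (which releases the old inner map) against a BPF
+ * program that still accesses that inner map: update_map_fn swaps the inner
+ * map while access_map_fn triggers the program via getpgid(), with the two
+ * threads lock-stepped by a barrier on each iteration.
+ */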
+static void test_map_in_map_access(const char *prog_name, const char *map_name)
+{
+ struct access_map_in_map *skel;
+ struct bpf_map *outer_map;
+ struct bpf_program *prog;
+ struct thread_ctx ctx;
+ pthread_t tid[2];
+ int err;
+
+ skel = access_map_in_map__open();
+ if (!ASSERT_OK_PTR(skel, "access_map_in_map open"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "find program"))
+ goto out;
+ bpf_program__set_autoload(prog, true);
+
+ outer_map = bpf_object__find_map_by_name(skel->obj, map_name);
+ if (!ASSERT_OK_PTR(outer_map, "find map"))
+ goto out;
+
+ err = access_map_in_map__load(skel);
+ if (!ASSERT_OK(err, "access_map_in_map load"))
+ goto out;
+
+ err = access_map_in_map__attach(skel);
+ if (!ASSERT_OK(err, "access_map_in_map attach"))
+ goto out;
+
+ skel->bss->tgid = getpid();
+
+ memset(&ctx, 0, sizeof(ctx));
+ pthread_barrier_init(&ctx.barrier, NULL, 2);
+ ctx.outer_map_fd = bpf_map__fd(outer_map);
+ ctx.loop = 4;
+
+ err = pthread_create(&tid[0], NULL, update_map_fn, &ctx);
+	if (!ASSERT_OK(err, "update_thread"))
+ goto out;
+
+ err = pthread_create(&tid[1], NULL, access_map_fn, &ctx);
+	if (!ASSERT_OK(err, "access_thread")) {
+ ctx.abort = 1;
+ pthread_join(tid[0], NULL);
+ goto out;
+ }
+
+ ctx.start = 1;
+ pthread_join(tid[0], NULL);
+ pthread_join(tid[1], NULL);
+
+ ASSERT_OK(ctx.err, "err");
+out:
+ access_map_in_map__destroy(skel);
+}
+
+static void add_del_fd_htab(int outer_fd)
+{
+ int inner_fd, err;
+ int key = 1;
+
+ inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr1", 4, 4, 1, NULL);
+ if (!ASSERT_OK_FD(inner_fd, "inner1"))
+ return;
+ err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_NOEXIST);
+ close(inner_fd);
+ if (!ASSERT_OK(err, "add"))
+ return;
+
+ /* Delete */
+ err = bpf_map_delete_elem(outer_fd, &key);
+ ASSERT_OK(err, "del");
+}
+
+static void overwrite_fd_htab(int outer_fd)
+{
+ int inner_fd, err;
+ int key = 1;
+
+ inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr1", 4, 4, 1, NULL);
+ if (!ASSERT_OK_FD(inner_fd, "inner1"))
+ return;
+ err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_NOEXIST);
+ close(inner_fd);
+ if (!ASSERT_OK(err, "add"))
+ return;
+
+ /* Overwrite */
+ inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr2", 4, 4, 1, NULL);
+ if (!ASSERT_OK_FD(inner_fd, "inner2"))
+ goto out;
+ err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_EXIST);
+ close(inner_fd);
+ if (!ASSERT_OK(err, "overwrite"))
+ goto out;
+
+ err = bpf_map_delete_elem(outer_fd, &key);
+ ASSERT_OK(err, "del");
+ return;
+out:
+ bpf_map_delete_elem(outer_fd, &key);
+}
+
+static void lookup_delete_fd_htab(int outer_fd)
+{
+ int key = 1, value;
+ int inner_fd, err;
+
+ inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr1", 4, 4, 1, NULL);
+ if (!ASSERT_OK_FD(inner_fd, "inner1"))
+ return;
+ err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_NOEXIST);
+ close(inner_fd);
+ if (!ASSERT_OK(err, "add"))
+ return;
+
+ /* lookup_and_delete is not supported for htab of maps */
+ err = bpf_map_lookup_and_delete_elem(outer_fd, &key, &value);
+ ASSERT_EQ(err, -ENOTSUPP, "lookup_del");
+
+ err = bpf_map_delete_elem(outer_fd, &key);
+ ASSERT_OK(err, "del");
+}
+
+static void batched_lookup_delete_fd_htab(int outer_fd)
+{
+ int keys[2] = {1, 2}, values[2];
+ unsigned int cnt, batch;
+ int inner_fd, err;
+
+ inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr1", 4, 4, 1, NULL);
+ if (!ASSERT_OK_FD(inner_fd, "inner1"))
+ return;
+
+ err = bpf_map_update_elem(outer_fd, &keys[0], &inner_fd, BPF_NOEXIST);
+ close(inner_fd);
+ if (!ASSERT_OK(err, "add1"))
+ return;
+
+ inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr2", 4, 4, 1, NULL);
+ if (!ASSERT_OK_FD(inner_fd, "inner2"))
+ goto out;
+ err = bpf_map_update_elem(outer_fd, &keys[1], &inner_fd, BPF_NOEXIST);
+ close(inner_fd);
+ if (!ASSERT_OK(err, "add2"))
+ goto out;
+
+ /* batched lookup_and_delete */
+ cnt = ARRAY_SIZE(keys);
+ err = bpf_map_lookup_and_delete_batch(outer_fd, NULL, &batch, keys, values, &cnt, NULL);
+ ASSERT_TRUE((!err || err == -ENOENT), "delete_batch ret");
+ ASSERT_EQ(cnt, ARRAY_SIZE(keys), "delete_batch cnt");
+
+out:
+ bpf_map_delete_elem(outer_fd, &keys[0]);
+}
+
+static void test_update_map_in_htab(bool preallocate)
+{
+ struct update_map_in_htab *skel;
+ int err, fd;
+
+ skel = update_map_in_htab__open();
+ if (!ASSERT_OK_PTR(skel, "open"))
+ return;
+
+ err = update_map_in_htab__load(skel);
+ if (!ASSERT_OK(err, "load"))
+ goto out;
+
+ fd = preallocate ? bpf_map__fd(skel->maps.outer_htab_map) :
+ bpf_map__fd(skel->maps.outer_alloc_htab_map);
+
+ add_del_fd_htab(fd);
+ overwrite_fd_htab(fd);
+ lookup_delete_fd_htab(fd);
+ batched_lookup_delete_fd_htab(fd);
+out:
+ update_map_in_htab__destroy(skel);
+}
+
+void test_map_in_map(void)
+{
+ if (test__start_subtest("acc_map_in_array"))
+ test_map_in_map_access("access_map_in_array", "outer_array_map");
+ if (test__start_subtest("sleepable_acc_map_in_array"))
+ test_map_in_map_access("sleepable_access_map_in_array", "outer_array_map");
+ if (test__start_subtest("acc_map_in_htab"))
+ test_map_in_map_access("access_map_in_htab", "outer_htab_map");
+ if (test__start_subtest("sleepable_acc_map_in_htab"))
+ test_map_in_map_access("sleepable_access_map_in_htab", "outer_htab_map");
+ if (test__start_subtest("update_map_in_htab"))
+ test_update_map_in_htab(true);
+ if (test__start_subtest("update_map_in_alloc_htab"))
+ test_update_map_in_htab(false);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
new file mode 100644
index 000000000000..8743df599567
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "map_kptr.skel.h"
+#include "map_kptr_fail.skel.h"
+#include "rcu_tasks_trace_gp.skel.h"
+
+static void test_map_kptr_success(bool test_run)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, lopts);
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ int key = 0, ret, cpu;
+ struct map_kptr *skel;
+ char buf[16], *pbuf;
+
+ skel = map_kptr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref1), &opts);
+ ASSERT_OK(ret, "test_map_kptr_ref1 refcount");
+ ASSERT_OK(opts.retval, "test_map_kptr_ref1 retval");
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref2), &opts);
+ ASSERT_OK(ret, "test_map_kptr_ref2 refcount");
+ ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval");
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref1), &lopts);
+ ASSERT_OK(ret, "test_ls_map_kptr_ref1 refcount");
+ ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref1 retval");
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref2), &lopts);
+ ASSERT_OK(ret, "test_ls_map_kptr_ref2 refcount");
+ ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref2 retval");
+
+ if (test_run)
+ goto exit;
+
+ cpu = libbpf_num_possible_cpus();
+ if (!ASSERT_GT(cpu, 0, "libbpf_num_possible_cpus"))
+ goto exit;
+
+ pbuf = calloc(cpu, sizeof(buf));
+ if (!ASSERT_OK_PTR(pbuf, "calloc(pbuf)"))
+ goto exit;
+
+ ret = bpf_map__update_elem(skel->maps.array_map,
+ &key, sizeof(key), buf, sizeof(buf), 0);
+ ASSERT_OK(ret, "array_map update");
+ skel->data->ref--;
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
+ ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
+ ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
+
+ ret = bpf_map__update_elem(skel->maps.pcpu_array_map,
+ &key, sizeof(key), pbuf, cpu * sizeof(buf), 0);
+ ASSERT_OK(ret, "pcpu_array_map update");
+ skel->data->ref--;
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
+ ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
+ ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
+
+ ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0);
+ ASSERT_OK(ret, "hash_map delete");
+ skel->data->ref--;
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
+ ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
+ ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
+
+ ret = bpf_map__delete_elem(skel->maps.pcpu_hash_map, &key, sizeof(key), 0);
+ ASSERT_OK(ret, "pcpu_hash_map delete");
+ skel->data->ref--;
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
+ ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
+ ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
+
+ ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0);
+ ASSERT_OK(ret, "hash_malloc_map delete");
+ skel->data->ref--;
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
+ ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
+ ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
+
+ ret = bpf_map__delete_elem(skel->maps.pcpu_hash_malloc_map, &key, sizeof(key), 0);
+ ASSERT_OK(ret, "pcpu_hash_malloc_map delete");
+ skel->data->ref--;
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
+ ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
+ ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
+
+ ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);
+ ASSERT_OK(ret, "lru_hash_map delete");
+ skel->data->ref--;
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
+ ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
+ ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
+
+ ret = bpf_map__delete_elem(skel->maps.lru_pcpu_hash_map, &key, sizeof(key), 0);
+ ASSERT_OK(ret, "lru_pcpu_hash_map delete");
+ skel->data->ref--;
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
+ ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
+ ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref_del), &lopts);
+ ASSERT_OK(ret, "test_ls_map_kptr_ref_del delete");
+ skel->data->ref--;
+ ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref_del retval");
+
+ free(pbuf);
+exit:
+ map_kptr__destroy(skel);
+}
+
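+/* Trigger an RCU tasks trace grace period via the helper program and spin
+ * until gp_seq advances, so that kptr references released under tasks-trace
+ * RCU have actually been dropped before the next check.
+ */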
+static int kern_sync_rcu_tasks_trace(struct rcu_tasks_trace_gp *rcu)
+{
+ long gp_seq = READ_ONCE(rcu->bss->gp_seq);
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+
+ if (!ASSERT_OK(bpf_prog_test_run_opts(bpf_program__fd(rcu->progs.do_call_rcu_tasks_trace),
+ &opts), "do_call_rcu_tasks_trace"))
+ return -EFAULT;
+ if (!ASSERT_OK(opts.retval, "opts.retval == 0"))
+ return -EFAULT;
+ while (gp_seq == READ_ONCE(rcu->bss->gp_seq))
+ sched_yield();
+ return 0;
+}
+
+void serial_test_map_kptr(void)
+{
+ struct rcu_tasks_trace_gp *skel;
+
+ RUN_TESTS(map_kptr_fail);
+
+ skel = rcu_tasks_trace_gp__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rcu_tasks_trace_gp__open_and_load"))
+ return;
+ if (!ASSERT_OK(rcu_tasks_trace_gp__attach(skel), "rcu_tasks_trace_gp__attach"))
+ goto end;
+
+ if (test__start_subtest("success-map")) {
+ test_map_kptr_success(true);
+
+ ASSERT_OK(kern_sync_rcu_tasks_trace(skel), "sync rcu_tasks_trace");
+ ASSERT_OK(kern_sync_rcu(), "sync rcu");
+ /* Observe refcount dropping to 1 on bpf_map_free_deferred */
+ test_map_kptr_success(false);
+
+ ASSERT_OK(kern_sync_rcu_tasks_trace(skel), "sync rcu_tasks_trace");
+ ASSERT_OK(kern_sync_rcu(), "sync rcu");
+ /* Observe refcount dropping to 1 on synchronous delete elem */
+ test_map_kptr_success(true);
+ }
+
+end:
+ rcu_tasks_trace_gp__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c
index ce17b1ed8709..1d6726f01dd2 100644
--- a/tools/testing/selftests/bpf/prog_tests/map_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c
@@ -4,14 +4,17 @@
static void *spin_lock_thread(void *arg)
{
- __u32 duration, retval;
int err, prog_fd = *(u32 *) arg;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 10000,
+ );
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run_opts err");
+ ASSERT_OK(topts.retval, "test_run_opts retval");
- err = bpf_prog_test_run(prog_fd, 10000, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
pthread_exit(arg);
}
@@ -46,16 +49,16 @@ out:
void test_map_lock(void)
{
- const char *file = "./test_map_lock.o";
+ const char *file = "./test_map_lock.bpf.o";
int prog_fd, map_fd[2], vars[17] = {};
pthread_t thread_id[6];
struct bpf_object *obj = NULL;
int err = 0, key = 0, i;
void *ret;
- err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
if (CHECK_FAIL(err)) {
- printf("test_map_lock:bpf_prog_load errno %d\n", errno);
+ printf("test_map_lock:bpf_prog_test_load errno %d\n", errno);
goto close_prog;
}
map_fd[0] = bpf_find_map(__func__, obj, "hash_map");
diff --git a/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
new file mode 100644
index 000000000000..bfb1bf3fd427
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Bytedance */
+
+#include <test_progs.h>
+#include "test_map_lookup_percpu_elem.skel.h"
+
+void test_map_lookup_percpu_elem(void)
+{
+ struct test_map_lookup_percpu_elem *skel;
+ __u64 key = 0, sum;
+ int ret, i, nr_cpus = libbpf_num_possible_cpus();
+ __u64 *buf;
+
+	buf = malloc(nr_cpus * sizeof(__u64));
+ if (!ASSERT_OK_PTR(buf, "malloc"))
+ return;
+
+ for (i = 0; i < nr_cpus; i++)
+ buf[i] = i;
+ sum = (nr_cpus - 1) * nr_cpus / 2;
+
+ skel = test_map_lookup_percpu_elem__open();
+ if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open"))
+ goto exit;
+
+ skel->rodata->my_pid = getpid();
+ skel->rodata->nr_cpus = nr_cpus;
+
+ ret = test_map_lookup_percpu_elem__load(skel);
+ if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__load"))
+ goto cleanup;
+
+ ret = test_map_lookup_percpu_elem__attach(skel);
+ if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach"))
+ goto cleanup;
+
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_array_map), &key, buf, 0);
+ ASSERT_OK(ret, "percpu_array_map update");
+
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_hash_map), &key, buf, 0);
+ ASSERT_OK(ret, "percpu_hash_map update");
+
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_lru_hash_map), &key, buf, 0);
+ ASSERT_OK(ret, "percpu_lru_hash_map update");
+
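+	/* Trigger the attached BPF programs, which sum the per-CPU values
+	 * into the bss counters checked below.
+	 */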
+ syscall(__NR_getuid);
+
+ test_map_lookup_percpu_elem__detach(skel);
+
+ ASSERT_EQ(skel->bss->percpu_array_elem_sum, sum, "percpu_array lookup percpu elem");
+ ASSERT_EQ(skel->bss->percpu_hash_elem_sum, sum, "percpu_hash lookup percpu elem");
+ ASSERT_EQ(skel->bss->percpu_lru_hash_elem_sum, sum, "percpu_lru_hash lookup percpu elem");
+
+cleanup:
+ test_map_lookup_percpu_elem__destroy(skel);
+exit:
+ free(buf);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/map_ops.c b/tools/testing/selftests/bpf/prog_tests/map_ops.c
new file mode 100644
index 000000000000..be5e42a413b4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_ops.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <errno.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include "test_map_ops.skel.h"
+#include "test_progs.h"
+
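+/* Each helper below issues a distinct syscall that one of the BPF programs
+ * in test_map_ops.bpf.o is expected to hook; the outcome of the exercised
+ * map operation is reported back through skel->bss->err.
+ */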
+static void map_update(void)
+{
+ (void)syscall(__NR_getpid);
+}
+
+static void map_delete(void)
+{
+ (void)syscall(__NR_getppid);
+}
+
+static void map_push(void)
+{
+ (void)syscall(__NR_getuid);
+}
+
+static void map_pop(void)
+{
+ (void)syscall(__NR_geteuid);
+}
+
+static void map_peek(void)
+{
+ (void)syscall(__NR_getgid);
+}
+
+static void map_for_each_pass(void)
+{
+ (void)syscall(__NR_gettid);
+}
+
+static void map_for_each_fail(void)
+{
+ (void)syscall(__NR_getpgid);
+}
+
+static int setup(struct test_map_ops **skel)
+{
+ int err = 0;
+
+ if (!skel)
+ return -1;
+
+ *skel = test_map_ops__open();
+ if (!ASSERT_OK_PTR(*skel, "test_map_ops__open"))
+ return -1;
+
+ (*skel)->rodata->pid = getpid();
+
+ err = test_map_ops__load(*skel);
+ if (!ASSERT_OK(err, "test_map_ops__load"))
+ return err;
+
+ err = test_map_ops__attach(*skel);
+ if (!ASSERT_OK(err, "test_map_ops__attach"))
+ return err;
+
+ return err;
+}
+
+static void teardown(struct test_map_ops **skel)
+{
+ if (skel && *skel)
+ test_map_ops__destroy(*skel);
+}
+
+static void map_ops_update_delete_subtest(void)
+{
+ struct test_map_ops *skel;
+
+ if (setup(&skel))
+ goto teardown;
+
+ map_update();
+ ASSERT_OK(skel->bss->err, "map_update_initial");
+
+ map_update();
+ ASSERT_LT(skel->bss->err, 0, "map_update_existing");
+ ASSERT_EQ(skel->bss->err, -EEXIST, "map_update_existing");
+
+ map_delete();
+ ASSERT_OK(skel->bss->err, "map_delete_existing");
+
+ map_delete();
+ ASSERT_LT(skel->bss->err, 0, "map_delete_non_existing");
+ ASSERT_EQ(skel->bss->err, -ENOENT, "map_delete_non_existing");
+
+teardown:
+ teardown(&skel);
+}
+
+static void map_ops_push_peek_pop_subtest(void)
+{
+ struct test_map_ops *skel;
+
+ if (setup(&skel))
+ goto teardown;
+
+ map_push();
+ ASSERT_OK(skel->bss->err, "map_push_initial");
+
+ map_push();
+ ASSERT_LT(skel->bss->err, 0, "map_push_when_full");
+ ASSERT_EQ(skel->bss->err, -E2BIG, "map_push_when_full");
+
+ map_peek();
+ ASSERT_OK(skel->bss->err, "map_peek");
+
+ map_pop();
+ ASSERT_OK(skel->bss->err, "map_pop");
+
+ map_peek();
+ ASSERT_LT(skel->bss->err, 0, "map_peek_when_empty");
+ ASSERT_EQ(skel->bss->err, -ENOENT, "map_peek_when_empty");
+
+ map_pop();
+ ASSERT_LT(skel->bss->err, 0, "map_pop_when_empty");
+ ASSERT_EQ(skel->bss->err, -ENOENT, "map_pop_when_empty");
+
+teardown:
+ teardown(&skel);
+}
+
+static void map_ops_for_each_subtest(void)
+{
+ struct test_map_ops *skel;
+
+ if (setup(&skel))
+ goto teardown;
+
+ map_for_each_pass();
+ /* expect to iterate over 1 element */
+ ASSERT_EQ(skel->bss->err, 1, "map_for_each_no_flags");
+
+ map_for_each_fail();
+ ASSERT_LT(skel->bss->err, 0, "map_for_each_with_flags");
+ ASSERT_EQ(skel->bss->err, -EINVAL, "map_for_each_with_flags");
+
+teardown:
+ teardown(&skel);
+}
+
+void test_map_ops(void)
+{
+ if (test__start_subtest("map_ops_update_delete"))
+ map_ops_update_delete_subtest();
+
+ if (test__start_subtest("map_ops_push_peek_pop"))
+ map_ops_push_peek_pop_subtest();
+
+ if (test__start_subtest("map_ops_for_each"))
+ map_ops_for_each_subtest();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/map_ptr.c b/tools/testing/selftests/bpf/prog_tests/map_ptr.c
index c230a573c373..43e502acf050 100644
--- a/tools/testing/selftests/bpf/prog_tests/map_ptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/map_ptr.c
@@ -4,29 +4,42 @@
#include <test_progs.h>
#include <network_helpers.h>
-#include "map_ptr_kern.skel.h"
+#include "map_ptr_kern.lskel.h"
void test_map_ptr(void)
{
- struct map_ptr_kern *skel;
- __u32 duration = 0, retval;
+ struct map_ptr_kern_lskel *skel;
char buf[128];
int err;
-
- skel = map_ptr_kern__open_and_load();
- if (CHECK(!skel, "skel_open_load", "open_load failed\n"))
+ int page_size = getpagesize();
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = buf,
+ .data_size_out = sizeof(buf),
+ .repeat = 1,
+ );
+
+ skel = map_ptr_kern_lskel__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
- err = bpf_prog_test_run(bpf_program__fd(skel->progs.cg_skb), 1, &pkt_v4,
- sizeof(pkt_v4), buf, NULL, &retval, NULL);
+ skel->maps.m_ringbuf.max_entries = page_size;
+
+ err = map_ptr_kern_lskel__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ skel->bss->page_size = page_size;
+
+ err = bpf_prog_test_run_opts(skel->progs.cg_skb.prog_fd, &topts);
- if (CHECK(err, "test_run", "err=%d errno=%d\n", err, errno))
+ if (!ASSERT_OK(err, "test_run"))
goto cleanup;
- if (CHECK(!retval, "retval", "retval=%d map_type=%u line=%u\n", retval,
- skel->bss->g_map_type, skel->bss->g_line))
+ if (!ASSERT_NEQ(topts.retval, 0, "test_run retval"))
goto cleanup;
cleanup:
- map_ptr_kern__destroy(skel);
+ map_ptr_kern_lskel__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/mem_rdonly_untrusted.c b/tools/testing/selftests/bpf/prog_tests/mem_rdonly_untrusted.c
new file mode 100644
index 000000000000..40d4f687bd9c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/mem_rdonly_untrusted.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <test_progs.h>
+#include "mem_rdonly_untrusted.skel.h"
+
+void test_mem_rdonly_untrusted(void)
+{
+ RUN_TESTS(mem_rdonly_untrusted);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/metadata.c b/tools/testing/selftests/bpf/prog_tests/metadata.c
index 2c53eade88e3..8b67dfc10f5c 100644
--- a/tools/testing/selftests/bpf/prog_tests/metadata.c
+++ b/tools/testing/selftests/bpf/prog_tests/metadata.c
@@ -16,7 +16,7 @@ static int duration;
static int prog_holds_map(int prog_fd, int map_fd)
{
struct bpf_prog_info prog_info = {};
- struct bpf_prog_info map_info = {};
+ struct bpf_map_info map_info = {};
__u32 prog_info_len;
__u32 map_info_len;
__u32 *map_ids;
@@ -25,12 +25,12 @@ static int prog_holds_map(int prog_fd, int map_fd)
int i;
map_info_len = sizeof(map_info);
- ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
+ ret = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
if (ret)
return -errno;
prog_info_len = sizeof(prog_info);
- ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
if (ret)
return -errno;
@@ -44,7 +44,7 @@ static int prog_holds_map(int prog_fd, int map_fd)
prog_info.map_ids = ptr_to_u64(map_ids);
prog_info_len = sizeof(prog_info);
- ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
if (ret) {
ret = -errno;
goto free_map_ids;
diff --git a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
new file mode 100644
index 000000000000..653b0a20fab9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
@@ -0,0 +1,559 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Check if we can migrate child sockets.
+ *
+ * 1. call listen() for 4 server sockets.
+ * 2. call connect() for 25 client sockets.
+ * 3. call listen() for 1 server socket. (migration target)
+ * 4. update a map to migrate all child sockets
+ * to the last server socket (migrate_map[cookie] = 4)
+ * 5. call shutdown() for first 4 server sockets
+ * and migrate the requests in the accept queue
+ * to the last server socket.
+ * 6. call listen() for the second server socket.
+ * 7. call shutdown() for the last server
+ * and migrate the requests in the accept queue
+ * to the second server socket.
+ * 8. call listen() for the last server.
+ * 9. call shutdown() for the second server
+ * and migrate the requests in the accept queue
+ * to the last server socket.
+ * 10. call accept() for the last server socket.
+ *
+ * Author: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
+ */
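+
+/* For reference, the rough shape of the sk_reuseport migration program (a
+ * sketch only; the skeleton test_migrate_reuseport.skel.h is the source of
+ * truth):
+ *
+ *	SEC("sk_reuseport/migrate")
+ *	int migrate_reuseport(struct sk_reuseport_md *reuse_md)
+ *	{
+ *		__u64 cookie = bpf_get_socket_cookie(reuse_md->sk);
+ *		int *index = bpf_map_lookup_elem(&migrate_map, &cookie);
+ *
+ *		if (index)
+ *			bpf_sk_select_reuseport(reuse_md, &reuseport_map,
+ *						index, 0);
+ *		return SK_PASS;
+ *	}
+ */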
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "test_progs.h"
+#include "test_migrate_reuseport.skel.h"
+#include "network_helpers.h"
+
+#ifndef TCP_FASTOPEN_CONNECT
+#define TCP_FASTOPEN_CONNECT 30
+#endif
+
+#define IFINDEX_LO 1
+
+#define NR_SERVERS 5
+#define NR_CLIENTS (NR_SERVERS * 5)
+#define MIGRATED_TO (NR_SERVERS - 1)
+
+/* fastopenq->max_qlen and sk->sk_max_ack_backlog */
+#define QLEN (NR_CLIENTS * 5)
+
+#define MSG "Hello World\0"
+#define MSGLEN 12
+
+static struct migrate_reuseport_test_case {
+ const char *name;
+ __s64 servers[NR_SERVERS];
+ __s64 clients[NR_CLIENTS];
+ struct sockaddr_storage addr;
+ socklen_t addrlen;
+ int family;
+ int state;
+ bool drop_ack;
+ bool expire_synack_timer;
+ bool fastopen;
+ struct bpf_link *link;
+} test_cases[] = {
+ {
+ .name = "IPv4 TCP_ESTABLISHED inet_csk_listen_stop",
+ .family = AF_INET,
+ .state = BPF_TCP_ESTABLISHED,
+ .drop_ack = false,
+ .expire_synack_timer = false,
+ .fastopen = false,
+ },
+ {
+ .name = "IPv4 TCP_SYN_RECV inet_csk_listen_stop",
+ .family = AF_INET,
+ .state = BPF_TCP_SYN_RECV,
+ .drop_ack = true,
+ .expire_synack_timer = false,
+ .fastopen = true,
+ },
+ {
+ .name = "IPv4 TCP_NEW_SYN_RECV reqsk_timer_handler",
+ .family = AF_INET,
+ .state = BPF_TCP_NEW_SYN_RECV,
+ .drop_ack = true,
+ .expire_synack_timer = true,
+ .fastopen = false,
+ },
+ {
+ .name = "IPv4 TCP_NEW_SYN_RECV inet_csk_complete_hashdance",
+ .family = AF_INET,
+ .state = BPF_TCP_NEW_SYN_RECV,
+ .drop_ack = true,
+ .expire_synack_timer = false,
+ .fastopen = false,
+ },
+ {
+ .name = "IPv6 TCP_ESTABLISHED inet_csk_listen_stop",
+ .family = AF_INET6,
+ .state = BPF_TCP_ESTABLISHED,
+ .drop_ack = false,
+ .expire_synack_timer = false,
+ .fastopen = false,
+ },
+ {
+ .name = "IPv6 TCP_SYN_RECV inet_csk_listen_stop",
+ .family = AF_INET6,
+ .state = BPF_TCP_SYN_RECV,
+ .drop_ack = true,
+ .expire_synack_timer = false,
+ .fastopen = true,
+ },
+ {
+ .name = "IPv6 TCP_NEW_SYN_RECV reqsk_timer_handler",
+ .family = AF_INET6,
+ .state = BPF_TCP_NEW_SYN_RECV,
+ .drop_ack = true,
+ .expire_synack_timer = true,
+ .fastopen = false,
+ },
+ {
+ .name = "IPv6 TCP_NEW_SYN_RECV inet_csk_complete_hashdance",
+ .family = AF_INET6,
+ .state = BPF_TCP_NEW_SYN_RECV,
+ .drop_ack = true,
+ .expire_synack_timer = false,
+ .fastopen = false,
+ }
+};
+
+static void init_fds(__s64 fds[], int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ fds[i] = -1;
+}
+
+static void close_fds(__s64 fds[], int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (fds[i] != -1) {
+ close(fds[i]);
+ fds[i] = -1;
+ }
+ }
+}
+
+static int setup_fastopen(char *buf, int size, int *saved_len, bool restore)
+{
+ int err = 0, fd, len;
+
+ fd = open("/proc/sys/net/ipv4/tcp_fastopen", O_RDWR);
+ if (!ASSERT_NEQ(fd, -1, "open"))
+ return -1;
+
+ if (restore) {
+ len = write(fd, buf, *saved_len);
+ if (!ASSERT_EQ(len, *saved_len, "write - restore"))
+ err = -1;
+ } else {
+ *saved_len = read(fd, buf, size);
+ if (!ASSERT_GE(*saved_len, 1, "read")) {
+ err = -1;
+ goto close;
+ }
+
+ err = lseek(fd, 0, SEEK_SET);
+ if (!ASSERT_OK(err, "lseek"))
+ goto close;
+
+ /* (TFO_CLIENT_ENABLE | TFO_SERVER_ENABLE |
+ * TFO_CLIENT_NO_COOKIE | TFO_SERVER_COOKIE_NOT_REQD)
+ */
+ len = write(fd, "519", 3);
+ if (!ASSERT_EQ(len, 3, "write - setup"))
+ err = -1;
+ }
+
+close:
+ close(fd);
+
+ return err;
+}
+
+static int drop_ack(struct migrate_reuseport_test_case *test_case,
+ struct test_migrate_reuseport *skel)
+{
+ if (test_case->family == AF_INET)
+ skel->bss->server_port = ((struct sockaddr_in *)
+ &test_case->addr)->sin_port;
+ else
+ skel->bss->server_port = ((struct sockaddr_in6 *)
+ &test_case->addr)->sin6_port;
+
+ test_case->link = bpf_program__attach_xdp(skel->progs.drop_ack,
+ IFINDEX_LO);
+ if (!ASSERT_OK_PTR(test_case->link, "bpf_program__attach_xdp"))
+ return -1;
+
+ return 0;
+}
+
+static int pass_ack(struct migrate_reuseport_test_case *test_case)
+{
+ int err;
+
+ err = bpf_link__destroy(test_case->link);
+ if (!ASSERT_OK(err, "bpf_link__destroy"))
+ return -1;
+
+ test_case->link = NULL;
+
+ return 0;
+}
+
+static int start_servers(struct migrate_reuseport_test_case *test_case,
+ struct test_migrate_reuseport *skel)
+{
+ int i, err, prog_fd, reuseport = 1, qlen = QLEN;
+
+ prog_fd = bpf_program__fd(skel->progs.migrate_reuseport);
+
+ make_sockaddr(test_case->family,
+ test_case->family == AF_INET ? "127.0.0.1" : "::1", 0,
+ &test_case->addr, &test_case->addrlen);
+
+ for (i = 0; i < NR_SERVERS; i++) {
+ test_case->servers[i] = socket(test_case->family, SOCK_STREAM,
+ IPPROTO_TCP);
+ if (!ASSERT_NEQ(test_case->servers[i], -1, "socket"))
+ return -1;
+
+ err = setsockopt(test_case->servers[i], SOL_SOCKET,
+ SO_REUSEPORT, &reuseport, sizeof(reuseport));
+ if (!ASSERT_OK(err, "setsockopt - SO_REUSEPORT"))
+ return -1;
+
+ err = bind(test_case->servers[i],
+ (struct sockaddr *)&test_case->addr,
+ test_case->addrlen);
+ if (!ASSERT_OK(err, "bind"))
+ return -1;
+
+ if (i == 0) {
+ err = setsockopt(test_case->servers[i], SOL_SOCKET,
+ SO_ATTACH_REUSEPORT_EBPF,
+ &prog_fd, sizeof(prog_fd));
+ if (!ASSERT_OK(err,
+ "setsockopt - SO_ATTACH_REUSEPORT_EBPF"))
+ return -1;
+
+ err = getsockname(test_case->servers[i],
+ (struct sockaddr *)&test_case->addr,
+ &test_case->addrlen);
+ if (!ASSERT_OK(err, "getsockname"))
+ return -1;
+ }
+
+ if (test_case->fastopen) {
+ err = setsockopt(test_case->servers[i],
+ SOL_TCP, TCP_FASTOPEN,
+ &qlen, sizeof(qlen));
+ if (!ASSERT_OK(err, "setsockopt - TCP_FASTOPEN"))
+ return -1;
+ }
+
+ /* All requests will be tied to the first four listeners */
+ if (i != MIGRATED_TO) {
+ err = listen(test_case->servers[i], qlen);
+ if (!ASSERT_OK(err, "listen"))
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int start_clients(struct migrate_reuseport_test_case *test_case)
+{
+ char buf[MSGLEN] = MSG;
+ int i, err;
+
+ for (i = 0; i < NR_CLIENTS; i++) {
+ test_case->clients[i] = socket(test_case->family, SOCK_STREAM,
+ IPPROTO_TCP);
+ if (!ASSERT_NEQ(test_case->clients[i], -1, "socket"))
+ return -1;
+
+ /* The attached XDP program drops only the final ACK, so
+ * clients will transition to TCP_ESTABLISHED immediately.
+ */
+ err = settimeo(test_case->clients[i], 100);
+ if (!ASSERT_OK(err, "settimeo"))
+ return -1;
+
+ if (test_case->fastopen) {
+ int fastopen = 1;
+
+ err = setsockopt(test_case->clients[i], IPPROTO_TCP,
+ TCP_FASTOPEN_CONNECT, &fastopen,
+ sizeof(fastopen));
+ if (!ASSERT_OK(err,
+ "setsockopt - TCP_FASTOPEN_CONNECT"))
+ return -1;
+ }
+
+ err = connect(test_case->clients[i],
+ (struct sockaddr *)&test_case->addr,
+ test_case->addrlen);
+ if (!ASSERT_OK(err, "connect"))
+ return -1;
+
+ err = write(test_case->clients[i], buf, MSGLEN);
+ if (!ASSERT_EQ(err, MSGLEN, "write"))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int update_maps(struct migrate_reuseport_test_case *test_case,
+ struct test_migrate_reuseport *skel)
+{
+ int i, err, migrated_to = MIGRATED_TO;
+ int reuseport_map_fd, migrate_map_fd;
+ __u64 value;
+
+ reuseport_map_fd = bpf_map__fd(skel->maps.reuseport_map);
+ migrate_map_fd = bpf_map__fd(skel->maps.migrate_map);
+
+ for (i = 0; i < NR_SERVERS; i++) {
+ value = (__u64)test_case->servers[i];
+ err = bpf_map_update_elem(reuseport_map_fd, &i, &value,
+ BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem - reuseport_map"))
+ return -1;
+
+ err = bpf_map_lookup_elem(reuseport_map_fd, &i, &value);
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem - reuseport_map"))
+ return -1;
+
+ err = bpf_map_update_elem(migrate_map_fd, &value, &migrated_to,
+ BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem - migrate_map"))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int migrate_dance(struct migrate_reuseport_test_case *test_case)
+{
+ int i, err;
+
+ /* Migrate TCP_ESTABLISHED and TCP_SYN_RECV requests
+ * to the last listener based on eBPF.
+ */
+ for (i = 0; i < MIGRATED_TO; i++) {
+ err = shutdown(test_case->servers[i], SHUT_RDWR);
+ if (!ASSERT_OK(err, "shutdown"))
+ return -1;
+ }
+
+ /* No dance for TCP_NEW_SYN_RECV to migrate based on eBPF */
+ if (test_case->state == BPF_TCP_NEW_SYN_RECV)
+ return 0;
+
+ /* Note that we use the second listener instead of the
+ * first one here.
+ *
+	 * The first listener is bind()ed to port 0, and
+	 * SOCK_BINDPORT_LOCK is not set in sk_userlocks, so
+	 * calling listen() again will bind() the first listener
+	 * to a new ephemeral port and detach it from the existing
+	 * reuseport group. (See: __inet_bind(), tcp_set_state())
+ *
+ * OTOH, the second one is bind()ed with a specific port,
+ * and SOCK_BINDPORT_LOCK is set. Thus, re-listen() will
+ * resurrect the listener on the existing reuseport group.
+ */
+ err = listen(test_case->servers[1], QLEN);
+ if (!ASSERT_OK(err, "listen"))
+ return -1;
+
+ /* Migrate from the last listener to the second one.
+ *
+	 * All listeners were detached from the reuseport_map,
+	 * so from here on migration is done by the kernel's random pick.
+ */
+ err = shutdown(test_case->servers[MIGRATED_TO], SHUT_RDWR);
+ if (!ASSERT_OK(err, "shutdown"))
+ return -1;
+
+ /* Back to the existing reuseport group */
+ err = listen(test_case->servers[MIGRATED_TO], QLEN);
+ if (!ASSERT_OK(err, "listen"))
+ return -1;
+
+ /* Migrate back to the last one from the second one */
+ err = shutdown(test_case->servers[1], SHUT_RDWR);
+ if (!ASSERT_OK(err, "shutdown"))
+ return -1;
+
+ return 0;
+}
+
+static void count_requests(struct migrate_reuseport_test_case *test_case,
+ struct test_migrate_reuseport *skel)
+{
+ struct sockaddr_storage addr;
+ socklen_t len = sizeof(addr);
+ int err, cnt = 0, client;
+ char buf[MSGLEN];
+
+ err = settimeo(test_case->servers[MIGRATED_TO], 4000);
+ if (!ASSERT_OK(err, "settimeo"))
+ goto out;
+
+ for (; cnt < NR_CLIENTS; cnt++) {
+ client = accept(test_case->servers[MIGRATED_TO],
+ (struct sockaddr *)&addr, &len);
+ if (!ASSERT_NEQ(client, -1, "accept"))
+ goto out;
+
+ memset(buf, 0, MSGLEN);
+ read(client, &buf, MSGLEN);
+ close(client);
+
+ if (!ASSERT_STREQ(buf, MSG, "read"))
+ goto out;
+ }
+
+out:
+ ASSERT_EQ(cnt, NR_CLIENTS, "count in userspace");
+
+ switch (test_case->state) {
+ case BPF_TCP_ESTABLISHED:
+ cnt = skel->bss->migrated_at_close;
+ break;
+ case BPF_TCP_SYN_RECV:
+ cnt = skel->bss->migrated_at_close_fastopen;
+ break;
+ case BPF_TCP_NEW_SYN_RECV:
+ if (test_case->expire_synack_timer)
+ cnt = skel->bss->migrated_at_send_synack;
+ else
+ cnt = skel->bss->migrated_at_recv_ack;
+ break;
+ default:
+ cnt = 0;
+ }
+
+ ASSERT_EQ(cnt, NR_CLIENTS, "count in BPF prog");
+}
+
+static void run_test(struct migrate_reuseport_test_case *test_case,
+ struct test_migrate_reuseport *skel)
+{
+ int err, saved_len;
+ char buf[16];
+
+ skel->bss->migrated_at_close = 0;
+ skel->bss->migrated_at_close_fastopen = 0;
+ skel->bss->migrated_at_send_synack = 0;
+ skel->bss->migrated_at_recv_ack = 0;
+
+ init_fds(test_case->servers, NR_SERVERS);
+ init_fds(test_case->clients, NR_CLIENTS);
+
+ if (test_case->fastopen) {
+ memset(buf, 0, sizeof(buf));
+
+ err = setup_fastopen(buf, sizeof(buf), &saved_len, false);
+ if (!ASSERT_OK(err, "setup_fastopen - setup"))
+ return;
+ }
+
+ err = start_servers(test_case, skel);
+ if (!ASSERT_OK(err, "start_servers"))
+ goto close_servers;
+
+ if (test_case->drop_ack) {
+ /* Drop the final ACK of the 3-way handshake and keep the
+ * in-flight requests in TCP_SYN_RECV or TCP_NEW_SYN_RECV.
+ */
+ err = drop_ack(test_case, skel);
+ if (!ASSERT_OK(err, "drop_ack"))
+ goto close_servers;
+ }
+
+ /* Tie requests to the first four listeners */
+ err = start_clients(test_case);
+ if (!ASSERT_OK(err, "start_clients"))
+ goto close_clients;
+
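+ /* Start the last listener so that it can be the migration target */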
+ err = listen(test_case->servers[MIGRATED_TO], QLEN);
+ if (!ASSERT_OK(err, "listen"))
+ goto close_clients;
+
+ err = update_maps(test_case, skel);
+ if (!ASSERT_OK(err, "fill_maps"))
+ goto close_clients;
+
+ /* Migrate the requests in the accept queue only.
+ * TCP_NEW_SYN_RECV requests are not migrated at this point.
+ */
+ err = migrate_dance(test_case);
+ if (!ASSERT_OK(err, "migrate_dance"))
+ goto close_clients;
+
+ if (test_case->expire_synack_timer) {
+ /* Wait for SYN+ACK timers to expire so that
+ * reqsk_timer_handler() migrates TCP_NEW_SYN_RECV requests.
+ */
+ sleep(1);
+ }
+
+ if (test_case->link) {
+ /* Resume 3WHS and migrate TCP_NEW_SYN_RECV requests */
+ err = pass_ack(test_case);
+ if (!ASSERT_OK(err, "pass_ack"))
+ goto close_clients;
+ }
+
+ count_requests(test_case, skel);
+
+close_clients:
+ close_fds(test_case->clients, NR_CLIENTS);
+
+ if (test_case->link) {
+ err = pass_ack(test_case);
+ ASSERT_OK(err, "pass_ack - clean up");
+ }
+
+close_servers:
+ close_fds(test_case->servers, NR_SERVERS);
+
+ if (test_case->fastopen) {
+ err = setup_fastopen(buf, sizeof(buf), &saved_len, true);
+ ASSERT_OK(err, "setup_fastopen - restore");
+ }
+}
+
+void serial_test_migrate_reuseport(void)
+{
+ struct test_migrate_reuseport *skel;
+ int i;
+
+ skel = test_migrate_reuseport__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ test__start_subtest(test_cases[i].name);
+ run_test(&test_cases[i], skel);
+ }
+
+ test_migrate_reuseport__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/missed.c b/tools/testing/selftests/bpf/prog_tests/missed.c
new file mode 100644
index 000000000000..ed8857ae914a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/missed.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "missed_kprobe.skel.h"
+#include "missed_kprobe_recursion.skel.h"
+#include "missed_tp_recursion.skel.h"
+
+/*
+ * Put a kprobe on bpf_fentry_test1, which calls the bpf_kfunc_common_test
+ * kfunc that also has a kprobe attached. The latter won't get triggered due
+ * to the kprobe recursion check, and the kprobe missed counter is incremented.
+ */
+static void test_missed_perf_kprobe(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct bpf_link_info info = {};
+ struct missed_kprobe *skel;
+ __u32 len = sizeof(info);
+ int err, prog_fd;
+
+ skel = missed_kprobe__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "missed_kprobe__open_and_load"))
+ goto cleanup;
+
+ err = missed_kprobe__attach(skel);
+ if (!ASSERT_OK(err, "missed_kprobe__attach"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.trigger);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ err = bpf_link_get_info_by_fd(bpf_link__fd(skel->links.test2), &info, &len);
+ if (!ASSERT_OK(err, "bpf_link_get_info_by_fd"))
+ goto cleanup;
+
+ ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "info.type");
+ ASSERT_EQ(info.perf_event.type, BPF_PERF_EVENT_KPROBE, "info.perf_event.type");
+ ASSERT_EQ(info.perf_event.kprobe.missed, 1, "info.perf_event.kprobe.missed");
+
+cleanup:
+ missed_kprobe__destroy(skel);
+}
+
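+/* Return the recursion_misses counter of a BPF program, or (__u64)-1 on error */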
+static __u64 get_missed_count(int fd)
+{
+ struct bpf_prog_info info = {};
+ __u32 len = sizeof(info);
+ int err;
+
+ err = bpf_prog_get_info_by_fd(fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
+ return (__u64) -1;
+ return info.recursion_misses;
+}
+
+/*
+ * Put a kprobe.multi on bpf_fentry_test1, which calls the bpf_kfunc_common_test
+ * kfunc that has 3 perf event kprobes and 1 kprobe.multi attached.
+ *
+ * Because fprobe (the kprobe.multi attach layer) does not have a strict
+ * recursion check, the kprobe's bpf_prog_active check is hit for test2-5.
+ */
+static void test_missed_kprobe_recursion(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct missed_kprobe_recursion *skel;
+ int err, prog_fd;
+
+ skel = missed_kprobe_recursion__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "missed_kprobe_recursion__open_and_load"))
+ goto cleanup;
+
+ err = missed_kprobe_recursion__attach(skel);
+ if (!ASSERT_OK(err, "missed_kprobe_recursion__attach"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.trigger);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test1)), 0, "test1_recursion_misses");
+ ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test2)), 1, "test2_recursion_misses");
+ ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test3)), 1, "test3_recursion_misses");
+ ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test4)), 1, "test4_recursion_misses");
+ ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test5)), 1, "test5_recursion_misses");
+ ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test6)), 1, "test6_recursion_misses");
+
+cleanup:
+ missed_kprobe_recursion__destroy(skel);
+}
+
+/*
+ * Put a kprobe on bpf_fentry_test1, which calls bpf_printk and thus invokes
+ * the bpf_trace_printk tracepoint. The bpf_trace_printk tracepoint has the
+ * test[234] programs attached to it.
+ *
+ * Because kprobe execution goes through the bpf_prog_active check, programs
+ * attached to the tracepoint will fail the recursion check and increment
+ * the recursion_misses stats.
+ */
+static void test_missed_tp_recursion(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct missed_tp_recursion *skel;
+ int err, prog_fd;
+
+ skel = missed_tp_recursion__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "missed_tp_recursion__open_and_load"))
+ goto cleanup;
+
+ err = missed_tp_recursion__attach(skel);
+ if (!ASSERT_OK(err, "missed_tp_recursion__attach"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.trigger);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test1)), 0, "test1_recursion_misses");
+ ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test2)), 1, "test2_recursion_misses");
+ ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test3)), 1, "test3_recursion_misses");
+ ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test4)), 1, "test4_recursion_misses");
+
+cleanup:
+ missed_tp_recursion__destroy(skel);
+}
+
+void test_missed(void)
+{
+ if (test__start_subtest("perf_kprobe"))
+ test_missed_perf_kprobe();
+ if (test__start_subtest("kprobe_recursion"))
+ test_missed_kprobe_recursion();
+ if (test__start_subtest("tp_recursion"))
+ test_missed_tp_recursion();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c
index 9c3c5c0f068f..a271d5a0f7ab 100644
--- a/tools/testing/selftests/bpf/prog_tests/mmap.c
+++ b/tools/testing/selftests/bpf/prog_tests/mmap.c
@@ -29,28 +29,42 @@ void test_mmap(void)
struct test_mmap *skel;
__u64 val = 0;
- skel = test_mmap__open_and_load();
- if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
+ skel = test_mmap__open();
+ if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
+ err = bpf_map__set_max_entries(skel->maps.rdonly_map, page_size);
+ if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+ goto cleanup;
+
+ /* at least 4 pages of data */
+ err = bpf_map__set_max_entries(skel->maps.data_map,
+ 4 * (page_size / sizeof(u64)));
+ if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+ goto cleanup;
+
+ err = test_mmap__load(skel);
+ if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+ goto cleanup;
+
bss_map = skel->maps.bss;
data_map = skel->maps.data_map;
data_map_fd = bpf_map__fd(data_map);
rdmap_fd = bpf_map__fd(skel->maps.rdonly_map);
- tmp1 = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
+ tmp1 = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) {
- munmap(tmp1, 4096);
+ munmap(tmp1, page_size);
goto cleanup;
}
/* now double-check if it's mmap()'able at all */
- tmp1 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, rdmap_fd, 0);
+ tmp1 = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rdmap_fd, 0);
if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno))
goto cleanup;
/* get map's ID */
memset(&map_info, 0, map_info_sz);
- err = bpf_obj_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
+ err = bpf_map_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
if (CHECK(err, "map_get_info", "failed %d\n", errno))
goto cleanup;
data_map_id = map_info.id;
diff --git a/tools/testing/selftests/bpf/prog_tests/modify_return.c b/tools/testing/selftests/bpf/prog_tests/modify_return.c
index 97fec70c600b..a70c99c2f8c8 100644
--- a/tools/testing/selftests/bpf/prog_tests/modify_return.c
+++ b/tools/testing/selftests/bpf/prog_tests/modify_return.c
@@ -15,51 +15,47 @@ static void run_test(__u32 input_retval, __u16 want_side_effect, __s16 want_ret)
{
struct modify_return *skel = NULL;
int err, prog_fd;
- __u32 duration = 0, retval;
__u16 side_effect;
__s16 ret;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
skel = modify_return__open_and_load();
- if (CHECK(!skel, "skel_load", "modify_return skeleton failed\n"))
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
err = modify_return__attach(skel);
- if (CHECK(err, "modify_return", "attach failed: %d\n", err))
+ if (!ASSERT_OK(err, "modify_return__attach failed"))
goto cleanup;
skel->bss->input_retval = input_retval;
prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
- err = bpf_prog_test_run(prog_fd, 1, NULL, 0, NULL, 0,
- &retval, &duration);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
- CHECK(err, "test_run", "err %d errno %d\n", err, errno);
+ side_effect = UPPER(topts.retval);
+ ret = LOWER(topts.retval);
- side_effect = UPPER(retval);
- ret = LOWER(retval);
+ ASSERT_EQ(ret, want_ret, "test_run ret");
+ ASSERT_EQ(side_effect, want_side_effect, "modify_return side_effect");
+ ASSERT_EQ(skel->bss->fentry_result, 1, "modify_return fentry_result");
+ ASSERT_EQ(skel->bss->fexit_result, 1, "modify_return fexit_result");
+ ASSERT_EQ(skel->bss->fmod_ret_result, 1, "modify_return fmod_ret_result");
- CHECK(ret != want_ret, "test_run",
- "unexpected ret: %d, expected: %d\n", ret, want_ret);
- CHECK(side_effect != want_side_effect, "modify_return",
- "unexpected side_effect: %d\n", side_effect);
-
- CHECK(skel->bss->fentry_result != 1, "modify_return",
- "fentry failed\n");
- CHECK(skel->bss->fexit_result != 1, "modify_return",
- "fexit failed\n");
- CHECK(skel->bss->fmod_ret_result != 1, "modify_return",
- "fmod_ret failed\n");
+ ASSERT_EQ(skel->bss->fentry_result2, 1, "modify_return fentry_result2");
+ ASSERT_EQ(skel->bss->fexit_result2, 1, "modify_return fexit_result2");
+ ASSERT_EQ(skel->bss->fmod_ret_result2, 1, "modify_return fmod_ret_result2");
cleanup:
modify_return__destroy(skel);
}
-void test_modify_return(void)
+/* TODO: conflict with get_func_ip_test */
+void serial_test_modify_return(void)
{
run_test(0 /* input_retval */,
- 1 /* want_side_effect */,
- 4 /* want_ret */);
+ 2 /* want_side_effect */,
+ 33 /* want_ret */);
run_test(-EINVAL /* input_retval */,
0 /* want_side_effect */,
- -EINVAL /* want_ret */);
+ -EINVAL * 2 /* want_ret */);
}
-
diff --git a/tools/testing/selftests/bpf/prog_tests/module_attach.c b/tools/testing/selftests/bpf/prog_tests/module_attach.c
index 50796b651f72..70fa7ae93173 100644
--- a/tools/testing/selftests/bpf/prog_tests/module_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/module_attach.c
@@ -2,20 +2,32 @@
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
+#include <stdbool.h>
#include "test_module_attach.skel.h"
+#include "testing_helpers.h"
static int duration;
-static int trigger_module_test_read(int read_sz)
+static int trigger_module_test_writable(int *val)
{
int fd, err;
+ char buf[65];
+ ssize_t rd;
- fd = open("/sys/kernel/bpf_testmod", O_RDONLY);
+ fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
err = -errno;
- if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err))
+ if (!ASSERT_GE(fd, 0, "testmod_file_open"))
return err;
- read(fd, NULL, read_sz);
+ rd = read(fd, buf, sizeof(buf) - 1);
+ err = -errno;
+ if (!ASSERT_GT(rd, 0, "testmod_file_rd_val")) {
+ close(fd);
+ return err;
+ }
+
+ buf[rd] = '\0';
+ *val = strtol(buf, NULL, 0);
close(fd);
return 0;
@@ -24,9 +36,12 @@ static int trigger_module_test_read(int read_sz)
void test_module_attach(void)
{
const int READ_SZ = 456;
+ const int WRITE_SZ = 457;
struct test_module_attach* skel;
struct test_module_attach__bss *bss;
+ struct bpf_link *link;
int err;
+ int writable_val = 0;
skel = test_module_attach__open();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
@@ -36,6 +51,10 @@ void test_module_attach(void)
0, "bpf_testmod_test_read");
ASSERT_OK(err, "set_attach_target");
+ err = bpf_program__set_attach_target(skel->progs.handle_fentry_explicit_manual,
+ 0, "bpf_testmod:bpf_testmod_test_read");
+ ASSERT_OK(err, "set_attach_target_explicit");
+
err = test_module_attach__load(skel);
if (CHECK(err, "skel_load", "failed to load skeleton\n"))
return;
@@ -48,15 +67,51 @@ void test_module_attach(void)
/* trigger tracepoint */
ASSERT_OK(trigger_module_test_read(READ_SZ), "trigger_read");
+ ASSERT_OK(trigger_module_test_write(WRITE_SZ), "trigger_write");
ASSERT_EQ(bss->raw_tp_read_sz, READ_SZ, "raw_tp");
+ ASSERT_EQ(bss->raw_tp_bare_write_sz, WRITE_SZ, "raw_tp_bare");
ASSERT_EQ(bss->tp_btf_read_sz, READ_SZ, "tp_btf");
ASSERT_EQ(bss->fentry_read_sz, READ_SZ, "fentry");
ASSERT_EQ(bss->fentry_manual_read_sz, READ_SZ, "fentry_manual");
+ ASSERT_EQ(bss->fentry_explicit_read_sz, READ_SZ, "fentry_explicit");
+ ASSERT_EQ(bss->fentry_explicit_manual_read_sz, READ_SZ, "fentry_explicit_manual");
ASSERT_EQ(bss->fexit_read_sz, READ_SZ, "fexit");
ASSERT_EQ(bss->fexit_ret, -EIO, "fexit_ret");
ASSERT_EQ(bss->fmod_ret_read_sz, READ_SZ, "fmod_ret");
+ bss->raw_tp_writable_bare_early_ret = true;
+ bss->raw_tp_writable_bare_out_val = 0xf1f2f3f4;
+ ASSERT_OK(trigger_module_test_writable(&writable_val),
+ "trigger_writable");
+ ASSERT_EQ(bss->raw_tp_writable_bare_in_val, 1024, "writable_test_in");
+ ASSERT_EQ(bss->raw_tp_writable_bare_out_val, writable_val,
+ "writable_test_out");
+
+ test_module_attach__detach(skel);
+
+ /* attach fentry/fexit and make sure it gets module reference */
+ link = bpf_program__attach(skel->progs.handle_fentry);
+ if (!ASSERT_OK_PTR(link, "attach_fentry"))
+ goto cleanup;
+
+ ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod");
+ bpf_link__destroy(link);
+
+ link = bpf_program__attach(skel->progs.handle_fexit);
+ if (!ASSERT_OK_PTR(link, "attach_fexit"))
+ goto cleanup;
+
+ ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod");
+ bpf_link__destroy(link);
+
+ link = bpf_program__attach(skel->progs.kprobe_multi);
+ if (!ASSERT_OK_PTR(link, "attach_kprobe_multi"))
+ goto cleanup;
+
+ ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod");
+ bpf_link__destroy(link);
+
cleanup:
test_module_attach__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
new file mode 100644
index 000000000000..bea05f78de5f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Red Hat */
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "bpf/libbpf_internal.h"
+#include "cgroup_helpers.h"
+#include "bpf_util.h"
+
+static const char *module_name = "bpf_testmod";
+static const char *symbol_name = "bpf_fentry_shadow_test";
+
+static int get_bpf_testmod_btf_fd(void)
+{
+ struct bpf_btf_info info;
+ char name[64];
+ __u32 id = 0, len;
+ int err, fd;
+
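+ /* Walk all BTF objects loaded in the kernel and return an FD
+ * to the one whose name matches the test module.
+ */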
+ while (true) {
+ err = bpf_btf_get_next_id(id, &id);
+ if (err) {
+ log_err("failed to iterate BTF objects");
+ return err;
+ }
+
+ fd = bpf_btf_get_fd_by_id(id);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ continue; /* expected race: BTF was unloaded */
+ err = -errno;
+ log_err("failed to get FD for BTF object #%d", id);
+ return err;
+ }
+
+ len = sizeof(info);
+ memset(&info, 0, sizeof(info));
+ info.name = ptr_to_u64(name);
+ info.name_len = sizeof(name);
+
+ err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ err = -errno;
+ log_err("failed to get info for BTF object #%d", id);
+ close(fd);
+ return err;
+ }
+
+ if (strcmp(name, module_name) == 0)
+ return fd;
+
+ close(fd);
+ }
+ return -ENOENT;
+}
+
+void test_module_fentry_shadow(void)
+{
+ struct btf *vmlinux_btf = NULL, *mod_btf = NULL;
+ int err, i;
+ int btf_fd[2] = {};
+ int prog_fd[2] = {};
+ int link_fd[2] = {};
+ __s32 btf_id[2] = {};
+
+ if (!env.has_testmod) {
+ test__skip();
+ return;
+ }
+
+ LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
+ .expected_attach_type = BPF_TRACE_FENTRY,
+ );
+
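+ /* Minimal fentry program: just return 0 */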
+ const struct bpf_insn trace_program[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+
+ vmlinux_btf = btf__load_vmlinux_btf();
+ if (!ASSERT_OK_PTR(vmlinux_btf, "load_vmlinux_btf"))
+ return;
+
+ btf_fd[1] = get_bpf_testmod_btf_fd();
+ if (!ASSERT_GE(btf_fd[1], 0, "get_bpf_testmod_btf_fd"))
+ goto out;
+
+ mod_btf = btf_get_from_fd(btf_fd[1], vmlinux_btf);
+ if (!ASSERT_OK_PTR(mod_btf, "btf_get_from_fd"))
+ goto out;
+
+ btf_id[0] = btf__find_by_name_kind(vmlinux_btf, symbol_name, BTF_KIND_FUNC);
+ if (!ASSERT_GT(btf_id[0], 0, "btf_find_by_name"))
+ goto out;
+
+ btf_id[1] = btf__find_by_name_kind(mod_btf, symbol_name, BTF_KIND_FUNC);
+ if (!ASSERT_GT(btf_id[1], 0, "btf_find_by_name"))
+ goto out;
+
+ for (i = 0; i < 2; i++) {
+ load_opts.attach_btf_id = btf_id[i];
+ load_opts.attach_btf_obj_fd = btf_fd[i];
+ prog_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
+ trace_program,
+ ARRAY_SIZE(trace_program),
+ &load_opts);
+ if (!ASSERT_GE(prog_fd[i], 0, "bpf_prog_load"))
+ goto out;
+
+ /* If the verifier incorrectly resolves addresses of the
+ * shadowed functions and uses the same address for both the
+ * vmlinux and the bpf_testmod functions, this will fail on
+ * attempting to create two trampolines for the same address,
+ * which is forbidden.
+ */
+ link_fd[i] = bpf_link_create(prog_fd[i], 0, BPF_TRACE_FENTRY, NULL);
+ if (!ASSERT_GE(link_fd[i], 0, "bpf_link_create"))
+ goto out;
+ }
+
+ err = bpf_prog_test_run_opts(prog_fd[0], NULL);
+ ASSERT_OK(err, "running test");
+
+out:
+ btf__free(vmlinux_btf);
+ btf__free(mod_btf);
+ for (i = 0; i < 2; i++) {
+ if (btf_fd[i])
+ close(btf_fd[i]);
+ if (prog_fd[i] > 0)
+ close(prog_fd[i]);
+ if (link_fd[i] > 0)
+ close(link_fd[i]);
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c
new file mode 100644
index 000000000000..8fade8bdc451
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Tessares SA. */
+/* Copyright (c) 2022, SUSE. */
+
+#include <linux/const.h>
+#include <netinet/in.h>
+#include <test_progs.h>
+#include <unistd.h>
+#include <errno.h>
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+#include "mptcp_sock.skel.h"
+#include "mptcpify.skel.h"
+#include "mptcp_subflow.skel.h"
+#include "mptcp_sockmap.skel.h"
+
+#define NS_TEST "mptcp_ns"
+#define ADDR_1 "10.0.1.1"
+#define ADDR_2 "10.0.1.2"
+#define PORT_1 10001
+
+#ifndef IPPROTO_MPTCP
+#define IPPROTO_MPTCP 262
+#endif
+
+#ifndef SOL_MPTCP
+#define SOL_MPTCP 284
+#endif
+#ifndef MPTCP_INFO
+#define MPTCP_INFO 1
+#endif
+#ifndef MPTCP_INFO_FLAG_FALLBACK
+#define MPTCP_INFO_FLAG_FALLBACK _BITUL(0)
+#endif
+#ifndef MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED
+#define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1)
+#endif
+
+#ifndef TCP_CA_NAME_MAX
+#define TCP_CA_NAME_MAX 16
+#endif
+
+struct __mptcp_info {
+ __u8 mptcpi_subflows;
+ __u8 mptcpi_add_addr_signal;
+ __u8 mptcpi_add_addr_accepted;
+ __u8 mptcpi_subflows_max;
+ __u8 mptcpi_add_addr_signal_max;
+ __u8 mptcpi_add_addr_accepted_max;
+ __u32 mptcpi_flags;
+ __u32 mptcpi_token;
+ __u64 mptcpi_write_seq;
+ __u64 mptcpi_snd_una;
+ __u64 mptcpi_rcv_nxt;
+ __u8 mptcpi_local_addr_used;
+ __u8 mptcpi_local_addr_max;
+ __u8 mptcpi_csum_enabled;
+ __u32 mptcpi_retransmits;
+ __u64 mptcpi_bytes_retrans;
+ __u64 mptcpi_bytes_sent;
+ __u64 mptcpi_bytes_received;
+ __u64 mptcpi_bytes_acked;
+};
+
+struct mptcp_storage {
+ __u32 invoked;
+ __u32 is_mptcp;
+ struct sock *sk;
+ __u32 token;
+ struct sock *first;
+ char ca_name[TCP_CA_NAME_MAX];
+};
+
+static int start_mptcp_server(int family, const char *addr_str, __u16 port,
+ int timeout_ms)
+{
+ struct network_helper_opts opts = {
+ .timeout_ms = timeout_ms,
+ .proto = IPPROTO_MPTCP,
+ };
+
+ return start_server_str(family, SOCK_STREAM, addr_str, port, &opts);
+}
+
+static int verify_tsk(int map_fd, int client_fd)
+{
+ int err, cfd = client_fd;
+ struct mptcp_storage val;
+
+ err = bpf_map_lookup_elem(map_fd, &cfd, &val);
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
+ return err;
+
+ if (!ASSERT_EQ(val.invoked, 1, "unexpected invoked count"))
+ err++;
+
+ if (!ASSERT_EQ(val.is_mptcp, 0, "unexpected is_mptcp"))
+ err++;
+
+ return err;
+}
+
+static void get_msk_ca_name(char ca_name[])
+{
+ size_t len;
+ int fd;
+
+ fd = open("/proc/sys/net/ipv4/tcp_congestion_control", O_RDONLY);
+ if (!ASSERT_GE(fd, 0, "failed to open tcp_congestion_control"))
+ return;
+
+ len = read(fd, ca_name, TCP_CA_NAME_MAX);
+ if (!ASSERT_GT(len, 0, "failed to read ca_name"))
+ goto err;
+
+ if (len > 0 && ca_name[len - 1] == '\n')
+ ca_name[len - 1] = '\0';
+
+err:
+ close(fd);
+}
+
+static int verify_msk(int map_fd, int client_fd, __u32 token)
+{
+ char ca_name[TCP_CA_NAME_MAX];
+ int err, cfd = client_fd;
+ struct mptcp_storage val;
+
+ if (!ASSERT_GT(token, 0, "invalid token"))
+ return -1;
+
+ get_msk_ca_name(ca_name);
+
+ err = bpf_map_lookup_elem(map_fd, &cfd, &val);
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
+ return err;
+
+ if (!ASSERT_EQ(val.invoked, 1, "unexpected invoked count"))
+ err++;
+
+ if (!ASSERT_EQ(val.is_mptcp, 1, "unexpected is_mptcp"))
+ err++;
+
+ if (!ASSERT_EQ(val.token, token, "unexpected token"))
+ err++;
+
+ if (!ASSERT_EQ(val.first, val.sk, "unexpected first"))
+ err++;
+
+ if (!ASSERT_STRNEQ(val.ca_name, ca_name, TCP_CA_NAME_MAX, "unexpected ca_name"))
+ err++;
+
+ return err;
+}
+
+static int run_test(int cgroup_fd, int server_fd, bool is_mptcp)
+{
+ int client_fd, prog_fd, map_fd, err;
+ struct mptcp_sock *sock_skel;
+
+ sock_skel = mptcp_sock__open_and_load();
+ if (!ASSERT_OK_PTR(sock_skel, "skel_open_load"))
+ return libbpf_get_error(sock_skel);
+
+ err = mptcp_sock__attach(sock_skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ prog_fd = bpf_program__fd(sock_skel->progs._sockops);
+ map_fd = bpf_map__fd(sock_skel->maps.socket_storage_map);
+ err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach"))
+ goto out;
+
+ client_fd = connect_to_fd(server_fd, 0);
+ if (!ASSERT_GE(client_fd, 0, "connect to fd")) {
+ err = -EIO;
+ goto out;
+ }
+
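+ /* verify_msk()/verify_tsk() return the number of failed checks */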
+ err += is_mptcp ? verify_msk(map_fd, client_fd, sock_skel->bss->token) :
+ verify_tsk(map_fd, client_fd);
+
+ close(client_fd);
+
+out:
+ mptcp_sock__destroy(sock_skel);
+ return err;
+}
+
+static void test_base(void)
+{
+ struct netns_obj *netns = NULL;
+ int server_fd, cgroup_fd;
+
+ cgroup_fd = test__join_cgroup("/mptcp");
+ if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup"))
+ return;
+
+ netns = netns_new(NS_TEST, true);
+ if (!ASSERT_OK_PTR(netns, "netns_new"))
+ goto fail;
+
+ /* without MPTCP */
+ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
+ if (!ASSERT_GE(server_fd, 0, "start_server"))
+ goto with_mptcp;
+
+ ASSERT_OK(run_test(cgroup_fd, server_fd, false), "run_test tcp");
+
+ close(server_fd);
+
+with_mptcp:
+ /* with MPTCP */
+ server_fd = start_mptcp_server(AF_INET, NULL, 0, 0);
+ if (!ASSERT_GE(server_fd, 0, "start_mptcp_server"))
+ goto fail;
+
+ ASSERT_OK(run_test(cgroup_fd, server_fd, true), "run_test mptcp");
+
+ close(server_fd);
+
+fail:
+ netns_free(netns);
+ close(cgroup_fd);
+}
+
+static void send_byte(int fd)
+{
+ char b = 0x55;
+
+ ASSERT_EQ(write(fd, &b, sizeof(b)), 1, "send single byte");
+}
+
+static int verify_mptcpify(int server_fd, int client_fd)
+{
+ struct __mptcp_info info;
+ socklen_t optlen;
+ int protocol;
+ int err = 0;
+
+ optlen = sizeof(protocol);
+ if (!ASSERT_OK(getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen),
+ "getsockopt(SOL_PROTOCOL)"))
+ return -1;
+
+ if (!ASSERT_EQ(protocol, IPPROTO_MPTCP, "protocol isn't MPTCP"))
+ err++;
+
+ optlen = sizeof(info);
+ if (!ASSERT_OK(getsockopt(client_fd, SOL_MPTCP, MPTCP_INFO, &info, &optlen),
+ "getsockopt(MPTCP_INFO)"))
+ return -1;
+
+ if (!ASSERT_GE(info.mptcpi_flags, 0, "unexpected mptcpi_flags"))
+ err++;
+ if (!ASSERT_FALSE(info.mptcpi_flags & MPTCP_INFO_FLAG_FALLBACK,
+ "MPTCP fallback"))
+ err++;
+ if (!ASSERT_TRUE(info.mptcpi_flags & MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED,
+ "no remote key received"))
+ err++;
+
+ return err;
+}
+
+static int run_mptcpify(int cgroup_fd)
+{
+ int server_fd, client_fd, err = 0;
+ struct mptcpify *mptcpify_skel;
+
+ mptcpify_skel = mptcpify__open_and_load();
+ if (!ASSERT_OK_PTR(mptcpify_skel, "skel_open_load"))
+ return libbpf_get_error(mptcpify_skel);
+
+ mptcpify_skel->bss->pid = getpid();
+
+ err = mptcpify__attach(mptcpify_skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ /* without MPTCP */
+ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
+ if (!ASSERT_GE(server_fd, 0, "start_server")) {
+ err = -EIO;
+ goto out;
+ }
+
+ client_fd = connect_to_fd(server_fd, 0);
+ if (!ASSERT_GE(client_fd, 0, "connect to fd")) {
+ err = -EIO;
+ goto close_server;
+ }
+
+ send_byte(client_fd);
+
+ err = verify_mptcpify(server_fd, client_fd);
+
+ close(client_fd);
+close_server:
+ close(server_fd);
+out:
+ mptcpify__destroy(mptcpify_skel);
+ return err;
+}
+
+static void test_mptcpify(void)
+{
+ struct netns_obj *netns = NULL;
+ int cgroup_fd;
+
+ cgroup_fd = test__join_cgroup("/mptcpify");
+ if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup"))
+ return;
+
+ netns = netns_new(NS_TEST, true);
+ if (!ASSERT_OK_PTR(netns, "netns_new"))
+ goto fail;
+
+ ASSERT_OK(run_mptcpify(cgroup_fd), "run_mptcpify");
+
+fail:
+ netns_free(netns);
+ close(cgroup_fd);
+}
+
+static int endpoint_init(char *flags)
+{
+ SYS(fail, "ip -net %s link add veth1 type veth peer name veth2", NS_TEST);
+ SYS(fail, "ip -net %s addr add %s/24 dev veth1", NS_TEST, ADDR_1);
+ SYS(fail, "ip -net %s link set dev veth1 up", NS_TEST);
+ SYS(fail, "ip -net %s addr add %s/24 dev veth2", NS_TEST, ADDR_2);
+ SYS(fail, "ip -net %s link set dev veth2 up", NS_TEST);
+ if (SYS_NOFAIL("ip -net %s mptcp endpoint add %s %s", NS_TEST, ADDR_2, flags)) {
+ printf("'ip mptcp' not supported, skip this test.\n");
+ test__skip();
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return -1;
+}
+
+static void wait_for_new_subflows(int fd)
+{
+ socklen_t len;
+ u8 subflows;
+ int err, i;
+
+ len = sizeof(subflows);
+ /* Wait max 5 sec for new subflows to be created */
+ for (i = 0; i < 50; i++) {
+ err = getsockopt(fd, SOL_MPTCP, MPTCP_INFO, &subflows, &len);
+ if (!err && subflows > 0)
+ break;
+
+ usleep(100000); /* 0.1s */
+ }
+}
+
+static void run_subflow(void)
+{
+ int server_fd, client_fd, err;
+ char new[TCP_CA_NAME_MAX];
+ char cc[TCP_CA_NAME_MAX];
+ unsigned int mark;
+ socklen_t len;
+
+ server_fd = start_mptcp_server(AF_INET, ADDR_1, PORT_1, 0);
+ if (!ASSERT_OK_FD(server_fd, "start_mptcp_server"))
+ return;
+
+ client_fd = connect_to_fd(server_fd, 0);
+ if (!ASSERT_OK_FD(client_fd, "connect_to_fd"))
+ goto close_server;
+
+ send_byte(client_fd);
+ wait_for_new_subflows(client_fd);
+
+ len = sizeof(mark);
+ err = getsockopt(client_fd, SOL_SOCKET, SO_MARK, &mark, &len);
+ if (ASSERT_OK(err, "getsockopt(client_fd, SO_MARK)"))
+ ASSERT_EQ(mark, 0, "mark");
+
+ len = sizeof(new);
+ err = getsockopt(client_fd, SOL_TCP, TCP_CONGESTION, new, &len);
+ if (ASSERT_OK(err, "getsockopt(client_fd, TCP_CONGESTION)")) {
+ get_msk_ca_name(cc);
+ ASSERT_STREQ(new, cc, "cc");
+ }
+
+ close(client_fd);
+close_server:
+ close(server_fd);
+}
+
+static void test_subflow(void)
+{
+ struct mptcp_subflow *skel;
+ struct netns_obj *netns;
+ int cgroup_fd;
+
+ cgroup_fd = test__join_cgroup("/mptcp_subflow");
+ if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup: mptcp_subflow"))
+ return;
+
+ skel = mptcp_subflow__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_load: mptcp_subflow"))
+ goto close_cgroup;
+
+ skel->bss->pid = getpid();
+
+ skel->links.mptcp_subflow =
+ bpf_program__attach_cgroup(skel->progs.mptcp_subflow, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.mptcp_subflow, "attach mptcp_subflow"))
+ goto skel_destroy;
+
+ skel->links._getsockopt_subflow =
+ bpf_program__attach_cgroup(skel->progs._getsockopt_subflow, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links._getsockopt_subflow, "attach _getsockopt_subflow"))
+ goto skel_destroy;
+
+ netns = netns_new(NS_TEST, true);
+ if (!ASSERT_OK_PTR(netns, "netns_new: mptcp_subflow"))
+ goto skel_destroy;
+
+ if (endpoint_init("subflow") < 0)
+ goto close_netns;
+
+ run_subflow();
+
+close_netns:
+ netns_free(netns);
+skel_destroy:
+ mptcp_subflow__destroy(skel);
+close_cgroup:
+ close(cgroup_fd);
+}
+
+/* Test sockmap on MPTCP server handling non-mp-capable clients. */
+static void test_sockmap_with_mptcp_fallback(struct mptcp_sockmap *skel)
+{
+ int listen_fd = -1, client_fd1 = -1, client_fd2 = -1;
+ int server_fd1 = -1, server_fd2 = -1, sent, recvd;
+ char snd[9] = "123456789";
+ char rcv[10];
+
+ /* start server with MPTCP enabled */
+ listen_fd = start_mptcp_server(AF_INET, NULL, 0, 0);
+ if (!ASSERT_OK_FD(listen_fd, "sockmap-fb:start_mptcp_server"))
+ return;
+
+ skel->bss->trace_port = ntohs(get_socket_local_port(listen_fd));
+ skel->bss->sk_index = 0;
+ /* create client without MPTCP enabled */
+ client_fd1 = connect_to_fd_opts(listen_fd, NULL);
+ if (!ASSERT_OK_FD(client_fd1, "sockmap-fb:connect_to_fd"))
+ goto end;
+
+ server_fd1 = accept(listen_fd, NULL, 0);
+ skel->bss->sk_index = 1;
+ client_fd2 = connect_to_fd_opts(listen_fd, NULL);
+ if (!ASSERT_OK_FD(client_fd2, "sockmap-fb:connect_to_fd"))
+ goto end;
+
+ server_fd2 = accept(listen_fd, NULL, 0);
+ /* test normal redirect behavior: data sent by client_fd1 can be
+ * received by client_fd2
+ */
+ skel->bss->redirect_idx = 1;
+ sent = send(client_fd1, snd, sizeof(snd), 0);
+ if (!ASSERT_EQ(sent, sizeof(snd), "sockmap-fb:send(client_fd1)"))
+ goto end;
+
+ /* try to recv more bytes than were sent to detect any extra data */
+ recvd = recv(client_fd2, rcv, sizeof(rcv), 0);
+ if (!ASSERT_EQ(recvd, sizeof(snd), "sockmap-fb:recv(client_fd2)"))
+ goto end;
+
+end:
+ if (client_fd1 >= 0)
+ close(client_fd1);
+ if (client_fd2 >= 0)
+ close(client_fd2);
+ if (server_fd1 >= 0)
+ close(server_fd1);
+ if (server_fd2 >= 0)
+ close(server_fd2);
+ close(listen_fd);
+}
+
+/* Test sockmap rejection of MPTCP sockets - both server and client sides. */
+static void test_sockmap_reject_mptcp(struct mptcp_sockmap *skel)
+{
+ int listen_fd = -1, server_fd = -1, client_fd1 = -1;
+ int err, zero = 0;
+
+ /* start server with MPTCP enabled */
+ listen_fd = start_mptcp_server(AF_INET, NULL, 0, 0);
+ if (!ASSERT_OK_FD(listen_fd, "start_mptcp_server"))
+ return;
+
+ skel->bss->trace_port = ntohs(get_socket_local_port(listen_fd));
+ skel->bss->sk_index = 0;
+ /* create client with MPTCP enabled */
+ client_fd1 = connect_to_fd(listen_fd, 0);
+ if (!ASSERT_OK_FD(client_fd1, "connect_to_fd client_fd1"))
+ goto end;
+
+ /* bpf_sock_map_update() called from sockops should reject MPTCP sk */
+ if (!ASSERT_EQ(skel->bss->helper_ret, -EOPNOTSUPP, "should reject"))
+ goto end;
+
+ server_fd = accept(listen_fd, NULL, 0);
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.sock_map),
+ &zero, &server_fd, BPF_NOEXIST);
+ if (!ASSERT_EQ(err, -EOPNOTSUPP, "server should be disallowed"))
+ goto end;
+
+ /* MPTCP client should also be disallowed */
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.sock_map),
+ &zero, &client_fd1, BPF_NOEXIST);
+ if (!ASSERT_EQ(err, -EOPNOTSUPP, "client should be disallowed"))
+ goto end;
+end:
+ if (client_fd1 >= 0)
+ close(client_fd1);
+ if (server_fd >= 0)
+ close(server_fd);
+ close(listen_fd);
+}
+
+static void test_mptcp_sockmap(void)
+{
+ struct mptcp_sockmap *skel;
+ struct netns_obj *netns;
+ int cgroup_fd, err;
+
+ cgroup_fd = test__join_cgroup("/mptcp_sockmap");
+ if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup: mptcp_sockmap"))
+ return;
+
+ skel = mptcp_sockmap__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_load: mptcp_sockmap"))
+ goto close_cgroup;
+
+ skel->links.mptcp_sockmap_inject =
+ bpf_program__attach_cgroup(skel->progs.mptcp_sockmap_inject, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.mptcp_sockmap_inject, "attach sockmap"))
+ goto skel_destroy;
+
+ err = bpf_prog_attach(bpf_program__fd(skel->progs.mptcp_sockmap_redirect),
+ bpf_map__fd(skel->maps.sock_map),
+ BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach stream verdict"))
+ goto skel_destroy;
+
+ netns = netns_new(NS_TEST, true);
+ if (!ASSERT_OK_PTR(netns, "netns_new: mptcp_sockmap"))
+ goto skel_destroy;
+
+ if (endpoint_init("subflow") < 0)
+ goto close_netns;
+
+ test_sockmap_with_mptcp_fallback(skel);
+ test_sockmap_reject_mptcp(skel);
+
+close_netns:
+ netns_free(netns);
+skel_destroy:
+ mptcp_sockmap__destroy(skel);
+close_cgroup:
+ close(cgroup_fd);
+}
+
+void test_mptcp(void)
+{
+ if (test__start_subtest("base"))
+ test_base();
+ if (test__start_subtest("mptcpify"))
+ test_mptcpify();
+ if (test__start_subtest("subflow"))
+ test_subflow();
+ if (test__start_subtest("sockmap"))
+ test_mptcp_sockmap();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/nested_trust.c b/tools/testing/selftests/bpf/prog_tests/nested_trust.c
new file mode 100644
index 000000000000..54a112ad5f9c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/nested_trust.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "nested_trust_failure.skel.h"
+#include "nested_trust_success.skel.h"
+#include "nested_acquire.skel.h"
+
+void test_nested_trust(void)
+{
+ RUN_TESTS(nested_trust_success);
+ RUN_TESTS(nested_trust_failure);
+
+ if (env.has_testmod)
+ RUN_TESTS(nested_acquire);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/net_timestamping.c b/tools/testing/selftests/bpf/prog_tests/net_timestamping.c
new file mode 100644
index 000000000000..dbfd87499b6b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/net_timestamping.c
@@ -0,0 +1,239 @@
+#include <linux/net_tstamp.h>
+#include <sys/time.h>
+#include <linux/errqueue.h>
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "net_timestamping.skel.h"
+
+#define CG_NAME "/net-timestamping-test"
+#define NSEC_PER_SEC 1000000000LL
+
+static const char addr4_str[] = "127.0.0.1";
+static const char addr6_str[] = "::1";
+static struct net_timestamping *skel;
+static const int cfg_payload_len = 30;
+static struct timespec usr_ts;
+static u64 delay_tolerance_nsec = 10000000000; /* 10 seconds */
+int SK_TS_SCHED;
+int SK_TS_TXSW;
+int SK_TS_ACK;
+
+static int64_t timespec_to_ns64(struct timespec *ts)
+{
+ return ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;
+}
+
+static void validate_key(int tskey, int tstype)
+{
+ static int expected_tskey = -1;
+
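+ /* With SOF_TIMESTAMPING_OPT_ID on TCP, the key is the byte offset
+ * of the last byte sent, so the first SCHED report of a
+ * cfg_payload_len write carries cfg_payload_len - 1.
+ */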
+ if (tstype == SCM_TSTAMP_SCHED)
+ expected_tskey = cfg_payload_len - 1;
+
+ ASSERT_EQ(expected_tskey, tskey, "tskey mismatch");
+
+ expected_tskey = tskey;
+}
+
+static void validate_timestamp(struct timespec *cur, struct timespec *prev)
+{
+ int64_t cur_ns, prev_ns;
+
+ cur_ns = timespec_to_ns64(cur);
+ prev_ns = timespec_to_ns64(prev);
+
+ ASSERT_LT(cur_ns - prev_ns, delay_tolerance_nsec, "latency");
+}
+
+static void test_socket_timestamp(struct scm_timestamping *tss, int tstype,
+ int tskey)
+{
+ static struct timespec prev_ts;
+
+ validate_key(tskey, tstype);
+
+ switch (tstype) {
+ case SCM_TSTAMP_SCHED:
+ validate_timestamp(&tss->ts[0], &usr_ts);
+ SK_TS_SCHED += 1;
+ break;
+ case SCM_TSTAMP_SND:
+ validate_timestamp(&tss->ts[0], &prev_ts);
+ SK_TS_TXSW += 1;
+ break;
+ case SCM_TSTAMP_ACK:
+ validate_timestamp(&tss->ts[0], &prev_ts);
+ SK_TS_ACK += 1;
+ break;
+ }
+
+ prev_ts = tss->ts[0];
+}
+
+static void test_recv_errmsg_cmsg(struct msghdr *msg)
+{
+ struct sock_extended_err *serr = NULL;
+ struct scm_timestamping *tss = NULL;
+ struct cmsghdr *cm;
+
+ for (cm = CMSG_FIRSTHDR(msg);
+ cm && cm->cmsg_len;
+ cm = CMSG_NXTHDR(msg, cm)) {
+ if (cm->cmsg_level == SOL_SOCKET &&
+ cm->cmsg_type == SCM_TIMESTAMPING) {
+ tss = (void *)CMSG_DATA(cm);
+ } else if ((cm->cmsg_level == SOL_IP &&
+ cm->cmsg_type == IP_RECVERR) ||
+ (cm->cmsg_level == SOL_IPV6 &&
+ cm->cmsg_type == IPV6_RECVERR) ||
+ (cm->cmsg_level == SOL_PACKET &&
+ cm->cmsg_type == PACKET_TX_TIMESTAMP)) {
+ serr = (void *)CMSG_DATA(cm);
+ ASSERT_EQ(serr->ee_origin, SO_EE_ORIGIN_TIMESTAMPING,
+ "cmsg type");
+ }
+
+ if (serr && tss)
+ test_socket_timestamp(tss, serr->ee_info,
+ serr->ee_data);
+ }
+}
+
+static bool socket_recv_errmsg(int fd)
+{
+ static char ctrl[1024 /* overprovision */];
+ char data[cfg_payload_len];
+ static struct msghdr msg;
+ struct iovec entry;
+ int n = 0;
+
+ memset(&msg, 0, sizeof(msg));
+ memset(&entry, 0, sizeof(entry));
+ memset(ctrl, 0, sizeof(ctrl));
+
+ entry.iov_base = data;
+ entry.iov_len = cfg_payload_len;
+ msg.msg_iov = &entry;
+ msg.msg_iovlen = 1;
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_control = ctrl;
+ msg.msg_controllen = sizeof(ctrl);
+
+ n = recvmsg(fd, &msg, MSG_ERRQUEUE);
+ if (n == -1)
+ ASSERT_EQ(errno, EAGAIN, "recvmsg MSG_ERRQUEUE");
+
+ if (n >= 0)
+ test_recv_errmsg_cmsg(&msg);
+
+ return n == -1;
+}
+
+static void test_socket_timestamping(int fd)
+{
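+ /* Drain the socket error queue until recvmsg() hits EAGAIN */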
+ while (!socket_recv_errmsg(fd));
+
+ ASSERT_EQ(SK_TS_SCHED, 1, "SCM_TSTAMP_SCHED");
+ ASSERT_EQ(SK_TS_TXSW, 1, "SCM_TSTAMP_SND");
+ ASSERT_EQ(SK_TS_ACK, 1, "SCM_TSTAMP_ACK");
+
+ SK_TS_SCHED = 0;
+ SK_TS_TXSW = 0;
+ SK_TS_ACK = 0;
+}
+
+static void test_tcp(int family, bool enable_socket_timestamping)
+{
+ struct net_timestamping__bss *bss;
+ char buf[cfg_payload_len];
+ int sfd = -1, cfd = -1;
+ unsigned int sock_opt;
+ struct netns_obj *ns;
+ int cg_fd;
+ int ret;
+
+ cg_fd = test__join_cgroup(CG_NAME);
+ if (!ASSERT_OK_FD(cg_fd, "join cgroup"))
+ return;
+
+ ns = netns_new("net_timestamping_ns", true);
+ if (!ASSERT_OK_PTR(ns, "create ns"))
+ goto out;
+
+ skel = net_timestamping__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open and load skel"))
+ goto out;
+
+ if (!ASSERT_OK(net_timestamping__attach(skel), "attach skel"))
+ goto out;
+
+ skel->links.skops_sockopt =
+ bpf_program__attach_cgroup(skel->progs.skops_sockopt, cg_fd);
+ if (!ASSERT_OK_PTR(skel->links.skops_sockopt, "attach cgroup"))
+ goto out;
+
+ bss = skel->bss;
+ memset(bss, 0, sizeof(*bss));
+
+ skel->bss->monitored_pid = getpid();
+
+ sfd = start_server(family, SOCK_STREAM,
+ family == AF_INET6 ? addr6_str : addr4_str, 0, 0);
+ if (!ASSERT_OK_FD(sfd, "start_server"))
+ goto out;
+
+ cfd = connect_to_fd(sfd, 0);
+ if (!ASSERT_OK_FD(cfd, "connect_to_fd_server"))
+ goto out;
+
+ if (enable_socket_timestamping) {
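+ /* Request software TX timestamps at the SCHED, SND and ACK
+ * points, keyed by byte offset (SOF_TIMESTAMPING_OPT_ID).
+ */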
+ sock_opt = SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_OPT_ID |
+ SOF_TIMESTAMPING_TX_SCHED |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_ACK;
+ ret = setsockopt(cfd, SOL_SOCKET, SO_TIMESTAMPING,
+ (char *) &sock_opt, sizeof(sock_opt));
+ if (!ASSERT_OK(ret, "setsockopt SO_TIMESTAMPING"))
+ goto out;
+
+ ret = clock_gettime(CLOCK_REALTIME, &usr_ts);
+ if (!ASSERT_OK(ret, "get user time"))
+ goto out;
+ }
+
+ ret = write(cfd, buf, sizeof(buf));
+ if (!ASSERT_EQ(ret, sizeof(buf), "send to server"))
+ goto out;
+
+ if (enable_socket_timestamping)
+ test_socket_timestamping(cfd);
+
+ ASSERT_EQ(bss->nr_active, 1, "nr_active");
+ ASSERT_EQ(bss->nr_snd, 2, "nr_snd");
+ ASSERT_EQ(bss->nr_sched, 1, "nr_sched");
+ ASSERT_EQ(bss->nr_txsw, 1, "nr_txsw");
+ ASSERT_EQ(bss->nr_ack, 1, "nr_ack");
+
+out:
+ if (sfd >= 0)
+ close(sfd);
+ if (cfd >= 0)
+ close(cfd);
+ net_timestamping__destroy(skel);
+ netns_free(ns);
+ close(cg_fd);
+}
+
+void test_net_timestamping(void)
+{
+ if (test__start_subtest("INET4: bpf timestamping"))
+ test_tcp(AF_INET, false);
+ if (test__start_subtest("INET4: bpf and socket timestamping"))
+ test_tcp(AF_INET, true);
+ if (test__start_subtest("INET6: bpf timestamping"))
+ test_tcp(AF_INET6, false);
+ if (test__start_subtest("INET6: bpf and socket timestamping"))
+ test_tcp(AF_INET6, true);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/netcnt.c b/tools/testing/selftests/bpf/prog_tests/netcnt.c
new file mode 100644
index 000000000000..c3333edd029f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/netcnt.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <sys/sysinfo.h>
+#include <test_progs.h>
+#include "network_helpers.h"
+#include "netcnt_prog.skel.h"
+#include "netcnt_common.h"
+
+#define CG_NAME "/netcnt"
+
+void serial_test_netcnt(void)
+{
+ union percpu_net_cnt *percpu_netcnt = NULL;
+ struct bpf_cgroup_storage_key key;
+ int map_fd, percpu_map_fd;
+ struct netcnt_prog *skel;
+ unsigned long packets;
+ union net_cnt netcnt;
+ unsigned long bytes;
+ int cpu, nproc;
+ int cg_fd = -1;
+ char cmd[128];
+
+ skel = netcnt_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "netcnt_prog__open_and_load"))
+ return;
+
+ nproc = bpf_num_possible_cpus();
+ percpu_netcnt = malloc(sizeof(*percpu_netcnt) * nproc);
+ if (!ASSERT_OK_PTR(percpu_netcnt, "malloc(percpu_netcnt)"))
+ goto err;
+
+ cg_fd = test__join_cgroup(CG_NAME);
+ if (!ASSERT_GE(cg_fd, 0, "test__join_cgroup"))
+ goto err;
+
+ skel->links.bpf_nextcnt = bpf_program__attach_cgroup(skel->progs.bpf_nextcnt, cg_fd);
+ if (!ASSERT_OK_PTR(skel->links.bpf_nextcnt,
+ "attach_cgroup(bpf_nextcnt)"))
+ goto err;
+
+ snprintf(cmd, sizeof(cmd), "%s ::1 -A -c 10000 -q > /dev/null", ping_command(AF_INET6));
+ ASSERT_OK(system(cmd), cmd);
+
+ map_fd = bpf_map__fd(skel->maps.netcnt);
+ if (!ASSERT_OK(bpf_map_get_next_key(map_fd, NULL, &key), "bpf_map_get_next_key"))
+ goto err;
+
+ if (!ASSERT_OK(bpf_map_lookup_elem(map_fd, &key, &netcnt), "bpf_map_lookup_elem(netcnt)"))
+ goto err;
+
+ percpu_map_fd = bpf_map__fd(skel->maps.percpu_netcnt);
+ if (!ASSERT_OK(bpf_map_lookup_elem(percpu_map_fd, &key, &percpu_netcnt[0]),
+ "bpf_map_lookup_elem(percpu_netcnt)"))
+ goto err;
+
+ /* Some packets can still be in the per-cpu cache, but not more than
+ * MAX_PERCPU_PACKETS.
+ */
+ packets = netcnt.packets;
+ bytes = netcnt.bytes;
+ for (cpu = 0; cpu < nproc; cpu++) {
+ ASSERT_LE(percpu_netcnt[cpu].packets, MAX_PERCPU_PACKETS, "MAX_PERCPU_PACKETS");
+
+ packets += percpu_netcnt[cpu].packets;
+ bytes += percpu_netcnt[cpu].bytes;
+ }
+
+ /* No packets should be lost */
+ ASSERT_GE(packets, 10000, "packets");
+
+ /* Check that the bytes counter matches the number of packets
+ * multiplied by the size of an IPv6 ICMP echo packet (104 bytes).
+ */
+ ASSERT_GE(bytes, packets * 104, "bytes");
+
+err:
+ if (cg_fd != -1)
+ close(cg_fd);
+ free(percpu_netcnt);
+ netcnt_prog__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c b/tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c
new file mode 100644
index 000000000000..2f52fa2641ba
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <netinet/in.h>
+#include <linux/netfilter.h>
+
+#include "test_progs.h"
+#include "test_netfilter_link_attach.skel.h"
+
+struct nf_link_test {
+ __u32 pf;
+ __u32 hooknum;
+ __s32 priority;
+ __u32 flags;
+
+ bool expect_success;
+ const char * const name;
+};
+
+static const struct nf_link_test nf_hook_link_tests[] = {
+ { .name = "allzero", },
+ { .pf = NFPROTO_NUMPROTO, .name = "invalid-pf", },
+ { .pf = NFPROTO_IPV4, .hooknum = 42, .name = "invalid-hooknum", },
+ { .pf = NFPROTO_IPV4, .priority = INT_MIN, .name = "invalid-priority-min", },
+ { .pf = NFPROTO_IPV4, .priority = INT_MAX, .name = "invalid-priority-max", },
+ { .pf = NFPROTO_IPV4, .flags = UINT_MAX, .name = "invalid-flags", },
+
+ { .pf = NFPROTO_INET, .priority = 1, .name = "invalid-inet-not-supported", },
+
+ {
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = -10000,
+ .flags = 0,
+ .expect_success = true,
+ .name = "attach ipv4",
+ },
+ {
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_FORWARD,
+ .priority = 10001,
+ .flags = BPF_F_NETFILTER_IP_DEFRAG,
+ .expect_success = true,
+ .name = "attach ipv6",
+ },
+};
+
+static void verify_netfilter_link_info(struct bpf_link *link, const struct nf_link_test nf_expected)
+{
+ struct bpf_link_info info;
+ __u32 len = sizeof(info);
+ int err, fd;
+
+ memset(&info, 0, len);
+
+ fd = bpf_link__fd(link);
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_OK(err, "get_link_info");
+
+ ASSERT_EQ(info.type, BPF_LINK_TYPE_NETFILTER, "info link type");
+ ASSERT_EQ(info.netfilter.pf, nf_expected.pf, "info nf protocol family");
+ ASSERT_EQ(info.netfilter.hooknum, nf_expected.hooknum, "info nf hooknum");
+ ASSERT_EQ(info.netfilter.priority, nf_expected.priority, "info nf priority");
+ ASSERT_EQ(info.netfilter.flags, nf_expected.flags, "info nf flags");
+}
+
+void test_netfilter_link_attach(void)
+{
+ struct test_netfilter_link_attach *skel;
+ struct bpf_program *prog;
+ LIBBPF_OPTS(bpf_netfilter_opts, opts);
+ int i;
+
+ skel = test_netfilter_link_attach__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_netfilter_link_attach__open_and_load"))
+ goto out;
+
+ prog = skel->progs.nf_link_attach_test;
+ if (!ASSERT_OK_PTR(prog, "attach program"))
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(nf_hook_link_tests); i++) {
+ struct bpf_link *link;
+
+ if (!test__start_subtest(nf_hook_link_tests[i].name))
+ continue;
+
+#define X(opts, m, i) opts.m = nf_hook_link_tests[(i)].m
+ X(opts, pf, i);
+ X(opts, hooknum, i);
+ X(opts, priority, i);
+ X(opts, flags, i);
+#undef X
+ link = bpf_program__attach_netfilter(prog, &opts);
+ if (nf_hook_link_tests[i].expect_success) {
+ struct bpf_link *link2;
+
+ if (!ASSERT_OK_PTR(link, "program attach successful"))
+ continue;
+
+ verify_netfilter_link_info(link, nf_hook_link_tests[i]);
+
+ link2 = bpf_program__attach_netfilter(prog, &opts);
+ ASSERT_ERR_PTR(link2, "attach program with same pf/hook/priority");
+
+ if (!ASSERT_OK(bpf_link__destroy(link), "link destroy"))
+ break;
+
+ link2 = bpf_program__attach_netfilter(prog, &opts);
+ if (!ASSERT_OK_PTR(link2, "program reattach successful"))
+ continue;
+
+ verify_netfilter_link_info(link2, nf_hook_link_tests[i]);
+
+ if (!ASSERT_OK(bpf_link__destroy(link2), "link destroy"))
+ break;
+ } else {
+ ASSERT_ERR_PTR(link, "program load failure");
+ }
+ }
+
+out:
+ test_netfilter_link_attach__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/netns_cookie.c b/tools/testing/selftests/bpf/prog_tests/netns_cookie.c
new file mode 100644
index 000000000000..e00cd34586dd
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/netns_cookie.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "netns_cookie_prog.skel.h"
+#include "network_helpers.h"
+
+#ifndef SO_NETNS_COOKIE
+#define SO_NETNS_COOKIE 71
+#endif
+
+#define loopback 1
+
+static int duration;
+
+void test_netns_cookie(void)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ int server_fd = -1, client_fd = -1, cgroup_fd = -1;
+ int err, val, ret, map, verdict, tc_fd;
+ struct netns_cookie_prog *skel;
+ uint64_t cookie_expected_value;
+ socklen_t vallen = sizeof(cookie_expected_value);
+ static const char send_msg[] = "message";
+
+ skel = netns_cookie_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ cgroup_fd = test__join_cgroup("/netns_cookie");
+ if (CHECK(cgroup_fd < 0, "join_cgroup", "cgroup creation failed\n"))
+ goto done;
+
+ skel->links.get_netns_cookie_sockops = bpf_program__attach_cgroup(
+ skel->progs.get_netns_cookie_sockops, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.get_netns_cookie_sockops, "prog_attach_sockops"))
+ goto done;
+
+ verdict = bpf_program__fd(skel->progs.get_netns_cookie_sk_msg);
+ map = bpf_map__fd(skel->maps.sock_map);
+ err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
+ if (!ASSERT_OK(err, "prog_attach_sk_msg"))
+ goto done;
+
+ tc_fd = bpf_program__fd(skel->progs.get_netns_cookie_tcx);
+ err = bpf_prog_attach_opts(tc_fd, loopback, BPF_TCX_INGRESS, &opta);
+ if (!ASSERT_OK(err, "prog_attach_tcx"))
+ goto done;
+
+ skel->links.get_netns_cookie_cgroup_skb = bpf_program__attach_cgroup(
+ skel->progs.get_netns_cookie_cgroup_skb, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.get_netns_cookie_cgroup_skb, "prog_attach_cgroup_skb"))
+ goto cleanup_tc;
+
+ server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+ if (CHECK(server_fd < 0, "start_server", "errno %d\n", errno))
+ goto cleanup_tc;
+
+ client_fd = connect_to_fd(server_fd, 0);
+ if (CHECK(client_fd < 0, "connect_to_fd", "errno %d\n", errno))
+ goto cleanup_tc;
+
+ ret = send(client_fd, send_msg, sizeof(send_msg), 0);
+ if (CHECK(ret != sizeof(send_msg), "send(msg)", "ret:%d\n", ret))
+ goto cleanup_tc;
+
+ err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.sockops_netns_cookies),
+ &client_fd, &val);
+ if (!ASSERT_OK(err, "map_lookup(sockops_netns_cookies)"))
+ goto cleanup_tc;
+
+ err = getsockopt(client_fd, SOL_SOCKET, SO_NETNS_COOKIE,
+ &cookie_expected_value, &vallen);
+ if (!ASSERT_OK(err, "getsockopt"))
+ goto cleanup_tc;
+
+ ASSERT_EQ(val, cookie_expected_value, "cookie_value_sockops");
+
+ err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.sk_msg_netns_cookies),
+ &client_fd, &val);
+ if (!ASSERT_OK(err, "map_lookup(sk_msg_netns_cookies)"))
+ goto cleanup_tc;
+
+ ASSERT_EQ(val, cookie_expected_value, "cookie_value_sk_msg");
+ ASSERT_EQ(skel->bss->tcx_init_netns_cookie, cookie_expected_value, "cookie_value_init_tcx");
+ ASSERT_EQ(skel->bss->tcx_netns_cookie, cookie_expected_value, "cookie_value_tcx");
+ ASSERT_EQ(skel->bss->cgroup_skb_init_netns_cookie, cookie_expected_value, "cookie_value_init_cgroup_skb");
+ ASSERT_EQ(skel->bss->cgroup_skb_netns_cookie, cookie_expected_value, "cookie_value_cgroup_skb");
+
+cleanup_tc:
+ err = bpf_prog_detach_opts(tc_fd, loopback, BPF_TCX_INGRESS, &optd);
+ ASSERT_OK(err, "prog_detach");
+
+done:
+ if (server_fd != -1)
+ close(server_fd);
+ if (client_fd != -1)
+ close(client_fd);
+ if (cgroup_fd != -1)
+ close(cgroup_fd);
+ netns_cookie_prog__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
index e74dc501b27f..99c953f2be21 100644
--- a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
+++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
@@ -1,85 +1,227 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Carlos Neira cneirabustos@gmail.com */
+
+#define _GNU_SOURCE
#include <test_progs.h>
+#include "test_ns_current_pid_tgid.skel.h"
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/syscall.h>
+#include <sched.h>
+#include <sys/wait.h>
+#include <sys/mount.h>
+#include <fcntl.h>
+#include "network_helpers.h"
-struct bss {
- __u64 dev;
- __u64 ino;
- __u64 pid_tgid;
- __u64 user_pid_tgid;
-};
+#define STACK_SIZE (1024 * 1024)
+static char child_stack[STACK_SIZE];
-void test_ns_current_pid_tgid(void)
+static int get_pid_tgid(pid_t *pid, pid_t *tgid,
+ struct test_ns_current_pid_tgid__bss *bss)
{
- const char *probe_name = "raw_tracepoint/sys_enter";
- const char *file = "test_ns_current_pid_tgid.o";
- int err, key = 0, duration = 0;
- struct bpf_link *link = NULL;
- struct bpf_program *prog;
- struct bpf_map *bss_map;
- struct bpf_object *obj;
- struct bss bss;
struct stat st;
- __u64 id;
+ int err;
- obj = bpf_object__open_file(file, NULL);
- if (CHECK(IS_ERR(obj), "obj_open", "err %ld\n", PTR_ERR(obj)))
- return;
+ *pid = sys_gettid();
+ *tgid = getpid();
+
+ err = stat("/proc/self/ns/pid", &st);
+ if (!ASSERT_OK(err, "stat /proc/self/ns/pid"))
+ return err;
+
+ bss->dev = st.st_dev;
+ bss->ino = st.st_ino;
+ bss->user_pid = 0;
+ bss->user_tgid = 0;
+ return 0;
+}
+
+static int test_current_pid_tgid_tp(void *args)
+{
+ struct test_ns_current_pid_tgid__bss *bss;
+ struct test_ns_current_pid_tgid *skel;
+ int ret = -1, err;
+ pid_t tgid, pid;
+
+ skel = test_ns_current_pid_tgid__open();
+ if (!ASSERT_OK_PTR(skel, "test_ns_current_pid_tgid__open"))
+ return ret;
+
+ bpf_program__set_autoload(skel->progs.tp_handler, true);
- err = bpf_object__load(obj);
- if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
+ err = test_ns_current_pid_tgid__load(skel);
+ if (!ASSERT_OK(err, "test_ns_current_pid_tgid__load"))
goto cleanup;
- bss_map = bpf_object__find_map_by_name(obj, "test_ns_.bss");
- if (CHECK(!bss_map, "find_bss_map", "failed\n"))
+ bss = skel->bss;
+ if (get_pid_tgid(&pid, &tgid, bss))
goto cleanup;
- prog = bpf_object__find_program_by_title(obj, probe_name);
- if (CHECK(!prog, "find_prog", "prog '%s' not found\n",
- probe_name))
+ err = test_ns_current_pid_tgid__attach(skel);
+ if (!ASSERT_OK(err, "test_ns_current_pid_tgid__attach"))
goto cleanup;
- memset(&bss, 0, sizeof(bss));
- pid_t tid = syscall(SYS_gettid);
- pid_t pid = getpid();
+ /* trigger tracepoint */
+ usleep(1);
+ if (!ASSERT_EQ(bss->user_pid, pid, "pid"))
+ goto cleanup;
+ if (!ASSERT_EQ(bss->user_tgid, tgid, "tgid"))
+ goto cleanup;
+ ret = 0;
+
+cleanup:
+ test_ns_current_pid_tgid__destroy(skel);
+ return ret;
+}
+
+static int test_current_pid_tgid_cgrp(void *args)
+{
+ struct test_ns_current_pid_tgid__bss *bss;
+ struct test_ns_current_pid_tgid *skel;
+ int server_fd = -1, ret = -1, err;
+ int cgroup_fd = *(int *)args;
+ pid_t tgid, pid;
+
+ skel = test_ns_current_pid_tgid__open();
+ if (!ASSERT_OK_PTR(skel, "test_ns_current_pid_tgid__open"))
+ return ret;
- id = (__u64) tid << 32 | pid;
- bss.user_pid_tgid = id;
+ bpf_program__set_autoload(skel->progs.cgroup_bind4, true);
- if (CHECK_FAIL(stat("/proc/self/ns/pid", &st))) {
- perror("Failed to stat /proc/self/ns/pid");
+ err = test_ns_current_pid_tgid__load(skel);
+ if (!ASSERT_OK(err, "test_ns_current_pid_tgid__load"))
goto cleanup;
- }
- bss.dev = st.st_dev;
- bss.ino = st.st_ino;
+ bss = skel->bss;
+ if (get_pid_tgid(&pid, &tgid, bss))
+ goto cleanup;
- err = bpf_map_update_elem(bpf_map__fd(bss_map), &key, &bss, 0);
- if (CHECK(err, "setting_bss", "failed to set bss : %d\n", err))
+ skel->links.cgroup_bind4 = bpf_program__attach_cgroup(
+ skel->progs.cgroup_bind4, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.cgroup_bind4, "bpf_program__attach_cgroup"))
goto cleanup;
- link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
- if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
- PTR_ERR(link))) {
- link = NULL;
+ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
+ if (!ASSERT_GE(server_fd, 0, "start_server"))
goto cleanup;
- }
- /* trigger some syscalls */
- usleep(1);
+ if (!ASSERT_EQ(bss->user_pid, pid, "pid"))
+ goto cleanup;
+ if (!ASSERT_EQ(bss->user_tgid, tgid, "tgid"))
+ goto cleanup;
+ ret = 0;
+
+cleanup:
+ if (server_fd >= 0)
+ close(server_fd);
+ test_ns_current_pid_tgid__destroy(skel);
+ return ret;
+}
+
+static int test_current_pid_tgid_sk_msg(void *args)
+{
+ int verdict, map, server_fd = -1, client_fd = -1;
+ struct test_ns_current_pid_tgid__bss *bss;
+ static const char send_msg[] = "message";
+ struct test_ns_current_pid_tgid *skel;
+ int ret = -1, err, key = 0;
+ pid_t tgid, pid;
+
+ skel = test_ns_current_pid_tgid__open();
+ if (!ASSERT_OK_PTR(skel, "test_ns_current_pid_tgid__open"))
+ return ret;
+
+ bpf_program__set_autoload(skel->progs.sk_msg, true);
+
+ err = test_ns_current_pid_tgid__load(skel);
+ if (!ASSERT_OK(err, "test_ns_current_pid_tgid__load"))
+ goto cleanup;
+
+ bss = skel->bss;
+	if (get_pid_tgid(&pid, &tgid, bss))
+ goto cleanup;
+
+ verdict = bpf_program__fd(skel->progs.sk_msg);
+ map = bpf_map__fd(skel->maps.sock_map);
+ err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
+ if (!ASSERT_OK(err, "prog_attach"))
+ goto cleanup;
+
+ server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+ if (!ASSERT_GE(server_fd, 0, "start_server"))
+ goto cleanup;
+
+ client_fd = connect_to_fd(server_fd, 0);
+ if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+ goto cleanup;
+
+ err = bpf_map_update_elem(map, &key, &client_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
+ goto cleanup;
- err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &key, &bss);
- if (CHECK(err, "set_bss", "failed to get bss : %d\n", err))
+ err = send(client_fd, send_msg, sizeof(send_msg), 0);
+ if (!ASSERT_EQ(err, sizeof(send_msg), "send(msg)"))
goto cleanup;
- if (CHECK(id != bss.pid_tgid, "Compare user pid/tgid vs. bpf pid/tgid",
- "User pid/tgid %llu BPF pid/tgid %llu\n", id, bss.pid_tgid))
+ if (!ASSERT_EQ(bss->user_pid, pid, "pid"))
goto cleanup;
+ if (!ASSERT_EQ(bss->user_tgid, tgid, "tgid"))
+ goto cleanup;
+ ret = 0;
+
cleanup:
- bpf_link__destroy(link);
- bpf_object__close(obj);
+ if (server_fd >= 0)
+ close(server_fd);
+ if (client_fd >= 0)
+ close(client_fd);
+ test_ns_current_pid_tgid__destroy(skel);
+ return ret;
+}
+
+static void test_ns_current_pid_tgid_new_ns(int (*fn)(void *), void *arg)
+{
+ int wstatus;
+ pid_t cpid;
+
+	/* Create a process in a new namespace; it will be the init
+	 * process of the new namespace and hence will be pid 1.
+	 */
+ cpid = clone(fn, child_stack + STACK_SIZE,
+ CLONE_NEWPID | SIGCHLD, arg);
+
+ if (!ASSERT_NEQ(cpid, -1, "clone"))
+ return;
+
+ if (!ASSERT_NEQ(waitpid(cpid, &wstatus, 0), -1, "waitpid"))
+ return;
+
+ if (!ASSERT_OK(WEXITSTATUS(wstatus), "newns_pidtgid"))
+ return;
}
+
+/* TODO: use a different tracepoint */
+void serial_test_current_pid_tgid(void)
+{
+ if (test__start_subtest("root_ns_tp"))
+ test_current_pid_tgid_tp(NULL);
+ if (test__start_subtest("new_ns_tp"))
+ test_ns_current_pid_tgid_new_ns(test_current_pid_tgid_tp, NULL);
+}
+
+void test_ns_current_pid_tgid_cgrp(void)
+{
+ int cgroup_fd = test__join_cgroup("/sock_addr");
+
+ if (ASSERT_OK_FD(cgroup_fd, "join_cgroup")) {
+ test_ns_current_pid_tgid_new_ns(test_current_pid_tgid_cgrp, &cgroup_fd);
+ close(cgroup_fd);
+ }
+}
+
+void test_ns_current_pid_tgid_sk_msg(void)
+{
+ test_ns_current_pid_tgid_new_ns(test_current_pid_tgid_sk_msg, NULL);
+}
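
The clone(CLONE_NEWPID | SIGCHLD) pattern used above in test_ns_current_pid_tgid_new_ns() is the standard way to run a callback as pid 1 of a fresh PID namespace. A minimal standalone sketch, my own and outside the selftest harness, with illustrative stack size and error handling (creating the namespace typically needs CAP_SYS_ADMIN):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

#define STACK_SIZE (64 * 1024)

static int child_fn(void *arg)
{
	/* inside the new PID namespace this process is pid 1 */
	printf("getpid() in new ns: %d\n", getpid());
	return 0;
}

int main(void)
{
	/* clone() wants the *top* of the stack on stack-grows-down arches */
	char *stack = malloc(STACK_SIZE);
	int wstatus;
	pid_t cpid;

	if (!stack)
		return 1;
	cpid = clone(child_fn, stack + STACK_SIZE, CLONE_NEWPID | SIGCHLD, NULL);
	if (cpid == -1)
		return 1;	/* likely missing CAP_SYS_ADMIN */
	waitpid(cpid, &wstatus, 0);
	free(stack);
	return WEXITSTATUS(wstatus);
}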
diff --git a/tools/testing/selftests/bpf/prog_tests/obj_name.c b/tools/testing/selftests/bpf/prog_tests/obj_name.c
index e178416bddad..7093edca6e08 100644
--- a/tools/testing/selftests/bpf/prog_tests/obj_name.c
+++ b/tools/testing/selftests/bpf/prog_tests/obj_name.c
@@ -20,7 +20,7 @@ void test_obj_name(void)
__u32 duration = 0;
int i;
- for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
size_t name_len = strlen(tests[i].name) + 1;
union bpf_attr attr;
size_t ncopy;
@@ -38,13 +38,13 @@ void test_obj_name(void)
fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
CHECK((tests[i].success && fd < 0) ||
- (!tests[i].success && fd != -1) ||
+ (!tests[i].success && fd >= 0) ||
(!tests[i].success && errno != tests[i].expected_errno),
"check-bpf-prog-name",
"fd %d(%d) errno %d(%d)\n",
fd, tests[i].success, errno, tests[i].expected_errno);
- if (fd != -1)
+ if (fd >= 0)
close(fd);
/* test different attr.map_name during BPF_MAP_CREATE */
@@ -59,13 +59,13 @@ void test_obj_name(void)
memcpy(attr.map_name, tests[i].name, ncopy);
fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
CHECK((tests[i].success && fd < 0) ||
- (!tests[i].success && fd != -1) ||
+ (!tests[i].success && fd >= 0) ||
(!tests[i].success && errno != tests[i].expected_errno),
"check-bpf-map-name",
"fd %d(%d) errno %d(%d)\n",
fd, tests[i].success, errno, tests[i].expected_errno);
- if (fd != -1)
+ if (fd >= 0)
close(fd);
}
}
diff --git a/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c
new file mode 100644
index 000000000000..e9c07d561ded
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "test_parse_tcp_hdr_opt.skel.h"
+#include "test_parse_tcp_hdr_opt_dynptr.skel.h"
+#include "test_tcp_hdr_options.h"
+
+struct test_pkt {
+ struct ipv6_packet pk6_v6;
+ u8 options[16];
+} __packed;
+
+struct test_pkt pkt = {
+ .pk6_v6.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
+ .pk6_v6.iph.nexthdr = IPPROTO_TCP,
+ .pk6_v6.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
+ .pk6_v6.tcp.urg_ptr = 123,
+ .pk6_v6.tcp.doff = 9, /* 16 bytes of options */
+
+ .options = {
+ TCPOPT_MSS, 4, 0x05, 0xB4, TCPOPT_NOP, TCPOPT_NOP,
+ 0, 6, 0xBB, 0xBB, 0xBB, 0xBB, TCPOPT_EOL
+ },
+};
+
+static void test_parse_opt(void)
+{
+ struct test_parse_tcp_hdr_opt *skel;
+ struct bpf_program *prog;
+ char buf[128];
+ int err;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt,
+ .data_size_in = sizeof(pkt),
+ .data_out = buf,
+ .data_size_out = sizeof(buf),
+ .repeat = 3,
+ );
+
+ skel = test_parse_tcp_hdr_opt__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ pkt.options[6] = skel->rodata->tcp_hdr_opt_kind_tpr;
+ prog = skel->progs.xdp_ingress_v6;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts);
+ ASSERT_OK(err, "ipv6 test_run");
+ ASSERT_EQ(topts.retval, XDP_PASS, "ipv6 test_run retval");
+ ASSERT_EQ(skel->bss->server_id, 0xBBBBBBBB, "server id");
+
+ test_parse_tcp_hdr_opt__destroy(skel);
+}
+
+static void test_parse_opt_dynptr(void)
+{
+ struct test_parse_tcp_hdr_opt_dynptr *skel;
+ struct bpf_program *prog;
+ char buf[128];
+ int err;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt,
+ .data_size_in = sizeof(pkt),
+ .data_out = buf,
+ .data_size_out = sizeof(buf),
+ .repeat = 3,
+ );
+
+ skel = test_parse_tcp_hdr_opt_dynptr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ pkt.options[6] = skel->rodata->tcp_hdr_opt_kind_tpr;
+ prog = skel->progs.xdp_ingress_v6;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts);
+ ASSERT_OK(err, "ipv6 test_run");
+ ASSERT_EQ(topts.retval, XDP_PASS, "ipv6 test_run retval");
+ ASSERT_EQ(skel->bss->server_id, 0xBBBBBBBB, "server id");
+
+ test_parse_tcp_hdr_opt_dynptr__destroy(skel);
+}
+
+void test_parse_tcp_hdr_opt(void)
+{
+ if (test__start_subtest("parse_tcp_hdr_opt"))
+ test_parse_opt();
+ if (test__start_subtest("parse_tcp_hdr_opt_dynptr"))
+ test_parse_opt_dynptr();
+}
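
A note on the packet template above: doff = 9 means a 36-byte TCP header, i.e. 20 bytes of base header plus the 16 bytes of options, and options[6] is the kind byte of the custom TLV (patched at runtime with tcp_hdr_opt_kind_tpr) whose 4 data bytes carry the 0xBBBBBBBB server id. The BPF programs walk the options with the standard kind/length loop; a userspace sketch of that walk, mine and for illustration only (the real parsers live in the corresponding progs/ sources):

#include <linux/types.h>

/* Standard TCP option walk: kind 0 (EOL) terminates, kind 1 (NOP)
 * has no length byte, everything else is a TLV whose length byte
 * covers kind + len + data.
 */
static int find_tcp_opt(const __u8 *opt, int opt_len, __u8 want_kind,
			const __u8 **data, int *data_len)
{
	int off = 0;

	while (off < opt_len) {
		__u8 kind = opt[off];

		if (kind == 0)		/* TCPOPT_EOL */
			break;
		if (kind == 1) {	/* TCPOPT_NOP */
			off++;
			continue;
		}
		if (off + 1 >= opt_len || opt[off + 1] < 2)
			return -1;	/* truncated or bogus length */
		if (off + opt[off + 1] > opt_len)
			return -1;	/* option runs past the buffer */
		if (kind == want_kind) {
			*data = &opt[off + 2];
			*data_len = opt[off + 1] - 2;
			return 0;
		}
		off += opt[off + 1];
	}
	return -1;
}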
diff --git a/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c b/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
new file mode 100644
index 000000000000..343da65864d6
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "percpu_alloc_array.skel.h"
+#include "percpu_alloc_cgrp_local_storage.skel.h"
+#include "percpu_alloc_fail.skel.h"
+
+static void test_array(void)
+{
+ struct percpu_alloc_array *skel;
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ skel = percpu_alloc_array__open();
+ if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.test_array_map_1, true);
+ bpf_program__set_autoload(skel->progs.test_array_map_2, true);
+ bpf_program__set_autoload(skel->progs.test_array_map_3, true);
+ bpf_program__set_autoload(skel->progs.test_array_map_4, true);
+
+ skel->bss->my_pid = getpid();
+ skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+
+ err = percpu_alloc_array__load(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_array__load"))
+ goto out;
+
+ err = percpu_alloc_array__attach(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_array__attach"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs.test_array_map_1);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run array_map 1-4");
+ ASSERT_EQ(topts.retval, 0, "test_run array_map 1-4");
+ ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
+ ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
+out:
+ percpu_alloc_array__destroy(skel);
+}
+
+static void test_array_sleepable(void)
+{
+ struct percpu_alloc_array *skel;
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ skel = percpu_alloc_array__open();
+ if (!ASSERT_OK_PTR(skel, "percpu_alloc__open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.test_array_map_10, true);
+
+ skel->bss->my_pid = getpid();
+ skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+
+ err = percpu_alloc_array__load(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_array__load"))
+ goto out;
+
+ err = percpu_alloc_array__attach(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_array__attach"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs.test_array_map_10);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run array_map_10");
+ ASSERT_EQ(topts.retval, 0, "test_run array_map_10");
+ ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
+ ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
+out:
+ percpu_alloc_array__destroy(skel);
+}
+
+static void test_cgrp_local_storage(void)
+{
+ struct percpu_alloc_cgrp_local_storage *skel;
+ int err, cgroup_fd, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ cgroup_fd = test__join_cgroup("/percpu_alloc");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /percpu_alloc"))
+ return;
+
+ skel = percpu_alloc_cgrp_local_storage__open();
+ if (!ASSERT_OK_PTR(skel, "percpu_alloc_cgrp_local_storage__open"))
+ goto close_fd;
+
+ skel->bss->my_pid = getpid();
+ skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+
+ err = percpu_alloc_cgrp_local_storage__load(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_cgrp_local_storage__load"))
+ goto destroy_skel;
+
+ err = percpu_alloc_cgrp_local_storage__attach(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_cgrp_local_storage__attach"))
+ goto destroy_skel;
+
+ prog_fd = bpf_program__fd(skel->progs.test_cgrp_local_storage_1);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run cgrp_local_storage 1-3");
+ ASSERT_EQ(topts.retval, 0, "test_run cgrp_local_storage 1-3");
+ ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
+ ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
+
+destroy_skel:
+ percpu_alloc_cgrp_local_storage__destroy(skel);
+close_fd:
+ close(cgroup_fd);
+}
+
+static void test_failure(void)
+{
+ RUN_TESTS(percpu_alloc_fail);
+}
+
+void test_percpu_alloc(void)
+{
+ if (test__start_subtest("array"))
+ test_array();
+ if (test__start_subtest("array_sleepable"))
+ test_array_sleepable();
+ if (test__start_subtest("cgrp_local_storage"))
+ test_cgrp_local_storage();
+ if (test__start_subtest("failure_tests"))
+ test_failure();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_branches.c b/tools/testing/selftests/bpf/prog_tests/perf_branches.c
index e35c444902a7..0a7ef770c487 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_branches.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_branches.c
@@ -15,6 +15,10 @@ static void check_good_sample(struct test_perf_branches *skel)
int pbe_size = sizeof(struct perf_branch_entry);
int duration = 0;
+ if (CHECK(!skel->bss->run_cnt, "invalid run_cnt",
+ "checked sample validity before prog run"))
+ return;
+
if (CHECK(!skel->bss->valid, "output not valid",
"no valid sample from prog"))
return;
@@ -45,6 +49,10 @@ static void check_bad_sample(struct test_perf_branches *skel)
int written_stack = skel->bss->written_stack_out;
int duration = 0;
+ if (CHECK(!skel->bss->run_cnt, "invalid run_cnt",
+ "checked sample validity before prog run"))
+ return;
+
if (CHECK(!skel->bss->valid, "output not valid",
"no valid sample from prog"))
return;
@@ -74,7 +82,7 @@ static void test_perf_branches_common(int perf_fd,
/* attach perf_event */
link = bpf_program__attach_perf_event(skel->progs.perf_branches, perf_fd);
- if (CHECK(IS_ERR(link), "attach_perf_event", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_perf_event"))
goto out_destroy_skel;
/* generate some branches on cpu 0 */
@@ -83,8 +91,12 @@ static void test_perf_branches_common(int perf_fd,
err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
if (CHECK(err, "set_affinity", "cpu #0, err %d\n", err))
goto out_destroy;
- /* spin the loop for a while (random high number) */
- for (i = 0; i < 1000000; ++i)
+
+	/* Spin the loop for a while using a high iteration count, but bail
+	 * out early once the run count marker shows that the backing
+	 * perf_event BPF program has executed at least once.
+	 */
+ for (i = 0; i < 100000000 && !*(volatile int *)&skel->bss->run_cnt; ++i)
++j;
test_perf_branches__detach(skel);
@@ -110,17 +122,17 @@ static void test_perf_branches_hw(void)
attr.type = PERF_TYPE_HARDWARE;
attr.config = PERF_COUNT_HW_CPU_CYCLES;
attr.freq = 1;
- attr.sample_freq = 4000;
+ attr.sample_freq = 1000;
attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
/*
- * Some setups don't support branch records (virtual machines, !x86),
- * so skip test in this case.
+ * Some setups don't support LBR (virtual machines, !x86, AMD Milan Zen
+ * 3 which only supports BRS), so skip test in this case.
*/
- if (pfd == -1) {
- if (errno == ENOENT || errno == EOPNOTSUPP) {
+ if (pfd < 0) {
+ if (errno == ENOENT || errno == EOPNOTSUPP || errno == EINVAL) {
printf("%s:SKIP:no PERF_SAMPLE_BRANCH_STACK\n",
__func__);
test__skip();
@@ -151,7 +163,7 @@ static void test_perf_branches_no_hw(void)
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
- attr.sample_freq = 4000;
+ attr.sample_freq = 1000;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
if (CHECK(pfd < 0, "perf_event_open", "err %d\n", pfd))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
index ca9f0895ec84..5fc2b3a0711e 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
@@ -43,10 +43,10 @@ int trigger_on_cpu(int cpu)
return 0;
}
-void test_perf_buffer(void)
+void serial_test_perf_buffer(void)
{
- int err, on_len, nr_on_cpus = 0, nr_cpus, i;
- struct perf_buffer_opts pb_opts = {};
+ int err, on_len, nr_on_cpus = 0, nr_cpus, i, j;
+ int zero = 0, my_pid = getpid();
struct test_perf_buffer *skel;
cpu_set_t cpu_seen;
struct perf_buffer *pb;
@@ -71,16 +71,19 @@ void test_perf_buffer(void)
if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
goto out_close;
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.my_pid_map), &zero, &my_pid, 0);
+ if (!ASSERT_OK(err, "my_pid_update"))
+ goto out_close;
+
/* attach probe */
err = test_perf_buffer__attach(skel);
if (CHECK(err, "attach_kprobe", "err %d\n", err))
goto out_close;
/* set up perf buffer */
- pb_opts.sample_cb = on_sample;
- pb_opts.ctx = &cpu_seen;
- pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts);
- if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
+ pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1,
+ on_sample, NULL, &cpu_seen, NULL);
+ if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto out_close;
CHECK(perf_buffer__epoll_fd(pb) < 0, "epoll_fd",
@@ -107,19 +110,19 @@ void test_perf_buffer(void)
"expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen)))
goto out_free_pb;
- if (CHECK(perf_buffer__buffer_cnt(pb) != nr_cpus, "buf_cnt",
- "got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_cpus))
+ if (CHECK(perf_buffer__buffer_cnt(pb) != nr_on_cpus, "buf_cnt",
+ "got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_on_cpus))
goto out_close;
- for (i = 0; i < nr_cpus; i++) {
+ for (i = 0, j = 0; i < nr_cpus; i++) {
if (i >= on_len || !online[i])
continue;
- fd = perf_buffer__buffer_fd(pb, i);
+ fd = perf_buffer__buffer_fd(pb, j);
CHECK(fd < 0 || last_fd == fd, "fd_check", "last fd %d == fd %d\n", last_fd, fd);
last_fd = fd;
- err = perf_buffer__consume_buffer(pb, i);
+ err = perf_buffer__consume_buffer(pb, j);
if (CHECK(err, "drain_buf", "cpu %d, err %d\n", i, err))
goto out_close;
@@ -127,12 +130,13 @@ void test_perf_buffer(void)
if (trigger_on_cpu(i))
goto out_close;
- err = perf_buffer__consume_buffer(pb, i);
- if (CHECK(err, "consume_buf", "cpu %d, err %d\n", i, err))
+ err = perf_buffer__consume_buffer(pb, j);
+ if (CHECK(err, "consume_buf", "cpu %d, err %d\n", j, err))
goto out_close;
if (CHECK(!CPU_ISSET(i, &cpu_seen), "cpu_seen", "cpu %d not seen\n", i))
goto out_close;
+ j++;
}
out_free_pb:
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
index 72c3690844fb..f4aad35afae1 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
@@ -63,7 +63,8 @@ void test_perf_event_stackmap(void)
PERF_SAMPLE_BRANCH_NO_FLAGS |
PERF_SAMPLE_BRANCH_NO_CYCLES |
PERF_SAMPLE_BRANCH_CALL_STACK,
- .sample_period = 5000,
+ .freq = 1,
+ .sample_freq = read_perf_max_sample_freq(),
.size = sizeof(struct perf_event_attr),
};
struct perf_event_stackmap *skel;
@@ -97,8 +98,7 @@ void test_perf_event_stackmap(void)
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
- if (CHECK(IS_ERR(skel->links.oncpu), "attach_perf_event",
- "err %ld\n", PTR_ERR(skel->links.oncpu))) {
+ if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) {
close(pmu_fd);
goto cleanup;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c
new file mode 100644
index 000000000000..d940ff87fa08
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#define _GNU_SOURCE
+#include <pthread.h>
+#include <sched.h>
+#include <test_progs.h>
+#include "testing_helpers.h"
+#include "test_perf_link.skel.h"
+
+#define BURN_TIMEOUT_MS 100
+#define BURN_TIMEOUT_NS (BURN_TIMEOUT_MS * 1000000)
+
+static void burn_cpu(void)
+{
+ volatile int j = 0;
+ cpu_set_t cpu_set;
+ int i, err;
+
+ /* generate some branches on cpu 0 */
+ CPU_ZERO(&cpu_set);
+ CPU_SET(0, &cpu_set);
+ err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
+ ASSERT_OK(err, "set_thread_affinity");
+
+	/* spin the loop for a while (arbitrarily high iteration count) */
+ for (i = 0; i < 1000000; ++i)
+ ++j;
+}
+
+/* TODO: often fails in concurrent mode */
+void serial_test_perf_link(void)
+{
+ struct test_perf_link *skel = NULL;
+ struct perf_event_attr attr;
+ int pfd = -1, link_fd = -1, err;
+ int run_cnt_before, run_cnt_after;
+ struct bpf_link_info info;
+ __u32 info_len = sizeof(info);
+ __u64 timeout_time_ns;
+
+ /* create perf event */
+ memset(&attr, 0, sizeof(attr));
+ attr.size = sizeof(attr);
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.config = PERF_COUNT_SW_CPU_CLOCK;
+ attr.freq = 1;
+ attr.sample_freq = 1000;
+ pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
+ if (!ASSERT_GE(pfd, 0, "perf_fd"))
+ goto cleanup;
+
+ skel = test_perf_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ link_fd = bpf_link_create(bpf_program__fd(skel->progs.handler), pfd,
+ BPF_PERF_EVENT, NULL);
+ if (!ASSERT_GE(link_fd, 0, "link_fd"))
+ goto cleanup;
+
+ memset(&info, 0, sizeof(info));
+ err = bpf_link_get_info_by_fd(link_fd, &info, &info_len);
+ if (!ASSERT_OK(err, "link_get_info"))
+ goto cleanup;
+
+ ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "link_type");
+ ASSERT_GT(info.id, 0, "link_id");
+ ASSERT_GT(info.prog_id, 0, "link_prog_id");
+
+ /* ensure we get at least one perf_event prog execution */
+ timeout_time_ns = get_time_ns() + BURN_TIMEOUT_NS;
+ while (true) {
+ burn_cpu();
+ if (skel->bss->run_cnt > 0)
+ break;
+ if (!ASSERT_LT(get_time_ns(), timeout_time_ns, "run_cnt_timeout"))
+ break;
+ }
+
+	/* the perf_event is still active, but once we close the link
+	 * the BPF program should not be executed anymore
+	 */
+ close(link_fd);
+ link_fd = -1;
+
+ /* make sure there are no stragglers */
+ kern_sync_rcu();
+
+ run_cnt_before = skel->bss->run_cnt;
+ burn_cpu();
+ run_cnt_after = skel->bss->run_cnt;
+
+ ASSERT_EQ(run_cnt_before, run_cnt_after, "run_cnt_before_after");
+
+cleanup:
+ if (link_fd >= 0)
+ close(link_fd);
+ if (pfd >= 0)
+ close(pfd);
+ test_perf_link__destroy(skel);
+}
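
While on perf_link.c, the reason BURN_TIMEOUT_NS is parenthesized above: an object-like macro that expands to arithmetic changes meaning at the use site otherwise, because the expansion is purely textual. A contrived standalone sketch of the pitfall (mine, not part of the patch):

#include <stdio.h>

#define TIMEOUT_MS 100
#define BAD_NS	TIMEOUT_MS * 1000000	/* textual expansion, no parens */
#define GOOD_NS	(TIMEOUT_MS * 1000000)

int main(void)
{
	/* 1000000000ULL / BAD_NS expands to 1000000000ULL / 100 * 1000000:
	 * the division binds first, giving 10000000000000 instead of 10.
	 */
	printf("%llu vs %llu\n", 1000000000ULL / BAD_NS,
	       1000000000ULL / GOOD_NS);
	return 0;
}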
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_skip.c b/tools/testing/selftests/bpf/prog_tests/perf_skip.c
new file mode 100644
index 000000000000..37d8618800e4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/perf_skip.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+
+#include <test_progs.h>
+#include "test_perf_skip.skel.h"
+#include <linux/compiler.h>
+#include <linux/hw_breakpoint.h>
+#include <sys/mman.h>
+
+#ifndef TRAP_PERF
+#define TRAP_PERF 6
+#endif
+
+int sigio_count, sigtrap_count;
+
+static void handle_sigio(int sig __always_unused)
+{
+ ++sigio_count;
+}
+
+static void handle_sigtrap(int signum __always_unused,
+ siginfo_t *info,
+ void *ucontext __always_unused)
+{
+ ASSERT_EQ(info->si_code, TRAP_PERF, "si_code");
+ ++sigtrap_count;
+}
+
+static noinline int test_function(void)
+{
+ asm volatile ("");
+ return 0;
+}
+
+void serial_test_perf_skip(void)
+{
+ struct sigaction action = {};
+ struct sigaction previous_sigtrap;
+ sighandler_t previous_sigio = SIG_ERR;
+ struct test_perf_skip *skel = NULL;
+ struct perf_event_attr attr = {};
+ int perf_fd = -1;
+ int err;
+ struct f_owner_ex owner;
+ struct bpf_link *prog_link = NULL;
+
+ action.sa_flags = SA_SIGINFO | SA_NODEFER;
+ action.sa_sigaction = handle_sigtrap;
+ sigemptyset(&action.sa_mask);
+ if (!ASSERT_OK(sigaction(SIGTRAP, &action, &previous_sigtrap), "sigaction"))
+ return;
+
+ previous_sigio = signal(SIGIO, handle_sigio);
+ if (!ASSERT_NEQ(previous_sigio, SIG_ERR, "signal"))
+ goto cleanup;
+
+ skel = test_perf_skip__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ attr.type = PERF_TYPE_BREAKPOINT;
+ attr.size = sizeof(attr);
+ attr.bp_type = HW_BREAKPOINT_X;
+ attr.bp_addr = (uintptr_t)test_function;
+ attr.bp_len = sizeof(long);
+ attr.sample_period = 1;
+ attr.sample_type = PERF_SAMPLE_IP;
+ attr.pinned = 1;
+ attr.exclude_kernel = 1;
+ attr.exclude_hv = 1;
+ attr.precise_ip = 3;
+ attr.sigtrap = 1;
+ attr.remove_on_exec = 1;
+
+ perf_fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
+ if (perf_fd < 0 && (errno == ENOENT || errno == EOPNOTSUPP)) {
+ printf("SKIP:no PERF_TYPE_BREAKPOINT/HW_BREAKPOINT_X\n");
+ test__skip();
+ goto cleanup;
+ }
+ if (!ASSERT_OK(perf_fd < 0, "perf_event_open"))
+ goto cleanup;
+
+ /* Configure the perf event to signal on sample. */
+ err = fcntl(perf_fd, F_SETFL, O_ASYNC);
+ if (!ASSERT_OK(err, "fcntl(F_SETFL, O_ASYNC)"))
+ goto cleanup;
+
+ owner.type = F_OWNER_TID;
+ owner.pid = syscall(__NR_gettid);
+ err = fcntl(perf_fd, F_SETOWN_EX, &owner);
+ if (!ASSERT_OK(err, "fcntl(F_SETOWN_EX)"))
+ goto cleanup;
+
+ /* Allow at most one sample. A sample rejected by bpf should
+ * not count against this.
+ */
+ err = ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, 1);
+ if (!ASSERT_OK(err, "ioctl(PERF_EVENT_IOC_REFRESH)"))
+ goto cleanup;
+
+ prog_link = bpf_program__attach_perf_event(skel->progs.handler, perf_fd);
+ if (!ASSERT_OK_PTR(prog_link, "bpf_program__attach_perf_event"))
+ goto cleanup;
+
+ /* Configure the bpf program to suppress the sample. */
+ skel->bss->ip = (uintptr_t)test_function;
+ test_function();
+
+ ASSERT_EQ(sigio_count, 0, "sigio_count");
+ ASSERT_EQ(sigtrap_count, 0, "sigtrap_count");
+
+ /* Configure the bpf program to allow the sample. */
+ skel->bss->ip = 0;
+ test_function();
+
+ ASSERT_EQ(sigio_count, 1, "sigio_count");
+ ASSERT_EQ(sigtrap_count, 1, "sigtrap_count");
+
+ /* Test that the sample above is the only one allowed (by perf, not
+ * by bpf)
+ */
+ test_function();
+
+ ASSERT_EQ(sigio_count, 1, "sigio_count");
+ ASSERT_EQ(sigtrap_count, 1, "sigtrap_count");
+
+cleanup:
+ bpf_link__destroy(prog_link);
+ if (perf_fd >= 0)
+ close(perf_fd);
+ test_perf_skip__destroy(skel);
+
+ if (previous_sigio != SIG_ERR)
+ signal(SIGIO, previous_sigio);
+ sigaction(SIGTRAP, &previous_sigtrap, NULL);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c
index fcf54b3a1dd0..c799a3c5ad1f 100644
--- a/tools/testing/selftests/bpf/prog_tests/pinning.c
+++ b/tools/testing/selftests/bpf/prog_tests/pinning.c
@@ -18,7 +18,7 @@ __u32 get_map_id(struct bpf_object *obj, const char *name)
if (CHECK(!map, "find map", "NULL map"))
return 0;
- err = bpf_obj_get_info_by_fd(bpf_map__fd(map),
+ err = bpf_map_get_info_by_fd(bpf_map__fd(map),
&map_info, &map_info_len);
CHECK(err, "get map info", "err %d errno %d", err, errno);
return map_info.id;
@@ -26,13 +26,13 @@ __u32 get_map_id(struct bpf_object *obj, const char *name)
void test_pinning(void)
{
- const char *file_invalid = "./test_pinning_invalid.o";
+ const char *file_invalid = "./test_pinning_invalid.bpf.o";
const char *custpinpath = "/sys/fs/bpf/custom/pinmap";
const char *nopinpath = "/sys/fs/bpf/nopinmap";
const char *nopinpath2 = "/sys/fs/bpf/nopinmap2";
const char *custpath = "/sys/fs/bpf/custom";
const char *pinpath = "/sys/fs/bpf/pinmap";
- const char *file = "./test_pinning.o";
+ const char *file = "./test_pinning.bpf.o";
__u32 map_id, map_id2, duration = 0;
struct stat statbuf = {};
struct bpf_object *obj;
@@ -125,6 +125,10 @@ void test_pinning(void)
if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno))
goto out;
+ /* get pinning path */
+ if (!ASSERT_STREQ(bpf_map__pin_path(map), pinpath, "get pin path"))
+ goto out;
+
/* set pinning path of other map and re-pin all */
map = bpf_object__find_map_by_name(obj, "nopinmap");
if (CHECK(!map, "find map", "NULL map"))
@@ -134,6 +138,11 @@ void test_pinning(void)
if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno))
goto out;
+ /* get pinning path after set */
+ if (!ASSERT_STREQ(bpf_map__pin_path(map), custpinpath,
+ "get pin path after set"))
+ goto out;
+
/* should only pin the one unpinned map */
err = bpf_object__pin_maps(obj, NULL);
if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno))
@@ -232,8 +241,8 @@ void test_pinning(void)
goto out;
}
- map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32),
- sizeof(__u64), 1, 0);
+ map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(__u32),
+ sizeof(__u64), 1, NULL);
if (CHECK(map_fd < 0, "create pinmap manually", "fd %d\n", map_fd))
goto out;
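
The bpf_create_map() to bpf_map_create() switch above is part of the libbpf 1.0 API migration: the replacement takes an optional map name plus an extensible opts struct rather than a bare flags word. A minimal sketch of the newer call, with example values of my own (opts may also be NULL for defaults):

#include <bpf/bpf.h>

static int create_example_map(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.map_flags = BPF_F_NO_PREALLOC,	/* hash maps may skip preallocation */
	);

	/* returns an fd on success, a negative errno-style value on failure */
	return bpf_map_create(BPF_MAP_TYPE_HASH, "example_map",
			      sizeof(__u32), sizeof(__u64), 128, &opts);
}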
diff --git a/tools/testing/selftests/bpf/prog_tests/pinning_devmap_reuse.c b/tools/testing/selftests/bpf/prog_tests/pinning_devmap_reuse.c
new file mode 100644
index 000000000000..9ae49b587f3e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/pinning_devmap_reuse.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <test_progs.h>
+
+
+#include "test_pinning_devmap.skel.h"
+
+void test_pinning_devmap_reuse(void)
+{
+ const char *pinpath1 = "/sys/fs/bpf/pinmap1";
+ const char *pinpath2 = "/sys/fs/bpf/pinmap2";
+ struct test_pinning_devmap *skel1 = NULL, *skel2 = NULL;
+ int err;
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
+
+ /* load the object a first time */
+ skel1 = test_pinning_devmap__open_and_load();
+ if (!ASSERT_OK_PTR(skel1, "skel_load1"))
+ goto out;
+
+ /* load the object a second time, re-using the pinned map */
+ skel2 = test_pinning_devmap__open_and_load();
+ if (!ASSERT_OK_PTR(skel2, "skel_load2"))
+ goto out;
+
+	/* the pinned map is still referenced by skel2, so we can
+	 * close this reference without the map's refcount falling to 0
+	 */
+ test_pinning_devmap__destroy(skel1);
+ skel1 = NULL;
+
+ /* now, swap the pins */
+ err = renameat2(0, pinpath1, 0, pinpath2, RENAME_EXCHANGE);
+ if (!ASSERT_OK(err, "swap pins"))
+ goto out;
+
+ /* load the object again, this time the re-use should fail */
+ skel1 = test_pinning_devmap__open_and_load();
+ if (!ASSERT_ERR_PTR(skel1, "skel_load3"))
+ goto out;
+
+out:
+ unlink(pinpath1);
+ unlink(pinpath2);
+ test_pinning_devmap__destroy(skel1);
+ test_pinning_devmap__destroy(skel2);
+}
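
The pin swap above works because renameat2() with RENAME_EXCHANGE atomically exchanges two existing paths; on bpffs that swaps which pinned object each name refers to, so the third open_and_load finds maps whose properties no longer match and correctly refuses to reuse them. A sketch of the call (glibc >= 2.28 provides the wrapper; the test passes 0 for the dirfds, which is fine with absolute paths, AT_FDCWD being the conventional spelling):

#define _GNU_SOURCE
#include <fcntl.h>	/* AT_FDCWD */
#include <stdio.h>	/* renameat2(), RENAME_EXCHANGE (glibc >= 2.28) */

/* atomically exchange two names; both paths must already exist */
static int swap_pins(const char *a, const char *b)
{
	return renameat2(AT_FDCWD, a, AT_FDCWD, b, RENAME_EXCHANGE);
}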
diff --git a/tools/testing/selftests/bpf/prog_tests/pinning_htab.c b/tools/testing/selftests/bpf/prog_tests/pinning_htab.c
new file mode 100644
index 000000000000..16bd74be3dbe
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/pinning_htab.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "test_pinning_htab.skel.h"
+
+static void unpin_map(const char *map_name, const char *pin_path)
+{
+ struct test_pinning_htab *skel;
+ struct bpf_map *map;
+ int err;
+
+ skel = test_pinning_htab__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel open_and_load"))
+ return;
+
+ map = bpf_object__find_map_by_name(skel->obj, map_name);
+ if (!ASSERT_OK_PTR(map, "bpf_object__find_map_by_name"))
+ goto out;
+
+ err = bpf_map__pin(map, pin_path);
+ if (!ASSERT_OK(err, "bpf_map__pin"))
+ goto out;
+
+ err = bpf_map__unpin(map, pin_path);
+ ASSERT_OK(err, "bpf_map__unpin");
+out:
+ test_pinning_htab__destroy(skel);
+}
+
+void test_pinning_htab(void)
+{
+ if (test__start_subtest("timer_prealloc"))
+ unpin_map("timer_prealloc", "/sys/fs/bpf/timer_prealloc");
+ if (test__start_subtest("timer_no_prealloc"))
+ unpin_map("timer_no_prealloc", "/sys/fs/bpf/timer_no_prealloc");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_access.c
index 44b514fabccd..682e4ff45b01 100644
--- a/tools/testing/selftests/bpf/prog_tests/pkt_access.c
+++ b/tools/testing/selftests/bpf/prog_tests/pkt_access.c
@@ -4,25 +4,29 @@
void test_pkt_access(void)
{
- const char *file = "./test_pkt_access.o";
+ const char *file = "./test_pkt_access.bpf.o";
struct bpf_object *obj;
- __u32 duration, retval;
int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 100000,
+ );
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
- err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv4",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "ipv4 test_run_opts err");
+ ASSERT_OK(topts.retval, "ipv4 test_run_opts retval");
+
+ topts.data_in = &pkt_v6;
+ topts.data_size_in = sizeof(pkt_v6);
+ topts.data_size_out = 0; /* reset from last call */
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "ipv6 test_run_opts err");
+ ASSERT_OK(topts.retval, "ipv6 test_run_opts retval");
- err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv6",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
index 939015cd6dba..0d85e0642811 100644
--- a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
+++ b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
@@ -4,20 +4,22 @@
void test_pkt_md_access(void)
{
- const char *file = "./test_pkt_md_access.o";
+ const char *file = "./test_pkt_md_access.bpf.o";
struct bpf_object *obj;
- __u32 duration, retval;
int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 10,
+ );
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
- err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run_opts err");
+ ASSERT_OK(topts.retval, "test_run_opts retval");
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/preempt_lock.c b/tools/testing/selftests/bpf/prog_tests/preempt_lock.c
new file mode 100644
index 000000000000..02917c672441
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/preempt_lock.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <preempt_lock.skel.h>
+
+void test_preempt_lock(void)
+{
+ RUN_TESTS(preempt_lock);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/preempted_bpf_ma_op.c b/tools/testing/selftests/bpf/prog_tests/preempted_bpf_ma_op.c
new file mode 100644
index 000000000000..3a2ec3923fca
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/preempted_bpf_ma_op.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <test_progs.h>
+
+#include "preempted_bpf_ma_op.skel.h"
+
+#define ALLOC_THREAD_NR 4
+#define ALLOC_LOOP_NR 512
+
+struct alloc_ctx {
+ /* output */
+ int run_err;
+ /* input */
+ int fd;
+ bool *nomem_err;
+};
+
+static void *run_alloc_prog(void *data)
+{
+ struct alloc_ctx *ctx = data;
+ cpu_set_t cpu_set;
+ int i;
+
+ CPU_ZERO(&cpu_set);
+ CPU_SET(0, &cpu_set);
+ pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
+
+ for (i = 0; i < ALLOC_LOOP_NR && !*ctx->nomem_err; i++) {
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err;
+
+ err = bpf_prog_test_run_opts(ctx->fd, &topts);
+ ctx->run_err |= err | topts.retval;
+ }
+
+ return NULL;
+}
+
+void test_preempted_bpf_ma_op(void)
+{
+ struct alloc_ctx ctx[ALLOC_THREAD_NR];
+ struct preempted_bpf_ma_op *skel;
+ pthread_t tid[ALLOC_THREAD_NR];
+ int i, err;
+
+ skel = preempted_bpf_ma_op__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ err = preempted_bpf_ma_op__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(ctx); i++) {
+ struct bpf_program *prog;
+ char name[8];
+
+ snprintf(name, sizeof(name), "test%d", i);
+ prog = bpf_object__find_program_by_name(skel->obj, name);
+ if (!ASSERT_OK_PTR(prog, "no test prog"))
+ goto out;
+
+ ctx[i].run_err = 0;
+ ctx[i].fd = bpf_program__fd(prog);
+ ctx[i].nomem_err = &skel->bss->nomem_err;
+ }
+
+ memset(tid, 0, sizeof(tid));
+ for (i = 0; i < ARRAY_SIZE(tid); i++) {
+ err = pthread_create(&tid[i], NULL, run_alloc_prog, &ctx[i]);
+ if (!ASSERT_OK(err, "pthread_create"))
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tid); i++) {
+ if (!tid[i])
+ break;
+ pthread_join(tid[i], NULL);
+ ASSERT_EQ(ctx[i].run_err, 0, "run prog err");
+ }
+
+ ASSERT_FALSE(skel->bss->nomem_err, "ENOMEM");
+out:
+ preempted_bpf_ma_op__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/prepare.c b/tools/testing/selftests/bpf/prog_tests/prepare.c
new file mode 100644
index 000000000000..fb5cdad97116
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/prepare.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "prepare.skel.h"
+
+static bool check_prepared(struct bpf_object *obj)
+{
+ bool is_prepared = true;
+ const struct bpf_map *map;
+
+ bpf_object__for_each_map(map, obj) {
+ if (bpf_map__fd(map) < 0)
+ is_prepared = false;
+ }
+
+ return is_prepared;
+}
+
+static void test_prepare_no_load(void)
+{
+ struct prepare *skel;
+ int err;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ );
+
+ skel = prepare__open();
+ if (!ASSERT_OK_PTR(skel, "prepare__open"))
+ return;
+
+ if (!ASSERT_FALSE(check_prepared(skel->obj), "not check_prepared"))
+ goto cleanup;
+
+ err = bpf_object__prepare(skel->obj);
+
+ if (!ASSERT_TRUE(check_prepared(skel->obj), "check_prepared"))
+ goto cleanup;
+
+ if (!ASSERT_OK(err, "bpf_object__prepare"))
+ goto cleanup;
+
+cleanup:
+ prepare__destroy(skel);
+}
+
+static void test_prepare_load(void)
+{
+ struct prepare *skel;
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ );
+
+ skel = prepare__open();
+ if (!ASSERT_OK_PTR(skel, "prepare__open"))
+ return;
+
+ if (!ASSERT_FALSE(check_prepared(skel->obj), "not check_prepared"))
+ goto cleanup;
+
+ err = bpf_object__prepare(skel->obj);
+ if (!ASSERT_OK(err, "bpf_object__prepare"))
+ goto cleanup;
+
+ err = prepare__load(skel);
+ if (!ASSERT_OK(err, "prepare__load"))
+ goto cleanup;
+
+ if (!ASSERT_TRUE(check_prepared(skel->obj), "check_prepared"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.program);
+ if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
+ goto cleanup;
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ goto cleanup;
+
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ goto cleanup;
+
+ ASSERT_EQ(skel->bss->err, 0, "err");
+
+cleanup:
+ prepare__destroy(skel);
+}
+
+void test_prepare(void)
+{
+ if (test__start_subtest("prepare_load"))
+ test_prepare_load();
+ if (test__start_subtest("prepare_no_load"))
+ test_prepare_no_load();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/pro_epilogue.c b/tools/testing/selftests/bpf/prog_tests/pro_epilogue.c
new file mode 100644
index 000000000000..5d3c00a08a88
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/pro_epilogue.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "pro_epilogue.skel.h"
+#include "epilogue_tailcall.skel.h"
+#include "pro_epilogue_goto_start.skel.h"
+#include "epilogue_exit.skel.h"
+#include "pro_epilogue_with_kfunc.skel.h"
+
+struct st_ops_args {
+ __u64 a;
+};
+
+static void test_tailcall(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct epilogue_tailcall *skel;
+ struct st_ops_args args;
+ int err, prog_fd;
+
+ skel = epilogue_tailcall__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "epilogue_tailcall__open_and_load"))
+ return;
+
+ topts.ctx_in = &args;
+ topts.ctx_size_in = sizeof(args);
+
+ skel->links.epilogue_tailcall =
+ bpf_map__attach_struct_ops(skel->maps.epilogue_tailcall);
+ if (!ASSERT_OK_PTR(skel->links.epilogue_tailcall, "attach_struct_ops"))
+ goto done;
+
+	/* Both test_epilogue_tailcall and test_epilogue_subprog are
+	 * patched with an epilogue. When syscall_epilogue_tailcall()
+	 * is run, test_epilogue_tailcall() is triggered.
+	 * It executes a tail call and control is transferred to
+	 * test_epilogue_subprog(). Only test_epilogue_subprog()
+	 * does args->a += 1, so a final args.a value of 10001
+	 * guarantees that only the epilogue of
+	 * test_epilogue_subprog() is executed.
+	 */
+ memset(&args, 0, sizeof(args));
+ prog_fd = bpf_program__fd(skel->progs.syscall_epilogue_tailcall);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+ ASSERT_EQ(args.a, 10001, "args.a");
+ ASSERT_EQ(topts.retval, 10001 * 2, "topts.retval");
+
+done:
+ epilogue_tailcall__destroy(skel);
+}
+
+void test_pro_epilogue(void)
+{
+ RUN_TESTS(pro_epilogue);
+ RUN_TESTS(pro_epilogue_goto_start);
+ RUN_TESTS(epilogue_exit);
+ RUN_TESTS(pro_epilogue_with_kfunc);
+ if (test__start_subtest("tailcall"))
+ test_tailcall();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c
index 7aecfd9e87d1..8721671321de 100644
--- a/tools/testing/selftests/bpf/prog_tests/probe_user.c
+++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c
@@ -1,27 +1,38 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
-void test_probe_user(void)
+/* TODO: corrupts other tests that use connect() */
+void serial_test_probe_user(void)
{
- const char *prog_name = "kprobe/__sys_connect";
- const char *obj_file = "./test_probe_user.o";
+ static const char *const prog_names[] = {
+ "handle_sys_connect",
+#if defined(__s390x__)
+ "handle_sys_socketcall",
+#endif
+ };
+ enum { prog_count = ARRAY_SIZE(prog_names) };
+ const char *obj_file = "./test_probe_user.bpf.o";
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, );
int err, results_map_fd, sock_fd, duration = 0;
struct sockaddr curr, orig, tmp;
struct sockaddr_in *in = (struct sockaddr_in *)&curr;
- struct bpf_link *kprobe_link = NULL;
- struct bpf_program *kprobe_prog;
+ struct bpf_link *kprobe_links[prog_count] = {};
+ struct bpf_program *kprobe_progs[prog_count];
struct bpf_object *obj;
static const int zero = 0;
+ size_t i;
obj = bpf_object__open_file(obj_file, &opts);
- if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj)))
+ if (!ASSERT_OK_PTR(obj, "obj_open_file"))
return;
- kprobe_prog = bpf_object__find_program_by_title(obj, prog_name);
- if (CHECK(!kprobe_prog, "find_probe",
- "prog '%s' not found\n", prog_name))
- goto cleanup;
+ for (i = 0; i < prog_count; i++) {
+ kprobe_progs[i] =
+ bpf_object__find_program_by_name(obj, prog_names[i]);
+ if (CHECK(!kprobe_progs[i], "find_probe",
+ "prog '%s' not found\n", prog_names[i]))
+ goto cleanup;
+ }
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d\n", err))
@@ -32,11 +43,10 @@ void test_probe_user(void)
"err %d\n", results_map_fd))
goto cleanup;
- kprobe_link = bpf_program__attach(kprobe_prog);
- if (CHECK(IS_ERR(kprobe_link), "attach_kprobe",
- "err %ld\n", PTR_ERR(kprobe_link))) {
- kprobe_link = NULL;
- goto cleanup;
+ for (i = 0; i < prog_count; i++) {
+ kprobe_links[i] = bpf_program__attach(kprobe_progs[i]);
+ if (!ASSERT_OK_PTR(kprobe_links[i], "attach_kprobe"))
+ goto cleanup;
}
memset(&curr, 0, sizeof(curr));
@@ -71,6 +81,7 @@ void test_probe_user(void)
inet_ntoa(in->sin_addr), ntohs(in->sin_port)))
goto cleanup;
cleanup:
- bpf_link__destroy(kprobe_link);
+ for (i = 0; i < prog_count; i++)
+ bpf_link__destroy(kprobe_links[i]);
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_array_init.c b/tools/testing/selftests/bpf/prog_tests/prog_array_init.c
new file mode 100644
index 000000000000..fc4657619739
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/prog_array_init.c
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Hengqi Chen */
+
+#include <test_progs.h>
+#include "test_prog_array_init.skel.h"
+
+void test_prog_array_init(void)
+{
+ struct test_prog_array_init *skel;
+ int err;
+
+ skel = test_prog_array_init__open();
+ if (!ASSERT_OK_PTR(skel, "could not open BPF object"))
+ return;
+
+ skel->rodata->my_pid = getpid();
+
+ err = test_prog_array_init__load(skel);
+ if (!ASSERT_OK(err, "could not load BPF object"))
+ goto cleanup;
+
+ skel->links.entry = bpf_program__attach_raw_tracepoint(skel->progs.entry, "sys_enter");
+ if (!ASSERT_OK_PTR(skel->links.entry, "could not attach BPF program"))
+ goto cleanup;
+
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->value, 42, "unexpected value");
+
+cleanup:
+ test_prog_array_init__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c b/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c
new file mode 100644
index 000000000000..01f1d1b6715a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "test_pkt_access.skel.h"
+
+static const __u32 duration;
+
+static void check_run_cnt(int prog_fd, __u64 run_cnt)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int err;
+
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
+ if (CHECK(err, "get_prog_info", "failed to get bpf_prog_info for fd %d\n", prog_fd))
+ return;
+
+ CHECK(run_cnt != info.run_cnt, "run_cnt",
+ "incorrect number of repetitions, want %llu have %llu\n", run_cnt, info.run_cnt);
+}
+
+void test_prog_run_opts(void)
+{
+ struct test_pkt_access *skel;
+ int err, stats_fd = -1, prog_fd;
+ char buf[10] = {};
+ __u64 run_cnt = 0;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .repeat = 1,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = buf,
+ .data_size_out = 5,
+ );
+
+ stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);
+ if (!ASSERT_GE(stats_fd, 0, "enable_stats good fd"))
+ return;
+
+ skel = test_pkt_access__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.test_pkt_access);
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_EQ(errno, ENOSPC, "test_run errno");
+ ASSERT_ERR(err, "test_run");
+ ASSERT_OK(topts.retval, "test_run retval");
+
+ ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4), "test_run data_size_out");
+ ASSERT_EQ(buf[5], 0, "overflow, BPF_PROG_TEST_RUN ignored size hint");
+
+ run_cnt += topts.repeat;
+ check_run_cnt(prog_fd, run_cnt);
+
+ topts.data_out = NULL;
+ topts.data_size_out = 0;
+ topts.repeat = 2;
+ errno = 0;
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(errno, "run_no_output errno");
+ ASSERT_OK(err, "run_no_output err");
+ ASSERT_OK(topts.retval, "run_no_output retval");
+
+ run_cnt += topts.repeat;
+ check_run_cnt(prog_fd, run_cnt);
+
+cleanup:
+ if (skel)
+ test_pkt_access__destroy(skel);
+ if (stats_fd >= 0)
+ close(stats_fd);
+}
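
A note on check_run_cnt() above: bpf_prog_info.run_cnt and run_time_ns are only accumulated while run-time statistics are enabled, which is why the test opens a BPF_STATS_RUN_TIME fd first; accounting stops once the last such fd is closed (the kernel.bpf_stats_enabled sysctl has the same effect). A sketch of the pattern, mine with error handling trimmed:

#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>

static void show_run_stats(int prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);
	int stats_fd;

	stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);	/* start accounting */
	/* ... trigger the program here ... */
	if (!bpf_prog_get_info_by_fd(prog_fd, &info, &len))
		printf("run_cnt=%llu run_time_ns=%llu\n",
		       (unsigned long long)info.run_cnt,
		       (unsigned long long)info.run_time_ns);
	close(stats_fd);	/* accounting stops with the last open stats fd */
}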
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
deleted file mode 100644
index 935a294f049a..000000000000
--- a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
+++ /dev/null
@@ -1,50 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <test_progs.h>
-#include <network_helpers.h>
-
-void test_prog_run_xattr(void)
-{
- const char *file = "./test_pkt_access.o";
- struct bpf_object *obj;
- char buf[10];
- int err;
- struct bpf_prog_test_run_attr tattr = {
- .repeat = 1,
- .data_in = &pkt_v4,
- .data_size_in = sizeof(pkt_v4),
- .data_out = buf,
- .data_size_out = 5,
- };
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj,
- &tattr.prog_fd);
- if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
- return;
-
- memset(buf, 0, sizeof(buf));
-
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run",
- "err %d errno %d retval %d\n", err, errno, tattr.retval);
-
- CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
- "incorrect output size, want %zu have %u\n",
- sizeof(pkt_v4), tattr.data_size_out);
-
- CHECK_ATTR(buf[5] != 0, "overflow",
- "BPF_PROG_TEST_RUN ignored size hint\n");
-
- tattr.data_out = NULL;
- tattr.data_size_out = 0;
- errno = 0;
-
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err || errno || tattr.retval, "run_no_output",
- "err %d errno %d retval %d\n", err, errno, tattr.retval);
-
- tattr.data_size_out = 1;
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err);
-
- bpf_object__close(obj);
-}
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c
new file mode 100644
index 000000000000..7607cfc2408c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+#include "test_progs.h"
+#include "testing_helpers.h"
+
+static void clear_test_state(struct test_state *state)
+{
+ state->error_cnt = 0;
+ state->sub_succ_cnt = 0;
+ state->skip_cnt = 0;
+}
+
+void test_prog_tests_framework(void)
+{
+ struct test_state *state = env.test_state;
+
+	/* in all the ASSERT calls below we need to return on the
+	 * first error because we are clearing the test state after
+	 * each dummy subtest
+	 */
+
+ /* test we properly count skipped tests with subtests */
+ if (test__start_subtest("test_good_subtest"))
+ test__end_subtest();
+ if (!ASSERT_EQ(state->skip_cnt, 0, "skip_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->error_cnt, 0, "error_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->subtest_num, 1, "subtest_num_check"))
+ return;
+ clear_test_state(state);
+
+ if (test__start_subtest("test_skip_subtest")) {
+ test__skip();
+ test__end_subtest();
+ }
+ if (test__start_subtest("test_skip_subtest")) {
+ test__skip();
+ test__end_subtest();
+ }
+ if (!ASSERT_EQ(state->skip_cnt, 2, "skip_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->subtest_num, 3, "subtest_num_check"))
+ return;
+ clear_test_state(state);
+
+ if (test__start_subtest("test_fail_subtest")) {
+ test__fail();
+ test__end_subtest();
+ }
+ if (!ASSERT_EQ(state->error_cnt, 1, "error_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->subtest_num, 4, "subtest_num_check"))
+ return;
+ clear_test_state(state);
+}
+
+static void dummy_emit(const char *buf, bool force) {}
+
+void test_prog_tests_framework_expected_msgs(void)
+{
+ struct expected_msgs msgs;
+ int i, j, error_cnt;
+ const struct {
+ const char *name;
+ const char *log;
+ const char *expected;
+ struct expect_msg *pats;
+ } cases[] = {
+ {
+ .name = "simple-ok",
+ .log = "aaabbbccc",
+ .pats = (struct expect_msg[]) {
+ { .substr = "aaa" },
+ { .substr = "ccc" },
+ {}
+ }
+ },
+ {
+ .name = "simple-fail",
+ .log = "aaabbbddd",
+ .expected = "MATCHED SUBSTR: 'aaa'\n"
+ "EXPECTED SUBSTR: 'ccc'\n",
+ .pats = (struct expect_msg[]) {
+ { .substr = "aaa" },
+ { .substr = "ccc" },
+ {}
+ }
+ },
+ {
+ .name = "negative-ok-mid",
+ .log = "aaabbbccc",
+ .pats = (struct expect_msg[]) {
+ { .substr = "aaa" },
+ { .substr = "foo", .negative = true },
+ { .substr = "bar", .negative = true },
+ { .substr = "ccc" },
+ {}
+ }
+ },
+ {
+ .name = "negative-ok-tail",
+ .log = "aaabbbccc",
+ .pats = (struct expect_msg[]) {
+ { .substr = "aaa" },
+ { .substr = "foo", .negative = true },
+ {}
+ }
+ },
+ {
+ .name = "negative-ok-head",
+ .log = "aaabbbccc",
+ .pats = (struct expect_msg[]) {
+ { .substr = "foo", .negative = true },
+ { .substr = "ccc" },
+ {}
+ }
+ },
+ {
+ .name = "negative-fail-head",
+ .log = "aaabbbccc",
+ .expected = "UNEXPECTED SUBSTR: 'aaa'\n",
+ .pats = (struct expect_msg[]) {
+ { .substr = "aaa", .negative = true },
+ { .substr = "bbb" },
+ {}
+ }
+ },
+ {
+ .name = "negative-fail-tail",
+ .log = "aaabbbccc",
+ .expected = "UNEXPECTED SUBSTR: 'ccc'\n",
+ .pats = (struct expect_msg[]) {
+ { .substr = "bbb" },
+ { .substr = "ccc", .negative = true },
+ {}
+ }
+ },
+ {
+ .name = "negative-fail-mid-1",
+ .log = "aaabbbccc",
+ .expected = "UNEXPECTED SUBSTR: 'bbb'\n",
+ .pats = (struct expect_msg[]) {
+ { .substr = "aaa" },
+ { .substr = "bbb", .negative = true },
+ { .substr = "ccc" },
+ {}
+ }
+ },
+ {
+ .name = "negative-fail-mid-2",
+ .log = "aaabbb222ccc",
+ .expected = "UNEXPECTED SUBSTR: '222'\n",
+ .pats = (struct expect_msg[]) {
+ { .substr = "aaa" },
+ { .substr = "222", .negative = true },
+ { .substr = "bbb", .negative = true },
+ { .substr = "ccc" },
+ {}
+ }
+ }
+ };
+
+ for (i = 0; i < ARRAY_SIZE(cases); i++) {
+ if (test__start_subtest(cases[i].name)) {
+ error_cnt = env.subtest_state->error_cnt;
+ msgs.patterns = cases[i].pats;
+ msgs.cnt = 0;
+ for (j = 0; cases[i].pats[j].substr; j++)
+ msgs.cnt++;
+ validate_msgs(cases[i].log, &msgs, dummy_emit);
+ fflush(stderr);
+ env.subtest_state->error_cnt = error_cnt;
+ if (cases[i].expected)
+ ASSERT_HAS_SUBSTR(env.subtest_state->log_buf, cases[i].expected, "expected output");
+ else
+ ASSERT_STREQ(env.subtest_state->log_buf, "", "expected no output");
+ test__end_subtest();
+ }
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/ptr_untrusted.c b/tools/testing/selftests/bpf/prog_tests/ptr_untrusted.c
new file mode 100644
index 000000000000..8d077d150c56
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/ptr_untrusted.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */
+
+#include <string.h>
+#include <linux/bpf.h>
+#include <test_progs.h>
+#include "test_ptr_untrusted.skel.h"
+
+#define TP_NAME "sched_switch"
+
+void serial_test_ptr_untrusted(void)
+{
+ struct test_ptr_untrusted *skel;
+ int err;
+
+ skel = test_ptr_untrusted__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ /* First, attach lsm prog */
+ skel->links.lsm_run = bpf_program__attach_lsm(skel->progs.lsm_run);
+ if (!ASSERT_OK_PTR(skel->links.lsm_run, "lsm_attach"))
+ goto cleanup;
+
+ /* Second, attach raw_tp prog. The lsm prog will be triggered. */
+ skel->links.raw_tp_run = bpf_program__attach_raw_tracepoint(skel->progs.raw_tp_run,
+ TP_NAME);
+ if (!ASSERT_OK_PTR(skel->links.raw_tp_run, "raw_tp_attach"))
+ goto cleanup;
+
+ err = strncmp(skel->bss->tp_name, TP_NAME, strlen(TP_NAME));
+ ASSERT_EQ(err, 0, "cmp_tp_name");
+
+cleanup:
+ test_ptr_untrusted__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
index f47e7b1cb32c..a043af9cd6d9 100644
--- a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
@@ -10,24 +10,31 @@ enum {
static void test_queue_stack_map_by_type(int type)
{
const int MAP_SIZE = 32;
- __u32 vals[MAP_SIZE], duration, retval, size, val;
+ __u32 vals[MAP_SIZE], val;
int i, err, prog_fd, map_in_fd, map_out_fd;
char file[32], buf[128];
struct bpf_object *obj;
- struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
+ struct iphdr iph = {};
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = buf,
+ .data_size_out = sizeof(buf),
+ .repeat = 1,
+ );
/* Fill test values to be used */
for (i = 0; i < MAP_SIZE; i++)
vals[i] = rand();
if (type == QUEUE)
- strncpy(file, "./test_queue_map.o", sizeof(file));
+ strncpy(file, "./test_queue_map.bpf.o", sizeof(file));
else if (type == STACK)
- strncpy(file, "./test_stack_map.o", sizeof(file));
+ strncpy(file, "./test_stack_map.bpf.o", sizeof(file));
else
return;
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
@@ -58,36 +65,37 @@ static void test_queue_stack_map_by_type(int type)
pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5;
}
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- if (err || retval || size != sizeof(pkt_v4) ||
- iph->daddr != val)
+ topts.data_size_out = sizeof(buf);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (err || topts.retval ||
+ topts.data_size_out != sizeof(pkt_v4))
+ break;
+ memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph));
+ if (iph.daddr != val)
break;
}
- CHECK(err || retval || size != sizeof(pkt_v4) || iph->daddr != val,
- "bpf_map_pop_elem",
- "err %d errno %d retval %d size %d iph->daddr %u\n",
- err, errno, retval, size, iph->daddr);
+ ASSERT_OK(err, "bpf_map_pop_elem");
+ ASSERT_OK(topts.retval, "bpf_map_pop_elem test retval");
+ ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4),
+ "bpf_map_pop_elem data_size_out");
+ ASSERT_EQ(iph.daddr, val, "bpf_map_pop_elem iph.daddr");
/* Queue is empty, program should return TC_ACT_SHOT */
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 2 /* TC_ACT_SHOT */|| size != sizeof(pkt_v4),
- "check-queue-stack-map-empty",
- "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
+ topts.data_size_out = sizeof(buf);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "check-queue-stack-map-empty");
+ ASSERT_EQ(topts.retval, 2 /* TC_ACT_SHOT */,
+ "check-queue-stack-map-empty test retval");
+ ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4),
+ "check-queue-stack-map-empty data_size_out");
/* Check that the program pushed elements correctly */
for (i = 0; i < MAP_SIZE; i++) {
err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val);
- if (err || val != vals[i] * 5)
- break;
+ ASSERT_OK(err, "bpf_map_lookup_and_delete_elem");
+ ASSERT_EQ(val, vals[i] * 5, "bpf_map_push_elem val");
}
-
- CHECK(i != MAP_SIZE && (err || val != vals[i] * 5),
- "bpf_map_push_elem", "err %d value %u\n", err, val);
-
out:
pkt_v4.iph.saddr = 0;
bpf_object__close(obj);
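+
+/* Illustrative sketch, not part of the original patch: the opts-based
+ * test-run pattern this conversion adopts, shown in isolation. LIBBPF_OPTS
+ * zero-initializes the struct and fills .sz; data_size_out is an in/out
+ * field, so it must be reset before each run that reuses the same opts.
+ */
+#if 0
+static int run_prog_once(int prog_fd, void *pkt, __u32 pkt_sz,
+ void *out, __u32 out_sz)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = pkt,
+ .data_size_in = pkt_sz,
+ .data_out = out,
+ .data_size_out = out_sz, /* in: capacity, out: bytes written */
+ .repeat = 1,
+ );
+ int err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+ return err ?: topts.retval; /* syscall error, else prog's return code */
+}
+#endif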
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c
new file mode 100644
index 000000000000..43676a9922dc
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "raw_tp_null.skel.h"
+#include "raw_tp_null_fail.skel.h"
+
+void test_raw_tp_null(void)
+{
+ struct raw_tp_null *skel;
+
+ RUN_TESTS(raw_tp_null_fail);
+
+ skel = raw_tp_null__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "raw_tp_null__open_and_load"))
+ return;
+
+ skel->bss->tid = sys_gettid();
+
+ if (!ASSERT_OK(raw_tp_null__attach(skel), "raw_tp_null__attach"))
+ goto end;
+
+ ASSERT_OK(trigger_module_test_read(2), "trigger testmod read");
+ ASSERT_EQ(skel->bss->i, 3, "invocations");
+
+end:
+ raw_tp_null__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
index c5fb191874ac..fe5b8fae2c36 100644
--- a/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
@@ -5,59 +5,54 @@
#include "bpf/libbpf_internal.h"
#include "test_raw_tp_test_run.skel.h"
-static int duration;
-
void test_raw_tp_test_run(void)
{
- struct bpf_prog_test_run_attr test_attr = {};
int comm_fd = -1, err, nr_online, i, prog_fd;
__u64 args[2] = {0x1234ULL, 0x5678ULL};
int expected_retval = 0x1234 + 0x5678;
struct test_raw_tp_test_run *skel;
char buf[] = "new_name";
bool *online = NULL;
- DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
- .ctx_in = args,
- .ctx_size_in = sizeof(args),
- .flags = BPF_F_TEST_RUN_ON_CPU,
- );
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .ctx_in = args,
+ .ctx_size_in = sizeof(args),
+ .flags = BPF_F_TEST_RUN_ON_CPU,
+ );
err = parse_cpu_mask_file("/sys/devices/system/cpu/online", &online,
&nr_online);
- if (CHECK(err, "parse_cpu_mask_file", "err %d\n", err))
+ if (!ASSERT_OK(err, "parse_cpu_mask_file"))
return;
skel = test_raw_tp_test_run__open_and_load();
- if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
err = test_raw_tp_test_run__attach(skel);
- if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
comm_fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
- if (CHECK(comm_fd < 0, "open /proc/self/comm", "err %d\n", errno))
+ if (!ASSERT_GE(comm_fd, 0, "open /proc/self/comm"))
goto cleanup;
err = write(comm_fd, buf, sizeof(buf));
- CHECK(err < 0, "task rename", "err %d", errno);
+ ASSERT_GE(err, 0, "task rename");
- CHECK(skel->bss->count == 0, "check_count", "didn't increase\n");
- CHECK(skel->data->on_cpu != 0xffffffff, "check_on_cpu", "got wrong value\n");
+ ASSERT_NEQ(skel->bss->count, 0, "check_count");
+ ASSERT_EQ(skel->data->on_cpu, 0xffffffff, "check_on_cpu");
prog_fd = bpf_program__fd(skel->progs.rename);
- test_attr.prog_fd = prog_fd;
- test_attr.ctx_in = args;
- test_attr.ctx_size_in = sizeof(__u64);
+ opts.ctx_in = args;
+ opts.ctx_size_in = sizeof(__u64);
- err = bpf_prog_test_run_xattr(&test_attr);
- CHECK(err == 0, "test_run", "should fail for too small ctx\n");
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ ASSERT_NEQ(err, 0, "test_run should fail for too small ctx");
- test_attr.ctx_size_in = sizeof(args);
- err = bpf_prog_test_run_xattr(&test_attr);
- CHECK(err < 0, "test_run", "err %d\n", errno);
- CHECK(test_attr.retval != expected_retval, "check_retval",
- "expect 0x%x, got 0x%x\n", expected_retval, test_attr.retval);
+ opts.ctx_size_in = sizeof(args);
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(opts.retval, expected_retval, "check_retval");
for (i = 0; i < nr_online; i++) {
if (!online[i])
@@ -66,28 +61,23 @@ void test_raw_tp_test_run(void)
opts.cpu = i;
opts.retval = 0;
err = bpf_prog_test_run_opts(prog_fd, &opts);
- CHECK(err < 0, "test_run_opts", "err %d\n", errno);
- CHECK(skel->data->on_cpu != i, "check_on_cpu",
- "expect %d got %d\n", i, skel->data->on_cpu);
- CHECK(opts.retval != expected_retval,
- "check_retval", "expect 0x%x, got 0x%x\n",
- expected_retval, opts.retval);
+ ASSERT_OK(err, "test_run_opts");
+ ASSERT_EQ(skel->data->on_cpu, i, "check_on_cpu");
+ ASSERT_EQ(opts.retval, expected_retval, "check_retval");
}
/* invalid cpu ID should fail with ENXIO */
opts.cpu = 0xffffffff;
err = bpf_prog_test_run_opts(prog_fd, &opts);
- CHECK(err != -1 || errno != ENXIO,
- "test_run_opts_fail",
- "should failed with ENXIO\n");
+ ASSERT_EQ(errno, ENXIO, "test_run_opts should fail with ENXIO");
+ ASSERT_ERR(err, "test_run_opts_fail");
/* non-zero cpu w/o BPF_F_TEST_RUN_ON_CPU should fail with EINVAL */
opts.cpu = 1;
opts.flags = 0;
err = bpf_prog_test_run_opts(prog_fd, &opts);
- CHECK(err != -1 || errno != EINVAL,
- "test_run_opts_fail",
- "should failed with EINVAL\n");
+ ASSERT_EQ(errno, EINVAL, "test_run_opts should fail with EINVAL");
+ ASSERT_ERR(err, "test_run_opts_fail");
cleanup:
close(comm_fd);
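+
+/* Illustrative sketch, not part of the original patch (run_on_cpu is a
+ * hypothetical helper): the minimal BPF_F_TEST_RUN_ON_CPU invocation.
+ * ctx_in/ctx_size_in carry the raw_tp argument array; per the asserts
+ * above, an invalid .cpu fails with ENXIO and a non-zero .cpu without
+ * the flag fails with EINVAL.
+ */
+#if 0
+static int run_on_cpu(int prog_fd, __u64 *args, int cpu)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .ctx_in = args,
+ .ctx_size_in = 2 * sizeof(__u64), /* two raw_tp args */
+ .flags = BPF_F_TEST_RUN_ON_CPU,
+ .cpu = cpu,
+ );
+
+ return bpf_prog_test_run_opts(prog_fd, &opts);
+}
+#endif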
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c
index 9807336a3016..216b0dfac0fe 100644
--- a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c
@@ -2,6 +2,7 @@
#include <test_progs.h>
#include <linux/nbd.h>
+#include "bpf_util.h"
void test_raw_tp_writable_reject_nbd_invalid(void)
{
@@ -18,15 +19,15 @@ void test_raw_tp_writable_reject_nbd_invalid(void)
BPF_EXIT_INSN(),
};
- struct bpf_load_program_attr load_attr = {
- .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
- .license = "GPL v2",
- .insns = program,
- .insns_cnt = sizeof(program) / sizeof(struct bpf_insn),
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
.log_level = 2,
- };
+ .log_buf = error,
+ .log_size = sizeof(error),
+ );
- bpf_fd = bpf_load_program_xattr(&load_attr, error, sizeof(error));
+ bpf_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, NULL, "GPL v2",
+ program, ARRAY_SIZE(program),
+ &opts);
if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable load",
"failed: %d errno %d\n", bpf_fd, errno))
return;
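+
+/* Illustrative sketch, not part of the original patch: the bpf_prog_load()
+ * opts API that replaces bpf_load_program_xattr() in this series. Insns and
+ * license become positional arguments; the log buffer and level move into
+ * bpf_prog_load_opts.
+ */
+#if 0
+static int load_writable_raw_tp(const struct bpf_insn *insns, size_t insn_cnt,
+ char *log, size_t log_sz)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .log_level = 2,
+ .log_buf = log,
+ .log_size = log_sz,
+ );
+
+ return bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, NULL,
+ "GPL v2", insns, insn_cnt, &opts);
+}
+#endif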
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
index 5c45424cac5f..e3668058b7bb 100644
--- a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
@@ -2,8 +2,10 @@
#include <test_progs.h>
#include <linux/nbd.h>
+#include "bpf_util.h"
-void test_raw_tp_writable_test_run(void)
+/* NOTE: conflicts with other tests, hence the serial_test_ prefix below. */
+void serial_test_raw_tp_writable_test_run(void)
{
__u32 duration = 0;
char error[4096];
@@ -16,15 +18,15 @@ void test_raw_tp_writable_test_run(void)
BPF_EXIT_INSN(),
};
- struct bpf_load_program_attr load_attr = {
- .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
- .license = "GPL v2",
- .insns = trace_program,
- .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn),
+ LIBBPF_OPTS(bpf_prog_load_opts, trace_opts,
.log_level = 2,
- };
+ .log_buf = error,
+ .log_size = sizeof(error),
+ );
- int bpf_fd = bpf_load_program_xattr(&load_attr, error, sizeof(error));
+ int bpf_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, NULL, "GPL v2",
+ trace_program, ARRAY_SIZE(trace_program),
+ &trace_opts);
if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable loaded",
"failed: %d errno %d\n", bpf_fd, errno))
return;
@@ -34,15 +36,14 @@ void test_raw_tp_writable_test_run(void)
BPF_EXIT_INSN(),
};
- struct bpf_load_program_attr skb_load_attr = {
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .license = "GPL v2",
- .insns = skb_program,
- .insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn),
- };
+ LIBBPF_OPTS(bpf_prog_load_opts, skb_opts,
+ .log_buf = error,
+ .log_size = sizeof(error),
+ );
- int filter_fd =
- bpf_load_program_xattr(&skb_load_attr, error, sizeof(error));
+ int filter_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL v2",
+ skb_program, ARRAY_SIZE(skb_program),
+ &skb_opts);
if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n",
filter_fd, errno))
goto out_bpffd;
@@ -56,21 +57,23 @@ void test_raw_tp_writable_test_run(void)
0,
};
- __u32 prog_ret;
- int err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0,
- 0, &prog_ret, 0);
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = test_skb,
+ .data_size_in = sizeof(test_skb),
+ .repeat = 1,
+ );
+ int err = bpf_prog_test_run_opts(filter_fd, &topts);
CHECK(err != 42, "test_run",
"tracepoint did not modify return value\n");
- CHECK(prog_ret != 0, "test_run_ret",
+ CHECK(topts.retval != 0, "test_run_ret",
"socket_filter did not return 0\n");
close(tp_fd);
- err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0, 0,
- &prog_ret, 0);
+ err = bpf_prog_test_run_opts(filter_fd, &topts);
CHECK(err != 0, "test_run_notrace",
"test_run failed with %d errno %d\n", err, errno);
- CHECK(prog_ret != 0, "test_run_ret_notrace",
+ CHECK(topts.retval != 0, "test_run_ret_notrace",
"socket_filter did not return 0\n");
out_filterfd:
diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree.c b/tools/testing/selftests/bpf/prog_tests/rbtree.c
new file mode 100644
index 000000000000..d8f3d7a45fe9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/rbtree.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "rbtree.skel.h"
+#include "rbtree_fail.skel.h"
+#include "rbtree_btf_fail__wrong_node_type.skel.h"
+#include "rbtree_btf_fail__add_wrong_type.skel.h"
+#include "rbtree_search.skel.h"
+
+static void test_rbtree_add_nodes(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct rbtree *skel;
+ int ret;
+
+ skel = rbtree__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes), &opts);
+ ASSERT_OK(ret, "rbtree_add_nodes run");
+ ASSERT_OK(opts.retval, "rbtree_add_nodes retval");
+ ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes less_callback_ran");
+
+ rbtree__destroy(skel);
+}
+
+static void test_rbtree_add_nodes_nested(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct rbtree *skel;
+ int ret;
+
+ skel = rbtree__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes_nested), &opts);
+ ASSERT_OK(ret, "rbtree_add_nodes_nested run");
+ ASSERT_OK(opts.retval, "rbtree_add_nodes_nested retval");
+ ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes_nested less_callback_ran");
+
+ rbtree__destroy(skel);
+}
+
+static void test_rbtree_add_and_remove(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct rbtree *skel;
+ int ret;
+
+ skel = rbtree__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove), &opts);
+ ASSERT_OK(ret, "rbtree_add_and_remove");
+ ASSERT_OK(opts.retval, "rbtree_add_and_remove retval");
+ ASSERT_EQ(skel->data->removed_key, 5, "rbtree_add_and_remove first removed key");
+
+ rbtree__destroy(skel);
+}
+
+static void test_rbtree_add_and_remove_array(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct rbtree *skel;
+ int ret;
+
+ skel = rbtree__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove_array), &opts);
+ ASSERT_OK(ret, "rbtree_add_and_remove_array");
+ ASSERT_OK(opts.retval, "rbtree_add_and_remove_array retval");
+
+ rbtree__destroy(skel);
+}
+
+static void test_rbtree_first_and_remove(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct rbtree *skel;
+ int ret;
+
+ skel = rbtree__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_first_and_remove), &opts);
+ ASSERT_OK(ret, "rbtree_first_and_remove");
+ ASSERT_OK(opts.retval, "rbtree_first_and_remove retval");
+ ASSERT_EQ(skel->data->first_data[0], 2, "rbtree_first_and_remove first rbtree_first()");
+ ASSERT_EQ(skel->data->removed_key, 1, "rbtree_first_and_remove first removed key");
+ ASSERT_EQ(skel->data->first_data[1], 4, "rbtree_first_and_remove second rbtree_first()");
+
+ rbtree__destroy(skel);
+}
+
+static void test_rbtree_api_release_aliasing(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct rbtree *skel;
+ int ret;
+
+ skel = rbtree__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_api_release_aliasing), &opts);
+ ASSERT_OK(ret, "rbtree_api_release_aliasing");
+ ASSERT_OK(opts.retval, "rbtree_api_release_aliasing retval");
+ ASSERT_EQ(skel->data->first_data[0], 42, "rbtree_api_release_aliasing first rbtree_remove()");
+ ASSERT_EQ(skel->data->first_data[1], -1, "rbtree_api_release_aliasing second rbtree_remove()");
+
+ rbtree__destroy(skel);
+}
+
+void test_rbtree_success(void)
+{
+ if (test__start_subtest("rbtree_add_nodes"))
+ test_rbtree_add_nodes();
+ if (test__start_subtest("rbtree_add_nodes_nested"))
+ test_rbtree_add_nodes_nested();
+ if (test__start_subtest("rbtree_add_and_remove"))
+ test_rbtree_add_and_remove();
+ if (test__start_subtest("rbtree_add_and_remove_array"))
+ test_rbtree_add_and_remove_array();
+ if (test__start_subtest("rbtree_first_and_remove"))
+ test_rbtree_first_and_remove();
+ if (test__start_subtest("rbtree_api_release_aliasing"))
+ test_rbtree_api_release_aliasing();
+}
+
+#define BTF_FAIL_TEST(suffix) \
+void test_rbtree_btf_fail__##suffix(void) \
+{ \
+ struct rbtree_btf_fail__##suffix *skel; \
+ \
+ skel = rbtree_btf_fail__##suffix##__open_and_load(); \
+ if (!ASSERT_ERR_PTR(skel, \
+ "rbtree_btf_fail__" #suffix "__open_and_load unexpected success")) \
+ rbtree_btf_fail__##suffix##__destroy(skel); \
+}
+
+#define RUN_BTF_FAIL_TEST(suffix) \
+ if (test__start_subtest("rbtree_btf_fail__" #suffix)) \
+ test_rbtree_btf_fail__##suffix();
+
+BTF_FAIL_TEST(wrong_node_type);
+BTF_FAIL_TEST(add_wrong_type);
+
+void test_rbtree_btf_fail(void)
+{
+ RUN_BTF_FAIL_TEST(wrong_node_type);
+ RUN_BTF_FAIL_TEST(add_wrong_type);
+}
+
+void test_rbtree_fail(void)
+{
+ RUN_TESTS(rbtree_fail);
+}
+
+void test_rbtree_search(void)
+{
+ RUN_TESTS(rbtree_search);
+}
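+
+/* Illustrative sketch, not part of the original patch: the six
+ * test_rbtree_* helpers above share one shape, and a hypothetical runner
+ * like this could factor out the run/assert boilerplate.
+ */
+#if 0
+static void run_rbtree_prog(struct bpf_program *prog, const char *name)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ int ret = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts);
+
+ ASSERT_OK(ret, name);
+ ASSERT_OK(opts.retval, name);
+}
+#endif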
diff --git a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
new file mode 100644
index 000000000000..246eb259c08a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates.*/
+
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "rcu_read_lock.skel.h"
+#include "cgroup_helpers.h"
+
+static unsigned long long cgroup_id;
+
+static void test_success(void)
+{
+ struct rcu_read_lock *skel;
+ int err;
+
+ skel = rcu_read_lock__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->bss->target_pid = sys_gettid();
+
+ bpf_program__set_autoload(skel->progs.get_cgroup_id, true);
+ bpf_program__set_autoload(skel->progs.task_succ, true);
+ bpf_program__set_autoload(skel->progs.two_regions, true);
+ bpf_program__set_autoload(skel->progs.non_sleepable_1, true);
+ bpf_program__set_autoload(skel->progs.non_sleepable_2, true);
+ bpf_program__set_autoload(skel->progs.nested_rcu_region, true);
+ bpf_program__set_autoload(skel->progs.task_trusted_non_rcuptr, true);
+ bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog, true);
+ bpf_program__set_autoload(skel->progs.rcu_read_lock_global_subprog, true);
+ bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog_lock, true);
+ bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog_unlock, true);
+ err = rcu_read_lock__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto out;
+
+ err = rcu_read_lock__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ syscall(SYS_getpgid);
+
+ ASSERT_EQ(skel->bss->task_storage_val, 2, "task_storage_val");
+ ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");
+out:
+ rcu_read_lock__destroy(skel);
+}
+
+static void test_rcuptr_acquire(void)
+{
+ struct rcu_read_lock *skel;
+ int err;
+
+ skel = rcu_read_lock__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->bss->target_pid = sys_gettid();
+
+ bpf_program__set_autoload(skel->progs.task_acquire, true);
+ err = rcu_read_lock__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto out;
+
+ err = rcu_read_lock__attach(skel);
+ ASSERT_OK(err, "skel_attach");
+out:
+ rcu_read_lock__destroy(skel);
+}
+
+static const char * const inproper_region_tests[] = {
+ "miss_lock",
+ "no_lock",
+ "miss_unlock",
+ "non_sleepable_rcu_mismatch",
+ "inproper_sleepable_helper",
+ "inproper_sleepable_kfunc",
+ "nested_rcu_region_unbalanced_1",
+ "nested_rcu_region_unbalanced_2",
+ "rcu_read_lock_global_subprog_lock",
+ "rcu_read_lock_global_subprog_unlock",
+ "rcu_read_lock_sleepable_helper_global_subprog",
+ "rcu_read_lock_sleepable_kfunc_global_subprog",
+ "rcu_read_lock_sleepable_global_subprog_indirect",
+};
+
+static void test_inproper_region(void)
+{
+ struct rcu_read_lock *skel;
+ struct bpf_program *prog;
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(inproper_region_tests); i++) {
+ skel = rcu_read_lock__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, inproper_region_tests[i]);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto out;
+ bpf_program__set_autoload(prog, true);
+ err = rcu_read_lock__load(skel);
+ ASSERT_ERR(err, "skel_load");
+out:
+ rcu_read_lock__destroy(skel);
+ }
+}
+
+static const char * const rcuptr_misuse_tests[] = {
+ "task_untrusted_rcuptr",
+ "cross_rcu_region",
+};
+
+static void test_rcuptr_misuse(void)
+{
+ struct rcu_read_lock *skel;
+ struct bpf_program *prog;
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(rcuptr_misuse_tests); i++) {
+ skel = rcu_read_lock__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, rcuptr_misuse_tests[i]);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto out;
+ bpf_program__set_autoload(prog, true);
+ err = rcu_read_lock__load(skel);
+ ASSERT_ERR(err, "skel_load");
+out:
+ rcu_read_lock__destroy(skel);
+ }
+}
+
+void test_rcu_read_lock(void)
+{
+ int cgroup_fd;
+
+ cgroup_fd = test__join_cgroup("/rcu_read_lock");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /rcu_read_lock"))
+ goto out;
+
+ cgroup_id = get_cgroup_id("/rcu_read_lock");
+ if (test__start_subtest("success"))
+ test_success();
+ if (test__start_subtest("rcuptr_acquire"))
+ test_rcuptr_acquire();
+ if (test__start_subtest("negative_tests_inproper_region"))
+ test_inproper_region();
+ if (test__start_subtest("negative_tests_rcuptr_misuse"))
+ test_rcuptr_misuse();
+ close(cgroup_fd);
+out:;
+}
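+
+/* Illustrative sketch, not part of the original patch: the two negative-test
+ * loops above share one shape; a hypothetical helper could load exactly one
+ * autoload-enabled prog and expect the verifier to reject it.
+ */
+#if 0
+static void load_one_expect_fail(const char *prog_name)
+{
+ struct rcu_read_lock *skel;
+ struct bpf_program *prog;
+
+ skel = rcu_read_lock__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (ASSERT_OK_PTR(prog, "find_program_by_name")) {
+ bpf_program__set_autoload(prog, true);
+ ASSERT_ERR(rcu_read_lock__load(skel), "skel_load");
+ }
+ rcu_read_lock__destroy(skel);
+}
+#endif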
diff --git a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
index 563e12120e77..19e2f2526dbd 100644
--- a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
+++ b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
@@ -16,7 +16,7 @@ struct rdonly_map_subtest {
void test_rdonly_maps(void)
{
- const char *file = "test_rdonly_maps.o";
+ const char *file = "test_rdonly_maps.bpf.o";
struct rdonly_map_subtest subtests[] = {
{ "skip loop", "skip_loop", 0, 0 },
{ "part loop", "part_loop", 3, 2 + 3 + 4 },
@@ -30,14 +30,14 @@ void test_rdonly_maps(void)
struct bss bss;
obj = bpf_object__open_file(file, NULL);
- if (CHECK(IS_ERR(obj), "obj_open", "err %ld\n", PTR_ERR(obj)))
+ if (!ASSERT_OK_PTR(obj, "obj_open"))
return;
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
goto cleanup;
- bss_map = bpf_object__find_map_by_name(obj, "test_rdo.bss");
+ bss_map = bpf_object__find_map_by_name(obj, ".bss");
if (CHECK(!bss_map, "find_bss_map", "failed\n"))
goto cleanup;
@@ -58,11 +58,8 @@ void test_rdonly_maps(void)
goto cleanup;
link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
- if (CHECK(IS_ERR(link), "attach_prog", "prog '%s', err %ld\n",
- t->prog_name, PTR_ERR(link))) {
- link = NULL;
+ if (!ASSERT_OK_PTR(link, "attach_prog"))
goto cleanup;
- }
/* trigger probe */
usleep(1);
diff --git a/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c b/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c
new file mode 100644
index 000000000000..a8d1eaa67020
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024. Huawei Technologies Co., Ltd */
+#include "test_progs.h"
+#include "read_vsyscall.skel.h"
+
+#if defined(__x86_64__)
+/* For VSYSCALL_ADDR */
+#include <asm/vsyscall.h>
+#else
+/* To prevent build failure on non-x86 arch */
+#define VSYSCALL_ADDR 0UL
+#endif
+
+struct read_ret_desc {
+ const char *name;
+ int ret;
+} all_read[] = {
+ { .name = "probe_read_kernel", .ret = -ERANGE },
+ { .name = "probe_read_kernel_str", .ret = -ERANGE },
+ { .name = "probe_read", .ret = -ERANGE },
+ { .name = "probe_read_str", .ret = -ERANGE },
+ { .name = "probe_read_user", .ret = -EFAULT },
+ { .name = "probe_read_user_str", .ret = -EFAULT },
+ { .name = "copy_from_user", .ret = -EFAULT },
+ { .name = "copy_from_user_task", .ret = -EFAULT },
+ { .name = "copy_from_user_str", .ret = -EFAULT },
+ { .name = "copy_from_user_task_str", .ret = -EFAULT },
+};
+
+void test_read_vsyscall(void)
+{
+ struct read_vsyscall *skel;
+ unsigned int i;
+ int err;
+
+#if !defined(__x86_64__)
+ test__skip();
+ return;
+#endif
+ skel = read_vsyscall__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "read_vsyscall open_load"))
+ return;
+
+ skel->bss->target_pid = getpid();
+ err = read_vsyscall__attach(skel);
+ if (!ASSERT_EQ(err, 0, "read_vsyscall attach"))
+ goto out;
+
+ /* userspace may not have a vsyscall page due to LEGACY_VSYSCALL_NONE,
+ * but that doesn't affect the returned error codes.
+ */
+ skel->bss->user_ptr = (void *)VSYSCALL_ADDR;
+ usleep(1);
+
+ for (i = 0; i < ARRAY_SIZE(all_read); i++)
+ ASSERT_EQ(skel->bss->read_ret[i], all_read[i].ret, all_read[i].name);
+out:
+ read_vsyscall__destroy(skel);
+}
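+
+/* Editorial note, an interpretation not stated in the patch: the expected
+ * errors encode two failure paths. The kernel-space probe helpers refuse
+ * the vsyscall address up front (-ERANGE), while the user-space variants
+ * attempt the access and take the fault (-EFAULT).
+ */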
diff --git a/tools/testing/selftests/bpf/prog_tests/recursion.c b/tools/testing/selftests/bpf/prog_tests/recursion.c
new file mode 100644
index 000000000000..23552d3e3365
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/recursion.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include "recursion.skel.h"
+
+void test_recursion(void)
+{
+ struct bpf_prog_info prog_info = {};
+ __u32 prog_info_len = sizeof(prog_info);
+ struct recursion *skel;
+ int key = 0;
+ int err;
+
+ skel = recursion__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ err = recursion__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ ASSERT_EQ(skel->bss->pass1, 0, "pass1 == 0");
+ bpf_map_delete_elem(bpf_map__fd(skel->maps.hash1), &key);
+ ASSERT_EQ(skel->bss->pass1, 1, "pass1 == 1");
+ bpf_map_delete_elem(bpf_map__fd(skel->maps.hash1), &key);
+ ASSERT_EQ(skel->bss->pass1, 2, "pass1 == 2");
+
+ ASSERT_EQ(skel->bss->pass2, 0, "pass2 == 0");
+ bpf_map_delete_elem(bpf_map__fd(skel->maps.hash2), &key);
+ ASSERT_EQ(skel->bss->pass2, 1, "pass2 == 1");
+ bpf_map_delete_elem(bpf_map__fd(skel->maps.hash2), &key);
+ ASSERT_EQ(skel->bss->pass2, 2, "pass2 == 2");
+
+ err = bpf_prog_get_info_by_fd(bpf_program__fd(skel->progs.on_delete),
+ &prog_info, &prog_info_len);
+ if (!ASSERT_OK(err, "get_prog_info"))
+ goto out;
+ ASSERT_EQ(prog_info.recursion_misses, 2, "recursion_misses");
+out:
+ recursion__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/recursive_attach.c b/tools/testing/selftests/bpf/prog_tests/recursive_attach.c
new file mode 100644
index 000000000000..0ffa01d54ce2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/recursive_attach.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Red Hat, Inc. */
+#include <test_progs.h>
+#include "fentry_recursive.skel.h"
+#include "fentry_recursive_target.skel.h"
+#include <bpf/btf.h>
+#include "bpf/libbpf_internal.h"
+
+/* Test that recursive attachment of tracing progs with more than one
+ * nesting level is not possible. Create a chain of attachments and verify
+ * that the last prog will fail. Depending on the arguments, the following
+ * cases are tested:
+ *
+ * - Recursive loading of tracing progs, without attaching (attach = false,
+ * detach = false). The chain looks like this:
+ * load target
+ * load fentry1 -> target
+ * load fentry2 -> fentry1 (fail)
+ *
+ * - Recursive attach of tracing progs (attach = true, detach = false). The
+ * chain looks like this:
+ * load target
+ * load fentry1 -> target
+ * attach fentry1 -> target
+ * load fentry2 -> fentry1 (fail)
+ *
+ * - Recursive attach and detach of tracing progs (attach = true, detach =
+ * true). This validates that attach_tracing_prog flag will be set throughout
+ * the whole lifecycle of an fentry prog, independently from whether it's
+ * detached. The chain looks like this:
+ * load target
+ * load fentry1 -> target
+ * attach fentry1 -> target
+ * detach fentry1
+ * load fentry2 -> fentry1 (fail)
+ */
+static void test_recursive_fentry_chain(bool attach, bool detach)
+{
+ struct fentry_recursive_target *target_skel = NULL;
+ struct fentry_recursive *tracing_chain[2] = {};
+ struct bpf_program *prog;
+ int prev_fd, err;
+
+ target_skel = fentry_recursive_target__open_and_load();
+ if (!ASSERT_OK_PTR(target_skel, "fentry_recursive_target__open_and_load"))
+ return;
+
+ /* Create an attachment chain with two fentry progs */
+ for (int i = 0; i < 2; i++) {
+ tracing_chain[i] = fentry_recursive__open();
+ if (!ASSERT_OK_PTR(tracing_chain[i], "fentry_recursive__open"))
+ goto close_prog;
+
+ /* The first prog in the chain is going to be attached to the target
+ * fentry program, the second one to the previous in the chain.
+ */
+ prog = tracing_chain[i]->progs.recursive_attach;
+ if (i == 0) {
+ prev_fd = bpf_program__fd(target_skel->progs.test1);
+ err = bpf_program__set_attach_target(prog, prev_fd, "test1");
+ } else {
+ prev_fd = bpf_program__fd(tracing_chain[i-1]->progs.recursive_attach);
+ err = bpf_program__set_attach_target(prog, prev_fd, "recursive_attach");
+ }
+
+ if (!ASSERT_OK(err, "bpf_program__set_attach_target"))
+ goto close_prog;
+
+ err = fentry_recursive__load(tracing_chain[i]);
+ /* The first attach should succeed, the second fail */
+ if (i == 0) {
+ if (!ASSERT_OK(err, "fentry_recursive__load"))
+ goto close_prog;
+
+ if (attach) {
+ err = fentry_recursive__attach(tracing_chain[i]);
+ if (!ASSERT_OK(err, "fentry_recursive__attach"))
+ goto close_prog;
+ }
+
+ if (detach) {
+ /* Flag attach_tracing_prog should still be set, preventing
+ * attachment of the following prog.
+ */
+ fentry_recursive__detach(tracing_chain[i]);
+ }
+ } else {
+ if (!ASSERT_ERR(err, "fentry_recursive__load"))
+ goto close_prog;
+ }
+ }
+
+close_prog:
+ fentry_recursive_target__destroy(target_skel);
+ for (int i = 0; i < 2; i++) {
+ fentry_recursive__destroy(tracing_chain[i]);
+ }
+}
+
+void test_recursive_fentry(void)
+{
+ if (test__start_subtest("attach"))
+ test_recursive_fentry_chain(true, false);
+ if (test__start_subtest("load"))
+ test_recursive_fentry_chain(false, false);
+ if (test__start_subtest("detach"))
+ test_recursive_fentry_chain(true, true);
+}
+
+/* Test that a tracing prog reattachment (when we land in
+ * "prog->aux->dst_trampoline and tgt_prog is NULL" branch in
+ * bpf_tracing_prog_attach) does not lead to a crash due to missing attach_btf
+ */
+void test_fentry_attach_btf_presence(void)
+{
+ struct fentry_recursive_target *target_skel = NULL;
+ struct fentry_recursive *tracing_skel = NULL;
+ struct bpf_program *prog;
+ int err, link_fd, tgt_prog_fd;
+
+ target_skel = fentry_recursive_target__open_and_load();
+ if (!ASSERT_OK_PTR(target_skel, "fentry_recursive_target__open_and_load"))
+ goto close_prog;
+
+ tracing_skel = fentry_recursive__open();
+ if (!ASSERT_OK_PTR(tracing_skel, "fentry_recursive__open"))
+ goto close_prog;
+
+ prog = tracing_skel->progs.recursive_attach;
+ tgt_prog_fd = bpf_program__fd(target_skel->progs.fentry_target);
+ err = bpf_program__set_attach_target(prog, tgt_prog_fd, "fentry_target");
+ if (!ASSERT_OK(err, "bpf_program__set_attach_target"))
+ goto close_prog;
+
+ err = fentry_recursive__load(tracing_skel);
+ if (!ASSERT_OK(err, "fentry_recursive__load"))
+ goto close_prog;
+
+ tgt_prog_fd = bpf_program__fd(tracing_skel->progs.recursive_attach);
+ link_fd = bpf_link_create(tgt_prog_fd, 0, BPF_TRACE_FENTRY, NULL);
+ if (!ASSERT_GE(link_fd, 0, "link_fd"))
+ goto close_prog;
+
+ fentry_recursive__detach(tracing_skel);
+
+ err = fentry_recursive__attach(tracing_skel);
+ ASSERT_ERR(err, "fentry_recursive__attach");
+
+close_prog:
+ fentry_recursive_target__destroy(target_skel);
+ fentry_recursive__destroy(tracing_skel);
+}
+
+static void *fentry_target_test_run(void *arg)
+{
+ for (;;) {
+ int prog_fd = __atomic_load_n((int *)arg, __ATOMIC_SEQ_CST);
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err;
+
+ if (prog_fd == -1)
+ break;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "fentry_target test_run"))
+ break;
+ }
+
+ return NULL;
+}
+
+void test_fentry_attach_stress(void)
+{
+ struct fentry_recursive_target *target_skel = NULL;
+ struct fentry_recursive *tracing_skel = NULL;
+ struct bpf_program *prog;
+ int err, i, tgt_prog_fd;
+ pthread_t thread;
+
+ target_skel = fentry_recursive_target__open_and_load();
+ if (!ASSERT_OK_PTR(target_skel,
+ "fentry_recursive_target__open_and_load"))
+ goto close_prog;
+ tgt_prog_fd = bpf_program__fd(target_skel->progs.fentry_target);
+ err = pthread_create(&thread, NULL,
+ fentry_target_test_run, &tgt_prog_fd);
+ if (!ASSERT_OK(err, "pthread_create"))
+ goto close_prog;
+
+ for (i = 0; i < 1000; i++) {
+ tracing_skel = fentry_recursive__open();
+ if (!ASSERT_OK_PTR(tracing_skel, "fentry_recursive__open"))
+ goto stop_thread;
+
+ prog = tracing_skel->progs.recursive_attach;
+ err = bpf_program__set_attach_target(prog, tgt_prog_fd,
+ "fentry_target");
+ if (!ASSERT_OK(err, "bpf_program__set_attach_target"))
+ goto stop_thread;
+
+ err = fentry_recursive__load(tracing_skel);
+ if (!ASSERT_OK(err, "fentry_recursive__load"))
+ goto stop_thread;
+
+ err = fentry_recursive__attach(tracing_skel);
+ if (!ASSERT_OK(err, "fentry_recursive__attach"))
+ goto stop_thread;
+
+ fentry_recursive__destroy(tracing_skel);
+ tracing_skel = NULL;
+ }
+
+stop_thread:
+ __atomic_store_n(&tgt_prog_fd, -1, __ATOMIC_SEQ_CST);
+ err = pthread_join(thread, NULL);
+ ASSERT_OK(err, "pthread_join");
+close_prog:
+ fentry_recursive__destroy(tracing_skel);
+ fentry_recursive_target__destroy(target_skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
new file mode 100644
index 000000000000..d2c0542716a8
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "refcounted_kptr.skel.h"
+#include "refcounted_kptr_fail.skel.h"
+
+void test_refcounted_kptr(void)
+{
+ RUN_TESTS(refcounted_kptr);
+}
+
+void test_refcounted_kptr_fail(void)
+{
+ RUN_TESTS(refcounted_kptr_fail);
+}
+
+void test_refcounted_kptr_wrong_owner(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct refcounted_kptr *skel;
+ int ret;
+
+ skel = refcounted_kptr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a1), &opts);
+ ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a1");
+ ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a1 retval");
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_b), &opts);
+ ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_b");
+ ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_b retval");
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a2), &opts);
+ ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a2");
+ ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval");
+ refcounted_kptr__destroy(skel);
+}
+
+void test_percpu_hash_refcounted_kptr_refcount_leak(void)
+{
+ struct refcounted_kptr *skel;
+ int cpu_nr, fd, err, key = 0;
+ struct bpf_map *map;
+ size_t values_sz;
+ u64 *values;
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ cpu_nr = libbpf_num_possible_cpus();
+ if (!ASSERT_GT(cpu_nr, 0, "libbpf_num_possible_cpus"))
+ return;
+
+ values = calloc(cpu_nr, sizeof(u64));
+ if (!ASSERT_OK_PTR(values, "calloc values"))
+ return;
+
+ skel = refcounted_kptr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load")) {
+ free(values);
+ return;
+ }
+
+ values_sz = cpu_nr * sizeof(u64);
+ memset(values, 0, values_sz);
+
+ map = skel->maps.percpu_hash;
+ err = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, 0);
+ if (!ASSERT_OK(err, "bpf_map__update_elem"))
+ goto out;
+
+ fd = bpf_program__fd(skel->progs.percpu_hash_refcount_leak);
+ err = bpf_prog_test_run_opts(fd, &opts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ goto out;
+ if (!ASSERT_EQ(opts.retval, 2, "opts.retval"))
+ goto out;
+
+ err = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, 0);
+ if (!ASSERT_OK(err, "bpf_map__update_elem"))
+ goto out;
+
+ fd = bpf_program__fd(skel->progs.check_percpu_hash_refcount);
+ err = bpf_prog_test_run_opts(fd, &opts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+ ASSERT_EQ(opts.retval, 1, "opts.retval");
+
+out:
+ refcounted_kptr__destroy(skel);
+ free(values);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
index ac1ee10cffd8..d863205bbe95 100644
--- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -3,50 +3,61 @@
void test_reference_tracking(void)
{
- const char *file = "test_sk_lookup_kern.o";
+ const char *file = "test_sk_lookup_kern.bpf.o";
const char *obj_name = "ref_track";
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
.object_name = obj_name,
.relaxed_maps = true,
);
- struct bpf_object *obj;
+ struct bpf_object *obj_iter, *obj = NULL;
struct bpf_program *prog;
__u32 duration = 0;
int err = 0;
- obj = bpf_object__open_file(file, &open_opts);
- if (CHECK_FAIL(IS_ERR(obj)))
+ obj_iter = bpf_object__open_file(file, &open_opts);
+ if (!ASSERT_OK_PTR(obj_iter, "obj_iter_open_file"))
return;
- if (CHECK(strcmp(bpf_object__name(obj), obj_name), "obj_name",
+ if (CHECK(strcmp(bpf_object__name(obj_iter), obj_name), "obj_name",
"wrong obj name '%s', expected '%s'\n",
- bpf_object__name(obj), obj_name))
+ bpf_object__name(obj_iter), obj_name))
goto cleanup;
- bpf_object__for_each_program(prog, obj) {
- const char *title;
+ bpf_object__for_each_program(prog, obj_iter) {
+ struct bpf_program *p;
+ const char *name;
- /* Ignore .text sections */
- title = bpf_program__section_name(prog);
- if (strstr(title, ".text") != NULL)
+ name = bpf_program__name(prog);
+ if (!test__start_subtest(name))
continue;
- if (!test__start_subtest(title))
- continue;
+ obj = bpf_object__open_file(file, &open_opts);
+ if (!ASSERT_OK_PTR(obj, "obj_open_file"))
+ goto cleanup;
+
+ /* no programs are loaded by default, so just set
+ * autoload to true for the single prog under test
+ */
+ p = bpf_object__find_program_by_name(obj, name);
+ bpf_program__set_autoload(p, true);
- /* Expect verifier failure if test name has 'fail' */
- if (strstr(title, "fail") != NULL) {
+ /* Expect verifier failure if test name has 'err' */
+ if (strncmp(name, "err_", sizeof("err_") - 1) == 0) {
libbpf_print_fn_t old_print_fn;
old_print_fn = libbpf_set_print(NULL);
- err = !bpf_program__load(prog, "GPL", 0);
+ err = !bpf_object__load(obj);
libbpf_set_print(old_print_fn);
} else {
- err = bpf_program__load(prog, "GPL", 0);
+ err = bpf_object__load(obj);
}
- CHECK(err, title, "\n");
+ ASSERT_OK(err, name);
+
+ bpf_object__close(obj);
+ obj = NULL;
}
cleanup:
bpf_object__close(obj);
+ bpf_object__close(obj_iter);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
new file mode 100644
index 000000000000..d93a0c7b1786
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
@@ -0,0 +1,2161 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#define _GNU_SOURCE
+#include <limits.h>
+#include <test_progs.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+
+/* =================================
+ * SHORT AND CONSISTENT NUMBER TYPES
+ * =================================
+ */
+#define U64_MAX ((u64)UINT64_MAX)
+#define U32_MAX ((u32)UINT_MAX)
+#define U16_MAX ((u32)0xffff)
+#define S64_MIN ((s64)INT64_MIN)
+#define S64_MAX ((s64)INT64_MAX)
+#define S32_MIN ((s32)INT_MIN)
+#define S32_MAX ((s32)INT_MAX)
+#define S16_MIN ((s16)0x8000)
+#define S16_MAX ((s16)0x7fff)
+
+typedef unsigned long long ___u64;
+typedef unsigned int ___u32;
+typedef long long ___s64;
+typedef int ___s32;
+
+/* avoid conflicts with already defined types in kernel headers */
+#define u64 ___u64
+#define u32 ___u32
+#define s64 ___s64
+#define s32 ___s32
+
+/* ==================================
+ * STRING BUF ABSTRACTION AND HELPERS
+ * ==================================
+ */
+struct strbuf {
+ size_t buf_sz;
+ int pos;
+ char buf[0];
+};
+
+#define DEFINE_STRBUF(name, N) \
+ struct { struct strbuf buf; char data[(N)]; } ___##name; \
+ struct strbuf *name = (___##name.buf.buf_sz = (N), ___##name.buf.pos = 0, &___##name.buf)
+
+__printf(2, 3)
+static inline void snappendf(struct strbuf *s, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ s->pos += vsnprintf(s->buf + s->pos,
+ s->pos < s->buf_sz ? s->buf_sz - s->pos : 0,
+ fmt, args);
+ va_end(args);
+}
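+
+/* Editorial note, illustrative usage not in the original patch:
+ * DEFINE_STRBUF carves a strbuf header plus N bytes of storage out of a
+ * single stack object, so a caller only needs:
+ *
+ * DEFINE_STRBUF(sb, 64);
+ * snappendf(sb, "x=%d", 42);
+ * printf("%s\n", sb->buf);
+ *
+ * pos may grow past buf_sz (vsnprintf reports the would-be length), but
+ * the buffer never overflows because the write size is clamped to 0 once
+ * pos >= buf_sz.
+ */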
+
+/* ==================================
+ * GENERIC NUMBER TYPE AND OPERATIONS
+ * ==================================
+ */
+enum num_t { U64, first_t = U64, U32, S64, S32, last_t = S32 };
+
+static __always_inline u64 min_t(enum num_t t, u64 x, u64 y)
+{
+ switch (t) {
+ case U64: return (u64)x < (u64)y ? (u64)x : (u64)y;
+ case U32: return (u32)x < (u32)y ? (u32)x : (u32)y;
+ case S64: return (s64)x < (s64)y ? (s64)x : (s64)y;
+ case S32: return (s32)x < (s32)y ? (u32)(s32)x : (u32)(s32)y;
+ default: printf("min_t!\n"); exit(1);
+ }
+}
+
+static __always_inline u64 max_t(enum num_t t, u64 x, u64 y)
+{
+ switch (t) {
+ case U64: return (u64)x > (u64)y ? (u64)x : (u64)y;
+ case U32: return (u32)x > (u32)y ? (u32)x : (u32)y;
+ case S64: return (s64)x > (s64)y ? (s64)x : (s64)y;
+ case S32: return (s32)x > (s32)y ? (u32)(s32)x : (u32)(s32)y;
+ default: printf("max_t!\n"); exit(1);
+ }
+}
+
+static __always_inline u64 cast_t(enum num_t t, u64 x)
+{
+ switch (t) {
+ case U64: return (u64)x;
+ case U32: return (u32)x;
+ case S64: return (s64)x;
+ case S32: return (u32)(s32)x;
+ default: printf("cast_t!\n"); exit(1);
+ }
+}
+
+static const char *t_str(enum num_t t)
+{
+ switch (t) {
+ case U64: return "u64";
+ case U32: return "u32";
+ case S64: return "s64";
+ case S32: return "s32";
+ default: printf("t_str!\n"); exit(1);
+ }
+}
+
+static bool t_is_32(enum num_t t)
+{
+ switch (t) {
+ case U64: return false;
+ case U32: return true;
+ case S64: return false;
+ case S32: return true;
+ default: printf("t_is_32!\n"); exit(1);
+ }
+}
+
+static enum num_t t_signed(enum num_t t)
+{
+ switch (t) {
+ case U64: return S64;
+ case U32: return S32;
+ case S64: return S64;
+ case S32: return S32;
+ default: printf("t_signed!\n"); exit(1);
+ }
+}
+
+static enum num_t t_unsigned(enum num_t t)
+{
+ switch (t) {
+ case U64: return U64;
+ case U32: return U32;
+ case S64: return U64;
+ case S32: return U32;
+ default: printf("t_unsigned!\n"); exit(1);
+ }
+}
+
+#define UNUM_MAX_DECIMAL U16_MAX
+#define SNUM_MAX_DECIMAL S16_MAX
+#define SNUM_MIN_DECIMAL S16_MIN
+
+static bool num_is_small(enum num_t t, u64 x)
+{
+ switch (t) {
+ case U64: return (u64)x <= UNUM_MAX_DECIMAL;
+ case U32: return (u32)x <= UNUM_MAX_DECIMAL;
+ case S64: return (s64)x >= SNUM_MIN_DECIMAL && (s64)x <= SNUM_MAX_DECIMAL;
+ case S32: return (s32)x >= SNUM_MIN_DECIMAL && (s32)x <= SNUM_MAX_DECIMAL;
+ default: printf("num_is_small!\n"); exit(1);
+ }
+}
+
+static void snprintf_num(enum num_t t, struct strbuf *sb, u64 x)
+{
+ bool is_small = num_is_small(t, x);
+
+ if (is_small) {
+ switch (t) {
+ case U64: return snappendf(sb, "%llu", (u64)x);
+ case U32: return snappendf(sb, "%u", (u32)x);
+ case S64: return snappendf(sb, "%lld", (s64)x);
+ case S32: return snappendf(sb, "%d", (s32)x);
+ default: printf("snprintf_num!\n"); exit(1);
+ }
+ } else {
+ switch (t) {
+ case U64:
+ if (x == U64_MAX)
+ return snappendf(sb, "U64_MAX");
+ else if (x >= U64_MAX - 256)
+ return snappendf(sb, "U64_MAX-%llu", U64_MAX - x);
+ else
+ return snappendf(sb, "%#llx", (u64)x);
+ case U32:
+ if ((u32)x == U32_MAX)
+ return snappendf(sb, "U32_MAX");
+ else if ((u32)x >= U32_MAX - 256)
+ return snappendf(sb, "U32_MAX-%u", U32_MAX - (u32)x);
+ else
+ return snappendf(sb, "%#x", (u32)x);
+ case S64:
+ if ((s64)x == S64_MAX)
+ return snappendf(sb, "S64_MAX");
+ else if ((s64)x >= S64_MAX - 256)
+ return snappendf(sb, "S64_MAX-%lld", S64_MAX - (s64)x);
+ else if ((s64)x == S64_MIN)
+ return snappendf(sb, "S64_MIN");
+ else if ((s64)x <= S64_MIN + 256)
+ return snappendf(sb, "S64_MIN+%lld", (s64)x - S64_MIN);
+ else
+ return snappendf(sb, "%#llx", (s64)x);
+ case S32:
+ if ((s32)x == S32_MAX)
+ return snappendf(sb, "S32_MAX");
+ else if ((s32)x >= S32_MAX - 256)
+ return snappendf(sb, "S32_MAX-%d", S32_MAX - (s32)x);
+ else if ((s32)x == S32_MIN)
+ return snappendf(sb, "S32_MIN");
+ else if ((s32)x <= S32_MIN + 256)
+ return snappendf(sb, "S32_MIN+%d", (s32)x - S32_MIN);
+ else
+ return snappendf(sb, "%#x", (s32)x);
+ default: printf("snprintf_num!\n"); exit(1);
+ }
+ }
+}
+
+/* ===================================
+ * GENERIC RANGE STRUCT AND OPERATIONS
+ * ===================================
+ */
+struct range {
+ u64 a, b;
+};
+
+static void snprintf_range(enum num_t t, struct strbuf *sb, struct range x)
+{
+ if (x.a == x.b)
+ return snprintf_num(t, sb, x.a);
+
+ snappendf(sb, "[");
+ snprintf_num(t, sb, x.a);
+ snappendf(sb, "; ");
+ snprintf_num(t, sb, x.b);
+ snappendf(sb, "]");
+}
+
+static void print_range(enum num_t t, struct range x, const char *sfx)
+{
+ DEFINE_STRBUF(sb, 128);
+
+ snprintf_range(t, sb, x);
+ printf("%s%s", sb->buf, sfx);
+}
+
+static const struct range unkn[] = {
+ [U64] = { 0, U64_MAX },
+ [U32] = { 0, U32_MAX },
+ [S64] = { (u64)S64_MIN, (u64)S64_MAX },
+ [S32] = { (u64)(u32)S32_MIN, (u64)(u32)S32_MAX },
+};
+
+static struct range unkn_subreg(enum num_t t)
+{
+ switch (t) {
+ case U64: return unkn[U32];
+ case U32: return unkn[U32];
+ case S64: return unkn[U32];
+ case S32: return unkn[S32];
+ default: printf("unkn_subreg!\n"); exit(1);
+ }
+}
+
+static struct range range(enum num_t t, u64 a, u64 b)
+{
+ switch (t) {
+ case U64: return (struct range){ (u64)a, (u64)b };
+ case U32: return (struct range){ (u32)a, (u32)b };
+ case S64: return (struct range){ (s64)a, (s64)b };
+ case S32: return (struct range){ (u32)(s32)a, (u32)(s32)b };
+ default: printf("range!\n"); exit(1);
+ }
+}
+
+static __always_inline u32 sign64(u64 x) { return (x >> 63) & 1; }
+static __always_inline u32 sign32(u64 x) { return ((u32)x >> 31) & 1; }
+static __always_inline u32 upper32(u64 x) { return (u32)(x >> 32); }
+static __always_inline u64 swap_low32(u64 x, u32 y) { return (x & 0xffffffff00000000ULL) | y; }
+
+static bool range_eq(struct range x, struct range y)
+{
+ return x.a == y.a && x.b == y.b;
+}
+
+static struct range range_cast_to_s32(struct range x)
+{
+ u64 a = x.a, b = x.b;
+
+ /* if upper 32 bits are constant, lower 32 bits should form a proper
+ * s32 range to be correct
+ */
+ if (upper32(a) == upper32(b) && (s32)a <= (s32)b)
+ return range(S32, a, b);
+
+ /* Special case where upper bits form a small sequence of two
+ * sequential numbers (in 32-bit unsigned space, so 0xffffffff to
+ * 0x00000000 is also valid), while lower bits form a proper s32 range
+ * going from negative numbers to positive numbers.
+ *
+ * E.g.: [0xfffffff0ffffff00; 0xfffffff100000010]. Iterating
+ * over the full 64-bit numbers range will form a proper [-256, 16]
+ * ([0xffffff00; 0x00000010]) range in its lower 32 bits.
+ */
+ if (upper32(a) + 1 == upper32(b) && (s32)a < 0 && (s32)b >= 0)
+ return range(S32, a, b);
+
+ /* otherwise we can't derive much meaningful information */
+ return unkn[S32];
+}
+
+static struct range range_cast_u64(enum num_t to_t, struct range x)
+{
+ u64 a = (u64)x.a, b = (u64)x.b;
+
+ switch (to_t) {
+ case U64:
+ return x;
+ case U32:
+ if (upper32(a) != upper32(b))
+ return unkn[U32];
+ return range(U32, a, b);
+ case S64:
+ if (sign64(a) != sign64(b))
+ return unkn[S64];
+ return range(S64, a, b);
+ case S32:
+ return range_cast_to_s32(x);
+ default: printf("range_cast_u64!\n"); exit(1);
+ }
+}
+
+static struct range range_cast_s64(enum num_t to_t, struct range x)
+{
+ s64 a = (s64)x.a, b = (s64)x.b;
+
+ switch (to_t) {
+ case U64:
+ /* equivalent to (s64)a <= (s64)b check */
+ if (sign64(a) != sign64(b))
+ return unkn[U64];
+ return range(U64, a, b);
+ case U32:
+ if (upper32(a) != upper32(b) || sign32(a) != sign32(b))
+ return unkn[U32];
+ return range(U32, a, b);
+ case S64:
+ return x;
+ case S32:
+ return range_cast_to_s32(x);
+ default: printf("range_cast_s64!\n"); exit(1);
+ }
+}
+
+static struct range range_cast_u32(enum num_t to_t, struct range x)
+{
+ u32 a = (u32)x.a, b = (u32)x.b;
+
+ switch (to_t) {
+ case U64:
+ case S64:
+ /* u32 is always a valid zero-extended u64/s64 */
+ return range(to_t, a, b);
+ case U32:
+ return x;
+ case S32:
+ return range_cast_to_s32(range(U32, a, b));
+ default: printf("range_cast_u32!\n"); exit(1);
+ }
+}
+
+static struct range range_cast_s32(enum num_t to_t, struct range x)
+{
+ s32 a = (s32)x.a, b = (s32)x.b;
+
+ switch (to_t) {
+ case U64:
+ case U32:
+ case S64:
+ if (sign32(a) != sign32(b))
+ return unkn[to_t];
+ return range(to_t, a, b);
+ case S32:
+ return x;
+ default: printf("range_cast_s32!\n"); exit(1);
+ }
+}
+
+/* Reinterpret range in *from_t* domain as a range in *to_t* domain preserving
+ * all possible information. Worst case, it will be unknown range within
+ * *to_t* domain, if nothing more specific can be guaranteed during the
+ * conversion
+ */
+static struct range range_cast(enum num_t from_t, enum num_t to_t, struct range from)
+{
+ switch (from_t) {
+ case U64: return range_cast_u64(to_t, from);
+ case U32: return range_cast_u32(to_t, from);
+ case S64: return range_cast_s64(to_t, from);
+ case S32: return range_cast_s32(to_t, from);
+ default: printf("range_cast!\n"); exit(1);
+ }
+}
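+
+/* Editorial note, a worked example not in the original patch:
+ * range_cast(U64, S64, [1; U64_MAX]) yields unkn[S64], since the source
+ * interval crosses the sign boundary (sign64(1) != sign64(U64_MAX)) and
+ * thus wraps from positive to negative when reinterpreted as s64. By
+ * contrast, range_cast(U64, S64, [1; 10]) stays [1; 10].
+ */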
+
+static bool is_valid_num(enum num_t t, u64 x)
+{
+ switch (t) {
+ case U64: return true;
+ case U32: return upper32(x) == 0;
+ case S64: return true;
+ case S32: return upper32(x) == 0;
+ default: printf("is_valid_num!\n"); exit(1);
+ }
+}
+
+static bool is_valid_range(enum num_t t, struct range x)
+{
+ if (!is_valid_num(t, x.a) || !is_valid_num(t, x.b))
+ return false;
+
+ switch (t) {
+ case U64: return (u64)x.a <= (u64)x.b;
+ case U32: return (u32)x.a <= (u32)x.b;
+ case S64: return (s64)x.a <= (s64)x.b;
+ case S32: return (s32)x.a <= (s32)x.b;
+ default: printf("is_valid_range!\n"); exit(1);
+ }
+}
+
+static struct range range_improve(enum num_t t, struct range old, struct range new)
+{
+ return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b));
+}
+
+static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y)
+{
+ struct range y_cast;
+
+ y_cast = range_cast(y_t, x_t, y);
+
+ /* If we know that
+ * - *x* is in the range of signed 32-bit values, and
+ * - *y_cast* range is 32-bit signed non-negative
+ * then *x* range can be improved with *y_cast* such that *x* range
+ * is 32-bit signed non-negative. Otherwise, if the new range for *x*
+ * allowed the upper 32 bits to be 0xffffffff, the eventual new range
+ * for *x* would be out of the signed 32-bit range, which violates the
+ * original *x* range.
+ */
+ if (x_t == S64 && y_t == S32 && y_cast.a <= S32_MAX && y_cast.b <= S32_MAX &&
+ (s64)x.a >= S32_MIN && (s64)x.b <= S32_MAX)
+ return range_improve(x_t, x, y_cast);
+
+ /* the case when new range knowledge, *y*, is a 32-bit subregister
+ * range, while previous range knowledge, *x*, is a full register
+ * 64-bit range, needs special treatment to take into account upper 32
+ * bits of full register range
+ */
+ if (t_is_32(y_t) && !t_is_32(x_t)) {
+ struct range x_swap;
+
+ /* some combinations of upper 32 bits and sign bit can lead to
+ * invalid ranges, in such cases it's easier to detect them
+ * after cast/swap than try to enumerate all the conditions
+ * under which transformation and knowledge transfer is valid
+ */
+ x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b));
+ if (!is_valid_range(x_t, x_swap))
+ return x;
+ return range_improve(x_t, x, x_swap);
+ }
+
+ if (!t_is_32(x_t) && !t_is_32(y_t) && x_t != y_t) {
+ if (x_t == S64 && x.a > x.b) {
+ if (x.b < y.a && x.a <= y.b)
+ return range(x_t, x.a, y.b);
+ if (x.a > y.b && x.b >= y.a)
+ return range(x_t, y.a, x.b);
+ } else if (x_t == U64 && y.a > y.b) {
+ if (y.b < x.a && y.a <= x.b)
+ return range(x_t, y.a, x.b);
+ if (y.a > x.b && y.b >= x.a)
+ return range(x_t, x.a, y.b);
+ }
+ }
+
+ /* otherwise, plain range cast and intersection works */
+ return range_improve(x_t, x, y_cast);
+}
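+
+/* Editorial note, a worked example not in the original patch: with
+ * x_t = S64, x = [-10; 20] and y_t = S32, y = [0; 5], y_cast is [0; 5],
+ * the first branch applies (x fits in s32, y_cast is non-negative s32),
+ * and range_improve() tightens x to [max(-10, 0); min(20, 5)] = [0; 5].
+ */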
+
+/* =======================
+ * GENERIC CONDITIONAL OPS
+ * =======================
+ */
+enum op { OP_LT, OP_LE, OP_GT, OP_GE, OP_EQ, OP_NE, first_op = OP_LT, last_op = OP_NE };
+
+static enum op complement_op(enum op op)
+{
+ switch (op) {
+ case OP_LT: return OP_GE;
+ case OP_LE: return OP_GT;
+ case OP_GT: return OP_LE;
+ case OP_GE: return OP_LT;
+ case OP_EQ: return OP_NE;
+ case OP_NE: return OP_EQ;
+ default: printf("complement_op!\n"); exit(1);
+ }
+}
+
+static const char *op_str(enum op op)
+{
+ switch (op) {
+ case OP_LT: return "<";
+ case OP_LE: return "<=";
+ case OP_GT: return ">";
+ case OP_GE: return ">=";
+ case OP_EQ: return "==";
+ case OP_NE: return "!=";
+ default: printf("op_str!\n"); exit(1);
+ }
+}
+
+/* Can register with range [x.a, x.b] *EVER* satisfy
+ * OP (<, <=, >, >=, ==, !=) relation to
+ * a register with range [y.a, y.b]
+ * _in *num_t* domain_
+ */
+static bool range_canbe_op(enum num_t t, struct range x, struct range y, enum op op)
+{
+#define range_canbe(T) do { \
+ switch (op) { \
+ case OP_LT: return (T)x.a < (T)y.b; \
+ case OP_LE: return (T)x.a <= (T)y.b; \
+ case OP_GT: return (T)x.b > (T)y.a; \
+ case OP_GE: return (T)x.b >= (T)y.a; \
+ case OP_EQ: return (T)max_t(t, x.a, y.a) <= (T)min_t(t, x.b, y.b); \
+ case OP_NE: return !((T)x.a == (T)x.b && (T)y.a == (T)y.b && (T)x.a == (T)y.a); \
+ default: printf("range_canbe op %d\n", op); exit(1); \
+ } \
+} while (0)
+
+ switch (t) {
+ case U64: { range_canbe(u64); }
+ case U32: { range_canbe(u32); }
+ case S64: { range_canbe(s64); }
+ case S32: { range_canbe(s32); }
+ default: printf("range_canbe!\n"); exit(1);
+ }
+#undef range_canbe
+}
+
+/* Does register with range [x.a, x.b] *ALWAYS* satisfy
+ * OP (<, <=, >, >=, ==, !=) relation to
+ * a register with range [y.a, y.b]
+ * _in *num_t* domain_
+ */
+static bool range_always_op(enum num_t t, struct range x, struct range y, enum op op)
+{
+ /* always op <=> ! canbe complement(op) */
+ return !range_canbe_op(t, x, y, complement_op(op));
+}
+
+/* Does register with range [x.a, x.b] *NEVER* satisfy
+ * OP (<, <=, >, >=, ==, !=) relation to
+ * a register with range [y.a, y.b]
+ * _in *num_t* domain_
+ */
+static bool range_never_op(enum num_t t, struct range x, struct range y, enum op op)
+{
+ return !range_canbe_op(t, x, y, op);
+}
+
+/* similar to the verifier's is_branch_taken():
+ * 1 - always taken;
+ * 0 - never taken;
+ * -1 - unsure.
+ */
+static int range_branch_taken_op(enum num_t t, struct range x, struct range y, enum op op)
+{
+ if (range_always_op(t, x, y, op))
+ return 1;
+ if (range_never_op(t, x, y, op))
+ return 0;
+ return -1;
+}
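+
+/* Editorial note, a worked example not in the original patch: for u64
+ * x = [3; 5], y = [5; 7] and OP_LE, range_canbe_op() sees x.a <= y.b
+ * (3 <= 7, true) and the complement OP_GT sees x.b > y.a (5 > 5, false),
+ * so "x <= y" can happen and "x > y" never can: the branch is always
+ * taken and range_branch_taken_op() returns 1.
+ */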
+
+/* What would be the new estimates for register x and y ranges assuming truthful
+ * OP comparison between them. I.e., (x OP y == true) => x <- newx, y <- newy.
+ *
+ * We assume "interesting" cases where ranges overlap. Cases where it's
+ * obvious that (x OP y) is either always true or false should be filtered with
+ * range_never and range_always checks.
+ */
+static void range_cond(enum num_t t, struct range x, struct range y,
+ enum op op, struct range *newx, struct range *newy)
+{
+ if (!range_canbe_op(t, x, y, op)) {
+ /* nothing to adjust, can't happen, return original values */
+ *newx = x;
+ *newy = y;
+ return;
+ }
+ switch (op) {
+ case OP_LT:
+ *newx = range(t, x.a, min_t(t, x.b, y.b - 1));
+ *newy = range(t, max_t(t, x.a + 1, y.a), y.b);
+ break;
+ case OP_LE:
+ *newx = range(t, x.a, min_t(t, x.b, y.b));
+ *newy = range(t, max_t(t, x.a, y.a), y.b);
+ break;
+ case OP_GT:
+ *newx = range(t, max_t(t, x.a, y.a + 1), x.b);
+ *newy = range(t, y.a, min_t(t, x.b - 1, y.b));
+ break;
+ case OP_GE:
+ *newx = range(t, max_t(t, x.a, y.a), x.b);
+ *newy = range(t, y.a, min_t(t, x.b, y.b));
+ break;
+ case OP_EQ:
+ *newx = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
+ *newy = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
+ break;
+ case OP_NE:
+ /* the logic below is now supported by the verifier */
+ if (x.a == x.b && x.a == y.a) {
+ /* X is a constant matching left side of Y */
+ *newx = range(t, x.a, x.b);
+ *newy = range(t, y.a + 1, y.b);
+ } else if (x.a == x.b && x.b == y.b) {
+ /* X is a constant matching right side of Y */
+ *newx = range(t, x.a, x.b);
+ *newy = range(t, y.a, y.b - 1);
+ } else if (y.a == y.b && x.a == y.a) {
+ /* Y is a constant matching left side of X */
+ *newx = range(t, x.a + 1, x.b);
+ *newy = range(t, y.a, y.b);
+ } else if (y.a == y.b && x.b == y.b) {
+ /* Y is a constant matching right side of X */
+ *newx = range(t, x.a, x.b - 1);
+ *newy = range(t, y.a, y.b);
+ } else {
+ /* generic case, can't derive more information */
+ *newx = range(t, x.a, x.b);
+ *newy = range(t, y.a, y.b);
+ }
+
+ break;
+ default:
+ break;
+ }
+}
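+
+/* Editorial note, a worked example not in the original patch: for u64
+ * x = [0; 10], y = [5; 7] and OP_LT, the adjustment gives
+ * newx = [0; min(10, 7 - 1)] = [0; 6] and newy = [max(0 + 1, 5); 7] =
+ * [5; 7]: if the branch was taken, x can't be 7..10, while y's range
+ * was already consistent with x < y.
+ */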
+
+/* =======================
+ * REGISTER STATE HANDLING
+ * =======================
+ */
+struct reg_state {
+ struct range r[4]; /* indexed by enum num_t: U64, U32, S64, S32 */
+ bool valid;
+};
+
+static void print_reg_state(struct reg_state *r, const char *sfx)
+{
+ DEFINE_STRBUF(sb, 512);
+ enum num_t t;
+ int cnt = 0;
+
+ if (!r->valid) {
+ printf("<not found>%s", sfx);
+ return;
+ }
+
+ snappendf(sb, "scalar(");
+ for (t = first_t; t <= last_t; t++) {
+ snappendf(sb, "%s%s=", cnt++ ? "," : "", t_str(t));
+ snprintf_range(t, sb, r->r[t]);
+ }
+ snappendf(sb, ")");
+
+ printf("%s%s", sb->buf, sfx);
+}
+
+static void print_refinement(enum num_t s_t, struct range src,
+ enum num_t d_t, struct range old, struct range new,
+ const char *ctx)
+{
+ printf("REFINING (%s) (%s)SRC=", ctx, t_str(s_t));
+ print_range(s_t, src, "");
+ printf(" (%s)DST_OLD=", t_str(d_t));
+ print_range(d_t, old, "");
+ printf(" (%s)DST_NEW=", t_str(d_t));
+ print_range(d_t, new, "\n");
+}
+
+static void reg_state_refine(struct reg_state *r, enum num_t t, struct range x, const char *ctx)
+{
+ enum num_t d_t, s_t;
+ struct range old;
+ bool keep_going = false;
+
+again:
+ /* try to derive new knowledge from just learned range x of type t */
+ for (d_t = first_t; d_t <= last_t; d_t++) {
+ old = r->r[d_t];
+ r->r[d_t] = range_refine(d_t, r->r[d_t], t, x);
+ if (!range_eq(r->r[d_t], old)) {
+ keep_going = true;
+ if (env.verbosity >= VERBOSE_VERY)
+ print_refinement(t, x, d_t, old, r->r[d_t], ctx);
+ }
+ }
+
+ /* now see if we can derive anything new from updated reg_state's ranges */
+ for (s_t = first_t; s_t <= last_t; s_t++) {
+ for (d_t = first_t; d_t <= last_t; d_t++) {
+ old = r->r[d_t];
+ r->r[d_t] = range_refine(d_t, r->r[d_t], s_t, r->r[s_t]);
+ if (!range_eq(r->r[d_t], old)) {
+ keep_going = true;
+ if (env.verbosity >= VERBOSE_VERY)
+ print_refinement(s_t, r->r[s_t], d_t, old, r->r[d_t], ctx);
+ }
+ }
+ }
+
+ /* keep refining until we converge */
+ if (keep_going) {
+ keep_going = false;
+ goto again;
+ }
+}
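+
+/* Refinement sketch (illustrative): if all four domains of a register start
+ * unknown and we learn u64=[0, 10], a single call is expected to tighten
+ * every other domain as well, roughly:
+ *
+ *	reg_state_refine(&rs, U64, range(U64, 0, 10), "EXAMPLE");
+ *	// rs.r[S64], rs.r[U32], rs.r[S32] all become [0, 10]
+ *
+ * The "again" loop keeps re-deriving until a full pass changes nothing,
+ * i.e., until a fixpoint is reached.
+ */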
+
+static void reg_state_set_const(struct reg_state *rs, enum num_t t, u64 val)
+{
+ enum num_t tt;
+
+ rs->valid = true;
+ for (tt = first_t; tt <= last_t; tt++)
+ rs->r[tt] = tt == t ? range(t, val, val) : unkn[tt];
+
+ reg_state_refine(rs, t, rs->r[t], "CONST");
+}
+
+static void reg_state_cond(enum num_t t, struct reg_state *x, struct reg_state *y, enum op op,
+ struct reg_state *newx, struct reg_state *newy, const char *ctx)
+{
+ char buf[32];
+ enum num_t ts[2];
+ struct reg_state xx = *x, yy = *y;
+ int i, t_cnt;
+ struct range z1, z2;
+
+ if (op == OP_EQ || op == OP_NE) {
+ /* OP_EQ and OP_NE are sign-agnostic, so we need to process
+ * both signed and unsigned domains at the same time
+ */
+ ts[0] = t_unsigned(t);
+ ts[1] = t_signed(t);
+ t_cnt = 2;
+ } else {
+ ts[0] = t;
+ t_cnt = 1;
+ }
+
+ for (i = 0; i < t_cnt; i++) {
+ t = ts[i];
+ z1 = x->r[t];
+ z2 = y->r[t];
+
+ range_cond(t, z1, z2, op, &z1, &z2);
+
+ if (newx) {
+ snprintf(buf, sizeof(buf), "%s R1", ctx);
+ reg_state_refine(&xx, t, z1, buf);
+ }
+ if (newy) {
+ snprintf(buf, sizeof(buf), "%s R2", ctx);
+ reg_state_refine(&yy, t, z2, buf);
+ }
+ }
+
+ if (newx)
+ *newx = xx;
+ if (newy)
+ *newy = yy;
+}
+
+static int reg_state_branch_taken_op(enum num_t t, struct reg_state *x, struct reg_state *y,
+ enum op op)
+{
+ if (op == OP_EQ || op == OP_NE) {
+ /* OP_EQ and OP_NE are sign-agnostic */
+ enum num_t tu = t_unsigned(t);
+ enum num_t ts = t_signed(t);
+ int br_u, br_s, br;
+
+ br_u = range_branch_taken_op(tu, x->r[tu], y->r[tu], op);
+ br_s = range_branch_taken_op(ts, x->r[ts], y->r[ts], op);
+
+ if (br_u >= 0 && br_s >= 0 && br_u != br_s)
+ ASSERT_FALSE(true, "branch taken inconsistency!\n");
+
+ /* if 64-bit ranges are indecisive, use 32-bit subranges to
+ * eliminate always/never taken branches, if possible
+ */
+ if (br_u == -1 && (t == U64 || t == S64)) {
+ br = range_branch_taken_op(U32, x->r[U32], y->r[U32], op);
+ /* we can only reject for OP_EQ, never take branch
+ * based on lower 32 bits
+ */
+ if (op == OP_EQ && br == 0)
+ return 0;
+ /* for OP_NE we can be conclusive only if lower 32 bits
+ * differ and thus inequality branch is always taken
+ */
+ if (op == OP_NE && br == 1)
+ return 1;
+
+ br = range_branch_taken_op(S32, x->r[S32], y->r[S32], op);
+ if (op == OP_EQ && br == 0)
+ return 0;
+ if (op == OP_NE && br == 1)
+ return 1;
+ }
+
+ return br_u >= 0 ? br_u : br_s;
+ }
+ return range_branch_taken_op(t, x->r[t], y->r[t], op);
+}
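+
+/* Illustrative corner case: with overlapping 64-bit ranges but disjoint
+ * known lower 32 bits (say, x->r[U32]=[1, 1] and y->r[U32]=[2, 2]), OP_EQ
+ * is rejected (returns 0) even though the U64/S64 domains alone are
+ * indecisive; per the comments above, the lower 32 bits alone are never
+ * enough to conclude that an equality branch is always taken.
+ */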
+
+/* =====================================
+ * BPF PROGS GENERATION AND VERIFICATION
+ * =====================================
+ */
+struct case_spec {
+ /* whether to init full register (r1) or sub-register (w1) */
+ bool init_subregs;
+ /* whether to establish initial value range on full register (r1) or
+ * sub-register (w1)
+ */
+ bool setup_subregs;
+ /* whether to establish initial value range using signed or unsigned
+ * comparisons (i.e., initialize umin/umax or smin/smax directly)
+ */
+ bool setup_signed;
+ /* whether to perform comparison on full registers or sub-registers */
+ bool compare_subregs;
+ /* whether to perform comparison using signed or unsigned operations */
+ bool compare_signed;
+};
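+
+/* For example (cf. verify_case_op() below), init_t == S32 with cond_t == U64
+ * maps to: { .init_subregs = true, .setup_subregs = true,
+ * .setup_signed = true, .compare_subregs = false, .compare_signed = false }.
+ */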
+
+/* Generate test BPF program based on provided test ranges, operation, and
+ * specifications about register bitness and signedness.
+ */
+static int load_range_cmp_prog(struct range x, struct range y, enum op op,
+ int branch_taken, struct case_spec spec,
+ char *log_buf, size_t log_sz,
+ int *false_pos, int *true_pos)
+{
+#define emit(insn) ({ \
+ struct bpf_insn __insns[] = { insn }; \
+ int __i; \
+ for (__i = 0; __i < ARRAY_SIZE(__insns); __i++) \
+ insns[cur_pos + __i] = __insns[__i]; \
+ cur_pos += __i; \
+})
+#define JMP_TO(target) ((target) - cur_pos - 1)
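+/* emit() appends instructions at cur_pos; JMP_TO() converts an absolute
+ * instruction position into the relative offset expected by jump insns
+ * (used as JMP_TO(exit_pos) in the range setup checks below).
+ */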
+ int cur_pos = 0, exit_pos, fd, op_code;
+ struct bpf_insn insns[64];
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .log_level = 2,
+ .log_buf = log_buf,
+ .log_size = log_sz,
+ .prog_flags = testing_prog_flags(),
+ );
+
+ /* ; skip exit block below
+ * goto +2;
+ */
+ emit(BPF_JMP_A(2));
+ exit_pos = cur_pos;
+ /* ; exit block for all the preparatory conditionals
+ * out:
+ * r0 = 0;
+ * exit;
+ */
+ emit(BPF_MOV64_IMM(BPF_REG_0, 0));
+ emit(BPF_EXIT_INSN());
+ /*
+ * ; assign r6/w6 and r7/w7 unpredictable u64/u32 value
+ * call bpf_get_current_pid_tgid;
+ * r6 = r0; | w6 = w0;
+ * call bpf_get_current_pid_tgid;
+ * r7 = r0; | w7 = w0;
+ */
+ emit(BPF_EMIT_CALL(BPF_FUNC_get_current_pid_tgid));
+ if (spec.init_subregs)
+ emit(BPF_MOV32_REG(BPF_REG_6, BPF_REG_0));
+ else
+ emit(BPF_MOV64_REG(BPF_REG_6, BPF_REG_0));
+ emit(BPF_EMIT_CALL(BPF_FUNC_get_current_pid_tgid));
+ if (spec.init_subregs)
+ emit(BPF_MOV32_REG(BPF_REG_7, BPF_REG_0));
+ else
+ emit(BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
+ /* ; setup initial r6/w6 possible value range ([x.a, x.b])
+ * r1 = %[x.a] ll; | w1 = %[x.a];
+ * r2 = %[x.b] ll; | w2 = %[x.b];
+ * if r6 < r1 goto out; | if w6 < w1 goto out;
+ * if r6 > r2 goto out; | if w6 > w2 goto out;
+ */
+ if (spec.setup_subregs) {
+ emit(BPF_MOV32_IMM(BPF_REG_1, (s32)x.a));
+ emit(BPF_MOV32_IMM(BPF_REG_2, (s32)x.b));
+ emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
+ BPF_REG_6, BPF_REG_1, JMP_TO(exit_pos)));
+ emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
+ BPF_REG_6, BPF_REG_2, JMP_TO(exit_pos)));
+ } else {
+ emit(BPF_LD_IMM64(BPF_REG_1, x.a));
+ emit(BPF_LD_IMM64(BPF_REG_2, x.b));
+ emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
+ BPF_REG_6, BPF_REG_1, JMP_TO(exit_pos)));
+ emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
+ BPF_REG_6, BPF_REG_2, JMP_TO(exit_pos)));
+ }
+ /* ; setup initial r7/w7 possible value range ([y.a, y.b])
+ * r1 = %[y.a] ll; | w1 = %[y.a];
+ * r2 = %[y.b] ll; | w2 = %[y.b];
+ * if r7 < r1 goto out; | if w7 < w1 goto out;
+ * if r7 > r2 goto out; | if w7 > w2 goto out;
+ */
+ if (spec.setup_subregs) {
+ emit(BPF_MOV32_IMM(BPF_REG_1, (s32)y.a));
+ emit(BPF_MOV32_IMM(BPF_REG_2, (s32)y.b));
+ emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
+ BPF_REG_7, BPF_REG_1, JMP_TO(exit_pos)));
+ emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
+ BPF_REG_7, BPF_REG_2, JMP_TO(exit_pos)));
+ } else {
+ emit(BPF_LD_IMM64(BPF_REG_1, y.a));
+ emit(BPF_LD_IMM64(BPF_REG_2, y.b));
+ emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
+ BPF_REG_7, BPF_REG_1, JMP_TO(exit_pos)));
+ emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
+ BPF_REG_7, BPF_REG_2, JMP_TO(exit_pos)));
+ }
+ /* ; range test instruction
+ * if r6 <op> r7 goto +3; | if w6 <op> w7 goto +3;
+ */
+ switch (op) {
+ case OP_LT: op_code = spec.compare_signed ? BPF_JSLT : BPF_JLT; break;
+ case OP_LE: op_code = spec.compare_signed ? BPF_JSLE : BPF_JLE; break;
+ case OP_GT: op_code = spec.compare_signed ? BPF_JSGT : BPF_JGT; break;
+ case OP_GE: op_code = spec.compare_signed ? BPF_JSGE : BPF_JGE; break;
+ case OP_EQ: op_code = BPF_JEQ; break;
+ case OP_NE: op_code = BPF_JNE; break;
+ default:
+ printf("unrecognized op %d\n", op);
+ return -ENOTSUP;
+ }
+ /* ; BEFORE conditional, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably
+ * ; this is used for debugging, as the verifier doesn't always print
+ * ; register states as of the condition jump instruction (e.g., when
+ * ; precision marking happens)
+ * r0 = r6; | w0 = w6;
+ * r0 = r7; | w0 = w7;
+ */
+ if (spec.compare_subregs) {
+ emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
+ emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
+ } else {
+ emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
+ emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
+ }
+ if (spec.compare_subregs)
+ emit(BPF_JMP32_REG(op_code, BPF_REG_6, BPF_REG_7, 3));
+ else
+ emit(BPF_JMP_REG(op_code, BPF_REG_6, BPF_REG_7, 3));
+ /* ; FALSE branch, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably
+ * r0 = r6; | w0 = w6;
+ * r0 = r7; | w0 = w7;
+ * exit;
+ */
+ *false_pos = cur_pos;
+ if (spec.compare_subregs) {
+ emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
+ emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
+ } else {
+ emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
+ emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
+ }
+ if (branch_taken == 1) /* false branch is never taken */
+ emit(BPF_EMIT_CALL(0xDEAD)); /* poison this branch */
+ else
+ emit(BPF_EXIT_INSN());
+ /* ; TRUE branch, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably
+ * r0 = r6; | w0 = w6;
+ * r0 = r7; | w0 = w7;
+ * exit;
+ */
+ *true_pos = cur_pos;
+ if (spec.compare_subregs) {
+ emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
+ emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
+ } else {
+ emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
+ emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
+ }
+ if (branch_taken == 0) /* true branch is never taken */
+ emit(BPF_EMIT_CALL(0xDEAD)); /* poison this branch */
+ emit(BPF_EXIT_INSN()); /* last instruction has to be exit */
+
+ fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "reg_bounds_test",
+ "GPL", insns, cur_pos, &opts);
+ if (fd < 0)
+ return fd;
+
+ close(fd);
+ return 0;
+#undef emit
+#undef JMP_TO
+}
+
+#define str_has_pfx(str, pfx) (strncmp(str, pfx, strlen(pfx)) == 0)
+
+/* Parse register state from verifier log.
+ * `s` should point to the start of "Rx = ..." substring in the verifier log.
+ */
+static int parse_reg_state(const char *s, struct reg_state *reg)
+{
+ /* There are two generic forms for SCALAR register:
+ * - known constant: R6_rwD=P%lld
+ * - range: R6_rwD=scalar(id=1,...), where "..." is a comma-separated
+ * list of optional range specifiers:
+ * - umin=%llu, if missing, assumed 0;
+ * - umax=%llu, if missing, assumed U64_MAX;
+ * - smin=%lld, if missing, assumed S64_MIN;
+ * - smax=%lld, if missing, assumed S64_MAX;
+ * - umin32=%d, if missing, assumed 0;
+ * - umax32=%d, if missing, assumed U32_MAX;
+ * - smin32=%d, if missing, assumed S32_MIN;
+ * - smax32=%d, if missing, assumed S32_MAX;
+ * - var_off=(%#llx; %#llx), tnum part, we don't care about it.
+ *
+ * If some of the values are equal, they will be grouped (but min/max
+ * are not mixed together, and similarly negative values are not
+ * grouped with non-negative ones). E.g.:
+ *
+ * R6_w=Pscalar(smin=smin32=0, smax=umax=umax32=1000)
+ *
+ * _rwD part is optional (and any of the letters can be missing).
+ * P (precision mark) is optional as well.
+ *
+ * Anything inside scalar() is optional, including id, of course.
+ */
+ struct {
+ const char *pfx;
+ u64 *dst, def;
+ bool is_32, is_set;
+ } *f, fields[8] = {
+ {"smin=", &reg->r[S64].a, S64_MIN},
+ {"smax=", &reg->r[S64].b, S64_MAX},
+ {"umin=", &reg->r[U64].a, 0},
+ {"umax=", &reg->r[U64].b, U64_MAX},
+ {"smin32=", &reg->r[S32].a, (u32)S32_MIN, true},
+ {"smax32=", &reg->r[S32].b, (u32)S32_MAX, true},
+ {"umin32=", &reg->r[U32].a, 0, true},
+ {"umax32=", &reg->r[U32].b, U32_MAX, true},
+ };
+ const char *p;
+ int i;
+
+ p = strchr(s, '=');
+ if (!p)
+ return -EINVAL;
+ p++;
+ if (*p == 'P')
+ p++;
+
+ if (!str_has_pfx(p, "scalar(")) {
+ long long sval;
+ enum num_t t;
+
+ if (p[0] == '0' && p[1] == 'x') {
+ if (sscanf(p, "%llx", &sval) != 1)
+ return -EINVAL;
+ } else {
+ if (sscanf(p, "%lld", &sval) != 1)
+ return -EINVAL;
+ }
+
+ reg->valid = true;
+ for (t = first_t; t <= last_t; t++) {
+ reg->r[t] = range(t, sval, sval);
+ }
+ return 0;
+ }
+
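+ /* skip past "scalar(": sizeof("scalar") is 7, which also covers the '(' */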
+ p += sizeof("scalar");
+ while (p) {
+ int midxs[ARRAY_SIZE(fields)], mcnt = 0;
+ u64 val;
+
+ for (i = 0; i < ARRAY_SIZE(fields); i++) {
+ f = &fields[i];
+ if (!str_has_pfx(p, f->pfx))
+ continue;
+ midxs[mcnt++] = i;
+ p += strlen(f->pfx);
+ }
+
+ if (mcnt) {
+ /* populate all matched fields */
+ if (p[0] == '0' && p[1] == 'x') {
+ if (sscanf(p, "%llx", &val) != 1)
+ return -EINVAL;
+ } else {
+ if (sscanf(p, "%lld", &val) != 1)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mcnt; i++) {
+ f = &fields[midxs[i]];
+ f->is_set = true;
+ *f->dst = f->is_32 ? (u64)(u32)val : val;
+ }
+ } else if (str_has_pfx(p, "var_off")) {
+ /* skip "var_off=(0x0; 0x3f)" part completely */
+ p = strchr(p, ')');
+ if (!p)
+ return -EINVAL;
+ p++;
+ }
+
+ p = strpbrk(p, ",)");
+ if (!p)
+ return -EINVAL;
+ if (*p == ')')
+ break;
+ p++;
+ }
+
+ reg->valid = true;
+
+ for (i = 0; i < ARRAY_SIZE(fields); i++) {
+ f = &fields[i];
+ if (!f->is_set)
+ *f->dst = f->def;
+ }
+
+ return 0;
+}
+
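+/* Parsing sketch (illustrative): for a log fragment like
+ * "R6_w=scalar(smin=-10,smax=10,umin32=0,umax32=10)" this yields
+ * reg->r[S64]=[-10, 10] and reg->r[U32]=[0, 10], while U64 and S32 fall
+ * back to their defaults, [0, U64_MAX] and [S32_MIN, S32_MAX].
+ */
+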
+/* Parse all register states (TRUE/FALSE branches and DST/SRC registers)
+ * out of the verifier log for a corresponding test case BPF program.
+ */
+static int parse_range_cmp_log(const char *log_buf, struct case_spec spec,
+ int false_pos, int true_pos,
+ struct reg_state *false1_reg, struct reg_state *false2_reg,
+ struct reg_state *true1_reg, struct reg_state *true2_reg)
+{
+ struct {
+ int insn_idx;
+ int reg_idx;
+ const char *reg_upper;
+ struct reg_state *state;
+ } specs[] = {
+ {false_pos, 6, "R6=", false1_reg},
+ {false_pos + 1, 7, "R7=", false2_reg},
+ {true_pos, 6, "R6=", true1_reg},
+ {true_pos + 1, 7, "R7=", true2_reg},
+ };
+ char buf[32];
+ const char *p = log_buf, *q;
+ int i, err;
+
+ for (i = 0; i < 4; i++) {
+ sprintf(buf, "%d: (%s) %s = %s%d", specs[i].insn_idx,
+ spec.compare_subregs ? "bc" : "bf",
+ spec.compare_subregs ? "w0" : "r0",
+ spec.compare_subregs ? "w" : "r", specs[i].reg_idx);
+
+ q = strstr(p, buf);
+ if (!q) {
+ *specs[i].state = (struct reg_state){.valid = false};
+ continue;
+ }
+ p = strstr(q, specs[i].reg_upper);
+ if (!p)
+ return -EINVAL;
+ err = parse_reg_state(p, specs[i].state);
+ if (err)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Validate ranges match, and print details if they don't */
+static bool assert_range_eq(enum num_t t, struct range x, struct range y,
+ const char *ctx1, const char *ctx2)
+{
+ DEFINE_STRBUF(sb, 512);
+
+ if (range_eq(x, y))
+ return true;
+
+ snappendf(sb, "MISMATCH %s.%s: ", ctx1, ctx2);
+ snprintf_range(t, sb, x);
+ snappendf(sb, " != ");
+ snprintf_range(t, sb, y);
+
+ printf("%s\n", sb->buf);
+
+ return false;
+}
+
+/* Validate that register states match, and print details if they don't */
+static bool assert_reg_state_eq(struct reg_state *r, struct reg_state *e, const char *ctx)
+{
+ bool ok = true;
+ enum num_t t;
+
+ if (r->valid != e->valid) {
+ printf("MISMATCH %s: actual %s != expected %s\n", ctx,
+ r->valid ? "<valid>" : "<invalid>",
+ e->valid ? "<valid>" : "<invalid>");
+ return false;
+ }
+
+ if (!r->valid)
+ return true;
+
+ for (t = first_t; t <= last_t; t++) {
+ if (!assert_range_eq(t, r->r[t], e->r[t], ctx, t_str(t)))
+ ok = false;
+ }
+
+ return ok;
+}
+
+/* Print the verifier log, filtering out irrelevant noise */
+static void print_verifier_log(const char *buf)
+{
+ const char *p;
+
+ while (buf[0]) {
+ p = strchrnul(buf, '\n');
+
+ /* filter out irrelevant precision backtracking logs */
+ if (str_has_pfx(buf, "mark_precise: "))
+ goto skip_line;
+
+ printf("%.*s\n", (int)(p - buf), buf);
+
+skip_line:
+ buf = *p == '\0' ? p : p + 1;
+ }
+}
+
+/* Simulate provided test case purely with our own range-based logic.
+ * This is done to set up expectations for verifier's branch_taken logic and
+ * verifier's register states in the verifier log.
+ */
+static void sim_case(enum num_t init_t, enum num_t cond_t,
+ struct range x, struct range y, enum op op,
+ struct reg_state *fr1, struct reg_state *fr2,
+ struct reg_state *tr1, struct reg_state *tr2,
+ int *branch_taken)
+{
+ const u64 A = x.a;
+ const u64 B = x.b;
+ const u64 C = y.a;
+ const u64 D = y.b;
+ struct reg_state rc;
+ enum op rev_op = complement_op(op);
+ enum num_t t;
+
+ fr1->valid = fr2->valid = true;
+ tr1->valid = tr2->valid = true;
+ for (t = first_t; t <= last_t; t++) {
+ /* if we are initializing using 32-bit subregisters,
+ * full registers get upper 32 bits zeroed automatically
+ */
+ struct range z = t_is_32(init_t) ? unkn_subreg(t) : unkn[t];
+
+ fr1->r[t] = fr2->r[t] = tr1->r[t] = tr2->r[t] = z;
+ }
+
+ /* step 1: r1 >= A, r2 >= C */
+ reg_state_set_const(&rc, init_t, A);
+ reg_state_cond(init_t, fr1, &rc, OP_GE, fr1, NULL, "r1>=A");
+ reg_state_set_const(&rc, init_t, C);
+ reg_state_cond(init_t, fr2, &rc, OP_GE, fr2, NULL, "r2>=C");
+ *tr1 = *fr1;
+ *tr2 = *fr2;
+ if (env.verbosity >= VERBOSE_VERY) {
+ printf("STEP1 (%s) R1: ", t_str(init_t)); print_reg_state(fr1, "\n");
+ printf("STEP1 (%s) R2: ", t_str(init_t)); print_reg_state(fr2, "\n");
+ }
+
+ /* step 2: r1 <= B, r2 <= D */
+ reg_state_set_const(&rc, init_t, B);
+ reg_state_cond(init_t, fr1, &rc, OP_LE, fr1, NULL, "r1<=B");
+ reg_state_set_const(&rc, init_t, D);
+ reg_state_cond(init_t, fr2, &rc, OP_LE, fr2, NULL, "r2<=D");
+ *tr1 = *fr1;
+ *tr2 = *fr2;
+ if (env.verbosity >= VERBOSE_VERY) {
+ printf("STEP2 (%s) R1: ", t_str(init_t)); print_reg_state(fr1, "\n");
+ printf("STEP2 (%s) R2: ", t_str(init_t)); print_reg_state(fr2, "\n");
+ }
+
+ /* step 3: r1 <op> r2 */
+ *branch_taken = reg_state_branch_taken_op(cond_t, fr1, fr2, op);
+ fr1->valid = fr2->valid = false;
+ tr1->valid = tr2->valid = false;
+ if (*branch_taken != 1) { /* FALSE is possible */
+ fr1->valid = fr2->valid = true;
+ reg_state_cond(cond_t, fr1, fr2, rev_op, fr1, fr2, "FALSE");
+ }
+ if (*branch_taken != 0) { /* TRUE is possible */
+ tr1->valid = tr2->valid = true;
+ reg_state_cond(cond_t, tr1, tr2, op, tr1, tr2, "TRUE");
+ }
+ if (env.verbosity >= VERBOSE_VERY) {
+ printf("STEP3 (%s) FALSE R1:", t_str(cond_t)); print_reg_state(fr1, "\n");
+ printf("STEP3 (%s) FALSE R2:", t_str(cond_t)); print_reg_state(fr2, "\n");
+ printf("STEP3 (%s) TRUE R1:", t_str(cond_t)); print_reg_state(tr1, "\n");
+ printf("STEP3 (%s) TRUE R2:", t_str(cond_t)); print_reg_state(tr2, "\n");
+ }
+}
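+
+/* E.g., sim_case(U64, U64, {5, 10}, {8, 8}, OP_LT, ...) simulates r6 in
+ * [5, 10] and r7 == 8 under "if r6 < r7": the FALSE branch leaves r6 in
+ * [8, 10], the TRUE branch leaves r6 in [5, 7], and branch_taken == -1
+ * since both outcomes are possible.
+ */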
+
+/* ===============================
+ * HIGH-LEVEL TEST CASE VALIDATION
+ * ===============================
+ */
+static u32 upper_seeds[] = {
+ 0,
+ 1,
+ U32_MAX,
+ U32_MAX - 1,
+ S32_MAX,
+ (u32)S32_MIN,
+};
+
+static u32 lower_seeds[] = {
+ 0,
+ 1,
+ 2, (u32)-2,
+ 255, (u32)-255,
+ UINT_MAX,
+ UINT_MAX - 1,
+ INT_MAX,
+ (u32)INT_MIN,
+};
+
+struct ctx {
+ int val_cnt, subval_cnt, range_cnt, subrange_cnt;
+ u64 uvals[ARRAY_SIZE(upper_seeds) * ARRAY_SIZE(lower_seeds)];
+ s64 svals[ARRAY_SIZE(upper_seeds) * ARRAY_SIZE(lower_seeds)];
+ u32 usubvals[ARRAY_SIZE(lower_seeds)];
+ s32 ssubvals[ARRAY_SIZE(lower_seeds)];
+ struct range *uranges, *sranges;
+ struct range *usubranges, *ssubranges;
+ int max_failure_cnt, cur_failure_cnt;
+ int total_case_cnt, case_cnt;
+ int rand_case_cnt;
+ unsigned rand_seed;
+ __u64 start_ns;
+ char progress_ctx[64];
+};
+
+static void cleanup_ctx(struct ctx *ctx)
+{
+ free(ctx->uranges);
+ free(ctx->sranges);
+ free(ctx->usubranges);
+ free(ctx->ssubranges);
+}
+
+struct subtest_case {
+ enum num_t init_t;
+ enum num_t cond_t;
+ struct range x;
+ struct range y;
+ enum op op;
+};
+
+static void subtest_case_str(struct strbuf *sb, struct subtest_case *t, bool use_op)
+{
+ snappendf(sb, "(%s)", t_str(t->init_t));
+ snprintf_range(t->init_t, sb, t->x);
+ snappendf(sb, " (%s)%s ", t_str(t->cond_t), use_op ? op_str(t->op) : "<op>");
+ snprintf_range(t->init_t, sb, t->y);
+}
+
+/* Generate and validate test case based on specific combination of setup
+ * register ranges (including their expected num_t domain), and conditional
+ * operation to perform (including num_t domain in which it has to be
+ * performed)
+ */
+static int verify_case_op(enum num_t init_t, enum num_t cond_t,
+ struct range x, struct range y, enum op op)
+{
+ char log_buf[256 * 1024];
+ size_t log_sz = sizeof(log_buf);
+ int err, false_pos = 0, true_pos = 0, branch_taken;
+ struct reg_state fr1, fr2, tr1, tr2;
+ struct reg_state fe1, fe2, te1, te2;
+ bool failed = false;
+ struct case_spec spec = {
+ .init_subregs = (init_t == U32 || init_t == S32),
+ .setup_subregs = (init_t == U32 || init_t == S32),
+ .setup_signed = (init_t == S64 || init_t == S32),
+ .compare_subregs = (cond_t == U32 || cond_t == S32),
+ .compare_signed = (cond_t == S64 || cond_t == S32),
+ };
+
+ log_buf[0] = '\0';
+
+ sim_case(init_t, cond_t, x, y, op, &fe1, &fe2, &te1, &te2, &branch_taken);
+
+ err = load_range_cmp_prog(x, y, op, branch_taken, spec,
+ log_buf, log_sz, &false_pos, &true_pos);
+ if (err) {
+ ASSERT_OK(err, "load_range_cmp_prog");
+ failed = true;
+ }
+
+ err = parse_range_cmp_log(log_buf, spec, false_pos, true_pos,
+ &fr1, &fr2, &tr1, &tr2);
+ if (err) {
+ ASSERT_OK(err, "parse_range_cmp_log");
+ failed = true;
+ }
+
+ if (!assert_reg_state_eq(&fr1, &fe1, "false_reg1") ||
+ !assert_reg_state_eq(&fr2, &fe2, "false_reg2") ||
+ !assert_reg_state_eq(&tr1, &te1, "true_reg1") ||
+ !assert_reg_state_eq(&tr2, &te2, "true_reg2")) {
+ failed = true;
+ }
+
+ if (failed || env.verbosity >= VERBOSE_NORMAL) {
+ if (failed || env.verbosity >= VERBOSE_VERY) {
+ printf("VERIFIER LOG:\n========================\n");
+ print_verifier_log(log_buf);
+ printf("=====================\n");
+ }
+ printf("ACTUAL FALSE1: "); print_reg_state(&fr1, "\n");
+ printf("EXPECTED FALSE1: "); print_reg_state(&fe1, "\n");
+ printf("ACTUAL FALSE2: "); print_reg_state(&fr2, "\n");
+ printf("EXPECTED FALSE2: "); print_reg_state(&fe2, "\n");
+ printf("ACTUAL TRUE1: "); print_reg_state(&tr1, "\n");
+ printf("EXPECTED TRUE1: "); print_reg_state(&te1, "\n");
+ printf("ACTUAL TRUE2: "); print_reg_state(&tr2, "\n");
+ printf("EXPECTED TRUE2: "); print_reg_state(&te2, "\n");
+
+ return failed ? -EINVAL : 0;
+ }
+
+ return 0;
+}
+
+/* Given setup ranges and number types, go over all supported operations,
+ * generating individual subtest for each allowed combination
+ */
+static int verify_case_opt(struct ctx *ctx, enum num_t init_t, enum num_t cond_t,
+ struct range x, struct range y, bool is_subtest)
+{
+ DEFINE_STRBUF(sb, 256);
+ int err;
+ struct subtest_case sub = {
+ .init_t = init_t,
+ .cond_t = cond_t,
+ .x = x,
+ .y = y,
+ };
+
+ sb->pos = 0; /* reset position in strbuf */
+ subtest_case_str(sb, &sub, false /* ignore op */);
+ if (is_subtest && !test__start_subtest(sb->buf))
+ return 0;
+
+ for (sub.op = first_op; sub.op <= last_op; sub.op++) {
+ sb->pos = 0; /* reset position in strbuf */
+ subtest_case_str(sb, &sub, true /* print op */);
+
+ if (env.verbosity >= VERBOSE_NORMAL) /* this speeds up debugging */
+ printf("TEST CASE: %s\n", sb->buf);
+
+ err = verify_case_op(init_t, cond_t, x, y, sub.op);
+ if (err || env.verbosity >= VERBOSE_NORMAL)
+ ASSERT_OK(err, sb->buf);
+ if (err) {
+ ctx->cur_failure_cnt++;
+ if (ctx->cur_failure_cnt > ctx->max_failure_cnt)
+ return err;
+ return 0; /* keep testing other cases */
+ }
+ ctx->case_cnt++;
+ if ((ctx->case_cnt % 10000) == 0) {
+ double progress = (ctx->case_cnt + 0.0) / ctx->total_case_cnt;
+ u64 elapsed_ns = get_time_ns() - ctx->start_ns;
+ double remain_ns = elapsed_ns / progress * (1 - progress);
+
+ fprintf(env.stderr_saved, "PROGRESS (%s): %d/%d (%.2lf%%), "
+ "elapsed %llu mins (%.2lf hrs), "
+ "ETA %.0lf mins (%.2lf hrs)\n",
+ ctx->progress_ctx,
+ ctx->case_cnt, ctx->total_case_cnt, 100.0 * progress,
+ elapsed_ns / 1000000000 / 60,
+ elapsed_ns / 1000000000.0 / 3600,
+ remain_ns / 1000000000.0 / 60,
+ remain_ns / 1000000000.0 / 3600);
+ }
+ }
+
+ return 0;
+}
+
+static int verify_case(struct ctx *ctx, enum num_t init_t, enum num_t cond_t,
+ struct range x, struct range y)
+{
+ return verify_case_opt(ctx, init_t, cond_t, x, y, true /* is_subtest */);
+}
+
+/* ================================
+ * GENERATED CASES FROM SEED VALUES
+ * ================================
+ */
+static int u64_cmp(const void *p1, const void *p2)
+{
+ u64 x1 = *(const u64 *)p1, x2 = *(const u64 *)p2;
+
+ return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
+}
+
+static int u32_cmp(const void *p1, const void *p2)
+{
+ u32 x1 = *(const u32 *)p1, x2 = *(const u32 *)p2;
+
+ return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
+}
+
+static int s64_cmp(const void *p1, const void *p2)
+{
+ s64 x1 = *(const s64 *)p1, x2 = *(const s64 *)p2;
+
+ return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
+}
+
+static int s32_cmp(const void *p1, const void *p2)
+{
+ s32 x1 = *(const s32 *)p1, x2 = *(const s32 *)p2;
+
+ return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
+}
+
+/* Generate valid unique constants from seeds, both signed and unsigned */
+static void gen_vals(struct ctx *ctx)
+{
+ int i, j, cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(upper_seeds); i++) {
+ for (j = 0; j < ARRAY_SIZE(lower_seeds); j++) {
+ ctx->uvals[cnt++] = (((u64)upper_seeds[i]) << 32) | lower_seeds[j];
+ }
+ }
+
+ /* sort and compact uvals (i.e., it's `sort | uniq`) */
+ qsort(ctx->uvals, cnt, sizeof(*ctx->uvals), u64_cmp);
+ for (i = 1, j = 0; i < cnt; i++) {
+ if (ctx->uvals[j] == ctx->uvals[i])
+ continue;
+ j++;
+ ctx->uvals[j] = ctx->uvals[i];
+ }
+ ctx->val_cnt = j + 1;
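+ /* dedup matters: e.g., (u32)-2 and UINT_MAX - 1 in lower_seeds are the
+ * same value, so val_cnt ends up smaller than the raw 6 x 10 seed
+ * combinations
+ */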
+
+ /* we have exactly the same number of s64 values, they are just in
+ * a different order than u64s, so just sort them differently
+ */
+ for (i = 0; i < ctx->val_cnt; i++)
+ ctx->svals[i] = ctx->uvals[i];
+ qsort(ctx->svals, ctx->val_cnt, sizeof(*ctx->svals), s64_cmp);
+
+ if (env.verbosity >= VERBOSE_SUPER) {
+ DEFINE_STRBUF(sb1, 256);
+ DEFINE_STRBUF(sb2, 256);
+
+ for (i = 0; i < ctx->val_cnt; i++) {
+ sb1->pos = sb2->pos = 0;
+ snprintf_num(U64, sb1, ctx->uvals[i]);
+ snprintf_num(S64, sb2, ctx->svals[i]);
+ printf("SEED #%d: u64=%-20s s64=%-20s\n", i, sb1->buf, sb2->buf);
+ }
+ }
+
+ /* 32-bit values are generated separately */
+ cnt = 0;
+ for (i = 0; i < ARRAY_SIZE(lower_seeds); i++) {
+ ctx->usubvals[cnt++] = lower_seeds[i];
+ }
+
+ /* sort and compact usubvals (i.e., it's `sort | uniq`) */
+ qsort(ctx->usubvals, cnt, sizeof(*ctx->usubvals), u32_cmp);
+ for (i = 1, j = 0; i < cnt; i++) {
+ if (ctx->usubvals[j] == ctx->usubvals[i])
+ continue;
+ j++;
+ ctx->usubvals[j] = ctx->usubvals[i];
+ }
+ ctx->subval_cnt = j + 1;
+
+ for (i = 0; i < ctx->subval_cnt; i++)
+ ctx->ssubvals[i] = ctx->usubvals[i];
+ qsort(ctx->ssubvals, ctx->subval_cnt, sizeof(*ctx->ssubvals), s32_cmp);
+
+ if (env.verbosity >= VERBOSE_SUPER) {
+ DEFINE_STRBUF(sb1, 256);
+ DEFINE_STRBUF(sb2, 256);
+
+ for (i = 0; i < ctx->subval_cnt; i++) {
+ sb1->pos = sb2->pos = 0;
+ snprintf_num(U32, sb1, ctx->usubvals[i]);
+ snprintf_num(S32, sb2, ctx->ssubvals[i]);
+ printf("SUBSEED #%d: u32=%-10s s32=%-10s\n", i, sb1->buf, sb2->buf);
+ }
+ }
+}
+
+/* Generate valid ranges from upper/lower seeds */
+static int gen_ranges(struct ctx *ctx)
+{
+ int i, j, cnt = 0;
+
+ for (i = 0; i < ctx->val_cnt; i++) {
+ for (j = i; j < ctx->val_cnt; j++) {
+ if (env.verbosity >= VERBOSE_SUPER) {
+ DEFINE_STRBUF(sb1, 256);
+ DEFINE_STRBUF(sb2, 256);
+
+ sb1->pos = sb2->pos = 0;
+ snprintf_range(U64, sb1, range(U64, ctx->uvals[i], ctx->uvals[j]));
+ snprintf_range(S64, sb2, range(S64, ctx->svals[i], ctx->svals[j]));
+ printf("RANGE #%d: u64=%-40s s64=%-40s\n", cnt, sb1->buf, sb2->buf);
+ }
+ cnt++;
+ }
+ }
+ ctx->range_cnt = cnt;
+
+ ctx->uranges = calloc(ctx->range_cnt, sizeof(*ctx->uranges));
+ if (!ASSERT_OK_PTR(ctx->uranges, "uranges_calloc"))
+ return -EINVAL;
+ ctx->sranges = calloc(ctx->range_cnt, sizeof(*ctx->sranges));
+ if (!ASSERT_OK_PTR(ctx->sranges, "sranges_calloc"))
+ return -EINVAL;
+
+ cnt = 0;
+ for (i = 0; i < ctx->val_cnt; i++) {
+ for (j = i; j < ctx->val_cnt; j++) {
+ ctx->uranges[cnt] = range(U64, ctx->uvals[i], ctx->uvals[j]);
+ ctx->sranges[cnt] = range(S64, ctx->svals[i], ctx->svals[j]);
+ cnt++;
+ }
+ }
+
+ cnt = 0;
+ for (i = 0; i < ctx->subval_cnt; i++) {
+ for (j = i; j < ctx->subval_cnt; j++) {
+ if (env.verbosity >= VERBOSE_SUPER) {
+ DEFINE_STRBUF(sb1, 256);
+ DEFINE_STRBUF(sb2, 256);
+
+ sb1->pos = sb2->pos = 0;
+ snprintf_range(U32, sb1, range(U32, ctx->usubvals[i], ctx->usubvals[j]));
+ snprintf_range(S32, sb2, range(S32, ctx->ssubvals[i], ctx->ssubvals[j]));
+ printf("SUBRANGE #%d: u32=%-20s s32=%-20s\n", cnt, sb1->buf, sb2->buf);
+ }
+ cnt++;
+ }
+ }
+ ctx->subrange_cnt = cnt;
+
+ ctx->usubranges = calloc(ctx->subrange_cnt, sizeof(*ctx->usubranges));
+ if (!ASSERT_OK_PTR(ctx->usubranges, "usubranges_calloc"))
+ return -EINVAL;
+ ctx->ssubranges = calloc(ctx->subrange_cnt, sizeof(*ctx->ssubranges));
+ if (!ASSERT_OK_PTR(ctx->ssubranges, "ssubranges_calloc"))
+ return -EINVAL;
+
+ cnt = 0;
+ for (i = 0; i < ctx->subval_cnt; i++) {
+ for (j = i; j < ctx->subval_cnt; j++) {
+ ctx->usubranges[cnt] = range(U32, ctx->usubvals[i], ctx->usubvals[j]);
+ ctx->ssubranges[cnt] = range(S32, ctx->ssubvals[i], ctx->ssubvals[j]);
+ cnt++;
+ }
+ }
+
+ return 0;
+}
+
+static int parse_env_vars(struct ctx *ctx)
+{
+ const char *s;
+
+ if ((s = getenv("REG_BOUNDS_MAX_FAILURE_CNT"))) {
+ errno = 0;
+ ctx->max_failure_cnt = strtol(s, NULL, 10);
+ if (errno || ctx->max_failure_cnt < 0) {
+ ASSERT_OK(-errno, "REG_BOUNDS_MAX_FAILURE_CNT");
+ return -EINVAL;
+ }
+ }
+
+ if ((s = getenv("REG_BOUNDS_RAND_CASE_CNT"))) {
+ errno = 0;
+ ctx->rand_case_cnt = strtol(s, NULL, 10);
+ if (errno || ctx->rand_case_cnt < 0) {
+ ASSERT_OK(-errno, "REG_BOUNDS_RAND_CASE_CNT");
+ return -EINVAL;
+ }
+ }
+
+ if ((s = getenv("REG_BOUNDS_RAND_SEED"))) {
+ errno = 0;
+ ctx->rand_seed = strtoul(s, NULL, 10);
+ if (errno) {
+ ASSERT_OK(-errno, "REG_BOUNDS_RAND_SEED");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int prepare_gen_tests(struct ctx *ctx)
+{
+ const char *s;
+ int err;
+
+ if (!(s = getenv("SLOW_TESTS")) || strcmp(s, "1") != 0) {
+ test__skip();
+ return -ENOTSUP;
+ }
+
+ err = parse_env_vars(ctx);
+ if (err)
+ return err;
+
+ gen_vals(ctx);
+ err = gen_ranges(ctx);
+ if (err) {
+ ASSERT_OK(err, "gen_ranges");
+ return err;
+ }
+
+ return 0;
+}
+
+/* Go over generated constants and ranges and validate various supported
+ * combinations of them
+ */
+static void validate_gen_range_vs_const_64(enum num_t init_t, enum num_t cond_t)
+{
+ struct ctx ctx;
+ struct range rconst;
+ const struct range *ranges;
+ const u64 *vals;
+ int i, j;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ if (prepare_gen_tests(&ctx))
+ goto cleanup;
+
+ ranges = init_t == U64 ? ctx.uranges : ctx.sranges;
+ vals = init_t == U64 ? ctx.uvals : (const u64 *)ctx.svals;
+
+ ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.range_cnt * ctx.val_cnt);
+ ctx.start_ns = get_time_ns();
+ snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
+ "RANGE x CONST, %s -> %s",
+ t_str(init_t), t_str(cond_t));
+
+ for (i = 0; i < ctx.val_cnt; i++) {
+ for (j = 0; j < ctx.range_cnt; j++) {
+ rconst = range(init_t, vals[i], vals[i]);
+
+ /* (u64|s64)(<range> x <const>) */
+ if (verify_case(&ctx, init_t, cond_t, ranges[j], rconst))
+ goto cleanup;
+ /* (u64|s64)(<const> x <range>) */
+ if (verify_case(&ctx, init_t, cond_t, rconst, ranges[j]))
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ cleanup_ctx(&ctx);
+}
+
+static void validate_gen_range_vs_const_32(enum num_t init_t, enum num_t cond_t)
+{
+ struct ctx ctx;
+ struct range rconst;
+ const struct range *ranges;
+ const u32 *vals;
+ int i, j;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ if (prepare_gen_tests(&ctx))
+ goto cleanup;
+
+ ranges = init_t == U32 ? ctx.usubranges : ctx.ssubranges;
+ vals = init_t == U32 ? ctx.usubvals : (const u32 *)ctx.ssubvals;
+
+ ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.subrange_cnt * ctx.subval_cnt);
+ ctx.start_ns = get_time_ns();
+ snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
+ "RANGE x CONST, %s -> %s",
+ t_str(init_t), t_str(cond_t));
+
+ for (i = 0; i < ctx.subval_cnt; i++) {
+ for (j = 0; j < ctx.subrange_cnt; j++) {
+ rconst = range(init_t, vals[i], vals[i]);
+
+ /* (u32|s32)(<range> x <const>) */
+ if (verify_case(&ctx, init_t, cond_t, ranges[j], rconst))
+ goto cleanup;
+ /* (u32|s32)(<const> x <range>) */
+ if (verify_case(&ctx, init_t, cond_t, rconst, ranges[j]))
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ cleanup_ctx(&ctx);
+}
+
+static void validate_gen_range_vs_range(enum num_t init_t, enum num_t cond_t)
+{
+ struct ctx ctx;
+ const struct range *ranges;
+ int i, j, rcnt;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ if (prepare_gen_tests(&ctx))
+ goto cleanup;
+
+ switch (init_t) {
+ case U64:
+ ranges = ctx.uranges;
+ rcnt = ctx.range_cnt;
+ break;
+ case U32:
+ ranges = ctx.usubranges;
+ rcnt = ctx.subrange_cnt;
+ break;
+ case S64:
+ ranges = ctx.sranges;
+ rcnt = ctx.range_cnt;
+ break;
+ case S32:
+ ranges = ctx.ssubranges;
+ rcnt = ctx.subrange_cnt;
+ break;
+ default:
+ printf("validate_gen_range_vs_range: unsupported init_t!\n");
+ exit(1);
+ }
+
+ ctx.total_case_cnt = (last_op - first_op + 1) * (2 * rcnt * (rcnt + 1) / 2);
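+ /* each unordered (i, j) range pair with i <= j is verified in both
+ * argument orders, hence the 2 * rcnt * (rcnt + 1) / 2 factor
+ */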
+ ctx.start_ns = get_time_ns();
+ snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
+ "RANGE x RANGE, %s -> %s",
+ t_str(init_t), t_str(cond_t));
+
+ for (i = 0; i < rcnt; i++) {
+ for (j = i; j < rcnt; j++) {
+ /* (<range> x <range>) */
+ if (verify_case(&ctx, init_t, cond_t, ranges[i], ranges[j]))
+ goto cleanup;
+ if (verify_case(&ctx, init_t, cond_t, ranges[j], ranges[i]))
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ cleanup_ctx(&ctx);
+}
+
+/* Go over thousands of test cases generated from initial seed values.
+ * Given this takes a long time, guard this behind the SLOW_TESTS=1 envvar.
+ * If the envvar is not set, this test is skipped during test_progs testing.
+ *
+ * We split this up into smaller subsets based on initialization and
+ * conditional numeric domains to get an easy parallelization with test_progs'
+ * -j argument.
+ */
+
+/* RANGE x CONST, U64 initial range */
+void test_reg_bounds_gen_consts_u64_u64(void) { validate_gen_range_vs_const_64(U64, U64); }
+void test_reg_bounds_gen_consts_u64_s64(void) { validate_gen_range_vs_const_64(U64, S64); }
+void test_reg_bounds_gen_consts_u64_u32(void) { validate_gen_range_vs_const_64(U64, U32); }
+void test_reg_bounds_gen_consts_u64_s32(void) { validate_gen_range_vs_const_64(U64, S32); }
+/* RANGE x CONST, S64 initial range */
+void test_reg_bounds_gen_consts_s64_u64(void) { validate_gen_range_vs_const_64(S64, U64); }
+void test_reg_bounds_gen_consts_s64_s64(void) { validate_gen_range_vs_const_64(S64, S64); }
+void test_reg_bounds_gen_consts_s64_u32(void) { validate_gen_range_vs_const_64(S64, U32); }
+void test_reg_bounds_gen_consts_s64_s32(void) { validate_gen_range_vs_const_64(S64, S32); }
+/* RANGE x CONST, U32 initial range */
+void test_reg_bounds_gen_consts_u32_u64(void) { validate_gen_range_vs_const_32(U32, U64); }
+void test_reg_bounds_gen_consts_u32_s64(void) { validate_gen_range_vs_const_32(U32, S64); }
+void test_reg_bounds_gen_consts_u32_u32(void) { validate_gen_range_vs_const_32(U32, U32); }
+void test_reg_bounds_gen_consts_u32_s32(void) { validate_gen_range_vs_const_32(U32, S32); }
+/* RANGE x CONST, S32 initial range */
+void test_reg_bounds_gen_consts_s32_u64(void) { validate_gen_range_vs_const_32(S32, U64); }
+void test_reg_bounds_gen_consts_s32_s64(void) { validate_gen_range_vs_const_32(S32, S64); }
+void test_reg_bounds_gen_consts_s32_u32(void) { validate_gen_range_vs_const_32(S32, U32); }
+void test_reg_bounds_gen_consts_s32_s32(void) { validate_gen_range_vs_const_32(S32, S32); }
+
+/* RANGE x RANGE, U64 initial range */
+void test_reg_bounds_gen_ranges_u64_u64(void) { validate_gen_range_vs_range(U64, U64); }
+void test_reg_bounds_gen_ranges_u64_s64(void) { validate_gen_range_vs_range(U64, S64); }
+void test_reg_bounds_gen_ranges_u64_u32(void) { validate_gen_range_vs_range(U64, U32); }
+void test_reg_bounds_gen_ranges_u64_s32(void) { validate_gen_range_vs_range(U64, S32); }
+/* RANGE x RANGE, S64 initial range */
+void test_reg_bounds_gen_ranges_s64_u64(void) { validate_gen_range_vs_range(S64, U64); }
+void test_reg_bounds_gen_ranges_s64_s64(void) { validate_gen_range_vs_range(S64, S64); }
+void test_reg_bounds_gen_ranges_s64_u32(void) { validate_gen_range_vs_range(S64, U32); }
+void test_reg_bounds_gen_ranges_s64_s32(void) { validate_gen_range_vs_range(S64, S32); }
+/* RANGE x RANGE, U32 initial range */
+void test_reg_bounds_gen_ranges_u32_u64(void) { validate_gen_range_vs_range(U32, U64); }
+void test_reg_bounds_gen_ranges_u32_s64(void) { validate_gen_range_vs_range(U32, S64); }
+void test_reg_bounds_gen_ranges_u32_u32(void) { validate_gen_range_vs_range(U32, U32); }
+void test_reg_bounds_gen_ranges_u32_s32(void) { validate_gen_range_vs_range(U32, S32); }
+/* RANGE x RANGE, S32 initial range */
+void test_reg_bounds_gen_ranges_s32_u64(void) { validate_gen_range_vs_range(S32, U64); }
+void test_reg_bounds_gen_ranges_s32_s64(void) { validate_gen_range_vs_range(S32, S64); }
+void test_reg_bounds_gen_ranges_s32_u32(void) { validate_gen_range_vs_range(S32, U32); }
+void test_reg_bounds_gen_ranges_s32_s32(void) { validate_gen_range_vs_range(S32, S32); }
+
+#define DEFAULT_RAND_CASE_CNT 100
+
+#define RAND_21BIT_MASK ((1 << 21) - 1)
+
+static u64 rand_u64(void)
+{
+ /* RAND_MAX is guaranteed to be at least 1<<15, but in practice it
+ * seems to be 1<<31, so we need to call it thrice to get full u64;
+ * we'll use roughly equal split: 22 + 21 + 21 bits
+ */
+ return ((u64)random() << 42) |
+ (((u64)random() & RAND_21BIT_MASK) << 21) |
+ (random() & RAND_21BIT_MASK);
+}
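+
+/* Bit layout sketch: bits [63..42] come from the low 22 bits of call #1,
+ * bits [41..21] from call #2 (masked to 21 bits), and bits [20..0] from
+ * call #3, matching the 22 + 21 + 21 split described above.
+ */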
+
+static u64 rand_const(enum num_t t)
+{
+ return cast_t(t, rand_u64());
+}
+
+static struct range rand_range(enum num_t t)
+{
+ u64 x = rand_const(t), y = rand_const(t);
+
+ return range(t, min_t(t, x, y), max_t(t, x, y));
+}
+
+static void validate_rand_ranges(enum num_t init_t, enum num_t cond_t, bool const_range)
+{
+ struct ctx ctx;
+ struct range range1, range2;
+ int err, i;
+ u64 t;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ err = parse_env_vars(&ctx);
+ if (err) {
+ ASSERT_OK(err, "parse_env_vars");
+ return;
+ }
+
+ if (ctx.rand_case_cnt == 0)
+ ctx.rand_case_cnt = DEFAULT_RAND_CASE_CNT;
+ if (ctx.rand_seed == 0)
+ ctx.rand_seed = (unsigned)get_time_ns();
+
+ srandom(ctx.rand_seed);
+
+ ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.rand_case_cnt);
+ ctx.start_ns = get_time_ns();
+ snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
+ "[RANDOM SEED %u] RANGE x %s, %s -> %s",
+ ctx.rand_seed, const_range ? "CONST" : "RANGE",
+ t_str(init_t), t_str(cond_t));
+
+ for (i = 0; i < ctx.rand_case_cnt; i++) {
+ range1 = rand_range(init_t);
+ if (const_range) {
+ t = rand_const(init_t);
+ range2 = range(init_t, t, t);
+ } else {
+ range2 = rand_range(init_t);
+ }
+
+ /* <range1> x <range2> */
+ if (verify_case_opt(&ctx, init_t, cond_t, range1, range2, false /* !is_subtest */))
+ goto cleanup;
+ /* <range2> x <range1> */
+ if (verify_case_opt(&ctx, init_t, cond_t, range2, range1, false /* !is_subtest */))
+ goto cleanup;
+ }
+
+cleanup:
+ /* make sure we report random seed for reproducing */
+ ASSERT_TRUE(true, ctx.progress_ctx);
+ cleanup_ctx(&ctx);
+}
+
+/* [RANDOM] RANGE x CONST, U64 initial range */
+void test_reg_bounds_rand_consts_u64_u64(void) { validate_rand_ranges(U64, U64, true /* const */); }
+void test_reg_bounds_rand_consts_u64_s64(void) { validate_rand_ranges(U64, S64, true /* const */); }
+void test_reg_bounds_rand_consts_u64_u32(void) { validate_rand_ranges(U64, U32, true /* const */); }
+void test_reg_bounds_rand_consts_u64_s32(void) { validate_rand_ranges(U64, S32, true /* const */); }
+/* [RANDOM] RANGE x CONST, S64 initial range */
+void test_reg_bounds_rand_consts_s64_u64(void) { validate_rand_ranges(S64, U64, true /* const */); }
+void test_reg_bounds_rand_consts_s64_s64(void) { validate_rand_ranges(S64, S64, true /* const */); }
+void test_reg_bounds_rand_consts_s64_u32(void) { validate_rand_ranges(S64, U32, true /* const */); }
+void test_reg_bounds_rand_consts_s64_s32(void) { validate_rand_ranges(S64, S32, true /* const */); }
+/* [RANDOM] RANGE x CONST, U32 initial range */
+void test_reg_bounds_rand_consts_u32_u64(void) { validate_rand_ranges(U32, U64, true /* const */); }
+void test_reg_bounds_rand_consts_u32_s64(void) { validate_rand_ranges(U32, S64, true /* const */); }
+void test_reg_bounds_rand_consts_u32_u32(void) { validate_rand_ranges(U32, U32, true /* const */); }
+void test_reg_bounds_rand_consts_u32_s32(void) { validate_rand_ranges(U32, S32, true /* const */); }
+/* [RANDOM] RANGE x CONST, S32 initial range */
+void test_reg_bounds_rand_consts_s32_u64(void) { validate_rand_ranges(S32, U64, true /* const */); }
+void test_reg_bounds_rand_consts_s32_s64(void) { validate_rand_ranges(S32, S64, true /* const */); }
+void test_reg_bounds_rand_consts_s32_u32(void) { validate_rand_ranges(S32, U32, true /* const */); }
+void test_reg_bounds_rand_consts_s32_s32(void) { validate_rand_ranges(S32, S32, true /* const */); }
+
+/* [RANDOM] RANGE x RANGE, U64 initial range */
+void test_reg_bounds_rand_ranges_u64_u64(void) { validate_rand_ranges(U64, U64, false /* range */); }
+void test_reg_bounds_rand_ranges_u64_s64(void) { validate_rand_ranges(U64, S64, false /* range */); }
+void test_reg_bounds_rand_ranges_u64_u32(void) { validate_rand_ranges(U64, U32, false /* range */); }
+void test_reg_bounds_rand_ranges_u64_s32(void) { validate_rand_ranges(U64, S32, false /* range */); }
+/* [RANDOM] RANGE x RANGE, S64 initial range */
+void test_reg_bounds_rand_ranges_s64_u64(void) { validate_rand_ranges(S64, U64, false /* range */); }
+void test_reg_bounds_rand_ranges_s64_s64(void) { validate_rand_ranges(S64, S64, false /* range */); }
+void test_reg_bounds_rand_ranges_s64_u32(void) { validate_rand_ranges(S64, U32, false /* range */); }
+void test_reg_bounds_rand_ranges_s64_s32(void) { validate_rand_ranges(S64, S32, false /* range */); }
+/* [RANDOM] RANGE x RANGE, U32 initial range */
+void test_reg_bounds_rand_ranges_u32_u64(void) { validate_rand_ranges(U32, U64, false /* range */); }
+void test_reg_bounds_rand_ranges_u32_s64(void) { validate_rand_ranges(U32, S64, false /* range */); }
+void test_reg_bounds_rand_ranges_u32_u32(void) { validate_rand_ranges(U32, U32, false /* range */); }
+void test_reg_bounds_rand_ranges_u32_s32(void) { validate_rand_ranges(U32, S32, false /* range */); }
+/* [RANDOM] RANGE x RANGE, S32 initial range */
+void test_reg_bounds_rand_ranges_s32_u64(void) { validate_rand_ranges(S32, U64, false /* range */); }
+void test_reg_bounds_rand_ranges_s32_s64(void) { validate_rand_ranges(S32, S64, false /* range */); }
+void test_reg_bounds_rand_ranges_s32_u32(void) { validate_rand_ranges(S32, U32, false /* range */); }
+void test_reg_bounds_rand_ranges_s32_s32(void) { validate_rand_ranges(S32, S32, false /* range */); }
+
+/* A set of hard-coded "interesting" cases to validate as part of normal
+ * test_progs test runs
+ */
+static struct subtest_case crafted_cases[] = {
+ {U64, U64, {0, 0xffffffff}, {0, 0}},
+ {U64, U64, {0, 0x80000000}, {0, 0}},
+ {U64, U64, {0x100000000ULL, 0x100000100ULL}, {0, 0}},
+ {U64, U64, {0x100000000ULL, 0x180000000ULL}, {0, 0}},
+ {U64, U64, {0x100000000ULL, 0x1ffffff00ULL}, {0, 0}},
+ {U64, U64, {0x100000000ULL, 0x1ffffff01ULL}, {0, 0}},
+ {U64, U64, {0x100000000ULL, 0x1fffffffeULL}, {0, 0}},
+ {U64, U64, {0x100000001ULL, 0x1000000ffULL}, {0, 0}},
+
+ /* single point overlap, interesting BPF_EQ and BPF_NE interactions */
+ {U64, U64, {0, 1}, {1, 0x80000000}},
+ {U64, S64, {0, 1}, {1, 0x80000000}},
+ {U64, U32, {0, 1}, {1, 0x80000000}},
+ {U64, S32, {0, 1}, {1, 0x80000000}},
+
+ {U64, S64, {0, 0xffffffff00000000ULL}, {0, 0}},
+ {U64, S64, {0x7fffffffffffffffULL, 0xffffffff00000000ULL}, {0, 0}},
+ {U64, S64, {0x7fffffff00000001ULL, 0xffffffff00000000ULL}, {0, 0}},
+ {U64, S64, {0, 0xffffffffULL}, {1, 1}},
+ {U64, S64, {0, 0xffffffffULL}, {0x7fffffff, 0x7fffffff}},
+
+ {U64, U32, {0, 0x100000000}, {0, 0}},
+ {U64, U32, {0xfffffffe, 0x100000000}, {0x80000000, 0x80000000}},
+
+ {U64, S32, {0, 0xffffffff00000000ULL}, {0, 0}},
+ /* these are tricky cases where tightened lower 32-bit boundaries
+ * allow the 64-bit boundaries to be tightened as well
+ */
+ {U64, S32, {0, 0x0ffffffffULL}, {0, 0}},
+ {U64, S32, {0, 0x100000000ULL}, {0, 0}},
+ {U64, S32, {0, 0x100000001ULL}, {0, 0}},
+ {U64, S32, {0, 0x180000000ULL}, {0, 0}},
+ {U64, S32, {0, 0x17fffffffULL}, {0, 0}},
+ {U64, S32, {0, 0x180000001ULL}, {0, 0}},
+
+ /* verifier knows about [-1, 0] range for s32 for this case already */
+ {S64, S64, {0xffffffffffffffffULL, 0}, {0xffffffff00000000ULL, 0xffffffff00000000ULL}},
+ /* but didn't know about these cases initially */
+ {U64, U64, {0xffffffff, 0x100000000ULL}, {0, 0}}, /* s32: [-1, 0] */
+ {U64, U64, {0xffffffff, 0x100000001ULL}, {0, 0}}, /* s32: [-1, 1] */
+
+ /* longer convergence case: learning from u64 -> s64 -> u64 -> u32,
+ * arriving at u32: [1, U32_MAX] (instead of more pessimistic [0, U32_MAX])
+ */
+ {S64, U64, {0xffffffff00000001ULL, 0}, {0xffffffff00000000ULL, 0xffffffff00000000ULL}},
+
+ {U32, U32, {1, U32_MAX}, {0, 0}},
+
+ {U32, S32, {0, U32_MAX}, {U32_MAX, U32_MAX}},
+
+ {S32, U64, {(u32)S32_MIN, (u32)S32_MIN}, {(u32)(s32)-255, 0}},
+ {S32, S64, {(u32)S32_MIN, (u32)(s32)-255}, {(u32)(s32)-2, 0}},
+ {S32, S64, {0, 1}, {(u32)S32_MIN, (u32)S32_MIN}},
+ {S32, U32, {(u32)S32_MIN, (u32)S32_MIN}, {(u32)S32_MIN, (u32)S32_MIN}},
+
+ /* edge overlap testings for BPF_NE */
+ {U64, U64, {0, U64_MAX}, {U64_MAX, U64_MAX}},
+ {U64, U64, {0, U64_MAX}, {0, 0}},
+ {S64, U64, {S64_MIN, 0}, {S64_MIN, S64_MIN}},
+ {S64, U64, {S64_MIN, 0}, {0, 0}},
+ {S64, U64, {S64_MIN, S64_MAX}, {S64_MAX, S64_MAX}},
+ {U32, U32, {0, U32_MAX}, {0, 0}},
+ {U32, U32, {0, U32_MAX}, {U32_MAX, U32_MAX}},
+ {S32, U32, {(u32)S32_MIN, 0}, {0, 0}},
+ {S32, U32, {(u32)S32_MIN, 0}, {(u32)S32_MIN, (u32)S32_MIN}},
+ {S32, U32, {(u32)S32_MIN, S32_MAX}, {S32_MAX, S32_MAX}},
+ {S64, U32, {0x0, 0x1f}, {0xffffffff80000000ULL, 0x000000007fffffffULL}},
+ {S64, U32, {0x0, 0x1f}, {0xffffffffffff8000ULL, 0x0000000000007fffULL}},
+ {S64, U32, {0x0, 0x1f}, {0xffffffffffffff80ULL, 0x000000000000007fULL}},
+};
+
+/* Go over crafted hard-coded cases. This is fast, so we do it as part of
+ * a normal test_progs run.
+ */
+void test_reg_bounds_crafted(void)
+{
+ struct ctx ctx;
+ int i;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ for (i = 0; i < ARRAY_SIZE(crafted_cases); i++) {
+ struct subtest_case *c = &crafted_cases[i];
+
+ verify_case(&ctx, c->init_t, c->cond_t, c->x, c->y);
+ verify_case(&ctx, c->init_t, c->cond_t, c->y, c->x);
+ }
+
+ cleanup_ctx(&ctx);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
new file mode 100644
index 000000000000..f0a8c828f8f1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024-2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <sys/sysinfo.h>
+
+#include "res_spin_lock.skel.h"
+#include "res_spin_lock_fail.skel.h"
+
+void test_res_spin_lock_failure(void)
+{
+ RUN_TESTS(res_spin_lock_fail);
+}
+
+static volatile int skip;
+
+static void *spin_lock_thread(void *arg)
+{
+ int err, prog_fd = *(u32 *) arg;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 10000,
+ );
+
+ while (!READ_ONCE(skip)) {
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (err || topts.retval) {
+ ASSERT_OK(err, "test_run");
+ ASSERT_OK(topts.retval, "test_run retval");
+ break;
+ }
+ }
+ pthread_exit(arg);
+}
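+
+/* Sketch of the ABBA scenario driven below: worker threads repeatedly run
+ * the AB-order lock program, while the main thread runs the BA-order
+ * program until one side reports -EDEADLK, the expected deadlock-detection
+ * result (see the assertions in test_res_spin_lock_success()).
+ */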
+
+void test_res_spin_lock_success(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct res_spin_lock *skel;
+ pthread_t thread_id[16];
+ int prog_fd, i, err;
+ void *ret;
+
+ if (get_nprocs() < 2) {
+ test__skip();
+ return;
+ }
+
+ skel = res_spin_lock__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "res_spin_lock__open_and_load"))
+ return;
+ /* AA deadlock */
+ prog_fd = bpf_program__fd(skel->progs.res_spin_lock_test);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "error");
+ ASSERT_OK(topts.retval, "retval");
+
+ prog_fd = bpf_program__fd(skel->progs.res_spin_lock_test_held_lock_max);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "error");
+ ASSERT_OK(topts.retval, "retval");
+
+ /* Multi-threaded ABBA deadlock. */
+
+ prog_fd = bpf_program__fd(skel->progs.res_spin_lock_test_AB);
+ for (i = 0; i < 16; i++) {
+ int err;
+
+ err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd);
+ if (!ASSERT_OK(err, "pthread_create"))
+ goto end;
+ }
+
+ topts.retval = 0;
+ topts.repeat = 1000;
+ int fd = bpf_program__fd(skel->progs.res_spin_lock_test_BA);
+ while (!topts.retval && !err && !READ_ONCE(skel->bss->err)) {
+ err = bpf_prog_test_run_opts(fd, &topts);
+ }
+
+ WRITE_ONCE(skip, true);
+
+ for (i = 0; i < 16; i++) {
+ if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join"))
+ goto end;
+ if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd"))
+ goto end;
+ }
+
+ ASSERT_EQ(READ_ONCE(skel->bss->err), -EDEADLK, "timeout err");
+ ASSERT_OK(err, "err");
+ ASSERT_EQ(topts.retval, -EDEADLK, "timeout");
+end:
+ res_spin_lock__destroy(skel);
+ return;
+}
+
+void serial_test_res_spin_lock_stress(void)
+{
+ if (libbpf_num_possible_cpus() < 3) {
+ test__skip();
+ return;
+ }
+
+ ASSERT_OK(load_module("bpf_test_rqspinlock.ko", false), "load module AA");
+ sleep(5);
+ unload_module("bpf_test_rqspinlock", false);
+ /*
+ * Insert bpf_test_rqspinlock.ko manually with test_mode=[1|2] to test
+ * other cases (ABBA, ABBCCA).
+ */
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
index 6ace5e9efec1..51544372f52e 100644
--- a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
+++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
@@ -44,7 +44,7 @@ BTF_ID(union, U)
BTF_ID(func, func)
extern __u32 test_list_global[];
-BTF_ID_LIST_GLOBAL(test_list_global)
+BTF_ID_LIST_GLOBAL(test_list_global, 1)
BTF_ID_UNUSED
BTF_ID(typedef, S)
BTF_ID(typedef, T)
@@ -76,7 +76,7 @@ __resolve_symbol(struct btf *btf, int type_id)
}
for (i = 0; i < ARRAY_SIZE(test_symbols); i++) {
- if (test_symbols[i].id != -1)
+ if (test_symbols[i].id >= 0)
continue;
if (BTF_INFO_KIND(type->info) != test_symbols[i].type)
@@ -101,14 +101,14 @@ static int resolve_symbols(void)
int type_id;
__u32 nr;
- btf = btf__parse_elf("btf_data.o", NULL);
+ btf = btf__parse_elf("btf_data.bpf.o", NULL);
if (CHECK(libbpf_get_error(btf), "resolve",
- "Failed to load BTF from btf_data.o\n"))
+ "Failed to load BTF from btf_data.bpf.o\n"))
return -1;
- nr = btf__get_nr_types(btf);
+ nr = btf__type_cnt(btf);
- for (type_id = 1; type_id <= nr; type_id++) {
+ for (type_id = 1; type_id < nr; type_id++) {
if (__resolve_symbol(btf, type_id))
break;
}
@@ -117,14 +117,14 @@ static int resolve_symbols(void)
return 0;
}
-int test_resolve_btfids(void)
+void test_resolve_btfids(void)
{
__u32 *test_list, *test_lists[] = { test_list_local, test_list_global };
unsigned int i, j;
int ret = 0;
if (resolve_symbols())
- return -1;
+ return;
/* Check BTF_ID_LIST(test_list_local) and
* BTF_ID_LIST_GLOBAL(test_list_global) IDs
@@ -138,7 +138,7 @@ int test_resolve_btfids(void)
test_symbols[i].name,
test_list[i], test_symbols[i].id);
if (ret)
- return ret;
+ return;
}
}
@@ -160,13 +160,8 @@ int test_resolve_btfids(void)
break;
if (i > 0) {
- ret = CHECK(test_set.ids[i - 1] > test_set.ids[i],
- "sort_check",
- "test_set is not sorted\n");
- if (ret)
- break;
+ if (!ASSERT_LE(test_set.ids[i - 1], test_set.ids[i], "sort_check"))
+ return;
}
}
-
- return ret;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
index fddbc5db5d6a..64520684d2cb 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
@@ -12,7 +12,12 @@
#include <sys/sysinfo.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
-#include "test_ringbuf.skel.h"
+
+#include "test_ringbuf.lskel.h"
+#include "test_ringbuf_n.lskel.h"
+#include "test_ringbuf_map_key.lskel.h"
+#include "test_ringbuf_write.lskel.h"
+#include "test_ringbuf_overwrite.lskel.h"
#define EDONE 7777
@@ -58,7 +63,8 @@ static int process_sample(void *ctx, void *data, size_t len)
}
}
-static struct test_ringbuf *skel;
+static struct test_ringbuf_map_key_lskel *skel_map_key;
+static struct test_ringbuf_lskel *skel;
static struct ring_buffer *ringbuf;
static void trigger_samples()
@@ -81,38 +87,154 @@ static void *poll_thread(void *input)
return (void *)(long)ring_buffer__poll(ringbuf, timeout);
}
-void test_ringbuf(void)
+static void ringbuf_write_subtest(void)
+{
+ struct test_ringbuf_write_lskel *skel;
+ int page_size = getpagesize();
+ size_t *mmap_ptr;
+ int err, rb_fd;
+
+ skel = test_ringbuf_write_lskel__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->maps.ringbuf.max_entries = 0x40000;
+
+ err = test_ringbuf_write_lskel__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ rb_fd = skel->maps.ringbuf.map_fd;
+
+ mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
+ if (!ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos"))
+ goto cleanup;
+ *mmap_ptr = 0x30000;
+ ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");
+
+ skel->bss->pid = getpid();
+
+ ringbuf = ring_buffer__new(rb_fd, process_sample, NULL, NULL);
+ if (!ASSERT_OK_PTR(ringbuf, "ringbuf_new"))
+ goto cleanup;
+
+ err = test_ringbuf_write_lskel__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup_ringbuf;
+
+ skel->bss->discarded = 0;
+ skel->bss->passed = 0;
+
+ /* trigger exactly two samples */
+ syscall(__NR_getpgid);
+ syscall(__NR_getpgid);
+
+ ASSERT_EQ(skel->bss->discarded, 2, "discarded");
+ ASSERT_EQ(skel->bss->passed, 0, "passed");
+
+ test_ringbuf_write_lskel__detach(skel);
+cleanup_ringbuf:
+ ring_buffer__free(ringbuf);
+cleanup:
+ test_ringbuf_write_lskel__destroy(skel);
+}
+
+static void ringbuf_subtest(void)
{
const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
pthread_t thread;
long bg_ret = -1;
- int err, cnt;
-
- skel = test_ringbuf__open_and_load();
- if (CHECK(!skel, "skel_open_load", "skeleton open&load failed\n"))
+ int err, cnt, rb_fd;
+ int page_size = getpagesize();
+ void *mmap_ptr, *tmp_ptr;
+ struct ring *ring;
+ int map_fd;
+ unsigned long avail_data, ring_size, cons_pos, prod_pos;
+
+ skel = test_ringbuf_lskel__open();
+ if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
+ skel->maps.ringbuf.max_entries = page_size;
+
+ err = test_ringbuf_lskel__load(skel);
+ if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+ goto cleanup;
+
+ rb_fd = skel->maps.ringbuf.map_fd;
+ /* good read/write cons_pos */
+ mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
+ ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos");
+ tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE);
+ if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend"))
+ goto cleanup;
+ ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
+ ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");
+
+ /* bad writeable prod_pos */
+ mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size);
+ err = -errno;
+ ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos");
+ ASSERT_EQ(err, -EPERM, "wr_prod_pos_err");
+
+ /* bad writeable data pages */
+ mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
+ err = -errno;
+ ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one");
+ ASSERT_EQ(err, -EPERM, "wr_data_page_one_err");
+ mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size);
+ ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two");
+ mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
+ ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all");
+
+ /* good read-only pages */
+ mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
+ if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
+ goto cleanup;
+
+ ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect");
+ ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect");
+ ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap");
+ ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro");
+
+ /* good read-only pages with initial offset */
+ mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size);
+ if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
+ goto cleanup;
+
+ ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect");
+ ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_protect");
+ ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap");
+ ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro");
+
/* only trigger BPF program for current process */
skel->bss->pid = getpid();
- ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf),
+ ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd,
process_sample, NULL, NULL);
if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
goto cleanup;
- err = test_ringbuf__attach(skel);
+ err = test_ringbuf_lskel__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
goto cleanup;
trigger_samples();
+ ring = ring_buffer__ring(ringbuf, 0);
+ if (!ASSERT_OK_PTR(ring, "ring_buffer__ring_idx_0"))
+ goto cleanup;
+
+ map_fd = ring__map_fd(ring);
+ ASSERT_EQ(map_fd, skel->maps.ringbuf.map_fd, "ring_map_fd");
+
/* 2 submitted + 1 discarded records */
CHECK(skel->bss->avail_data != 3 * rec_sz,
"err_avail_size", "exp %ld, got %ld\n",
3L * rec_sz, skel->bss->avail_data);
- CHECK(skel->bss->ring_size != 4096,
+ CHECK(skel->bss->ring_size != page_size,
"err_ring_size", "exp %ld, got %ld\n",
- 4096L, skel->bss->ring_size);
+ (long)page_size, skel->bss->ring_size);
CHECK(skel->bss->cons_pos != 0,
"err_cons_pos", "exp %ld, got %ld\n",
0L, skel->bss->cons_pos);
@@ -120,6 +242,18 @@ void test_ringbuf(void)
"err_prod_pos", "exp %ld, got %ld\n",
3L * rec_sz, skel->bss->prod_pos);
+ /* verify getting this data directly via the ring object yields the same
+ * results
+ */
+ avail_data = ring__avail_data_size(ring);
+ ASSERT_EQ(avail_data, 3 * rec_sz, "ring_avail_size");
+ ring_size = ring__size(ring);
+ ASSERT_EQ(ring_size, page_size, "ring_ring_size");
+ cons_pos = ring__consumer_pos(ring);
+ ASSERT_EQ(cons_pos, 0, "ring_cons_pos");
+ prod_pos = ring__producer_pos(ring);
+ ASSERT_EQ(prod_pos, 3 * rec_sz, "ring_prod_pos");
+
/* poll for samples */
err = ring_buffer__poll(ringbuf, -1);
@@ -226,6 +360,10 @@ void test_ringbuf(void)
err = ring_buffer__consume(ringbuf);
CHECK(err < 0, "rb_consume", "failed: %d\n", err);
+ /* also consume using ring__consume to make sure it works the same */
+ err = ring__consume(ring);
+ ASSERT_GE(err, 0, "ring_consume");
+
/* 3 rounds, 2 samples each */
cnt = atomic_xchg(&sample_cnt, 0);
CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);
@@ -238,8 +376,200 @@ void test_ringbuf(void)
CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
1L, skel->bss->discarded);
- test_ringbuf__detach(skel);
+ test_ringbuf_lskel__detach(skel);
cleanup:
ring_buffer__free(ringbuf);
- test_ringbuf__destroy(skel);
+ test_ringbuf_lskel__destroy(skel);
+}
+
+/*
+ * Test ring_buffer__consume_n() by producing N_TOT_SAMPLES samples in the ring
+ * buffer, via getpgid(), and consuming them in chunks of N_SAMPLES.
+ */
+#define N_TOT_SAMPLES 32
+#define N_SAMPLES 4
+
+/* Sample value to verify the callback validity */
+#define SAMPLE_VALUE 42L
+
+static int process_n_sample(void *ctx, void *data, size_t len)
+{
+ struct sample *s = data;
+
+ ASSERT_EQ(s->value, SAMPLE_VALUE, "sample_value");
+
+ return 0;
+}
+
+static void ringbuf_n_subtest(void)
+{
+ struct test_ringbuf_n_lskel *skel_n;
+ int err, i;
+
+ skel_n = test_ringbuf_n_lskel__open();
+ if (!ASSERT_OK_PTR(skel_n, "test_ringbuf_n_lskel__open"))
+ return;
+
+ skel_n->maps.ringbuf.max_entries = getpagesize();
+ skel_n->bss->pid = getpid();
+
+ err = test_ringbuf_n_lskel__load(skel_n);
+ if (!ASSERT_OK(err, "test_ringbuf_n_lskel__load"))
+ goto cleanup;
+
+ ringbuf = ring_buffer__new(skel_n->maps.ringbuf.map_fd,
+ process_n_sample, NULL, NULL);
+ if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
+ goto cleanup;
+
+ err = test_ringbuf_n_lskel__attach(skel_n);
+ if (!ASSERT_OK(err, "test_ringbuf_n_lskel__attach"))
+ goto cleanup_ringbuf;
+
+	/* Produce N_TOT_SAMPLES samples in the ring buffer by calling getpgid() */
+ skel_n->bss->value = SAMPLE_VALUE;
+ for (i = 0; i < N_TOT_SAMPLES; i++)
+ syscall(__NR_getpgid);
+
+ /* Consume all samples from the ring buffer in batches of N_SAMPLES */
+ for (i = 0; i < N_TOT_SAMPLES; i += err) {
+ err = ring_buffer__consume_n(ringbuf, N_SAMPLES);
+ if (!ASSERT_EQ(err, N_SAMPLES, "rb_consume"))
+ goto cleanup_ringbuf;
+ }
+
+cleanup_ringbuf:
+ ring_buffer__free(ringbuf);
+cleanup:
+ test_ringbuf_n_lskel__destroy(skel_n);
+}
+
+static int process_map_key_sample(void *ctx, void *data, size_t len)
+{
+ struct sample *s;
+ int err, val;
+
+ s = data;
+ switch (s->seq) {
+ case 1:
+ ASSERT_EQ(s->value, 42, "sample_value");
+ err = bpf_map_lookup_elem(skel_map_key->maps.hash_map.map_fd,
+ s, &val);
+ ASSERT_OK(err, "hash_map bpf_map_lookup_elem");
+ ASSERT_EQ(val, 1, "hash_map val");
+ return -EDONE;
+ default:
+ return 0;
+ }
+}
+
+static void ringbuf_map_key_subtest(void)
+{
+ int err;
+
+ skel_map_key = test_ringbuf_map_key_lskel__open();
+ if (!ASSERT_OK_PTR(skel_map_key, "test_ringbuf_map_key_lskel__open"))
+ return;
+
+ skel_map_key->maps.ringbuf.max_entries = getpagesize();
+ skel_map_key->bss->pid = getpid();
+
+ err = test_ringbuf_map_key_lskel__load(skel_map_key);
+ if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__load"))
+ goto cleanup;
+
+ ringbuf = ring_buffer__new(skel_map_key->maps.ringbuf.map_fd,
+ process_map_key_sample, NULL, NULL);
+ if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
+ goto cleanup;
+
+ err = test_ringbuf_map_key_lskel__attach(skel_map_key);
+ if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__attach"))
+ goto cleanup_ringbuf;
+
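+	/* One getpgid() call should yield exactly one sample (seq == 1); the
+	 * callback returns -EDONE, which ring_buffer__poll() propagates.
+	 */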
+ syscall(__NR_getpgid);
+ ASSERT_EQ(skel_map_key->bss->seq, 1, "skel_map_key->bss->seq");
+ err = ring_buffer__poll(ringbuf, -1);
+ ASSERT_EQ(err, -EDONE, "ring_buffer__poll");
+
+cleanup_ringbuf:
+ ring_buffer__free(ringbuf);
+cleanup:
+ test_ringbuf_map_key_lskel__destroy(skel_map_key);
+}
+
+static void ringbuf_overwrite_mode_subtest(void)
+{
+ unsigned long size, len1, len2, len3, len4, len5;
+ unsigned long expect_avail_data, expect_prod_pos, expect_over_pos;
+ struct test_ringbuf_overwrite_lskel *skel;
+ int page_size = getpagesize();
+ int err;
+
+ skel = test_ringbuf_overwrite_lskel__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
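+	/* Sizes are picked so that len1 + len2 + len3 plus three record
+	 * headers would fill the page-sized ring exactly; that reservation is
+	 * expected to fail (reserve 3 below), len4 leaves 8 spare bytes and
+	 * succeeds by overwriting the oldest record, and len5 retries len3
+	 * once space has been reclaimed.
+	 */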
+ size = page_size;
+ len1 = page_size / 2;
+ len2 = page_size / 4;
+ len3 = size - len1 - len2 - BPF_RINGBUF_HDR_SZ * 3;
+ len4 = len3 - 8;
+ len5 = len3; /* retry with len3 */
+
+ skel->maps.ringbuf.max_entries = size;
+ skel->rodata->LEN1 = len1;
+ skel->rodata->LEN2 = len2;
+ skel->rodata->LEN3 = len3;
+ skel->rodata->LEN4 = len4;
+ skel->rodata->LEN5 = len5;
+
+ skel->bss->pid = getpid();
+
+ err = test_ringbuf_overwrite_lskel__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ err = test_ringbuf_overwrite_lskel__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ syscall(__NR_getpgid);
+
+ ASSERT_EQ(skel->bss->reserve1_fail, 0, "reserve 1");
+ ASSERT_EQ(skel->bss->reserve2_fail, 0, "reserve 2");
+ ASSERT_EQ(skel->bss->reserve3_fail, 1, "reserve 3");
+ ASSERT_EQ(skel->bss->reserve4_fail, 0, "reserve 4");
+ ASSERT_EQ(skel->bss->reserve5_fail, 0, "reserve 5");
+
+ ASSERT_EQ(skel->bss->ring_size, size, "check_ring_size");
+
+ expect_avail_data = len2 + len4 + len5 + 3 * BPF_RINGBUF_HDR_SZ;
+ ASSERT_EQ(skel->bss->avail_data, expect_avail_data, "check_avail_size");
+
+ ASSERT_EQ(skel->bss->cons_pos, 0, "check_cons_pos");
+
+ expect_prod_pos = len1 + len2 + len4 + len5 + 4 * BPF_RINGBUF_HDR_SZ;
+ ASSERT_EQ(skel->bss->prod_pos, expect_prod_pos, "check_prod_pos");
+
+ expect_over_pos = len1 + BPF_RINGBUF_HDR_SZ;
+ ASSERT_EQ(skel->bss->over_pos, expect_over_pos, "check_over_pos");
+
+ test_ringbuf_overwrite_lskel__detach(skel);
+cleanup:
+ test_ringbuf_overwrite_lskel__destroy(skel);
+}
+
+void test_ringbuf(void)
+{
+ if (test__start_subtest("ringbuf"))
+ ringbuf_subtest();
+ if (test__start_subtest("ringbuf_n"))
+ ringbuf_n_subtest();
+ if (test__start_subtest("ringbuf_map_key"))
+ ringbuf_map_key_subtest();
+ if (test__start_subtest("ringbuf_write"))
+ ringbuf_write_subtest();
+ if (test__start_subtest("ringbuf_overwrite_mode"))
+ ringbuf_overwrite_mode_subtest();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
index d37161e59bb2..58522195081b 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
@@ -41,13 +41,43 @@ static int process_sample(void *ctx, void *data, size_t len)
void test_ringbuf_multi(void)
{
struct test_ringbuf_multi *skel;
- struct ring_buffer *ringbuf;
+ struct ring_buffer *ringbuf = NULL;
+ struct ring *ring_old;
+ struct ring *ring;
int err;
+ int page_size = getpagesize();
+ int proto_fd = -1;
- skel = test_ringbuf_multi__open_and_load();
- if (CHECK(!skel, "skel_open_load", "skeleton open&load failed\n"))
+ skel = test_ringbuf_multi__open();
+ if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
+	/* validate ringbuf size adjustment logic: a non-page-aligned size
+	 * is rounded up (page_size + 1 becomes 2 * page_size)
+	 */
+ ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_before");
+ ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size + 1), "rb1_resize");
+ ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), 2 * page_size, "rb1_size_after");
+ ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size), "rb1_reset");
+ ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_final");
+
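+	/* A hash-of-maps needs a prototype inner map fd before object load. */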
+ proto_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, NULL, 0, 0, page_size, NULL);
+ if (CHECK(proto_fd < 0, "bpf_map_create", "bpf_map_create failed\n"))
+ goto cleanup;
+
+ err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd);
+ if (CHECK(err != 0, "bpf_map__set_inner_map_fd", "bpf_map__set_inner_map_fd failed\n"))
+ goto cleanup;
+
+ err = test_ringbuf_multi__load(skel);
+ if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+ goto cleanup;
+
+ close(proto_fd);
+ proto_fd = -1;
+
+ /* make sure we can't resize ringbuf after object load */
+ if (!ASSERT_ERR(bpf_map__set_max_entries(skel->maps.ringbuf1, 3 * page_size), "rb1_resize_after_load"))
+ goto cleanup;
+
/* only trigger BPF program for current process */
skel->bss->pid = getpid();
@@ -56,11 +86,24 @@ void test_ringbuf_multi(void)
if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
goto cleanup;
+ /* verify ring_buffer__ring returns expected results */
+ ring = ring_buffer__ring(ringbuf, 0);
+ if (!ASSERT_OK_PTR(ring, "ring_buffer__ring_idx_0"))
+ goto cleanup;
+ ring_old = ring;
+ ring = ring_buffer__ring(ringbuf, 1);
+ ASSERT_ERR_PTR(ring, "ring_buffer__ring_idx_1");
+
err = ring_buffer__add(ringbuf, bpf_map__fd(skel->maps.ringbuf2),
process_sample, (void *)(long)2);
if (CHECK(err, "ringbuf_add", "failed to add another ring\n"))
goto cleanup;
+ /* verify adding a new ring didn't invalidate our older pointer */
+ ring = ring_buffer__ring(ringbuf, 0);
+ if (!ASSERT_EQ(ring, ring_old, "ring_buffer__ring_again"))
+ goto cleanup;
+
err = test_ringbuf_multi__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
goto cleanup;
@@ -97,6 +140,8 @@ void test_ringbuf_multi(void)
2L, skel->bss->total);
cleanup:
+ if (proto_fd >= 0)
+ close(proto_fd);
ring_buffer__free(ringbuf);
test_ringbuf_multi__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/section_names.c b/tools/testing/selftests/bpf/prog_tests/section_names.c
index 8b571890c57e..c3d78846f31a 100644
--- a/tools/testing/selftests/bpf/prog_tests/section_names.c
+++ b/tools/testing/selftests/bpf/prog_tests/section_names.c
@@ -124,6 +124,11 @@ static struct sec_name_test tests[] = {
{0, BPF_CGROUP_INET6_CONNECT},
},
{
+ "cgroup/connect_unix",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_CONNECT},
+ {0, BPF_CGROUP_UNIX_CONNECT},
+ },
+ {
"cgroup/sendmsg4",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG},
{0, BPF_CGROUP_UDP4_SENDMSG},
@@ -134,6 +139,11 @@ static struct sec_name_test tests[] = {
{0, BPF_CGROUP_UDP6_SENDMSG},
},
{
+ "cgroup/sendmsg_unix",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_SENDMSG},
+ {0, BPF_CGROUP_UNIX_SENDMSG},
+ },
+ {
"cgroup/recvmsg4",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG},
{0, BPF_CGROUP_UDP4_RECVMSG},
@@ -144,6 +154,11 @@ static struct sec_name_test tests[] = {
{0, BPF_CGROUP_UDP6_RECVMSG},
},
{
+ "cgroup/recvmsg_unix",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_RECVMSG},
+ {0, BPF_CGROUP_UNIX_RECVMSG},
+ },
+ {
"cgroup/sysctl",
{0, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL},
{0, BPF_CGROUP_SYSCTL},
@@ -158,6 +173,36 @@ static struct sec_name_test tests[] = {
{0, BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT},
{0, BPF_CGROUP_SETSOCKOPT},
},
+ {
+ "cgroup/getpeername4",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME},
+ {0, BPF_CGROUP_INET4_GETPEERNAME},
+ },
+ {
+ "cgroup/getpeername6",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME},
+ {0, BPF_CGROUP_INET6_GETPEERNAME},
+ },
+ {
+ "cgroup/getpeername_unix",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETPEERNAME},
+ {0, BPF_CGROUP_UNIX_GETPEERNAME},
+ },
+ {
+ "cgroup/getsockname4",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME},
+ {0, BPF_CGROUP_INET4_GETSOCKNAME},
+ },
+ {
+ "cgroup/getsockname6",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME},
+ {0, BPF_CGROUP_INET6_GETSOCKNAME},
+ },
+ {
+ "cgroup/getsockname_unix",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETSOCKNAME},
+ {0, BPF_CGROUP_UNIX_GETSOCKNAME},
+ },
};
static void test_prog_type_by_name(const struct sec_name_test *test)
diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
index 821b4146b7b6..3dbcc091f16c 100644
--- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
+++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
@@ -18,7 +18,6 @@
#include <netinet/in.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#include "test_progs.h"
@@ -38,17 +37,11 @@ static int sk_fds[REUSEPORT_ARRAY_SIZE];
static int reuseport_array = -1, outer_map = -1;
static enum bpf_map_type inner_map_type;
static int select_by_skb_data_prog;
-static int saved_tcp_syncookie = -1;
static struct bpf_object *obj;
-static int saved_tcp_fo = -1;
static __u32 index_zero;
static int epfd;
-static union sa46 {
- struct sockaddr_in6 v6;
- struct sockaddr_in v4;
- sa_family_t family;
-} srv_sa;
+static struct sockaddr_storage srv_sa;
#define RET_IF(condition, tag, format...) ({ \
if (CHECK_FAIL(condition)) { \
@@ -66,30 +59,21 @@ static union sa46 {
static int create_maps(enum bpf_map_type inner_type)
{
- struct bpf_create_map_attr attr = {};
+ LIBBPF_OPTS(bpf_map_create_opts, opts);
inner_map_type = inner_type;
/* Creating reuseport_array */
- attr.name = "reuseport_array";
- attr.map_type = inner_type;
- attr.key_size = sizeof(__u32);
- attr.value_size = sizeof(__u32);
- attr.max_entries = REUSEPORT_ARRAY_SIZE;
-
- reuseport_array = bpf_create_map_xattr(&attr);
- RET_ERR(reuseport_array == -1, "creating reuseport_array",
+ reuseport_array = bpf_map_create(inner_type, "reuseport_array",
+ sizeof(__u32), sizeof(__u32), REUSEPORT_ARRAY_SIZE, NULL);
+ RET_ERR(reuseport_array < 0, "creating reuseport_array",
"reuseport_array:%d errno:%d\n", reuseport_array, errno);
/* Creating outer_map */
- attr.name = "outer_map";
- attr.map_type = BPF_MAP_TYPE_ARRAY_OF_MAPS;
- attr.key_size = sizeof(__u32);
- attr.value_size = sizeof(__u32);
- attr.max_entries = 1;
- attr.inner_map_fd = reuseport_array;
- outer_map = bpf_create_map_xattr(&attr);
- RET_ERR(outer_map == -1, "creating outer_map",
+ opts.inner_map_fd = reuseport_array;
+ outer_map = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer_map",
+ sizeof(__u32), sizeof(__u32), 1, &opts);
+ RET_ERR(outer_map < 0, "creating outer_map",
"outer_map:%d errno:%d\n", outer_map, errno);
return 0;
@@ -101,9 +85,10 @@ static int prepare_bpf_obj(void)
struct bpf_map *map;
int err;
- obj = bpf_object__open("test_select_reuseport_kern.o");
- RET_ERR(IS_ERR_OR_NULL(obj), "open test_select_reuseport_kern.o",
- "obj:%p PTR_ERR(obj):%ld\n", obj, PTR_ERR(obj));
+ obj = bpf_object__open("test_select_reuseport_kern.bpf.o");
+ err = libbpf_get_error(obj);
+ RET_ERR(err, "open test_select_reuseport_kern.bpf.o",
+ "obj:%p PTR_ERR(obj):%d\n", obj, err);
map = bpf_object__find_map_by_name(obj, "outer_map");
RET_ERR(!map, "find outer_map", "!map\n");
@@ -113,57 +98,57 @@ static int prepare_bpf_obj(void)
err = bpf_object__load(obj);
RET_ERR(err, "load bpf_object", "err:%d\n", err);
- prog = bpf_program__next(NULL, obj);
+ prog = bpf_object__next_program(obj, NULL);
RET_ERR(!prog, "get first bpf_program", "!prog\n");
select_by_skb_data_prog = bpf_program__fd(prog);
- RET_ERR(select_by_skb_data_prog == -1, "get prog fd",
+ RET_ERR(select_by_skb_data_prog < 0, "get prog fd",
"select_by_skb_data_prog:%d\n", select_by_skb_data_prog);
map = bpf_object__find_map_by_name(obj, "result_map");
RET_ERR(!map, "find result_map", "!map\n");
result_map = bpf_map__fd(map);
- RET_ERR(result_map == -1, "get result_map fd",
+ RET_ERR(result_map < 0, "get result_map fd",
"result_map:%d\n", result_map);
map = bpf_object__find_map_by_name(obj, "tmp_index_ovr_map");
RET_ERR(!map, "find tmp_index_ovr_map\n", "!map");
tmp_index_ovr_map = bpf_map__fd(map);
- RET_ERR(tmp_index_ovr_map == -1, "get tmp_index_ovr_map fd",
+ RET_ERR(tmp_index_ovr_map < 0, "get tmp_index_ovr_map fd",
"tmp_index_ovr_map:%d\n", tmp_index_ovr_map);
map = bpf_object__find_map_by_name(obj, "linum_map");
RET_ERR(!map, "find linum_map", "!map\n");
linum_map = bpf_map__fd(map);
- RET_ERR(linum_map == -1, "get linum_map fd",
+ RET_ERR(linum_map < 0, "get linum_map fd",
"linum_map:%d\n", linum_map);
map = bpf_object__find_map_by_name(obj, "data_check_map");
RET_ERR(!map, "find data_check_map", "!map\n");
data_check_map = bpf_map__fd(map);
- RET_ERR(data_check_map == -1, "get data_check_map fd",
+ RET_ERR(data_check_map < 0, "get data_check_map fd",
"data_check_map:%d\n", data_check_map);
return 0;
}
-static void sa46_init_loopback(union sa46 *sa, sa_family_t family)
+static void ss_init_loopback(struct sockaddr_storage *sa, sa_family_t family)
{
memset(sa, 0, sizeof(*sa));
- sa->family = family;
- if (sa->family == AF_INET6)
- sa->v6.sin6_addr = in6addr_loopback;
+ sa->ss_family = family;
+ if (sa->ss_family == AF_INET6)
+ ((struct sockaddr_in6 *)sa)->sin6_addr = in6addr_loopback;
else
- sa->v4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ ((struct sockaddr_in *)sa)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
}
-static void sa46_init_inany(union sa46 *sa, sa_family_t family)
+static void ss_init_inany(struct sockaddr_storage *sa, sa_family_t family)
{
memset(sa, 0, sizeof(*sa));
- sa->family = family;
- if (sa->family == AF_INET6)
- sa->v6.sin6_addr = in6addr_any;
+ sa->ss_family = family;
+ if (sa->ss_family == AF_INET6)
+ ((struct sockaddr_in6 *)sa)->sin6_addr = in6addr_any;
else
- sa->v4.sin_addr.s_addr = INADDR_ANY;
+ ((struct sockaddr_in *)sa)->sin_addr.s_addr = INADDR_ANY;
}
static int read_int_sysctl(const char *sysctl)
@@ -202,14 +187,6 @@ static int write_int_sysctl(const char *sysctl, int v)
return 0;
}
-static void restore_sysctls(void)
-{
- if (saved_tcp_fo != -1)
- write_int_sysctl(TCP_FO_SYSCTL, saved_tcp_fo);
- if (saved_tcp_syncookie != -1)
- write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, saved_tcp_syncookie);
-}
-
static int enable_fastopen(void)
{
int fo;
@@ -237,7 +214,7 @@ static long get_linum(void)
int err;
err = bpf_map_lookup_elem(linum_map, &index_zero, &linum);
- RET_ERR(err == -1, "lookup_elem(linum_map)", "err:%d errno:%d\n",
+ RET_ERR(err < 0, "lookup_elem(linum_map)", "err:%d errno:%d\n",
err, errno);
return linum;
@@ -247,18 +224,18 @@ static void check_data(int type, sa_family_t family, const struct cmd *cmd,
int cli_fd)
{
struct data_check expected = {}, result;
- union sa46 cli_sa;
+ struct sockaddr_storage cli_sa;
socklen_t addrlen;
int err;
addrlen = sizeof(cli_sa);
err = getsockname(cli_fd, (struct sockaddr *)&cli_sa,
&addrlen);
- RET_IF(err == -1, "getsockname(cli_fd)", "err:%d errno:%d\n",
+ RET_IF(err < 0, "getsockname(cli_fd)", "err:%d errno:%d\n",
err, errno);
err = bpf_map_lookup_elem(data_check_map, &index_zero, &result);
- RET_IF(err == -1, "lookup_elem(data_check_map)", "err:%d errno:%d\n",
+ RET_IF(err < 0, "lookup_elem(data_check_map)", "err:%d errno:%d\n",
err, errno);
if (type == SOCK_STREAM) {
@@ -270,26 +247,32 @@ static void check_data(int type, sa_family_t family, const struct cmd *cmd,
}
if (family == AF_INET6) {
+ struct sockaddr_in6 *srv_v6 = (struct sockaddr_in6 *)&srv_sa;
+ struct sockaddr_in6 *cli_v6 = (struct sockaddr_in6 *)&cli_sa;
+
expected.eth_protocol = htons(ETH_P_IPV6);
- expected.bind_inany = !srv_sa.v6.sin6_addr.s6_addr32[3] &&
- !srv_sa.v6.sin6_addr.s6_addr32[2] &&
- !srv_sa.v6.sin6_addr.s6_addr32[1] &&
- !srv_sa.v6.sin6_addr.s6_addr32[0];
+ expected.bind_inany = !srv_v6->sin6_addr.s6_addr32[3] &&
+ !srv_v6->sin6_addr.s6_addr32[2] &&
+ !srv_v6->sin6_addr.s6_addr32[1] &&
+ !srv_v6->sin6_addr.s6_addr32[0];
- memcpy(&expected.skb_addrs[0], cli_sa.v6.sin6_addr.s6_addr32,
- sizeof(cli_sa.v6.sin6_addr));
+ memcpy(&expected.skb_addrs[0], cli_v6->sin6_addr.s6_addr32,
+ sizeof(cli_v6->sin6_addr));
memcpy(&expected.skb_addrs[4], &in6addr_loopback,
sizeof(in6addr_loopback));
- expected.skb_ports[0] = cli_sa.v6.sin6_port;
- expected.skb_ports[1] = srv_sa.v6.sin6_port;
+ expected.skb_ports[0] = cli_v6->sin6_port;
+ expected.skb_ports[1] = srv_v6->sin6_port;
} else {
+ struct sockaddr_in *srv_v4 = (struct sockaddr_in *)&srv_sa;
+ struct sockaddr_in *cli_v4 = (struct sockaddr_in *)&cli_sa;
+
expected.eth_protocol = htons(ETH_P_IP);
- expected.bind_inany = !srv_sa.v4.sin_addr.s_addr;
+ expected.bind_inany = !srv_v4->sin_addr.s_addr;
- expected.skb_addrs[0] = cli_sa.v4.sin_addr.s_addr;
+ expected.skb_addrs[0] = cli_v4->sin_addr.s_addr;
expected.skb_addrs[1] = htonl(INADDR_LOOPBACK);
- expected.skb_ports[0] = cli_sa.v4.sin_port;
- expected.skb_ports[1] = srv_sa.v4.sin_port;
+ expected.skb_ports[0] = cli_v4->sin_port;
+ expected.skb_ports[1] = srv_v4->sin_port;
}
if (memcmp(&result, &expected, offsetof(struct data_check,
@@ -347,7 +330,7 @@ static void check_results(void)
for (i = 0; i < NR_RESULTS; i++) {
err = bpf_map_lookup_elem(result_map, &i, &results[i]);
- RET_IF(err == -1, "lookup_elem(result_map)",
+ RET_IF(err < 0, "lookup_elem(result_map)",
"i:%u err:%d errno:%d\n", i, err, errno);
}
@@ -383,16 +366,15 @@ static void check_results(void)
static int send_data(int type, sa_family_t family, void *data, size_t len,
enum result expected)
{
- union sa46 cli_sa;
+ struct sockaddr_storage cli_sa;
int fd, err;
fd = socket(family, type, 0);
RET_ERR(fd == -1, "socket()", "fd:%d errno:%d\n", fd, errno);
- sa46_init_loopback(&cli_sa, family);
+ ss_init_loopback(&cli_sa, family);
err = bind(fd, (struct sockaddr *)&cli_sa, sizeof(cli_sa));
RET_ERR(fd == -1, "bind(cli_sa)", "err:%d errno:%d\n", err, errno);
-
err = sendto(fd, data, len, MSG_FASTOPEN, (struct sockaddr *)&srv_sa,
sizeof(srv_sa));
RET_ERR(err != len && expected >= PASS,
@@ -524,12 +506,12 @@ static void test_syncookie(int type, sa_family_t family)
*/
err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero,
&tmp_index, BPF_ANY);
- RET_IF(err == -1, "update_elem(tmp_index_ovr_map, 0, 1)",
+ RET_IF(err < 0, "update_elem(tmp_index_ovr_map, 0, 1)",
"err:%d errno:%d\n", err, errno);
do_test(type, family, &cmd, PASS);
err = bpf_map_lookup_elem(tmp_index_ovr_map, &index_zero,
&tmp_index);
- RET_IF(err == -1 || tmp_index != -1,
+ RET_IF(err < 0 || tmp_index >= 0,
"lookup_elem(tmp_index_ovr_map)",
"err:%d errno:%d tmp_index:%d\n",
err, errno, tmp_index);
@@ -569,7 +551,7 @@ static void test_detach_bpf(int type, sa_family_t family)
for (i = 0; i < NR_RESULTS; i++) {
err = bpf_map_lookup_elem(result_map, &i, &tmp);
- RET_IF(err == -1, "lookup_elem(result_map)",
+ RET_IF(err < 0, "lookup_elem(result_map)",
"i:%u err:%d errno:%d\n", i, err, errno);
nr_run_before += tmp;
}
@@ -584,7 +566,7 @@ static void test_detach_bpf(int type, sa_family_t family)
for (i = 0; i < NR_RESULTS; i++) {
err = bpf_map_lookup_elem(result_map, &i, &tmp);
- RET_IF(err == -1, "lookup_elem(result_map)",
+ RET_IF(err < 0, "lookup_elem(result_map)",
"i:%u err:%d errno:%d\n", i, err, errno);
nr_run_after += tmp;
}
@@ -608,9 +590,9 @@ static void prepare_sk_fds(int type, sa_family_t family, bool inany)
socklen_t addrlen;
if (inany)
- sa46_init_inany(&srv_sa, family);
+ ss_init_inany(&srv_sa, family);
else
- sa46_init_loopback(&srv_sa, family);
+ ss_init_loopback(&srv_sa, family);
addrlen = sizeof(srv_sa);
/*
@@ -632,24 +614,24 @@ static void prepare_sk_fds(int type, sa_family_t family, bool inany)
SO_ATTACH_REUSEPORT_EBPF,
&select_by_skb_data_prog,
sizeof(select_by_skb_data_prog));
- RET_IF(err == -1, "setsockopt(SO_ATTACH_REUEPORT_EBPF)",
+	RET_IF(err < 0, "setsockopt(SO_ATTACH_REUSEPORT_EBPF)",
"err:%d errno:%d\n", err, errno);
}
err = bind(sk_fds[i], (struct sockaddr *)&srv_sa, addrlen);
- RET_IF(err == -1, "bind()", "sk_fds[%d] err:%d errno:%d\n",
+ RET_IF(err < 0, "bind()", "sk_fds[%d] err:%d errno:%d\n",
i, err, errno);
if (type == SOCK_STREAM) {
err = listen(sk_fds[i], 10);
- RET_IF(err == -1, "listen()",
+ RET_IF(err < 0, "listen()",
"sk_fds[%d] err:%d errno:%d\n",
i, err, errno);
}
err = bpf_map_update_elem(reuseport_array, &i, &sk_fds[i],
BPF_NOEXIST);
- RET_IF(err == -1, "update_elem(reuseport_array)",
+ RET_IF(err < 0, "update_elem(reuseport_array)",
"sk_fds[%d] err:%d errno:%d\n", i, err, errno);
if (i == first) {
@@ -682,7 +664,7 @@ static void setup_per_test(int type, sa_family_t family, bool inany,
prepare_sk_fds(type, family, inany);
err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero, &ovr,
BPF_ANY);
- RET_IF(err == -1, "update_elem(tmp_index_ovr_map, 0, -1)",
+ RET_IF(err < 0, "update_elem(tmp_index_ovr_map, 0, -1)",
"err:%d errno:%d\n", err, errno);
/* Install reuseport_array to outer_map? */
@@ -691,7 +673,7 @@ static void setup_per_test(int type, sa_family_t family, bool inany,
err = bpf_map_update_elem(outer_map, &index_zero, &reuseport_array,
BPF_ANY);
- RET_IF(err == -1, "update_elem(outer_map, 0, reuseport_array)",
+ RET_IF(err < 0, "update_elem(outer_map, 0, reuseport_array)",
"err:%d errno:%d\n", err, errno);
}
@@ -720,18 +702,18 @@ static void cleanup_per_test(bool no_inner_map)
return;
err = bpf_map_delete_elem(outer_map, &index_zero);
- RET_IF(err == -1, "delete_elem(outer_map)",
+ RET_IF(err < 0, "delete_elem(outer_map)",
"err:%d errno:%d\n", err, errno);
}
static void cleanup(void)
{
- if (outer_map != -1) {
+ if (outer_map >= 0) {
close(outer_map);
outer_map = -1;
}
- if (reuseport_array != -1) {
+ if (reuseport_array >= 0) {
close(reuseport_array);
reuseport_array = -1;
}
@@ -802,6 +784,7 @@ static void test_config(int sotype, sa_family_t family, bool inany)
TEST_INIT(test_pass_on_err),
TEST_INIT(test_detach_bpf),
};
+ struct netns_obj *netns;
char s[MAX_TEST_NAME];
const struct test *t;
@@ -817,9 +800,21 @@ static void test_config(int sotype, sa_family_t family, bool inany)
if (!test__start_subtest(s))
continue;
+ netns = netns_new("select_reuseport", true);
+ if (!ASSERT_OK_PTR(netns, "netns_new"))
+ continue;
+
+ if (CHECK_FAIL(enable_fastopen()))
+ goto out;
+ if (CHECK_FAIL(disable_syncookie()))
+ goto out;
+
setup_per_test(sotype, family, inany, t->no_inner_map);
t->fn(sotype, family);
cleanup_per_test(t->no_inner_map);
+
+out:
+ netns_free(netns);
}
}
@@ -857,23 +852,9 @@ out:
cleanup();
}
-void test_select_reuseport(void)
+void serial_test_select_reuseport(void)
{
- saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL);
- if (saved_tcp_fo < 0)
- goto out;
- saved_tcp_syncookie = read_int_sysctl(TCP_SYNCOOKIE_SYSCTL);
- if (saved_tcp_syncookie < 0)
- goto out;
-
- if (enable_fastopen())
- goto out;
- if (disable_syncookie())
- goto out;
-
test_map_type(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
test_map_type(BPF_MAP_TYPE_SOCKMAP);
test_map_type(BPF_MAP_TYPE_SOCKHASH);
-out:
- restore_sysctls();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index 7043e6ded0e6..7ac4d5a488aa 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -1,38 +1,46 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include <sys/time.h>
+#include <sys/resource.h>
#include "test_send_signal_kern.skel.h"
+#include "io_helpers.h"
-static volatile int sigusr1_received = 0;
+static int sigusr1_received;
static void sigusr1_handler(int signum)
{
- sigusr1_received++;
+ sigusr1_received = 8;
+}
+
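+/* For the remote case, the sender attaches a value to the signal's siginfo;
+ * the test expects it to be 8 (checked against sigusr1_received below).
+ */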
+static void sigusr1_siginfo_handler(int s, siginfo_t *i, void *v)
+{
+ sigusr1_received = (int)(long long)i->si_value.sival_ptr;
}
static void test_send_signal_common(struct perf_event_attr *attr,
- bool signal_thread,
- const char *test_name)
+ bool signal_thread, bool remote)
{
struct test_send_signal_kern *skel;
+ struct sigaction sa;
int pipe_c2p[2], pipe_p2c[2];
int err = -1, pmu_fd = -1;
- __u32 duration = 0;
+ volatile int j = 0;
+ int retry_count;
char buf[256];
pid_t pid;
+ int old_prio;
- if (CHECK(pipe(pipe_c2p), test_name,
- "pipe pipe_c2p error: %s\n", strerror(errno)))
+ if (!ASSERT_OK(pipe(pipe_c2p), "pipe_c2p"))
return;
- if (CHECK(pipe(pipe_p2c), test_name,
- "pipe pipe_p2c error: %s\n", strerror(errno))) {
+ if (!ASSERT_OK(pipe(pipe_p2c), "pipe_p2c")) {
close(pipe_c2p[0]);
close(pipe_c2p[1]);
return;
}
pid = fork();
- if (CHECK(pid < 0, test_name, "fork error: %s\n", strerror(errno))) {
+ if (!ASSERT_GE(pid, 0, "fork")) {
close(pipe_c2p[0]);
close(pipe_c2p[1]);
close(pipe_p2c[0]);
@@ -42,25 +50,56 @@ static void test_send_signal_common(struct perf_event_attr *attr,
if (pid == 0) {
/* install signal handler and notify parent */
- signal(SIGUSR1, sigusr1_handler);
+ if (remote) {
+ sa.sa_sigaction = sigusr1_siginfo_handler;
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ ASSERT_NEQ(sigaction(SIGUSR1, &sa, NULL), -1, "sigaction");
+ } else {
+ ASSERT_NEQ(signal(SIGUSR1, sigusr1_handler), SIG_ERR, "signal");
+ }
close(pipe_c2p[0]); /* close read */
close(pipe_p2c[1]); /* close write */
+		/* boost to a high priority so we get a higher chance
+		 * that, if an interrupt happens, the underlying task
+		 * is this process.
+ */
+ if (!remote) {
+ errno = 0;
+ old_prio = getpriority(PRIO_PROCESS, 0);
+ ASSERT_OK(errno, "getpriority");
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority");
+ }
+
/* notify parent signal handler is installed */
- CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
+ ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write");
/* make sure parent enabled bpf program to send_signal */
- CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
+ ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read");
/* wait a little for signal handler */
- sleep(1);
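+		/* Busy-wait (the bogus arithmetic keeps the loop from being
+		 * optimized out) so a sample or tracepoint can fire on this
+		 * task; for the remote case, just sleep.
+		 */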
+ for (int i = 0; i < 1000000000 && !sigusr1_received; i++) {
+ j /= i + j + 1;
+ if (remote)
+ sleep(1);
+ else
+ if (!attr)
+ /* trigger the nanosleep tracepoint program. */
+ usleep(1);
+ }
+
+ buf[0] = sigusr1_received;
- buf[0] = sigusr1_received ? '2' : '0';
- CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
+ ASSERT_EQ(sigusr1_received, 8, "sigusr1_received");
+ ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write");
/* wait for parent notification and exit */
- CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
+ ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read");
+
+ /* restore the old priority */
+ if (!remote)
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");
close(pipe_c2p[1]);
close(pipe_p2c[0]);
@@ -71,86 +110,132 @@ static void test_send_signal_common(struct perf_event_attr *attr,
close(pipe_p2c[0]); /* close read */
skel = test_send_signal_kern__open_and_load();
- if (CHECK(!skel, "skel_open_and_load", "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
goto skel_open_load_failure;
+	/* boost to a high priority so we get a higher chance
+	 * that, if an interrupt happens, the underlying task
+	 * is this process.
+ */
+ if (remote) {
+ errno = 0;
+ old_prio = getpriority(PRIO_PROCESS, 0);
+ ASSERT_OK(errno, "getpriority");
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority");
+ }
+
if (!attr) {
err = test_send_signal_kern__attach(skel);
- if (CHECK(err, "skel_attach", "skeleton attach failed\n")) {
+ if (!ASSERT_OK(err, "skel_attach")) {
err = -1;
goto destroy_skel;
}
} else {
- pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1,
- -1 /* group id */, 0 /* flags */);
- if (CHECK(pmu_fd < 0, test_name, "perf_event_open error: %s\n",
- strerror(errno))) {
+ if (!remote)
+ pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1 /* cpu */,
+ -1 /* group id */, 0 /* flags */);
+ else
+ pmu_fd = syscall(__NR_perf_event_open, attr, getpid(), -1 /* cpu */,
+ -1 /* group id */, 0 /* flags */);
+ if (!ASSERT_GE(pmu_fd, 0, "perf_event_open")) {
err = -1;
goto destroy_skel;
}
skel->links.send_signal_perf =
bpf_program__attach_perf_event(skel->progs.send_signal_perf, pmu_fd);
- if (CHECK(IS_ERR(skel->links.send_signal_perf), "attach_perf_event",
- "err %ld\n", PTR_ERR(skel->links.send_signal_perf)))
+ if (!ASSERT_OK_PTR(skel->links.send_signal_perf, "attach_perf_event"))
goto disable_pmu;
}
/* wait until child signal handler installed */
- CHECK(read(pipe_c2p[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
+ ASSERT_EQ(read(pipe_c2p[0], buf, 1), 1, "pipe_read");
/* trigger the bpf send_signal */
- skel->bss->pid = pid;
- skel->bss->sig = SIGUSR1;
skel->bss->signal_thread = signal_thread;
+ skel->bss->sig = SIGUSR1;
+ if (!remote) {
+ skel->bss->target_pid = 0;
+ skel->bss->pid = pid;
+ } else {
+ skel->bss->target_pid = pid;
+ skel->bss->pid = getpid();
+ }
/* notify child that bpf program can send_signal now */
- CHECK(write(pipe_p2c[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
-
- /* wait for result */
- err = read(pipe_c2p[0], buf, 1);
- if (CHECK(err < 0, test_name, "reading pipe error: %s\n", strerror(errno)))
+ ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write");
+
+ for (retry_count = 0;;) {
+ /* For the remote test, the BPF program is triggered from this
+ * process but the other process/thread is signaled.
+ */
+ if (remote) {
+ if (!attr) {
+ for (int i = 0; i < 10; i++)
+ usleep(1);
+ } else {
+ for (int i = 0; i < 100000000; i++)
+ j /= i + 1;
+ }
+ }
+ /* wait for result */
+ err = read_with_timeout(pipe_c2p[0], buf, 1, 100);
+ if (err == -EAGAIN && retry_count++ < 10000)
+ continue;
+ break;
+ }
+ if (!ASSERT_GE(err, 0, "reading pipe"))
goto disable_pmu;
- if (CHECK(err == 0, test_name, "reading pipe error: size 0\n")) {
+ if (!ASSERT_GT(err, 0, "reading pipe error: size 0")) {
err = -1;
goto disable_pmu;
}
- CHECK(buf[0] != '2', test_name, "incorrect result\n");
+ ASSERT_EQ(buf[0], 8, "incorrect result");
/* notify child safe to exit */
- CHECK(write(pipe_p2c[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
+ ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write");
disable_pmu:
close(pmu_fd);
destroy_skel:
test_send_signal_kern__destroy(skel);
+ /* restore the old priority */
+ if (remote)
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");
skel_open_load_failure:
close(pipe_c2p[0]);
close(pipe_p2c[1]);
+ /*
+ * Child is either about to exit cleanly or stuck in case of errors.
+ * Nudge it to exit.
+ */
+ kill(pid, SIGKILL);
wait(NULL);
}
-static void test_send_signal_tracepoint(bool signal_thread)
+static void test_send_signal_tracepoint(bool signal_thread, bool remote)
{
- test_send_signal_common(NULL, signal_thread, "tracepoint");
+ test_send_signal_common(NULL, signal_thread, remote);
}
-static void test_send_signal_perf(bool signal_thread)
+static void test_send_signal_perf(bool signal_thread, bool remote)
{
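+	/* Sample at 1000 Hz instead of on every event (the old
+	 * sample_period = 1), presumably to avoid perf throttling
+	 * during the busy-wait loops.
+	 */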
struct perf_event_attr attr = {
- .sample_period = 1,
+ .freq = 1,
+ .sample_freq = 1000,
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
};
- test_send_signal_common(&attr, signal_thread, "perf_sw_event");
+ test_send_signal_common(&attr, signal_thread, remote);
}
-static void test_send_signal_nmi(bool signal_thread)
+static void test_send_signal_nmi(bool signal_thread, bool remote)
{
struct perf_event_attr attr = {
- .sample_period = 1,
+ .freq = 1,
+ .sample_freq = 1000,
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
};
@@ -162,7 +247,7 @@ static void test_send_signal_nmi(bool signal_thread)
pmu_fd = syscall(__NR_perf_event_open, &attr, 0 /* pid */,
-1 /* cpu */, -1 /* group_fd */, 0 /* flags */);
if (pmu_fd == -1) {
- if (errno == ENOENT) {
+ if (errno == ENOENT || errno == EOPNOTSUPP) {
printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n",
__func__);
test__skip();
@@ -173,21 +258,35 @@ static void test_send_signal_nmi(bool signal_thread)
close(pmu_fd);
}
- test_send_signal_common(&attr, signal_thread, "perf_hw_event");
+ test_send_signal_common(&attr, signal_thread, remote);
}
void test_send_signal(void)
{
if (test__start_subtest("send_signal_tracepoint"))
- test_send_signal_tracepoint(false);
+ test_send_signal_tracepoint(false, false);
if (test__start_subtest("send_signal_perf"))
- test_send_signal_perf(false);
+ test_send_signal_perf(false, false);
if (test__start_subtest("send_signal_nmi"))
- test_send_signal_nmi(false);
+ test_send_signal_nmi(false, false);
if (test__start_subtest("send_signal_tracepoint_thread"))
- test_send_signal_tracepoint(true);
+ test_send_signal_tracepoint(true, false);
if (test__start_subtest("send_signal_perf_thread"))
- test_send_signal_perf(true);
+ test_send_signal_perf(true, false);
if (test__start_subtest("send_signal_nmi_thread"))
- test_send_signal_nmi(true);
+ test_send_signal_nmi(true, false);
+
+ /* Signal remote thread and thread group */
+ if (test__start_subtest("send_signal_tracepoint_remote"))
+ test_send_signal_tracepoint(false, true);
+ if (test__start_subtest("send_signal_perf_remote"))
+ test_send_signal_perf(false, true);
+ if (test__start_subtest("send_signal_nmi_remote"))
+ test_send_signal_nmi(false, true);
+ if (test__start_subtest("send_signal_tracepoint_thread_remote"))
+ test_send_signal_tracepoint(true, true);
+ if (test__start_subtest("send_signal_perf_thread_remote"))
+ test_send_signal_perf(true, true);
+ if (test__start_subtest("send_signal_nmi_thread_remote"))
+ test_send_signal_nmi(true, true);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c b/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c
index 189a34a7addb..15dacfcfaa6d 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c
@@ -25,7 +25,8 @@ static void *worker(void *p)
return NULL;
}
-void test_send_signal_sched_switch(void)
+/* NOTE: this test may cause event loss, so it runs serially */
+void serial_test_send_signal_sched_switch(void)
{
struct test_send_signal_kern *skel;
pthread_t threads[THREAD_COUNT];
diff --git a/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c b/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c
new file mode 100644
index 000000000000..e4dac529d424
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <linux/socket.h>
+#include <linux/tls.h>
+#include <net/if.h>
+
+#include "test_progs.h"
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+
+#include "setget_sockopt.skel.h"
+
+#define CG_NAME "/setget-sockopt-test"
+
+static const char addr4_str[] = "127.0.0.1";
+static const char addr6_str[] = "::1";
+static struct setget_sockopt *skel;
+static int cg_fd;
+
+static int create_netns(void)
+{
+ if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
+ return -1;
+
+ if (!ASSERT_OK(system("ip link set dev lo up"), "set lo up"))
+ return -1;
+
+ if (!ASSERT_OK(system("ip link add dev binddevtest1 type veth peer name binddevtest2"),
+ "add veth"))
+ return -1;
+
+ if (!ASSERT_OK(system("ip link set dev binddevtest1 up"),
+ "bring veth up"))
+ return -1;
+
+ return 0;
+}
+
+static void test_tcp(int family)
+{
+ struct setget_sockopt__bss *bss = skel->bss;
+ int sfd, cfd;
+
+ memset(bss, 0, sizeof(*bss));
+
+ sfd = start_server(family, SOCK_STREAM,
+ family == AF_INET6 ? addr6_str : addr4_str, 0, 0);
+ if (!ASSERT_GE(sfd, 0, "start_server"))
+ return;
+
+ cfd = connect_to_fd(sfd, 0);
+ if (!ASSERT_GE(cfd, 0, "connect_to_fd_server")) {
+ close(sfd);
+ return;
+ }
+ close(sfd);
+ close(cfd);
+
+ ASSERT_EQ(bss->nr_listen, 1, "nr_listen");
+ ASSERT_EQ(bss->nr_connect, 1, "nr_connect");
+ ASSERT_EQ(bss->nr_active, 1, "nr_active");
+ ASSERT_EQ(bss->nr_passive, 1, "nr_passive");
+ ASSERT_EQ(bss->nr_socket_post_create, 2, "nr_socket_post_create");
+ ASSERT_EQ(bss->nr_binddev, 2, "nr_bind");
+}
+
+static void test_udp(int family)
+{
+ struct setget_sockopt__bss *bss = skel->bss;
+ int sfd;
+
+ memset(bss, 0, sizeof(*bss));
+
+ sfd = start_server(family, SOCK_DGRAM,
+ family == AF_INET6 ? addr6_str : addr4_str, 0, 0);
+ if (!ASSERT_GE(sfd, 0, "start_server"))
+ return;
+ close(sfd);
+
+ ASSERT_GE(bss->nr_socket_post_create, 1, "nr_socket_post_create");
+ ASSERT_EQ(bss->nr_binddev, 1, "nr_bind");
+}
+
+static void test_ktls(int family)
+{
+ struct tls12_crypto_info_aes_gcm_128 aes128;
+ struct setget_sockopt__bss *bss = skel->bss;
+ int cfd = -1, sfd = -1, fd = -1, ret;
+ char buf;
+
+ memset(bss, 0, sizeof(*bss));
+
+ sfd = start_server(family, SOCK_STREAM,
+ family == AF_INET6 ? addr6_str : addr4_str, 0, 0);
+ if (!ASSERT_GE(sfd, 0, "start_server"))
+ return;
+ fd = connect_to_fd(sfd, 0);
+ if (!ASSERT_GE(fd, 0, "connect_to_fd"))
+ goto err_out;
+
+ cfd = accept(sfd, NULL, 0);
+ if (!ASSERT_GE(cfd, 0, "accept"))
+ goto err_out;
+
+ close(sfd);
+ sfd = -1;
+
+ /* Setup KTLS */
+ ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ if (!ASSERT_OK(ret, "setsockopt"))
+ goto err_out;
+ ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ if (!ASSERT_OK(ret, "setsockopt"))
+ goto err_out;
+
+ memset(&aes128, 0, sizeof(aes128));
+ aes128.info.version = TLS_1_2_VERSION;
+ aes128.info.cipher_type = TLS_CIPHER_AES_GCM_128;
+
+ ret = setsockopt(fd, SOL_TLS, TLS_TX, &aes128, sizeof(aes128));
+ if (!ASSERT_OK(ret, "setsockopt"))
+ goto err_out;
+
+ ret = setsockopt(cfd, SOL_TLS, TLS_RX, &aes128, sizeof(aes128));
+ if (!ASSERT_OK(ret, "setsockopt"))
+ goto err_out;
+
+ /* KTLS is enabled */
+
+ close(fd);
+	/* At this point, the cfd socket is in the CLOSE_WAIT state
+	 * and still runs the TLS protocol, so this is where the
+	 * BPF_TCP_CLOSE_WAIT transition is exercised.
+ */
+ ret = read(cfd, &buf, sizeof(buf));
+ ASSERT_EQ(ret, 0, "read");
+ close(cfd);
+
+ ASSERT_EQ(bss->nr_listen, 1, "nr_listen");
+ ASSERT_EQ(bss->nr_connect, 1, "nr_connect");
+ ASSERT_EQ(bss->nr_active, 1, "nr_active");
+ ASSERT_EQ(bss->nr_passive, 1, "nr_passive");
+ ASSERT_EQ(bss->nr_socket_post_create, 2, "nr_socket_post_create");
+ ASSERT_EQ(bss->nr_binddev, 2, "nr_bind");
+ ASSERT_EQ(bss->nr_fin_wait1, 1, "nr_fin_wait1");
+ return;
+
+err_out:
+ close(fd);
+ close(cfd);
+ close(sfd);
+}
+
+static void test_nonstandard_opt(int family)
+{
+ struct setget_sockopt__bss *bss = skel->bss;
+ struct bpf_link *getsockopt_link = NULL;
+ int sfd = -1, fd = -1, cfd = -1, flags;
+ socklen_t flagslen = sizeof(flags);
+
+ memset(bss, 0, sizeof(*bss));
+
+ sfd = start_server(family, SOCK_STREAM,
+ family == AF_INET6 ? addr6_str : addr4_str, 0, 0);
+ if (!ASSERT_GE(sfd, 0, "start_server"))
+ return;
+
+ fd = connect_to_fd(sfd, 0);
+ if (!ASSERT_GE(fd, 0, "connect_to_fd_server"))
+ goto err_out;
+
+	/* The cgroup/getsockopt prog intercepts the getsockopt() below and
+	 * retrieves the tcp socket's bpf_sock_ops_cb_flags value for the
+	 * accept()ed socket; the value was set earlier, via bpf_setsockopt(),
+	 * in that socket's passive established callback.
+ */
+ getsockopt_link = bpf_program__attach_cgroup(skel->progs._getsockopt, cg_fd);
+ if (!ASSERT_OK_PTR(getsockopt_link, "getsockopt prog"))
+ goto err_out;
+
+ cfd = accept(sfd, NULL, 0);
+ if (!ASSERT_GE(cfd, 0, "accept"))
+ goto err_out;
+
+ if (!ASSERT_OK(getsockopt(cfd, SOL_TCP, TCP_BPF_SOCK_OPS_CB_FLAGS, &flags, &flagslen),
+ "getsockopt_flags"))
+ goto err_out;
+ ASSERT_EQ(flags & BPF_SOCK_OPS_STATE_CB_FLAG, BPF_SOCK_OPS_STATE_CB_FLAG,
+ "cb_flags_set");
+err_out:
+ close(sfd);
+ if (fd != -1)
+ close(fd);
+ if (cfd != -1)
+ close(cfd);
+ bpf_link__destroy(getsockopt_link);
+}
+
+void test_setget_sockopt(void)
+{
+ cg_fd = test__join_cgroup(CG_NAME);
+ if (!ASSERT_OK_FD(cg_fd, "join cgroup"))
+ return;
+
+ if (create_netns())
+ goto done;
+
+ skel = setget_sockopt__open();
+ if (!ASSERT_OK_PTR(skel, "open skel"))
+ goto done;
+
+ strcpy(skel->rodata->veth, "binddevtest1");
+ skel->rodata->veth_ifindex = if_nametoindex("binddevtest1");
+ if (!ASSERT_GT(skel->rodata->veth_ifindex, 0, "if_nametoindex"))
+ goto done;
+
+ if (!ASSERT_OK(setget_sockopt__load(skel), "load skel"))
+ goto done;
+
+ skel->links.skops_sockopt =
+ bpf_program__attach_cgroup(skel->progs.skops_sockopt, cg_fd);
+ if (!ASSERT_OK_PTR(skel->links.skops_sockopt, "attach cgroup"))
+ goto done;
+
+ skel->links.socket_post_create =
+ bpf_program__attach_cgroup(skel->progs.socket_post_create, cg_fd);
+ if (!ASSERT_OK_PTR(skel->links.socket_post_create, "attach_cgroup"))
+ goto done;
+
+ test_tcp(AF_INET6);
+ test_tcp(AF_INET);
+ test_udp(AF_INET6);
+ test_udp(AF_INET);
+ test_ktls(AF_INET6);
+ test_ktls(AF_INET);
+ test_nonstandard_opt(AF_INET);
+ test_nonstandard_opt(AF_INET6);
+
+done:
+ setget_sockopt__destroy(skel);
+ close(cg_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sha256.c b/tools/testing/selftests/bpf/prog_tests/sha256.c
new file mode 100644
index 000000000000..604a0b1423d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sha256.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2025 Google LLC */
+
+#include <test_progs.h>
+#include "bpf/libbpf_internal.h"
+
+#define MAX_LEN 4096
+
+/* Test libbpf_sha256() for all lengths from 0 to MAX_LEN inclusive. */
+void test_sha256(void)
+{
+ /*
+ * The correctness of this value was verified by running this test with
+ * libbpf_sha256() replaced by OpenSSL's SHA256().
+ */
+ static const __u8 expected_digest_of_digests[SHA256_DIGEST_LENGTH] = {
+ 0x62, 0x30, 0x0e, 0x1d, 0xea, 0x7f, 0xc4, 0x74,
+ 0xfd, 0x8e, 0x64, 0x0b, 0xd8, 0x5f, 0xea, 0x04,
+ 0xf3, 0xef, 0x77, 0x42, 0xc2, 0x01, 0xb8, 0x90,
+ 0x6e, 0x19, 0x91, 0x1b, 0xca, 0xb3, 0x28, 0x42,
+ };
+ __u64 seed = 0;
+ __u8 *data = NULL, *digests = NULL;
+ __u8 digest_of_digests[SHA256_DIGEST_LENGTH];
+ size_t i;
+
+ data = malloc(MAX_LEN);
+ if (!ASSERT_OK_PTR(data, "malloc"))
+ goto out;
+ digests = malloc((MAX_LEN + 1) * SHA256_DIGEST_LENGTH);
+ if (!ASSERT_OK_PTR(digests, "malloc"))
+ goto out;
+
+	/* Generate MAX_LEN bytes of "random" data deterministically (drand48-style LCG). */
+ for (i = 0; i < MAX_LEN; i++) {
+ seed = (seed * 25214903917 + 11) & ((1ULL << 48) - 1);
+ data[i] = (__u8)(seed >> 16);
+ }
+
+	/* Calculate a digest for each length 0 through MAX_LEN inclusive. */
+ for (i = 0; i <= MAX_LEN; i++)
+ libbpf_sha256(data, i, &digests[i * SHA256_DIGEST_LENGTH]);
+
+ /* Calculate and verify the digest of all the digests. */
+ libbpf_sha256(digests, (MAX_LEN + 1) * SHA256_DIGEST_LENGTH,
+ digest_of_digests);
+ ASSERT_MEMEQ(digest_of_digests, expected_digest_of_digests,
+ SHA256_DIGEST_LENGTH, "digest_of_digests");
+out:
+ free(data);
+ free(digests);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/signal_pending.c b/tools/testing/selftests/bpf/prog_tests/signal_pending.c
index dfcbddcbe4d3..70b49da5ca0a 100644
--- a/tools/testing/selftests/bpf/prog_tests/signal_pending.c
+++ b/tools/testing/selftests/bpf/prog_tests/signal_pending.c
@@ -13,36 +13,37 @@ static void test_signal_pending_by_type(enum bpf_prog_type prog_type)
struct itimerval timeo = {
.it_value.tv_usec = 100000, /* 100ms */
};
- __u32 duration = 0, retval;
int prog_fd;
int err;
int i;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 0xffffffff,
+ );
for (i = 0; i < ARRAY_SIZE(prog); i++)
prog[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
prog[ARRAY_SIZE(prog) - 1] = BPF_EXIT_INSN();
- prog_fd = bpf_load_program(prog_type, prog, ARRAY_SIZE(prog),
+ prog_fd = bpf_test_load_program(prog_type, prog, ARRAY_SIZE(prog),
"GPL", 0, NULL, 0);
- CHECK(prog_fd < 0, "test-run", "errno %d\n", errno);
+ ASSERT_GE(prog_fd, 0, "test-run load");
err = sigaction(SIGALRM, &sigalrm_action, NULL);
- CHECK(err, "test-run-signal-sigaction", "errno %d\n", errno);
+ ASSERT_OK(err, "test-run-signal-sigaction");
err = setitimer(ITIMER_REAL, &timeo, NULL);
- CHECK(err, "test-run-signal-timer", "errno %d\n", errno);
+ ASSERT_OK(err, "test-run-signal-timer");
- err = bpf_prog_test_run(prog_fd, 0xffffffff, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, &retval, &duration);
- CHECK(duration > 500000000, /* 500ms */
- "test-run-signal-duration",
- "duration %dns > 500ms\n",
- duration);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_LE(topts.duration, 500000000 /* 500ms */,
+ "test-run-signal-duration");
signal(SIGALRM, SIG_DFL);
}
-void test_signal_pending(enum bpf_prog_type prog_type)
+void test_signal_pending(void)
{
test_signal_pending_by_type(BPF_PROG_TYPE_SOCKET_FILTER);
test_signal_pending_by_type(BPF_PROG_TYPE_FLOW_DISSECTOR);
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
index 3a469099f30d..10a0ab954b8a 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
@@ -15,6 +15,7 @@
#include <unistd.h>
#include "test_progs.h"
+#include "network_helpers.h"
#define BIND_PORT 1234
#define CONNECT_PORT 4321
@@ -22,14 +23,30 @@
#define NS_SELF "/proc/self/ns/net"
#define SERVER_MAP_PATH "/sys/fs/bpf/tc/globals/server_map"
-static const struct timeval timeo_sec = { .tv_sec = 3 };
-static const size_t timeo_optlen = sizeof(timeo_sec);
static int stop, duration;
static bool
configure_stack(void)
{
+ char tc_version[128];
char tc_cmd[BUFSIZ];
+ char *prog;
+ FILE *tc;
+
+ /* Check whether tc is built with libbpf. */
+ tc = popen("tc -V", "r");
+ if (CHECK_FAIL(!tc))
+ return false;
+ if (CHECK_FAIL(!fgets(tc_version, sizeof(tc_version), tc))) {
+ pclose(tc);
+ return false;
+ }
+ if (strstr(tc_version, ", libbpf "))
+ prog = "test_sk_assign_libbpf.bpf.o";
+ else
+ prog = "test_sk_assign.bpf.o";
+ if (CHECK_FAIL(pclose(tc)))
+ return false;
/* Move to a new networking namespace */
if (CHECK_FAIL(unshare(CLONE_NEWNET)))
@@ -46,9 +63,9 @@ configure_stack(void)
/* Load qdisc, BPF program */
if (CHECK_FAIL(system("tc qdisc add dev lo clsact")))
return false;
- sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf",
- "direct-action object-file ./test_sk_assign.o",
- "section classifier/sk_assign_test",
+ sprintf(tc_cmd, "%s %s %s %s %s", "tc filter add dev lo ingress bpf",
+ "direct-action object-file", prog,
+ "section tc",
(env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "verbose");
if (CHECK(system(tc_cmd), "BPF load failed;",
"run with -vv for more info\n"))
@@ -57,52 +74,6 @@ configure_stack(void)
return true;
}
-static int
-start_server(const struct sockaddr *addr, socklen_t len, int type)
-{
- int fd;
-
- fd = socket(addr->sa_family, type, 0);
- if (CHECK_FAIL(fd == -1))
- goto out;
- if (CHECK_FAIL(setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec,
- timeo_optlen)))
- goto close_out;
- if (CHECK_FAIL(bind(fd, addr, len) == -1))
- goto close_out;
- if (type == SOCK_STREAM && CHECK_FAIL(listen(fd, 128) == -1))
- goto close_out;
-
- goto out;
-close_out:
- close(fd);
- fd = -1;
-out:
- return fd;
-}
-
-static int
-connect_to_server(const struct sockaddr *addr, socklen_t len, int type)
-{
- int fd = -1;
-
- fd = socket(addr->sa_family, type, 0);
- if (CHECK_FAIL(fd == -1))
- goto out;
- if (CHECK_FAIL(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo_sec,
- timeo_optlen)))
- goto close_out;
- if (CHECK_FAIL(connect(fd, addr, len)))
- goto close_out;
-
- goto out;
-close_out:
- close(fd);
- fd = -1;
-out:
- return fd;
-}
-
static in_port_t
get_port(int fd)
{
@@ -129,15 +100,12 @@ get_port(int fd)
static ssize_t
rcv_msg(int srv_client, int type)
{
- struct sockaddr_storage ss;
char buf[BUFSIZ];
- socklen_t slen;
if (type == SOCK_STREAM)
return read(srv_client, &buf, sizeof(buf));
else
- return recvfrom(srv_client, &buf, sizeof(buf), 0,
- (struct sockaddr *)&ss, &slen);
+ return recvfrom(srv_client, &buf, sizeof(buf), 0, NULL, NULL);
}
static int
@@ -148,7 +116,7 @@ run_test(int server_fd, const struct sockaddr *addr, socklen_t len, int type)
in_port_t port;
int ret = 1;
- client = connect_to_server(addr, len, type);
+ client = connect_to_addr(type, (struct sockaddr_storage *)addr, len, NULL);
if (client == -1) {
perror("Cannot connect to server");
goto out;
@@ -297,7 +265,9 @@ void test_sk_assign(void)
continue;
prepare_addr(test->addr, test->family, BIND_PORT, false);
addr = (const struct sockaddr *)test->addr;
- server = start_server(addr, test->len, test->type);
+ server = start_server_addr(test->type,
+ (const struct sockaddr_storage *)addr,
+ test->len, NULL);
if (server == -1)
goto close;
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_bypass_prot_mem.c b/tools/testing/selftests/bpf/prog_tests/sk_bypass_prot_mem.c
new file mode 100644
index 000000000000..e4940583924b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sk_bypass_prot_mem.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2025 Google LLC */
+
+#include <test_progs.h>
+#include "sk_bypass_prot_mem.skel.h"
+#include "network_helpers.h"
+
+#define NR_PAGES 32
+#define NR_SOCKETS 2
+#define BUF_TOTAL (NR_PAGES * 4096 / NR_SOCKETS)
+#define BUF_SINGLE 1024
+#define NR_SEND (BUF_TOTAL / BUF_SINGLE)
+
+struct test_case {
+ char name[8];
+ int family;
+ int type;
+ int (*create_sockets)(struct test_case *test_case, int sk[], int len);
+ long (*get_memory_allocated)(struct test_case *test_case, struct sk_bypass_prot_mem *skel);
+};
+
+static int tcp_create_sockets(struct test_case *test_case, int sk[], int len)
+{
+ int server, i, err = 0;
+
+ server = start_server(test_case->family, test_case->type, NULL, 0, 0);
+ if (!ASSERT_GE(server, 0, "start_server_str"))
+ return server;
+
+ /* Keep for-loop so we can change NR_SOCKETS easily. */
+ for (i = 0; i < len; i += 2) {
+ sk[i] = connect_to_fd(server, 0);
+ if (sk[i] < 0) {
+ ASSERT_GE(sk[i], 0, "connect_to_fd");
+ err = sk[i];
+ break;
+ }
+
+ sk[i + 1] = accept(server, NULL, NULL);
+ if (sk[i + 1] < 0) {
+ ASSERT_GE(sk[i + 1], 0, "accept");
+ err = sk[i + 1];
+ break;
+ }
+ }
+
+ close(server);
+
+ return err;
+}
+
+static int udp_create_sockets(struct test_case *test_case, int sk[], int len)
+{
+ int i, j, err, rcvbuf = BUF_TOTAL;
+
+ /* Keep for-loop so we can change NR_SOCKETS easily. */
+ for (i = 0; i < len; i += 2) {
+ sk[i] = start_server(test_case->family, test_case->type, NULL, 0, 0);
+ if (sk[i] < 0) {
+ ASSERT_GE(sk[i], 0, "start_server");
+ return sk[i];
+ }
+
+ sk[i + 1] = connect_to_fd(sk[i], 0);
+ if (sk[i + 1] < 0) {
+ ASSERT_GE(sk[i + 1], 0, "connect_to_fd");
+ return sk[i + 1];
+ }
+
+ err = connect_fd_to_fd(sk[i], sk[i + 1], 0);
+ if (err) {
+ ASSERT_EQ(err, 0, "connect_fd_to_fd");
+ return err;
+ }
+
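+		/* Size SO_RCVBUF to hold the full BUF_TOTAL about to be sent,
+		 * so nothing is dropped and the accounted memory growth stays
+		 * deterministic.
+		 */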
+ for (j = 0; j < 2; j++) {
+ err = setsockopt(sk[i + j], SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(int));
+ if (err) {
+ ASSERT_EQ(err, 0, "setsockopt(SO_RCVBUF)");
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static long get_memory_allocated(struct test_case *test_case,
+ bool *activated, long *memory_allocated)
+{
+ int sk;
+
+ *activated = true;
+
+ /* AF_INET and AF_INET6 share the same memory_allocated.
+	 * tcp_init_sock() is called for both AF_INET and AF_INET6,
+	 * but udp_lib_init_sock() is inlined.
+ */
+ sk = socket(AF_INET, test_case->type, 0);
+ if (!ASSERT_GE(sk, 0, "get_memory_allocated"))
+ return -1;
+
+ close(sk);
+
+ return *memory_allocated;
+}
+
+static long tcp_get_memory_allocated(struct test_case *test_case, struct sk_bypass_prot_mem *skel)
+{
+ return get_memory_allocated(test_case,
+ &skel->bss->tcp_activated,
+ &skel->bss->tcp_memory_allocated);
+}
+
+static long udp_get_memory_allocated(struct test_case *test_case, struct sk_bypass_prot_mem *skel)
+{
+ return get_memory_allocated(test_case,
+ &skel->bss->udp_activated,
+ &skel->bss->udp_memory_allocated);
+}
+
+static int check_bypass(struct test_case *test_case,
+ struct sk_bypass_prot_mem *skel, bool bypass)
+{
+ char buf[BUF_SINGLE] = {};
+ long memory_allocated[2];
+ int sk[NR_SOCKETS];
+ int err, i, j;
+
+ for (i = 0; i < ARRAY_SIZE(sk); i++)
+ sk[i] = -1;
+
+ err = test_case->create_sockets(test_case, sk, ARRAY_SIZE(sk));
+ if (err)
+ goto close;
+
+ memory_allocated[0] = test_case->get_memory_allocated(test_case, skel);
+
+ /* allocate pages >= NR_PAGES */
+ for (i = 0; i < ARRAY_SIZE(sk); i++) {
+ for (j = 0; j < NR_SEND; j++) {
+ int bytes = send(sk[i], buf, sizeof(buf), 0);
+
+			/* Avoid overly noisy logs when something fails. */
+ if (bytes != sizeof(buf)) {
+ ASSERT_EQ(bytes, sizeof(buf), "send");
+ if (bytes < 0) {
+ err = bytes;
+ goto drain;
+ }
+ }
+ }
+ }
+
+ memory_allocated[1] = test_case->get_memory_allocated(test_case, skel);
+
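+	/* Under bypass the counter should stay roughly flat; allow ~10
+	 * pages of slack for unrelated allocations in the netns.
+	 */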
+ if (bypass)
+ ASSERT_LE(memory_allocated[1], memory_allocated[0] + 10, "bypass");
+ else
+ ASSERT_GT(memory_allocated[1], memory_allocated[0] + NR_PAGES, "no bypass");
+
+drain:
+ if (test_case->type == SOCK_DGRAM) {
+ /* UDP starts purging sk->sk_receive_queue after one RCU
+ * grace period, then udp_memory_allocated goes down,
+ * so drain the queue before close().
+ */
+ for (i = 0; i < ARRAY_SIZE(sk); i++) {
+ for (j = 0; j < NR_SEND; j++) {
+ int bytes = recv(sk[i], buf, 1, MSG_DONTWAIT | MSG_TRUNC);
+
+ if (bytes == sizeof(buf))
+ continue;
+ if (bytes != -1 || errno != EAGAIN)
+ PRINT_FAIL("bytes: %d, errno: %s\n", bytes, strerror(errno));
+ break;
+ }
+ }
+ }
+
+close:
+ for (i = 0; i < ARRAY_SIZE(sk); i++) {
+ if (sk[i] < 0)
+ break;
+
+ close(sk[i]);
+ }
+
+ return err;
+}
+
+static void run_test(struct test_case *test_case)
+{
+ struct sk_bypass_prot_mem *skel;
+ struct nstoken *nstoken;
+ int cgroup, err;
+
+ skel = sk_bypass_prot_mem__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ skel->bss->nr_cpus = libbpf_num_possible_cpus();
+
+ err = sk_bypass_prot_mem__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto destroy_skel;
+
+ cgroup = test__join_cgroup("/sk_bypass_prot_mem");
+ if (!ASSERT_GE(cgroup, 0, "join_cgroup"))
+ goto destroy_skel;
+
+ err = make_netns("sk_bypass_prot_mem");
+ if (!ASSERT_EQ(err, 0, "make_netns"))
+ goto close_cgroup;
+
+ nstoken = open_netns("sk_bypass_prot_mem");
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto remove_netns;
+
+ err = check_bypass(test_case, skel, false);
+ if (!ASSERT_EQ(err, 0, "test_bypass(false)"))
+ goto close_netns;
+
+ err = write_sysctl("/proc/sys/net/core/bypass_prot_mem", "1");
+ if (!ASSERT_EQ(err, 0, "write_sysctl(1)"))
+ goto close_netns;
+
+ err = check_bypass(test_case, skel, true);
+ if (!ASSERT_EQ(err, 0, "test_bypass(true by sysctl)"))
+ goto close_netns;
+
+ err = write_sysctl("/proc/sys/net/core/bypass_prot_mem", "0");
+ if (!ASSERT_EQ(err, 0, "write_sysctl(0)"))
+ goto close_netns;
+
+ skel->links.sock_create = bpf_program__attach_cgroup(skel->progs.sock_create, cgroup);
+ if (!ASSERT_OK_PTR(skel->links.sock_create, "attach_cgroup(sock_create)"))
+ goto close_netns;
+
+ err = check_bypass(test_case, skel, true);
+ ASSERT_EQ(err, 0, "test_bypass(true by bpf)");
+
+close_netns:
+ close_netns(nstoken);
+remove_netns:
+ remove_netns("sk_bypass_prot_mem");
+close_cgroup:
+ close(cgroup);
+destroy_skel:
+ sk_bypass_prot_mem__destroy(skel);
+}
+
+static struct test_case test_cases[] = {
+ {
+ .name = "TCP ",
+ .family = AF_INET,
+ .type = SOCK_STREAM,
+ .create_sockets = tcp_create_sockets,
+ .get_memory_allocated = tcp_get_memory_allocated,
+ },
+ {
+ .name = "UDP ",
+ .family = AF_INET,
+ .type = SOCK_DGRAM,
+ .create_sockets = udp_create_sockets,
+ .get_memory_allocated = udp_get_memory_allocated,
+ },
+ {
+ .name = "TCPv6",
+ .family = AF_INET6,
+ .type = SOCK_STREAM,
+ .create_sockets = tcp_create_sockets,
+ .get_memory_allocated = tcp_get_memory_allocated,
+ },
+ {
+ .name = "UDPv6",
+ .family = AF_INET6,
+ .type = SOCK_DGRAM,
+ .create_sockets = udp_create_sockets,
+ .get_memory_allocated = udp_get_memory_allocated,
+ },
+};
+
+void serial_test_sk_bypass_prot_mem(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ if (test__start_subtest(test_cases[i].name))
+ run_test(&test_cases[i]);
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
index 9ff0412e1fd3..023c31bde229 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
@@ -18,7 +18,6 @@
#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
-#include <error.h>
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
@@ -30,7 +29,6 @@
#include <bpf/bpf.h>
#include "test_progs.h"
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "network_helpers.h"
@@ -48,8 +46,6 @@
#define INT_IP6 "fd00::2"
#define INT_PORT 8008
-#define IO_TIMEOUT_SEC 3
-
enum server {
SERVER_A = 0,
SERVER_B = 1,
@@ -78,6 +74,12 @@ struct test {
bool reuseport_has_conns; /* Add a connected socket to reuseport group */
};
+struct cb_opts {
+ int family;
+ int sotype;
+ bool reuseport;
+};
+
static __u32 duration; /* for CHECK macro */
static bool is_ipv6(const char *ip)
@@ -103,59 +105,14 @@ static int attach_reuseport(int sock_fd, struct bpf_program *reuseport_prog)
return 0;
}
-static socklen_t inetaddr_len(const struct sockaddr_storage *addr)
-{
- return (addr->ss_family == AF_INET ? sizeof(struct sockaddr_in) :
- addr->ss_family == AF_INET6 ? sizeof(struct sockaddr_in6) : 0);
-}
-
-static int make_socket(int sotype, const char *ip, int port,
- struct sockaddr_storage *addr)
+static int setsockopts(int fd, void *opts)
{
- struct timeval timeo = { .tv_sec = IO_TIMEOUT_SEC };
- int err, family, fd;
-
- family = is_ipv6(ip) ? AF_INET6 : AF_INET;
- err = make_sockaddr(family, ip, port, addr, NULL);
- if (CHECK(err, "make_address", "failed\n"))
- return -1;
-
- fd = socket(addr->ss_family, sotype, 0);
- if (CHECK(fd < 0, "socket", "failed\n")) {
- log_err("failed to make socket");
- return -1;
- }
-
- err = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
- if (CHECK(err, "setsockopt(SO_SNDTIMEO)", "failed\n")) {
- log_err("failed to set SNDTIMEO");
- close(fd);
- return -1;
- }
-
- err = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
- if (CHECK(err, "setsockopt(SO_RCVTIMEO)", "failed\n")) {
- log_err("failed to set RCVTIMEO");
- close(fd);
- return -1;
- }
-
- return fd;
-}
-
-static int make_server(int sotype, const char *ip, int port,
- struct bpf_program *reuseport_prog)
-{
- struct sockaddr_storage addr = {0};
+ struct cb_opts *co = (struct cb_opts *)opts;
const int one = 1;
- int err, fd = -1;
-
- fd = make_socket(sotype, ip, port, &addr);
- if (fd < 0)
- return -1;
+ int err = 0;
/* Enabled for UDPv6 sockets for IPv4-mapped IPv6 to work. */
- if (sotype == SOCK_DGRAM) {
+ if (co->sotype == SOCK_DGRAM) {
err = setsockopt(fd, SOL_IP, IP_RECVORIGDSTADDR, &one,
sizeof(one));
if (CHECK(err, "setsockopt(IP_RECVORIGDSTADDR)", "failed\n")) {
@@ -164,7 +121,7 @@ static int make_server(int sotype, const char *ip, int port,
}
}
- if (sotype == SOCK_DGRAM && addr.ss_family == AF_INET6) {
+ if (co->sotype == SOCK_DGRAM && co->family == AF_INET6) {
err = setsockopt(fd, SOL_IPV6, IPV6_RECVORIGDSTADDR, &one,
sizeof(one));
if (CHECK(err, "setsockopt(IPV6_RECVORIGDSTADDR)", "failed\n")) {
@@ -173,7 +130,7 @@ static int make_server(int sotype, const char *ip, int port,
}
}
- if (sotype == SOCK_STREAM) {
+ if (co->sotype == SOCK_STREAM) {
err = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one,
sizeof(one));
if (CHECK(err, "setsockopt(SO_REUSEADDR)", "failed\n")) {
@@ -182,7 +139,7 @@ static int make_server(int sotype, const char *ip, int port,
}
}
- if (reuseport_prog) {
+ if (co->reuseport) {
err = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one,
sizeof(one));
if (CHECK(err, "setsockopt(SO_REUSEPORT)", "failed\n")) {
@@ -191,19 +148,28 @@ static int make_server(int sotype, const char *ip, int port,
}
}
- err = bind(fd, (void *)&addr, inetaddr_len(&addr));
- if (CHECK(err, "bind", "failed\n")) {
- log_err("failed to bind listen socket");
- goto fail;
- }
+fail:
+ return err;
+}
- if (sotype == SOCK_STREAM) {
- err = listen(fd, SOMAXCONN);
- if (CHECK(err, "make_server", "listen")) {
- log_err("failed to listen on port %d", port);
- goto fail;
- }
- }
+static int make_server(int sotype, const char *ip, int port,
+ struct bpf_program *reuseport_prog)
+{
+ struct cb_opts cb_opts = {
+ .family = is_ipv6(ip) ? AF_INET6 : AF_INET,
+ .sotype = sotype,
+ .reuseport = reuseport_prog,
+ };
+ struct network_helper_opts opts = {
+ .backlog = SOMAXCONN,
+ .post_socket_cb = setsockopts,
+ .cb_opts = &cb_opts,
+ };
+ int err, fd;
+
+ fd = start_server_str(cb_opts.family, sotype, ip, port, &opts);
+ if (!ASSERT_OK_FD(fd, "start_server_str"))
+ return -1;
/* Late attach reuseport prog so we can have one init path */
if (reuseport_prog) {
@@ -220,25 +186,46 @@ fail:
return -1;
}
-static int make_client(int sotype, const char *ip, int port)
+static __u64 socket_cookie(int fd)
{
- struct sockaddr_storage addr = {0};
- int err, fd;
+ __u64 cookie;
+ socklen_t cookie_len = sizeof(cookie);
- fd = make_socket(sotype, ip, port, &addr);
- if (fd < 0)
- return -1;
+ if (CHECK(getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &cookie_len) < 0,
+ "getsockopt(SO_COOKIE)", "%s\n", strerror(errno)))
+ return 0;
+ return cookie;
+}
+
+static int fill_sk_lookup_ctx(struct bpf_sk_lookup *ctx, const char *local_ip, __u16 local_port,
+ const char *remote_ip, __u16 remote_port)
+{
+ void *local, *remote;
+ int err;
- err = connect(fd, (void *)&addr, inetaddr_len(&addr));
- if (CHECK(err, "make_client", "connect")) {
- log_err("failed to connect client socket");
- goto fail;
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->local_port = local_port;
+ ctx->remote_port = htons(remote_port);
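+	/* In struct bpf_sk_lookup, local_port is host byte order while
+	 * remote_port is network byte order.
+	 */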
+
+ if (is_ipv6(local_ip)) {
+ ctx->family = AF_INET6;
+ local = &ctx->local_ip6[0];
+ remote = &ctx->remote_ip6[0];
+ } else {
+ ctx->family = AF_INET;
+ local = &ctx->local_ip4;
+ remote = &ctx->remote_ip4;
}
- return fd;
-fail:
- close(fd);
- return -1;
+ err = inet_pton(ctx->family, local_ip, local);
+ if (CHECK(err != 1, "inet_pton", "local_ip failed\n"))
+ return 1;
+
+ err = inet_pton(ctx->family, remote_ip, remote);
+ if (CHECK(err != 1, "inet_pton", "remote_ip failed\n"))
+ return 1;
+
+ return 0;
}
static int send_byte(int fd)
@@ -365,18 +352,12 @@ static int udp_recv_send(int server_fd)
}
/* Reply from original destination address. */
- fd = socket(dst_addr->ss_family, SOCK_DGRAM, 0);
- if (CHECK(fd < 0, "socket", "failed\n")) {
+ fd = start_server_addr(SOCK_DGRAM, dst_addr, sizeof(*dst_addr), NULL);
+ if (!ASSERT_OK_FD(fd, "start_server_addr")) {
log_err("failed to create tx socket");
return -1;
}
- ret = bind(fd, (struct sockaddr *)dst_addr, sizeof(*dst_addr));
- if (CHECK(ret, "bind", "failed\n")) {
- log_err("failed to bind tx socket");
- goto out;
- }
-
msg.msg_control = NULL;
msg.msg_controllen = 0;
n = sendmsg(fd, &msg, 0);
@@ -438,7 +419,7 @@ static struct bpf_link *attach_lookup_prog(struct bpf_program *prog)
}
link = bpf_program__attach_netns(prog, net_fd);
- if (CHECK(IS_ERR(link), "bpf_program__attach_netns", "failed\n")) {
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_netns")) {
errno = -PTR_ERR(link);
log_err("failed to attach program '%s' to netns",
bpf_program__name(prog));
@@ -556,7 +537,7 @@ close:
static void run_lookup_prog(const struct test *t)
{
- int server_fds[MAX_SERVERS] = { -1 };
+ int server_fds[] = { [0 ... MAX_SERVERS - 1] = -1 };
int client_fd, reuse_conn_fd = -1;
struct bpf_link *lookup_link;
int i, err;
@@ -588,9 +569,6 @@ static void run_lookup_prog(const struct test *t)
* BPF socket lookup.
*/
if (t->reuseport_has_conns) {
- struct sockaddr_storage addr = {};
- socklen_t len = sizeof(addr);
-
/* Add an extra socket to reuseport group */
reuse_conn_fd = make_server(t->sotype, t->listen_at.ip,
t->listen_at.port,
@@ -598,17 +576,15 @@ static void run_lookup_prog(const struct test *t)
if (reuse_conn_fd < 0)
goto close;
- /* Connect the extra socket to itself */
- err = getsockname(reuse_conn_fd, (void *)&addr, &len);
- if (CHECK(err, "getsockname", "errno %d\n", errno))
- goto close;
- err = connect(reuse_conn_fd, (void *)&addr, len);
- if (CHECK(err, "connect", "errno %d\n", errno))
+ /* Connect the extra socket to itself */
+ err = connect_fd_to_fd(reuse_conn_fd, reuse_conn_fd, 0);
+ if (!ASSERT_OK(err, "connect_fd_to_fd"))
goto close;
}
- client_fd = make_client(t->sotype, t->connect_to.ip, t->connect_to.port);
- if (client_fd < 0)
+ client_fd = connect_to_addr_str(is_ipv6(t->connect_to.ip) ? AF_INET6 : AF_INET,
+ t->sotype, t->connect_to.ip, t->connect_to.port, NULL);
+ if (!ASSERT_OK_FD(client_fd, "connect_to_addr_str"))
goto close;
if (t->sotype == SOCK_STREAM)
@@ -823,9 +799,11 @@ static void test_redirect_lookup(struct test_sk_lookup *skel)
static void drop_on_lookup(const struct test *t)
{
+ int family = is_ipv6(t->connect_to.ip) ? AF_INET6 : AF_INET;
struct sockaddr_storage dst = {};
int client_fd, server_fd, err;
struct bpf_link *lookup_link;
+ socklen_t len;
ssize_t n;
lookup_link = attach_lookup_prog(t->lookup_prog);
@@ -837,12 +815,14 @@ static void drop_on_lookup(const struct test *t)
if (server_fd < 0)
goto detach;
- client_fd = make_socket(t->sotype, t->connect_to.ip,
- t->connect_to.port, &dst);
- if (client_fd < 0)
+ client_fd = client_socket(family, t->sotype, NULL);
+ if (!ASSERT_OK_FD(client_fd, "client_socket"))
goto close_srv;
- err = connect(client_fd, (void *)&dst, inetaddr_len(&dst));
+ err = make_sockaddr(family, t->connect_to.ip, t->connect_to.port, &dst, &len);
+ if (!ASSERT_OK(err, "make_sockaddr"))
+ goto close_all;
+ err = connect(client_fd, (void *)&dst, len);
if (t->sotype == SOCK_DGRAM) {
err = send_byte(client_fd);
if (err)
@@ -895,6 +875,37 @@ static void test_drop_on_lookup(struct test_sk_lookup *skel)
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { EXT_IP6, INT_PORT },
},
+	/* The program drops the packet on success, which shows that the
+	 * ingress ifindex it observed was 1.
+	 */
+ {
+ .desc = "TCP IPv4 drop on valid ifindex",
+ .lookup_prog = skel->progs.check_ifindex,
+ .sotype = SOCK_STREAM,
+ .connect_to = { EXT_IP4, EXT_PORT },
+ .listen_at = { EXT_IP4, EXT_PORT },
+ },
+ {
+ .desc = "TCP IPv6 drop on valid ifindex",
+ .lookup_prog = skel->progs.check_ifindex,
+ .sotype = SOCK_STREAM,
+ .connect_to = { EXT_IP6, EXT_PORT },
+ .listen_at = { EXT_IP6, EXT_PORT },
+ },
+ {
+ .desc = "UDP IPv4 drop on valid ifindex",
+ .lookup_prog = skel->progs.check_ifindex,
+ .sotype = SOCK_DGRAM,
+ .connect_to = { EXT_IP4, EXT_PORT },
+ .listen_at = { EXT_IP4, EXT_PORT },
+ },
+ {
+ .desc = "UDP IPv6 drop on valid ifindex",
+ .lookup_prog = skel->progs.check_ifindex,
+ .sotype = SOCK_DGRAM,
+ .connect_to = { EXT_IP6, EXT_PORT },
+ .listen_at = { EXT_IP6, EXT_PORT },
+ },
};
const struct test *t;
@@ -906,9 +917,11 @@ static void test_drop_on_lookup(struct test_sk_lookup *skel)
static void drop_on_reuseport(const struct test *t)
{
+ int family = is_ipv6(t->connect_to.ip) ? AF_INET6 : AF_INET;
struct sockaddr_storage dst = { 0 };
int client, server1, server2, err;
struct bpf_link *lookup_link;
+ socklen_t len;
ssize_t n;
lookup_link = attach_lookup_prog(t->lookup_prog);
@@ -922,7 +935,7 @@ static void drop_on_reuseport(const struct test *t)
err = update_lookup_map(t->sock_map, SERVER_A, server1);
if (err)
- goto detach;
+ goto close_srv1;
/* second server on destination address we should never reach */
server2 = make_server(t->sotype, t->connect_to.ip, t->connect_to.port,
@@ -930,12 +943,14 @@ static void drop_on_reuseport(const struct test *t)
if (server2 < 0)
goto close_srv1;
- client = make_socket(t->sotype, t->connect_to.ip,
- t->connect_to.port, &dst);
- if (client < 0)
+ client = client_socket(family, t->sotype, NULL);
+ if (!ASSERT_OK_FD(client, "client_socket"))
goto close_srv2;
- err = connect(client, (void *)&dst, inetaddr_len(&dst));
+ err = make_sockaddr(family, t->connect_to.ip, t->connect_to.port, &dst, &len);
+ if (!ASSERT_OK(err, "make_sockaddr"))
+ goto close_all;
+ err = connect(client, (void *)&dst, len);
if (t->sotype == SOCK_DGRAM) {
err = send_byte(client);
if (err)
@@ -1009,18 +1024,27 @@ static void test_drop_on_reuseport(struct test_sk_lookup *skel)
static void run_sk_assign(struct test_sk_lookup *skel,
struct bpf_program *lookup_prog,
- const char *listen_ip, const char *connect_ip)
+ const char *remote_ip, const char *local_ip)
{
- int client_fd, peer_fd, server_fds[MAX_SERVERS] = { -1 };
- struct bpf_link *lookup_link;
+ int server_fds[] = { [0 ... MAX_SERVERS - 1] = -1 };
+ struct bpf_sk_lookup ctx;
+ __u64 server_cookie;
int i, err;
- lookup_link = attach_lookup_prog(lookup_prog);
- if (!lookup_link)
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .ctx_in = &ctx,
+ .ctx_size_in = sizeof(ctx),
+ .ctx_out = &ctx,
+ .ctx_size_out = sizeof(ctx),
+ );
+
+ if (fill_sk_lookup_ctx(&ctx, local_ip, EXT_PORT, remote_ip, INT_PORT))
return;
+ ctx.protocol = IPPROTO_TCP;
+
for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
- server_fds[i] = make_server(SOCK_STREAM, listen_ip, 0, NULL);
+ server_fds[i] = make_server(SOCK_STREAM, local_ip, 0, NULL);
if (server_fds[i] < 0)
goto close_servers;
@@ -1030,23 +1054,25 @@ static void run_sk_assign(struct test_sk_lookup *skel,
goto close_servers;
}
- client_fd = make_client(SOCK_STREAM, connect_ip, EXT_PORT);
- if (client_fd < 0)
+ server_cookie = socket_cookie(server_fds[SERVER_B]);
+ if (!server_cookie)
+ return;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(lookup_prog), &opts);
+ if (CHECK(err, "test_run", "failed with error %d\n", errno))
goto close_servers;
- peer_fd = accept(server_fds[SERVER_B], NULL, NULL);
- if (CHECK(peer_fd < 0, "accept", "failed\n"))
- goto close_client;
+ if (CHECK(ctx.cookie == 0, "ctx.cookie", "no socket selected\n"))
+ goto close_servers;
+
+ CHECK(ctx.cookie != server_cookie, "ctx.cookie",
+ "selected sk %llu instead of %llu\n", ctx.cookie, server_cookie);
- close(peer_fd);
-close_client:
- close(client_fd);
close_servers:
for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
if (server_fds[i] != -1)
close(server_fds[i]);
}
- bpf_link__destroy(lookup_link);
}
static void run_sk_assign_v4(struct test_sk_lookup *skel,
@@ -1071,8 +1097,8 @@ static void run_sk_assign_connected(struct test_sk_lookup *skel,
if (server_fd < 0)
return;
- connected_fd = make_client(sotype, EXT_IP4, EXT_PORT);
- if (connected_fd < 0)
+ connected_fd = connect_to_addr_str(AF_INET, sotype, EXT_IP4, EXT_PORT, NULL);
+ if (!ASSERT_OK_FD(connected_fd, "connect_to_addr_str"))
goto out_close_server;
/* Put a connected socket in redirect map */
@@ -1085,8 +1111,8 @@ static void run_sk_assign_connected(struct test_sk_lookup *skel,
goto out_close_connected;
/* Try to redirect TCP SYN / UDP packet to a connected socket */
- client_fd = make_client(sotype, EXT_IP4, EXT_PORT);
- if (client_fd < 0)
+ client_fd = connect_to_addr_str(AF_INET, sotype, EXT_IP4, EXT_PORT, NULL);
+ if (!ASSERT_OK_FD(client_fd, "connect_to_addr_str"))
goto out_unlink_prog;
if (sotype == SOCK_DGRAM) {
send_byte(client_fd);
@@ -1138,6 +1164,7 @@ static void run_multi_prog_lookup(const struct test_multi_prog *t)
int map_fd, server_fd, client_fd;
struct bpf_link *link1, *link2;
int prog_idx, done, err;
+ socklen_t len;
map_fd = bpf_map__fd(t->run_map);
@@ -1167,11 +1194,14 @@ static void run_multi_prog_lookup(const struct test_multi_prog *t)
if (err)
goto out_close_server;
- client_fd = make_socket(SOCK_STREAM, EXT_IP4, EXT_PORT, &dst);
- if (client_fd < 0)
+ client_fd = client_socket(AF_INET, SOCK_STREAM, NULL);
+ if (!ASSERT_OK_FD(client_fd, "client_socket"))
goto out_close_server;
- err = connect(client_fd, (void *)&dst, inetaddr_len(&dst));
+ err = make_sockaddr(AF_INET, EXT_IP4, EXT_PORT, &dst, &len);
+ if (!ASSERT_OK(err, "make_sockaddr"))
+ goto out_close_client;
+ err = connect(client_fd, (void *)&dst, len);
if (CHECK(err && !t->expect_errno, "connect",
"unexpected error %d\n", errno))
goto out_close_client;
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_storage_omem_uncharge.c b/tools/testing/selftests/bpf/prog_tests/sk_storage_omem_uncharge.c
new file mode 100644
index 000000000000..f35852d245e3
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sk_storage_omem_uncharge.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Facebook */
+#include <test_progs.h>
+#include <bpf/libbpf.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include "sk_storage_omem_uncharge.skel.h"
+
+void test_sk_storage_omem_uncharge(void)
+{
+ struct sk_storage_omem_uncharge *skel;
+ int sk_fd = -1, map_fd, err, value;
+ socklen_t optlen;
+
+ skel = sk_storage_omem_uncharge__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel open_and_load"))
+ return;
+ map_fd = bpf_map__fd(skel->maps.sk_storage);
+
+	/* A standalone socket that is not bound to an addr:port,
+	 * so a netns is not needed.
+ */
+ sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (!ASSERT_GE(sk_fd, 0, "socket"))
+ goto done;
+
+ optlen = sizeof(skel->bss->cookie);
+ err = getsockopt(sk_fd, SOL_SOCKET, SO_COOKIE, &skel->bss->cookie, &optlen);
+ if (!ASSERT_OK(err, "getsockopt(SO_COOKIE)"))
+ goto done;
+
+ value = 0;
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(value=0)"))
+ goto done;
+
+ value = 0xdeadbeef;
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(value=0xdeadbeef)"))
+ goto done;
+
+ err = sk_storage_omem_uncharge__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto done;
+
+ close(sk_fd);
+ sk_fd = -1;
+
+ ASSERT_EQ(skel->bss->cookie_found, 2, "cookie_found");
+ ASSERT_EQ(skel->bss->omem, 0, "omem");
+
+done:
+ sk_storage_omem_uncharge__destroy(skel);
+ if (sk_fd != -1)
+ close(sk_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c b/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c
index 2b392590e8ca..547ae53cde74 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c
@@ -105,7 +105,7 @@ out:
close(listen_fd);
}
-void test_sk_storage_tracing(void)
+void serial_test_sk_storage_tracing(void)
{
struct test_sk_storage_trace_itself *skel_itself;
int err;
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
index fafeddaad6a9..33f950e2dae3 100644
--- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
+++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
@@ -11,98 +11,81 @@ void test_skb_ctx(void)
.cb[3] = 4,
.cb[4] = 5,
.priority = 6,
+ .ingress_ifindex = 11,
.ifindex = 1,
.tstamp = 7,
.wire_len = 100,
.gso_segs = 8,
.mark = 9,
.gso_size = 10,
+ .hwtstamp = 11,
};
- struct bpf_prog_test_run_attr tattr = {
+ LIBBPF_OPTS(bpf_test_run_opts, tattr,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.ctx_in = &skb,
.ctx_size_in = sizeof(skb),
.ctx_out = &skb,
.ctx_size_out = sizeof(skb),
- };
+ );
struct bpf_object *obj;
- int err;
- int i;
+ int err, prog_fd, i;
- err = bpf_prog_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
- &tattr.prog_fd);
- if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
+ err = bpf_prog_test_load("./test_skb_ctx.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &prog_fd);
+ if (!ASSERT_OK(err, "load"))
return;
/* ctx_in != NULL, ctx_size_in == 0 */
tattr.ctx_size_in = 0;
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err == 0, "ctx_size_in", "err %d errno %d\n", err, errno);
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ ASSERT_NEQ(err, 0, "ctx_size_in");
tattr.ctx_size_in = sizeof(skb);
/* ctx_out != NULL, ctx_size_out == 0 */
tattr.ctx_size_out = 0;
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err == 0, "ctx_size_out", "err %d errno %d\n", err, errno);
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ ASSERT_NEQ(err, 0, "ctx_size_out");
tattr.ctx_size_out = sizeof(skb);
	/* non-zero [len, tc_index] fields should be rejected */
skb.len = 1;
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err == 0, "len", "err %d errno %d\n", err, errno);
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ ASSERT_NEQ(err, 0, "len");
skb.len = 0;
skb.tc_index = 1;
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err == 0, "tc_index", "err %d errno %d\n", err, errno);
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ ASSERT_NEQ(err, 0, "tc_index");
skb.tc_index = 0;
/* non-zero [hash, sk] fields should be rejected */
skb.hash = 1;
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err == 0, "hash", "err %d errno %d\n", err, errno);
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ ASSERT_NEQ(err, 0, "hash");
skb.hash = 0;
skb.sk = (struct bpf_sock *)1;
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err == 0, "sk", "err %d errno %d\n", err, errno);
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ ASSERT_NEQ(err, 0, "sk");
skb.sk = 0;
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err != 0 || tattr.retval,
- "run",
- "err %d errno %d retval %d\n",
- err, errno, tattr.retval);
-
- CHECK_ATTR(tattr.ctx_size_out != sizeof(skb),
- "ctx_size_out",
- "incorrect output size, want %zu have %u\n",
- sizeof(skb), tattr.ctx_size_out);
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ ASSERT_OK(err, "test_run");
+ ASSERT_OK(tattr.retval, "test_run retval");
+ ASSERT_EQ(tattr.ctx_size_out, sizeof(skb), "ctx_size_out");
for (i = 0; i < 5; i++)
- CHECK_ATTR(skb.cb[i] != i + 2,
- "ctx_out_cb",
- "skb->cb[i] == %d, expected %d\n",
- skb.cb[i], i + 2);
- CHECK_ATTR(skb.priority != 7,
- "ctx_out_priority",
- "skb->priority == %d, expected %d\n",
- skb.priority, 7);
- CHECK_ATTR(skb.ifindex != 1,
- "ctx_out_ifindex",
- "skb->ifindex == %d, expected %d\n",
- skb.ifindex, 1);
- CHECK_ATTR(skb.tstamp != 8,
- "ctx_out_tstamp",
- "skb->tstamp == %lld, expected %d\n",
- skb.tstamp, 8);
- CHECK_ATTR(skb.mark != 10,
- "ctx_out_mark",
- "skb->mark == %u, expected %d\n",
- skb.mark, 10);
+ ASSERT_EQ(skb.cb[i], i + 2, "ctx_out_cb");
+ ASSERT_EQ(skb.priority, 7, "ctx_out_priority");
+ ASSERT_EQ(skb.ifindex, 1, "ctx_out_ifindex");
+ ASSERT_EQ(skb.ingress_ifindex, 11, "ctx_out_ingress_ifindex");
+ ASSERT_EQ(skb.tstamp, 8, "ctx_out_tstamp");
+ ASSERT_EQ(skb.mark, 10, "ctx_out_mark");
+
+ bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_helpers.c b/tools/testing/selftests/bpf/prog_tests/skb_helpers.c
index f302ad84a298..f7ee25f290f7 100644
--- a/tools/testing/selftests/bpf/prog_tests/skb_helpers.c
+++ b/tools/testing/selftests/bpf/prog_tests/skb_helpers.c
@@ -9,22 +9,22 @@ void test_skb_helpers(void)
.gso_segs = 8,
.gso_size = 10,
};
- struct bpf_prog_test_run_attr tattr = {
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.ctx_in = &skb,
.ctx_size_in = sizeof(skb),
.ctx_out = &skb,
.ctx_size_out = sizeof(skb),
- };
+ );
struct bpf_object *obj;
- int err;
+ int err, prog_fd;
- err = bpf_prog_load("./test_skb_helpers.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
- &tattr.prog_fd);
- if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
+ err = bpf_prog_test_load("./test_skb_helpers.bpf.o",
+ BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (!ASSERT_OK(err, "load"))
return;
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err, "len", "err %d errno %d\n", err, errno);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c
new file mode 100644
index 000000000000..d7f83c0a40a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "skb_load_bytes.skel.h"
+
+void test_skb_load_bytes(void)
+{
+ struct skb_load_bytes *skel;
+ int err, prog_fd, test_result;
+ struct __sk_buff skb = { 0 };
+
+ LIBBPF_OPTS(bpf_test_run_opts, tattr,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ );
+
+ skel = skb_load_bytes__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.skb_process);
+ if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
+ goto out;
+
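+	/* A wrapped-around offset must make the in-kernel byte load fail,
+	 * surfacing -EFAULT in test_result.
+	 */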
+ skel->bss->load_offset = (uint32_t)(-1);
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ goto out;
+ test_result = skel->bss->test_result;
+ if (!ASSERT_EQ(test_result, -EFAULT, "offset -1"))
+ goto out;
+
+ skel->bss->load_offset = (uint32_t)10;
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ goto out;
+ test_result = skel->bss->test_result;
+ if (!ASSERT_EQ(test_result, 0, "offset 10"))
+ goto out;
+
+out:
+ skb_load_bytes__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c b/tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c
new file mode 100644
index 000000000000..3eefdfed1db9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Hengqi Chen */
+
+#include <test_progs.h>
+#include <sys/un.h>
+#include "test_skc_to_unix_sock.skel.h"
+
+static const char *sock_path = "@skc_to_unix_sock";
+
+void test_skc_to_unix_sock(void)
+{
+ struct test_skc_to_unix_sock *skel;
+ struct sockaddr_un sockaddr;
+ int err, sockfd = 0;
+
+ skel = test_skc_to_unix_sock__open();
+ if (!ASSERT_OK_PTR(skel, "could not open BPF object"))
+ return;
+
+ skel->rodata->my_pid = getpid();
+
+ err = test_skc_to_unix_sock__load(skel);
+ if (!ASSERT_OK(err, "could not load BPF object"))
+ goto cleanup;
+
+ err = test_skc_to_unix_sock__attach(skel);
+ if (!ASSERT_OK(err, "could not attach BPF object"))
+ goto cleanup;
+
+ /* trigger unix_listen */
+ sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (!ASSERT_GT(sockfd, 0, "socket failed"))
+ goto cleanup;
+
+ memset(&sockaddr, 0, sizeof(sockaddr));
+ sockaddr.sun_family = AF_UNIX;
+ strncpy(sockaddr.sun_path, sock_path, strlen(sock_path));
+ sockaddr.sun_path[0] = '\0';
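+	/* The '\0' first byte selects the abstract unix namespace; the
+	 * '@' in sock_path is just its textual convention.
+	 */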
+
+ err = bind(sockfd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
+ if (!ASSERT_OK(err, "bind failed"))
+ goto cleanup;
+
+ err = listen(sockfd, 1);
+ if (!ASSERT_OK(err, "listen failed"))
+ goto cleanup;
+
+ ASSERT_EQ(strcmp(skel->bss->path, sock_path), 0, "bpf_skc_to_unix_sock failed");
+
+cleanup:
+ if (sockfd)
+ close(sockfd);
+ test_skc_to_unix_sock__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c
index fe87b77af459..bc6817aee9aa 100644
--- a/tools/testing/selftests/bpf/prog_tests/skeleton.c
+++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
+#include <sys/mman.h>
struct s {
int a;
@@ -16,8 +17,14 @@ void test_skeleton(void)
struct test_skeleton* skel;
struct test_skeleton__bss *bss;
struct test_skeleton__data *data;
+ struct test_skeleton__data_dyn *data_dyn;
struct test_skeleton__rodata *rodata;
+ struct test_skeleton__rodata_dyn *rodata_dyn;
struct test_skeleton__kconfig *kcfg;
+ const void *elf_bytes;
+ size_t elf_bytes_sz = 0;
+ void *m;
+ int i, fd;
skel = test_skeleton__open();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
@@ -28,7 +35,12 @@ void test_skeleton(void)
bss = skel->bss;
data = skel->data;
+ data_dyn = skel->data_dyn;
rodata = skel->rodata;
+ rodata_dyn = skel->rodata_dyn;
+
+ ASSERT_STREQ(bpf_map__name(skel->maps.rodata_dyn), ".rodata.dyn", "rodata_dyn_name");
+ ASSERT_STREQ(bpf_map__name(skel->maps.data_dyn), ".data.dyn", "data_dyn_name");
/* validate values are pre-initialized correctly */
CHECK(data->in1 != -1, "in1", "got %d != exp %d\n", data->in1, -1);
@@ -44,6 +56,12 @@ void test_skeleton(void)
CHECK(rodata->in.in6 != 0, "in6", "got %d != exp %d\n", rodata->in.in6, 0);
CHECK(bss->out6 != 0, "out6", "got %d != exp %d\n", bss->out6, 0);
+ ASSERT_EQ(rodata_dyn->in_dynarr_sz, 0, "in_dynarr_sz");
+ for (i = 0; i < 4; i++)
+ ASSERT_EQ(rodata_dyn->in_dynarr[i], -(i + 1), "in_dynarr");
+ for (i = 0; i < 4; i++)
+ ASSERT_EQ(data_dyn->out_dynarr[i], i + 1, "out_dynarr");
+
/* validate we can pre-setup global variables, even in .bss */
data->in1 = 10;
data->in2 = 11;
@@ -51,6 +69,10 @@ void test_skeleton(void)
bss->in4 = 13;
rodata->in.in6 = 14;
+ rodata_dyn->in_dynarr_sz = 4;
+ for (i = 0; i < 4; i++)
+ rodata_dyn->in_dynarr[i] = i + 10;
+
err = test_skeleton__load(skel);
if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
goto cleanup;
@@ -62,6 +84,10 @@ void test_skeleton(void)
CHECK(bss->in4 != 13, "in4", "got %lld != exp %lld\n", bss->in4, 13LL);
CHECK(rodata->in.in6 != 14, "in6", "got %d != exp %d\n", rodata->in.in6, 14);
+ ASSERT_EQ(rodata_dyn->in_dynarr_sz, 4, "in_dynarr_sz");
+ for (i = 0; i < 4; i++)
+ ASSERT_EQ(rodata_dyn->in_dynarr[i], i + 10, "in_dynarr");
+
/* now set new values and attach to get them into outX variables */
data->in1 = 1;
data->in2 = 2;
@@ -71,6 +97,8 @@ void test_skeleton(void)
bss->in5.b = 6;
kcfg = skel->kconfig;
+ skel->data_read_mostly->read_mostly_var = 123;
+
err = test_skeleton__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
@@ -82,10 +110,8 @@ void test_skeleton(void)
CHECK(data->out2 != 2, "res2", "got %lld != exp %d\n", data->out2, 2);
CHECK(bss->out3 != 3, "res3", "got %d != exp %d\n", (int)bss->out3, 3);
CHECK(bss->out4 != 4, "res4", "got %lld != exp %d\n", bss->out4, 4);
- CHECK(bss->handler_out5.a != 5, "res5", "got %d != exp %d\n",
- bss->handler_out5.a, 5);
- CHECK(bss->handler_out5.b != 6, "res6", "got %lld != exp %d\n",
- bss->handler_out5.b, 6);
+ CHECK(bss->out5.a != 5, "res5", "got %d != exp %d\n", bss->out5.a, 5);
+ CHECK(bss->out5.b != 6, "res6", "got %lld != exp %d\n", bss->out5.b, 6);
CHECK(bss->out6 != 14, "res7", "got %d != exp %d\n", bss->out6, 14);
CHECK(bss->bpf_syscall != kcfg->CONFIG_BPF_SYSCALL, "ext1",
@@ -93,6 +119,24 @@ void test_skeleton(void)
CHECK(bss->kern_ver != kcfg->LINUX_KERNEL_VERSION, "ext2",
"got %d != exp %d\n", bss->kern_ver, kcfg->LINUX_KERNEL_VERSION);
+ for (i = 0; i < 4; i++)
+ ASSERT_EQ(data_dyn->out_dynarr[i], i + 10, "out_dynarr");
+
+ ASSERT_EQ(skel->bss->out_mostly_var, 123, "out_mostly_var");
+
+ ASSERT_EQ(bss->huge_arr[ARRAY_SIZE(bss->huge_arr) - 1], 123, "huge_arr");
+
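+	/* The non-mmapable .data section must refuse a user-space mmap(). */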
+ fd = bpf_map__fd(skel->maps.data_non_mmapable);
+ m = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
+ if (!ASSERT_EQ(m, MAP_FAILED, "unexpected_mmap_success"))
+ munmap(m, getpagesize());
+
+ ASSERT_EQ(bpf_map__map_flags(skel->maps.data_non_mmapable), 0, "non_mmap_flags");
+
+ elf_bytes = test_skeleton__elf_bytes(&elf_bytes_sz);
+ ASSERT_OK_PTR(elf_bytes, "elf_bytes");
+ ASSERT_GE(elf_bytes_sz, 0, "elf_bytes_sz");
+
cleanup:
test_skeleton__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c
new file mode 100644
index 000000000000..594441acb707
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google LLC. */
+
+#include <test_progs.h>
+#include "test_snprintf.skel.h"
+#include "test_snprintf_single.skel.h"
+
+#define EXP_NUM_OUT "-8 9 96 -424242 1337 DABBAD00"
+#define EXP_NUM_RET sizeof(EXP_NUM_OUT)
+
+#define EXP_IP_OUT "127.000.000.001 0000:0000:0000:0000:0000:0000:0000:0001"
+#define EXP_IP_RET sizeof(EXP_IP_OUT)
+
+/* The third specifier, %pB, depends on compiler inlining so don't check it */
+#define EXP_SYM_OUT "schedule schedule+0x0/"
+#define MIN_SYM_RET sizeof(EXP_SYM_OUT)
+
+/* The third specifier, %p, is a hashed pointer which changes on every reboot */
+#define EXP_ADDR_OUT "0000000000000000 ffff00000add4e55 "
+#define EXP_ADDR_RET sizeof(EXP_ADDR_OUT "unknownhashedptr")
+
+#define EXP_STR_OUT "str1 a b c d e longstr"
+#define EXP_STR_RET sizeof(EXP_STR_OUT)
+
+#define EXP_OVER_OUT "%over"
+#define EXP_OVER_RET 10
+
+#define EXP_PAD_OUT " 4 000"
+#define EXP_PAD_RET 900007
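+/* bpf_snprintf() returns the length the output would have needed
+ * (including the trailing NUL), so an over-wide pad makes the return
+ * value far exceed the truncated buffer contents.
+ */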
+
+#define EXP_NO_ARG_OUT "simple case"
+#define EXP_NO_ARG_RET 12
+
+#define EXP_NO_BUF_RET 29
+
+static void test_snprintf_positive(void)
+{
+ char exp_addr_out[] = EXP_ADDR_OUT;
+ char exp_sym_out[] = EXP_SYM_OUT;
+ struct test_snprintf *skel;
+
+ skel = test_snprintf__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ if (!ASSERT_OK(test_snprintf__attach(skel), "skel_attach"))
+ goto cleanup;
+
+ /* trigger tracepoint */
+ usleep(1);
+
+ ASSERT_STREQ(skel->bss->num_out, EXP_NUM_OUT, "num_out");
+ ASSERT_EQ(skel->bss->num_ret, EXP_NUM_RET, "num_ret");
+
+ ASSERT_STREQ(skel->bss->ip_out, EXP_IP_OUT, "ip_out");
+ ASSERT_EQ(skel->bss->ip_ret, EXP_IP_RET, "ip_ret");
+
+ ASSERT_OK(memcmp(skel->bss->sym_out, exp_sym_out,
+ sizeof(exp_sym_out) - 1), "sym_out");
+ ASSERT_LT(MIN_SYM_RET, skel->bss->sym_ret, "sym_ret");
+
+ ASSERT_OK(memcmp(skel->bss->addr_out, exp_addr_out,
+ sizeof(exp_addr_out) - 1), "addr_out");
+ ASSERT_EQ(skel->bss->addr_ret, EXP_ADDR_RET, "addr_ret");
+
+ ASSERT_STREQ(skel->bss->str_out, EXP_STR_OUT, "str_out");
+ ASSERT_EQ(skel->bss->str_ret, EXP_STR_RET, "str_ret");
+
+ ASSERT_STREQ(skel->bss->over_out, EXP_OVER_OUT, "over_out");
+ ASSERT_EQ(skel->bss->over_ret, EXP_OVER_RET, "over_ret");
+
+ ASSERT_STREQ(skel->bss->pad_out, EXP_PAD_OUT, "pad_out");
+ ASSERT_EQ(skel->bss->pad_ret, EXP_PAD_RET, "pad_ret");
+
+ ASSERT_STREQ(skel->bss->noarg_out, EXP_NO_ARG_OUT, "no_arg_out");
+ ASSERT_EQ(skel->bss->noarg_ret, EXP_NO_ARG_RET, "no_arg_ret");
+
+ ASSERT_EQ(skel->bss->nobuf_ret, EXP_NO_BUF_RET, "no_buf_ret");
+
+cleanup:
+ test_snprintf__destroy(skel);
+}
+
+/* Loads an eBPF object calling bpf_snprintf with up to 10 characters of fmt */
+static int load_single_snprintf(char *fmt)
+{
+ struct test_snprintf_single *skel;
+ int ret;
+
+ skel = test_snprintf_single__open();
+ if (!skel)
+ return -EINVAL;
+
+ memcpy(skel->rodata->fmt, fmt, MIN(strlen(fmt) + 1, 10));
+
+ ret = test_snprintf_single__load(skel);
+ test_snprintf_single__destroy(skel);
+
+ return ret;
+}
+
+static void test_snprintf_negative(void)
+{
+ ASSERT_OK(load_single_snprintf("valid %d"), "valid usage");
+
+ ASSERT_ERR(load_single_snprintf("0123456789"), "no terminating zero");
+ ASSERT_ERR(load_single_snprintf("%d %d"), "too many specifiers");
+ ASSERT_ERR(load_single_snprintf("%pi5"), "invalid specifier 1");
+ ASSERT_ERR(load_single_snprintf("%a"), "invalid specifier 2");
+ ASSERT_ERR(load_single_snprintf("%"), "invalid specifier 3");
+ ASSERT_ERR(load_single_snprintf("%12345678"), "invalid specifier 4");
+ ASSERT_ERR(load_single_snprintf("%--------"), "invalid specifier 5");
+ ASSERT_ERR(load_single_snprintf("%lc"), "invalid specifier 6");
+ ASSERT_ERR(load_single_snprintf("%llc"), "invalid specifier 7");
+ ASSERT_ERR(load_single_snprintf("\x80"), "non ascii character");
+ ASSERT_ERR(load_single_snprintf("\x1"), "non printable character");
+ ASSERT_ERR(load_single_snprintf("%p%"), "invalid specifier 8");
+ ASSERT_ERR(load_single_snprintf("%s%"), "invalid specifier 9");
+}
+
+void test_snprintf(void)
+{
+ if (test__start_subtest("snprintf_positive"))
+ test_snprintf_positive();
+ if (test__start_subtest("snprintf_negative"))
+ test_snprintf_negative();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c b/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c
index 686b40f11a45..dd41b826be30 100644
--- a/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c
@@ -6,7 +6,7 @@
/* Demonstrate that bpf_snprintf_btf succeeds and that various data types
* are formatted correctly.
*/
-void test_snprintf_btf(void)
+void serial_test_snprintf_btf(void)
{
struct netif_receive_skb *skel;
struct netif_receive_skb__bss *bss;
@@ -42,9 +42,7 @@ void test_snprintf_btf(void)
* and it set expected return values from bpf_trace_printk()s
* and all tests ran.
*/
- if (CHECK(bss->ret <= 0,
- "bpf_snprintf_btf: got return value",
- "ret <= 0 %ld test %d\n", bss->ret, bss->ran_subtests))
+ if (!ASSERT_GT(bss->ret, 0, "bpf_snprintf_ret"))
goto cleanup;
if (CHECK(bss->ran_subtests == 0, "check if subtests ran",
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_addr.c b/tools/testing/selftests/bpf/prog_tests/sock_addr.c
new file mode 100644
index 000000000000..b2efabbed220
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sock_addr.c
@@ -0,0 +1,2660 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <sys/un.h>
+
+#include "test_progs.h"
+
+#include "sock_addr_kern.skel.h"
+#include "bind4_prog.skel.h"
+#include "bind6_prog.skel.h"
+#include "connect_unix_prog.skel.h"
+#include "connect4_prog.skel.h"
+#include "connect6_prog.skel.h"
+#include "sendmsg4_prog.skel.h"
+#include "sendmsg6_prog.skel.h"
+#include "recvmsg4_prog.skel.h"
+#include "recvmsg6_prog.skel.h"
+#include "sendmsg_unix_prog.skel.h"
+#include "recvmsg_unix_prog.skel.h"
+#include "getsockname4_prog.skel.h"
+#include "getsockname6_prog.skel.h"
+#include "getsockname_unix_prog.skel.h"
+#include "getpeername4_prog.skel.h"
+#include "getpeername6_prog.skel.h"
+#include "getpeername_unix_prog.skel.h"
+#include "network_helpers.h"
+
+#define TEST_NS "sock_addr"
+#define TEST_IF_PREFIX "test_sock_addr"
+#define TEST_IPV4 "127.0.0.4"
+#define TEST_IPV6 "::6"
+
+#define SERV4_IP "192.168.1.254"
+#define SERV4_REWRITE_IP "127.0.0.1"
+#define SRC4_IP "172.16.0.1"
+#define SRC4_REWRITE_IP TEST_IPV4
+#define SERV4_PORT 4040
+#define SERV4_REWRITE_PORT 4444
+
+#define SERV6_IP "face:b00c:1234:5678::abcd"
+#define SERV6_REWRITE_IP "::1"
+#define SERV6_V4MAPPED_IP "::ffff:192.168.0.4"
+#define SRC6_IP "::1"
+#define SRC6_REWRITE_IP TEST_IPV6
+#define WILDCARD6_IP "::"
+#define SERV6_PORT 6060
+#define SERV6_REWRITE_PORT 6666
+
+#define SERVUN_ADDRESS "bpf_cgroup_unix_test"
+#define SERVUN_REWRITE_ADDRESS "bpf_cgroup_unix_test_rewrite"
+#define SRCUN_ADDRESS "bpf_cgroup_unix_test_src"
+
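+/* Run a cleanup operation without clobbering the errno under test. */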
+#define save_errno_do(op) ({ int __save = errno; op; errno = __save; })
+
+enum sock_addr_test_type {
+ SOCK_ADDR_TEST_BIND,
+ SOCK_ADDR_TEST_CONNECT,
+ SOCK_ADDR_TEST_SENDMSG,
+ SOCK_ADDR_TEST_RECVMSG,
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ SOCK_ADDR_TEST_GETPEERNAME,
+};
+
+typedef void *(*load_fn)(int cgroup_fd,
+ enum bpf_attach_type attach_type,
+ bool expect_reject);
+typedef void (*destroy_fn)(void *skel);
+
+static int cmp_addr(const struct sockaddr_storage *addr1, socklen_t addr1_len,
+ const struct sockaddr_storage *addr2, socklen_t addr2_len,
+ bool cmp_port);
+
+struct init_sock_args {
+ int af;
+ int type;
+};
+
+struct addr_args {
+ char addr[sizeof(struct sockaddr_storage)];
+ int addrlen;
+};
+
+struct sendmsg_args {
+ struct addr_args addr;
+ char msg[10];
+ int msglen;
+};
+
+static struct sock_addr_kern *skel;
+
+static int run_bpf_prog(const char *prog_name, void *ctx, int ctx_size)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct bpf_program *prog;
+ int prog_fd, err;
+
+ topts.ctx_in = ctx;
+ topts.ctx_size_in = ctx_size;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto err;
+
+ prog_fd = bpf_program__fd(prog);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, prog_name))
+ goto err;
+
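+	/* Kernel-side ops return 0 or -errno in retval; mirror it as a
+	 * syscall-style result.
+	 */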
+ err = topts.retval;
+ errno = -topts.retval;
+ goto out;
+err:
+ err = -1;
+out:
+ return err;
+}
+
+static int kernel_init_sock(int af, int type, int protocol)
+{
+ struct init_sock_args args = {
+ .af = af,
+ .type = type,
+ };
+
+ return run_bpf_prog("init_sock", &args, sizeof(args));
+}
+
+static int kernel_close_sock(int fd)
+{
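+	/* fd is unused: the kernel-side helpers operate on the one socket
+	 * they keep internally.
+	 */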
+ return run_bpf_prog("close_sock", NULL, 0);
+}
+
+static int sock_addr_op(const char *name, struct sockaddr *addr,
+ socklen_t *addrlen, bool expect_change)
+{
+ struct addr_args args;
+ int err;
+
+ if (addrlen)
+ args.addrlen = *addrlen;
+
+ if (addr)
+ memcpy(&args.addr, addr, *addrlen);
+
+ err = run_bpf_prog(name, &args, sizeof(args));
+
+ if (!expect_change && addr)
+ if (!ASSERT_EQ(cmp_addr((struct sockaddr_storage *)addr,
+ *addrlen,
+ (struct sockaddr_storage *)&args.addr,
+ args.addrlen, 1),
+ 0, "address_param_modified"))
+ return -1;
+
+ if (addrlen)
+ *addrlen = args.addrlen;
+
+ if (addr)
+ memcpy(addr, &args.addr, *addrlen);
+
+ return err;
+}
+
+static int send_msg_op(const char *name, struct sockaddr *addr,
+ socklen_t addrlen, const char *msg, int msglen)
+{
+ struct sendmsg_args args;
+ int err;
+
+ memset(&args, 0, sizeof(args));
+ memcpy(&args.addr.addr, addr, addrlen);
+ args.addr.addrlen = addrlen;
+ memcpy(args.msg, msg, msglen);
+ args.msglen = msglen;
+
+ err = run_bpf_prog(name, &args, sizeof(args));
+
+ if (!ASSERT_EQ(cmp_addr((struct sockaddr_storage *)addr,
+ addrlen,
+ (struct sockaddr_storage *)&args.addr.addr,
+ args.addr.addrlen, 1),
+ 0, "address_param_modified"))
+ return -1;
+
+ return err;
+}
+
+static int kernel_connect(struct sockaddr *addr, socklen_t addrlen)
+{
+ return sock_addr_op("kernel_connect", addr, &addrlen, false);
+}
+
+static int kernel_bind(int fd, struct sockaddr *addr, socklen_t addrlen)
+{
+ return sock_addr_op("kernel_bind", addr, &addrlen, false);
+}
+
+static int kernel_listen(void)
+{
+ return sock_addr_op("kernel_listen", NULL, NULL, false);
+}
+
+static int kernel_sendmsg(int fd, struct sockaddr *addr, socklen_t addrlen,
+ char *msg, int msglen)
+{
+ return send_msg_op("kernel_sendmsg", addr, addrlen, msg, msglen);
+}
+
+static int sock_sendmsg(int fd, struct sockaddr *addr, socklen_t addrlen,
+ char *msg, int msglen)
+{
+ return send_msg_op("sock_sendmsg", addr, addrlen, msg, msglen);
+}
+
+static int kernel_getsockname(int fd, struct sockaddr *addr, socklen_t *addrlen)
+{
+ return sock_addr_op("kernel_getsockname", addr, addrlen, true);
+}
+
+static int kernel_getpeername(int fd, struct sockaddr *addr, socklen_t *addrlen)
+{
+ return sock_addr_op("kernel_getpeername", addr, addrlen, true);
+}
+
+int kernel_connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t addrlen,
+ const struct network_helper_opts *opts)
+{
+ int err;
+
+ if (!ASSERT_OK(kernel_init_sock(addr->ss_family, type, 0),
+ "kernel_init_sock"))
+ goto err;
+
+ if (kernel_connect((struct sockaddr *)addr, addrlen) < 0)
+ goto err;
+
+ /* Test code expects a "file descriptor" on success. */
+ err = 1;
+ goto out;
+err:
+ err = -1;
+ save_errno_do(ASSERT_OK(kernel_close_sock(0), "kernel_close_sock"));
+out:
+ return err;
+}
+
+int kernel_start_server(int family, int type, const char *addr_str, __u16 port,
+ int timeout_ms)
+{
+ struct sockaddr_storage addr;
+ socklen_t addrlen;
+ int err;
+
+ if (!ASSERT_OK(kernel_init_sock(family, type, 0), "kernel_init_sock"))
+ goto err;
+
+ if (make_sockaddr(family, addr_str, port, &addr, &addrlen))
+ goto err;
+
+ if (kernel_bind(0, (struct sockaddr *)&addr, addrlen) < 0)
+ goto err;
+
+ if (type == SOCK_STREAM) {
+ if (!ASSERT_OK(kernel_listen(), "kernel_listen"))
+ goto err;
+ }
+
+ /* Test code expects a "file descriptor" on success. */
+ err = 1;
+ goto out;
+err:
+ err = -1;
+ save_errno_do(ASSERT_OK(kernel_close_sock(0), "kernel_close_sock"));
+out:
+ return err;
+}
+
+struct sock_ops {
+ int (*connect_to_addr)(int type, const struct sockaddr_storage *addr,
+ socklen_t addrlen,
+ const struct network_helper_opts *opts);
+ int (*start_server)(int family, int type, const char *addr_str,
+ __u16 port, int timeout_ms);
+	int (*socket)(int family, int type, int protocol);
+ int (*bind)(int fd, struct sockaddr *addr, socklen_t addrlen);
+ int (*getsockname)(int fd, struct sockaddr *addr, socklen_t *addrlen);
+ int (*getpeername)(int fd, struct sockaddr *addr, socklen_t *addrlen);
+ int (*sendmsg)(int fd, struct sockaddr *addr, socklen_t addrlen,
+ char *msg, int msglen);
+ int (*close)(int fd);
+};
+
+static int user_sendmsg(int fd, struct sockaddr *addr, socklen_t addrlen,
+ char *msg, int msglen)
+{
+ struct msghdr hdr;
+ struct iovec iov;
+
+ memset(&iov, 0, sizeof(iov));
+ iov.iov_base = msg;
+ iov.iov_len = msglen;
+
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.msg_name = (void *)addr;
+ hdr.msg_namelen = addrlen;
+ hdr.msg_iov = &iov;
+ hdr.msg_iovlen = 1;
+
+ return sendmsg(fd, &hdr, 0);
+}
+
+static int user_bind(int fd, struct sockaddr *addr, socklen_t addrlen)
+{
+ return bind(fd, (const struct sockaddr *)addr, addrlen);
+}
+
+struct sock_ops user_ops = {
+ .connect_to_addr = connect_to_addr,
+ .start_server = start_server,
+ .socket = socket,
+ .bind = user_bind,
+ .getsockname = getsockname,
+ .getpeername = getpeername,
+ .sendmsg = user_sendmsg,
+ .close = close,
+};
+
+struct sock_ops kern_ops_sock_sendmsg = {
+ .connect_to_addr = kernel_connect_to_addr,
+ .start_server = kernel_start_server,
+ .socket = kernel_init_sock,
+ .bind = kernel_bind,
+ .getsockname = kernel_getsockname,
+ .getpeername = kernel_getpeername,
+ .sendmsg = sock_sendmsg,
+ .close = kernel_close_sock,
+};
+
+struct sock_ops kern_ops_kernel_sendmsg = {
+ .connect_to_addr = kernel_connect_to_addr,
+ .start_server = kernel_start_server,
+ .socket = kernel_init_sock,
+ .bind = kernel_bind,
+ .getsockname = kernel_getsockname,
+ .getpeername = kernel_getpeername,
+ .sendmsg = kernel_sendmsg,
+ .close = kernel_close_sock,
+};
+
+struct sock_addr_test {
+ enum sock_addr_test_type type;
+ const char *name;
+ /* BPF prog properties */
+ load_fn loadfn;
+ destroy_fn destroyfn;
+ enum bpf_attach_type attach_type;
+ /* Socket operations */
+ struct sock_ops *ops;
+ /* Socket properties */
+ int socket_family;
+ int socket_type;
+ /* IP:port pairs for BPF prog to override */
+ const char *requested_addr;
+ unsigned short requested_port;
+ const char *expected_addr;
+ unsigned short expected_port;
+ const char *expected_src_addr;
+ /* Expected test result */
+ enum {
+ LOAD_REJECT,
+ ATTACH_REJECT,
+ SYSCALL_EPERM,
+ SYSCALL_ENOTSUPP,
+ SUCCESS,
+ } expected_result;
+};
+
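+/* Each BPF_SKEL_FUNCS*() invocation generates a load/destroy callback
+ * pair: the _RAW variant attaches with bpf_prog_attach() so attach-time
+ * rejection can be exercised, while BPF_SKEL_FUNCS() attaches through
+ * bpf_program__attach_cgroup() on the skeleton link.
+ */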
+#define BPF_SKEL_FUNCS_RAW(skel_name, prog_name) \
+static void *prog_name##_load_raw(int cgroup_fd, \
+ enum bpf_attach_type attach_type, \
+ bool expect_reject) \
+{ \
+ struct skel_name *skel = skel_name##__open(); \
+ int prog_fd = -1; \
+ if (!ASSERT_OK_PTR(skel, "skel_open")) \
+ goto cleanup; \
+ if (!ASSERT_OK(skel_name##__load(skel), "load")) \
+ goto cleanup; \
+ prog_fd = bpf_program__fd(skel->progs.prog_name); \
+ if (!ASSERT_GT(prog_fd, 0, "prog_fd")) \
+ goto cleanup; \
+	if (bpf_prog_attach(prog_fd, cgroup_fd, attach_type, \
+			    BPF_F_ALLOW_OVERRIDE)) { \
+ ASSERT_TRUE(expect_reject, "unexpected rejection"); \
+ goto cleanup; \
+ } \
+ if (!ASSERT_FALSE(expect_reject, "expected rejection")) \
+ goto cleanup; \
+cleanup: \
+ if (prog_fd > 0) \
+ bpf_prog_detach(cgroup_fd, attach_type); \
+ skel_name##__destroy(skel); \
+ return NULL; \
+} \
+static void prog_name##_destroy_raw(void *progfd) \
+{ \
+ /* No-op. *_load_raw does all cleanup. */ \
+} \
+
+#define BPF_SKEL_FUNCS(skel_name, prog_name) \
+static void *prog_name##_load(int cgroup_fd, \
+ enum bpf_attach_type attach_type, \
+ bool expect_reject) \
+{ \
+ struct skel_name *skel = skel_name##__open(); \
+ if (!ASSERT_OK_PTR(skel, "skel_open")) \
+ goto cleanup; \
+ if (!ASSERT_OK(bpf_program__set_expected_attach_type(skel->progs.prog_name, \
+ attach_type), \
+ "set_expected_attach_type")) \
+ goto cleanup; \
+ if (skel_name##__load(skel)) { \
+ ASSERT_TRUE(expect_reject, "unexpected rejection"); \
+ goto cleanup; \
+ } \
+ if (!ASSERT_FALSE(expect_reject, "expected rejection")) \
+ goto cleanup; \
+ skel->links.prog_name = bpf_program__attach_cgroup( \
+ skel->progs.prog_name, cgroup_fd); \
+ if (!ASSERT_OK_PTR(skel->links.prog_name, "prog_attach")) \
+ goto cleanup; \
+ return skel; \
+cleanup: \
+ skel_name##__destroy(skel); \
+ return NULL; \
+} \
+static void prog_name##_destroy(void *skel) \
+{ \
+ skel_name##__destroy(skel); \
+}
+
+BPF_SKEL_FUNCS(bind4_prog, bind_v4_prog);
+BPF_SKEL_FUNCS_RAW(bind4_prog, bind_v4_prog);
+BPF_SKEL_FUNCS(bind4_prog, bind_v4_deny_prog);
+BPF_SKEL_FUNCS(bind6_prog, bind_v6_prog);
+BPF_SKEL_FUNCS_RAW(bind6_prog, bind_v6_prog);
+BPF_SKEL_FUNCS(bind6_prog, bind_v6_deny_prog);
+BPF_SKEL_FUNCS(connect4_prog, connect_v4_prog);
+BPF_SKEL_FUNCS_RAW(connect4_prog, connect_v4_prog);
+BPF_SKEL_FUNCS(connect4_prog, connect_v4_deny_prog);
+BPF_SKEL_FUNCS(connect6_prog, connect_v6_prog);
+BPF_SKEL_FUNCS_RAW(connect6_prog, connect_v6_prog);
+BPF_SKEL_FUNCS(connect6_prog, connect_v6_deny_prog);
+BPF_SKEL_FUNCS(connect_unix_prog, connect_unix_prog);
+BPF_SKEL_FUNCS_RAW(connect_unix_prog, connect_unix_prog);
+BPF_SKEL_FUNCS(connect_unix_prog, connect_unix_deny_prog);
+BPF_SKEL_FUNCS(sendmsg4_prog, sendmsg_v4_prog);
+BPF_SKEL_FUNCS_RAW(sendmsg4_prog, sendmsg_v4_prog);
+BPF_SKEL_FUNCS(sendmsg4_prog, sendmsg_v4_deny_prog);
+BPF_SKEL_FUNCS(sendmsg6_prog, sendmsg_v6_prog);
+BPF_SKEL_FUNCS_RAW(sendmsg6_prog, sendmsg_v6_prog);
+BPF_SKEL_FUNCS(sendmsg6_prog, sendmsg_v6_deny_prog);
+BPF_SKEL_FUNCS(sendmsg6_prog, sendmsg_v6_preserve_dst_prog);
+BPF_SKEL_FUNCS(sendmsg6_prog, sendmsg_v6_v4mapped_prog);
+BPF_SKEL_FUNCS(sendmsg6_prog, sendmsg_v6_wildcard_prog);
+BPF_SKEL_FUNCS(sendmsg_unix_prog, sendmsg_unix_prog);
+BPF_SKEL_FUNCS_RAW(sendmsg_unix_prog, sendmsg_unix_prog);
+BPF_SKEL_FUNCS(sendmsg_unix_prog, sendmsg_unix_deny_prog);
+BPF_SKEL_FUNCS(recvmsg4_prog, recvmsg4_prog);
+BPF_SKEL_FUNCS_RAW(recvmsg4_prog, recvmsg4_prog);
+BPF_SKEL_FUNCS(recvmsg6_prog, recvmsg6_prog);
+BPF_SKEL_FUNCS_RAW(recvmsg6_prog, recvmsg6_prog);
+BPF_SKEL_FUNCS(recvmsg_unix_prog, recvmsg_unix_prog);
+BPF_SKEL_FUNCS_RAW(recvmsg_unix_prog, recvmsg_unix_prog);
+BPF_SKEL_FUNCS(getsockname_unix_prog, getsockname_unix_prog);
+BPF_SKEL_FUNCS_RAW(getsockname_unix_prog, getsockname_unix_prog);
+BPF_SKEL_FUNCS(getsockname4_prog, getsockname_v4_prog);
+BPF_SKEL_FUNCS_RAW(getsockname4_prog, getsockname_v4_prog);
+BPF_SKEL_FUNCS(getsockname6_prog, getsockname_v6_prog);
+BPF_SKEL_FUNCS_RAW(getsockname6_prog, getsockname_v6_prog);
+BPF_SKEL_FUNCS(getpeername_unix_prog, getpeername_unix_prog);
+BPF_SKEL_FUNCS_RAW(getpeername_unix_prog, getpeername_unix_prog);
+BPF_SKEL_FUNCS(getpeername4_prog, getpeername_v4_prog);
+BPF_SKEL_FUNCS_RAW(getpeername4_prog, getpeername_v4_prog);
+BPF_SKEL_FUNCS(getpeername6_prog, getpeername_v6_prog);
+BPF_SKEL_FUNCS_RAW(getpeername6_prog, getpeername_v6_prog);
+
+static struct sock_addr_test tests[] = {
+ /* bind - system calls */
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind4: bind (stream)",
+ bind_v4_prog_load,
+ bind_v4_prog_destroy,
+ BPF_CGROUP_INET4_BIND,
+ &user_ops,
+ AF_INET,
+ SOCK_STREAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind4: bind deny (stream)",
+ bind_v4_deny_prog_load,
+ bind_v4_deny_prog_destroy,
+ BPF_CGROUP_INET4_BIND,
+ &user_ops,
+ AF_INET,
+ SOCK_STREAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ NULL,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind4: bind (dgram)",
+ bind_v4_prog_load,
+ bind_v4_prog_destroy,
+ BPF_CGROUP_INET4_BIND,
+ &user_ops,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind4: bind deny (dgram)",
+ bind_v4_deny_prog_load,
+ bind_v4_deny_prog_destroy,
+ BPF_CGROUP_INET4_BIND,
+ &user_ops,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ NULL,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind4: load prog with wrong expected attach type",
+ bind_v4_prog_load,
+ bind_v4_prog_destroy,
+ BPF_CGROUP_INET6_BIND,
+ &user_ops,
+ AF_INET,
+ SOCK_STREAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ LOAD_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind4: attach prog with wrong attach type",
+ bind_v4_prog_load_raw,
+ bind_v4_prog_destroy_raw,
+ BPF_CGROUP_INET6_BIND,
+ &user_ops,
+ AF_INET,
+ SOCK_STREAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ ATTACH_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind6: bind (stream)",
+ bind_v6_prog_load,
+ bind_v6_prog_destroy,
+ BPF_CGROUP_INET6_BIND,
+ &user_ops,
+ AF_INET6,
+ SOCK_STREAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind6: bind deny (stream)",
+ bind_v6_deny_prog_load,
+ bind_v6_deny_prog_destroy,
+ BPF_CGROUP_INET6_BIND,
+ &user_ops,
+ AF_INET6,
+ SOCK_STREAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ NULL,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind6: bind (dgram)",
+ bind_v6_prog_load,
+ bind_v6_prog_destroy,
+ BPF_CGROUP_INET6_BIND,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind6: bind deny (dgram)",
+ bind_v6_deny_prog_load,
+ bind_v6_deny_prog_destroy,
+ BPF_CGROUP_INET6_BIND,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ NULL,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind6: load prog with wrong expected attach type",
+ bind_v6_prog_load,
+ bind_v6_prog_destroy,
+ BPF_CGROUP_INET4_BIND,
+ &user_ops,
+ AF_INET6,
+ SOCK_STREAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ LOAD_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind6: attach prog with wrong attach type",
+ bind_v6_prog_load_raw,
+ bind_v6_prog_destroy_raw,
+ BPF_CGROUP_INET4_BIND,
+ &user_ops,
+		AF_INET6,
+ SOCK_STREAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ ATTACH_REJECT,
+ },
+
+ /* bind - kernel calls */
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind4: kernel_bind (stream)",
+ bind_v4_prog_load,
+ bind_v4_prog_destroy,
+ BPF_CGROUP_INET4_BIND,
+ &kern_ops_sock_sendmsg,
+ AF_INET,
+ SOCK_STREAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind4: kernel_bind deny (stream)",
+ bind_v4_deny_prog_load,
+ bind_v4_deny_prog_destroy,
+ BPF_CGROUP_INET4_BIND,
+ &kern_ops_sock_sendmsg,
+ AF_INET,
+ SOCK_STREAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ NULL,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind4: kernel_bind (dgram)",
+ bind_v4_prog_load,
+ bind_v4_prog_destroy,
+ BPF_CGROUP_INET4_BIND,
+ &kern_ops_sock_sendmsg,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind4: kernel_bind deny (dgram)",
+ bind_v4_deny_prog_load,
+ bind_v4_deny_prog_destroy,
+ BPF_CGROUP_INET4_BIND,
+ &kern_ops_sock_sendmsg,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ NULL,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind6: kernel_bind (stream)",
+ bind_v6_prog_load,
+ bind_v6_prog_destroy,
+ BPF_CGROUP_INET6_BIND,
+ &kern_ops_sock_sendmsg,
+ AF_INET6,
+ SOCK_STREAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind6: kernel_bind deny (stream)",
+ bind_v6_deny_prog_load,
+ bind_v6_deny_prog_destroy,
+ BPF_CGROUP_INET6_BIND,
+ &kern_ops_sock_sendmsg,
+ AF_INET6,
+ SOCK_STREAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ NULL,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind6: kernel_bind (dgram)",
+ bind_v6_prog_load,
+ bind_v6_prog_destroy,
+ BPF_CGROUP_INET6_BIND,
+ &kern_ops_sock_sendmsg,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_BIND,
+ "bind6: kernel_bind deny (dgram)",
+ bind_v6_deny_prog_load,
+ bind_v6_deny_prog_destroy,
+ BPF_CGROUP_INET6_BIND,
+ &kern_ops_sock_sendmsg,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ NULL,
+ SYSCALL_EPERM,
+ },
+
+ /* connect - system calls */
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect4: connect (stream)",
+ connect_v4_prog_load,
+ connect_v4_prog_destroy,
+ BPF_CGROUP_INET4_CONNECT,
+ &user_ops,
+ AF_INET,
+ SOCK_STREAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SRC4_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect4: connect deny (stream)",
+ connect_v4_deny_prog_load,
+ connect_v4_deny_prog_destroy,
+ BPF_CGROUP_INET4_CONNECT,
+ &user_ops,
+ AF_INET,
+ SOCK_STREAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SRC4_REWRITE_IP,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect4: connect (dgram)",
+ connect_v4_prog_load,
+ connect_v4_prog_destroy,
+ BPF_CGROUP_INET4_CONNECT,
+ &user_ops,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SRC4_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect4: connect deny (dgram)",
+ connect_v4_deny_prog_load,
+ connect_v4_deny_prog_destroy,
+ BPF_CGROUP_INET4_CONNECT,
+ &user_ops,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SRC4_REWRITE_IP,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect4: load prog with wrong expected attach type",
+ connect_v4_prog_load,
+ connect_v4_prog_destroy,
+ BPF_CGROUP_INET6_CONNECT,
+ &user_ops,
+ AF_INET,
+ SOCK_STREAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ LOAD_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect4: attach prog with wrong attach type",
+ connect_v4_prog_load_raw,
+ connect_v4_prog_destroy_raw,
+ BPF_CGROUP_INET6_CONNECT,
+ &user_ops,
+ AF_INET,
+ SOCK_STREAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ ATTACH_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect6: connect (stream)",
+ connect_v6_prog_load,
+ connect_v6_prog_destroy,
+ BPF_CGROUP_INET6_CONNECT,
+ &user_ops,
+ AF_INET6,
+ SOCK_STREAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect6: connect deny (stream)",
+ connect_v6_deny_prog_load,
+ connect_v6_deny_prog_destroy,
+ BPF_CGROUP_INET6_CONNECT,
+ &user_ops,
+ AF_INET6,
+ SOCK_STREAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect6: connect (dgram)",
+ connect_v6_prog_load,
+ connect_v6_prog_destroy,
+ BPF_CGROUP_INET6_CONNECT,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect6: connect deny (dgram)",
+ connect_v6_deny_prog_load,
+ connect_v6_deny_prog_destroy,
+ BPF_CGROUP_INET6_CONNECT,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect6: load prog with wrong expected attach type",
+ connect_v6_prog_load,
+ connect_v6_prog_destroy,
+ BPF_CGROUP_INET4_CONNECT,
+ &user_ops,
+ AF_INET6,
+ SOCK_STREAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ LOAD_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect6: attach prog with wrong attach type",
+ connect_v6_prog_load_raw,
+ connect_v6_prog_destroy_raw,
+ BPF_CGROUP_INET4_CONNECT,
+ &user_ops,
+		AF_INET6,
+ SOCK_STREAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ ATTACH_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect_unix: connect (stream)",
+ connect_unix_prog_load,
+ connect_unix_prog_destroy,
+ BPF_CGROUP_UNIX_CONNECT,
+ &user_ops,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect_unix: connect deny (stream)",
+ connect_unix_deny_prog_load,
+ connect_unix_deny_prog_destroy,
+ BPF_CGROUP_UNIX_CONNECT,
+ &user_ops,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect_unix: attach prog with wrong attach type",
+ connect_unix_prog_load_raw,
+ connect_unix_prog_destroy_raw,
+ BPF_CGROUP_INET4_CONNECT,
+ &user_ops,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ ATTACH_REJECT,
+ },
+
+ /* connect - kernel calls */
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect4: kernel_connect (stream)",
+ connect_v4_prog_load,
+ connect_v4_prog_destroy,
+ BPF_CGROUP_INET4_CONNECT,
+ &kern_ops_sock_sendmsg,
+ AF_INET,
+ SOCK_STREAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SRC4_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect4: kernel_connect deny (stream)",
+ connect_v4_deny_prog_load,
+ connect_v4_deny_prog_destroy,
+ BPF_CGROUP_INET4_CONNECT,
+ &kern_ops_sock_sendmsg,
+ AF_INET,
+ SOCK_STREAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SRC4_REWRITE_IP,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect4: kernel_connect (dgram)",
+ connect_v4_prog_load,
+ connect_v4_prog_destroy,
+ BPF_CGROUP_INET4_CONNECT,
+ &kern_ops_sock_sendmsg,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SRC4_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect4: kernel_connect deny (dgram)",
+ connect_v4_deny_prog_load,
+ connect_v4_deny_prog_destroy,
+ BPF_CGROUP_INET4_CONNECT,
+ &kern_ops_sock_sendmsg,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SRC4_REWRITE_IP,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect6: kernel_connect (stream)",
+ connect_v6_prog_load,
+ connect_v6_prog_destroy,
+ BPF_CGROUP_INET6_CONNECT,
+ &kern_ops_sock_sendmsg,
+ AF_INET6,
+ SOCK_STREAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect6: kernel_connect deny (stream)",
+ connect_v6_deny_prog_load,
+ connect_v6_deny_prog_destroy,
+ BPF_CGROUP_INET6_CONNECT,
+ &kern_ops_sock_sendmsg,
+ AF_INET6,
+ SOCK_STREAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect6: kernel_connect (dgram)",
+ connect_v6_prog_load,
+ connect_v6_prog_destroy,
+ BPF_CGROUP_INET6_CONNECT,
+ &kern_ops_sock_sendmsg,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+ "connect6: kernel_connect deny (dgram)",
+ connect_v6_deny_prog_load,
+ connect_v6_deny_prog_destroy,
+ BPF_CGROUP_INET6_CONNECT,
+ &kern_ops_sock_sendmsg,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+		"connect_unix: kernel_connect (stream)",
+ connect_unix_prog_load,
+ connect_unix_prog_destroy,
+ BPF_CGROUP_UNIX_CONNECT,
+ &kern_ops_sock_sendmsg,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_CONNECT,
+		"connect_unix: kernel_connect deny (stream)",
+ connect_unix_deny_prog_load,
+ connect_unix_deny_prog_destroy,
+ BPF_CGROUP_UNIX_CONNECT,
+ &kern_ops_sock_sendmsg,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ SYSCALL_EPERM,
+ },
+
+ /* sendmsg - system calls */
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg4: sendmsg (dgram)",
+ sendmsg_v4_prog_load,
+ sendmsg_v4_prog_destroy,
+ BPF_CGROUP_UDP4_SENDMSG,
+ &user_ops,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SRC4_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg4: sendmsg deny (dgram)",
+ sendmsg_v4_deny_prog_load,
+ sendmsg_v4_deny_prog_destroy,
+ BPF_CGROUP_UDP4_SENDMSG,
+ &user_ops,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SRC4_REWRITE_IP,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg4: load prog with wrong expected attach type",
+ sendmsg_v4_prog_load,
+ sendmsg_v4_prog_destroy,
+ BPF_CGROUP_UDP6_SENDMSG,
+ &user_ops,
+ AF_INET,
+ SOCK_DGRAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ LOAD_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg4: attach prog with wrong attach type",
+ sendmsg_v4_prog_load_raw,
+ sendmsg_v4_prog_destroy_raw,
+ BPF_CGROUP_UDP6_SENDMSG,
+ &user_ops,
+ AF_INET,
+ SOCK_DGRAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ ATTACH_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg6: sendmsg (dgram)",
+ sendmsg_v6_prog_load,
+ sendmsg_v6_prog_destroy,
+ BPF_CGROUP_UDP6_SENDMSG,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg6: sendmsg [::] (BSD'ism) (dgram)",
+ sendmsg_v6_preserve_dst_prog_load,
+ sendmsg_v6_preserve_dst_prog_destroy,
+ BPF_CGROUP_UDP6_SENDMSG,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ WILDCARD6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_PORT,
+ SRC6_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg6: sendmsg deny (dgram)",
+ sendmsg_v6_deny_prog_load,
+ sendmsg_v6_deny_prog_destroy,
+ BPF_CGROUP_UDP6_SENDMSG,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg6: sendmsg IPv4-mapped IPv6 (dgram)",
+ sendmsg_v6_v4mapped_prog_load,
+ sendmsg_v6_v4mapped_prog_destroy,
+ BPF_CGROUP_UDP6_SENDMSG,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SYSCALL_ENOTSUPP,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg6: sendmsg dst IP = [::] (BSD'ism) (dgram)",
+ sendmsg_v6_wildcard_prog_load,
+ sendmsg_v6_wildcard_prog_destroy,
+ BPF_CGROUP_UDP6_SENDMSG,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg6: load prog with wrong expected attach type",
+ sendmsg_v6_prog_load,
+ sendmsg_v6_prog_destroy,
+ BPF_CGROUP_UDP4_SENDMSG,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ LOAD_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg6: attach prog with wrong attach type",
+ sendmsg_v6_prog_load_raw,
+ sendmsg_v6_prog_destroy_raw,
+ BPF_CGROUP_UDP4_SENDMSG,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ ATTACH_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg_unix: sendmsg (dgram)",
+ sendmsg_unix_prog_load,
+ sendmsg_unix_prog_destroy,
+ BPF_CGROUP_UNIX_SENDMSG,
+ &user_ops,
+ AF_UNIX,
+ SOCK_DGRAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg_unix: sendmsg deny (dgram)",
+ sendmsg_unix_deny_prog_load,
+ sendmsg_unix_deny_prog_destroy,
+ BPF_CGROUP_UNIX_SENDMSG,
+ &user_ops,
+ AF_UNIX,
+ SOCK_DGRAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg_unix: attach prog with wrong attach type",
+ sendmsg_unix_prog_load_raw,
+ sendmsg_unix_prog_destroy_raw,
+ BPF_CGROUP_UDP4_SENDMSG,
+ &user_ops,
+ AF_UNIX,
+ SOCK_DGRAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ ATTACH_REJECT,
+ },
+
+ /* sendmsg - kernel calls (sock_sendmsg) */
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg4: sock_sendmsg (dgram)",
+ sendmsg_v4_prog_load,
+ sendmsg_v4_prog_destroy,
+ BPF_CGROUP_UDP4_SENDMSG,
+ &kern_ops_sock_sendmsg,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SRC4_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg4: sock_sendmsg deny (dgram)",
+ sendmsg_v4_deny_prog_load,
+ sendmsg_v4_deny_prog_destroy,
+ BPF_CGROUP_UDP4_SENDMSG,
+ &kern_ops_sock_sendmsg,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SRC4_REWRITE_IP,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg6: sock_sendmsg (dgram)",
+ sendmsg_v6_prog_load,
+ sendmsg_v6_prog_destroy,
+ BPF_CGROUP_UDP6_SENDMSG,
+ &kern_ops_sock_sendmsg,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg6: sock_sendmsg [::] (BSD'ism) (dgram)",
+ sendmsg_v6_preserve_dst_prog_load,
+ sendmsg_v6_preserve_dst_prog_destroy,
+ BPF_CGROUP_UDP6_SENDMSG,
+ &kern_ops_sock_sendmsg,
+ AF_INET6,
+ SOCK_DGRAM,
+ WILDCARD6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_PORT,
+ SRC6_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg6: sock_sendmsg deny (dgram)",
+ sendmsg_v6_deny_prog_load,
+ sendmsg_v6_deny_prog_destroy,
+ BPF_CGROUP_UDP6_SENDMSG,
+ &kern_ops_sock_sendmsg,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg_unix: sock_sendmsg (dgram)",
+ sendmsg_unix_prog_load,
+ sendmsg_unix_prog_destroy,
+ BPF_CGROUP_UNIX_SENDMSG,
+ &kern_ops_sock_sendmsg,
+ AF_UNIX,
+ SOCK_DGRAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg_unix: sock_sendmsg deny (dgram)",
+ sendmsg_unix_deny_prog_load,
+ sendmsg_unix_deny_prog_destroy,
+ BPF_CGROUP_UNIX_SENDMSG,
+ &kern_ops_sock_sendmsg,
+ AF_UNIX,
+ SOCK_DGRAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ SYSCALL_EPERM,
+ },
+
+ /* sendmsg - kernel calls (kernel_sendmsg) */
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg4: kernel_sendmsg (dgram)",
+ sendmsg_v4_prog_load,
+ sendmsg_v4_prog_destroy,
+ BPF_CGROUP_UDP4_SENDMSG,
+ &kern_ops_kernel_sendmsg,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SRC4_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg4: kernel_sendmsg deny (dgram)",
+ sendmsg_v4_deny_prog_load,
+ sendmsg_v4_deny_prog_destroy,
+ BPF_CGROUP_UDP4_SENDMSG,
+ &kern_ops_kernel_sendmsg,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_IP,
+ SERV4_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SRC4_REWRITE_IP,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg6: kernel_sendmsg (dgram)",
+ sendmsg_v6_prog_load,
+ sendmsg_v6_prog_destroy,
+ BPF_CGROUP_UDP6_SENDMSG,
+ &kern_ops_kernel_sendmsg,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg6: kernel_sendmsg [::] (BSD'ism) (dgram)",
+ sendmsg_v6_preserve_dst_prog_load,
+ sendmsg_v6_preserve_dst_prog_destroy,
+ BPF_CGROUP_UDP6_SENDMSG,
+ &kern_ops_kernel_sendmsg,
+ AF_INET6,
+ SOCK_DGRAM,
+ WILDCARD6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_PORT,
+ SRC6_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg6: kernel_sendmsg deny (dgram)",
+ sendmsg_v6_deny_prog_load,
+ sendmsg_v6_deny_prog_destroy,
+ BPF_CGROUP_UDP6_SENDMSG,
+ &kern_ops_kernel_sendmsg,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SYSCALL_EPERM,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+		"sendmsg_unix: kernel_sendmsg (dgram)",
+ sendmsg_unix_prog_load,
+ sendmsg_unix_prog_destroy,
+ BPF_CGROUP_UNIX_SENDMSG,
+ &kern_ops_kernel_sendmsg,
+ AF_UNIX,
+ SOCK_DGRAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_SENDMSG,
+ "sendmsg_unix: kernel_sendmsg deny (dgram)",
+ sendmsg_unix_deny_prog_load,
+ sendmsg_unix_deny_prog_destroy,
+ BPF_CGROUP_UNIX_SENDMSG,
+ &kern_ops_kernel_sendmsg,
+ AF_UNIX,
+ SOCK_DGRAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ SYSCALL_EPERM,
+ },
+
+ /* recvmsg - system calls */
+ {
+ SOCK_ADDR_TEST_RECVMSG,
+ "recvmsg4: recvfrom (dgram)",
+ recvmsg4_prog_load,
+ recvmsg4_prog_destroy,
+ BPF_CGROUP_UDP4_RECVMSG,
+ &user_ops,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_RECVMSG,
+ "recvmsg4: attach prog with wrong attach type",
+ recvmsg4_prog_load_raw,
+ recvmsg4_prog_destroy_raw,
+ BPF_CGROUP_UDP6_RECVMSG,
+ &user_ops,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_IP,
+ ATTACH_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_RECVMSG,
+ "recvmsg6: recvfrom (dgram)",
+ recvmsg6_prog_load,
+ recvmsg6_prog_destroy,
+ BPF_CGROUP_UDP6_RECVMSG,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_IP,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_RECVMSG,
+ "recvmsg6: attach prog with wrong attach type",
+ recvmsg6_prog_load_raw,
+ recvmsg6_prog_destroy_raw,
+ BPF_CGROUP_UDP4_RECVMSG,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_IP,
+ ATTACH_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_RECVMSG,
+ "recvmsg_unix: recvfrom (dgram)",
+ recvmsg_unix_prog_load,
+ recvmsg_unix_prog_destroy,
+ BPF_CGROUP_UNIX_RECVMSG,
+ &user_ops,
+ AF_UNIX,
+ SOCK_DGRAM,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ SERVUN_ADDRESS,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_RECVMSG,
+ "recvmsg_unix: recvfrom (stream)",
+ recvmsg_unix_prog_load,
+ recvmsg_unix_prog_destroy,
+ BPF_CGROUP_UNIX_RECVMSG,
+ &user_ops,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ SERVUN_ADDRESS,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_RECVMSG,
+ "recvmsg_unix: attach prog with wrong attach type",
+ recvmsg_unix_prog_load_raw,
+ recvmsg_unix_prog_destroy_raw,
+ BPF_CGROUP_UDP4_RECVMSG,
+ &user_ops,
+		AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ SERVUN_ADDRESS,
+ ATTACH_REJECT,
+ },
+
+ /* getsockname - system calls */
+ {
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ "getsockname4: getsockname (stream)",
+ getsockname_v4_prog_load,
+ getsockname_v4_prog_destroy,
+ BPF_CGROUP_INET4_GETSOCKNAME,
+ &user_ops,
+ AF_INET,
+ SOCK_STREAM,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_IP,
+ SERV4_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ "getsockname4: getsockname (dgram)",
+ getsockname_v4_prog_load,
+ getsockname_v4_prog_destroy,
+ BPF_CGROUP_INET4_GETSOCKNAME,
+ &user_ops,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_IP,
+ SERV4_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ "getsockname4: attach prog with wrong attach type",
+ getsockname_v4_prog_load_raw,
+ getsockname_v4_prog_destroy_raw,
+ BPF_CGROUP_INET6_GETSOCKNAME,
+ &user_ops,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_IP,
+ SERV4_PORT,
+ NULL,
+ ATTACH_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ "getsockname6: getsockname (stream)",
+ getsockname_v6_prog_load,
+ getsockname_v6_prog_destroy,
+ BPF_CGROUP_INET6_GETSOCKNAME,
+ &user_ops,
+ AF_INET6,
+ SOCK_STREAM,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_IP,
+ SERV6_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ "getsockname6: getsockname (dgram)",
+ getsockname_v6_prog_load,
+ getsockname_v6_prog_destroy,
+ BPF_CGROUP_INET6_GETSOCKNAME,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_IP,
+ SERV6_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ "getsockname6: attach prog with wrong attach type",
+ getsockname_v6_prog_load_raw,
+ getsockname_v6_prog_destroy_raw,
+ BPF_CGROUP_INET4_GETSOCKNAME,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_IP,
+ SERV6_PORT,
+ NULL,
+ ATTACH_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ "getsockname_unix: getsockname",
+ getsockname_unix_prog_load,
+ getsockname_unix_prog_destroy,
+ BPF_CGROUP_UNIX_GETSOCKNAME,
+ &user_ops,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ "getsockname_unix: attach prog with wrong attach type",
+ getsockname_unix_prog_load_raw,
+ getsockname_unix_prog_destroy_raw,
+ BPF_CGROUP_INET4_GETSOCKNAME,
+ &user_ops,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ ATTACH_REJECT,
+ },
+
+ /* getsockname - kernel calls */
+ {
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ "getsockname4: kernel_getsockname (stream)",
+ getsockname_v4_prog_load,
+ getsockname_v4_prog_destroy,
+ BPF_CGROUP_INET4_GETSOCKNAME,
+ &kern_ops_kernel_sendmsg,
+ AF_INET,
+ SOCK_STREAM,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_IP,
+ SERV4_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ "getsockname4: kernel_getsockname (dgram)",
+ getsockname_v4_prog_load,
+ getsockname_v4_prog_destroy,
+ BPF_CGROUP_INET4_GETSOCKNAME,
+ &kern_ops_kernel_sendmsg,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_IP,
+ SERV4_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ "getsockname6: kernel_getsockname (stream)",
+ getsockname_v6_prog_load,
+ getsockname_v6_prog_destroy,
+ BPF_CGROUP_INET6_GETSOCKNAME,
+ &kern_ops_kernel_sendmsg,
+ AF_INET6,
+ SOCK_STREAM,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_IP,
+ SERV6_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ "getsockname6: kernel_getsockname (dgram)",
+ getsockname_v6_prog_load,
+ getsockname_v6_prog_destroy,
+ BPF_CGROUP_INET6_GETSOCKNAME,
+ &kern_ops_kernel_sendmsg,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_IP,
+ SERV6_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETSOCKNAME,
+ "getsockname_unix: kernel_getsockname",
+ getsockname_unix_prog_load,
+ getsockname_unix_prog_destroy,
+ BPF_CGROUP_UNIX_GETSOCKNAME,
+ &kern_ops_kernel_sendmsg,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ SUCCESS,
+ },
+
+ /* getpeername - system calls */
+ {
+ SOCK_ADDR_TEST_GETPEERNAME,
+ "getpeername4: getpeername (stream)",
+ getpeername_v4_prog_load,
+ getpeername_v4_prog_destroy,
+ BPF_CGROUP_INET4_GETPEERNAME,
+ &user_ops,
+ AF_INET,
+ SOCK_STREAM,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_IP,
+ SERV4_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETPEERNAME,
+ "getpeername4: getpeername (dgram)",
+ getpeername_v4_prog_load,
+ getpeername_v4_prog_destroy,
+ BPF_CGROUP_INET4_GETPEERNAME,
+ &user_ops,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_IP,
+ SERV4_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETPEERNAME,
+ "getpeername4: attach prog with wrong attach type",
+ getpeername_v4_prog_load_raw,
+ getpeername_v4_prog_destroy_raw,
+ BPF_CGROUP_INET6_GETSOCKNAME,
+ &user_ops,
+		AF_INET,
+ SOCK_DGRAM,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_IP,
+ SERV4_PORT,
+ NULL,
+ ATTACH_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_GETPEERNAME,
+ "getpeername6: getpeername (stream)",
+ getpeername_v6_prog_load,
+ getpeername_v6_prog_destroy,
+ BPF_CGROUP_INET6_GETPEERNAME,
+ &user_ops,
+ AF_INET6,
+ SOCK_STREAM,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_IP,
+ SERV6_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETPEERNAME,
+ "getpeername6: getpeername (dgram)",
+ getpeername_v6_prog_load,
+ getpeername_v6_prog_destroy,
+ BPF_CGROUP_INET6_GETPEERNAME,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_IP,
+ SERV6_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETPEERNAME,
+ "getpeername6: attach prog with wrong attach type",
+ getpeername_v6_prog_load_raw,
+ getpeername_v6_prog_destroy_raw,
+ BPF_CGROUP_INET4_GETSOCKNAME,
+ &user_ops,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_IP,
+ SERV6_PORT,
+ NULL,
+ ATTACH_REJECT,
+ },
+ {
+ SOCK_ADDR_TEST_GETPEERNAME,
+ "getpeername_unix: getpeername",
+ getpeername_unix_prog_load,
+ getpeername_unix_prog_destroy,
+ BPF_CGROUP_UNIX_GETPEERNAME,
+ &user_ops,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETPEERNAME,
+ "getpeername_unix: attach prog with wrong attach type",
+ getpeername_unix_prog_load_raw,
+ getpeername_unix_prog_destroy_raw,
+ BPF_CGROUP_INET4_GETSOCKNAME,
+ &user_ops,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ ATTACH_REJECT,
+ },
+
+ /* getpeername - kernel calls */
+ {
+ SOCK_ADDR_TEST_GETPEERNAME,
+ "getpeername4: kernel_getpeername (stream)",
+ getpeername_v4_prog_load,
+ getpeername_v4_prog_destroy,
+ BPF_CGROUP_INET4_GETPEERNAME,
+ &kern_ops_kernel_sendmsg,
+ AF_INET,
+ SOCK_STREAM,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_IP,
+ SERV4_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETPEERNAME,
+ "getpeername4: kernel_getpeername (dgram)",
+ getpeername_v4_prog_load,
+ getpeername_v4_prog_destroy,
+ BPF_CGROUP_INET4_GETPEERNAME,
+ &kern_ops_kernel_sendmsg,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_IP,
+ SERV4_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETPEERNAME,
+ "getpeername6: kernel_getpeername (stream)",
+ getpeername_v6_prog_load,
+ getpeername_v6_prog_destroy,
+ BPF_CGROUP_INET6_GETPEERNAME,
+ &kern_ops_kernel_sendmsg,
+ AF_INET6,
+ SOCK_STREAM,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_IP,
+ SERV6_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETPEERNAME,
+ "getpeername6: kernel_getpeername (dgram)",
+ getpeername_v6_prog_load,
+ getpeername_v6_prog_destroy,
+ BPF_CGROUP_INET6_GETPEERNAME,
+ &kern_ops_kernel_sendmsg,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_IP,
+ SERV6_PORT,
+ NULL,
+ SUCCESS,
+ },
+ {
+ SOCK_ADDR_TEST_GETPEERNAME,
+ "getpeername_unix: kernel_getpeername",
+ getpeername_unix_prog_load,
+ getpeername_unix_prog_destroy,
+ BPF_CGROUP_UNIX_GETPEERNAME,
+ &kern_ops_kernel_sendmsg,
+ AF_UNIX,
+ SOCK_STREAM,
+ SERVUN_ADDRESS,
+ 0,
+ SERVUN_REWRITE_ADDRESS,
+ 0,
+ NULL,
+ SUCCESS,
+ },
+};
+
+typedef int (*info_fn)(int, struct sockaddr *, socklen_t *);
+
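+/* Compare two socket addresses, optionally including the port. Returns 0 on
+ * a match and a non-zero value otherwise, following memcmp() conventions.
+ */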
+static int cmp_addr(const struct sockaddr_storage *addr1, socklen_t addr1_len,
+ const struct sockaddr_storage *addr2, socklen_t addr2_len,
+ bool cmp_port)
+{
+ const struct sockaddr_in *four1, *four2;
+ const struct sockaddr_in6 *six1, *six2;
+ const struct sockaddr_un *un1, *un2;
+
+ if (addr1->ss_family != addr2->ss_family)
+ return -1;
+
+ if (addr1_len != addr2_len)
+ return -1;
+
+ if (addr1->ss_family == AF_INET) {
+ four1 = (const struct sockaddr_in *)addr1;
+ four2 = (const struct sockaddr_in *)addr2;
+ return !((four1->sin_port == four2->sin_port || !cmp_port) &&
+ four1->sin_addr.s_addr == four2->sin_addr.s_addr);
+ } else if (addr1->ss_family == AF_INET6) {
+ six1 = (const struct sockaddr_in6 *)addr1;
+ six2 = (const struct sockaddr_in6 *)addr2;
+ return !((six1->sin6_port == six2->sin6_port || !cmp_port) &&
+ !memcmp(&six1->sin6_addr, &six2->sin6_addr,
+ sizeof(struct in6_addr)));
+ } else if (addr1->ss_family == AF_UNIX) {
+ un1 = (const struct sockaddr_un *)addr1;
+ un2 = (const struct sockaddr_un *)addr2;
+ return memcmp(un1, un2, addr1_len);
+ }
+
+ return -1;
+}
+
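+/* Fetch sock1's address via fn (getsockname or getpeername) and compare it
+ * against addr2.
+ */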
+static int cmp_sock_addr(info_fn fn, int sock1,
+ const struct sockaddr_storage *addr2,
+ socklen_t addr2_len, bool cmp_port)
+{
+ struct sockaddr_storage addr1;
+ socklen_t len1 = sizeof(addr1);
+
+ memset(&addr1, 0, len1);
+ if (fn(sock1, (struct sockaddr *)&addr1, (socklen_t *)&len1) != 0)
+ return -1;
+
+ return cmp_addr(&addr1, len1, addr2, addr2_len, cmp_port);
+}
+
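+/* Load the sock_addr_kern skeleton that backs the kernel-call test ops. */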
+static int load_sock_addr_kern(void)
+{
+ int err;
+
+ skel = sock_addr_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ goto err;
+
+ err = 0;
+ goto out;
+err:
+ err = -1;
+out:
+ return err;
+}
+
+static void unload_sock_addr_kern(void)
+{
+ sock_addr_kern__destroy(skel);
+}
+
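+/* Bind through the test ops, check the (possibly BPF-rewritten) local
+ * address with getsockname(), then confirm the address is connectable.
+ */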
+static int test_bind(struct sock_addr_test *test)
+{
+ struct sockaddr_storage expected_addr;
+ socklen_t expected_addr_len = sizeof(struct sockaddr_storage);
+ int serv = -1, client = -1, err;
+
+ serv = test->ops->start_server(test->socket_family, test->socket_type,
+ test->requested_addr,
+ test->requested_port, 0);
+ if (serv < 0) {
+ err = errno;
+ goto err;
+ }
+
+ err = make_sockaddr(test->socket_family,
+ test->expected_addr, test->expected_port,
+ &expected_addr, &expected_addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ err = cmp_sock_addr(test->ops->getsockname, serv, &expected_addr,
+ expected_addr_len, true);
+ if (!ASSERT_EQ(err, 0, "cmp_local_addr"))
+ goto cleanup;
+
+	/* Also verify the bound address is reachable by connecting to it. */
+ client = connect_to_addr(test->socket_type, &expected_addr, expected_addr_len, NULL);
+ if (!ASSERT_GE(client, 0, "connect_to_addr"))
+ goto cleanup;
+
+cleanup:
+ err = 0;
+err:
+ if (client != -1)
+ close(client);
+ if (serv != -1)
+ test->ops->close(serv);
+
+ return err;
+}
+
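+/* Connect through the test ops and verify the rewritten peer address and,
+ * when the test expects one, the rewritten source address.
+ */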
+static int test_connect(struct sock_addr_test *test)
+{
+ struct sockaddr_storage addr, expected_addr, expected_src_addr;
+ socklen_t addr_len = sizeof(struct sockaddr_storage),
+ expected_addr_len = sizeof(struct sockaddr_storage),
+ expected_src_addr_len = sizeof(struct sockaddr_storage);
+ int serv = -1, client = -1, err;
+
+ serv = start_server(test->socket_family, test->socket_type,
+ test->expected_addr, test->expected_port, 0);
+ if (!ASSERT_GE(serv, 0, "start_server"))
+ goto cleanup;
+
+ err = make_sockaddr(test->socket_family, test->requested_addr, test->requested_port,
+ &addr, &addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ client = test->ops->connect_to_addr(test->socket_type, &addr, addr_len,
+ NULL);
+ if (client < 0) {
+ err = errno;
+ goto err;
+ }
+
+ err = make_sockaddr(test->socket_family, test->expected_addr, test->expected_port,
+ &expected_addr, &expected_addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ if (test->expected_src_addr) {
+ err = make_sockaddr(test->socket_family, test->expected_src_addr, 0,
+ &expected_src_addr, &expected_src_addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+ }
+
+ err = cmp_sock_addr(test->ops->getpeername, client, &expected_addr,
+ expected_addr_len, true);
+ if (!ASSERT_EQ(err, 0, "cmp_peer_addr"))
+ goto cleanup;
+
+ if (test->expected_src_addr) {
+ err = cmp_sock_addr(test->ops->getsockname, client,
+ &expected_src_addr, expected_src_addr_len,
+ false);
+ if (!ASSERT_EQ(err, 0, "cmp_local_addr"))
+ goto cleanup;
+ }
+cleanup:
+ err = 0;
+err:
+ if (client != -1)
+ test->ops->close(client);
+ if (serv != -1)
+ close(serv);
+
+ return err;
+}
+
+static int test_xmsg(struct sock_addr_test *test)
+{
+ struct sockaddr_storage addr, src_addr;
+ socklen_t addr_len = sizeof(struct sockaddr_storage),
+ src_addr_len = sizeof(struct sockaddr_storage);
+ char data = 'a';
+ int serv = -1, client = -1, err;
+
+ /* Unlike the other tests, here we test that we can rewrite the src addr
+ * with a recvmsg() hook.
+ */
+
+ serv = start_server(test->socket_family, test->socket_type,
+ test->expected_addr, test->expected_port, 0);
+ if (!ASSERT_GE(serv, 0, "start_server"))
+ goto cleanup;
+
+ client = test->ops->socket(test->socket_family, test->socket_type, 0);
+ if (!ASSERT_GE(client, 0, "socket"))
+ goto cleanup;
+
+ /* AF_UNIX sockets have to be bound to something to trigger the recvmsg bpf program. */
+ if (test->socket_family == AF_UNIX) {
+ err = make_sockaddr(AF_UNIX, SRCUN_ADDRESS, 0, &src_addr, &src_addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ err = test->ops->bind(client, (struct sockaddr *)&src_addr,
+ src_addr_len);
+ if (!ASSERT_OK(err, "bind"))
+ goto cleanup;
+ }
+
+ err = make_sockaddr(test->socket_family, test->requested_addr, test->requested_port,
+ &addr, &addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ if (test->socket_type == SOCK_DGRAM) {
+ err = test->ops->sendmsg(client, (struct sockaddr *)&addr,
+ addr_len, &data, sizeof(data));
+ if (err < 0) {
+ err = errno;
+ goto err;
+ }
+
+ if (!ASSERT_EQ(err, sizeof(data), "sendmsg"))
+ goto cleanup;
+ } else {
+ /* Testing with connection-oriented sockets is only valid for
+ * recvmsg() tests.
+ */
+ if (!ASSERT_EQ(test->type, SOCK_ADDR_TEST_RECVMSG, "recvmsg"))
+ goto cleanup;
+
+ err = connect(client, (const struct sockaddr *)&addr, addr_len);
+ if (!ASSERT_OK(err, "connect"))
+ goto cleanup;
+
+ err = send(client, &data, sizeof(data), 0);
+ if (!ASSERT_EQ(err, sizeof(data), "send"))
+ goto cleanup;
+
+ err = listen(serv, 0);
+ if (!ASSERT_OK(err, "listen"))
+ goto cleanup;
+
+ err = accept(serv, NULL, NULL);
+ if (!ASSERT_GE(err, 0, "accept"))
+ goto cleanup;
+
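+		/* From here on, receive on the accepted connection. */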
+ close(serv);
+ serv = err;
+ }
+
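+	/* Restore the full buffer sizes before recvfrom() fills them in. */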
+ addr_len = src_addr_len = sizeof(struct sockaddr_storage);
+
+ err = recvfrom(serv, &data, sizeof(data), 0, (struct sockaddr *) &src_addr, &src_addr_len);
+ if (!ASSERT_EQ(err, sizeof(data), "recvfrom"))
+ goto cleanup;
+
+ ASSERT_EQ(data, 'a', "data mismatch");
+
+ if (test->expected_src_addr) {
+ err = make_sockaddr(test->socket_family, test->expected_src_addr, 0,
+ &addr, &addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ err = cmp_addr(&src_addr, src_addr_len, &addr, addr_len, false);
+ if (!ASSERT_EQ(err, 0, "cmp_addr"))
+ goto cleanup;
+ }
+
+cleanup:
+ err = 0;
+err:
+ if (client != -1)
+ test->ops->close(client);
+ if (serv != -1)
+ close(serv);
+
+ return err;
+}
+
+static int test_getsockname(struct sock_addr_test *test)
+{
+ struct sockaddr_storage expected_addr;
+ socklen_t expected_addr_len = sizeof(struct sockaddr_storage);
+ int serv = -1, err;
+
+ serv = test->ops->start_server(test->socket_family, test->socket_type,
+ test->requested_addr, test->requested_port, 0);
+ if (!ASSERT_GE(serv, 0, "start_server"))
+ goto cleanup;
+
+ err = make_sockaddr(test->socket_family,
+ test->expected_addr, test->expected_port,
+ &expected_addr, &expected_addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ err = cmp_sock_addr(test->ops->getsockname, serv, &expected_addr, expected_addr_len, true);
+ if (!ASSERT_EQ(err, 0, "cmp_local_addr"))
+ goto cleanup;
+
+cleanup:
+ if (serv != -1)
+ test->ops->close(serv);
+
+ return 0;
+}
+
+static int test_getpeername(struct sock_addr_test *test)
+{
+ struct sockaddr_storage addr, expected_addr;
+ socklen_t addr_len = sizeof(struct sockaddr_storage),
+ expected_addr_len = sizeof(struct sockaddr_storage);
+ int serv = -1, client = -1, err;
+
+ serv = start_server(test->socket_family, test->socket_type,
+ test->requested_addr, test->requested_port, 0);
+ if (!ASSERT_GE(serv, 0, "start_server"))
+ goto cleanup;
+
+ err = make_sockaddr(test->socket_family, test->requested_addr, test->requested_port,
+ &addr, &addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ client = test->ops->connect_to_addr(test->socket_type, &addr, addr_len,
+ NULL);
+ if (!ASSERT_GE(client, 0, "connect_to_addr"))
+ goto cleanup;
+
+ err = make_sockaddr(test->socket_family, test->expected_addr, test->expected_port,
+ &expected_addr, &expected_addr_len);
+ if (!ASSERT_EQ(err, 0, "make_sockaddr"))
+ goto cleanup;
+
+ err = cmp_sock_addr(test->ops->getpeername, client, &expected_addr,
+ expected_addr_len, true);
+ if (!ASSERT_EQ(err, 0, "cmp_peer_addr"))
+ goto cleanup;
+
+cleanup:
+ if (client != -1)
+ test->ops->close(client);
+ if (serv != -1)
+ close(serv);
+
+ return 0;
+}
+
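+/* Create a fresh netns with a veth pair carrying the test IPv4/IPv6
+ * addresses.
+ */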
+static int setup_test_env(struct nstoken **tok)
+{
+ int err;
+
+ SYS_NOFAIL("ip netns delete %s", TEST_NS);
+ SYS(fail, "ip netns add %s", TEST_NS);
+ *tok = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(*tok, "netns token"))
+ goto fail;
+
+ SYS(fail, "ip link add dev %s1 type veth peer name %s2", TEST_IF_PREFIX,
+ TEST_IF_PREFIX);
+ SYS(fail, "ip link set lo up");
+ SYS(fail, "ip link set %s1 up", TEST_IF_PREFIX);
+ SYS(fail, "ip link set %s2 up", TEST_IF_PREFIX);
+ SYS(fail, "ip -4 addr add %s/8 dev %s1", TEST_IPV4, TEST_IF_PREFIX);
+ SYS(fail, "ip -6 addr add %s/128 nodad dev %s1", TEST_IPV6, TEST_IF_PREFIX);
+
+ err = 0;
+ goto out;
+fail:
+ err = -1;
+ close_netns(*tok);
+ *tok = NULL;
+ SYS_NOFAIL("ip netns delete %s", TEST_NS);
+out:
+ return err;
+}
+
+static void cleanup_test_env(struct nstoken *tok)
+{
+ close_netns(tok);
+ SYS_NOFAIL("ip netns delete %s", TEST_NS);
+}
+
+void test_sock_addr(void)
+{
+ struct nstoken *tok = NULL;
+ int cgroup_fd = -1;
+ void *skel;
+
+ cgroup_fd = test__join_cgroup("/sock_addr");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
+ goto cleanup;
+
+ if (!ASSERT_OK(setup_test_env(&tok), "setup_test_env"))
+ goto cleanup;
+
+ if (!ASSERT_OK(load_sock_addr_kern(), "load_sock_addr_kern"))
+ goto cleanup;
+
+ for (size_t i = 0; i < ARRAY_SIZE(tests); ++i) {
+ struct sock_addr_test *test = &tests[i];
+ int err;
+
+ if (!test__start_subtest(test->name))
+ continue;
+
+ skel = test->loadfn(cgroup_fd, test->attach_type,
+ test->expected_result == LOAD_REJECT ||
+ test->expected_result == ATTACH_REJECT);
+ if (!skel)
+ continue;
+
+ switch (test->type) {
+ case SOCK_ADDR_TEST_BIND:
+ err = test_bind(test);
+ break;
+ case SOCK_ADDR_TEST_CONNECT:
+ err = test_connect(test);
+ break;
+ case SOCK_ADDR_TEST_SENDMSG:
+ case SOCK_ADDR_TEST_RECVMSG:
+ err = test_xmsg(test);
+ break;
+ case SOCK_ADDR_TEST_GETSOCKNAME:
+ err = test_getsockname(test);
+ break;
+ case SOCK_ADDR_TEST_GETPEERNAME:
+ err = test_getpeername(test);
+ break;
+ default:
+ ASSERT_TRUE(false, "Unknown sock addr test type");
+ err = -EINVAL;
+ break;
+ }
+
+ if (test->expected_result == SYSCALL_EPERM)
+ ASSERT_EQ(err, EPERM, "socket operation returns EPERM");
+ else if (test->expected_result == SYSCALL_ENOTSUPP)
+ ASSERT_EQ(err, ENOTSUPP, "socket operation returns ENOTSUPP");
+ else if (test->expected_result == SUCCESS)
+ ASSERT_OK(err, "socket operation succeeds");
+
+ test->destroyfn(skel);
+ }
+
+cleanup:
+ unload_sock_addr_kern();
+ cleanup_test_env(tok);
+ if (cgroup_fd >= 0)
+ close(cgroup_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_create.c b/tools/testing/selftests/bpf/prog_tests/sock_create.c
new file mode 100644
index 000000000000..187ffc5e60c4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sock_create.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+
+static char bpf_log_buf[4096];
+static bool verbose;
+
+enum sock_create_test_error {
+ OK = 0,
+ DENY_CREATE,
+};
+
+static struct sock_create_test {
+ const char *descr;
+ const struct bpf_insn insns[64];
+ enum bpf_attach_type attach_type;
+ enum bpf_attach_type expected_attach_type;
+
+ int domain;
+ int type;
+ int protocol;
+
+ int optname;
+ int optval;
+ enum sock_create_test_error error;
+} tests[] = {
+ {
+ .descr = "AF_INET set priority",
+ .insns = {
+ /* r3 = 123 (priority) */
+ BPF_MOV64_IMM(BPF_REG_3, 123),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, priority)),
+
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET,
+ .type = SOCK_DGRAM,
+
+ .optname = SO_PRIORITY,
+ .optval = 123,
+ },
+ {
+ .descr = "AF_INET6 set priority",
+ .insns = {
+ /* r3 = 123 (priority) */
+ BPF_MOV64_IMM(BPF_REG_3, 123),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, priority)),
+
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET6,
+ .type = SOCK_DGRAM,
+
+ .optname = SO_PRIORITY,
+ .optval = 123,
+ },
+ {
+ .descr = "AF_INET set mark",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* get uid of process */
+ BPF_EMIT_CALL(BPF_FUNC_get_current_uid_gid),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffffffff),
+
+ /* if uid is 0, use given mark(666), else use uid as the mark */
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 666),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, mark)),
+
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET,
+ .type = SOCK_DGRAM,
+
+ .optname = SO_MARK,
+ .optval = 666,
+ },
+ {
+ .descr = "AF_INET6 set mark",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* get uid of process */
+ BPF_EMIT_CALL(BPF_FUNC_get_current_uid_gid),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffffffff),
+
+ /* if uid is 0, use given mark(666), else use uid as the mark */
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 666),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, mark)),
+
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET6,
+ .type = SOCK_DGRAM,
+
+ .optname = SO_MARK,
+ .optval = 666,
+ },
+ {
+ .descr = "AF_INET bound to iface",
+ .insns = {
+ /* r3 = 1 (lo interface) */
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, bound_dev_if)),
+
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET,
+ .type = SOCK_DGRAM,
+
+ .optname = SO_BINDTOIFINDEX,
+ .optval = 1,
+ },
+ {
+ .descr = "AF_INET6 bound to iface",
+ .insns = {
+ /* r3 = 1 (lo interface) */
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, bound_dev_if)),
+
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET6,
+ .type = SOCK_DGRAM,
+
+ .optname = SO_BINDTOIFINDEX,
+ .optval = 1,
+ },
+ {
+ .descr = "block AF_INET, SOCK_DGRAM, IPPROTO_ICMP socket",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = verdict */
+
+ /* sock->family == AF_INET */
+ BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+ offsetof(struct bpf_sock, family)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, AF_INET, 5),
+
+ /* sock->type == SOCK_DGRAM */
+ BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+ offsetof(struct bpf_sock, type)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, SOCK_DGRAM, 3),
+
+ /* sock->protocol == IPPROTO_ICMP */
+ BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+ offsetof(struct bpf_sock, protocol)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, IPPROTO_ICMP, 1),
+
+ /* return 0 (block) */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET,
+ .type = SOCK_DGRAM,
+ .protocol = IPPROTO_ICMP,
+
+ .error = DENY_CREATE,
+ },
+ {
+ .descr = "block AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6 socket",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = verdict */
+
+ /* sock->family == AF_INET6 */
+ BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+ offsetof(struct bpf_sock, family)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, AF_INET6, 5),
+
+ /* sock->type == SOCK_DGRAM */
+ BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+ offsetof(struct bpf_sock, type)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, SOCK_DGRAM, 3),
+
+ /* sock->protocol == IPPROTO_ICMPV6 */
+ BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+ offsetof(struct bpf_sock, protocol)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, IPPROTO_ICMPV6, 1),
+
+ /* return 0 (block) */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+		.domain = AF_INET6,
+ .type = SOCK_DGRAM,
+ .protocol = IPPROTO_ICMPV6,
+
+ .error = DENY_CREATE,
+ },
+ {
+ .descr = "load w/o expected_attach_type (compat mode)",
+ .insns = {
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = 0,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ },
+};
+
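+/* Load a hand-assembled instruction sequence as a BPF_PROG_TYPE_CGROUP_SOCK
+ * program, dumping the verifier log on failure when verbose is set.
+ */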
+static int load_prog(const struct bpf_insn *insns,
+ enum bpf_attach_type expected_attach_type)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .expected_attach_type = expected_attach_type,
+ .log_level = 2,
+ .log_buf = bpf_log_buf,
+ .log_size = sizeof(bpf_log_buf),
+ );
+ int fd, insns_cnt = 0;
+
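+	/* Count the instructions up to and including the first exit. */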
+	while (insns[insns_cnt].code != (BPF_JMP | BPF_EXIT))
+		insns_cnt++;
+	insns_cnt++;
+
+ fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns,
+ insns_cnt, &opts);
+ if (verbose && fd < 0)
+ fprintf(stderr, "%s\n", bpf_log_buf);
+
+ return fd;
+}
+
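+/* Attach the program, create a socket, and verify either that creation is
+ * denied or that the option the program set is visible via getsockopt().
+ */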
+static int run_test(int cgroup_fd, struct sock_create_test *test)
+{
+ int sock_fd, err, prog_fd, optval, ret = -1;
+ socklen_t optlen = sizeof(optval);
+
+ prog_fd = load_prog(test->insns, test->expected_attach_type);
+ if (prog_fd < 0) {
+ log_err("Failed to load BPF program");
+ return -1;
+ }
+
+ err = bpf_prog_attach(prog_fd, cgroup_fd, test->attach_type, 0);
+ if (err < 0) {
+ log_err("Failed to attach BPF program");
+ goto close_prog_fd;
+ }
+
+ sock_fd = socket(test->domain, test->type, test->protocol);
+ if (sock_fd < 0) {
+ if (test->error == DENY_CREATE)
+ ret = 0;
+ else
+ log_err("Failed to create socket");
+
+ goto detach_prog;
+ }
+
+ if (test->optname) {
+ err = getsockopt(sock_fd, SOL_SOCKET, test->optname, &optval, &optlen);
+ if (err) {
+ log_err("Failed to call getsockopt");
+ goto cleanup;
+ }
+
+ if (optval != test->optval) {
+ errno = 0;
+ log_err("getsockopt returned unexpected optval");
+ goto cleanup;
+ }
+ }
+
+ ret = test->error != OK;
+
+cleanup:
+ close(sock_fd);
+detach_prog:
+ bpf_prog_detach2(prog_fd, cgroup_fd, test->attach_type);
+close_prog_fd:
+ close(prog_fd);
+ return ret;
+}
+
+void test_sock_create(void)
+{
+ int cgroup_fd, i;
+
+ cgroup_fd = test__join_cgroup("/sock_create");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (!test__start_subtest(tests[i].descr))
+ continue;
+
+ ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr);
+ }
+
+ close(cgroup_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_destroy.c b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c
new file mode 100644
index 000000000000..9c11938fe597
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <bpf/bpf_endian.h>
+
+#include "sock_destroy_prog.skel.h"
+#include "sock_destroy_prog_fail.skel.h"
+#include "network_helpers.h"
+
+#define TEST_NS "sock_destroy_netns"
+
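+/* Attach a socket iterator and drain it; reading the iterator fd runs the
+ * program over every socket so it can destroy the ones it selects.
+ */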
+static void start_iter_sockets(struct bpf_program *prog)
+{
+ struct bpf_link *link;
+ char buf[50] = {};
+ int iter_fd, len;
+
+ link = bpf_program__attach_iter(prog, NULL);
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
+ return;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
+ goto free_link;
+
+ while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+ ;
+ ASSERT_GE(len, 0, "read");
+
+ close(iter_fd);
+
+free_link:
+ bpf_link__destroy(link);
+}
+
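+/* Destroy an established TCP client socket mid-connection and verify that a
+ * subsequent send() fails with ECONNABORTED.
+ */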
+static void test_tcp_client(struct sock_destroy_prog *skel)
+{
+ int serv = -1, clien = -1, accept_serv = -1, n;
+
+ serv = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
+ if (!ASSERT_GE(serv, 0, "start_server"))
+ goto cleanup;
+
+ clien = connect_to_fd(serv, 0);
+ if (!ASSERT_GE(clien, 0, "connect_to_fd"))
+ goto cleanup;
+
+ accept_serv = accept(serv, NULL, NULL);
+ if (!ASSERT_GE(accept_serv, 0, "serv accept"))
+ goto cleanup;
+
+ n = send(clien, "t", 1, 0);
+ if (!ASSERT_EQ(n, 1, "client send"))
+ goto cleanup;
+
+ /* Run iterator program that destroys connected client sockets. */
+ start_iter_sockets(skel->progs.iter_tcp6_client);
+
+ n = send(clien, "t", 1, 0);
+ if (!ASSERT_LT(n, 0, "client_send on destroyed socket"))
+ goto cleanup;
+ ASSERT_EQ(errno, ECONNABORTED, "error code on destroyed socket");
+
+cleanup:
+ if (clien != -1)
+ close(clien);
+ if (accept_serv != -1)
+ close(accept_serv);
+ if (serv != -1)
+ close(serv);
+}
+
+static void test_tcp_server(struct sock_destroy_prog *skel)
+{
+ int serv = -1, clien = -1, accept_serv = -1, n, serv_port;
+
+ serv = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
+ if (!ASSERT_GE(serv, 0, "start_server"))
+ goto cleanup;
+ serv_port = get_socket_local_port(serv);
+ if (!ASSERT_GE(serv_port, 0, "get_sock_local_port"))
+ goto cleanup;
+ skel->bss->serv_port = (__be16) serv_port;
+
+ clien = connect_to_fd(serv, 0);
+ if (!ASSERT_GE(clien, 0, "connect_to_fd"))
+ goto cleanup;
+
+ accept_serv = accept(serv, NULL, NULL);
+ if (!ASSERT_GE(accept_serv, 0, "serv accept"))
+ goto cleanup;
+
+ n = send(clien, "t", 1, 0);
+ if (!ASSERT_EQ(n, 1, "client send"))
+ goto cleanup;
+
+ /* Run iterator program that destroys server sockets. */
+ start_iter_sockets(skel->progs.iter_tcp6_server);
+
+ n = send(clien, "t", 1, 0);
+ if (!ASSERT_LT(n, 0, "client_send on destroyed socket"))
+ goto cleanup;
+ ASSERT_EQ(errno, ECONNRESET, "error code on destroyed socket");
+
+cleanup:
+ if (clien != -1)
+ close(clien);
+ if (accept_serv != -1)
+ close(accept_serv);
+ if (serv != -1)
+ close(serv);
+}
+
+static void test_udp_client(struct sock_destroy_prog *skel)
+{
+ int serv = -1, clien = -1, n = 0;
+
+ serv = start_server(AF_INET6, SOCK_DGRAM, NULL, 0, 0);
+ if (!ASSERT_GE(serv, 0, "start_server"))
+ goto cleanup;
+
+ clien = connect_to_fd(serv, 0);
+ if (!ASSERT_GE(clien, 0, "connect_to_fd"))
+ goto cleanup;
+
+ n = send(clien, "t", 1, 0);
+ if (!ASSERT_EQ(n, 1, "client send"))
+ goto cleanup;
+
+ /* Run iterator program that destroys sockets. */
+ start_iter_sockets(skel->progs.iter_udp6_client);
+
+ n = send(clien, "t", 1, 0);
+ if (!ASSERT_LT(n, 0, "client_send on destroyed socket"))
+ goto cleanup;
+ /* UDP sockets have an overriding error code after they are disconnected,
+ * so we don't check for ECONNABORTED error code.
+ */
+
+cleanup:
+ if (clien != -1)
+ close(clien);
+ if (serv != -1)
+ close(serv);
+}
+
+static void test_udp_server(struct sock_destroy_prog *skel)
+{
+ int *listen_fds = NULL, n, i, serv_port;
+ unsigned int num_listens = 5;
+ char buf[1];
+
+ /* Start reuseport servers. */
+ listen_fds = start_reuseport_server(AF_INET6, SOCK_DGRAM,
+ "::1", 0, 0, num_listens);
+ if (!ASSERT_OK_PTR(listen_fds, "start_reuseport_server"))
+ goto cleanup;
+ serv_port = get_socket_local_port(listen_fds[0]);
+ if (!ASSERT_GE(serv_port, 0, "get_sock_local_port"))
+ goto cleanup;
+ skel->bss->serv_port = (__be16) serv_port;
+
+ /* Run iterator program that destroys server sockets. */
+ start_iter_sockets(skel->progs.iter_udp6_server);
+
+ for (i = 0; i < num_listens; ++i) {
+ n = read(listen_fds[i], buf, sizeof(buf));
+ if (!ASSERT_EQ(n, -1, "read") ||
+ !ASSERT_EQ(errno, ECONNABORTED, "error code on destroyed socket"))
+ break;
+ }
+ ASSERT_EQ(i, num_listens, "server socket");
+
+cleanup:
+ free_fds(listen_fds, num_listens);
+}
+
+void test_sock_destroy(void)
+{
+ struct sock_destroy_prog *skel;
+ struct nstoken *nstoken = NULL;
+ int cgroup_fd;
+
+ skel = sock_destroy_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ cgroup_fd = test__join_cgroup("/sock_destroy");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
+ goto cleanup;
+
+ skel->links.sock_connect = bpf_program__attach_cgroup(
+ skel->progs.sock_connect, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.sock_connect, "prog_attach"))
+ goto cleanup;
+
+ SYS(cleanup, "ip netns add %s", TEST_NS);
+ SYS(cleanup, "ip -net %s link set dev lo up", TEST_NS);
+
+ nstoken = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto cleanup;
+
+ if (test__start_subtest("tcp_client"))
+ test_tcp_client(skel);
+ if (test__start_subtest("tcp_server"))
+ test_tcp_server(skel);
+ if (test__start_subtest("udp_client"))
+ test_udp_client(skel);
+ if (test__start_subtest("udp_server"))
+ test_udp_server(skel);
+
+ RUN_TESTS(sock_destroy_prog_fail);
+
+cleanup:
+ if (nstoken)
+ close_netns(nstoken);
+	SYS_NOFAIL("ip netns delete %s", TEST_NS);
+ if (cgroup_fd >= 0)
+ close(cgroup_fd);
+ sock_destroy_prog__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_fields.c b/tools/testing/selftests/bpf/prog_tests/sock_fields.c
index af87118e748e..7d23166c77af 100644
--- a/tools/testing/selftests/bpf/prog_tests/sock_fields.c
+++ b/tools/testing/selftests/bpf/prog_tests/sock_fields.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
+#define _GNU_SOURCE
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
+#include <sched.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
@@ -15,12 +17,12 @@
#include "network_helpers.h"
#include "cgroup_helpers.h"
#include "test_progs.h"
-#include "bpf_rlimit.h"
#include "test_sock_fields.skel.h"
enum bpf_linum_array_idx {
EGRESS_LINUM_IDX,
INGRESS_LINUM_IDX,
+ READ_SK_DST_PORT_LINUM_IDX,
__NR_BPF_LINUM_ARRAY_IDX,
};
@@ -43,8 +45,16 @@ static __u64 child_cg_id;
static int linum_map_fd;
static __u32 duration;
-static __u32 egress_linum_idx = EGRESS_LINUM_IDX;
-static __u32 ingress_linum_idx = INGRESS_LINUM_IDX;
+static bool create_netns(void)
+{
+ if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
+ return false;
+
+ if (!ASSERT_OK(system("ip link set dev lo up"), "bring up lo"))
+ return false;
+
+ return true;
+}
static void print_sk(const struct bpf_sock *sk, const char *prefix)
{
@@ -92,19 +102,24 @@ static void check_result(void)
{
struct bpf_tcp_sock srv_tp, cli_tp, listen_tp;
struct bpf_sock srv_sk, cli_sk, listen_sk;
- __u32 ingress_linum, egress_linum;
+ __u32 idx, ingress_linum, egress_linum, linum;
int err;
- err = bpf_map_lookup_elem(linum_map_fd, &egress_linum_idx,
- &egress_linum);
- CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
+ idx = EGRESS_LINUM_IDX;
+ err = bpf_map_lookup_elem(linum_map_fd, &idx, &egress_linum);
+ CHECK(err < 0, "bpf_map_lookup_elem(linum_map_fd)",
"err:%d errno:%d\n", err, errno);
- err = bpf_map_lookup_elem(linum_map_fd, &ingress_linum_idx,
- &ingress_linum);
- CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
+ idx = INGRESS_LINUM_IDX;
+ err = bpf_map_lookup_elem(linum_map_fd, &idx, &ingress_linum);
+ CHECK(err < 0, "bpf_map_lookup_elem(linum_map_fd)",
"err:%d errno:%d\n", err, errno);
+ idx = READ_SK_DST_PORT_LINUM_IDX;
+ err = bpf_map_lookup_elem(linum_map_fd, &idx, &linum);
+ ASSERT_OK(err, "bpf_map_lookup_elem(linum_map_fd, READ_SK_DST_PORT_IDX)");
+ ASSERT_EQ(linum, 0, "failure in read_sk_dst_port on line");
+
memcpy(&srv_sk, &skel->bss->srv_sk, sizeof(srv_sk));
memcpy(&srv_tp, &skel->bss->srv_tp, sizeof(srv_tp));
memcpy(&cli_sk, &skel->bss->cli_sk, sizeof(cli_sk));
@@ -263,7 +278,7 @@ static void test(void)
char buf[DATA_LEN];
/* Prepare listen_fd */
- listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+ listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0xcafe, 0);
/* start_server() has logged the error details */
if (CHECK_FAIL(listen_fd == -1))
goto done;
@@ -329,10 +344,14 @@ done:
close(listen_fd);
}
-void test_sock_fields(void)
+void serial_test_sock_fields(void)
{
- struct bpf_link *egress_link = NULL, *ingress_link = NULL;
int parent_cg_fd = -1, child_cg_fd = -1;
+ struct bpf_link *link;
+
+ /* Use a dedicated netns to have a fixed listen port */
+ if (!create_netns())
+ return;
/* Create a cgroup, get fd, and join it */
parent_cg_fd = test__join_cgroup(PARENT_CGROUP);
@@ -353,17 +372,20 @@ void test_sock_fields(void)
if (CHECK(!skel, "test_sock_fields__open_and_load", "failed\n"))
goto done;
- egress_link = bpf_program__attach_cgroup(skel->progs.egress_read_sock_fields,
- child_cg_fd);
- if (CHECK(IS_ERR(egress_link), "attach_cgroup(egress)", "err:%ld\n",
- PTR_ERR(egress_link)))
+ link = bpf_program__attach_cgroup(skel->progs.egress_read_sock_fields, child_cg_fd);
+ if (!ASSERT_OK_PTR(link, "attach_cgroup(egress_read_sock_fields)"))
+ goto done;
+ skel->links.egress_read_sock_fields = link;
+
+ link = bpf_program__attach_cgroup(skel->progs.ingress_read_sock_fields, child_cg_fd);
+ if (!ASSERT_OK_PTR(link, "attach_cgroup(ingress_read_sock_fields)"))
goto done;
+ skel->links.ingress_read_sock_fields = link;
- ingress_link = bpf_program__attach_cgroup(skel->progs.ingress_read_sock_fields,
- child_cg_fd);
- if (CHECK(IS_ERR(ingress_link), "attach_cgroup(ingress)", "err:%ld\n",
- PTR_ERR(ingress_link)))
+ link = bpf_program__attach_cgroup(skel->progs.read_sk_dst_port, child_cg_fd);
+ if (!ASSERT_OK_PTR(link, "attach_cgroup(read_sk_dst_port"))
goto done;
+ skel->links.read_sk_dst_port = link;
linum_map_fd = bpf_map__fd(skel->maps.linum_map);
sk_pkt_out_cnt_fd = bpf_map__fd(skel->maps.sk_pkt_out_cnt);
@@ -372,11 +394,9 @@ void test_sock_fields(void)
test();
done:
- bpf_link__destroy(egress_link);
- bpf_link__destroy(ingress_link);
test_sock_fields__destroy(skel);
- if (child_cg_fd != -1)
+ if (child_cg_fd >= 0)
close(child_cg_fd);
- if (parent_cg_fd != -1)
+ if (parent_cg_fd >= 0)
close(parent_cg_fd);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
new file mode 100644
index 000000000000..27781df8f2fb
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
@@ -0,0 +1,994 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Meta
+
+#include <poll.h>
+#include <test_progs.h>
+#include "network_helpers.h"
+#include "sock_iter_batch.skel.h"
+
+#define TEST_NS "sock_iter_batch_netns"
+#define TEST_CHILD_NS "sock_iter_batch_child_netns"
+
+static const int init_batch_size = 16;
+static const int nr_soreuse = 4;
+
+struct iter_out {
+ int idx;
+ __u64 cookie;
+} __packed;
+
+struct sock_count {
+ __u64 cookie;
+ int count;
+};
+
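+/* Tally a sighting of @cookie: bump the entry already holding it, or claim
+ * a free slot. Returns the updated count, or -1 if counts[] is full.
+ */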
+static int insert(__u64 cookie, struct sock_count counts[], int counts_len)
+{
+ int insert = -1;
+ int i = 0;
+
+ for (; i < counts_len; i++) {
+ if (!counts[i].cookie) {
+ insert = i;
+ } else if (counts[i].cookie == cookie) {
+ insert = i;
+ break;
+ }
+ }
+ if (insert < 0)
+ return insert;
+
+ counts[insert].cookie = cookie;
+ counts[insert].count++;
+
+ return counts[insert].count;
+}
+
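+/* Read up to @n records from the iterator (all remaining records if @n is
+ * negative), tallying each cookie in @counts. Returns the number read.
+ */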
+static int read_n(int iter_fd, int n, struct sock_count counts[],
+ int counts_len)
+{
+ struct iter_out out;
+ int nread = 1;
+ int i = 0;
+
+ for (; nread > 0 && (n < 0 || i < n); i++) {
+ nread = read(iter_fd, &out, sizeof(out));
+ if (!nread || !ASSERT_EQ(nread, sizeof(out), "nread"))
+ break;
+ ASSERT_GE(insert(out.cookie, counts, counts_len), 0, "insert");
+ }
+
+ ASSERT_TRUE(n < 0 || i == n, "n < 0 || i == n");
+
+ return i;
+}
+
+static __u64 socket_cookie(int fd)
+{
+ __u64 cookie;
+ socklen_t cookie_len = sizeof(cookie);
+
+ if (!ASSERT_OK(getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie,
+ &cookie_len), "getsockopt(SO_COOKIE)"))
+ return 0;
+ return cookie;
+}
+
+static bool was_seen(int fd, struct sock_count counts[], int counts_len)
+{
+ __u64 cookie = socket_cookie(fd);
+ int i = 0;
+
+ for (; cookie && i < counts_len; i++)
+ if (cookie == counts[i].cookie)
+ return true;
+
+ return false;
+}
+
+static int get_seen_socket(int *fds, struct sock_count counts[], int n)
+{
+ int i = 0;
+
+ for (; i < n; i++)
+ if (was_seen(fds[i], counts, n))
+ return i;
+ return -1;
+}
+
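+/* Read a fresh instance of the iterator up to its n-th record (0-based) and
+ * return the index in @fds whose socket matches that record's cookie, or -1
+ * if no open fd matches.
+ */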
+static int get_nth_socket(int *fds, int fds_len, struct bpf_link *link, int n)
+{
+ int i, nread, iter_fd;
+ int nth_sock_idx = -1;
+ struct iter_out out;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_OK_FD(iter_fd, "bpf_iter_create"))
+ return -1;
+
+ for (; n >= 0; n--) {
+ nread = read(iter_fd, &out, sizeof(out));
+ if (!nread || !ASSERT_GE(nread, 1, "nread"))
+ goto done;
+ }
+
+ for (i = 0; i < fds_len && nth_sock_idx < 0; i++)
+ if (fds[i] >= 0 && socket_cookie(fds[i]) == out.cookie)
+ nth_sock_idx = i;
+done:
+ close(iter_fd);
+ return nth_sock_idx;
+}
+
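+/* Remove the socket behind @fd from the kernel hash table by running the
+ * iter_tcp_destroy program with destroy_cookie set to its cookie, then
+ * close the fd.
+ */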
+static void destroy(int fd)
+{
+ struct sock_iter_batch *skel = NULL;
+ __u64 cookie = socket_cookie(fd);
+ struct bpf_link *link = NULL;
+ int iter_fd = -1;
+ int nread;
+ __u64 out;
+
+ skel = sock_iter_batch__open();
+ if (!ASSERT_OK_PTR(skel, "sock_iter_batch__open"))
+ goto done;
+
+ skel->rodata->destroy_cookie = cookie;
+
+ if (!ASSERT_OK(sock_iter_batch__load(skel), "sock_iter_batch__load"))
+ goto done;
+
+ link = bpf_program__attach_iter(skel->progs.iter_tcp_destroy, NULL);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_iter"))
+ goto done;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_OK_FD(iter_fd, "bpf_iter_create"))
+ goto done;
+
+ /* Delete matching socket. */
+ nread = read(iter_fd, &out, sizeof(out));
+ ASSERT_GE(nread, 0, "nread");
+ if (nread)
+ ASSERT_EQ(out, cookie, "cookie matches");
+done:
+ if (iter_fd >= 0)
+ close(iter_fd);
+ bpf_link__destroy(link);
+ sock_iter_batch__destroy(skel);
+ close(fd);
+}
+
+static int get_seen_count(int fd, struct sock_count counts[], int n)
+{
+ __u64 cookie = socket_cookie(fd);
+ int count = 0;
+ int i = 0;
+
+ for (; cookie && !count && i < n; i++)
+ if (cookie == counts[i].cookie)
+ count = counts[i].count;
+
+ return count;
+}
+
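+/* Assert that, among the still-open fds, exactly @n were seen exactly once
+ * by the iterator.
+ */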
+static void check_n_were_seen_once(int *fds, int fds_len, int n,
+ struct sock_count counts[], int counts_len)
+{
+ int seen_once = 0;
+ int seen_cnt;
+ int i = 0;
+
+ for (; i < fds_len; i++) {
+ /* Skip any sockets that were closed or that weren't seen
+ * exactly once.
+ */
+ if (fds[i] < 0)
+ continue;
+ seen_cnt = get_seen_count(fds[i], counts, counts_len);
+ if (seen_cnt && ASSERT_EQ(seen_cnt, 1, "seen_cnt"))
+ seen_once++;
+ }
+
+ ASSERT_EQ(seen_once, n, "seen_once");
+}
+
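+/* Wait for one of the listening sockets to become readable and accept()
+ * from it.
+ */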
+static int accept_from_one(struct pollfd *server_poll_fds,
+ int server_poll_fds_len)
+{
+ static const int poll_timeout_ms = 5000; /* 5s */
+ int ret;
+ int i;
+
+ ret = poll(server_poll_fds, server_poll_fds_len, poll_timeout_ms);
+ if (!ASSERT_EQ(ret, 1, "poll"))
+ return -1;
+
+ for (i = 0; i < server_poll_fds_len; i++)
+ if (server_poll_fds[i].revents & POLLIN)
+ return accept(server_poll_fds[i].fd, NULL, NULL);
+
+ return -1;
+}
+
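+/* Open @nr_connects client connections to the reuseport servers and accept
+ * each one, returning the resulting 2 * nr_connects fds (client followed by
+ * accepted peer) interleaved in a malloc'd array.
+ */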
+static int *connect_to_server(int family, int sock_type, const char *addr,
+ __u16 port, int nr_connects, int *server_fds,
+ int server_fds_len)
+{
+ struct pollfd *server_poll_fds = NULL;
+ int *established_socks = NULL;
+ int i;
+
+ server_poll_fds = calloc(server_fds_len, sizeof(*server_poll_fds));
+ if (!ASSERT_OK_PTR(server_poll_fds, "server_poll_fds"))
+ return NULL;
+
+ for (i = 0; i < server_fds_len; i++) {
+ server_poll_fds[i].fd = server_fds[i];
+ server_poll_fds[i].events = POLLIN;
+ }
+
+ i = 0;
+
+	established_socks = malloc(sizeof(*established_socks) * nr_connects * 2);
+ if (!ASSERT_OK_PTR(established_socks, "established_socks"))
+ goto error;
+
+ while (nr_connects--) {
+ established_socks[i] = connect_to_addr_str(family, sock_type,
+ addr, port, NULL);
+ if (!ASSERT_OK_FD(established_socks[i], "connect_to_addr_str"))
+ goto error;
+ i++;
+ established_socks[i] = accept_from_one(server_poll_fds,
+ server_fds_len);
+ if (!ASSERT_OK_FD(established_socks[i], "accept_from_one"))
+ goto error;
+ i++;
+ }
+
+ free(server_poll_fds);
+ return established_socks;
+error:
+ free_fds(established_socks, i);
+ free(server_poll_fds);
+ return NULL;
+}
+
+static void remove_seen(int family, int sock_type, const char *addr, __u16 port,
+ int *socks, int socks_len, int *established_socks,
+ int established_socks_len, struct sock_count *counts,
+ int counts_len, struct bpf_link *link, int iter_fd)
+{
+ int close_idx;
+
+ /* Iterate through the first socks_len - 1 sockets. */
+ read_n(iter_fd, socks_len - 1, counts, counts_len);
+
+ /* Make sure we saw socks_len - 1 sockets exactly once. */
+ check_n_were_seen_once(socks, socks_len, socks_len - 1, counts,
+ counts_len);
+
+ /* Close a socket we've already seen to remove it from the bucket. */
+ close_idx = get_seen_socket(socks, counts, counts_len);
+ if (!ASSERT_GE(close_idx, 0, "close_idx"))
+ return;
+ close(socks[close_idx]);
+ socks[close_idx] = -1;
+
+ /* Iterate through the rest of the sockets. */
+ read_n(iter_fd, -1, counts, counts_len);
+
+ /* Make sure the last socket wasn't skipped and that there were no
+ * repeats.
+ */
+ check_n_were_seen_once(socks, socks_len, socks_len - 1, counts,
+ counts_len);
+}
+
+static void remove_seen_established(int family, int sock_type, const char *addr,
+ __u16 port, int *listen_socks,
+ int listen_socks_len, int *established_socks,
+ int established_socks_len,
+ struct sock_count *counts, int counts_len,
+ struct bpf_link *link, int iter_fd)
+{
+ int close_idx;
+
+ /* Iterate through all listening sockets. */
+ read_n(iter_fd, listen_socks_len, counts, counts_len);
+
+ /* Make sure we saw all listening sockets exactly once. */
+ check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+ counts, counts_len);
+
+ /* Leave one established socket. */
+ read_n(iter_fd, established_socks_len - 1, counts, counts_len);
+
+ /* Close a socket we've already seen to remove it from the bucket. */
+ close_idx = get_nth_socket(established_socks, established_socks_len,
+ link, listen_socks_len + 1);
+ if (!ASSERT_GE(close_idx, 0, "close_idx"))
+ return;
+ destroy(established_socks[close_idx]);
+ established_socks[close_idx] = -1;
+
+ /* Iterate through the rest of the sockets. */
+ read_n(iter_fd, -1, counts, counts_len);
+
+ /* Make sure the last socket wasn't skipped and that there were no
+ * repeats.
+ */
+ check_n_were_seen_once(established_socks, established_socks_len,
+ established_socks_len - 1, counts, counts_len);
+}
+
+static void remove_unseen(int family, int sock_type, const char *addr,
+ __u16 port, int *socks, int socks_len,
+ int *established_socks, int established_socks_len,
+ struct sock_count *counts, int counts_len,
+ struct bpf_link *link, int iter_fd)
+{
+ int close_idx;
+
+ /* Iterate through the first socket. */
+ read_n(iter_fd, 1, counts, counts_len);
+
+ /* Make sure we saw a socket from fds. */
+ check_n_were_seen_once(socks, socks_len, 1, counts, counts_len);
+
+ /* Close what would be the next socket in the bucket to exercise the
+ * condition where we need to skip past the first cookie we remembered.
+ */
+ close_idx = get_nth_socket(socks, socks_len, link, 1);
+ if (!ASSERT_GE(close_idx, 0, "close_idx"))
+ return;
+ close(socks[close_idx]);
+ socks[close_idx] = -1;
+
+ /* Iterate through the rest of the sockets. */
+ read_n(iter_fd, -1, counts, counts_len);
+
+ /* Make sure the remaining sockets were seen exactly once and that we
+ * didn't repeat the socket that was already seen.
+ */
+ check_n_were_seen_once(socks, socks_len, socks_len - 1, counts,
+ counts_len);
+}
+
+static void remove_unseen_established(int family, int sock_type,
+ const char *addr, __u16 port,
+ int *listen_socks, int listen_socks_len,
+ int *established_socks,
+ int established_socks_len,
+ struct sock_count *counts, int counts_len,
+ struct bpf_link *link, int iter_fd)
+{
+ int close_idx;
+
+ /* Iterate through all listening sockets. */
+ read_n(iter_fd, listen_socks_len, counts, counts_len);
+
+ /* Make sure we saw all listening sockets exactly once. */
+ check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+ counts, counts_len);
+
+ /* Iterate through the first established socket. */
+ read_n(iter_fd, 1, counts, counts_len);
+
+	/* Make sure we saw one established socket. */
+ check_n_were_seen_once(established_socks, established_socks_len, 1,
+ counts, counts_len);
+
+ /* Close what would be the next socket in the bucket to exercise the
+ * condition where we need to skip past the first cookie we remembered.
+ */
+ close_idx = get_nth_socket(established_socks, established_socks_len,
+ link, listen_socks_len + 1);
+ if (!ASSERT_GE(close_idx, 0, "close_idx"))
+ return;
+
+ destroy(established_socks[close_idx]);
+ established_socks[close_idx] = -1;
+
+ /* Iterate through the rest of the sockets. */
+ read_n(iter_fd, -1, counts, counts_len);
+
+ /* Make sure the remaining sockets were seen exactly once and that we
+ * didn't repeat the socket that was already seen.
+ */
+ check_n_were_seen_once(established_socks, established_socks_len,
+ established_socks_len - 1, counts, counts_len);
+}
+
+static void remove_all(int family, int sock_type, const char *addr,
+ __u16 port, int *socks, int socks_len,
+ int *established_socks, int established_socks_len,
+ struct sock_count *counts, int counts_len,
+ struct bpf_link *link, int iter_fd)
+{
+ int close_idx, i;
+
+ /* Iterate through the first socket. */
+ read_n(iter_fd, 1, counts, counts_len);
+
+ /* Make sure we saw a socket from fds. */
+ check_n_were_seen_once(socks, socks_len, 1, counts, counts_len);
+
+ /* Close all remaining sockets to exhaust the list of saved cookies and
+ * exit without putting any sockets into the batch on the next read.
+ */
+ for (i = 0; i < socks_len - 1; i++) {
+ close_idx = get_nth_socket(socks, socks_len, link, 1);
+ if (!ASSERT_GE(close_idx, 0, "close_idx"))
+ return;
+ close(socks[close_idx]);
+ socks[close_idx] = -1;
+ }
+
+ /* Make sure there are no more sockets returned */
+ ASSERT_EQ(read_n(iter_fd, -1, counts, counts_len), 0, "read_n");
+}
+
+static void remove_all_established(int family, int sock_type, const char *addr,
+ __u16 port, int *listen_socks,
+ int listen_socks_len, int *established_socks,
+ int established_socks_len,
+ struct sock_count *counts, int counts_len,
+ struct bpf_link *link, int iter_fd)
+{
+ int *close_idx = NULL;
+ int i;
+
+ /* Iterate through all listening sockets. */
+ read_n(iter_fd, listen_socks_len, counts, counts_len);
+
+ /* Make sure we saw all listening sockets exactly once. */
+ check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+ counts, counts_len);
+
+ /* Iterate through the first established socket. */
+ read_n(iter_fd, 1, counts, counts_len);
+
+	/* Make sure we saw one established socket. */
+ check_n_were_seen_once(established_socks, established_socks_len, 1,
+ counts, counts_len);
+
+ /* Close all remaining sockets to exhaust the list of saved cookies and
+ * exit without putting any sockets into the batch on the next read.
+ */
+ close_idx = malloc(sizeof(int) * (established_socks_len - 1));
+ if (!ASSERT_OK_PTR(close_idx, "close_idx malloc"))
+ return;
+ for (i = 0; i < established_socks_len - 1; i++) {
+ close_idx[i] = get_nth_socket(established_socks,
+ established_socks_len, link,
+ listen_socks_len + i);
+ if (!ASSERT_GE(close_idx[i], 0, "close_idx"))
+ return;
+ }
+
+ for (i = 0; i < established_socks_len - 1; i++) {
+ destroy(established_socks[close_idx[i]]);
+ established_socks[close_idx[i]] = -1;
+ }
+
+ /* Make sure there are no more sockets returned */
+ ASSERT_EQ(read_n(iter_fd, -1, counts, counts_len), 0, "read_n");
+ free(close_idx);
+}
+
+static void add_some(int family, int sock_type, const char *addr, __u16 port,
+ int *socks, int socks_len, int *established_socks,
+ int established_socks_len, struct sock_count *counts,
+ int counts_len, struct bpf_link *link, int iter_fd)
+{
+ int *new_socks = NULL;
+
+ /* Iterate through the first socks_len - 1 sockets. */
+ read_n(iter_fd, socks_len - 1, counts, counts_len);
+
+ /* Make sure we saw socks_len - 1 sockets exactly once. */
+ check_n_were_seen_once(socks, socks_len, socks_len - 1, counts,
+ counts_len);
+
+ /* Double the number of sockets in the bucket. */
+ new_socks = start_reuseport_server(family, sock_type, addr, port, 0,
+ socks_len);
+ if (!ASSERT_OK_PTR(new_socks, "start_reuseport_server"))
+ goto done;
+
+ /* Iterate through the rest of the sockets. */
+ read_n(iter_fd, -1, counts, counts_len);
+
+ /* Make sure each of the original sockets was seen exactly once. */
+ check_n_were_seen_once(socks, socks_len, socks_len, counts,
+ counts_len);
+done:
+ free_fds(new_socks, socks_len);
+}
+
+static void add_some_established(int family, int sock_type, const char *addr,
+ __u16 port, int *listen_socks,
+ int listen_socks_len, int *established_socks,
+ int established_socks_len,
+ struct sock_count *counts,
+ int counts_len, struct bpf_link *link,
+ int iter_fd)
+{
+ int *new_socks = NULL;
+
+ /* Iterate through all listening sockets. */
+ read_n(iter_fd, listen_socks_len, counts, counts_len);
+
+ /* Make sure we saw all listening sockets exactly once. */
+ check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+ counts, counts_len);
+
+ /* Iterate through the first established_socks_len - 1 sockets. */
+ read_n(iter_fd, established_socks_len - 1, counts, counts_len);
+
+ /* Make sure we saw established_socks_len - 1 sockets exactly once. */
+ check_n_were_seen_once(established_socks, established_socks_len,
+ established_socks_len - 1, counts, counts_len);
+
+ /* Double the number of established sockets in the bucket. */
+ new_socks = connect_to_server(family, sock_type, addr, port,
+ established_socks_len / 2, listen_socks,
+ listen_socks_len);
+ if (!ASSERT_OK_PTR(new_socks, "connect_to_server"))
+ goto done;
+
+ /* Iterate through the rest of the sockets. */
+ read_n(iter_fd, -1, counts, counts_len);
+
+ /* Make sure each of the original sockets was seen exactly once. */
+ check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+ counts, counts_len);
+ check_n_were_seen_once(established_socks, established_socks_len,
+ established_socks_len, counts, counts_len);
+done:
+ free_fds(new_socks, established_socks_len);
+}
+
+static void force_realloc(int family, int sock_type, const char *addr,
+ __u16 port, int *socks, int socks_len,
+ int *established_socks, int established_socks_len,
+ struct sock_count *counts, int counts_len,
+ struct bpf_link *link, int iter_fd)
+{
+ int *new_socks = NULL;
+
+ /* Iterate through the first socket just to initialize the batch. */
+ read_n(iter_fd, 1, counts, counts_len);
+
+ /* Double the number of sockets in the bucket to force a realloc on the
+ * next read.
+ */
+ new_socks = start_reuseport_server(family, sock_type, addr, port, 0,
+ socks_len);
+ if (!ASSERT_OK_PTR(new_socks, "start_reuseport_server"))
+ goto done;
+
+ /* Iterate through the rest of the sockets. */
+ read_n(iter_fd, -1, counts, counts_len);
+
+ /* Make sure each socket from the first set was seen exactly once. */
+ check_n_were_seen_once(socks, socks_len, socks_len, counts,
+ counts_len);
+done:
+ free_fds(new_socks, socks_len);
+}
+
+static void force_realloc_established(int family, int sock_type,
+ const char *addr, __u16 port,
+ int *listen_socks, int listen_socks_len,
+ int *established_socks,
+ int established_socks_len,
+ struct sock_count *counts, int counts_len,
+ struct bpf_link *link, int iter_fd)
+{
+ /* Iterate through all sockets to trigger a realloc. */
+ read_n(iter_fd, -1, counts, counts_len);
+
+ /* Make sure each socket was seen exactly once. */
+ check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+ counts, counts_len);
+ check_n_were_seen_once(established_socks, established_socks_len,
+ established_socks_len, counts, counts_len);
+}
+
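+/* Describes one iterator-resume scenario: how the bucket is populated and
+ * which mutation the test callback applies between iterator reads.
+ */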
+struct test_case {
+ void (*test)(int family, int sock_type, const char *addr, __u16 port,
+ int *socks, int socks_len, int *established_socks,
+ int established_socks_len, struct sock_count *counts,
+ int counts_len, struct bpf_link *link, int iter_fd);
+ const char *description;
+ int ehash_buckets;
+ int connections;
+ int init_socks;
+ int max_socks;
+ int sock_type;
+ int family;
+};
+
+static struct test_case resume_tests[] = {
+ {
+ .description = "udp: resume after removing a seen socket",
+ .init_socks = nr_soreuse,
+ .max_socks = nr_soreuse,
+ .sock_type = SOCK_DGRAM,
+ .family = AF_INET6,
+ .test = remove_seen,
+ },
+ {
+ .description = "udp: resume after removing one unseen socket",
+ .init_socks = nr_soreuse,
+ .max_socks = nr_soreuse,
+ .sock_type = SOCK_DGRAM,
+ .family = AF_INET6,
+ .test = remove_unseen,
+ },
+ {
+ .description = "udp: resume after removing all unseen sockets",
+ .init_socks = nr_soreuse,
+ .max_socks = nr_soreuse,
+ .sock_type = SOCK_DGRAM,
+ .family = AF_INET6,
+ .test = remove_all,
+ },
+ {
+ .description = "udp: resume after adding a few sockets",
+ .init_socks = nr_soreuse,
+ .max_socks = nr_soreuse,
+ .sock_type = SOCK_DGRAM,
+ /* Use AF_INET so that new sockets are added to the head of the
+ * bucket's list.
+ */
+ .family = AF_INET,
+ .test = add_some,
+ },
+ {
+ .description = "udp: force a realloc to occur",
+ .init_socks = init_batch_size,
+ .max_socks = init_batch_size * 2,
+ .sock_type = SOCK_DGRAM,
+ /* Use AF_INET6 so that new sockets are added to the tail of the
+ * bucket's list, needing to be added to the next batch to force
+ * a realloc.
+ */
+ .family = AF_INET6,
+ .test = force_realloc,
+ },
+ {
+ .description = "tcp: resume after removing a seen socket (listening)",
+ .init_socks = nr_soreuse,
+ .max_socks = nr_soreuse,
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = remove_seen,
+ },
+ {
+ .description = "tcp: resume after removing one unseen socket (listening)",
+ .init_socks = nr_soreuse,
+ .max_socks = nr_soreuse,
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = remove_unseen,
+ },
+ {
+ .description = "tcp: resume after removing all unseen sockets (listening)",
+ .init_socks = nr_soreuse,
+ .max_socks = nr_soreuse,
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = remove_all,
+ },
+ {
+ .description = "tcp: resume after adding a few sockets (listening)",
+ .init_socks = nr_soreuse,
+ .max_socks = nr_soreuse,
+ .sock_type = SOCK_STREAM,
+ /* Use AF_INET so that new sockets are added to the head of the
+ * bucket's list.
+ */
+ .family = AF_INET,
+ .test = add_some,
+ },
+ {
+ .description = "tcp: force a realloc to occur (listening)",
+ .init_socks = init_batch_size,
+ .max_socks = init_batch_size * 2,
+ .sock_type = SOCK_STREAM,
+ /* Use AF_INET6 so that new sockets are added to the tail of the
+ * bucket's list, needing to be added to the next batch to force
+ * a realloc.
+ */
+ .family = AF_INET6,
+ .test = force_realloc,
+ },
+ {
+ .description = "tcp: resume after removing a seen socket (established)",
+ /* Force all established sockets into one bucket */
+ .ehash_buckets = 1,
+ .connections = nr_soreuse,
+ .init_socks = nr_soreuse,
+ /* Room for connect()ed and accept()ed sockets */
+ .max_socks = nr_soreuse * 3,
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = remove_seen_established,
+ },
+ {
+ .description = "tcp: resume after removing one unseen socket (established)",
+ /* Force all established sockets into one bucket */
+ .ehash_buckets = 1,
+ .connections = nr_soreuse,
+ .init_socks = nr_soreuse,
+ /* Room for connect()ed and accept()ed sockets */
+ .max_socks = nr_soreuse * 3,
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = remove_unseen_established,
+ },
+ {
+ .description = "tcp: resume after removing all unseen sockets (established)",
+ /* Force all established sockets into one bucket */
+ .ehash_buckets = 1,
+ .connections = nr_soreuse,
+ .init_socks = nr_soreuse,
+ /* Room for connect()ed and accept()ed sockets */
+ .max_socks = nr_soreuse * 3,
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = remove_all_established,
+ },
+ {
+ .description = "tcp: resume after adding a few sockets (established)",
+ /* Force all established sockets into one bucket */
+ .ehash_buckets = 1,
+ .connections = nr_soreuse,
+ .init_socks = nr_soreuse,
+ /* Room for connect()ed and accept()ed sockets */
+ .max_socks = nr_soreuse * 3,
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = add_some_established,
+ },
+ {
+ .description = "tcp: force a realloc to occur (established)",
+ /* Force all established sockets into one bucket */
+ .ehash_buckets = 1,
+ /* Bucket size will need to double when going from listening to
+ * established sockets.
+ */
+ .connections = init_batch_size,
+ .init_socks = nr_soreuse,
+ /* Room for connect()ed and accept()ed sockets */
+ .max_socks = nr_soreuse + (init_batch_size * 2),
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = force_realloc_established,
+ },
+};
+
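+/* Set up the socket bucket described by @tc - optionally inside a child
+ * netns with a reduced ehash table - attach the matching TCP/UDP iterator
+ * program, and invoke the scenario's test callback.
+ */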
+static void do_resume_test(struct test_case *tc)
+{
+ struct sock_iter_batch *skel = NULL;
+ struct sock_count *counts = NULL;
+ static const __u16 port = 10001;
+ struct nstoken *nstoken = NULL;
+ struct bpf_link *link = NULL;
+ int *established_fds = NULL;
+ int err, iter_fd = -1;
+ const char *addr;
+ int *fds = NULL;
+
+ if (tc->ehash_buckets) {
+ SYS_NOFAIL("ip netns del " TEST_CHILD_NS);
+ SYS(done, "sysctl -wq net.ipv4.tcp_child_ehash_entries=%d",
+ tc->ehash_buckets);
+ SYS(done, "ip netns add %s", TEST_CHILD_NS);
+ SYS(done, "ip -net %s link set dev lo up", TEST_CHILD_NS);
+ nstoken = open_netns(TEST_CHILD_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open_child_netns"))
+ goto done;
+ }
+
+ counts = calloc(tc->max_socks, sizeof(*counts));
+ if (!ASSERT_OK_PTR(counts, "counts"))
+ goto done;
+ skel = sock_iter_batch__open();
+ if (!ASSERT_OK_PTR(skel, "sock_iter_batch__open"))
+ goto done;
+
+ /* Prepare a bucket of sockets in the kernel hashtable */
+ addr = tc->family == AF_INET6 ? "::1" : "127.0.0.1";
+ fds = start_reuseport_server(tc->family, tc->sock_type, addr, port, 0,
+ tc->init_socks);
+ if (!ASSERT_OK_PTR(fds, "start_reuseport_server"))
+ goto done;
+ if (tc->connections) {
+ established_fds = connect_to_server(tc->family, tc->sock_type,
+ addr, port,
+ tc->connections, fds,
+ tc->init_socks);
+ if (!ASSERT_OK_PTR(established_fds, "connect_to_server"))
+ goto done;
+ }
+ skel->rodata->ports[0] = 0;
+ skel->rodata->ports[1] = 0;
+ skel->rodata->sf = tc->family;
+ skel->rodata->ss = 0;
+
+ err = sock_iter_batch__load(skel);
+ if (!ASSERT_OK(err, "sock_iter_batch__load"))
+ goto done;
+
+ link = bpf_program__attach_iter(tc->sock_type == SOCK_STREAM ?
+ skel->progs.iter_tcp_soreuse :
+ skel->progs.iter_udp_soreuse,
+ NULL);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_iter"))
+ goto done;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_OK_FD(iter_fd, "bpf_iter_create"))
+ goto done;
+
+ tc->test(tc->family, tc->sock_type, addr, port, fds, tc->init_socks,
+		 established_fds, tc->connections * 2, counts, tc->max_socks,
+ link, iter_fd);
+done:
+ close_netns(nstoken);
+ SYS_NOFAIL("ip netns del " TEST_CHILD_NS);
+ SYS_NOFAIL("sysctl -w net.ipv4.tcp_child_ehash_entries=0");
+ free(counts);
+ free_fds(fds, tc->init_socks);
+	free_fds(established_fds, tc->connections * 2);
+ if (iter_fd >= 0)
+ close(iter_fd);
+ bpf_link__destroy(link);
+ sock_iter_batch__destroy(skel);
+}
+
+static void do_resume_tests(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(resume_tests); i++) {
+ if (test__start_subtest(resume_tests[i].description)) {
+ do_resume_test(&resume_tests[i]);
+ }
+ }
+}
+
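+/* Fill two reuseport buckets, read one bucket only partially, release it,
+ * and verify that records from the two buckets are never interleaved and
+ * that the second bucket is still read in full, reading either one record
+ * at a time or a near-full bucket per read().
+ */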
+static void do_test(int sock_type, bool onebyone)
+{
+ int err, i, nread, to_read, total_read, iter_fd = -1;
+ struct iter_out outputs[nr_soreuse];
+ struct bpf_link *link = NULL;
+ struct sock_iter_batch *skel;
+ int first_idx, second_idx;
+ int *fds[2] = {};
+
+ skel = sock_iter_batch__open();
+ if (!ASSERT_OK_PTR(skel, "sock_iter_batch__open"))
+ return;
+
+ /* Prepare 2 buckets of sockets in the kernel hashtable */
+ for (i = 0; i < ARRAY_SIZE(fds); i++) {
+ int local_port;
+
+ fds[i] = start_reuseport_server(AF_INET6, sock_type, "::1", 0, 0,
+ nr_soreuse);
+ if (!ASSERT_OK_PTR(fds[i], "start_reuseport_server"))
+ goto done;
+ local_port = get_socket_local_port(*fds[i]);
+ if (!ASSERT_GE(local_port, 0, "get_socket_local_port"))
+ goto done;
+ skel->rodata->ports[i] = ntohs(local_port);
+ }
+ skel->rodata->sf = AF_INET6;
+ if (sock_type == SOCK_STREAM)
+ skel->rodata->ss = TCP_LISTEN;
+
+ err = sock_iter_batch__load(skel);
+ if (!ASSERT_OK(err, "sock_iter_batch__load"))
+ goto done;
+
+ link = bpf_program__attach_iter(sock_type == SOCK_STREAM ?
+ skel->progs.iter_tcp_soreuse :
+ skel->progs.iter_udp_soreuse,
+ NULL);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_iter"))
+ goto done;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_GE(iter_fd, 0, "bpf_iter_create"))
+ goto done;
+
+ /* Test reading a bucket (either from fds[0] or fds[1]).
+ * Only read "nr_soreuse - 1" number of sockets
+ * from a bucket and leave one socket out from
+ * that bucket on purpose.
+ */
+ to_read = (nr_soreuse - 1) * sizeof(*outputs);
+ total_read = 0;
+ first_idx = -1;
+ do {
+ nread = read(iter_fd, outputs, onebyone ? sizeof(*outputs) : to_read);
+ if (nread <= 0 || nread % sizeof(*outputs))
+ break;
+ total_read += nread;
+
+ if (first_idx == -1)
+ first_idx = outputs[0].idx;
+ for (i = 0; i < nread / sizeof(*outputs); i++)
+ ASSERT_EQ(outputs[i].idx, first_idx, "first_idx");
+ } while (total_read < to_read);
+ ASSERT_EQ(nread, onebyone ? sizeof(*outputs) : to_read, "nread");
+ ASSERT_EQ(total_read, to_read, "total_read");
+
+ free_fds(fds[first_idx], nr_soreuse);
+ fds[first_idx] = NULL;
+
+ /* Read the "whole" second bucket */
+ to_read = nr_soreuse * sizeof(*outputs);
+ total_read = 0;
+ second_idx = !first_idx;
+ do {
+ nread = read(iter_fd, outputs, onebyone ? sizeof(*outputs) : to_read);
+ if (nread <= 0 || nread % sizeof(*outputs))
+ break;
+ total_read += nread;
+
+ for (i = 0; i < nread / sizeof(*outputs); i++)
+ ASSERT_EQ(outputs[i].idx, second_idx, "second_idx");
+ } while (total_read <= to_read);
+ ASSERT_EQ(nread, 0, "nread");
+	/* Both so_reuseport ports should be in different buckets, so
+	 * total_read must equal the expected to_read.
+	 *
+	 * In the very unlikely case that both ports collide in the same
+	 * bucket, the bucket offset (i.e. 3) will be skipped, so the full
+	 * to_read number of bytes cannot be expected.
+	 */
+ if (skel->bss->bucket[0] != skel->bss->bucket[1])
+ ASSERT_EQ(total_read, to_read, "total_read");
+
+done:
+ for (i = 0; i < ARRAY_SIZE(fds); i++)
+ free_fds(fds[i], nr_soreuse);
+	if (iter_fd >= 0)
+		close(iter_fd);
+ bpf_link__destroy(link);
+ sock_iter_batch__destroy(skel);
+}
+
+void test_sock_iter_batch(void)
+{
+ struct nstoken *nstoken = NULL;
+
+ SYS_NOFAIL("ip netns del " TEST_NS);
+ SYS(done, "ip netns add %s", TEST_NS);
+ SYS(done, "ip -net %s link set dev lo up", TEST_NS);
+
+ nstoken = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto done;
+
+ if (test__start_subtest("tcp")) {
+ do_test(SOCK_STREAM, true);
+ do_test(SOCK_STREAM, false);
+ }
+ if (test__start_subtest("udp")) {
+ do_test(SOCK_DGRAM, true);
+ do_test(SOCK_DGRAM, false);
+ }
+ do_resume_tests();
+ close_netns(nstoken);
+
+done:
+ SYS_NOFAIL("ip netns del " TEST_NS);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_post_bind.c b/tools/testing/selftests/bpf/prog_tests/sock_post_bind.c
new file mode 100644
index 000000000000..788135c9c673
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sock_post_bind.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+
+#define TEST_NS "sock_post_bind"
+
+static char bpf_log_buf[4096];
+
+static struct sock_post_bind_test {
+ const char *descr;
+ /* BPF prog properties */
+ const struct bpf_insn insns[64];
+ enum bpf_attach_type attach_type;
+ enum bpf_attach_type expected_attach_type;
+ /* Socket properties */
+ int domain;
+ int type;
+ /* Endpoint to bind() to */
+ const char *ip;
+ unsigned short port;
+ unsigned short port_retry;
+
+ /* Expected test result */
+ enum {
+ ATTACH_REJECT,
+ BIND_REJECT,
+ SUCCESS,
+ RETRY_SUCCESS,
+ RETRY_REJECT
+ } result;
+} tests[] = {
+ {
+ .descr = "attach type mismatch bind4 vs bind6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .result = ATTACH_REJECT,
+ },
+ {
+ .descr = "attach type mismatch bind6 vs bind4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .result = ATTACH_REJECT,
+ },
+ {
+ .descr = "attach type mismatch default vs bind4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = 0,
+ .attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .result = ATTACH_REJECT,
+ },
+ {
+ .descr = "attach type mismatch bind6 vs sock_create",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .result = ATTACH_REJECT,
+ },
+ {
+ .descr = "bind4 reject all",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ .ip = "0.0.0.0",
+ .result = BIND_REJECT,
+ },
+ {
+ .descr = "bind6 reject all",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ .ip = "::",
+ .result = BIND_REJECT,
+ },
+ {
+ .descr = "bind6 deny specific IP & port",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* if (ip == expected && port == expected) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_ip6[3])),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+ __bpf_constant_ntohl(0x00000001), 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_port)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
+
+ /* return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+
+ /* else return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ .ip = "::1",
+ .port = 8193,
+ .result = BIND_REJECT,
+ },
+ {
+ .descr = "bind4 allow specific IP & port",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* if (ip == expected && port == expected) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_ip4)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+ __bpf_constant_ntohl(0x7F000001), 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_port)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ .ip = "127.0.0.1",
+ .port = 4098,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "bind4 deny specific IP & port of TCP, and retry",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* if (ip == expected && port == expected) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_ip4)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+ __bpf_constant_ntohl(0x7F000001), 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_port)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
+
+ /* return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+
+ /* else return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ .ip = "127.0.0.1",
+ .port = 4098,
+ .port_retry = 5000,
+ .result = RETRY_SUCCESS,
+ },
+ {
+ .descr = "bind4 deny specific IP & port of UDP, and retry",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* if (ip == expected && port == expected) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_ip4)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+ __bpf_constant_ntohl(0x7F000001), 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_port)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
+
+ /* return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+
+ /* else return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .domain = AF_INET,
+ .type = SOCK_DGRAM,
+ .ip = "127.0.0.1",
+ .port = 4098,
+ .port_retry = 5000,
+ .result = RETRY_SUCCESS,
+ },
+ {
+ .descr = "bind6 deny specific IP & port, and retry",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* if (ip == expected && port == expected) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_ip6[3])),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+ __bpf_constant_ntohl(0x00000001), 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_port)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
+
+ /* return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+
+ /* else return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ .ip = "::1",
+ .port = 8193,
+ .port_retry = 9000,
+ .result = RETRY_SUCCESS,
+ },
+ {
+ .descr = "bind4 allow all",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ .ip = "0.0.0.0",
+ .result = SUCCESS,
+ },
+ {
+ .descr = "bind6 allow all",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .attach_type = BPF_CGROUP_INET6_POST_BIND,
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ .ip = "::",
+ .result = SUCCESS,
+ },
+};
+
+static int load_prog(const struct bpf_insn *insns,
+ enum bpf_attach_type expected_attach_type)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .expected_attach_type = expected_attach_type,
+ .log_level = 2,
+ .log_buf = bpf_log_buf,
+ .log_size = sizeof(bpf_log_buf),
+ );
+ int fd, insns_cnt = 0;
+
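+	/* Count the instructions up to and including the first BPF_EXIT. */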
+ for (;
+ insns[insns_cnt].code != (BPF_JMP | BPF_EXIT);
+ insns_cnt++) {
+ }
+ insns_cnt++;
+
+ fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns,
+ insns_cnt, &opts);
+ if (fd < 0)
+ fprintf(stderr, "%s\n", bpf_log_buf);
+
+ return fd;
+}
+
+static int bind_sock(int domain, int type, const char *ip,
+ unsigned short port, unsigned short port_retry)
+{
+ struct sockaddr_storage addr;
+ struct sockaddr_in6 *addr6;
+ struct sockaddr_in *addr4;
+ int sockfd = -1;
+ socklen_t len;
+ int res = SUCCESS;
+
+ sockfd = socket(domain, type, 0);
+ if (sockfd < 0)
+ goto err;
+
+ memset(&addr, 0, sizeof(addr));
+
+ if (domain == AF_INET) {
+ len = sizeof(struct sockaddr_in);
+ addr4 = (struct sockaddr_in *)&addr;
+ addr4->sin_family = domain;
+ addr4->sin_port = htons(port);
+ if (inet_pton(domain, ip, (void *)&addr4->sin_addr) != 1)
+ goto err;
+ } else if (domain == AF_INET6) {
+ len = sizeof(struct sockaddr_in6);
+ addr6 = (struct sockaddr_in6 *)&addr;
+ addr6->sin6_family = domain;
+ addr6->sin6_port = htons(port);
+ if (inet_pton(domain, ip, (void *)&addr6->sin6_addr) != 1)
+ goto err;
+ } else {
+ goto err;
+ }
+
+ if (bind(sockfd, (const struct sockaddr *)&addr, len) == -1) {
+		/* sys_bind() may fail for different reasons; errno has to be
+		 * checked to confirm that the BPF program rejected it.
+		 */
+ if (errno != EPERM)
+ goto err;
+ if (port_retry)
+ goto retry;
+ res = BIND_REJECT;
+ goto out;
+ }
+
+ goto out;
+retry:
+ if (domain == AF_INET)
+ addr4->sin_port = htons(port_retry);
+ else
+ addr6->sin6_port = htons(port_retry);
+ if (bind(sockfd, (const struct sockaddr *)&addr, len) == -1) {
+ if (errno != EPERM)
+ goto err;
+ res = RETRY_REJECT;
+ } else {
+ res = RETRY_SUCCESS;
+ }
+ goto out;
+err:
+ res = -1;
+out:
+ close(sockfd);
+ return res;
+}
+
+static int run_test(int cgroup_fd, struct sock_post_bind_test *test)
+{
+ int err, prog_fd, res, ret = 0;
+
+ prog_fd = load_prog(test->insns, test->expected_attach_type);
+ if (prog_fd < 0)
+ goto err;
+
+ err = bpf_prog_attach(prog_fd, cgroup_fd, test->attach_type, 0);
+ if (err < 0) {
+ if (test->result == ATTACH_REJECT)
+ goto out;
+ else
+ goto err;
+ }
+
+ res = bind_sock(test->domain, test->type, test->ip, test->port,
+ test->port_retry);
+ if (res > 0 && test->result == res)
+ goto out;
+err:
+ ret = -1;
+out:
+ /* Detaching w/o checking return code: best effort attempt. */
+ if (prog_fd != -1)
+ bpf_prog_detach(cgroup_fd, test->attach_type);
+ close(prog_fd);
+ return ret;
+}
+
+void test_sock_post_bind(void)
+{
+ struct netns_obj *ns;
+ int cgroup_fd;
+ int i;
+
+ cgroup_fd = test__join_cgroup("/post_bind");
+ if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup"))
+ return;
+
+ ns = netns_new(TEST_NS, true);
+ if (!ASSERT_OK_PTR(ns, "netns_new"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (!test__start_subtest(tests[i].descr))
+ continue;
+
+ ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr);
+ }
+
+cleanup:
+ netns_free(ns);
+ close(cgroup_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/socket_cookie.c b/tools/testing/selftests/bpf/prog_tests/socket_cookie.c
new file mode 100644
index 000000000000..232db28dde18
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/socket_cookie.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Google LLC.
+// Copyright (c) 2018 Facebook
+
+#include <test_progs.h>
+#include "socket_cookie_prog.skel.h"
+#include "network_helpers.h"
+
+static int duration;
+
+struct socket_cookie {
+ __u64 cookie_key;
+ __u32 cookie_value;
+};
+
+void test_socket_cookie(void)
+{
+ int server_fd = 0, client_fd = 0, cgroup_fd = 0, err = 0;
+ socklen_t addr_len = sizeof(struct sockaddr_in6);
+ struct socket_cookie_prog *skel;
+ __u32 cookie_expected_value;
+ struct sockaddr_in6 addr;
+ struct socket_cookie val;
+
+ skel = socket_cookie_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ cgroup_fd = test__join_cgroup("/socket_cookie");
+ if (CHECK(cgroup_fd < 0, "join_cgroup", "cgroup creation failed\n"))
+ goto out;
+
+ skel->links.set_cookie = bpf_program__attach_cgroup(
+ skel->progs.set_cookie, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.set_cookie, "prog_attach"))
+ goto close_cgroup_fd;
+
+ skel->links.update_cookie_sockops = bpf_program__attach_cgroup(
+ skel->progs.update_cookie_sockops, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.update_cookie_sockops, "prog_attach"))
+ goto close_cgroup_fd;
+
+ skel->links.update_cookie_tracing = bpf_program__attach(
+ skel->progs.update_cookie_tracing);
+ if (!ASSERT_OK_PTR(skel->links.update_cookie_tracing, "prog_attach"))
+ goto close_cgroup_fd;
+
+ server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+ if (CHECK(server_fd < 0, "start_server", "errno %d\n", errno))
+ goto close_cgroup_fd;
+
+ client_fd = connect_to_fd(server_fd, 0);
+ if (CHECK(client_fd < 0, "connect_to_fd", "errno %d\n", errno))
+ goto close_server_fd;
+
+ err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.socket_cookies),
+ &client_fd, &val);
+ if (!ASSERT_OK(err, "map_lookup(socket_cookies)"))
+ goto close_client_fd;
+
+ err = getsockname(client_fd, (struct sockaddr *)&addr, &addr_len);
+ if (!ASSERT_OK(err, "getsockname"))
+ goto close_client_fd;
+
+ cookie_expected_value = (ntohs(addr.sin6_port) << 8) | 0xFF;
+ ASSERT_EQ(val.cookie_value, cookie_expected_value, "cookie_value");
+
+close_client_fd:
+ close(client_fd);
+close_server_fd:
+ close(server_fd);
+close_cgroup_fd:
+ close(cgroup_fd);
+out:
+ socket_cookie_prog__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/socket_helpers.h b/tools/testing/selftests/bpf/prog_tests/socket_helpers.h
new file mode 100644
index 000000000000..0d59503a0c73
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/socket_helpers.h
@@ -0,0 +1,473 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __SOCKET_HELPERS__
+#define __SOCKET_HELPERS__
+
+#include <sys/un.h>
+#include <linux/vm_sockets.h>
+
+/* include/linux/net.h */
+#define SOCK_TYPE_MASK 0xf
+
+#define IO_TIMEOUT_SEC 30
+#define MAX_STRERR_LEN 256
+
+/* workaround for older vm_sockets.h */
+#ifndef VMADDR_CID_LOCAL
+#define VMADDR_CID_LOCAL 1
+#endif
+
+/* include/linux/compiler_types.h */
+#if __STDC_VERSION__ < 202311L && !defined(auto)
+# define auto __auto_type
+#endif
+
+/* include/linux/cleanup.h */
+#define __get_and_null(p, nullvalue) \
+ ({ \
+ auto __ptr = &(p); \
+ auto __val = *__ptr; \
+ *__ptr = nullvalue; \
+ __val; \
+ })
+
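+/* Move an fd out of a cleanup-managed variable: returns the fd and resets
+ * the variable to -EBADF so the cleanup handler won't close it.
+ */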
+#define take_fd(fd) __get_and_null(fd, -EBADF)
+
+/* Wrappers that fail the test on error and report it. */
+
+#define _FAIL(errnum, fmt...) \
+ ({ \
+ error_at_line(0, (errnum), __func__, __LINE__, fmt); \
+ CHECK_FAIL(true); \
+ })
+#define FAIL(fmt...) _FAIL(0, fmt)
+#define FAIL_ERRNO(fmt...) _FAIL(errno, fmt)
+#define FAIL_LIBBPF(err, msg) \
+ ({ \
+ char __buf[MAX_STRERR_LEN]; \
+ libbpf_strerror((err), __buf, sizeof(__buf)); \
+ FAIL("%s: %s", (msg), __buf); \
+ })
+
+#define xaccept_nonblock(fd, addr, len) \
+ ({ \
+ int __ret = \
+ accept_timeout((fd), (addr), (len), IO_TIMEOUT_SEC); \
+ if (__ret == -1) \
+ FAIL_ERRNO("accept"); \
+ __ret; \
+ })
+
+#define xbind(fd, addr, len) \
+ ({ \
+ int __ret = bind((fd), (addr), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("bind"); \
+ __ret; \
+ })
+
+#define xclose(fd) \
+ ({ \
+ int __ret = close((fd)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("close"); \
+ __ret; \
+ })
+
+#define xconnect(fd, addr, len) \
+ ({ \
+ int __ret = connect((fd), (addr), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("connect"); \
+ __ret; \
+ })
+
+#define xgetsockname(fd, addr, len) \
+ ({ \
+ int __ret = getsockname((fd), (addr), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("getsockname"); \
+ __ret; \
+ })
+
+#define xgetsockopt(fd, level, name, val, len) \
+ ({ \
+ int __ret = getsockopt((fd), (level), (name), (val), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("getsockopt(" #name ")"); \
+ __ret; \
+ })
+
+#define xlisten(fd, backlog) \
+ ({ \
+ int __ret = listen((fd), (backlog)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("listen"); \
+ __ret; \
+ })
+
+#define xsetsockopt(fd, level, name, val, len) \
+ ({ \
+ int __ret = setsockopt((fd), (level), (name), (val), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("setsockopt(" #name ")"); \
+ __ret; \
+ })
+
+#define xsend(fd, buf, len, flags) \
+ ({ \
+ ssize_t __ret = send((fd), (buf), (len), (flags)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("send"); \
+ __ret; \
+ })
+
+#define xrecv_nonblock(fd, buf, len, flags) \
+ ({ \
+ ssize_t __ret = recv_timeout((fd), (buf), (len), (flags), \
+ IO_TIMEOUT_SEC); \
+ if (__ret == -1) \
+ FAIL_ERRNO("recv"); \
+ __ret; \
+ })
+
+#define xsocket(family, sotype, flags) \
+ ({ \
+ int __ret = socket(family, sotype, flags); \
+ if (__ret == -1) \
+ FAIL_ERRNO("socket"); \
+ __ret; \
+ })
+
+static inline void close_fd(int *fd)
+{
+ if (*fd >= 0)
+ xclose(*fd);
+}
+
+#define __close_fd __attribute__((cleanup(close_fd)))
+
+static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss)
+{
+ return (struct sockaddr *)ss;
+}
+
+static inline void init_addr_loopback4(struct sockaddr_storage *ss,
+ socklen_t *len)
+{
+ struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss));
+
+ addr4->sin_family = AF_INET;
+ addr4->sin_port = 0;
+ addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ *len = sizeof(*addr4);
+}
+
+static inline void init_addr_loopback6(struct sockaddr_storage *ss,
+ socklen_t *len)
+{
+ struct sockaddr_in6 *addr6 = memset(ss, 0, sizeof(*ss));
+
+ addr6->sin6_family = AF_INET6;
+ addr6->sin6_port = 0;
+ addr6->sin6_addr = in6addr_loopback;
+ *len = sizeof(*addr6);
+}
+
+static inline void init_addr_loopback_unix(struct sockaddr_storage *ss,
+ socklen_t *len)
+{
+ struct sockaddr_un *addr = memset(ss, 0, sizeof(*ss));
+
+ addr->sun_family = AF_UNIX;
+ *len = sizeof(sa_family_t);
+}
+
+static inline void init_addr_loopback_vsock(struct sockaddr_storage *ss,
+ socklen_t *len)
+{
+ struct sockaddr_vm *addr = memset(ss, 0, sizeof(*ss));
+
+ addr->svm_family = AF_VSOCK;
+ addr->svm_port = VMADDR_PORT_ANY;
+ addr->svm_cid = VMADDR_CID_LOCAL;
+ *len = sizeof(*addr);
+}
+
+static inline void init_addr_loopback(int family, struct sockaddr_storage *ss,
+ socklen_t *len)
+{
+ switch (family) {
+ case AF_INET:
+ init_addr_loopback4(ss, len);
+ return;
+ case AF_INET6:
+ init_addr_loopback6(ss, len);
+ return;
+ case AF_UNIX:
+ init_addr_loopback_unix(ss, len);
+ return;
+ case AF_VSOCK:
+ init_addr_loopback_vsock(ss, len);
+ return;
+ default:
+ FAIL("unsupported address family %d", family);
+ }
+}
+
+static inline int enable_reuseport(int s, int progfd)
+{
+ int err, one = 1;
+
+ err = xsetsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
+ if (err)
+ return -1;
+ err = xsetsockopt(s, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &progfd,
+ sizeof(progfd));
+ if (err)
+ return -1;
+
+ return 0;
+}
+
+static inline int socket_loopback_reuseport(int family, int sotype, int progfd)
+{
+ struct sockaddr_storage addr;
+ socklen_t len = 0;
+ int err, s;
+
+ init_addr_loopback(family, &addr, &len);
+
+ s = xsocket(family, sotype, 0);
+ if (s == -1)
+ return -1;
+
+ if (progfd >= 0)
+ enable_reuseport(s, progfd);
+
+ err = xbind(s, sockaddr(&addr), len);
+ if (err)
+ goto close;
+
+ if (sotype & SOCK_DGRAM)
+ return s;
+
+ err = xlisten(s, SOMAXCONN);
+ if (err)
+ goto close;
+
+ return s;
+close:
+ xclose(s);
+ return -1;
+}
+
+static inline int socket_loopback(int family, int sotype)
+{
+ return socket_loopback_reuseport(family, sotype, -1);
+}
+
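+/* Wait up to @timeout_sec for a non-blocking connect() to complete and
+ * report its result via SO_ERROR.
+ */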
+static inline int poll_connect(int fd, unsigned int timeout_sec)
+{
+ struct timeval timeout = { .tv_sec = timeout_sec };
+ fd_set wfds;
+ int r, eval;
+ socklen_t esize = sizeof(eval);
+
+ FD_ZERO(&wfds);
+ FD_SET(fd, &wfds);
+
+ r = select(fd + 1, NULL, &wfds, NULL, &timeout);
+ if (r == 0)
+ errno = ETIME;
+ if (r != 1)
+ return -1;
+
+ if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &eval, &esize) < 0)
+ return -1;
+ if (eval != 0) {
+ errno = eval;
+ return -1;
+ }
+
+ return 0;
+}
+
+static inline int poll_read(int fd, unsigned int timeout_sec)
+{
+ struct timeval timeout = { .tv_sec = timeout_sec };
+ fd_set rfds;
+ int r;
+
+ FD_ZERO(&rfds);
+ FD_SET(fd, &rfds);
+
+ r = select(fd + 1, &rfds, NULL, NULL, &timeout);
+ if (r == 0)
+ errno = ETIME;
+
+ return r == 1 ? 0 : -1;
+}
+
+static inline int accept_timeout(int fd, struct sockaddr *addr, socklen_t *len,
+ unsigned int timeout_sec)
+{
+ if (poll_read(fd, timeout_sec))
+ return -1;
+
+ return accept(fd, addr, len);
+}
+
+static inline int recv_timeout(int fd, void *buf, size_t len, int flags,
+ unsigned int timeout_sec)
+{
+ if (poll_read(fd, timeout_sec))
+ return -1;
+
+ return recv(fd, buf, len, flags);
+}
+
+
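+/* Create a connected pair over loopback: on success *p0 holds the peer
+ * (accepted or connected-back) socket and *p1 the client. Any fd not
+ * handed off via take_fd() is closed automatically by __close_fd.
+ */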
+static inline int create_pair(int family, int sotype, int *p0, int *p1)
+{
+ __close_fd int s, c = -1, p = -1;
+ struct sockaddr_storage addr;
+ socklen_t len;
+ int err;
+
+ s = socket_loopback(family, sotype);
+ if (s < 0)
+ return s;
+
+ c = xsocket(family, sotype, 0);
+ if (c < 0)
+ return c;
+
+ init_addr_loopback(family, &addr, &len);
+ err = xbind(c, sockaddr(&addr), len);
+ if (err)
+ return err;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ return err;
+
+ err = connect(c, sockaddr(&addr), len);
+ if (err) {
+ if (errno != EINPROGRESS) {
+ FAIL_ERRNO("connect");
+ return err;
+ }
+
+ err = poll_connect(c, IO_TIMEOUT_SEC);
+ if (err) {
+ FAIL_ERRNO("poll_connect");
+ return err;
+ }
+ }
+
+ switch (sotype & SOCK_TYPE_MASK) {
+ case SOCK_DGRAM:
+ err = xgetsockname(c, sockaddr(&addr), &len);
+ if (err)
+ return err;
+
+ err = xconnect(s, sockaddr(&addr), len);
+ if (err)
+ return err;
+
+ *p0 = take_fd(s);
+ break;
+ case SOCK_STREAM:
+ case SOCK_SEQPACKET:
+ p = xaccept_nonblock(s, NULL, NULL);
+ if (p < 0)
+ return p;
+
+ *p0 = take_fd(p);
+ break;
+ default:
+ FAIL("Unsupported socket type %#x", sotype);
+ return -EOPNOTSUPP;
+ }
+
+ *p1 = take_fd(c);
+ return 0;
+}
+
+static inline int create_socket_pairs(int family, int sotype, int *c0, int *c1,
+ int *p0, int *p1)
+{
+ int err;
+
+ err = create_pair(family, sotype, c0, p0);
+ if (err)
+ return err;
+
+ err = create_pair(family, sotype, c1, p1);
+ if (err) {
+ close(*c0);
+ close(*p0);
+ }
+
+ return err;
+}
+
+static inline const char *socket_kind_to_str(int sock_fd)
+{
+ socklen_t opt_len;
+ int domain, type;
+
+ opt_len = sizeof(domain);
+ if (getsockopt(sock_fd, SOL_SOCKET, SO_DOMAIN, &domain, &opt_len))
+ FAIL_ERRNO("getsockopt(SO_DOMAIN)");
+
+ opt_len = sizeof(type);
+ if (getsockopt(sock_fd, SOL_SOCKET, SO_TYPE, &type, &opt_len))
+ FAIL_ERRNO("getsockopt(SO_TYPE)");
+
+ switch (domain) {
+ case AF_INET:
+ switch (type) {
+ case SOCK_STREAM:
+ return "tcp4";
+ case SOCK_DGRAM:
+ return "udp4";
+ }
+ break;
+ case AF_INET6:
+ switch (type) {
+ case SOCK_STREAM:
+ return "tcp6";
+ case SOCK_DGRAM:
+ return "udp6";
+ }
+ break;
+ case AF_UNIX:
+ switch (type) {
+ case SOCK_STREAM:
+ return "u_str";
+ case SOCK_DGRAM:
+ return "u_dgr";
+ case SOCK_SEQPACKET:
+ return "u_seq";
+ }
+ break;
+ case AF_VSOCK:
+ switch (type) {
+ case SOCK_STREAM:
+ return "v_str";
+ case SOCK_DGRAM:
+ return "v_dgr";
+ case SOCK_SEQPACKET:
+ return "v_seq";
+ }
+ break;
+ }
+
+ return "???";
+}
+
+#endif // __SOCKET_HELPERS__
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
index 85f73261fab0..1e3e4392dcca 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -1,13 +1,22 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
#include <error.h>
+#include <netinet/tcp.h>
+#include <sys/epoll.h>
#include "test_progs.h"
#include "test_skmsg_load_helpers.skel.h"
#include "test_sockmap_update.skel.h"
#include "test_sockmap_invalid_update.skel.h"
+#include "test_sockmap_skb_verdict_attach.skel.h"
+#include "test_sockmap_progs_query.skel.h"
+#include "test_sockmap_pass_prog.skel.h"
+#include "test_sockmap_drop_prog.skel.h"
+#include "test_sockmap_change_tail.skel.h"
#include "bpf_iter_sockmap.skel.h"
+#include "sockmap_helpers.h"
+
#define TCP_REPAIR 19 /* TCP sock is under repair right now */
#define TCP_REPAIR_ON 1
@@ -24,21 +33,21 @@ static int connected_socket_v4(void)
int s, repair, err;
s = socket(AF_INET, SOCK_STREAM, 0);
- if (CHECK_FAIL(s == -1))
+ if (!ASSERT_GE(s, 0, "socket"))
goto error;
repair = TCP_REPAIR_ON;
err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
- if (CHECK_FAIL(err))
+ if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
goto error;
err = connect(s, (struct sockaddr *)&addr, len);
- if (CHECK_FAIL(err))
+ if (!ASSERT_OK(err, "connect"))
goto error;
repair = TCP_REPAIR_OFF_NO_WP;
err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
- if (CHECK_FAIL(err))
+ if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
goto error;
return s;
@@ -51,7 +60,7 @@ error:
static void compare_cookies(struct bpf_map *src, struct bpf_map *dst)
{
__u32 i, max_entries = bpf_map__max_entries(src);
- int err, duration = 0, src_fd, dst_fd;
+ int err, src_fd, dst_fd;
src_fd = bpf_map__fd(src);
dst_fd = bpf_map__fd(dst);
@@ -62,20 +71,18 @@ static void compare_cookies(struct bpf_map *src, struct bpf_map *dst)
err = bpf_map_lookup_elem(src_fd, &i, &src_cookie);
if (err && errno == ENOENT) {
err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
- CHECK(!err, "map_lookup_elem(dst)", "element %u not deleted\n", i);
- CHECK(err && errno != ENOENT, "map_lookup_elem(dst)", "%s\n",
- strerror(errno));
+ ASSERT_ERR(err, "map_lookup_elem(dst)");
+ ASSERT_EQ(errno, ENOENT, "map_lookup_elem(dst)");
continue;
}
- if (CHECK(err, "lookup_elem(src)", "%s\n", strerror(errno)))
+ if (!ASSERT_OK(err, "lookup_elem(src)"))
continue;
err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
- if (CHECK(err, "lookup_elem(dst)", "%s\n", strerror(errno)))
+ if (!ASSERT_OK(err, "lookup_elem(dst)"))
continue;
- CHECK(dst_cookie != src_cookie, "cookie mismatch",
- "%llu != %llu (pos %u)\n", dst_cookie, src_cookie, i);
+ ASSERT_EQ(dst_cookie, src_cookie, "cookie mismatch");
}
}
@@ -86,71 +93,157 @@ static void test_sockmap_create_update_free(enum bpf_map_type map_type)
int s, map, err;
s = connected_socket_v4();
- if (CHECK_FAIL(s == -1))
+ if (!ASSERT_GE(s, 0, "connected_socket_v4"))
return;
- map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0);
- if (CHECK_FAIL(map == -1)) {
- perror("bpf_create_map");
+ map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
+ if (!ASSERT_GE(map, 0, "bpf_map_create"))
goto out;
- }
err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
- if (CHECK_FAIL(err)) {
- perror("bpf_map_update");
+ if (!ASSERT_OK(err, "bpf_map_update"))
goto out;
- }
out:
close(map);
close(s);
}
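+/* Verify that a vsock is removed from the sockmap when its file
+ * descriptors are closed: re-inserting at the same key with
+ * BPF_NOEXIST must succeed afterwards.
+ */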
+static void test_sockmap_vsock_delete_on_close(void)
+{
+ int map, c, p, err, zero = 0;
+
+ map = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(int),
+ sizeof(int), 1, NULL);
+ if (!ASSERT_OK_FD(map, "bpf_map_create"))
+ return;
+
+ err = create_pair(AF_VSOCK, SOCK_STREAM, &c, &p);
+ if (!ASSERT_OK(err, "create_pair"))
+ goto close_map;
+
+ if (xbpf_map_update_elem(map, &zero, &c, BPF_NOEXIST))
+ goto close_socks;
+
+ xclose(c);
+ xclose(p);
+
+ err = create_pair(AF_VSOCK, SOCK_STREAM, &c, &p);
+ if (!ASSERT_OK(err, "create_pair"))
+ goto close_map;
+
+ err = bpf_map_update_elem(map, &zero, &c, BPF_NOEXIST);
+ ASSERT_OK(err, "after close(), bpf_map_update");
+
+close_socks:
+ xclose(c);
+ xclose(p);
+close_map:
+ xclose(map);
+}
+
static void test_skmsg_helpers(enum bpf_map_type map_type)
{
struct test_skmsg_load_helpers *skel;
int err, map, verdict;
skel = test_skmsg_load_helpers__open_and_load();
- if (CHECK_FAIL(!skel)) {
- perror("test_skmsg_load_helpers__open_and_load");
+ if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
return;
- }
verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
map = bpf_map__fd(skel->maps.sock_map);
err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
- if (CHECK_FAIL(err)) {
- perror("bpf_prog_attach");
+ if (!ASSERT_OK(err, "bpf_prog_attach"))
goto out;
- }
err = bpf_prog_detach2(verdict, map, BPF_SK_MSG_VERDICT);
- if (CHECK_FAIL(err)) {
- perror("bpf_prog_detach2");
+ if (!ASSERT_OK(err, "bpf_prog_detach2"))
+ goto out;
+out:
+ test_skmsg_load_helpers__destroy(skel);
+}
+
+static void test_skmsg_helpers_with_link(enum bpf_map_type map_type)
+{
+ struct bpf_program *prog, *prog_clone, *prog_clone2;
+ DECLARE_LIBBPF_OPTS(bpf_link_update_opts, opts);
+ struct test_skmsg_load_helpers *skel;
+ struct bpf_link *link, *link2;
+ int err, map;
+
+ skel = test_skmsg_load_helpers__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
+ return;
+
+ prog = skel->progs.prog_msg_verdict;
+ prog_clone = skel->progs.prog_msg_verdict_clone;
+ prog_clone2 = skel->progs.prog_msg_verdict_clone2;
+ map = bpf_map__fd(skel->maps.sock_map);
+
+ link = bpf_program__attach_sockmap(prog, map);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
+ goto out;
+
+ /* Fail since bpf_link for the same prog has been created. */
+ err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_MSG_VERDICT, 0);
+ if (!ASSERT_ERR(err, "bpf_prog_attach"))
+ goto out;
+
+ /* Fail since bpf_link for the same prog type has been created. */
+ link2 = bpf_program__attach_sockmap(prog_clone, map);
+ if (!ASSERT_ERR_PTR(link2, "bpf_program__attach_sockmap")) {
+ bpf_link__detach(link2);
goto out;
}
+
+ err = bpf_link__update_program(link, prog_clone);
+ if (!ASSERT_OK(err, "bpf_link__update_program"))
+ goto out;
+
+ /* Fail since a prog with different type attempts to do update. */
+ err = bpf_link__update_program(link, skel->progs.prog_skb_verdict);
+ if (!ASSERT_ERR(err, "bpf_link__update_program"))
+ goto out;
+
+ /* Fail since the old prog does not match the one in the kernel. */
+ opts.old_prog_fd = bpf_program__fd(prog_clone2);
+ opts.flags = BPF_F_REPLACE;
+ err = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), &opts);
+ if (!ASSERT_ERR(err, "bpf_link_update"))
+ goto out;
+
+ opts.old_prog_fd = bpf_program__fd(prog_clone);
+ opts.flags = BPF_F_REPLACE;
+ err = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), &opts);
+ if (!ASSERT_OK(err, "bpf_link_update"))
+ goto out;
out:
+ bpf_link__detach(link);
test_skmsg_load_helpers__destroy(skel);
}
static void test_sockmap_update(enum bpf_map_type map_type)
{
- struct bpf_prog_test_run_attr tattr;
- int err, prog, src, duration = 0;
+ int err, prog, src;
struct test_sockmap_update *skel;
struct bpf_map *dst_map;
const __u32 zero = 0;
char dummy[14] = {0};
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = dummy,
+ .data_size_in = sizeof(dummy),
+ .repeat = 1,
+ );
__s64 sk;
sk = connected_socket_v4();
- if (CHECK(sk == -1, "connected_socket_v4", "cannot connect\n"))
+ if (!ASSERT_NEQ(sk, -1, "connected_socket_v4"))
return;
skel = test_sockmap_update__open_and_load();
- if (CHECK(!skel, "open_and_load", "cannot load skeleton\n"))
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
goto close_sk;
prog = bpf_program__fd(skel->progs.copy_sock_map);
@@ -161,19 +254,13 @@ static void test_sockmap_update(enum bpf_map_type map_type)
dst_map = skel->maps.dst_sock_hash;
err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST);
- if (CHECK(err, "update_elem(src)", "errno=%u\n", errno))
+ if (!ASSERT_OK(err, "update_elem(src)"))
goto out;
- tattr = (struct bpf_prog_test_run_attr){
- .prog_fd = prog,
- .repeat = 1,
- .data_in = dummy,
- .data_size_in = sizeof(dummy),
- };
-
- err = bpf_prog_test_run_xattr(&tattr);
- if (CHECK_ATTR(err || !tattr.retval, "bpf_prog_test_run",
- "errno=%u retval=%u\n", errno, tattr.retval))
+ err = bpf_prog_test_run_opts(prog, &topts);
+ if (!ASSERT_OK(err, "test_run"))
+ goto out;
+ if (!ASSERT_NEQ(topts.retval, 0, "test_run retval"))
goto out;
compare_cookies(skel->maps.src, dst_map);
@@ -187,17 +274,16 @@ close_sk:
static void test_sockmap_invalid_update(void)
{
struct test_sockmap_invalid_update *skel;
- int duration = 0;
skel = test_sockmap_invalid_update__open_and_load();
- if (CHECK(skel, "open_and_load", "verifier accepted map_update\n"))
+ if (!ASSERT_NULL(skel, "open_and_load"))
test_sockmap_invalid_update__destroy(skel);
}
static void test_sockmap_copy(enum bpf_map_type map_type)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
- int err, len, src_fd, iter_fd, duration = 0;
+ int err, len, src_fd, iter_fd;
union bpf_iter_link_info linfo = {};
__u32 i, num_sockets, num_elems;
struct bpf_iter_sockmap *skel;
@@ -207,7 +293,7 @@ static void test_sockmap_copy(enum bpf_map_type map_type)
char buf[64];
skel = bpf_iter_sockmap__open_and_load();
- if (CHECK(!skel, "bpf_iter_sockmap__open_and_load", "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
return;
if (map_type == BPF_MAP_TYPE_SOCKMAP) {
@@ -221,7 +307,7 @@ static void test_sockmap_copy(enum bpf_map_type map_type)
}
sock_fd = calloc(num_sockets, sizeof(*sock_fd));
- if (CHECK(!sock_fd, "calloc(sock_fd)", "failed to allocate\n"))
+ if (!ASSERT_OK_PTR(sock_fd, "calloc(sock_fd)"))
goto out;
for (i = 0; i < num_sockets; i++)
@@ -231,11 +317,11 @@ static void test_sockmap_copy(enum bpf_map_type map_type)
for (i = 0; i < num_sockets; i++) {
sock_fd[i] = connected_socket_v4();
- if (CHECK(sock_fd[i] == -1, "connected_socket_v4", "cannot connect\n"))
+ if (!ASSERT_NEQ(sock_fd[i], -1, "connected_socket_v4"))
goto out;
err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST);
- if (CHECK(err, "map_update", "failed: %s\n", strerror(errno)))
+ if (!ASSERT_OK(err, "map_update"))
goto out;
}
@@ -243,26 +329,24 @@ static void test_sockmap_copy(enum bpf_map_type map_type)
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.copy, &opts);
- if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
- if (CHECK(len < 0, "read", "failed: %s\n", strerror(errno)))
+ if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
- if (CHECK(skel->bss->elems != num_elems, "elems", "got %u expected %u\n",
- skel->bss->elems, num_elems))
+ if (!ASSERT_EQ(skel->bss->elems, num_elems, "elems"))
goto close_iter;
- if (CHECK(skel->bss->socks != num_sockets, "socks", "got %u expected %u\n",
- skel->bss->socks, num_sockets))
+ if (!ASSERT_EQ(skel->bss->socks, num_sockets, "socks"))
goto close_iter;
compare_cookies(src, skel->maps.dst);
@@ -280,12 +364,692 @@ out:
bpf_iter_sockmap__destroy(skel);
}
+static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first,
+ enum bpf_attach_type second)
+{
+ struct test_sockmap_skb_verdict_attach *skel;
+ int err, map, verdict;
+
+ skel = test_sockmap_skb_verdict_attach__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+ map = bpf_map__fd(skel->maps.sock_map);
+
+ err = bpf_prog_attach(verdict, map, first, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach"))
+ goto out;
+
+ err = bpf_prog_attach(verdict, map, second, 0);
+ ASSERT_EQ(err, -EBUSY, "prog_attach_fail");
+
+ err = bpf_prog_detach2(verdict, map, first);
+ if (!ASSERT_OK(err, "bpf_prog_detach2"))
+ goto out;
+out:
+ test_sockmap_skb_verdict_attach__destroy(skel);
+}
+
+static void test_sockmap_skb_verdict_attach_with_link(void)
+{
+ struct test_sockmap_skb_verdict_attach *skel;
+ struct bpf_program *prog;
+ struct bpf_link *link;
+ int err, map;
+
+ skel = test_sockmap_skb_verdict_attach__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+ prog = skel->progs.prog_skb_verdict;
+ map = bpf_map__fd(skel->maps.sock_map);
+ link = bpf_program__attach_sockmap(prog, map);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
+ goto out;
+
+ bpf_link__detach(link);
+
+ err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach"))
+ goto out;
+
+ /* Fail since attaching with the same prog/map has been done. */
+ link = bpf_program__attach_sockmap(prog, map);
+ if (!ASSERT_ERR_PTR(link, "bpf_program__attach_sockmap"))
+ bpf_link__detach(link);
+
+ err = bpf_prog_detach2(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT);
+ if (!ASSERT_OK(err, "bpf_prog_detach2"))
+ goto out;
+out:
+ test_sockmap_skb_verdict_attach__destroy(skel);
+}
+
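+/* Return the id of the program referenced by prog_fd, or 0 on error. */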
+static __u32 query_prog_id(int prog_fd)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int err;
+
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") ||
+ !ASSERT_EQ(info_len, sizeof(info), "bpf_prog_get_info_by_fd"))
+ return 0;
+
+ return info.id;
+}
+
+static void test_sockmap_progs_query(enum bpf_attach_type attach_type)
+{
+ struct test_sockmap_progs_query *skel;
+ int err, map_fd, verdict_fd;
+ __u32 attach_flags = 0;
+ __u32 prog_ids[3] = {};
+ __u32 prog_cnt = 3;
+
+ skel = test_sockmap_progs_query__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_sockmap_progs_query__open_and_load"))
+ return;
+
+ map_fd = bpf_map__fd(skel->maps.sock_map);
+
+ if (attach_type == BPF_SK_MSG_VERDICT)
+ verdict_fd = bpf_program__fd(skel->progs.prog_skmsg_verdict);
+ else
+ verdict_fd = bpf_program__fd(skel->progs.prog_skb_verdict);
+
+ err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
+ &attach_flags, prog_ids, &prog_cnt);
+ ASSERT_OK(err, "bpf_prog_query failed");
+ ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
+ ASSERT_EQ(prog_cnt, 0, "wrong program count on query");
+
+ err = bpf_prog_attach(verdict_fd, map_fd, attach_type, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach failed"))
+ goto out;
+
+ prog_cnt = 1;
+ err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
+ &attach_flags, prog_ids, &prog_cnt);
+ ASSERT_OK(err, "bpf_prog_query failed");
+ ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
+ ASSERT_EQ(prog_cnt, 1, "wrong program count on query");
+ ASSERT_EQ(prog_ids[0], query_prog_id(verdict_fd),
+ "wrong prog_ids on query");
+
+ bpf_prog_detach2(verdict_fd, map_fd, attach_type);
+out:
+ test_sockmap_progs_query__destroy(skel);
+}
+
+#define MAX_EVENTS 10
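+/* After the peer shuts down its write side, the FIN must propagate
+ * through the verdict program: epoll reports EPOLLIN and recv() on
+ * the sockmap socket returns 0.
+ */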
+static void test_sockmap_skb_verdict_shutdown(void)
+{
+ int n, err, map, verdict, c1 = -1, p1 = -1;
+ struct epoll_event ev, events[MAX_EVENTS];
+ struct test_sockmap_pass_prog *skel;
+ int zero = 0;
+ int epollfd;
+ char b;
+
+ skel = test_sockmap_pass_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+ map = bpf_map__fd(skel->maps.sock_map_rx);
+
+ err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach"))
+ goto out;
+
+ err = create_pair(AF_INET, SOCK_STREAM, &c1, &p1);
+ if (err < 0)
+ goto out;
+
+ err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
+ if (err < 0)
+ goto out_close;
+
+ shutdown(p1, SHUT_WR);
+
+ ev.events = EPOLLIN;
+ ev.data.fd = c1;
+
+ epollfd = epoll_create1(0);
+ if (!ASSERT_GT(epollfd, -1, "epoll_create(0)"))
+ goto out_close;
+ err = epoll_ctl(epollfd, EPOLL_CTL_ADD, c1, &ev);
+ if (!ASSERT_OK(err, "epoll_ctl(EPOLL_CTL_ADD)"))
+ goto out_close;
+ err = epoll_wait(epollfd, events, MAX_EVENTS, -1);
+ if (!ASSERT_EQ(err, 1, "epoll_wait(fd)"))
+ goto out_close;
+
+ n = recv(c1, &b, 1, MSG_DONTWAIT);
+ ASSERT_EQ(n, 0, "recv(fin)");
+out_close:
+ close(c1);
+ close(p1);
+out:
+ test_sockmap_pass_prog__destroy(skel);
+}
+
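+/* FIONREAD must reflect what the verdict program did with the data:
+ * a pass program leaves it queued, so the full length is reported; a
+ * drop program consumes it immediately, so nothing is reported.
+ */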
+static void test_sockmap_skb_verdict_fionread(bool pass_prog)
+{
+ int err, map, verdict, c0 = -1, c1 = -1, p0 = -1, p1 = -1;
+ int expected, zero = 0, sent, recvd, avail;
+ struct test_sockmap_pass_prog *pass = NULL;
+ struct test_sockmap_drop_prog *drop = NULL;
+ char buf[256] = "0123456789";
+
+ if (pass_prog) {
+ pass = test_sockmap_pass_prog__open_and_load();
+ if (!ASSERT_OK_PTR(pass, "open_and_load"))
+ return;
+ verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
+ map = bpf_map__fd(pass->maps.sock_map_rx);
+ expected = sizeof(buf);
+ } else {
+ drop = test_sockmap_drop_prog__open_and_load();
+ if (!ASSERT_OK_PTR(drop, "open_and_load"))
+ return;
+ verdict = bpf_program__fd(drop->progs.prog_skb_verdict);
+ map = bpf_map__fd(drop->maps.sock_map_rx);
+ /* On drop, data is consumed immediately and copied_seq is advanced */
+ expected = 0;
+ }
+
+ err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach"))
+ goto out;
+
+ err = create_socket_pairs(AF_INET, SOCK_STREAM, &c0, &c1, &p0, &p1);
+ if (!ASSERT_OK(err, "create_socket_pairs()"))
+ goto out;
+
+ err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
+ goto out_close;
+
+ sent = xsend(p1, &buf, sizeof(buf), 0);
+ ASSERT_EQ(sent, sizeof(buf), "xsend(p0)");
+ err = ioctl(c1, FIONREAD, &avail);
+ ASSERT_OK(err, "ioctl(FIONREAD) error");
+ ASSERT_EQ(avail, expected, "ioctl(FIONREAD)");
+ /* On DROP test there will be no data to read */
+ if (pass_prog) {
+ recvd = recv_timeout(c1, &buf, sizeof(buf), MSG_DONTWAIT, IO_TIMEOUT_SEC);
+ ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)");
+ }
+
+out_close:
+ close(c0);
+ close(p0);
+ close(c1);
+ close(p1);
+out:
+ if (pass_prog)
+ test_sockmap_pass_prog__destroy(pass);
+ else
+ test_sockmap_drop_prog__destroy(drop);
+}
+
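+/* Exercise the change_tail helper from the skb verdict program. The
+ * assertions below expect "Tr" to be trimmed to one byte, "G" to be
+ * grown to two bytes, and the final "E" to make the helper fail with
+ * -EINVAL, as recorded in skel->data->change_tail_ret.
+ */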
+static void test_sockmap_skb_verdict_change_tail(void)
+{
+ struct test_sockmap_change_tail *skel;
+ int err, map, verdict;
+ int c1, p1, sent, recvd;
+ int zero = 0;
+ char buf[2];
+
+ skel = test_sockmap_change_tail__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+ verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+ map = bpf_map__fd(skel->maps.sock_map_rx);
+
+ err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach"))
+ goto out;
+ err = create_pair(AF_INET, SOCK_STREAM, &c1, &p1);
+ if (!ASSERT_OK(err, "create_pair()"))
+ goto out;
+ err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
+ goto out_close;
+ sent = xsend(p1, "Tr", 2, 0);
+ ASSERT_EQ(sent, 2, "xsend(p1)");
+ recvd = recv(c1, buf, 2, 0);
+ ASSERT_EQ(recvd, 1, "recv(c1)");
+ ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret");
+
+ sent = xsend(p1, "G", 1, 0);
+ ASSERT_EQ(sent, 1, "xsend(p1)");
+ recvd = recv(c1, buf, 2, 0);
+ ASSERT_EQ(recvd, 2, "recv(c1)");
+ ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret");
+
+ sent = xsend(p1, "E", 1, 0);
+ ASSERT_EQ(sent, 1, "xsend(p1)");
+ recvd = recv(c1, buf, 1, 0);
+ ASSERT_EQ(recvd, 1, "recv(c1)");
+ ASSERT_EQ(skel->data->change_tail_ret, -EINVAL, "change_tail_ret");
+
+out_close:
+ close(c1);
+ close(p1);
+out:
+ test_sockmap_change_tail__destroy(skel);
+}
+
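+/* MSG_PEEK must not consume data queued by the verdict program:
+ * FIONREAD still reports the full length after the peek and drops to
+ * zero only after a regular recv().
+ */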
+static void test_sockmap_skb_verdict_peek_helper(int map)
+{
+ int err, c1, p1, zero = 0, sent, recvd, avail;
+ char snd[256] = "0123456789";
+ char rcv[256] = "0";
+
+ err = create_pair(AF_INET, SOCK_STREAM, &c1, &p1);
+ if (!ASSERT_OK(err, "create_pair()"))
+ return;
+
+ err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
+ goto out_close;
+
+ sent = xsend(p1, snd, sizeof(snd), 0);
+ ASSERT_EQ(sent, sizeof(snd), "xsend(p1)");
+ recvd = recv(c1, rcv, sizeof(rcv), MSG_PEEK);
+ ASSERT_EQ(recvd, sizeof(rcv), "recv(c1)");
+ err = ioctl(c1, FIONREAD, &avail);
+ ASSERT_OK(err, "ioctl(FIONREAD) error");
+ ASSERT_EQ(avail, sizeof(snd), "after peek ioctl(FIONREAD)");
+ recvd = recv(c1, rcv, sizeof(rcv), 0);
+ ASSERT_EQ(recvd, sizeof(rcv), "recv(p0)");
+ err = ioctl(c1, FIONREAD, &avail);
+ ASSERT_OK(err, "ioctl(FIONREAD) error");
+ ASSERT_EQ(avail, 0, "after read ioctl(FIONREAD)");
+
+out_close:
+ close(c1);
+ close(p1);
+}
+
+static void test_sockmap_skb_verdict_peek(void)
+{
+ struct test_sockmap_pass_prog *pass;
+ int err, map, verdict;
+
+ pass = test_sockmap_pass_prog__open_and_load();
+ if (!ASSERT_OK_PTR(pass, "open_and_load"))
+ return;
+ verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
+ map = bpf_map__fd(pass->maps.sock_map_rx);
+
+ err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach"))
+ goto out;
+
+ test_sockmap_skb_verdict_peek_helper(map);
+
+out:
+ test_sockmap_pass_prog__destroy(pass);
+}
+
+static void test_sockmap_skb_verdict_peek_with_link(void)
+{
+ struct test_sockmap_pass_prog *pass;
+ struct bpf_program *prog;
+ struct bpf_link *link;
+ int err, map;
+
+ pass = test_sockmap_pass_prog__open_and_load();
+ if (!ASSERT_OK_PTR(pass, "open_and_load"))
+ return;
+ prog = pass->progs.prog_skb_verdict;
+ map = bpf_map__fd(pass->maps.sock_map_rx);
+ link = bpf_program__attach_sockmap(prog, map);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
+ goto out;
+
+ err = bpf_link__update_program(link, pass->progs.prog_skb_verdict_clone);
+ if (!ASSERT_OK(err, "bpf_link__update_program"))
+ goto out;
+
+ /* Fail since a prog with different attach type attempts to do update. */
+ err = bpf_link__update_program(link, pass->progs.prog_skb_parser);
+ if (!ASSERT_ERR(err, "bpf_link__update_program"))
+ goto out;
+
+ test_sockmap_skb_verdict_peek_helper(map);
+ ASSERT_EQ(pass->bss->clone_called, 1, "clone_called");
+out:
+ bpf_link__detach(link);
+ test_sockmap_pass_prog__destroy(pass);
+}
+
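+/* Unconnected AF_UNIX stream sockets must be rejected by the map,
+ * while unconnected AF_UNIX datagram sockets are accepted.
+ */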
+static void test_sockmap_unconnected_unix(void)
+{
+ int err, map, stream = 0, dgram = 0, zero = 0;
+ struct test_sockmap_pass_prog *skel;
+
+ skel = test_sockmap_pass_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ map = bpf_map__fd(skel->maps.sock_map_rx);
+
+ stream = xsocket(AF_UNIX, SOCK_STREAM, 0);
+ if (stream < 0)
+ return;
+
+ dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
+ if (dgram < 0) {
+ close(stream);
+ return;
+ }
+
+ err = bpf_map_update_elem(map, &zero, &stream, BPF_ANY);
+ ASSERT_ERR(err, "bpf_map_update_elem(stream)");
+
+ err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem(dgram)");
+
+ close(stream);
+ close(dgram);
+}
+
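+/* A single socket may occupy many map entries at once; updates and
+ * deletes across all entries must succeed for every socket type.
+ */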
+static void test_sockmap_many_socket(void)
+{
+ struct test_sockmap_pass_prog *skel;
+ int stream[2], dgram, udp, tcp;
+ int i, err, map, entry = 0;
+
+ skel = test_sockmap_pass_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ map = bpf_map__fd(skel->maps.sock_map_rx);
+
+ dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
+ if (dgram < 0) {
+ test_sockmap_pass_prog__destroy(skel);
+ return;
+ }
+
+ tcp = connected_socket_v4();
+ if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
+ close(dgram);
+ test_sockmap_pass_prog__destroy(skel);
+ return;
+ }
+
+ udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
+ if (udp < 0) {
+ close(dgram);
+ close(tcp);
+ test_sockmap_pass_prog__destroy(skel);
+ return;
+ }
+
+ err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
+ ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
+ if (err)
+ goto out;
+
+ for (i = 0; i < 2; i++, entry++) {
+ err = bpf_map_update_elem(map, &entry, &stream[0], BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem(stream)");
+ }
+ for (i = 0; i < 2; i++, entry++) {
+ err = bpf_map_update_elem(map, &entry, &dgram, BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem(dgram)");
+ }
+ for (i = 0; i < 2; i++, entry++) {
+ err = bpf_map_update_elem(map, &entry, &udp, BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem(udp)");
+ }
+ for (i = 0; i < 2; i++, entry++) {
+ err = bpf_map_update_elem(map, &entry, &tcp, BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem(tcp)");
+ }
+ for (entry--; entry >= 0; entry--) {
+ err = bpf_map_delete_elem(map, &entry);
+ ASSERT_OK(err, "bpf_map_delete_elem(entry)");
+ }
+
+ close(stream[0]);
+ close(stream[1]);
+out:
+ close(dgram);
+ close(tcp);
+ close(udp);
+ test_sockmap_pass_prog__destroy(skel);
+}
+
+static void test_sockmap_many_maps(void)
+{
+ struct test_sockmap_pass_prog *skel;
+ int stream[2], dgram, udp, tcp;
+ int i, err, map[2], entry = 0;
+
+ skel = test_sockmap_pass_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ map[0] = bpf_map__fd(skel->maps.sock_map_rx);
+ map[1] = bpf_map__fd(skel->maps.sock_map_tx);
+
+ dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
+ if (dgram < 0) {
+ test_sockmap_pass_prog__destroy(skel);
+ return;
+ }
+
+ tcp = connected_socket_v4();
+ if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
+ close(dgram);
+ test_sockmap_pass_prog__destroy(skel);
+ return;
+ }
+
+ udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
+ if (udp < 0) {
+ close(dgram);
+ close(tcp);
+ test_sockmap_pass_prog__destroy(skel);
+ return;
+ }
+
+ err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
+ ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
+ if (err)
+ goto out;
+
+ for (i = 0; i < 2; i++, entry++) {
+ err = bpf_map_update_elem(map[i], &entry, &stream[0], BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem(stream)");
+ }
+ for (i = 0; i < 2; i++, entry++) {
+ err = bpf_map_update_elem(map[i], &entry, &dgram, BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem(dgram)");
+ }
+ for (i = 0; i < 2; i++, entry++) {
+ err = bpf_map_update_elem(map[i], &entry, &udp, BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem(udp)");
+ }
+ for (i = 0; i < 2; i++, entry++) {
+ err = bpf_map_update_elem(map[i], &entry, &tcp, BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem(tcp)");
+ }
+ for (entry--; entry >= 0; entry--) {
+ err = bpf_map_delete_elem(map[1], &entry);
+ entry--;
+ ASSERT_OK(err, "bpf_map_delete_elem(entry)");
+ err = bpf_map_delete_elem(map[0], &entry);
+ ASSERT_OK(err, "bpf_map_delete_elem(entry)");
+ }
+
+ close(stream[0]);
+ close(stream[1]);
+out:
+ close(dgram);
+ close(tcp);
+ close(udp);
+ test_sockmap_pass_prog__destroy(skel);
+}
+
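+/* Repeatedly replace the same key with sockets of different types,
+ * then check that deleting the entry fails once the socket occupying
+ * it has been closed.
+ */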
+static void test_sockmap_same_sock(void)
+{
+ struct test_sockmap_pass_prog *skel;
+ int stream[2], dgram, udp, tcp;
+ int i, err, map, zero = 0;
+
+ skel = test_sockmap_pass_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ map = bpf_map__fd(skel->maps.sock_map_rx);
+
+ dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
+ if (dgram < 0) {
+ test_sockmap_pass_prog__destroy(skel);
+ return;
+ }
+
+ tcp = connected_socket_v4();
+ if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
+ close(dgram);
+ test_sockmap_pass_prog__destroy(skel);
+ return;
+ }
+
+ udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
+ if (udp < 0) {
+ close(dgram);
+ close(tcp);
+ test_sockmap_pass_prog__destroy(skel);
+ return;
+ }
+
+ err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
+ ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
+ if (err) {
+ close(tcp);
+ goto out;
+ }
+
+ for (i = 0; i < 2; i++) {
+ err = bpf_map_update_elem(map, &zero, &stream[0], BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem(stream)");
+ }
+ for (i = 0; i < 2; i++) {
+ err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem(dgram)");
+ }
+ for (i = 0; i < 2; i++) {
+ err = bpf_map_update_elem(map, &zero, &udp, BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem(udp)");
+ }
+ for (i = 0; i < 2; i++) {
+ err = bpf_map_update_elem(map, &zero, &tcp, BPF_ANY);
+ ASSERT_OK(err, "bpf_map_update_elem(tcp)");
+ }
+
+ close(tcp);
+ err = bpf_map_delete_elem(map, &zero);
+ ASSERT_ERR(err, "bpf_map_delete_elem(entry)");
+
+ close(stream[0]);
+ close(stream[1]);
+out:
+ close(dgram);
+ close(udp);
+ test_sockmap_pass_prog__destroy(skel);
+}
+
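+/* A vsock under an skb verdict program must still signal readability
+ * to poll() and deliver the pending data without blocking.
+ */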
+static void test_sockmap_skb_verdict_vsock_poll(void)
+{
+ struct test_sockmap_pass_prog *skel;
+ int err, map, conn, peer;
+ struct bpf_program *prog;
+ struct bpf_link *link;
+ char buf = 'x';
+ int zero = 0;
+
+ skel = test_sockmap_pass_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ if (create_pair(AF_VSOCK, SOCK_STREAM, &conn, &peer))
+ goto destroy;
+
+ prog = skel->progs.prog_skb_verdict;
+ map = bpf_map__fd(skel->maps.sock_map_rx);
+ link = bpf_program__attach_sockmap(prog, map);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
+ goto close;
+
+ err = bpf_map_update_elem(map, &zero, &conn, BPF_ANY);
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
+ goto detach;
+
+ if (xsend(peer, &buf, 1, 0) != 1)
+ goto detach;
+
+ err = poll_read(conn, IO_TIMEOUT_SEC);
+ if (!ASSERT_OK(err, "poll"))
+ goto detach;
+
+ if (xrecv_nonblock(conn, &buf, 1, 0) != 1)
+ FAIL("xrecv_nonblock");
+detach:
+ bpf_link__detach(link);
+close:
+ xclose(conn);
+ xclose(peer);
+destroy:
+ test_sockmap_pass_prog__destroy(skel);
+}
+
+static void test_sockmap_vsock_unconnected(void)
+{
+ struct sockaddr_storage addr;
+ int map, s, zero = 0;
+ socklen_t alen;
+
+ map = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(int),
+ sizeof(int), 1, NULL);
+ if (!ASSERT_OK_FD(map, "bpf_map_create"))
+ return;
+
+ s = xsocket(AF_VSOCK, SOCK_STREAM, 0);
+ if (s < 0)
+ goto close_map;
+
+ /* Fail connect(), but trigger transport assignment. */
+ init_addr_loopback(AF_VSOCK, &addr, &alen);
+ if (!ASSERT_ERR(connect(s, sockaddr(&addr), alen), "connect"))
+ goto close_sock;
+
+ ASSERT_ERR(bpf_map_update_elem(map, &zero, &s, BPF_ANY), "map_update");
+
+close_sock:
+ xclose(s);
+close_map:
+ xclose(map);
+}
+
void test_sockmap_basic(void)
{
if (test__start_subtest("sockmap create_update_free"))
test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
if (test__start_subtest("sockhash create_update_free"))
test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
+ if (test__start_subtest("sockmap vsock delete on close"))
+ test_sockmap_vsock_delete_on_close();
if (test__start_subtest("sockmap sk_msg load helpers"))
test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP);
if (test__start_subtest("sockhash sk_msg load helpers"))
@@ -300,4 +1064,48 @@ void test_sockmap_basic(void)
test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP);
if (test__start_subtest("sockhash copy"))
test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH);
+ if (test__start_subtest("sockmap skb_verdict attach")) {
+ test_sockmap_skb_verdict_attach(BPF_SK_SKB_VERDICT,
+ BPF_SK_SKB_STREAM_VERDICT);
+ test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT,
+ BPF_SK_SKB_VERDICT);
+ }
+ if (test__start_subtest("sockmap skb_verdict attach_with_link"))
+ test_sockmap_skb_verdict_attach_with_link();
+ if (test__start_subtest("sockmap msg_verdict progs query"))
+ test_sockmap_progs_query(BPF_SK_MSG_VERDICT);
+ if (test__start_subtest("sockmap stream_parser progs query"))
+ test_sockmap_progs_query(BPF_SK_SKB_STREAM_PARSER);
+ if (test__start_subtest("sockmap stream_verdict progs query"))
+ test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT);
+ if (test__start_subtest("sockmap skb_verdict progs query"))
+ test_sockmap_progs_query(BPF_SK_SKB_VERDICT);
+ if (test__start_subtest("sockmap skb_verdict shutdown"))
+ test_sockmap_skb_verdict_shutdown();
+ if (test__start_subtest("sockmap skb_verdict fionread"))
+ test_sockmap_skb_verdict_fionread(true);
+ if (test__start_subtest("sockmap skb_verdict fionread on drop"))
+ test_sockmap_skb_verdict_fionread(false);
+ if (test__start_subtest("sockmap skb_verdict change tail"))
+ test_sockmap_skb_verdict_change_tail();
+ if (test__start_subtest("sockmap skb_verdict msg_f_peek"))
+ test_sockmap_skb_verdict_peek();
+ if (test__start_subtest("sockmap skb_verdict msg_f_peek with link"))
+ test_sockmap_skb_verdict_peek_with_link();
+ if (test__start_subtest("sockmap unconnected af_unix"))
+ test_sockmap_unconnected_unix();
+ if (test__start_subtest("sockmap one socket to many map entries"))
+ test_sockmap_many_socket();
+ if (test__start_subtest("sockmap one socket to many maps"))
+ test_sockmap_many_maps();
+ if (test__start_subtest("sockmap same socket replace"))
+ test_sockmap_same_sock();
+ if (test__start_subtest("sockmap sk_msg attach sockmap helpers with link"))
+ test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKMAP);
+ if (test__start_subtest("sockhash sk_msg attach sockhash helpers with link"))
+ test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKHASH);
+ if (test__start_subtest("sockmap skb_verdict vsock poll"))
+ test_sockmap_skb_verdict_vsock_poll();
+ if (test__start_subtest("sockmap vsock unconnected"))
+ test_sockmap_vsock_unconnected();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
new file mode 100644
index 000000000000..d815efac52fd
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
@@ -0,0 +1,83 @@
+#ifndef __SOCKMAP_HELPERS__
+#define __SOCKMAP_HELPERS__
+
+#include "socket_helpers.h"
+
+#define MAX_TEST_NAME 80
+
+#define u32(v) ((u32){(v)})
+#define u64(v) ((u64){(v)})
+
+#define __always_unused __attribute__((__unused__))
+
+#define xbpf_map_delete_elem(fd, key) \
+ ({ \
+ int __ret = bpf_map_delete_elem((fd), (key)); \
+ if (__ret < 0) \
+ FAIL_ERRNO("map_delete"); \
+ __ret; \
+ })
+
+#define xbpf_map_lookup_elem(fd, key, val) \
+ ({ \
+ int __ret = bpf_map_lookup_elem((fd), (key), (val)); \
+ if (__ret < 0) \
+ FAIL_ERRNO("map_lookup"); \
+ __ret; \
+ })
+
+#define xbpf_map_update_elem(fd, key, val, flags) \
+ ({ \
+ int __ret = bpf_map_update_elem((fd), (key), (val), (flags)); \
+ if (__ret < 0) \
+ FAIL_ERRNO("map_update"); \
+ __ret; \
+ })
+
+#define xbpf_prog_attach(prog, target, type, flags) \
+ ({ \
+ int __ret = \
+ bpf_prog_attach((prog), (target), (type), (flags)); \
+ if (__ret < 0) \
+ FAIL_ERRNO("prog_attach(" #type ")"); \
+ __ret; \
+ })
+
+#define xbpf_prog_detach2(prog, target, type) \
+ ({ \
+ int __ret = bpf_prog_detach2((prog), (target), (type)); \
+ if (__ret < 0) \
+ FAIL_ERRNO("prog_detach2(" #type ")"); \
+ __ret; \
+ })
+
+#define xpthread_create(thread, attr, func, arg) \
+ ({ \
+ int __ret = pthread_create((thread), (attr), (func), (arg)); \
+ errno = __ret; \
+ if (__ret) \
+ FAIL_ERRNO("pthread_create"); \
+ __ret; \
+ })
+
+#define xpthread_join(thread, retval) \
+ ({ \
+ int __ret = pthread_join((thread), (retval)); \
+ errno = __ret; \
+ if (__ret) \
+ FAIL_ERRNO("pthread_join"); \
+ __ret; \
+ })
+
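+/* Insert fd1 and fd2 at keys 0 and 1 of the given sockmap. */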
+static inline int add_to_sockmap(int mapfd, int fd1, int fd2)
+{
+ int err;
+
+ err = xbpf_map_update_elem(mapfd, &u32(0), &u64(fd1), BPF_NOEXIST);
+ if (err)
+ return err;
+
+ return xbpf_map_update_elem(mapfd, &u32(1), &u64(fd2), BPF_NOEXIST);
+}
+
+#endif // __SOCKMAP_HELPERS__
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
index 06b86addc181..b87e7f39e15a 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
@@ -3,122 +3,440 @@
/*
* Tests for sockmap/sockhash holding kTLS sockets.
*/
-
+#include <error.h>
+#include <netinet/tcp.h>
+#include <linux/tls.h>
#include "test_progs.h"
+#include "sockmap_helpers.h"
+#include "test_skmsg_load_helpers.skel.h"
+#include "test_sockmap_ktls.skel.h"
#define MAX_TEST_NAME 80
#define TCP_ULP 31
-static int tcp_server(int family)
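+/* Install the TLS ULP on both ends and program symmetric AES-GCM-128
+ * state (all-zero test keys) so that traffic from c to p goes through
+ * kTLS.
+ */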
+static int init_ktls_pairs(int c, int p)
{
- int err, s;
+ int err;
+ struct tls12_crypto_info_aes_gcm_128 crypto_rx;
+ struct tls12_crypto_info_aes_gcm_128 crypto_tx;
- s = socket(family, SOCK_STREAM, 0);
- if (CHECK_FAIL(s == -1)) {
- perror("socket");
+ err = setsockopt(c, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls"));
+ if (!ASSERT_OK(err, "setsockopt(TCP_ULP)"))
+ goto out;
+
+ err = setsockopt(p, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls"));
+ if (!ASSERT_OK(err, "setsockopt(TCP_ULP)"))
+ goto out;
+
+ memset(&crypto_rx, 0, sizeof(crypto_rx));
+ memset(&crypto_tx, 0, sizeof(crypto_tx));
+ crypto_rx.info.version = TLS_1_2_VERSION;
+ crypto_tx.info.version = TLS_1_2_VERSION;
+ crypto_rx.info.cipher_type = TLS_CIPHER_AES_GCM_128;
+ crypto_tx.info.cipher_type = TLS_CIPHER_AES_GCM_128;
+
+ err = setsockopt(c, SOL_TLS, TLS_TX, &crypto_tx, sizeof(crypto_tx));
+ if (!ASSERT_OK(err, "setsockopt(TLS_TX)"))
+ goto out;
+
+ err = setsockopt(p, SOL_TLS, TLS_RX, &crypto_rx, sizeof(crypto_rx));
+ if (!ASSERT_OK(err, "setsockopt(TLS_RX)"))
+ goto out;
+ return 0;
+out:
+ return -1;
+}
+
+static int create_ktls_pairs(int family, int sotype, int *c, int *p)
+{
+ int err;
+
+ err = create_pair(family, sotype, c, p);
+ if (!ASSERT_OK(err, "create_pair()"))
return -1;
- }
- err = listen(s, SOMAXCONN);
- if (CHECK_FAIL(err)) {
- perror("listen");
+ err = init_ktls_pairs(*c, *p);
+ if (!ASSERT_OK(err, "init_ktls_pairs(c, p)"))
return -1;
+ return 0;
+}
+
+static void test_sockmap_ktls_update_fails_when_sock_has_ulp(int family, int map)
+{
+ struct sockaddr_storage addr = {};
+ socklen_t len = sizeof(addr);
+ struct sockaddr_in6 *v6;
+ struct sockaddr_in *v4;
+ int err, s, zero = 0;
+
+ switch (family) {
+ case AF_INET:
+ v4 = (struct sockaddr_in *)&addr;
+ v4->sin_family = AF_INET;
+ break;
+ case AF_INET6:
+ v6 = (struct sockaddr_in6 *)&addr;
+ v6->sin6_family = AF_INET6;
+ break;
+ default:
+ PRINT_FAIL("unsupported socket family %d", family);
+ return;
}
- return s;
+ s = socket(family, SOCK_STREAM, 0);
+ if (!ASSERT_GE(s, 0, "socket"))
+ return;
+
+ err = bind(s, (struct sockaddr *)&addr, len);
+ if (!ASSERT_OK(err, "bind"))
+ goto close;
+
+ err = getsockname(s, (struct sockaddr *)&addr, &len);
+ if (!ASSERT_OK(err, "getsockname"))
+ goto close;
+
+ err = connect(s, (struct sockaddr *)&addr, len);
+ if (!ASSERT_OK(err, "connect"))
+ goto close;
+
+ /* save sk->sk_prot and set it to tls_prots */
+ err = setsockopt(s, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls"));
+ if (!ASSERT_OK(err, "setsockopt(TCP_ULP)"))
+ goto close;
+
+ /* sockmap update should not affect saved sk_prot */
+ err = bpf_map_update_elem(map, &zero, &s, BPF_ANY);
+ if (!ASSERT_ERR(err, "sockmap update elem"))
+ goto close;
+
+ /* call sk->sk_prot->setsockopt to dispatch to saved sk_prot */
+ err = setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &zero, sizeof(zero));
+ ASSERT_OK(err, "setsockopt(TCP_NODELAY)");
+
+close:
+ close(s);
}
-static int disconnect(int fd)
+static const char *fmt_test_name(const char *subtest_name, int family,
+ enum bpf_map_type map_type)
{
- struct sockaddr unspec = { AF_UNSPEC };
+ const char *map_type_str = map_type == BPF_MAP_TYPE_SOCKMAP ? "SOCKMAP" : "SOCKHASH";
+ const char *family_str = family == AF_INET ? "IPv4" : "IPv6";
+ static char test_name[MAX_TEST_NAME];
- return connect(fd, &unspec, sizeof(unspec));
+ snprintf(test_name, MAX_TEST_NAME,
+ "sockmap_ktls %s %s %s",
+ subtest_name, family_str, map_type_str);
+
+ return test_name;
}
-/* Disconnect (unhash) a kTLS socket after removing it from sockmap. */
-static void test_sockmap_ktls_disconnect_after_delete(int family, int map)
+static void test_sockmap_ktls_offload(int family, int sotype)
{
- struct sockaddr_storage addr = {0};
- socklen_t len = sizeof(addr);
- int err, cli, srv, zero = 0;
+ int err;
+ int c = 0, p = 0, sent, recvd;
+ char msg[12] = "hello world\0";
+ char rcv[13];
+
+ err = create_ktls_pairs(family, sotype, &c, &p);
+ if (!ASSERT_OK(err, "create_ktls_pairs()"))
+ goto out;
+
+ sent = send(c, msg, sizeof(msg), 0);
+ if (!ASSERT_OK(err, "send(msg)"))
+ goto out;
+
+ recvd = recv(p, rcv, sizeof(rcv), 0);
+ if (!ASSERT_OK(err, "recv(msg)") ||
+ !ASSERT_EQ(recvd, sent, "length mismatch"))
+ goto out;
- srv = tcp_server(family);
- if (srv == -1)
+ ASSERT_OK(memcmp(msg, rcv, sizeof(msg)), "data mismatch");
+
+out:
+ if (c)
+ close(c);
+ if (p)
+ close(p);
+}
+
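+/* The sk_msg policy corks sizeof(msg) bytes: the first half of the
+ * message must stay queued (recv times out) until the remainder is
+ * sent. With push enabled, two extra bytes are inserted at offset 1
+ * and skipped when verifying the payload.
+ */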
+static void test_sockmap_ktls_tx_cork(int family, int sotype, bool push)
+{
+ int err, off;
+ int i, j;
+ int start_push = 0, push_len = 0;
+ int c = 0, p = 0, one = 1, sent, recvd;
+ int prog_fd, map_fd;
+ char msg[12] = "hello world\0";
+ char rcv[20] = {0};
+ struct test_sockmap_ktls *skel;
+
+ skel = test_sockmap_ktls__open_and_load();
+ if (!ASSERT_TRUE(skel, "open ktls skel"))
return;
- err = getsockname(srv, (struct sockaddr *)&addr, &len);
- if (CHECK_FAIL(err)) {
- perror("getsockopt");
- goto close_srv;
- }
+ err = create_pair(family, sotype, &c, &p);
+ if (!ASSERT_OK(err, "create_pair()"))
+ goto out;
- cli = socket(family, SOCK_STREAM, 0);
- if (CHECK_FAIL(cli == -1)) {
- perror("socket");
- goto close_srv;
- }
+ prog_fd = bpf_program__fd(skel->progs.prog_sk_policy);
+ map_fd = bpf_map__fd(skel->maps.sock_map);
- err = connect(cli, (struct sockaddr *)&addr, len);
- if (CHECK_FAIL(err)) {
- perror("connect");
- goto close_cli;
- }
+ err = bpf_prog_attach(prog_fd, map_fd, BPF_SK_MSG_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach sk msg"))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &one, &c, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(c)"))
+ goto out;
+
+ err = init_ktls_pairs(c, p);
+ if (!ASSERT_OK(err, "init_ktls_pairs(c, p)"))
+ goto out;
- err = bpf_map_update_elem(map, &zero, &cli, 0);
- if (CHECK_FAIL(err)) {
- perror("bpf_map_update_elem");
- goto close_cli;
+ skel->bss->cork_byte = sizeof(msg);
+ if (push) {
+ start_push = 1;
+ push_len = 2;
}
+ skel->bss->push_start = start_push;
+ skel->bss->push_end = push_len;
- err = setsockopt(cli, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls"));
- if (CHECK_FAIL(err)) {
- perror("setsockopt(TCP_ULP)");
- goto close_cli;
+ off = sizeof(msg) / 2;
+ sent = send(c, msg, off, 0);
+ if (!ASSERT_EQ(sent, off, "send(msg)"))
+ goto out;
+
+ recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, 1);
+ if (!ASSERT_EQ(recvd, -1, "expected no data"))
+ goto out;
+
+ /* send remaining msg */
+ sent = send(c, msg + off, sizeof(msg) - off, 0);
+ if (!ASSERT_EQ(sent, sizeof(msg) - off, "send remaining data"))
+ goto out;
+
+ recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, 1);
+ if (!ASSERT_OK(err, "recv(msg)") ||
+ !ASSERT_EQ(recvd, sizeof(msg) + push_len, "check length mismatch"))
+ goto out;
+
+ for (i = 0, j = 0; i < recvd;) {
+ /* skip checking the data that has been pushed in */
+ if (i >= start_push && i <= start_push + push_len - 1) {
+ i++;
+ continue;
+ }
+ if (!ASSERT_EQ(rcv[i], msg[j], "data mismatch"))
+ goto out;
+ i++;
+ j++;
}
+out:
+ if (c)
+ close(c);
+ if (p)
+ close(p);
+ test_sockmap_ktls__destroy(skel);
+}
+
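+/* Force tiny send/receive buffers and flood sendmsg() until it fails,
+ * exercising the sk_msg redirect path when no buffer space is left.
+ */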
+static void test_sockmap_ktls_tx_no_buf(int family, int sotype, bool push)
+{
+ int c = -1, p = -1, one = 1, two = 2;
+ struct test_sockmap_ktls *skel;
+ unsigned char *data = NULL;
+ struct msghdr msg = {0};
+ struct iovec iov[2];
+ int prog_fd, map_fd;
+ int txrx_buf = 1024;
+ int iov_length = 8192;
+ int err;
+
+ skel = test_sockmap_ktls__open_and_load();
+ if (!ASSERT_TRUE(skel, "open ktls skel"))
+ return;
+
+ err = create_pair(family, sotype, &c, &p);
+ if (!ASSERT_OK(err, "create_pair()"))
+ goto out;
+
+ err = setsockopt(c, SOL_SOCKET, SO_RCVBUFFORCE, &txrx_buf, sizeof(int));
+ err |= setsockopt(p, SOL_SOCKET, SO_SNDBUFFORCE, &txrx_buf, sizeof(int));
+ if (!ASSERT_OK(err, "set buf limit"))
+ goto out;
- err = bpf_map_delete_elem(map, &zero);
- if (CHECK_FAIL(err)) {
- perror("bpf_map_delete_elem");
- goto close_cli;
+ prog_fd = bpf_program__fd(skel->progs.prog_sk_policy_redir);
+ map_fd = bpf_map__fd(skel->maps.sock_map);
+
+ err = bpf_prog_attach(prog_fd, map_fd, BPF_SK_MSG_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach sk msg"))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &one, &c, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(c)"))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &two, &p, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(p)"))
+ goto out;
+
+ skel->bss->apply_bytes = 1024;
+
+ err = init_ktls_pairs(c, p);
+ if (!ASSERT_OK(err, "init_ktls_pairs(c, p)"))
+ goto out;
+
+ data = calloc(iov_length, sizeof(char));
+ if (!ASSERT_OK_PTR(data, "calloc(data)"))
+ goto out;
+
+ iov[0].iov_base = data;
+ iov[0].iov_len = iov_length;
+ iov[1].iov_base = data;
+ iov[1].iov_len = iov_length;
+ msg.msg_iov = iov;
+ msg.msg_iovlen = 2;
+
+ for (;;) {
+ err = sendmsg(c, &msg, MSG_DONTWAIT);
+ if (err <= 0)
+ break;
}
- err = disconnect(cli);
- if (CHECK_FAIL(err))
- perror("disconnect");
+out:
+ if (data)
+ free(data);
+ if (c != -1)
+ close(c);
+ if (p != -1)
+ close(p);
-close_cli:
- close(cli);
-close_srv:
- close(srv);
+ test_sockmap_ktls__destroy(skel);
}
-static void run_tests(int family, enum bpf_map_type map_type)
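+/* Pop byte ranges at various offsets via the sk_msg policy and verify
+ * that exactly the popped range is missing from the received data.
+ */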
+static void test_sockmap_ktls_tx_pop(int family, int sotype)
{
- char test_name[MAX_TEST_NAME];
- int map;
+ char msg[37] = "0123456789abcdefghijklmnopqrstuvwxyz\0";
+ int c = 0, p = 0, one = 1, sent, recvd;
+ struct test_sockmap_ktls *skel;
+ int prog_fd, map_fd;
+ char rcv[50] = {0};
+ int err;
+ int i, m, r;
- map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0);
- if (CHECK_FAIL(map == -1)) {
- perror("bpf_map_create");
+ skel = test_sockmap_ktls__open_and_load();
+ if (!ASSERT_TRUE(skel, "open ktls skel"))
return;
+
+ err = create_pair(family, sotype, &c, &p);
+ if (!ASSERT_OK(err, "create_pair()"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs.prog_sk_policy);
+ map_fd = bpf_map__fd(skel->maps.sock_map);
+
+ err = bpf_prog_attach(prog_fd, map_fd, BPF_SK_MSG_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach sk msg"))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &one, &c, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(c)"))
+ goto out;
+
+ err = init_ktls_pairs(c, p);
+ if (!ASSERT_OK(err, "init_ktls_pairs(c, p)"))
+ goto out;
+
+ struct {
+ int pop_start;
+ int pop_len;
+ } pop_policy[] = {
+ /* trim the start */
+ {0, 2},
+ {0, 10},
+ {1, 2},
+ {1, 10},
+ /* trim the end */
+ {35, 2},
+ /* New entries should be added before this line */
+ {-1, -1},
+ };
+
+ i = 0;
+ while (pop_policy[i].pop_start >= 0) {
+ skel->bss->pop_start = pop_policy[i].pop_start;
+ skel->bss->pop_end = pop_policy[i].pop_len;
+
+ sent = send(c, msg, sizeof(msg), 0);
+ if (!ASSERT_EQ(sent, sizeof(msg), "send(msg)"))
+ goto out;
+
+ recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, 1);
+ if (!ASSERT_EQ(recvd, sizeof(msg) - pop_policy[i].pop_len, "pop len mismatch"))
+ goto out;
+
+ /* verify the data
+ * msg: 0123456789a bcdefghij klmnopqrstuvwxyz
+ * | |
+ * popped data
+ */
+ for (m = 0, r = 0; m < sizeof(msg);) {
+ /* skip checking the data that has been popped */
+ if (m >= pop_policy[i].pop_start &&
+ m <= pop_policy[i].pop_start + pop_policy[i].pop_len - 1) {
+ m++;
+ continue;
+ }
+
+ if (!ASSERT_EQ(msg[m], rcv[r], "data mismatch"))
+ goto out;
+ m++;
+ r++;
+ }
+ i++;
}
+out:
+ if (c)
+ close(c);
+ if (p)
+ close(p);
+ test_sockmap_ktls__destroy(skel);
+}
- snprintf(test_name, MAX_TEST_NAME,
- "sockmap_ktls disconnect_after_delete %s %s",
- family == AF_INET ? "IPv4" : "IPv6",
- map_type == BPF_MAP_TYPE_SOCKMAP ? "SOCKMAP" : "SOCKHASH");
- if (!test__start_subtest(test_name))
+static void run_tests(int family, enum bpf_map_type map_type)
+{
+ int map;
+
+ map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
+ if (!ASSERT_GE(map, 0, "bpf_map_create"))
return;
- test_sockmap_ktls_disconnect_after_delete(family, map);
+ if (test__start_subtest(fmt_test_name("update_fails_when_sock_has_ulp", family, map_type)))
+ test_sockmap_ktls_update_fails_when_sock_has_ulp(family, map);
close(map);
}
+static void run_ktls_test(int family, int sotype)
+{
+ if (test__start_subtest("tls simple offload"))
+ test_sockmap_ktls_offload(family, sotype);
+ if (test__start_subtest("tls tx cork"))
+ test_sockmap_ktls_tx_cork(family, sotype, false);
+ if (test__start_subtest("tls tx cork with push"))
+ test_sockmap_ktls_tx_cork(family, sotype, true);
+ if (test__start_subtest("tls tx egress with no buf"))
+ test_sockmap_ktls_tx_no_buf(family, sotype, true);
+ if (test__start_subtest("tls tx with pop"))
+ test_sockmap_ktls_tx_pop(family, sotype);
+}
+
void test_sockmap_ktls(void)
{
run_tests(AF_INET, BPF_MAP_TYPE_SOCKMAP);
run_tests(AF_INET, BPF_MAP_TYPE_SOCKHASH);
run_tests(AF_INET6, BPF_MAP_TYPE_SOCKMAP);
run_tests(AF_INET6, BPF_MAP_TYPE_SOCKHASH);
+ run_ktls_test(AF_INET, SOCK_STREAM);
+ run_ktls_test(AF_INET6, SOCK_STREAM);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
index d7d65a700799..f1bdccc7e4e7 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
@@ -18,6 +18,7 @@
#include <string.h>
#include <sys/select.h>
#include <unistd.h>
+#include <linux/vm_sockets.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
@@ -26,302 +27,12 @@
#include "test_progs.h"
#include "test_sockmap_listen.skel.h"
-#define IO_TIMEOUT_SEC 30
-#define MAX_STRERR_LEN 256
-#define MAX_TEST_NAME 80
-
-#define _FAIL(errnum, fmt...) \
- ({ \
- error_at_line(0, (errnum), __func__, __LINE__, fmt); \
- CHECK_FAIL(true); \
- })
-#define FAIL(fmt...) _FAIL(0, fmt)
-#define FAIL_ERRNO(fmt...) _FAIL(errno, fmt)
-#define FAIL_LIBBPF(err, msg) \
- ({ \
- char __buf[MAX_STRERR_LEN]; \
- libbpf_strerror((err), __buf, sizeof(__buf)); \
- FAIL("%s: %s", (msg), __buf); \
- })
-
-/* Wrappers that fail the test on error and report it. */
-
-#define xaccept_nonblock(fd, addr, len) \
- ({ \
- int __ret = \
- accept_timeout((fd), (addr), (len), IO_TIMEOUT_SEC); \
- if (__ret == -1) \
- FAIL_ERRNO("accept"); \
- __ret; \
- })
-
-#define xbind(fd, addr, len) \
- ({ \
- int __ret = bind((fd), (addr), (len)); \
- if (__ret == -1) \
- FAIL_ERRNO("bind"); \
- __ret; \
- })
-
-#define xclose(fd) \
- ({ \
- int __ret = close((fd)); \
- if (__ret == -1) \
- FAIL_ERRNO("close"); \
- __ret; \
- })
-
-#define xconnect(fd, addr, len) \
- ({ \
- int __ret = connect((fd), (addr), (len)); \
- if (__ret == -1) \
- FAIL_ERRNO("connect"); \
- __ret; \
- })
-
-#define xgetsockname(fd, addr, len) \
- ({ \
- int __ret = getsockname((fd), (addr), (len)); \
- if (__ret == -1) \
- FAIL_ERRNO("getsockname"); \
- __ret; \
- })
-
-#define xgetsockopt(fd, level, name, val, len) \
- ({ \
- int __ret = getsockopt((fd), (level), (name), (val), (len)); \
- if (__ret == -1) \
- FAIL_ERRNO("getsockopt(" #name ")"); \
- __ret; \
- })
-
-#define xlisten(fd, backlog) \
- ({ \
- int __ret = listen((fd), (backlog)); \
- if (__ret == -1) \
- FAIL_ERRNO("listen"); \
- __ret; \
- })
-
-#define xsetsockopt(fd, level, name, val, len) \
- ({ \
- int __ret = setsockopt((fd), (level), (name), (val), (len)); \
- if (__ret == -1) \
- FAIL_ERRNO("setsockopt(" #name ")"); \
- __ret; \
- })
-
-#define xsend(fd, buf, len, flags) \
- ({ \
- ssize_t __ret = send((fd), (buf), (len), (flags)); \
- if (__ret == -1) \
- FAIL_ERRNO("send"); \
- __ret; \
- })
-
-#define xrecv_nonblock(fd, buf, len, flags) \
- ({ \
- ssize_t __ret = recv_timeout((fd), (buf), (len), (flags), \
- IO_TIMEOUT_SEC); \
- if (__ret == -1) \
- FAIL_ERRNO("recv"); \
- __ret; \
- })
-
-#define xsocket(family, sotype, flags) \
- ({ \
- int __ret = socket(family, sotype, flags); \
- if (__ret == -1) \
- FAIL_ERRNO("socket"); \
- __ret; \
- })
-
-#define xbpf_map_delete_elem(fd, key) \
- ({ \
- int __ret = bpf_map_delete_elem((fd), (key)); \
- if (__ret == -1) \
- FAIL_ERRNO("map_delete"); \
- __ret; \
- })
-
-#define xbpf_map_lookup_elem(fd, key, val) \
- ({ \
- int __ret = bpf_map_lookup_elem((fd), (key), (val)); \
- if (__ret == -1) \
- FAIL_ERRNO("map_lookup"); \
- __ret; \
- })
-
-#define xbpf_map_update_elem(fd, key, val, flags) \
- ({ \
- int __ret = bpf_map_update_elem((fd), (key), (val), (flags)); \
- if (__ret == -1) \
- FAIL_ERRNO("map_update"); \
- __ret; \
- })
-
-#define xbpf_prog_attach(prog, target, type, flags) \
- ({ \
- int __ret = \
- bpf_prog_attach((prog), (target), (type), (flags)); \
- if (__ret == -1) \
- FAIL_ERRNO("prog_attach(" #type ")"); \
- __ret; \
- })
-
-#define xbpf_prog_detach2(prog, target, type) \
- ({ \
- int __ret = bpf_prog_detach2((prog), (target), (type)); \
- if (__ret == -1) \
- FAIL_ERRNO("prog_detach2(" #type ")"); \
- __ret; \
- })
-
-#define xpthread_create(thread, attr, func, arg) \
- ({ \
- int __ret = pthread_create((thread), (attr), (func), (arg)); \
- errno = __ret; \
- if (__ret) \
- FAIL_ERRNO("pthread_create"); \
- __ret; \
- })
-
-#define xpthread_join(thread, retval) \
- ({ \
- int __ret = pthread_join((thread), (retval)); \
- errno = __ret; \
- if (__ret) \
- FAIL_ERRNO("pthread_join"); \
- __ret; \
- })
-
-static int poll_read(int fd, unsigned int timeout_sec)
-{
- struct timeval timeout = { .tv_sec = timeout_sec };
- fd_set rfds;
- int r;
-
- FD_ZERO(&rfds);
- FD_SET(fd, &rfds);
-
- r = select(fd + 1, &rfds, NULL, NULL, &timeout);
- if (r == 0)
- errno = ETIME;
-
- return r == 1 ? 0 : -1;
-}
-
-static int accept_timeout(int fd, struct sockaddr *addr, socklen_t *len,
- unsigned int timeout_sec)
-{
- if (poll_read(fd, timeout_sec))
- return -1;
-
- return accept(fd, addr, len);
-}
-
-static int recv_timeout(int fd, void *buf, size_t len, int flags,
- unsigned int timeout_sec)
-{
- if (poll_read(fd, timeout_sec))
- return -1;
-
- return recv(fd, buf, len, flags);
-}
-
-static void init_addr_loopback4(struct sockaddr_storage *ss, socklen_t *len)
-{
- struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss));
-
- addr4->sin_family = AF_INET;
- addr4->sin_port = 0;
- addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- *len = sizeof(*addr4);
-}
-
-static void init_addr_loopback6(struct sockaddr_storage *ss, socklen_t *len)
-{
- struct sockaddr_in6 *addr6 = memset(ss, 0, sizeof(*ss));
-
- addr6->sin6_family = AF_INET6;
- addr6->sin6_port = 0;
- addr6->sin6_addr = in6addr_loopback;
- *len = sizeof(*addr6);
-}
-
-static void init_addr_loopback(int family, struct sockaddr_storage *ss,
- socklen_t *len)
-{
- switch (family) {
- case AF_INET:
- init_addr_loopback4(ss, len);
- return;
- case AF_INET6:
- init_addr_loopback6(ss, len);
- return;
- default:
- FAIL("unsupported address family %d", family);
- }
-}
-
-static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss)
-{
- return (struct sockaddr *)ss;
-}
-
-static int enable_reuseport(int s, int progfd)
-{
- int err, one = 1;
-
- err = xsetsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
- if (err)
- return -1;
- err = xsetsockopt(s, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &progfd,
- sizeof(progfd));
- if (err)
- return -1;
-
- return 0;
-}
-
-static int socket_loopback_reuseport(int family, int sotype, int progfd)
-{
- struct sockaddr_storage addr;
- socklen_t len;
- int err, s;
-
- init_addr_loopback(family, &addr, &len);
-
- s = xsocket(family, sotype, 0);
- if (s == -1)
- return -1;
-
- if (progfd >= 0)
- enable_reuseport(s, progfd);
-
- err = xbind(s, sockaddr(&addr), len);
- if (err)
- goto close;
-
- if (sotype & SOCK_DGRAM)
- return s;
+#include "sockmap_helpers.h"
- err = xlisten(s, SOMAXCONN);
- if (err)
- goto close;
-
- return s;
-close:
- xclose(s);
- return -1;
-}
+#define NO_FLAGS 0
-static int socket_loopback(int family, int sotype)
-{
- return socket_loopback_reuseport(family, sotype, -1);
-}
-
-static void test_insert_invalid(int family, int sotype, int mapfd)
+static void test_insert_invalid(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
u32 key = 0;
u64 value;
@@ -338,7 +49,8 @@ static void test_insert_invalid(int family, int sotype, int mapfd)
FAIL_ERRNO("map_update: expected EBADF");
}
-static void test_insert_opened(int family, int sotype, int mapfd)
+static void test_insert_opened(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
u32 key = 0;
u64 value;
@@ -351,16 +63,19 @@ static void test_insert_opened(int family, int sotype, int mapfd)
errno = 0;
value = s;
err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
- if (!err || errno != EOPNOTSUPP)
- FAIL_ERRNO("map_update: expected EOPNOTSUPP");
-
+ if (sotype == SOCK_STREAM) {
+ if (!err || errno != EOPNOTSUPP)
+ FAIL_ERRNO("map_update: expected EOPNOTSUPP");
+ } else if (err)
+ FAIL_ERRNO("map_update: expected success");
xclose(s);
}
-static void test_insert_bound(int family, int sotype, int mapfd)
+static void test_insert_bound(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
- socklen_t len;
+ socklen_t len = 0;
u32 key = 0;
u64 value;
int err, s;
@@ -384,7 +99,8 @@ close:
xclose(s);
}
-static void test_insert(int family, int sotype, int mapfd)
+static void test_insert(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
u64 value;
u32 key;
@@ -400,7 +116,8 @@ static void test_insert(int family, int sotype, int mapfd)
xclose(s);
}
-static void test_delete_after_insert(int family, int sotype, int mapfd)
+static void test_delete_after_insert(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
u64 value;
u32 key;
@@ -417,7 +134,8 @@ static void test_delete_after_insert(int family, int sotype, int mapfd)
xclose(s);
}
-static void test_delete_after_close(int family, int sotype, int mapfd)
+static void test_delete_after_close(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
int err, s;
u64 value;
@@ -440,7 +158,8 @@ static void test_delete_after_close(int family, int sotype, int mapfd)
FAIL_ERRNO("map_delete: expected EINVAL/EINVAL");
}
-static void test_lookup_after_insert(int family, int sotype, int mapfd)
+static void test_lookup_after_insert(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
u64 cookie, value;
socklen_t len;
@@ -468,7 +187,8 @@ static void test_lookup_after_insert(int family, int sotype, int mapfd)
xclose(s);
}
-static void test_lookup_after_delete(int family, int sotype, int mapfd)
+static void test_lookup_after_delete(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
int err, s;
u64 value;
@@ -491,7 +211,8 @@ static void test_lookup_after_delete(int family, int sotype, int mapfd)
xclose(s);
}
-static void test_lookup_32_bit_value(int family, int sotype, int mapfd)
+static void test_lookup_32_bit_value(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
u32 key, value32;
int err, s;
@@ -500,8 +221,8 @@ static void test_lookup_32_bit_value(int family, int sotype, int mapfd)
if (s < 0)
return;
- mapfd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(key),
- sizeof(value32), 1, 0);
+ mapfd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(key),
+ sizeof(value32), 1, NULL);
if (mapfd < 0) {
FAIL_ERRNO("map_create");
goto close;
@@ -521,7 +242,8 @@ close:
xclose(s);
}
-static void test_update_existing(int family, int sotype, int mapfd)
+static void test_update_existing(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
int s1, s2;
u64 value;
@@ -549,7 +271,7 @@ close_s1:
/* Exercise the code path where we destroy child sockets that never
* got accept()'ed, aka orphans, when parent socket gets closed.
*/
-static void test_destroy_orphan_child(int family, int sotype, int mapfd)
+static void do_destroy_orphan_child(int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
socklen_t len;
@@ -580,10 +302,38 @@ close_srv:
xclose(s);
}
+static void test_destroy_orphan_child(struct test_sockmap_listen *skel,
+ int family, int sotype, int mapfd)
+{
+ int msg_verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
+ int skb_verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+ const struct test {
+ int progfd;
+ enum bpf_attach_type atype;
+ } tests[] = {
+ { -1, -1 },
+ { msg_verdict, BPF_SK_MSG_VERDICT },
+ { skb_verdict, BPF_SK_SKB_VERDICT },
+ };
+ const struct test *t;
+
+ for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+ if (t->progfd != -1 &&
+ xbpf_prog_attach(t->progfd, mapfd, t->atype, 0) != 0)
+ return;
+
+ do_destroy_orphan_child(family, sotype, mapfd);
+
+ if (t->progfd != -1)
+ xbpf_prog_detach2(t->progfd, mapfd, t->atype);
+ }
+}
+
/* Perform a passive open after removing listening socket from SOCKMAP
* to ensure that callbacks get restored properly.
*/
-static void test_clone_after_delete(int family, int sotype, int mapfd)
+static void test_clone_after_delete(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
socklen_t len;
@@ -619,7 +369,8 @@ close_srv:
* SOCKMAP, but got accept()'ed only after the parent has been removed
* from SOCKMAP, gets cloned without parent psock state or callbacks.
*/
-static void test_accept_after_delete(int family, int sotype, int mapfd)
+static void test_accept_after_delete(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
const u32 zero = 0;
@@ -673,7 +424,8 @@ close_srv:
/* Check that child socket that got created and accepted while parent
* was in a SOCKMAP is cloned without parent psock state or callbacks.
*/
-static void test_accept_before_delete(int family, int sotype, int mapfd)
+static void test_accept_before_delete(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
struct sockaddr_storage addr;
const u32 zero = 0, one = 1;
@@ -782,7 +534,8 @@ done:
return NULL;
}
-static void test_syn_recv_insert_delete(int family, int sotype, int mapfd)
+static void test_syn_recv_insert_delete(struct test_sockmap_listen *skel __always_unused,
+ int family, int sotype, int mapfd)
{
struct connect_accept_ctx ctx = { 0 };
struct sockaddr_storage addr;
@@ -845,7 +598,8 @@ static void *listen_thread(void *arg)
return NULL;
}
-static void test_race_insert_listen(int family, int socktype, int mapfd)
+static void test_race_insert_listen(struct test_sockmap_listen *skel __always_unused,
+ int family, int socktype, int mapfd)
{
struct connect_accept_ctx ctx = { 0 };
const u32 zero = 0;
@@ -923,59 +677,22 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,
int verd_mapfd, enum redir_mode mode)
{
const char *log_prefix = redir_mode_str(mode);
- struct sockaddr_storage addr;
- int s, c0, c1, p0, p1;
+ int c0, c1, p0, p1;
unsigned int pass;
- socklen_t len;
int err, n;
- u64 value;
u32 key;
char b;
zero_verdict_count(verd_mapfd);
- s = socket_loopback(family, sotype | SOCK_NONBLOCK);
- if (s < 0)
- return;
-
- len = sizeof(addr);
- err = xgetsockname(s, sockaddr(&addr), &len);
- if (err)
- goto close_srv;
-
- c0 = xsocket(family, sotype, 0);
- if (c0 < 0)
- goto close_srv;
- err = xconnect(c0, sockaddr(&addr), len);
+ err = create_socket_pairs(family, sotype | SOCK_NONBLOCK, &c0, &c1,
+ &p0, &p1);
if (err)
- goto close_cli0;
-
- p0 = xaccept_nonblock(s, NULL, NULL);
- if (p0 < 0)
- goto close_cli0;
-
- c1 = xsocket(family, sotype, 0);
- if (c1 < 0)
- goto close_peer0;
- err = xconnect(c1, sockaddr(&addr), len);
- if (err)
- goto close_cli1;
-
- p1 = xaccept_nonblock(s, NULL, NULL);
- if (p1 < 0)
- goto close_cli1;
-
- key = 0;
- value = p0;
- err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
- if (err)
- goto close_peer1;
+ return;
- key = 1;
- value = p1;
- err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+ err = add_to_sockmap(sock_mapfd, p0, p1);
if (err)
- goto close_peer1;
+ goto close;
n = write(mode == REDIR_INGRESS ? c1 : p1, "a", 1);
if (n < 0)
@@ -983,39 +700,33 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,
if (n == 0)
FAIL("%s: incomplete write", log_prefix);
if (n < 1)
- goto close_peer1;
+ goto close;
key = SK_PASS;
err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
if (err)
- goto close_peer1;
+ goto close;
if (pass != 1)
FAIL("%s: want pass count 1, have %d", log_prefix, pass);
-
- n = read(c0, &b, 1);
+ n = recv_timeout(c0, &b, 1, 0, IO_TIMEOUT_SEC);
if (n < 0)
- FAIL_ERRNO("%s: read", log_prefix);
+ FAIL_ERRNO("%s: recv_timeout", log_prefix);
if (n == 0)
- FAIL("%s: incomplete read", log_prefix);
+ FAIL("%s: incomplete recv", log_prefix);
-close_peer1:
+close:
xclose(p1);
-close_cli1:
xclose(c1);
-close_peer0:
xclose(p0);
-close_cli0:
xclose(c0);
-close_srv:
- xclose(s);
}
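The removed block above is what the new sockmap_helpers.h helpers replace. A minimal sketch of add_to_sockmap(), reconstructed from the removed lines (the real helper lives in sockmap_helpers.h and may differ in detail):

static inline int add_to_sockmap(int sock_mapfd, int fd1, int fd2)
{
	u64 value;
	u32 key;
	int err;

	key = 0;
	value = fd1;
	err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
	if (err)
		return err;

	key = 1;
	value = fd2;
	return xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
}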
static void test_skb_redir_to_connected(struct test_sockmap_listen *skel,
struct bpf_map *inner_map, int family,
int sotype)
{
- int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
- int parser = bpf_program__fd(skel->progs.prog_skb_parser);
+ int verdict = bpf_program__fd(skel->progs.prog_stream_verdict);
+ int parser = bpf_program__fd(skel->progs.prog_stream_parser);
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
int sock_map = bpf_map__fd(inner_map);
int err;
@@ -1053,6 +764,24 @@ static void test_msg_redir_to_connected(struct test_sockmap_listen *skel,
xbpf_prog_detach2(verdict, sock_map, BPF_SK_MSG_VERDICT);
}
+static void test_msg_redir_to_connected_with_link(struct test_sockmap_listen *skel,
+ struct bpf_map *inner_map, int family,
+ int sotype)
+{
+ int prog_msg_verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
+ int verdict_map = bpf_map__fd(skel->maps.verdict_map);
+ int sock_map = bpf_map__fd(inner_map);
+ int link_fd;
+
+ link_fd = bpf_link_create(prog_msg_verdict, sock_map, BPF_SK_MSG_VERDICT, NULL);
+ if (!ASSERT_GE(link_fd, 0, "bpf_link_create"))
+ return;
+
+ redir_to_connected(family, sotype, sock_map, verdict_map, REDIR_EGRESS);
+
+ close(link_fd);
+}
+
static void redir_to_listening(int family, int sotype, int sock_mapfd,
int verd_mapfd, enum redir_mode mode)
{
@@ -1061,7 +790,6 @@ static void redir_to_listening(int family, int sotype, int sock_mapfd,
int s, c, p, err, n;
unsigned int drop;
socklen_t len;
- u64 value;
u32 key;
zero_verdict_count(verd_mapfd);
@@ -1086,15 +814,7 @@ static void redir_to_listening(int family, int sotype, int sock_mapfd,
if (p < 0)
goto close_cli;
- key = 0;
- value = s;
- err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
- if (err)
- goto close_peer;
-
- key = 1;
- value = p;
- err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+ err = add_to_sockmap(sock_mapfd, s, p);
if (err)
goto close_peer;
@@ -1125,8 +845,8 @@ static void test_skb_redir_to_listening(struct test_sockmap_listen *skel,
struct bpf_map *inner_map, int family,
int sotype)
{
- int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
- int parser = bpf_program__fd(skel->progs.prog_skb_parser);
+ int verdict = bpf_program__fd(skel->progs.prog_stream_verdict);
+ int parser = bpf_program__fd(skel->progs.prog_stream_parser);
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
int sock_map = bpf_map__fd(inner_map);
int err;
@@ -1164,6 +884,92 @@ static void test_msg_redir_to_listening(struct test_sockmap_listen *skel,
xbpf_prog_detach2(verdict, sock_map, BPF_SK_MSG_VERDICT);
}
+static void test_msg_redir_to_listening_with_link(struct test_sockmap_listen *skel,
+ struct bpf_map *inner_map, int family,
+ int sotype)
+{
+ struct bpf_program *verdict = skel->progs.prog_msg_verdict;
+ int verdict_map = bpf_map__fd(skel->maps.verdict_map);
+ int sock_map = bpf_map__fd(inner_map);
+ struct bpf_link *link;
+
+ link = bpf_program__attach_sockmap(verdict, sock_map);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
+ return;
+
+ redir_to_listening(family, sotype, sock_map, verdict_map, REDIR_EGRESS);
+
+ bpf_link__detach(link);
+}
+
+static void redir_partial(int family, int sotype, int sock_map, int parser_map)
+{
+ int c0 = -1, c1 = -1, p0 = -1, p1 = -1;
+ int err, n, key, value;
+ char buf[] = "abc";
+
+ key = 0;
+ value = sizeof(buf) - 1;
+ err = xbpf_map_update_elem(parser_map, &key, &value, 0);
+ if (err)
+ return;
+
+ err = create_socket_pairs(family, sotype | SOCK_NONBLOCK, &c0, &c1,
+ &p0, &p1);
+ if (err)
+ goto clean_parser_map;
+
+ err = add_to_sockmap(sock_map, p0, p1);
+ if (err)
+ goto close;
+
+ n = xsend(c1, buf, sizeof(buf), 0);
+ if (n == -1)
+ goto close;
+ if (n < sizeof(buf))
+ FAIL("incomplete write");
+
+ n = xrecv_nonblock(c0, buf, sizeof(buf), 0);
+ if (n != sizeof(buf) - 1)
+ FAIL("expect %zu, received %d", sizeof(buf) - 1, n);
+
+close:
+ xclose(c0);
+ xclose(p0);
+ xclose(c1);
+ xclose(p1);
+
+clean_parser_map:
+ key = 0;
+ value = 0;
+ xbpf_map_update_elem(parser_map, &key, &value, 0);
+}
+
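redir_partial() above steers the stream parser through parser_map: a non-zero entry caps how many bytes of each record the parser claims, which is what produces the truncated payload checked above. A sketch of how the BPF side plausibly honors that map (an assumption; the BPF program itself is not part of this hunk):

SEC("sk_skb/stream_parser")
int prog_stream_parser(struct __sk_buff *skb)
{
	int *value, key = 0;

	/* claim only the configured length, else the whole skb */
	value = bpf_map_lookup_elem(&parser_map, &key);
	if (value && *value)
		return *value;

	return skb->len;
}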
+static void test_skb_redir_partial(struct test_sockmap_listen *skel,
+ struct bpf_map *inner_map, int family,
+ int sotype)
+{
+ int verdict = bpf_program__fd(skel->progs.prog_stream_verdict);
+ int parser = bpf_program__fd(skel->progs.prog_stream_parser);
+ int parser_map = bpf_map__fd(skel->maps.parser_map);
+ int sock_map = bpf_map__fd(inner_map);
+ int err;
+
+ err = xbpf_prog_attach(parser, sock_map, BPF_SK_SKB_STREAM_PARSER, 0);
+ if (err)
+ return;
+
+ err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (err)
+ goto detach;
+
+ redir_partial(family, sotype, sock_map, parser_map);
+
+ xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT);
+detach:
+ xbpf_prog_detach2(parser, sock_map, BPF_SK_SKB_STREAM_PARSER);
+}
+
static void test_reuseport_select_listening(int family, int sotype,
int sock_map, int verd_map,
int reuseport_prog)
@@ -1346,7 +1152,6 @@ static void test_reuseport_mixed_groups(int family, int sotype, int sock_map,
int s1, s2, c, err;
unsigned int drop;
socklen_t len;
- u64 value;
u32 key;
zero_verdict_count(verd_map);
@@ -1360,16 +1165,10 @@ static void test_reuseport_mixed_groups(int family, int sotype, int sock_map,
if (s2 < 0)
goto close_srv1;
- key = 0;
- value = s1;
- err = xbpf_map_update_elem(sock_map, &key, &value, BPF_NOEXIST);
+ err = add_to_sockmap(sock_map, s1, s2);
if (err)
goto close_srv2;
- key = 1;
- value = s2;
- err = xbpf_map_update_elem(sock_map, &key, &value, BPF_NOEXIST);
-
/* Connect to s2, reuseport BPF selects s1 via sock_map[0] */
len = sizeof(addr);
err = xgetsockname(s2, sockaddr(&addr), &len);
@@ -1420,14 +1219,12 @@ close_srv1:
static void test_ops_cleanup(const struct bpf_map *map)
{
- const struct bpf_map_def *def;
int err, mapfd;
u32 key;
- def = bpf_map__def(map);
mapfd = bpf_map__fd(map);
- for (key = 0; key < def->max_entries; key++) {
+ for (key = 0; key < bpf_map__max_entries(map); key++) {
err = bpf_map_delete_elem(mapfd, &key);
if (err && errno != EINVAL && errno != ENOENT)
FAIL_ERRNO("map_delete: expected EINVAL/ENOENT");
@@ -1441,6 +1238,10 @@ static const char *family_str(sa_family_t family)
return "IPv4";
case AF_INET6:
return "IPv6";
+ case AF_UNIX:
+ return "Unix";
+ case AF_VSOCK:
+ return "VSOCK";
default:
return "unknown";
}
@@ -1448,13 +1249,13 @@ static const char *family_str(sa_family_t family)
static const char *map_type_str(const struct bpf_map *map)
{
- const struct bpf_map_def *def;
+ int type;
- def = bpf_map__def(map);
- if (IS_ERR(def))
+ if (!map)
return "invalid";
+ type = bpf_map__type(map);
- switch (def->type) {
+ switch (type) {
case BPF_MAP_TYPE_SOCKMAP:
return "sockmap";
case BPF_MAP_TYPE_SOCKHASH:
@@ -1480,7 +1281,8 @@ static void test_ops(struct test_sockmap_listen *skel, struct bpf_map *map,
int family, int sotype)
{
const struct op_test {
- void (*fn)(int family, int sotype, int mapfd);
+ void (*fn)(struct test_sockmap_listen *skel,
+ int family, int sotype, int mapfd);
const char *name;
int sotype;
} tests[] = {
@@ -1527,7 +1329,7 @@ static void test_ops(struct test_sockmap_listen *skel, struct bpf_map *map,
if (!test__start_subtest(s))
continue;
- t->fn(family, sotype, map_fd);
+ t->fn(skel, family, sotype, map_fd);
test_ops_cleanup(map);
}
}
@@ -1542,8 +1344,11 @@ static void test_redir(struct test_sockmap_listen *skel, struct bpf_map *map,
} tests[] = {
TEST(test_skb_redir_to_connected),
TEST(test_skb_redir_to_listening),
+ TEST(test_skb_redir_partial),
TEST(test_msg_redir_to_connected),
+ TEST(test_msg_redir_to_connected_with_link),
TEST(test_msg_redir_to_listening),
+ TEST(test_msg_redir_to_listening_with_link),
};
const char *family_name, *map_name;
const struct redir_test *t;
@@ -1613,7 +1418,7 @@ static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map,
test_reuseport(skel, map, family, SOCK_DGRAM);
}
-void test_sockmap_listen(void)
+void serial_test_sockmap_listen(void)
{
struct test_sockmap_listen *skel;
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_redir.c b/tools/testing/selftests/bpf/prog_tests/sockmap_redir.c
new file mode 100644
index 000000000000..9c461d93113d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_redir.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test for sockmap/sockhash redirection.
+ *
+ * BPF_MAP_TYPE_SOCKMAP
+ * BPF_MAP_TYPE_SOCKHASH
+ * x
+ * sk_msg-to-egress
+ * sk_msg-to-ingress
+ * sk_skb-to-egress
+ * sk_skb-to-ingress
+ * x
+ * AF_INET, SOCK_STREAM
+ * AF_INET6, SOCK_STREAM
+ * AF_INET, SOCK_DGRAM
+ * AF_INET6, SOCK_DGRAM
+ * AF_UNIX, SOCK_STREAM
+ * AF_UNIX, SOCK_DGRAM
+ * AF_VSOCK, SOCK_STREAM
+ * AF_VSOCK, SOCK_SEQPACKET
+ */
+
+#include <errno.h>
+#include <error.h>
+#include <sched.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <linux/string.h>
+#include <linux/vm_sockets.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "linux/const.h"
+#include "test_progs.h"
+#include "sockmap_helpers.h"
+#include "test_sockmap_redir.skel.h"
+
+/* The meaning of SUPPORTED is "will redirect the packet as expected". */
+#define SUPPORTED _BITUL(0)
+
+/* Note on sk_skb-to-ingress ->af_vsock:
+ *
+ * Peer socket may receive the packet some time after the return from sendmsg().
+ * In a typical usage scenario, recvmsg() will block until the redirected packet
+ * appears in the destination queue, or time out if the packet was dropped. By
+ * that point, the verdict map has already been updated to reflect what has
+ * happened.
+ *
+ * But sk_skb-to-ingress/af_vsock is an unsupported combination, so no recvmsg()
+ * takes place, which means we may race the execution of the verdict logic and
+ * read map_verd before it has been updated, i.e. we might observe
+ * map_verd[SK_DROP]=0 instead of map_verd[SK_DROP]=1.
+ *
+ * This confuses the selftest logic: if there was no packet dropped, where's the
+ * packet? So here's a heuristic: on map_verd[SK_DROP]=map_verd[SK_PASS]=0
+ * (which implies the verdict program has not been run) just re-read the verdict
+ * map again.
+ */
+#define UNSUPPORTED_RACY_VERD _BITUL(1)
+
+enum prog_type {
+ SK_MSG_EGRESS,
+ SK_MSG_INGRESS,
+ SK_SKB_EGRESS,
+ SK_SKB_INGRESS,
+};
+
+enum {
+ SEND_INNER = 0,
+ SEND_OUTER,
+};
+
+enum {
+ RECV_INNER = 0,
+ RECV_OUTER,
+};
+
+struct maps {
+ int in;
+ int out;
+ int verd;
+};
+
+struct combo_spec {
+ enum prog_type prog_type;
+ const char *in, *out;
+};
+
+struct redir_spec {
+ const char *name;
+ int idx_send;
+ int idx_recv;
+ enum prog_type prog_type;
+};
+
+struct socket_spec {
+ int family;
+ int sotype;
+ int send_flags;
+ int in[2];
+ int out[2];
+};
+
+static int socket_spec_pairs(struct socket_spec *s)
+{
+ return create_socket_pairs(s->family, s->sotype,
+ &s->in[0], &s->out[0],
+ &s->in[1], &s->out[1]);
+}
+
+static void socket_spec_close(struct socket_spec *s)
+{
+ xclose(s->in[0]);
+ xclose(s->in[1]);
+ xclose(s->out[0]);
+ xclose(s->out[1]);
+}
+
+static void get_redir_params(struct redir_spec *redir,
+ struct test_sockmap_redir *skel, int *prog_fd,
+ enum bpf_attach_type *attach_type,
+ int *redirect_flags)
+{
+ enum prog_type type = redir->prog_type;
+ struct bpf_program *prog;
+ bool sk_msg;
+
+ sk_msg = type == SK_MSG_INGRESS || type == SK_MSG_EGRESS;
+ prog = sk_msg ? skel->progs.prog_msg_verdict : skel->progs.prog_skb_verdict;
+
+ *prog_fd = bpf_program__fd(prog);
+ *attach_type = sk_msg ? BPF_SK_MSG_VERDICT : BPF_SK_SKB_VERDICT;
+
+ if (type == SK_MSG_INGRESS || type == SK_SKB_INGRESS)
+ *redirect_flags = BPF_F_INGRESS;
+ else
+ *redirect_flags = 0;
+}
+
+static void try_recv(const char *prefix, int fd, int flags, bool expect_success)
+{
+ ssize_t n;
+ char buf;
+
+ errno = 0;
+ n = recv(fd, &buf, 1, flags);
+ if (n < 0 && expect_success)
+ FAIL_ERRNO("%s: unexpected failure: retval=%zd", prefix, n);
+ if (!n && !expect_success)
+ FAIL("%s: expected failure: retval=%zd", prefix, n);
+}
+
+static void handle_unsupported(int sd_send, int sd_peer, int sd_in, int sd_out,
+ int sd_recv, int map_verd, int status)
+{
+ unsigned int drop, pass;
+ char recv_buf;
+ ssize_t n;
+
+get_verdict:
+ if (xbpf_map_lookup_elem(map_verd, &u32(SK_DROP), &drop) ||
+ xbpf_map_lookup_elem(map_verd, &u32(SK_PASS), &pass))
+ return;
+
+ if (pass == 0 && drop == 0 && (status & UNSUPPORTED_RACY_VERD)) {
+ sched_yield();
+ goto get_verdict;
+ }
+
+ if (pass != 0) {
+ FAIL("unsupported: wanted verdict pass 0, have %u", pass);
+ return;
+ }
+
+ /* If nothing was dropped, packet should have reached the peer */
+ if (drop == 0) {
+ errno = 0;
+ n = recv_timeout(sd_peer, &recv_buf, 1, 0, IO_TIMEOUT_SEC);
+ if (n != 1)
+ FAIL_ERRNO("unsupported: packet missing, retval=%zd", n);
+ }
+
+ /* Ensure queues are empty */
+ try_recv("bpf.recv(sd_send)", sd_send, MSG_DONTWAIT, false);
+ if (sd_in != sd_send)
+ try_recv("bpf.recv(sd_in)", sd_in, MSG_DONTWAIT, false);
+
+ try_recv("bpf.recv(sd_out)", sd_out, MSG_DONTWAIT, false);
+ if (sd_recv != sd_out)
+ try_recv("bpf.recv(sd_recv)", sd_recv, MSG_DONTWAIT, false);
+}
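Arguments like &u32(SK_DROP) above depend on C99 compound literals, which create addressable temporaries. A plausible definition of these helpers (an assumption; the real macros come from sockmap_helpers.h):

#define u32(v) ((u32){ (v) })
#define u64(v) ((u64){ (v) })

With these, a call such as xbpf_map_lookup_elem(map_verd, &u32(SK_DROP), &drop) passes a pointer to a short-lived stack object without declaring a named variable.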
+
+static void test_send_redir_recv(int sd_send, int send_flags, int sd_peer,
+ int sd_in, int sd_out, int sd_recv,
+ struct maps *maps, int status)
+{
+ unsigned int drop, pass;
+ char *send_buf = "ab";
+ char recv_buf = '\0';
+ ssize_t n, len = 1;
+
+ /* Zero out the verdict map */
+ if (xbpf_map_update_elem(maps->verd, &u32(SK_DROP), &u32(0), BPF_ANY) ||
+ xbpf_map_update_elem(maps->verd, &u32(SK_PASS), &u32(0), BPF_ANY))
+ return;
+
+ if (xbpf_map_update_elem(maps->in, &u32(0), &u64(sd_in), BPF_NOEXIST))
+ return;
+
+ if (xbpf_map_update_elem(maps->out, &u32(0), &u64(sd_out), BPF_NOEXIST))
+ goto del_in;
+
+ /* Last byte is OOB data when send_flags has MSG_OOB bit set */
+ if (send_flags & MSG_OOB)
+ len++;
+ n = send(sd_send, send_buf, len, send_flags);
+ if (n >= 0 && n < len)
+ FAIL("incomplete send");
+ if (n < 0) {
+ /* sk_msg redirect combo not supported? */
+ if (status & SUPPORTED || errno != EACCES)
+ FAIL_ERRNO("send");
+ goto out;
+ }
+
+ if (!(status & SUPPORTED)) {
+ handle_unsupported(sd_send, sd_peer, sd_in, sd_out, sd_recv,
+ maps->verd, status);
+ goto out;
+ }
+
+ errno = 0;
+ n = recv_timeout(sd_recv, &recv_buf, 1, 0, IO_TIMEOUT_SEC);
+ if (n != 1) {
+ FAIL_ERRNO("recv_timeout()");
+ goto out;
+ }
+
+ /* Check verdict _after_ recv(); af_vsock may need time to catch up */
+ if (xbpf_map_lookup_elem(maps->verd, &u32(SK_DROP), &drop) ||
+ xbpf_map_lookup_elem(maps->verd, &u32(SK_PASS), &pass))
+ goto out;
+
+ if (drop != 0 || pass != 1)
+ FAIL("unexpected verdict drop/pass: wanted 0/1, have %u/%u",
+ drop, pass);
+
+ if (recv_buf != send_buf[0])
+ FAIL("recv(): payload check, %02x != %02x", recv_buf, send_buf[0]);
+
+ if (send_flags & MSG_OOB) {
+ /* Fail reading OOB while in sockmap */
+ try_recv("bpf.recv(sd_out, MSG_OOB)", sd_out,
+ MSG_OOB | MSG_DONTWAIT, false);
+
+ /* Remove sd_out from sockmap */
+ xbpf_map_delete_elem(maps->out, &u32(0));
+
+ /* Check that OOB was dropped on redirect */
+ try_recv("recv(sd_out, MSG_OOB)", sd_out,
+ MSG_OOB | MSG_DONTWAIT, false);
+
+ goto del_in;
+ }
+out:
+ xbpf_map_delete_elem(maps->out, &u32(0));
+del_in:
+ xbpf_map_delete_elem(maps->in, &u32(0));
+}
+
+static int is_redir_supported(enum prog_type type, const char *in,
+ const char *out)
+{
+ /* Matching based on strings returned by socket_kind_to_str():
+ * tcp4, udp4, tcp6, udp6, u_str, u_dgr, v_str, v_seq
+ * Plus a wildcard: any
+ * Not in use: u_seq, v_dgr
+ */
+ struct combo_spec *c, combos[] = {
+ /* Send to local: TCP -> any, but vsock */
+ { SK_MSG_INGRESS, "tcp", "tcp" },
+ { SK_MSG_INGRESS, "tcp", "udp" },
+ { SK_MSG_INGRESS, "tcp", "u_str" },
+ { SK_MSG_INGRESS, "tcp", "u_dgr" },
+
+ /* Send to egress: TCP -> TCP */
+ { SK_MSG_EGRESS, "tcp", "tcp" },
+
+ /* Ingress to egress: any -> any */
+ { SK_SKB_EGRESS, "any", "any" },
+
+ /* Ingress to local: any -> any, but vsock */
+ { SK_SKB_INGRESS, "any", "tcp" },
+ { SK_SKB_INGRESS, "any", "udp" },
+ { SK_SKB_INGRESS, "any", "u_str" },
+ { SK_SKB_INGRESS, "any", "u_dgr" },
+ };
+
+ for (c = combos; c < combos + ARRAY_SIZE(combos); c++) {
+ if (c->prog_type == type &&
+ (!strcmp(c->in, "any") || strstarts(in, c->in)) &&
+ (!strcmp(c->out, "any") || strstarts(out, c->out)))
+ return SUPPORTED;
+ }
+
+ return 0;
+}
+
+static int get_support_status(enum prog_type type, const char *in,
+ const char *out)
+{
+ int status = is_redir_supported(type, in, out);
+
+ if (type == SK_SKB_INGRESS && strstarts(out, "v_"))
+ status |= UNSUPPORTED_RACY_VERD;
+
+ return status;
+}
+
+static void test_socket(enum bpf_map_type type, struct redir_spec *redir,
+ struct maps *maps, struct socket_spec *s_in,
+ struct socket_spec *s_out)
+{
+ int fd_in, fd_out, fd_send, fd_peer, fd_recv, flags, status;
+ const char *in_str, *out_str;
+ char s[MAX_TEST_NAME];
+
+ fd_in = s_in->in[0];
+ fd_out = s_out->out[0];
+ fd_send = s_in->in[redir->idx_send];
+ fd_peer = s_in->in[redir->idx_send ^ 1];
+ fd_recv = s_out->out[redir->idx_recv];
+ flags = s_in->send_flags;
+
+ in_str = socket_kind_to_str(fd_in);
+ out_str = socket_kind_to_str(fd_out);
+ status = get_support_status(redir->prog_type, in_str, out_str);
+
+ snprintf(s, sizeof(s),
+ "%-4s %-17s %-5s %s %-5s%6s",
+ /* hash sk_skb-to-ingress u_str → v_str (OOB) */
+ type == BPF_MAP_TYPE_SOCKMAP ? "map" : "hash",
+ redir->name,
+ in_str,
+ status & SUPPORTED ? "→" : " ",
+ out_str,
+ (flags & MSG_OOB) ? "(OOB)" : "");
+
+ if (!test__start_subtest(s))
+ return;
+
+ test_send_redir_recv(fd_send, flags, fd_peer, fd_in, fd_out, fd_recv,
+ maps, status);
+}
+
+static void test_redir(enum bpf_map_type type, struct redir_spec *redir,
+ struct maps *maps)
+{
+ struct socket_spec *s, sockets[] = {
+ { AF_INET, SOCK_STREAM },
+ // { AF_INET, SOCK_STREAM, MSG_OOB }, /* Known to be broken */
+ { AF_INET6, SOCK_STREAM },
+ { AF_INET, SOCK_DGRAM },
+ { AF_INET6, SOCK_DGRAM },
+ { AF_UNIX, SOCK_STREAM },
+ { AF_UNIX, SOCK_STREAM, MSG_OOB },
+ { AF_UNIX, SOCK_DGRAM },
+ // { AF_UNIX, SOCK_SEQPACKET}, /* Unsupported BPF_MAP_UPDATE_ELEM */
+ { AF_VSOCK, SOCK_STREAM },
+ // { AF_VSOCK, SOCK_DGRAM }, /* Unsupported socket() */
+ { AF_VSOCK, SOCK_SEQPACKET },
+ };
+
+ for (s = sockets; s < sockets + ARRAY_SIZE(sockets); s++)
+ if (socket_spec_pairs(s))
+ goto out;
+
+ /* Intra-proto */
+ for (s = sockets; s < sockets + ARRAY_SIZE(sockets); s++)
+ test_socket(type, redir, maps, s, s);
+
+ /* Cross-proto */
+ for (int i = 0; i < ARRAY_SIZE(sockets); i++) {
+ for (int j = 0; j < ARRAY_SIZE(sockets); j++) {
+ struct socket_spec *out = &sockets[j];
+ struct socket_spec *in = &sockets[i];
+
+ /* Skip intra-proto and between variants */
+ if (out->send_flags ||
+ (in->family == out->family &&
+ in->sotype == out->sotype))
+ continue;
+
+ test_socket(type, redir, maps, in, out);
+ }
+ }
+out:
+ while (--s >= sockets)
+ socket_spec_close(s);
+}
+
+static void test_map(enum bpf_map_type type)
+{
+ struct redir_spec *r, redirs[] = {
+ { "sk_msg-to-ingress", SEND_INNER, RECV_INNER, SK_MSG_INGRESS },
+ { "sk_msg-to-egress", SEND_INNER, RECV_OUTER, SK_MSG_EGRESS },
+ { "sk_skb-to-egress", SEND_OUTER, RECV_OUTER, SK_SKB_EGRESS },
+ { "sk_skb-to-ingress", SEND_OUTER, RECV_INNER, SK_SKB_INGRESS },
+ };
+
+ for (r = redirs; r < redirs + ARRAY_SIZE(redirs); r++) {
+ enum bpf_attach_type attach_type;
+ struct test_sockmap_redir *skel;
+ struct maps maps;
+ int prog_fd;
+
+ skel = test_sockmap_redir__open_and_load();
+ if (!skel) {
+ FAIL("open_and_load");
+ return;
+ }
+
+ switch (type) {
+ case BPF_MAP_TYPE_SOCKMAP:
+ maps.in = bpf_map__fd(skel->maps.nop_map);
+ maps.out = bpf_map__fd(skel->maps.sock_map);
+ break;
+ case BPF_MAP_TYPE_SOCKHASH:
+ maps.in = bpf_map__fd(skel->maps.nop_hash);
+ maps.out = bpf_map__fd(skel->maps.sock_hash);
+ break;
+ default:
+ FAIL("Unsupported bpf_map_type");
+ return;
+ }
+
+ skel->bss->redirect_type = type;
+ maps.verd = bpf_map__fd(skel->maps.verdict_map);
+ get_redir_params(r, skel, &prog_fd, &attach_type,
+ &skel->bss->redirect_flags);
+
+ if (xbpf_prog_attach(prog_fd, maps.in, attach_type, 0))
+ return;
+
+ test_redir(type, r, &maps);
+
+ if (xbpf_prog_detach2(prog_fd, maps.in, attach_type))
+ return;
+
+ test_sockmap_redir__destroy(skel);
+ }
+}
+
+void serial_test_sockmap_redir(void)
+{
+ test_map(BPF_MAP_TYPE_SOCKMAP);
+ test_map(BPF_MAP_TYPE_SOCKHASH);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_strp.c b/tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
new file mode 100644
index 000000000000..621b3b71888e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_strp.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <error.h>
+#include <netinet/tcp.h>
+#include <test_progs.h>
+#include "sockmap_helpers.h"
+#include "test_skmsg_load_helpers.skel.h"
+#include "test_sockmap_strp.skel.h"
+
+#define STRP_PKT_HEAD_LEN 4
+#define STRP_PKT_BODY_LEN 6
+#define STRP_PKT_FULL_LEN (STRP_PKT_HEAD_LEN + STRP_PKT_BODY_LEN)
+
+static const char packet[STRP_PKT_FULL_LEN] = "head+body\0";
+static const int test_packet_num = 100;
+
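The parser and verdict programs used below come from test_sockmap_strp.bpf.c, which this diff does not include. As a hedged sketch of the framing contract only: a stream parser returns the byte length of the next record, so a fixed-size framing parser could be as small as:

SEC("sk_skb/stream_parser")
int prog_skb_parser(struct __sk_buff *skb)
{
	/* every record is exactly STRP_PKT_FULL_LEN bytes */
	return STRP_PKT_FULL_LEN;
}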
+/* The current implementation of tcp_bpf_recvmsg_parser() invokes data_ready
+ * with sk held if an skb exists in sk_receive_queue. The data_ready
+ * implementation of strparser then delays the read operation while sk is
+ * held, so EAGAIN is returned to the caller.
+ */
+static int sockmap_strp_consume_pre_data(int p)
+{
+ int recvd;
+ bool retried = false;
+ char rcv[10];
+
+retry:
+ errno = 0;
+ recvd = recv_timeout(p, rcv, sizeof(rcv), 0, 1);
+ if (recvd < 0 && errno == EAGAIN && retried == false) {
+ /* On the first call, EAGAIN will certainly be returned.
+ * A 1-second wait is enough for the workqueue to finish.
+ */
+ sleep(1);
+ retried = true;
+ goto retry;
+ }
+
+ if (!ASSERT_EQ(recvd, STRP_PKT_FULL_LEN, "recv error or truncated data") ||
+ !ASSERT_OK(memcmp(packet, rcv, STRP_PKT_FULL_LEN),
+ "data mismatch"))
+ return -1;
+ return 0;
+}
+
+static struct test_sockmap_strp *sockmap_strp_init(int *out_map, bool pass,
+ bool need_parser)
+{
+ struct test_sockmap_strp *strp = NULL;
+ int verdict, parser;
+ int err;
+
+ strp = test_sockmap_strp__open_and_load();
+ *out_map = bpf_map__fd(strp->maps.sock_map);
+
+ if (need_parser)
+ parser = bpf_program__fd(strp->progs.prog_skb_parser_partial);
+ else
+ parser = bpf_program__fd(strp->progs.prog_skb_parser);
+
+ if (pass)
+ verdict = bpf_program__fd(strp->progs.prog_skb_verdict_pass);
+ else
+ verdict = bpf_program__fd(strp->progs.prog_skb_verdict);
+
+ err = bpf_prog_attach(parser, *out_map, BPF_SK_SKB_STREAM_PARSER, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach stream parser"))
+ goto err;
+
+ err = bpf_prog_attach(verdict, *out_map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach stream verdict"))
+ goto err;
+
+ return strp;
+err:
+ test_sockmap_strp__destroy(strp);
+ return NULL;
+}
+
+/* Dispatch packets to different sockets by packet size:
+ *
+ * ------ ------
+ * | pkt4 || pkt1 |... > remote socket
+ * ------ ------ / ------ ------
+ * | pkt8 | pkt7 |...
+ * ------ ------ \ ------ ------
+ * | pkt3 || pkt2 |... > local socket
+ * ------ ------
+ */
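The split is decided by the verdict program consulting the verdict_max_size variable set below; a hedged sketch of that logic (assumed, since the BPF source is not in this diff):

SEC("sk_skb/stream_verdict")
int prog_skb_verdict(struct __sk_buff *skb)
{
	/* long frames: redirect to sock_map[0] (p0, ingress);
	 * short frames: egress via sock_map[1] toward c1
	 */
	if (skb->len > verdict_max_size)
		return bpf_sk_redirect_map(skb, &sock_map, 0, BPF_F_INGRESS);

	return bpf_sk_redirect_map(skb, &sock_map, 1, 0);
}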
+static void test_sockmap_strp_dispatch_pkt(int family, int sotype)
+{
+ int i, j, zero = 0, one = 1, recvd;
+ int err, map;
+ int c0 = -1, p0 = -1, c1 = -1, p1 = -1;
+ struct test_sockmap_strp *strp = NULL;
+ int test_cnt = 6;
+ char rcv[10];
+ struct {
+ char data[7];
+ int data_len;
+ int send_cnt;
+ int *receiver;
+ } send_dir[2] = {
+ /* data expected to deliver to local */
+ {"llllll", 6, 0, &p0},
+ /* data expected to deliver to remote */
+ {"rrrrr", 5, 0, &c1}
+ };
+
+ strp = sockmap_strp_init(&map, false, false);
+ if (!ASSERT_TRUE(strp, "sockmap_strp_init"))
+ return;
+
+ err = create_socket_pairs(family, sotype, &c0, &c1, &p0, &p1);
+ if (!ASSERT_OK(err, "create_socket_pairs()"))
+ goto out;
+
+ err = bpf_map_update_elem(map, &zero, &p0, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(p0)"))
+ goto out_close;
+
+ err = bpf_map_update_elem(map, &one, &p1, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(p1)"))
+ goto out_close;
+
+ err = setsockopt(c1, IPPROTO_TCP, TCP_NODELAY, &zero, sizeof(zero));
+ if (!ASSERT_OK(err, "setsockopt(TCP_NODELAY)"))
+ goto out_close;
+
+ /* deliver data with data size greater than 5 to local */
+ strp->data->verdict_max_size = 5;
+
+ for (i = 0; i < test_cnt; i++) {
+ int d = i % 2;
+
+ xsend(c0, send_dir[d].data, send_dir[d].data_len, 0);
+ send_dir[d].send_cnt++;
+ }
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < send_dir[i].send_cnt; j++) {
+ int expected = send_dir[i].data_len;
+
+ recvd = recv_timeout(*send_dir[i].receiver, rcv,
+ expected, MSG_DONTWAIT,
+ IO_TIMEOUT_SEC);
+ if (!ASSERT_EQ(recvd, expected, "recv_timeout()"))
+ goto out_close;
+ if (!ASSERT_OK(memcmp(send_dir[i].data, rcv, recvd),
+ "data mismatch"))
+ goto out_close;
+ }
+ }
+out_close:
+ close(c0);
+ close(c1);
+ close(p0);
+ close(p1);
+out:
+ test_sockmap_strp__destroy(strp);
+}
+
+/* We have multiple packets in one skb
+ * ------------ ------------ ------------
+ * | packet1 | packet2 | ...
+ * ------------ ------------ ------------
+ */
+static void test_sockmap_strp_multiple_pkt(int family, int sotype)
+{
+ int i, zero = 0;
+ int sent, recvd, total;
+ int err, map;
+ int c = -1, p = -1;
+ struct test_sockmap_strp *strp = NULL;
+ char *snd = NULL, *rcv = NULL;
+
+ strp = sockmap_strp_init(&map, true, true);
+ if (!ASSERT_TRUE(strp, "sockmap_strp_init"))
+ return;
+
+ err = create_pair(family, sotype, &c, &p);
+ if (err)
+ goto out;
+
+ err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(zero, p)"))
+ goto out_close;
+
+ /* construct multiple packets in one buffer */
+ total = test_packet_num * STRP_PKT_FULL_LEN;
+ snd = malloc(total);
+ rcv = malloc(total + 1);
+ if (!ASSERT_TRUE(snd, "malloc(snd)") ||
+ !ASSERT_TRUE(rcv, "malloc(rcv)"))
+ goto out_close;
+
+ for (i = 0; i < test_packet_num; i++) {
+ memcpy(snd + i * STRP_PKT_FULL_LEN,
+ packet, STRP_PKT_FULL_LEN);
+ }
+
+ sent = xsend(c, snd, total, 0);
+ if (!ASSERT_EQ(sent, total, "xsend(c)"))
+ goto out_close;
+
+	/* recv one extra byte so recvd == total proves nothing was truncated */
+ recvd = recv_timeout(p, rcv, total + 1, MSG_DONTWAIT, IO_TIMEOUT_SEC);
+ if (!ASSERT_EQ(recvd, total, "recv(rcv)"))
+ goto out_close;
+
+	/* we sent one TCP stream carrying multiple framed packets;
+	 * check whether the packets are handled correctly
+	 */
+ if (!ASSERT_OK(memcmp(snd, rcv, total), "data mismatch"))
+ goto out_close;
+
+out_close:
+ close(c);
+ close(p);
+ if (snd)
+ free(snd);
+ if (rcv)
+ free(rcv);
+out:
+ test_sockmap_strp__destroy(strp);
+}
+
+/* Test strparser with partial read */
+static void test_sockmap_strp_partial_read(int family, int sotype)
+{
+ int zero = 0, recvd, off;
+ int err, map;
+ int c = -1, p = -1;
+ struct test_sockmap_strp *strp = NULL;
+ char rcv[STRP_PKT_FULL_LEN + 1] = "0";
+
+ strp = sockmap_strp_init(&map, true, true);
+ if (!ASSERT_TRUE(strp, "sockmap_strp_init"))
+ return;
+
+ err = create_pair(family, sotype, &c, &p);
+ if (err)
+ goto out;
+
+ /* sk_data_ready of 'p' will be replaced by strparser handler */
+ err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(zero, p)"))
+ goto out_close;
+
+ /* 1.1 send partial head, 1 byte header left */
+ off = STRP_PKT_HEAD_LEN - 1;
+ xsend(c, packet, off, 0);
+ recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, 1);
+ if (!ASSERT_EQ(-1, recvd, "partial head sent, expected no data"))
+ goto out_close;
+
+ /* 1.2 send remaining head and body */
+ xsend(c, packet + off, STRP_PKT_FULL_LEN - off, 0);
+ recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, IO_TIMEOUT_SEC);
+ if (!ASSERT_EQ(recvd, STRP_PKT_FULL_LEN, "expected full data"))
+ goto out_close;
+
+ /* 2.1 send partial head, 1 byte header left */
+ off = STRP_PKT_HEAD_LEN - 1;
+ xsend(c, packet, off, 0);
+
+ /* 2.2 send remaining head and partial body, 1 byte body left */
+ xsend(c, packet + off, STRP_PKT_FULL_LEN - off - 1, 0);
+ off = STRP_PKT_FULL_LEN - 1;
+ recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, 1);
+ if (!ASSERT_EQ(-1, recvd, "partial body sent, expected no data"))
+ goto out_close;
+
+ /* 2.3 send remaining body */
+ xsend(c, packet + off, STRP_PKT_FULL_LEN - off, 0);
+ recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, IO_TIMEOUT_SEC);
+ if (!ASSERT_EQ(recvd, STRP_PKT_FULL_LEN, "expected full data"))
+ goto out_close;
+
+out_close:
+ close(c);
+ close(p);
+
+out:
+ test_sockmap_strp__destroy(strp);
+}
+
+/* Test simple socket read/write with strparser + FIONREAD */
+static void test_sockmap_strp_pass(int family, int sotype, bool fionread)
+{
+ int zero = 0, pkt_size = STRP_PKT_FULL_LEN, sent, recvd, avail;
+ int err, map;
+ int c = -1, p = -1;
+ int test_cnt = 10, i;
+ struct test_sockmap_strp *strp = NULL;
+ char rcv[STRP_PKT_FULL_LEN + 1] = "0";
+
+ strp = sockmap_strp_init(&map, true, true);
+ if (!ASSERT_TRUE(strp, "sockmap_strp_init"))
+ return;
+
+ err = create_pair(family, sotype, &c, &p);
+ if (err)
+ goto out;
+
+	/* Inject some data before the BPF program is attached; it should
+	 * still be read correctly because tcp_bpf_recvmsg_parser() checks
+	 * sk_receive_queue.
+	 */
+ sent = xsend(c, packet, pkt_size, 0);
+ if (!ASSERT_EQ(sent, pkt_size, "xsend(pre-data)"))
+ goto out_close;
+
+ /* sk_data_ready of 'p' will be replaced by strparser handler */
+ err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(p)"))
+ goto out_close;
+
+ /* consume previous data we injected */
+ if (sockmap_strp_consume_pre_data(p))
+ goto out_close;
+
+	/* Earlier implementations suffered deadlocks and sequencing
+	 * errors that made continuous reads impossible, so repeat the
+	 * send/recv cycle several times here.
+	 */
+ for (i = 0; i < test_cnt; i++) {
+ sent = xsend(c, packet, pkt_size, 0);
+ if (!ASSERT_EQ(sent, pkt_size, "xsend(c)"))
+ goto out_close;
+
+ recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT,
+ IO_TIMEOUT_SEC);
+ if (!ASSERT_EQ(recvd, pkt_size, "recv_timeout(p)") ||
+ !ASSERT_OK(memcmp(packet, rcv, pkt_size),
+ "memcmp, data mismatch"))
+ goto out_close;
+ }
+
+ if (fionread) {
+ sent = xsend(c, packet, pkt_size, 0);
+ if (!ASSERT_EQ(sent, pkt_size, "second xsend(c)"))
+ goto out_close;
+
+ err = ioctl(p, FIONREAD, &avail);
+ if (!ASSERT_OK(err, "ioctl(FIONREAD) error") ||
+ !ASSERT_EQ(avail, pkt_size, "ioctl(FIONREAD)"))
+ goto out_close;
+
+ recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT,
+ IO_TIMEOUT_SEC);
+ if (!ASSERT_EQ(recvd, pkt_size, "second recv_timeout(p)") ||
+ !ASSERT_OK(memcmp(packet, rcv, pkt_size),
+ "second memcmp, data mismatch"))
+ goto out_close;
+ }
+
+out_close:
+ close(c);
+ close(p);
+
+out:
+ test_sockmap_strp__destroy(strp);
+}
+
+/* Test strparser with verdict mode */
+static void test_sockmap_strp_verdict(int family, int sotype)
+{
+ int zero = 0, one = 1, sent, recvd, off;
+ int err, map;
+ int c0 = -1, p0 = -1, c1 = -1, p1 = -1;
+ struct test_sockmap_strp *strp = NULL;
+ char rcv[STRP_PKT_FULL_LEN + 1] = "0";
+
+ strp = sockmap_strp_init(&map, false, true);
+ if (!ASSERT_TRUE(strp, "sockmap_strp_init"))
+ return;
+
+	/* We simulate a reverse proxy server.
+	 * When p0 receives data from c0, we forward it to c1.
+	 * From c1's perspective, the data appears to have been
+	 * sent by p1.
+	 */
+ err = create_socket_pairs(family, sotype, &c0, &c1, &p0, &p1);
+ if (!ASSERT_OK(err, "create_socket_pairs()"))
+ goto out;
+
+ err = bpf_map_update_elem(map, &zero, &p0, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(p0)"))
+ goto out_close;
+
+ err = bpf_map_update_elem(map, &one, &p1, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(p1)"))
+ goto out_close;
+
+ sent = xsend(c0, packet, STRP_PKT_FULL_LEN, 0);
+ if (!ASSERT_EQ(sent, STRP_PKT_FULL_LEN, "xsend(c0)"))
+ goto out_close;
+
+ recvd = recv_timeout(c1, rcv, sizeof(rcv), MSG_DONTWAIT,
+ IO_TIMEOUT_SEC);
+ if (!ASSERT_EQ(recvd, STRP_PKT_FULL_LEN, "recv_timeout(c1)") ||
+ !ASSERT_OK(memcmp(packet, rcv, STRP_PKT_FULL_LEN),
+ "received data does not match the sent data"))
+ goto out_close;
+
+ /* send again to ensure the stream is functioning correctly. */
+ sent = xsend(c0, packet, STRP_PKT_FULL_LEN, 0);
+ if (!ASSERT_EQ(sent, STRP_PKT_FULL_LEN, "second xsend(c0)"))
+ goto out_close;
+
+ /* partial read */
+ off = STRP_PKT_FULL_LEN / 2;
+ recvd = recv_timeout(c1, rcv, off, MSG_DONTWAIT,
+ IO_TIMEOUT_SEC);
+ recvd += recv_timeout(c1, rcv + off, sizeof(rcv) - off, MSG_DONTWAIT,
+ IO_TIMEOUT_SEC);
+
+ if (!ASSERT_EQ(recvd, STRP_PKT_FULL_LEN, "partial recv_timeout(c1)") ||
+ !ASSERT_OK(memcmp(packet, rcv, STRP_PKT_FULL_LEN),
+ "partial received data does not match the sent data"))
+ goto out_close;
+
+out_close:
+ close(c0);
+ close(c1);
+ close(p0);
+ close(p1);
+out:
+ test_sockmap_strp__destroy(strp);
+}
+
+void test_sockmap_strp(void)
+{
+ if (test__start_subtest("sockmap strp tcp pass"))
+ test_sockmap_strp_pass(AF_INET, SOCK_STREAM, false);
+ if (test__start_subtest("sockmap strp tcp v6 pass"))
+ test_sockmap_strp_pass(AF_INET6, SOCK_STREAM, false);
+ if (test__start_subtest("sockmap strp tcp pass fionread"))
+ test_sockmap_strp_pass(AF_INET, SOCK_STREAM, true);
+ if (test__start_subtest("sockmap strp tcp v6 pass fionread"))
+ test_sockmap_strp_pass(AF_INET6, SOCK_STREAM, true);
+ if (test__start_subtest("sockmap strp tcp verdict"))
+ test_sockmap_strp_verdict(AF_INET, SOCK_STREAM);
+ if (test__start_subtest("sockmap strp tcp v6 verdict"))
+ test_sockmap_strp_verdict(AF_INET6, SOCK_STREAM);
+ if (test__start_subtest("sockmap strp tcp partial read"))
+ test_sockmap_strp_partial_read(AF_INET, SOCK_STREAM);
+ if (test__start_subtest("sockmap strp tcp multiple packets"))
+ test_sockmap_strp_multiple_pkt(AF_INET, SOCK_STREAM);
+ if (test__start_subtest("sockmap strp tcp dispatch"))
+ test_sockmap_strp_dispatch_pkt(AF_INET, SOCK_STREAM);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt.c b/tools/testing/selftests/bpf/prog_tests/sockopt.c
index 3e8517a8395a..eaac83a7f388 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt.c
@@ -1,14 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include <io_uring/mini_liburing.h>
#include "cgroup_helpers.h"
static char bpf_log_buf[4096];
static bool verbose;
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
enum sockopt_test_error {
OK = 0,
DENY_LOAD,
DENY_ATTACH,
+ EOPNOTSUPP_GETSOCKOPT,
EPERM_GETSOCKOPT,
EFAULT_GETSOCKOPT,
EPERM_SETSOCKOPT,
@@ -18,6 +24,7 @@ enum sockopt_test_error {
static struct sockopt_test {
const char *descr;
const struct bpf_insn insns[64];
+ enum bpf_prog_type prog_type;
enum bpf_attach_type attach_type;
enum bpf_attach_type expected_attach_type;
@@ -33,6 +40,7 @@ static struct sockopt_test {
socklen_t get_optlen_ret;
enum sockopt_test_error error;
+ bool io_uring_support;
} tests[] = {
/* ==================== getsockopt ==================== */
@@ -246,7 +254,9 @@ static struct sockopt_test {
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
+ .get_level = SOL_SOCKET,
.get_optlen = 64,
+ .io_uring_support = true,
},
{
.descr = "getsockopt: deny bigger ctx->optlen",
@@ -271,12 +281,34 @@ static struct sockopt_test {
.get_optlen = 64,
.error = EFAULT_GETSOCKOPT,
+ .io_uring_support = true,
},
{
- .descr = "getsockopt: deny arbitrary ctx->retval",
+ .descr = "getsockopt: ignore >PAGE_SIZE optlen",
.insns = {
- /* ctx->retval = 123 */
- BPF_MOV64_IMM(BPF_REG_0, 123),
+ /* write 0xFF to the first optval byte */
+
+ /* r6 = ctx->optval */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
+ offsetof(struct bpf_sockopt, optval)),
+ /* r2 = ctx->optval */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+ /* r6 = ctx->optval + 1 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+
+ /* r7 = ctx->optval_end */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1,
+ offsetof(struct bpf_sockopt, optval_end)),
+
+ /* if (ctx->optval + 1 <= ctx->optval_end) { */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
+			/* ctx->optval[0] = 0xFF */
+ BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xFF),
+ /* } */
+
+ /* retval changes are ignored */
+ /* ctx->retval = 5 */
+ BPF_MOV64_IMM(BPF_REG_0, 5),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct bpf_sockopt, retval)),
@@ -287,9 +319,12 @@ static struct sockopt_test {
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
- .get_optlen = 64,
-
- .error = EFAULT_GETSOCKOPT,
+ .get_level = 1234,
+ .get_optname = 5678,
+ .get_optval = {}, /* the changes are ignored */
+ .get_optlen = PAGE_SIZE + 1,
+ .error = EOPNOTSUPP_GETSOCKOPT,
+ .io_uring_support = true,
},
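For readers not fluent in raw BPF_* macros, the instruction sequence in this test (and in its setsockopt twin further down) corresponds roughly to the following C; the function name is illustrative only:

int ignore_large_optlen(struct bpf_sockopt *ctx)
{
	__u8 *optval = ctx->optval;

	/* the bounds check the verifier demands before touching optval */
	if (optval + 1 <= (__u8 *)ctx->optval_end)
		optval[0] = 0xFF;	/* 0xF0 in the setsockopt twin */

	ctx->retval = 5;	/* getsockopt variant only; ignored */
	return 1;
}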
{
.descr = "getsockopt: support smaller ctx->optlen",
@@ -309,8 +344,10 @@ static struct sockopt_test {
.attach_type = BPF_CGROUP_GETSOCKOPT,
.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
+ .get_level = SOL_SOCKET,
.get_optlen = 64,
.get_optlen_ret = 32,
+ .io_uring_support = true,
},
{
.descr = "getsockopt: deny writing to ctx->optval",
@@ -490,6 +527,7 @@ static struct sockopt_test {
.set_level = 123,
.set_optlen = 1,
+ .io_uring_support = true,
},
{
.descr = "setsockopt: allow changing ctx->level",
@@ -544,6 +582,7 @@ static struct sockopt_test {
.set_optname = 123,
.set_optlen = 1,
+ .io_uring_support = true,
},
{
.descr = "setsockopt: allow changing ctx->optname",
@@ -596,6 +635,7 @@ static struct sockopt_test {
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.set_optlen = 64,
+ .io_uring_support = true,
},
{
.descr = "setsockopt: ctx->optlen == -1 is ok",
@@ -612,6 +652,7 @@ static struct sockopt_test {
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.set_optlen = 64,
+ .io_uring_support = true,
},
{
.descr = "setsockopt: deny ctx->optlen < 0 (except -1)",
@@ -630,6 +671,7 @@ static struct sockopt_test {
.set_optlen = 4,
.error = EFAULT_SETSOCKOPT,
+ .io_uring_support = true,
},
{
.descr = "setsockopt: deny ctx->optlen > input optlen",
@@ -647,6 +689,46 @@ static struct sockopt_test {
.set_optlen = 64,
.error = EFAULT_SETSOCKOPT,
+ .io_uring_support = true,
+ },
+ {
+ .descr = "setsockopt: ignore >PAGE_SIZE optlen",
+ .insns = {
+			/* write 0xF0 to the first optval byte */
+
+ /* r6 = ctx->optval */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
+ offsetof(struct bpf_sockopt, optval)),
+ /* r2 = ctx->optval */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+ /* r6 = ctx->optval + 1 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+
+ /* r7 = ctx->optval_end */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1,
+ offsetof(struct bpf_sockopt, optval_end)),
+
+ /* if (ctx->optval + 1 <= ctx->optval_end) { */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
+ /* ctx->optval[0] = 0xF0 */
+ BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xF0),
+ /* } */
+
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SETSOCKOPT,
+ .expected_attach_type = BPF_CGROUP_SETSOCKOPT,
+
+ .set_level = SOL_IP,
+ .set_optname = IP_TOS,
+ .set_optval = {},
+ .set_optlen = PAGE_SIZE + 1,
+
+ .get_level = SOL_IP,
+ .get_optname = IP_TOS,
+ .get_optval = {}, /* the changes are ignored */
+ .get_optlen = 4,
},
{
.descr = "setsockopt: allow changing ctx->optlen within bounds",
@@ -847,40 +929,157 @@ static struct sockopt_test {
.error = EPERM_SETSOCKOPT,
},
+
+ /* ==================== prog_type ==================== */
+
+ {
+		.descr = "can attach only BPF_CGROUP_SETSOCKOPT",
+ .insns = {
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .attach_type = BPF_CGROUP_SETSOCKOPT,
+ .expected_attach_type = 0,
+ .error = DENY_ATTACH,
+ },
+
+ {
+		.descr = "can attach only BPF_CGROUP_GETSOCKOPT",
+ .insns = {
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .attach_type = BPF_CGROUP_GETSOCKOPT,
+ .expected_attach_type = 0,
+ .error = DENY_ATTACH,
+ },
};
static int load_prog(const struct bpf_insn *insns,
+ enum bpf_prog_type prog_type,
enum bpf_attach_type expected_attach_type)
{
- struct bpf_load_program_attr attr = {
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT,
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
.expected_attach_type = expected_attach_type,
- .insns = insns,
- .license = "GPL",
.log_level = 2,
- };
- int fd;
+ .log_buf = bpf_log_buf,
+ .log_size = sizeof(bpf_log_buf),
+ );
+ int fd, insns_cnt = 0;
for (;
- insns[attr.insns_cnt].code != (BPF_JMP | BPF_EXIT);
- attr.insns_cnt++) {
+ insns[insns_cnt].code != (BPF_JMP | BPF_EXIT);
+ insns_cnt++) {
}
- attr.insns_cnt++;
+ insns_cnt++;
- fd = bpf_load_program_xattr(&attr, bpf_log_buf, sizeof(bpf_log_buf));
+ fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts);
if (verbose && fd < 0)
fprintf(stderr, "%s\n", bpf_log_buf);
return fd;
}
-static int run_test(int cgroup_fd, struct sockopt_test *test)
+/* Core function that handles io_uring ring initialization, submitting
+ * the SQE with the sockopt command, and waiting for the CQE.
+ */
+static int uring_sockopt(int op, int fd, int level, int optname,
+ const void *optval, socklen_t optlen)
+{
+ struct io_uring_cqe *cqe;
+ struct io_uring_sqe *sqe;
+ struct io_uring ring;
+ int err;
+
+ err = io_uring_queue_init(1, &ring, 0);
+ if (!ASSERT_OK(err, "io_uring initialization"))
+ return err;
+
+ sqe = io_uring_get_sqe(&ring);
+ if (!ASSERT_NEQ(sqe, NULL, "Get an SQE")) {
+ err = -1;
+ goto fail;
+ }
+
+ io_uring_prep_cmd(sqe, op, fd, level, optname, optval, optlen);
+
+ err = io_uring_submit(&ring);
+ if (!ASSERT_EQ(err, 1, "Submit SQE"))
+ goto fail;
+
+ err = io_uring_wait_cqe(&ring, &cqe);
+ if (!ASSERT_OK(err, "Wait for CQE"))
+ goto fail;
+
+ err = cqe->res;
+
+fail:
+ io_uring_queue_exit(&ring);
+
+ return err;
+}
+
+static int uring_setsockopt(int fd, int level, int optname, const void *optval,
+ socklen_t optlen)
+{
+ return uring_sockopt(SOCKET_URING_OP_SETSOCKOPT, fd, level, optname,
+ optval, optlen);
+}
+
+static int uring_getsockopt(int fd, int level, int optname, void *optval,
+ socklen_t *optlen)
+{
+ int ret = uring_sockopt(SOCKET_URING_OP_GETSOCKOPT, fd, level, optname,
+ optval, *optlen);
+ if (ret < 0)
+ return ret;
+
+	/* Populate optlen back to stay compatible with the system call
+	 * interface and simplify the test.
+	 */
+ *optlen = ret;
+
+ return 0;
+}
+
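Usage mirrors the syscall API, which is what lets call_setsockopt()/call_getsockopt() below switch transparently between the two paths. An illustrative call sequence:

	int one = 1;
	socklen_t len = sizeof(one);

	err = uring_setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	if (!err)
		err = uring_getsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, &len);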
+/* Execute the setsockopt operation */
+static int call_setsockopt(bool use_io_uring, int fd, int level, int optname,
+ const void *optval, socklen_t optlen)
+{
+ if (use_io_uring)
+ return uring_setsockopt(fd, level, optname, optval, optlen);
+
+ return setsockopt(fd, level, optname, optval, optlen);
+}
+
+/* Execute the getsockopt operation */
+static int call_getsockopt(bool use_io_uring, int fd, int level, int optname,
+ void *optval, socklen_t *optlen)
{
- int sock_fd, err, prog_fd;
+ if (use_io_uring)
+ return uring_getsockopt(fd, level, optname, optval, optlen);
+
+ return getsockopt(fd, level, optname, optval, optlen);
+}
+
+static int run_test(int cgroup_fd, struct sockopt_test *test, bool use_io_uring,
+ bool use_link)
+{
+ int prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT;
+ int sock_fd, err, prog_fd, link_fd = -1;
void *optval = NULL;
int ret = 0;
- prog_fd = load_prog(test->insns, test->expected_attach_type);
+ if (test->prog_type)
+ prog_type = test->prog_type;
+
+ prog_fd = load_prog(test->insns, prog_type, test->expected_attach_type);
if (prog_fd < 0) {
if (test->error == DENY_LOAD)
return 0;
@@ -889,7 +1088,12 @@ static int run_test(int cgroup_fd, struct sockopt_test *test)
return -1;
}
- err = bpf_prog_attach(prog_fd, cgroup_fd, test->attach_type, 0);
+ if (use_link) {
+ err = bpf_link_create(prog_fd, cgroup_fd, test->attach_type, NULL);
+ link_fd = err;
+ } else {
+ err = bpf_prog_attach(prog_fd, cgroup_fd, test->attach_type, 0);
+ }
if (err < 0) {
if (test->error == DENY_ATTACH)
goto close_prog_fd;
@@ -907,8 +1111,16 @@ static int run_test(int cgroup_fd, struct sockopt_test *test)
}
if (test->set_optlen) {
- err = setsockopt(sock_fd, test->set_level, test->set_optname,
- test->set_optval, test->set_optlen);
+ if (test->set_optlen >= PAGE_SIZE) {
+ int num_pages = test->set_optlen / PAGE_SIZE;
+ int remainder = test->set_optlen % PAGE_SIZE;
+
+ test->set_optlen = num_pages * sysconf(_SC_PAGESIZE) + remainder;
+ }
+
+ err = call_setsockopt(use_io_uring, sock_fd, test->set_level,
+ test->set_optname, test->set_optval,
+ test->set_optlen);
if (err) {
if (errno == EPERM && test->error == EPERM_SETSOCKOPT)
goto close_sock_fd;
@@ -922,14 +1134,24 @@ static int run_test(int cgroup_fd, struct sockopt_test *test)
}
if (test->get_optlen) {
+ if (test->get_optlen >= PAGE_SIZE) {
+ int num_pages = test->get_optlen / PAGE_SIZE;
+ int remainder = test->get_optlen % PAGE_SIZE;
+
+ test->get_optlen = num_pages * sysconf(_SC_PAGESIZE) + remainder;
+ }
+
optval = malloc(test->get_optlen);
+ memset(optval, 0, test->get_optlen);
socklen_t optlen = test->get_optlen;
socklen_t expected_get_optlen = test->get_optlen_ret ?:
test->get_optlen;
- err = getsockopt(sock_fd, test->get_level, test->get_optname,
- optval, &optlen);
+ err = call_getsockopt(use_io_uring, sock_fd, test->get_level,
+ test->get_optname, optval, &optlen);
if (err) {
+ if (errno == EOPNOTSUPP && test->error == EOPNOTSUPP_GETSOCKOPT)
+ goto free_optval;
if (errno == EPERM && test->error == EPERM_GETSOCKOPT)
goto free_optval;
if (errno == EFAULT && test->error == EFAULT_GETSOCKOPT)
@@ -962,7 +1184,12 @@ free_optval:
close_sock_fd:
close(sock_fd);
detach_prog:
- bpf_prog_detach2(prog_fd, cgroup_fd, test->attach_type);
+ if (use_link) {
+ if (link_fd >= 0)
+ close(link_fd);
+ } else {
+ bpf_prog_detach2(prog_fd, cgroup_fd, test->attach_type);
+ }
close_prog_fd:
close(prog_fd);
return ret;
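A worked example of the rescaling added above: the test table encodes "one byte past a page" as PAGE_SIZE + 1 under the build-time assumption of 4096-byte pages, and run_test() converts the whole-page part to the runtime page size:

/* test->set_optlen == 4097 on a 64K-page machine:
 *
 *	num_pages = 4097 / 4096 = 1
 *	remainder = 4097 % 4096 = 1
 *	set_optlen = 1 * 65536 + 1 = 65537
 *
 * so the kernel-side "optlen > PAGE_SIZE" path still triggers.
 */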
@@ -973,12 +1200,20 @@ void test_sockopt(void)
int cgroup_fd, i;
cgroup_fd = test__join_cgroup("/sockopt");
- if (CHECK_FAIL(cgroup_fd < 0))
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
return;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
- test__start_subtest(tests[i].descr);
- CHECK_FAIL(run_test(cgroup_fd, &tests[i]));
+ if (!test__start_subtest(tests[i].descr))
+ continue;
+
+ ASSERT_OK(run_test(cgroup_fd, &tests[i], false, false),
+ tests[i].descr);
+ ASSERT_OK(run_test(cgroup_fd, &tests[i], false, true),
+ tests[i].descr);
+ if (tests[i].io_uring_support)
+ ASSERT_OK(run_test(cgroup_fd, &tests[i], true, false),
+ tests[i].descr);
}
close(cgroup_fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
index ec281b0363b8..7cd8be2780ca 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
@@ -1,41 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
+#include "network_helpers.h"
+
+#include "sockopt_inherit.skel.h"
#define SOL_CUSTOM 0xdeadbeef
#define CUSTOM_INHERIT1 0
#define CUSTOM_INHERIT2 1
#define CUSTOM_LISTENER 2
-static int connect_to_server(int server_fd)
-{
- struct sockaddr_storage addr;
- socklen_t len = sizeof(addr);
- int fd;
-
- fd = socket(AF_INET, SOCK_STREAM, 0);
- if (fd < 0) {
- log_err("Failed to create client socket");
- return -1;
- }
-
- if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) {
- log_err("Failed to get server addr");
- goto out;
- }
-
- if (connect(fd, (const struct sockaddr *)&addr, len) < 0) {
- log_err("Fail to connect to server");
- goto out;
- }
-
- return fd;
-
-out:
- close(fd);
- return -1;
-}
-
static int verify_sockopt(int fd, int optname, const char *msg, char expected)
{
socklen_t optlen = 1;
@@ -76,20 +50,16 @@ static void *server_thread(void *arg)
pthread_cond_signal(&server_started);
pthread_mutex_unlock(&server_started_mtx);
- if (CHECK_FAIL(err < 0)) {
- perror("Failed to listed on socket");
+	if (!ASSERT_GE(err, 0, "listen on socket"))
return NULL;
- }
err += verify_sockopt(fd, CUSTOM_INHERIT1, "listen", 1);
err += verify_sockopt(fd, CUSTOM_INHERIT2, "listen", 1);
err += verify_sockopt(fd, CUSTOM_LISTENER, "listen", 1);
client_fd = accept(fd, (struct sockaddr *)&addr, &len);
- if (CHECK_FAIL(client_fd < 0)) {
- perror("Failed to accept client");
+ if (!ASSERT_GE(client_fd, 0, "accept client"))
return NULL;
- }
err += verify_sockopt(client_fd, CUSTOM_INHERIT1, "accept", 1);
err += verify_sockopt(client_fd, CUSTOM_INHERIT2, "accept", 1);
@@ -100,125 +70,93 @@ static void *server_thread(void *arg)
return (void *)(long)err;
}
-static int start_server(void)
+static int custom_cb(int fd, void *opts)
{
- struct sockaddr_in addr = {
- .sin_family = AF_INET,
- .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
- };
char buf;
int err;
- int fd;
int i;
- fd = socket(AF_INET, SOCK_STREAM, 0);
- if (fd < 0) {
- log_err("Failed to create server socket");
- return -1;
- }
-
for (i = CUSTOM_INHERIT1; i <= CUSTOM_LISTENER; i++) {
buf = 0x01;
err = setsockopt(fd, SOL_CUSTOM, i, &buf, 1);
if (err) {
log_err("Failed to call setsockopt(%d)", i);
- close(fd);
return -1;
}
}
- if (bind(fd, (const struct sockaddr *)&addr, sizeof(addr)) < 0) {
- log_err("Failed to bind socket");
- close(fd);
- return -1;
- }
-
- return fd;
-}
-
-static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title)
-{
- enum bpf_attach_type attach_type;
- enum bpf_prog_type prog_type;
- struct bpf_program *prog;
- int err;
-
- err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
- if (err) {
- log_err("Failed to deduct types for %s BPF program", title);
- return -1;
- }
-
- prog = bpf_object__find_program_by_title(obj, title);
- if (!prog) {
- log_err("Failed to find %s BPF program", title);
- return -1;
- }
-
- err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
- attach_type, 0);
- if (err) {
- log_err("Failed to attach %s BPF program", title);
- return -1;
- }
-
return 0;
}
static void run_test(int cgroup_fd)
{
- struct bpf_prog_load_attr attr = {
- .file = "./sockopt_inherit.o",
+ struct bpf_link *link_getsockopt = NULL;
+ struct bpf_link *link_setsockopt = NULL;
+ struct network_helper_opts opts = {
+ .post_socket_cb = custom_cb,
};
int server_fd = -1, client_fd;
- struct bpf_object *obj;
+ struct sockaddr_in addr = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
+ };
+ struct sockopt_inherit *obj;
void *server_err;
pthread_t tid;
- int ignored;
int err;
- err = bpf_prog_load_xattr(&attr, &obj, &ignored);
- if (CHECK_FAIL(err))
+ obj = sockopt_inherit__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "skel-load"))
return;
- err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt");
- if (CHECK_FAIL(err))
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
+ link_getsockopt = bpf_program__attach_cgroup(obj->progs._getsockopt,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_getsockopt, "cg-attach-getsockopt"))
goto close_bpf_object;
- err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt");
- if (CHECK_FAIL(err))
+ link_setsockopt = bpf_program__attach_cgroup(obj->progs._setsockopt,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(link_setsockopt, "cg-attach-setsockopt"))
goto close_bpf_object;
- server_fd = start_server();
- if (CHECK_FAIL(server_fd < 0))
+ server_fd = start_server_addr(SOCK_STREAM, (struct sockaddr_storage *)&addr,
+ sizeof(addr), &opts);
+ if (!ASSERT_GE(server_fd, 0, "start_server"))
goto close_bpf_object;
pthread_mutex_lock(&server_started_mtx);
- if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread,
- (void *)&server_fd)))
+ if (!ASSERT_OK(pthread_create(&tid, NULL, server_thread,
+ (void *)&server_fd), "pthread_create")) {
+ pthread_mutex_unlock(&server_started_mtx);
goto close_server_fd;
+ }
pthread_cond_wait(&server_started, &server_started_mtx);
pthread_mutex_unlock(&server_started_mtx);
- client_fd = connect_to_server(server_fd);
- if (CHECK_FAIL(client_fd < 0))
+ client_fd = connect_to_fd(server_fd, 0);
+ if (!ASSERT_GE(client_fd, 0, "connect_to_server"))
goto close_server_fd;
- CHECK_FAIL(verify_sockopt(client_fd, CUSTOM_INHERIT1, "connect", 0));
- CHECK_FAIL(verify_sockopt(client_fd, CUSTOM_INHERIT2, "connect", 0));
- CHECK_FAIL(verify_sockopt(client_fd, CUSTOM_LISTENER, "connect", 0));
+ ASSERT_OK(verify_sockopt(client_fd, CUSTOM_INHERIT1, "connect", 0), "verify_sockopt1");
+ ASSERT_OK(verify_sockopt(client_fd, CUSTOM_INHERIT2, "connect", 0), "verify_sockopt2");
+ ASSERT_OK(verify_sockopt(client_fd, CUSTOM_LISTENER, "connect", 0), "verify_sockopt_listener");
pthread_join(tid, &server_err);
err = (int)(long)server_err;
- CHECK_FAIL(err);
+ ASSERT_OK(err, "pthread_join retval");
close(client_fd);
close_server_fd:
close(server_fd);
close_bpf_object:
- bpf_object__close(obj);
+ bpf_link__destroy(link_getsockopt);
+ bpf_link__destroy(link_setsockopt);
+
+ sockopt_inherit__destroy(obj);
}
void test_sockopt_inherit(void)
@@ -226,7 +164,7 @@ void test_sockopt_inherit(void)
int cgroup_fd;
cgroup_fd = test__join_cgroup("/sockopt_inherit");
- if (CHECK_FAIL(cgroup_fd < 0))
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
return;
run_test(cgroup_fd);
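
The conversion above captures the pattern used throughout this series: bpf_prog_load_xattr() plus manual bpf_prog_attach() calls are replaced by a generated skeleton and bpf_link-based attachment, so cleanup is just destroying links and the skeleton. A minimal sketch of that pattern, using a hypothetical skeleton name "example" (real tests use the name generated from their .bpf.c source):

#include <bpf/libbpf.h>
#include "example.skel.h"        /* hypothetical generated skeleton */

static int attach_to_cgroup(int cgroup_fd)
{
        struct example *skel;
        struct bpf_link *link;
        int err = -1;

        skel = example__open_and_load();        /* open + load in one step */
        if (!skel)
                return -1;

        /* Attaching yields a bpf_link; destroying the link detaches the
         * program, so no explicit bpf_prog_detach() call is needed.
         */
        link = bpf_program__attach_cgroup(skel->progs._getsockopt, cgroup_fd);
        if (!link)
                goto out;

        /* ... exercise the socket under test here ... */
        err = 0;

        bpf_link__destroy(link);
out:
        example__destroy(skel);        /* NULL-safe; frees maps and programs */
        return err;
}
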
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
index 51fac975b316..759bbb6f8c5f 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
@@ -2,61 +2,13 @@
#include <test_progs.h>
#include "cgroup_helpers.h"
-static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title)
-{
- enum bpf_attach_type attach_type;
- enum bpf_prog_type prog_type;
- struct bpf_program *prog;
- int err;
-
- err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
- if (err) {
- log_err("Failed to deduct types for %s BPF program", title);
- return -1;
- }
-
- prog = bpf_object__find_program_by_title(obj, title);
- if (!prog) {
- log_err("Failed to find %s BPF program", title);
- return -1;
- }
-
- err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
- attach_type, BPF_F_ALLOW_MULTI);
- if (err) {
- log_err("Failed to attach %s BPF program", title);
- return -1;
- }
-
- return 0;
-}
+#include "sockopt_multi.skel.h"
-static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title)
-{
- enum bpf_attach_type attach_type;
- enum bpf_prog_type prog_type;
- struct bpf_program *prog;
- int err;
-
- err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
- if (err)
- return -1;
-
- prog = bpf_object__find_program_by_title(obj, title);
- if (!prog)
- return -1;
-
- err = bpf_prog_detach2(bpf_program__fd(prog), cgroup_fd,
- attach_type);
- if (err)
- return -1;
-
- return 0;
-}
-
-static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
+static int run_getsockopt_test(struct sockopt_multi *obj, int cg_parent,
int cg_child, int sock_fd)
{
+ struct bpf_link *link_parent = NULL;
+ struct bpf_link *link_child = NULL;
socklen_t optlen;
__u8 buf;
int err;
@@ -89,8 +41,9 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
* - child: 0x80 -> 0x90
*/
- err = prog_attach(obj, cg_child, "cgroup/getsockopt/child");
- if (err)
+ link_child = bpf_program__attach_cgroup(obj->progs._getsockopt_child,
+ cg_child);
+ if (!ASSERT_OK_PTR(link_child, "cg-attach-getsockopt_child"))
goto detach;
buf = 0x00;
@@ -113,8 +66,9 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
* - parent: 0x90 -> 0xA0
*/
- err = prog_attach(obj, cg_parent, "cgroup/getsockopt/parent");
- if (err)
+ link_parent = bpf_program__attach_cgroup(obj->progs._getsockopt_parent,
+ cg_parent);
+ if (!ASSERT_OK_PTR(link_parent, "cg-attach-getsockopt_parent"))
goto detach;
buf = 0x00;
@@ -157,11 +111,8 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
* - parent: unexpected 0x40, EPERM
*/
- err = prog_detach(obj, cg_child, "cgroup/getsockopt/child");
- if (err) {
- log_err("Failed to detach child program");
- goto detach;
- }
+ bpf_link__destroy(link_child);
+ link_child = NULL;
buf = 0x00;
optlen = 1;
@@ -198,15 +149,17 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
}
detach:
- prog_detach(obj, cg_child, "cgroup/getsockopt/child");
- prog_detach(obj, cg_parent, "cgroup/getsockopt/parent");
+ bpf_link__destroy(link_child);
+ bpf_link__destroy(link_parent);
return err;
}
-static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
+static int run_setsockopt_test(struct sockopt_multi *obj, int cg_parent,
int cg_child, int sock_fd)
{
+ struct bpf_link *link_parent = NULL;
+ struct bpf_link *link_child = NULL;
socklen_t optlen;
__u8 buf;
int err;
@@ -236,8 +189,9 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
/* Attach child program and make sure it adds 0x10. */
- err = prog_attach(obj, cg_child, "cgroup/setsockopt");
- if (err)
+ link_child = bpf_program__attach_cgroup(obj->progs._setsockopt,
+ cg_child);
+ if (!ASSERT_OK_PTR(link_child, "cg-attach-setsockopt_child"))
goto detach;
buf = 0x80;
@@ -263,8 +217,9 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
/* Attach parent program and make sure it adds another 0x10. */
- err = prog_attach(obj, cg_parent, "cgroup/setsockopt");
- if (err)
+ link_parent = bpf_program__attach_cgroup(obj->progs._setsockopt,
+ cg_parent);
+ if (!ASSERT_OK_PTR(link_parent, "cg-attach-setsockopt_parent"))
goto detach;
buf = 0x80;
@@ -289,45 +244,42 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
}
detach:
- prog_detach(obj, cg_child, "cgroup/setsockopt");
- prog_detach(obj, cg_parent, "cgroup/setsockopt");
+ bpf_link__destroy(link_child);
+ bpf_link__destroy(link_parent);
return err;
}
void test_sockopt_multi(void)
{
- struct bpf_prog_load_attr attr = {
- .file = "./sockopt_multi.o",
- };
int cg_parent = -1, cg_child = -1;
- struct bpf_object *obj = NULL;
+ struct sockopt_multi *obj = NULL;
int sock_fd = -1;
- int err = -1;
- int ignored;
cg_parent = test__join_cgroup("/parent");
- if (CHECK_FAIL(cg_parent < 0))
+ if (!ASSERT_GE(cg_parent, 0, "join_cgroup /parent"))
goto out;
cg_child = test__join_cgroup("/parent/child");
- if (CHECK_FAIL(cg_child < 0))
+ if (!ASSERT_GE(cg_child, 0, "join_cgroup /parent/child"))
goto out;
- err = bpf_prog_load_xattr(&attr, &obj, &ignored);
- if (CHECK_FAIL(err))
+ obj = sockopt_multi__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "skel-load"))
goto out;
+ obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
sock_fd = socket(AF_INET, SOCK_STREAM, 0);
- if (CHECK_FAIL(sock_fd < 0))
+ if (!ASSERT_GE(sock_fd, 0, "socket"))
goto out;
- CHECK_FAIL(run_getsockopt_test(obj, cg_parent, cg_child, sock_fd));
- CHECK_FAIL(run_setsockopt_test(obj, cg_parent, cg_child, sock_fd));
+ ASSERT_OK(run_getsockopt_test(obj, cg_parent, cg_child, sock_fd), "getsockopt_test");
+ ASSERT_OK(run_setsockopt_test(obj, cg_parent, cg_child, sock_fd), "setsockopt_test");
out:
close(sock_fd);
- bpf_object__close(obj);
+ sockopt_multi__destroy(obj);
close(cg_child);
close(cg_parent);
}
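
Note what the link-based API buys run_getsockopt_test() above: under the legacy API, stacking programs on one cgroup needed BPF_F_ALLOW_MULTI at attach time and a matching bpf_prog_detach2() to unwind, while each bpf_program__attach_cgroup() call now returns its own bpf_link, so one attachment can be dropped independently of the others. A hedged sketch of that shape (prog, cg_parent and cg_child are assumed to come from a loaded skeleton and test__join_cgroup()):

#include <bpf/libbpf.h>

static int attach_parent_then_drop_child(struct bpf_program *prog,
                                         int cg_parent, int cg_child)
{
        struct bpf_link *link_parent, *link_child;

        link_parent = bpf_program__attach_cgroup(prog, cg_parent);
        if (!link_parent)
                return -1;

        link_child = bpf_program__attach_cgroup(prog, cg_child);
        if (!link_child) {
                bpf_link__destroy(link_parent);
                return -1;
        }

        /* Detach only the child attachment; the parent keeps running.
         * bpf_link__destroy() accepts NULL, so clearing the pointer keeps
         * shared cleanup paths safe, as the test does.
         */
        bpf_link__destroy(link_child);
        link_child = NULL;

        bpf_link__destroy(link_parent);
        return 0;
}
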
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c b/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c
new file mode 100644
index 000000000000..6b2d300e9fd4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include <netinet/tcp.h>
+#include "sockopt_qos_to_cc.skel.h"
+
+static void run_setsockopt_test(int cg_fd, int sock_fd)
+{
+ socklen_t optlen;
+ char cc[16]; /* TCP_CA_NAME_MAX */
+ int buf;
+ int err = -1;
+
+ buf = 0x2D;
+ err = setsockopt(sock_fd, SOL_IPV6, IPV6_TCLASS, &buf, sizeof(buf));
+ if (!ASSERT_OK(err, "setsockopt(sock_fd, IPV6_TCLASS)"))
+ return;
+
+ /* Verify the setsockopt cc change */
+ optlen = sizeof(cc);
+ err = getsockopt(sock_fd, SOL_TCP, TCP_CONGESTION, cc, &optlen);
+ if (!ASSERT_OK(err, "getsockopt(sock_fd, TCP_CONGESTION)"))
+ return;
+
+ if (!ASSERT_STREQ(cc, "reno", "getsockopt(sock_fd, TCP_CONGESTION)"))
+ return;
+}
+
+void test_sockopt_qos_to_cc(void)
+{
+ struct sockopt_qos_to_cc *skel;
+ char cc_cubic[16] = "cubic"; /* TCP_CA_NAME_MAX */
+ int cg_fd = -1;
+ int sock_fd = -1;
+ int err;
+
+ cg_fd = test__join_cgroup("/sockopt_qos_to_cc");
+ if (!ASSERT_GE(cg_fd, 0, "cg-join(sockopt_qos_to_cc)"))
+ return;
+
+ skel = sockopt_qos_to_cc__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ goto done;
+
+ skel->bss->page_size = sysconf(_SC_PAGESIZE);
+
+ sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (!ASSERT_GE(sock_fd, 0, "v6 socket open"))
+ goto done;
+
+ err = setsockopt(sock_fd, SOL_TCP, TCP_CONGESTION, &cc_cubic,
+ sizeof(cc_cubic));
+ if (!ASSERT_OK(err, "setsockopt(sock_fd, TCP_CONGESTION)"))
+ goto done;
+
+ skel->links.sockopt_qos_to_cc =
+ bpf_program__attach_cgroup(skel->progs.sockopt_qos_to_cc,
+ cg_fd);
+ if (!ASSERT_OK_PTR(skel->links.sockopt_qos_to_cc,
+ "prog_attach(sockopt_qos_to_cc)"))
+ goto done;
+
+ run_setsockopt_test(cg_fd, sock_fd);
+
+done:
+ if (sock_fd != -1)
+ close(sock_fd);
+ if (cg_fd != -1)
+ close(cg_fd);
+ /* destroy can take null and error pointer */
+ sockopt_qos_to_cc__destroy(skel);
+}
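
For context, the mechanism exercised here is a cgroup/setsockopt program that intercepts the IPV6_TCLASS write and switches the socket's congestion control, which is why userspace first pins TCP_CONGESTION to "cubic" and then expects "reno" after setting traffic class 0x2D. The sketch below only illustrates the general shape; it is not the actual progs/sockopt_qos_to_cc.c source, though the 0x2D class and the "reno" target mirror what the test asserts (assumes the usual BPF-side includes: vmlinux.h or linux/bpf.h, plus bpf/bpf_helpers.h):

SEC("cgroup/setsockopt")
int qos_to_cc_sketch(struct bpf_sockopt *ctx)
{
        char reno[] = "reno";

        if (ctx->level != SOL_IPV6 || ctx->optname != IPV6_TCLASS)
                return 1;        /* not ours; let the kernel handle it */

        if (ctx->optval + 1 > ctx->optval_end)
                return 0;        /* malformed option: reject */

        if (*(__u8 *)ctx->optval == 0x2d)        /* the class the test sets */
                bpf_setsockopt(ctx->sk, SOL_TCP, TCP_CONGESTION,
                               reno, sizeof(reno));

        return 1;        /* allow the IPV6_TCLASS change itself */
}
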
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
index b25c9c45c148..ba6b3ec1156a 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
@@ -2,6 +2,14 @@
#include <test_progs.h>
#include "cgroup_helpers.h"
+#include <netinet/tcp.h>
+#include <linux/netlink.h>
+#include "sockopt_sk.skel.h"
+
+#ifndef SOL_TCP
+#define SOL_TCP IPPROTO_TCP
+#endif
+
#define SOL_CUSTOM 0xdeadbeef
static int getsetsockopt(void)
@@ -11,6 +19,7 @@ static int getsetsockopt(void)
char u8[4];
__u32 u32;
char cc[16]; /* TCP_CA_NAME_MAX */
+ struct tcp_zerocopy_receive zc;
} buf = {};
socklen_t optlen;
char *big_buf = NULL;
@@ -154,69 +163,87 @@ static int getsetsockopt(void)
goto err;
}
- free(big_buf);
- close(fd);
- return 0;
-err:
- free(big_buf);
- close(fd);
- return -1;
-}
+ /* TCP_ZEROCOPY_RECEIVE triggers */
+ memset(&buf, 0, sizeof(buf));
+ optlen = sizeof(buf.zc);
+ err = getsockopt(fd, SOL_TCP, TCP_ZEROCOPY_RECEIVE, &buf, &optlen);
+ if (err) {
+ log_err("Unexpected getsockopt(TCP_ZEROCOPY_RECEIVE) err=%d errno=%d",
+ err, errno);
+ goto err;
+ }
-static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title)
-{
- enum bpf_attach_type attach_type;
- enum bpf_prog_type prog_type;
- struct bpf_program *prog;
- int err;
+ memset(&buf, 0, sizeof(buf));
+ buf.zc.address = 12345; /* Not page aligned. Rejected by tcp_zerocopy_receive() */
+ optlen = sizeof(buf.zc);
+ errno = 0;
+ err = getsockopt(fd, SOL_TCP, TCP_ZEROCOPY_RECEIVE, &buf, &optlen);
+ if (errno != EINVAL) {
+ log_err("Unexpected getsockopt(TCP_ZEROCOPY_RECEIVE) err=%d errno=%d",
+ err, errno);
+ goto err;
+ }
- err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
- if (err) {
- log_err("Failed to deduct types for %s BPF program", title);
+ /* optval=NULL case is handled correctly */
+
+ close(fd);
+ fd = socket(AF_NETLINK, SOCK_RAW, 0);
+ if (fd < 0) {
+ log_err("Failed to create AF_NETLINK socket");
return -1;
}
- prog = bpf_object__find_program_by_title(obj, title);
- if (!prog) {
- log_err("Failed to find %s BPF program", title);
- return -1;
+ buf.u32 = 1;
+ optlen = sizeof(__u32);
+ err = setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &buf, optlen);
+ if (err) {
+ log_err("Unexpected getsockopt(NETLINK_ADD_MEMBERSHIP) err=%d errno=%d",
+ err, errno);
+ goto err;
}
- err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
- attach_type, 0);
+ optlen = 0;
+ err = getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, NULL, &optlen);
if (err) {
- log_err("Failed to attach %s BPF program", title);
- return -1;
+ log_err("Unexpected getsockopt(NETLINK_LIST_MEMBERSHIPS) err=%d errno=%d",
+ err, errno);
+ goto err;
}
+ ASSERT_EQ(optlen, 8, "Unexpected NETLINK_LIST_MEMBERSHIPS value");
+ free(big_buf);
+ close(fd);
return 0;
+err:
+ free(big_buf);
+ close(fd);
+ return -1;
}
static void run_test(int cgroup_fd)
{
- struct bpf_prog_load_attr attr = {
- .file = "./sockopt_sk.o",
- };
- struct bpf_object *obj;
- int ignored;
- int err;
-
- err = bpf_prog_load_xattr(&attr, &obj, &ignored);
- if (CHECK_FAIL(err))
- return;
+ struct sockopt_sk *skel;
+
+ skel = sockopt_sk__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ skel->bss->page_size = getpagesize();
- err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt");
- if (CHECK_FAIL(err))
- goto close_bpf_object;
+ skel->links._setsockopt =
+ bpf_program__attach_cgroup(skel->progs._setsockopt, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links._setsockopt, "setsockopt_link"))
+ goto cleanup;
- err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt");
- if (CHECK_FAIL(err))
- goto close_bpf_object;
+ skel->links._getsockopt =
+ bpf_program__attach_cgroup(skel->progs._getsockopt, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links._getsockopt, "getsockopt_link"))
+ goto cleanup;
- CHECK_FAIL(getsetsockopt());
+ ASSERT_OK(getsetsockopt(), "getsetsockopt");
-close_bpf_object:
- bpf_object__close(obj);
+cleanup:
+ sockopt_sk__destroy(skel);
}
void test_sockopt_sk(void)
@@ -224,7 +251,7 @@ void test_sockopt_sk(void)
int cgroup_fd;
cgroup_fd = test__join_cgroup("/sockopt_sk");
- if (CHECK_FAIL(cgroup_fd < 0))
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /sockopt_sk"))
return;
run_test(cgroup_fd);
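
One detail worth calling out in the new netlink coverage: NETLINK_LIST_MEMBERSHIPS follows the getsockopt() convention of reporting the required buffer size through optlen when called with a zero-length buffer, which is why the test passes NULL/0 and then checks that optlen comes back as 8 (room for the kernel's group bitmask). A standalone sketch of that probe:

#include <sys/socket.h>
#include <linux/netlink.h>

/* Ask how many bytes NETLINK_LIST_MEMBERSHIPS needs: with a zero-length
 * buffer the call still succeeds and writes the required size back
 * through optlen. Returns the size, or -1 on error.
 */
static int membership_buf_size(int fd)
{
        socklen_t optlen = 0;

        if (getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS,
                       NULL, &optlen))
                return -1;
        return (int)optlen;
}
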
diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c
new file mode 100644
index 000000000000..254fbfeab06a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <regex.h>
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "test_spin_lock.skel.h"
+#include "test_spin_lock_fail.skel.h"
+
+static char log_buf[1024 * 1024];
+
+static struct {
+ const char *prog_name;
+ const char *err_msg;
+} spin_lock_fail_tests[] = {
+ { "lock_id_kptr_preserve",
+ "5: (bf) r1 = r0 ; R0=ptr_foo(id=2,ref_obj_id=2) "
+ "R1=ptr_foo(id=2,ref_obj_id=2) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n"
+ "R1 type=ptr_ expected=percpu_ptr_" },
+ { "lock_id_global_zero",
+ "; R1=map_value(map=.data.A,ks=4,vs=4)\n2: (85) call bpf_this_cpu_ptr#154\n"
+ "R1 type=map_value expected=percpu_ptr_" },
+ { "lock_id_mapval_preserve",
+ "[0-9]\\+: (bf) r1 = r0 ;"
+ " R0=map_value(id=1,map=array_map,ks=4,vs=8)"
+ " R1=map_value(id=1,map=array_map,ks=4,vs=8)\n"
+ "[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n"
+ "R1 type=map_value expected=percpu_ptr_" },
+ { "lock_id_innermapval_preserve",
+ "[0-9]\\+: (bf) r1 = r0 ;"
+ " R0=map_value(id=2,ks=4,vs=8)"
+ " R1=map_value(id=2,ks=4,vs=8)\n"
+ "[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n"
+ "R1 type=map_value expected=percpu_ptr_" },
+ { "lock_id_mismatch_kptr_kptr", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_kptr_global", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_kptr_mapval", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_kptr_innermapval", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_global_global", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_global_kptr", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_global_mapval", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_global_innermapval", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_mapval_mapval", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_mapval_kptr", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_mapval_global", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_mapval_innermapval", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_innermapval_innermapval1", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_innermapval_innermapval2", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_innermapval_kptr", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_innermapval_global", "bpf_spin_unlock of different lock" },
+ { "lock_id_mismatch_innermapval_mapval", "bpf_spin_unlock of different lock" },
+ { "lock_global_subprog_call1", "global function calls are not allowed while holding a lock" },
+ { "lock_global_subprog_call2", "global function calls are not allowed while holding a lock" },
+ { "lock_global_sleepable_helper_subprog", "global function calls are not allowed while holding a lock" },
+ { "lock_global_sleepable_kfunc_subprog", "global function calls are not allowed while holding a lock" },
+ { "lock_global_sleepable_subprog_indirect", "global function calls are not allowed while holding a lock" },
+};
+
+static int match_regex(const char *pattern, const char *string)
+{
+ int err, rc;
+ regex_t re;
+
+ err = regcomp(&re, pattern, REG_NOSUB);
+ if (err) {
+ char errbuf[512];
+
+ regerror(err, &re, errbuf, sizeof(errbuf));
+ PRINT_FAIL("Can't compile regex: %s\n", errbuf);
+ return -1;
+ }
+ rc = regexec(&re, string, 0, NULL, 0);
+ regfree(&re);
+ return rc == 0 ? 1 : 0;
+}
+
+static void test_spin_lock_fail_prog(const char *prog_name, const char *err_msg)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
+ .kernel_log_size = sizeof(log_buf),
+ .kernel_log_level = 1);
+ struct test_spin_lock_fail *skel;
+ struct bpf_program *prog;
+ int ret;
+
+ skel = test_spin_lock_fail__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "test_spin_lock_fail__open_opts"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto end;
+
+ bpf_program__set_autoload(prog, true);
+
+ ret = test_spin_lock_fail__load(skel);
+ if (!ASSERT_ERR(ret, "test_spin_lock_fail__load must fail"))
+ goto end;
+
+ /* Skip check if JIT does not support kfuncs */
+ if (strstr(log_buf, "JIT does not support calling kernel function")) {
+ test__skip();
+ goto end;
+ }
+
+ ret = match_regex(err_msg, log_buf);
+ if (!ASSERT_GE(ret, 0, "match_regex"))
+ goto end;
+
+ if (!ASSERT_TRUE(ret, "no match for expected error message")) {
+ fprintf(stderr, "Expected: %s\n", err_msg);
+ fprintf(stderr, "Verifier: %s\n", log_buf);
+ }
+
+end:
+ test_spin_lock_fail__destroy(skel);
+}
+
+static void *spin_lock_thread(void *arg)
+{
+ int err, prog_fd = *(u32 *) arg;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 10000,
+ );
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_OK(topts.retval, "test_run retval");
+ pthread_exit(arg);
+}
+
+void test_spin_lock_success(void)
+{
+ struct test_spin_lock *skel;
+ pthread_t thread_id[4];
+ int prog_fd, i;
+ void *ret;
+
+ skel = test_spin_lock__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_spin_lock__open_and_load"))
+ return;
+ prog_fd = bpf_program__fd(skel->progs.bpf_spin_lock_test);
+ for (i = 0; i < 4; i++) {
+ int err;
+
+ err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd);
+ if (!ASSERT_OK(err, "pthread_create"))
+ goto end;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join"))
+ goto end;
+ if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd"))
+ goto end;
+ }
+end:
+ test_spin_lock__destroy(skel);
+}
+
+void test_spin_lock(void)
+{
+ int i;
+
+ test_spin_lock_success();
+
+ for (i = 0; i < ARRAY_SIZE(spin_lock_fail_tests); i++) {
+ if (!test__start_subtest(spin_lock_fail_tests[i].prog_name))
+ continue;
+ test_spin_lock_fail_prog(spin_lock_fail_tests[i].prog_name,
+ spin_lock_fail_tests[i].err_msg);
+ }
+}
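
Note that the err_msg patterns in the table above are matched as POSIX basic regular expressions: regcomp() is called without REG_EXTENDED, so the parentheses in the verifier log match literally, and one-or-more is spelled "\\+" in C source (a BRE quantifier form that glibc's regcomp honors). A small standalone demonstration of the flavor the table relies on:

#include <regex.h>
#include <stdio.h>

int main(void)
{
        const char *log = "12: (85) call bpf_this_cpu_ptr#154";
        regex_t re;
        int rc;

        /* BRE: "[0-9]\+" is one-or-more digits; "(85)" is literal. */
        if (regcomp(&re, "[0-9]\\+: (85) call", REG_NOSUB))
                return 1;
        rc = regexec(&re, log, 0, NULL, 0);        /* substring match */
        printf("%s\n", rc == 0 ? "match" : "no match");
        regfree(&re);
        return 0;
}
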
diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c
deleted file mode 100644
index 7577a77a4c4c..000000000000
--- a/tools/testing/selftests/bpf/prog_tests/spinlock.c
+++ /dev/null
@@ -1,43 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <test_progs.h>
-#include <network_helpers.h>
-
-static void *spin_lock_thread(void *arg)
-{
- __u32 duration, retval;
- int err, prog_fd = *(u32 *) arg;
-
- err = bpf_prog_test_run(prog_fd, 10000, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
- pthread_exit(arg);
-}
-
-void test_spinlock(void)
-{
- const char *file = "./test_spin_lock.o";
- pthread_t thread_id[4];
- struct bpf_object *obj = NULL;
- int prog_fd;
- int err = 0, i;
- void *ret;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
- if (CHECK_FAIL(err)) {
- printf("test_spin_lock:bpf_prog_load errno %d\n", errno);
- goto close_prog;
- }
- for (i = 0; i < 4; i++)
- if (CHECK_FAIL(pthread_create(&thread_id[i], NULL,
- &spin_lock_thread, &prog_fd)))
- goto close_prog;
-
- for (i = 0; i < 4; i++)
- if (CHECK_FAIL(pthread_join(thread_id[i], &ret) ||
- ret != (void *)&prog_fd))
- goto close_prog;
-close_prog:
- bpf_object__close(obj);
-}
diff --git a/tools/testing/selftests/bpf/prog_tests/stack_var_off.c b/tools/testing/selftests/bpf/prog_tests/stack_var_off.c
new file mode 100644
index 000000000000..2ce9deefa59c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stack_var_off.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "test_stack_var_off.skel.h"
+
+/* Test read and writes to the stack performed with offsets that are not
+ * statically known.
+ */
+void test_stack_var_off(void)
+{
+ int duration = 0;
+ struct test_stack_var_off *skel;
+
+ skel = test_stack_var_off__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ return;
+
+ /* Give pid to bpf prog so it doesn't trigger for anyone else. */
+ skel->bss->test_pid = getpid();
+ /* Initialize the probe's input. */
+ skel->bss->input[0] = 2;
+ skel->bss->input[1] = 42; /* This will be returned in probe_res. */
+
+ if (!ASSERT_OK(test_stack_var_off__attach(skel), "skel_attach"))
+ goto cleanup;
+
+ /* Trigger probe. */
+ usleep(1);
+
+ if (CHECK(skel->bss->probe_res != 42, "check_probe_res",
+ "wrong probe res: %d\n", skel->bss->probe_res))
+ goto cleanup;
+
+cleanup:
+ test_stack_var_off__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
index e8399ae50e77..271b5cc9fc01 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
@@ -7,13 +7,12 @@ void test_stacktrace_build_id(void)
int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
struct test_stacktrace_build_id *skel;
- int err, stack_trace_len;
- __u32 key, previous_key, val, duration = 0;
- char buf[256];
- int i, j;
+ int err, stack_trace_len, build_id_size;
+ __u32 key, prev_key, val, duration = 0;
+ char buf[BPF_BUILD_ID_SIZE];
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
int build_id_matches = 0;
- int retry = 1;
+ int i, retry = 1;
retry:
skel = test_stacktrace_build_id__open_and_load();
@@ -40,7 +39,7 @@ retry:
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
+ * in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
@@ -52,20 +51,19 @@ retry:
"err %d errno %d\n", err, errno))
goto cleanup;
- err = extract_build_id(buf, 256);
+ build_id_size = read_build_id("urandom_read", buf, sizeof(buf));
+ err = build_id_size < 0 ? build_id_size : 0;
- if (CHECK(err, "get build_id with readelf",
+ if (CHECK(err, "read_build_id",
"err %d errno %d\n", err, errno))
goto cleanup;
- err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+ err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
if (CHECK(err, "get_next_key from stackmap",
"err %d, errno %d\n", err, errno))
goto cleanup;
do {
- char build_id[64];
-
err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
if (CHECK(err, "lookup_elem from stackmap",
"err %d, errno %d\n", err, errno))
@@ -73,14 +71,11 @@ retry:
for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
id_offs[i].offset != 0) {
- for (j = 0; j < 20; ++j)
- sprintf(build_id + 2 * j, "%02x",
- id_offs[i].build_id[j] & 0xff);
- if (strstr(buf, build_id) != NULL)
+ if (memcmp(buf, id_offs[i].build_id, build_id_size) == 0)
build_id_matches = 1;
}
- previous_key = key;
- } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+ prev_key = key;
+ } while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);
/* stack_map_get_build_id_offset() is racy and sometimes can return
* BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
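
The switch from bpf_map_get_next_key(fd, ...) to bpf_map__get_next_key(map, ..., sizeof(key)) trades a raw map fd for the struct bpf_map handle and passes the key size explicitly, so libbpf can validate it against the map definition. The same do/while shape generalizes to a full-map walk; a sketch assuming __u32 keys, as in the stack map here:

#include <errno.h>
#include <bpf/libbpf.h>

/* Visit every key of a map with __u32 keys; illustrative helper only. */
static int for_each_key(struct bpf_map *map, void (*visit)(__u32 key))
{
        __u32 key, prev_key;
        int err;

        err = bpf_map__get_next_key(map, NULL, &key, sizeof(key));
        while (!err) {
                visit(key);
                prev_key = key;
                err = bpf_map__get_next_key(map, &prev_key, &key,
                                            sizeof(key));
        }
        return err == -ENOENT ? 0 : err;        /* -ENOENT marks the end */
}
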
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
index 11a769e18f5d..b277dddd5af7 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
@@ -2,21 +2,6 @@
#include <test_progs.h>
#include "test_stacktrace_build_id.skel.h"
-static __u64 read_perf_max_sample_freq(void)
-{
- __u64 sample_freq = 5000; /* fallback to 5000 on error */
- FILE *f;
- __u32 duration = 0;
-
- f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
- if (f == NULL)
- return sample_freq;
- CHECK(fscanf(f, "%llu", &sample_freq) != 1, "Get max sample rate",
- "return default value: 5000,err %d\n", -errno);
- fclose(f);
- return sample_freq;
-}
-
void test_stacktrace_build_id_nmi(void)
{
int control_map_fd, stackid_hmap_fd, stackmap_fd;
@@ -27,12 +12,11 @@ void test_stacktrace_build_id_nmi(void)
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
};
- __u32 key, previous_key, val, duration = 0;
- char buf[256];
- int i, j;
+ __u32 key, prev_key, val, duration = 0;
+ char buf[BPF_BUILD_ID_SIZE];
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
- int build_id_matches = 0;
- int retry = 1;
+ int build_id_matches = 0, build_id_size;
+ int i, retry = 1;
attr.sample_freq = read_perf_max_sample_freq();
@@ -42,7 +26,7 @@ retry:
return;
/* override program type */
- bpf_program__set_perf_event(skel->progs.oncpu);
+ bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT);
err = test_stacktrace_build_id__load(skel);
if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
@@ -51,7 +35,7 @@ retry:
pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
0 /* cpu 0 */, -1 /* group id */,
0 /* flags */);
- if (pmu_fd < 0 && errno == ENOENT) {
+ if (pmu_fd < 0 && (errno == ENOENT || errno == EOPNOTSUPP)) {
printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
test__skip();
goto cleanup;
@@ -62,8 +46,7 @@ retry:
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
- if (CHECK(IS_ERR(skel->links.oncpu), "attach_perf_event",
- "err %ld\n", PTR_ERR(skel->links.oncpu))) {
+ if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) {
close(pmu_fd);
goto cleanup;
}
@@ -83,7 +66,7 @@ retry:
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
+ * in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
@@ -95,35 +78,32 @@ retry:
"err %d errno %d\n", err, errno))
goto cleanup;
- err = extract_build_id(buf, 256);
+ build_id_size = read_build_id("urandom_read", buf, sizeof(buf));
+ err = build_id_size < 0 ? build_id_size : 0;
if (CHECK(err, "get build_id with readelf",
"err %d errno %d\n", err, errno))
goto cleanup;
- err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+ err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
if (CHECK(err, "get_next_key from stackmap",
"err %d, errno %d\n", err, errno))
goto cleanup;
do {
- char build_id[64];
-
- err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
+ err = bpf_map__lookup_elem(skel->maps.stackmap, &key, sizeof(key),
+ id_offs, sizeof(id_offs), 0);
if (CHECK(err, "lookup_elem from stackmap",
"err %d, errno %d\n", err, errno))
goto cleanup;
for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
id_offs[i].offset != 0) {
- for (j = 0; j < 20; ++j)
- sprintf(build_id + 2 * j, "%02x",
- id_offs[i].build_id[j] & 0xff);
- if (strstr(buf, build_id) != NULL)
+ if (memcmp(buf, id_offs[i].build_id, build_id_size) == 0)
build_id_matches = 1;
}
- previous_key = key;
- } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+ prev_key = key;
+ } while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);
/* stack_map_get_build_id_offset() is racy and sometimes can return
* BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
new file mode 100644
index 000000000000..c9efdd2a5b18
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "stacktrace_ips.skel.h"
+
+#ifdef __x86_64__
+static int check_stacktrace_ips(int fd, __u32 key, int cnt, ...)
+{
+ __u64 ips[PERF_MAX_STACK_DEPTH];
+ struct ksyms *ksyms = NULL;
+ int i, err = 0;
+ va_list args;
+
+ /* sorted by addr */
+ ksyms = load_kallsyms_local();
+ if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local"))
+ return -1;
+
+ /* unlikely, but... */
+ if (!ASSERT_LT(cnt, PERF_MAX_STACK_DEPTH, "check_max"))
+ return -1;
+
+ err = bpf_map_lookup_elem(fd, &key, ips);
+ if (err)
+ goto out;
+
+ /*
+ * Compare all symbols provided via arguments with stacktrace ips,
+ * and their related symbol addresses.
+ */
+ va_start(args, cnt);
+
+ for (i = 0; i < cnt; i++) {
+ unsigned long val;
+ struct ksym *ksym;
+
+ val = va_arg(args, unsigned long);
+ ksym = ksym_search_local(ksyms, ips[i]);
+ if (!ASSERT_OK_PTR(ksym, "ksym_search_local"))
+ break;
+ ASSERT_EQ(ksym->addr, val, "stack_cmp");
+ }
+
+ va_end(args);
+
+out:
+ free_kallsyms_local(ksyms);
+ return err;
+}
+
+static void test_stacktrace_ips_kprobe_multi(bool retprobe)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
+ .retprobe = retprobe
+ );
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct stacktrace_ips *skel;
+
+ skel = stacktrace_ips__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
+ return;
+
+ if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
+ test__skip();
+ goto cleanup;
+ }
+
+ skel->links.kprobe_multi_test = bpf_program__attach_kprobe_multi_opts(
+ skel->progs.kprobe_multi_test,
+ "bpf_testmod_stacktrace_test", &opts);
+ if (!ASSERT_OK_PTR(skel->links.kprobe_multi_test, "bpf_program__attach_kprobe_multi_opts"))
+ goto cleanup;
+
+ trigger_module_test_read(1);
+
+ load_kallsyms();
+
+ check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
+ ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+ ksym_get_addr("bpf_testmod_test_read"));
+
+cleanup:
+ stacktrace_ips__destroy(skel);
+}
+
+static void test_stacktrace_ips_raw_tp(void)
+{
+ __u32 info_len = sizeof(struct bpf_prog_info);
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct bpf_prog_info info = {};
+ struct stacktrace_ips *skel;
+ __u64 bpf_prog_ksym = 0;
+ int err;
+
+ skel = stacktrace_ips__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
+ return;
+
+ if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
+ test__skip();
+ goto cleanup;
+ }
+
+ skel->links.rawtp_test = bpf_program__attach_raw_tracepoint(
+ skel->progs.rawtp_test,
+ "bpf_testmod_test_read");
+ if (!ASSERT_OK_PTR(skel->links.rawtp_test, "bpf_program__attach_raw_tracepoint"))
+ goto cleanup;
+
+ /* get bpf program address */
+ info.jited_ksyms = ptr_to_u64(&bpf_prog_ksym);
+ info.nr_jited_ksyms = 1;
+ err = bpf_prog_get_info_by_fd(bpf_program__fd(skel->progs.rawtp_test),
+ &info, &info_len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
+ goto cleanup;
+
+ trigger_module_test_read(1);
+
+ load_kallsyms();
+
+ check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 2,
+ bpf_prog_ksym,
+ ksym_get_addr("bpf_trace_run2"));
+
+cleanup:
+ stacktrace_ips__destroy(skel);
+}
+
+static void __test_stacktrace_ips(void)
+{
+ if (test__start_subtest("kprobe_multi"))
+ test_stacktrace_ips_kprobe_multi(false);
+ if (test__start_subtest("kretprobe_multi"))
+ test_stacktrace_ips_kprobe_multi(true);
+ if (test__start_subtest("raw_tp"))
+ test_stacktrace_ips_raw_tp();
+}
+#else
+static void __test_stacktrace_ips(void)
+{
+ test__skip();
+}
+#endif
+
+void test_stacktrace_ips(void)
+{
+ __test_stacktrace_ips();
+}
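
check_stacktrace_ips() relies on the value layout of BPF_MAP_TYPE_STACK_TRACE maps: each entry is a fixed array of up to PERF_MAX_STACK_DEPTH instruction pointers, innermost frame first, with the tail zero-filled on lookup. A sketch of dumping one stored trace (stackmap_fd and key as produced by the test above):

#include <stdio.h>
#include <bpf/bpf.h>

#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif

/* Print the raw instruction pointers of one stored stack trace. */
static int dump_stack_ips(int stackmap_fd, __u32 key)
{
        __u64 ips[PERF_MAX_STACK_DEPTH] = {};
        int i;

        if (bpf_map_lookup_elem(stackmap_fd, &key, ips))
                return -1;
        for (i = 0; i < PERF_MAX_STACK_DEPTH && ips[i]; i++)
                printf("#%d 0x%llx\n", i, (unsigned long long)ips[i]);
        return 0;
}
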
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
index 37269d23df93..c23b97414813 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
@@ -1,46 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include "stacktrace_map.skel.h"
void test_stacktrace_map(void)
{
+ struct stacktrace_map *skel;
int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
- const char *prog_name = "tracepoint/sched/sched_switch";
- int err, prog_fd, stack_trace_len;
- const char *file = "./test_stacktrace_map.o";
- __u32 key, val, duration = 0;
- struct bpf_program *prog;
- struct bpf_object *obj;
- struct bpf_link *link;
+ int err, stack_trace_len;
+ __u32 key, val, stack_id, duration = 0;
+ __u64 stack[PERF_MAX_STACK_DEPTH];
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ skel = stacktrace_map__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
- prog = bpf_object__find_program_by_title(obj, prog_name);
- if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
- goto close_prog;
-
- link = bpf_program__attach_tracepoint(prog, "sched", "sched_switch");
- if (CHECK(IS_ERR(link), "attach_tp", "err %ld\n", PTR_ERR(link)))
- goto close_prog;
-
- /* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (CHECK_FAIL(control_map_fd < 0))
- goto disable_pmu;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (CHECK_FAIL(stackid_hmap_fd < 0))
- goto disable_pmu;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (CHECK_FAIL(stackmap_fd < 0))
- goto disable_pmu;
-
- stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
- if (CHECK_FAIL(stack_amap_fd < 0))
- goto disable_pmu;
+ control_map_fd = bpf_map__fd(skel->maps.control_map);
+ stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
+ stackmap_fd = bpf_map__fd(skel->maps.stackmap);
+ stack_amap_fd = bpf_map__fd(skel->maps.stack_amap);
+ err = stacktrace_map__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
/* give some time for bpf program run */
sleep(1);
@@ -50,26 +31,32 @@ void test_stacktrace_map(void)
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
+ * in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
"err %d errno %d\n", err, errno))
- goto disable_pmu;
+ goto out;
err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
"err %d errno %d\n", err, errno))
- goto disable_pmu;
+ goto out;
stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
"err %d errno %d\n", err, errno))
- goto disable_pmu;
-
-disable_pmu:
- bpf_link__destroy(link);
-close_prog:
- bpf_object__close(obj);
+ goto out;
+
+ stack_id = skel->bss->stack_id;
+ err = bpf_map_lookup_and_delete_elem(stackmap_fd, &stack_id, stack);
+ if (!ASSERT_OK(err, "lookup and delete target stack_id"))
+ goto out;
+
+ err = bpf_map_lookup_elem(stackmap_fd, &stack_id, stack);
+ if (!ASSERT_EQ(err, -ENOENT, "lookup deleted stack_id"))
+ goto out;
+out:
+ stacktrace_map__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
index 404a5498e1a3..e985d51d3d47 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
@@ -3,25 +3,25 @@
void test_stacktrace_map_raw_tp(void)
{
- const char *prog_name = "tracepoint/sched/sched_switch";
+ const char *prog_name = "oncpu";
int control_map_fd, stackid_hmap_fd, stackmap_fd;
- const char *file = "./test_stacktrace_map.o";
+ const char *file = "./stacktrace_map.bpf.o";
__u32 key, val, duration = 0;
int err, prog_fd;
struct bpf_program *prog;
struct bpf_object *obj;
struct bpf_link *link = NULL;
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
goto close_prog;
link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
- if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto close_prog;
/* find map fds */
@@ -46,7 +46,7 @@ void test_stacktrace_map_raw_tp(void)
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
+ * in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
@@ -59,7 +59,6 @@ void test_stacktrace_map_raw_tp(void)
goto close_prog;
close_prog:
- if (!IS_ERR_OR_NULL(link))
- bpf_link__destroy(link);
+ bpf_link__destroy(link);
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
new file mode 100644
index 000000000000..dc2ccf6a14d1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "stacktrace_map_skip.skel.h"
+
+#define TEST_STACK_DEPTH 2
+
+void test_stacktrace_map_skip(void)
+{
+ struct stacktrace_map_skip *skel;
+ int stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+ int err, stack_trace_len;
+
+ skel = stacktrace_map_skip__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ /* find map fds */
+ stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
+ if (!ASSERT_GE(stackid_hmap_fd, 0, "stackid_hmap fd"))
+ goto out;
+
+ stackmap_fd = bpf_map__fd(skel->maps.stackmap);
+ if (!ASSERT_GE(stackmap_fd, 0, "stackmap fd"))
+ goto out;
+
+ stack_amap_fd = bpf_map__fd(skel->maps.stack_amap);
+ if (!ASSERT_GE(stack_amap_fd, 0, "stack_amap fd"))
+ goto out;
+
+ skel->bss->pid = getpid();
+
+ err = stacktrace_map_skip__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ /* give some time for bpf program run */
+ sleep(1);
+
+ /* disable stack trace collection */
+ skel->bss->control = 1;
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+ * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (!ASSERT_OK(err, "compare_map_keys stackid_hmap vs. stackmap"))
+ goto out;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (!ASSERT_OK(err, "compare_map_keys stackmap vs. stackid_hmap"))
+ goto out;
+
+ stack_trace_len = TEST_STACK_DEPTH * sizeof(__u64);
+ err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
+ if (!ASSERT_OK(err, "compare_stack_ips stackmap vs. stack_amap"))
+ goto out;
+
+ if (!ASSERT_EQ(skel->bss->failed, 0, "skip_failed"))
+ goto out;
+
+out:
+ stacktrace_map_skip__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/static_linked.c b/tools/testing/selftests/bpf/prog_tests/static_linked.c
new file mode 100644
index 000000000000..5c4e3014e063
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/static_linked.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <test_progs.h>
+#include "test_static_linked.skel.h"
+
+void test_static_linked(void)
+{
+ int err;
+ struct test_static_linked* skel;
+
+ skel = test_static_linked__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->rodata->rovar1 = 1;
+ skel->rodata->rovar2 = 4;
+
+ err = test_static_linked__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ err = test_static_linked__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger */
+ usleep(1);
+
+ ASSERT_EQ(skel->data->var1, 1 * 2 + 2 + 3, "var1");
+ ASSERT_EQ(skel->data->var2, 4 * 3 + 5 + 6, "var2");
+
+cleanup:
+ test_static_linked__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/stream.c b/tools/testing/selftests/bpf/prog_tests/stream.c
new file mode 100644
index 000000000000..c3cce5c292bd
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stream.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <sys/mman.h>
+
+#include "stream.skel.h"
+#include "stream_fail.skel.h"
+
+void test_stream_failure(void)
+{
+ RUN_TESTS(stream_fail);
+}
+
+void test_stream_success(void)
+{
+ RUN_TESTS(stream);
+ return;
+}
+
+void test_stream_syscall(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ LIBBPF_OPTS(bpf_prog_stream_read_opts, ropts);
+ struct stream *skel;
+ int ret, prog_fd;
+ char buf[64];
+
+ skel = stream__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "stream__open_and_load"))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.stream_syscall);
+ ret = bpf_prog_test_run_opts(prog_fd, &opts);
+ ASSERT_OK(ret, "ret");
+ ASSERT_OK(opts.retval, "retval");
+
+ ASSERT_LT(bpf_prog_stream_read(0, BPF_STREAM_STDOUT, buf, sizeof(buf), &ropts), 0, "error");
+ ret = -errno;
+ ASSERT_EQ(ret, -EINVAL, "bad prog_fd");
+
+ ASSERT_LT(bpf_prog_stream_read(prog_fd, 0, buf, sizeof(buf), &ropts), 0, "error");
+ ret = -errno;
+ ASSERT_EQ(ret, -ENOENT, "bad stream id");
+
+ ASSERT_LT(bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, NULL, sizeof(buf), NULL), 0, "error");
+ ret = -errno;
+ ASSERT_EQ(ret, -EFAULT, "bad stream buf");
+
+ ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, buf, 2, NULL);
+ ASSERT_EQ(ret, 2, "bytes");
+ ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, buf, 2, NULL);
+ ASSERT_EQ(ret, 1, "bytes");
+ ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, buf, 1, &ropts);
+ ASSERT_EQ(ret, 0, "no bytes stdout");
+ ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDERR, buf, 1, &ropts);
+ ASSERT_EQ(ret, 0, "no bytes stderr");
+
+ stream__destroy(skel);
+}
+
+static void test_address(struct bpf_program *prog, unsigned long *fault_addr_p)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ LIBBPF_OPTS(bpf_prog_stream_read_opts, ropts);
+ int ret, prog_fd;
+ char fault_addr[64];
+ char buf[1024];
+
+ prog_fd = bpf_program__fd(prog);
+
+ ret = bpf_prog_test_run_opts(prog_fd, &opts);
+ ASSERT_OK(ret, "ret");
+ ASSERT_OK(opts.retval, "retval");
+
+ sprintf(fault_addr, "0x%lx", *fault_addr_p);
+
+ ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDERR, buf, sizeof(buf), &ropts);
+ ASSERT_GT(ret, 0, "stream read");
+ ASSERT_LE(ret, 1023, "len for buf");
+ buf[ret] = '\0';
+
+ if (!ASSERT_HAS_SUBSTR(buf, fault_addr, "fault_addr")) {
+ fprintf(stderr, "Output from stream:\n%s\n", buf);
+ fprintf(stderr, "Fault Addr: %s\n", fault_addr);
+ }
+}
+
+void test_stream_arena_fault_address(void)
+{
+ struct stream *skel;
+
+#if !defined(__x86_64__) && !defined(__aarch64__)
+ printf("%s:SKIP: arena fault reporting not supported\n", __func__);
+ test__skip();
+ return;
+#endif
+
+ skel = stream__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "stream__open_and_load"))
+ return;
+
+ if (test__start_subtest("read_fault"))
+ test_address(skel->progs.stream_arena_read_fault, &skel->bss->fault_addr);
+ if (test__start_subtest("write_fault"))
+ test_address(skel->progs.stream_arena_write_fault, &skel->bss->fault_addr);
+
+ stream__destroy(skel);
+}
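
The syscall test above pins down the read semantics: bpf_prog_stream_read() consumes the stream, returning however many bytes fit in the buffer and 0 once the stream is drained, hence the 2-, 1-, 0-byte sequence for a three-byte stdout stream. Draining a stream is therefore a plain read loop; a sketch:

#include <stdio.h>
#include <bpf/bpf.h>

/* Copy a program's BPF stdout stream to our stdout. Returns total bytes
 * drained, or a negative error from the last read. Sketch only.
 */
static int drain_stream_stdout(int prog_fd)
{
        char buf[256];
        int n, total = 0;

        while ((n = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT,
                                         buf, sizeof(buf), NULL)) > 0) {
                fwrite(buf, 1, n, stdout);
                total += n;
        }
        return n < 0 ? n : total;
}
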
diff --git a/tools/testing/selftests/bpf/prog_tests/string_kfuncs.c b/tools/testing/selftests/bpf/prog_tests/string_kfuncs.c
new file mode 100644
index 000000000000..0f3bf594e7a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/string_kfuncs.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Red Hat, Inc.*/
+#include <test_progs.h>
+#include "string_kfuncs_success.skel.h"
+#include "string_kfuncs_failure1.skel.h"
+#include "string_kfuncs_failure2.skel.h"
+#include <sys/mman.h>
+
+static const char * const test_cases[] = {
+ "strcmp",
+ "strcasecmp",
+ "strchr",
+ "strchrnul",
+ "strnchr",
+ "strrchr",
+ "strlen",
+ "strnlen",
+ "strspn_str",
+ "strspn_accept",
+ "strcspn_str",
+ "strcspn_reject",
+ "strstr",
+ "strcasestr",
+ "strnstr",
+ "strncasestr",
+};
+
+void run_too_long_tests(void)
+{
+ struct string_kfuncs_failure2 *skel;
+ struct bpf_program *prog;
+ char test_name[256];
+ int err, i;
+
+ skel = string_kfuncs_failure2__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "string_kfuncs_failure2__open_and_load"))
+ return;
+
+ memset(skel->bss->long_str, 'a', sizeof(skel->bss->long_str));
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ sprintf(test_name, "test_%s_too_long", test_cases[i]);
+ if (!test__start_subtest(test_name))
+ continue;
+
+ prog = bpf_object__find_program_by_name(skel->obj, test_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run"))
+ goto cleanup;
+
+ ASSERT_EQ(topts.retval, -E2BIG, "reading too long string fails with -E2BIG");
+ }
+
+cleanup:
+ string_kfuncs_failure2__destroy(skel);
+}
+
+void test_string_kfuncs(void)
+{
+ RUN_TESTS(string_kfuncs_success);
+ RUN_TESTS(string_kfuncs_failure1);
+
+ run_too_long_tests();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c b/tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c
new file mode 100644
index 000000000000..a5cc593c1e1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "struct_ops_autocreate.skel.h"
+#include "struct_ops_autocreate2.skel.h"
+
+static void cant_load_full_object(void)
+{
+ struct struct_ops_autocreate *skel;
+ char *log = NULL;
+ int err;
+
+ skel = struct_ops_autocreate__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open"))
+ return;
+
+ if (start_libbpf_log_capture())
+ goto cleanup;
+ /* The testmod_2 map BTF type (struct bpf_testmod_ops___v2) doesn't
+ * match the BTF of the actual struct bpf_testmod_ops defined in the
+ * kernel, so we should fail to load it if we don't disable autocreate
+ * for that map.
+ */
+ err = struct_ops_autocreate__load(skel);
+ log = stop_libbpf_log_capture();
+ if (!ASSERT_ERR(err, "struct_ops_autocreate__load"))
+ goto cleanup;
+
+ ASSERT_HAS_SUBSTR(log, "libbpf: struct_ops init_kern", "init_kern message");
+ ASSERT_EQ(err, -ENOTSUP, "errno should be ENOTSUP");
+
+cleanup:
+ free(log);
+ struct_ops_autocreate__destroy(skel);
+}
+
+static int check_test_1_link(struct struct_ops_autocreate *skel, struct bpf_map *map)
+{
+ struct bpf_link *link;
+ int err;
+
+ link = bpf_map__attach_struct_ops(map);
+ if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops"))
+ return -1;
+
+ /* test_1() would be called from bpf_dummy_reg2() in bpf_testmod.c */
+ err = ASSERT_EQ(skel->bss->test_1_result, 42, "test_1_result");
+ bpf_link__destroy(link);
+ return err;
+}
+
+static void can_load_partial_object(void)
+{
+ struct struct_ops_autocreate *skel;
+ int err;
+
+ skel = struct_ops_autocreate__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open_opts"))
+ return;
+
+ err = bpf_map__set_autocreate(skel->maps.testmod_2, false);
+ if (!ASSERT_OK(err, "bpf_map__set_autocreate"))
+ goto cleanup;
+
+ ASSERT_TRUE(bpf_program__autoload(skel->progs.test_1), "test_1 default autoload");
+ ASSERT_TRUE(bpf_program__autoload(skel->progs.test_2), "test_2 default autoload");
+
+ err = struct_ops_autocreate__load(skel);
+ if (!ASSERT_OK(err, "struct_ops_autocreate__load"))
+ goto cleanup;
+
+ ASSERT_TRUE(bpf_program__autoload(skel->progs.test_1), "test_1 actual autoload");
+ ASSERT_FALSE(bpf_program__autoload(skel->progs.test_2), "test_2 actual autoload");
+
+ check_test_1_link(skel, skel->maps.testmod_1);
+
+cleanup:
+ struct_ops_autocreate__destroy(skel);
+}
+
+static void optional_maps(void)
+{
+ struct struct_ops_autocreate *skel;
+ int err;
+
+ skel = struct_ops_autocreate__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open"))
+ return;
+
+ ASSERT_TRUE(bpf_map__autocreate(skel->maps.testmod_1), "testmod_1 autocreate");
+ ASSERT_TRUE(bpf_map__autocreate(skel->maps.testmod_2), "testmod_2 autocreate");
+ ASSERT_FALSE(bpf_map__autocreate(skel->maps.optional_map), "optional_map autocreate");
+ ASSERT_FALSE(bpf_map__autocreate(skel->maps.optional_map2), "optional_map2 autocreate");
+
+ err = bpf_map__set_autocreate(skel->maps.testmod_1, false);
+ err |= bpf_map__set_autocreate(skel->maps.testmod_2, false);
+ err |= bpf_map__set_autocreate(skel->maps.optional_map2, true);
+ if (!ASSERT_OK(err, "bpf_map__set_autocreate"))
+ goto cleanup;
+
+ err = struct_ops_autocreate__load(skel);
+ if (!ASSERT_OK(err, "struct_ops_autocreate__load"))
+ goto cleanup;
+
+ check_test_1_link(skel, skel->maps.optional_map2);
+
+cleanup:
+ struct_ops_autocreate__destroy(skel);
+}
+
+/* Swap test_mod1->test_1 program from 'bar' to 'foo' using shadow vars.
+ * test_mod1 load should enable autoload for 'foo'.
+ */
+static void autoload_and_shadow_vars(void)
+{
+ struct struct_ops_autocreate2 *skel = NULL;
+ struct bpf_link *link = NULL;
+ int err;
+
+ skel = struct_ops_autocreate2__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open_opts"))
+ return;
+
+ ASSERT_FALSE(bpf_program__autoload(skel->progs.foo), "foo default autoload");
+ ASSERT_FALSE(bpf_program__autoload(skel->progs.bar), "bar default autoload");
+
+ /* loading map testmod_1 would switch foo's autoload to true */
+ skel->struct_ops.testmod_1->test_1 = skel->progs.foo;
+
+ err = struct_ops_autocreate2__load(skel);
+ if (!ASSERT_OK(err, "struct_ops_autocreate2__load"))
+ goto cleanup;
+
+ ASSERT_TRUE(bpf_program__autoload(skel->progs.foo), "foo actual autoload");
+ ASSERT_FALSE(bpf_program__autoload(skel->progs.bar), "bar actual autoload");
+
+ link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
+ if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops"))
+ goto cleanup;
+
+ /* test_1() would be called from bpf_dummy_reg2() in bpf_testmod.c */
+ err = ASSERT_EQ(skel->bss->test_1_result, 42, "test_1_result");
+
+cleanup:
+ bpf_link__destroy(link);
+ struct_ops_autocreate2__destroy(skel);
+}
+
+void test_struct_ops_autocreate(void)
+{
+ if (test__start_subtest("cant_load_full_object"))
+ cant_load_full_object();
+ if (test__start_subtest("can_load_partial_object"))
+ can_load_partial_object();
+ if (test__start_subtest("autoload_and_shadow_vars"))
+ autoload_and_shadow_vars();
+ if (test__start_subtest("optional_maps"))
+ optional_maps();
+}
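
The behavior these subtests pin down: between open and load, a struct_ops map can be opted out of creation with bpf_map__set_autocreate(map, false), and libbpf then flips autoload off for programs referenced only by that map, which is what the autoload assertions above verify. A hedged sketch with a hypothetical skeleton "example" and an optional ops map:

#include <bpf/libbpf.h>
#include "example.skel.h"        /* hypothetical generated skeleton */

static void load_without_optional_ops(void)
{
        struct example *skel;

        skel = example__open();
        if (!skel)
                return;

        /* Skip creating a map whose kernel-side type may be absent on
         * this kernel; programs referenced only by that map get their
         * autoload disabled automatically during load.
         */
        if (!bpf_map__set_autocreate(skel->maps.optional_ops, false) &&
            !example__load(skel)) {
                /* ... attach and exercise the remaining maps/progs ... */
        }

        example__destroy(skel);
}
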
diff --git a/tools/testing/selftests/bpf/prog_tests/struct_ops_private_stack.c b/tools/testing/selftests/bpf/prog_tests/struct_ops_private_stack.c
new file mode 100644
index 000000000000..4006879ca3fe
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/struct_ops_private_stack.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "struct_ops_private_stack.skel.h"
+#include "struct_ops_private_stack_fail.skel.h"
+#include "struct_ops_private_stack_recur.skel.h"
+
+static void test_private_stack(void)
+{
+ struct struct_ops_private_stack *skel;
+ struct bpf_link *link;
+ int err;
+
+ skel = struct_ops_private_stack__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_private_stack__open"))
+ return;
+
+ if (skel->data->skip) {
+ test__skip();
+ goto cleanup;
+ }
+
+ err = struct_ops_private_stack__load(skel);
+ if (!ASSERT_OK(err, "struct_ops_private_stack__load"))
+ goto cleanup;
+
+ link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+ goto cleanup;
+
+ ASSERT_OK(trigger_module_test_read(256), "trigger_read");
+
+ ASSERT_EQ(skel->bss->val_i, 3, "val_i");
+ ASSERT_EQ(skel->bss->val_j, 8, "val_j");
+
+ bpf_link__destroy(link);
+
+cleanup:
+ struct_ops_private_stack__destroy(skel);
+}
+
+static void test_private_stack_fail(void)
+{
+ struct struct_ops_private_stack_fail *skel;
+ int err;
+
+ skel = struct_ops_private_stack_fail__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_private_stack_fail__open"))
+ return;
+
+ if (skel->data->skip) {
+ test__skip();
+ goto cleanup;
+ }
+
+ err = struct_ops_private_stack_fail__load(skel);
+ if (!ASSERT_ERR(err, "struct_ops_private_stack_fail__load"))
+ goto cleanup;
+
+cleanup:
+ struct_ops_private_stack_fail__destroy(skel);
+}
+
+static void test_private_stack_recur(void)
+{
+ struct struct_ops_private_stack_recur *skel;
+ struct bpf_link *link;
+ int err;
+
+ skel = struct_ops_private_stack_recur__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_private_stack_recur__open"))
+ return;
+
+ if (skel->data->skip) {
+ test__skip();
+ goto cleanup;
+ }
+
+ err = struct_ops_private_stack_recur__load(skel);
+ if (!ASSERT_OK(err, "struct_ops_private_stack_recur__load"))
+ goto cleanup;
+
+ link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+ goto cleanup;
+
+ ASSERT_OK(trigger_module_test_read(256), "trigger_read");
+
+ ASSERT_EQ(skel->bss->val_j, 3, "val_j");
+
+ bpf_link__destroy(link);
+
+cleanup:
+ struct_ops_private_stack_recur__destroy(skel);
+}
+
+void test_struct_ops_private_stack(void)
+{
+ if (test__start_subtest("private_stack"))
+ test_private_stack();
+ if (test__start_subtest("private_stack_fail"))
+ test_private_stack_fail();
+ if (test__start_subtest("private_stack_recur"))
+ test_private_stack_recur();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/subprogs.c b/tools/testing/selftests/bpf/prog_tests/subprogs.c
index 3f3d2ac4dd57..903f35a9e62e 100644
--- a/tools/testing/selftests/bpf/prog_tests/subprogs.c
+++ b/tools/testing/selftests/bpf/prog_tests/subprogs.c
@@ -1,32 +1,83 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
-#include <time.h>
#include "test_subprogs.skel.h"
#include "test_subprogs_unused.skel.h"
-static int duration;
+struct toggler_ctx {
+ int fd;
+ bool stop;
+};
-void test_subprogs(void)
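+/* Flip /proc/sys/net/core/bpf_jit_harden between 2 (constant blinding for
+ * all programs) and 0 so that skeleton loads in the main thread race
+ * against the sysctl change.
+ */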
+static void *toggle_jit_harden(void *arg)
+{
+ struct toggler_ctx *ctx = arg;
+ char two = '2';
+ char zero = '0';
+
+ while (!ctx->stop) {
+ lseek(ctx->fd, 0, SEEK_SET);
+ write(ctx->fd, &two, sizeof(two));
+ lseek(ctx->fd, 0, SEEK_SET);
+ write(ctx->fd, &zero, sizeof(zero));
+ }
+
+ return NULL;
+}
+
+static void test_subprogs_with_jit_harden_toggling(void)
+{
+ struct toggler_ctx ctx;
+ pthread_t toggler;
+ int err;
+ unsigned int i, loop = 10;
+
+ ctx.fd = open("/proc/sys/net/core/bpf_jit_harden", O_RDWR);
+ if (!ASSERT_GE(ctx.fd, 0, "open bpf_jit_harden"))
+ return;
+
+ ctx.stop = false;
+ err = pthread_create(&toggler, NULL, toggle_jit_harden, &ctx);
+ if (!ASSERT_OK(err, "new toggler"))
+ goto out;
+
+ /* Give the toggler thread a chance to run */
+ usleep(1);
+
+ for (i = 0; i < loop; i++) {
+ struct test_subprogs *skel = test_subprogs__open_and_load();
+
+ if (!ASSERT_OK_PTR(skel, "skel open"))
+ break;
+ test_subprogs__destroy(skel);
+ }
+
+ ctx.stop = true;
+ pthread_join(toggler, NULL);
+out:
+ close(ctx.fd);
+}
+
+static void test_subprogs_alone(void)
{
struct test_subprogs *skel;
struct test_subprogs_unused *skel2;
int err;
skel = test_subprogs__open_and_load();
- if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
err = test_subprogs__attach(skel);
- if (CHECK(err, "skel_attach", "failed to attach skeleton: %d\n", err))
+ if (!ASSERT_OK(err, "skel attach"))
goto cleanup;
usleep(1);
- CHECK(skel->bss->res1 != 12, "res1", "got %d, exp %d\n", skel->bss->res1, 12);
- CHECK(skel->bss->res2 != 17, "res2", "got %d, exp %d\n", skel->bss->res2, 17);
- CHECK(skel->bss->res3 != 19, "res3", "got %d, exp %d\n", skel->bss->res3, 19);
- CHECK(skel->bss->res4 != 36, "res4", "got %d, exp %d\n", skel->bss->res4, 36);
+ ASSERT_EQ(skel->bss->res1, 12, "res1");
+ ASSERT_EQ(skel->bss->res2, 17, "res2");
+ ASSERT_EQ(skel->bss->res3, 19, "res3");
+ ASSERT_EQ(skel->bss->res4, 36, "res4");
skel2 = test_subprogs_unused__open_and_load();
ASSERT_OK_PTR(skel2, "unused_progs_skel");
@@ -35,3 +86,11 @@ void test_subprogs(void)
cleanup:
test_subprogs__destroy(skel);
}
+
+void test_subprogs(void)
+{
+ if (test__start_subtest("subprogs_alone"))
+ test_subprogs_alone();
+ if (test__start_subtest("subprogs_and_jit_harden"))
+ test_subprogs_with_jit_harden_toggling();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/subprogs_extable.c b/tools/testing/selftests/bpf/prog_tests/subprogs_extable.c
new file mode 100644
index 000000000000..3afd9f775f68
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/subprogs_extable.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "test_subprogs_extable.skel.h"
+
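+/* The skeleton's programs presumably perform probe reads in subprograms,
+ * which makes the JIT emit exception-table entries; triggering a module
+ * read checks that at least one of them runs without faulting.
+ */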
+void test_subprogs_extable(void)
+{
+ const int read_sz = 456;
+ struct test_subprogs_extable *skel;
+ int err;
+
+ skel = test_subprogs_extable__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ err = test_subprogs_extable__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger tracepoint */
+ ASSERT_OK(trigger_module_test_read(read_sz), "trigger_read");
+
+ ASSERT_NEQ(skel->bss->triggered, 0, "verify at least one program ran");
+
+ test_subprogs_extable__detach(skel);
+
+cleanup:
+ test_subprogs_extable__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/subskeleton.c b/tools/testing/selftests/bpf/prog_tests/subskeleton.c
new file mode 100644
index 000000000000..fdf13ed0152a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/subskeleton.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "test_subskeleton.skel.h"
+#include "test_subskeleton_lib.subskel.h"
+
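+/* Open a subskeleton over the already-opened object and seed the
+ * library's global variables before the object is loaded.
+ */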
+static void subskeleton_lib_setup(struct bpf_object *obj)
+{
+ struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj);
+
+ if (!ASSERT_OK_PTR(lib, "open subskeleton"))
+ return;
+
+ *lib->rodata.var1 = 1;
+ *lib->data.var2 = 2;
+ lib->bss.var3->var3_1 = 3;
+ lib->bss.var3->var3_2 = 4;
+
+ test_subskeleton_lib__destroy(lib);
+}
+
+static int subskeleton_lib_subresult(struct bpf_object *obj)
+{
+ struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj);
+ int result;
+
+ if (!ASSERT_OK_PTR(lib, "open subskeleton"))
+ return -EINVAL;
+
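+ /* libout1 is expected to accumulate var1..var6 as set up above and
+ * via the defaults checked below: 1 + 2 + 3 + 4 + 5 + 6 = 21.
+ */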
+ result = *lib->bss.libout1;
+ ASSERT_EQ(result, 1 + 2 + 3 + 4 + 5 + 6, "lib subresult");
+
+ ASSERT_OK_PTR(lib->progs.lib_perf_handler, "lib_perf_handler");
+ ASSERT_STREQ(bpf_program__name(lib->progs.lib_perf_handler),
+ "lib_perf_handler", "program name");
+
+ ASSERT_OK_PTR(lib->maps.map1, "map1");
+ ASSERT_STREQ(bpf_map__name(lib->maps.map1), "map1", "map name");
+
+ ASSERT_EQ(*lib->data.var5, 5, "__weak var5");
+ ASSERT_EQ(*lib->data.var6, 6, "extern var6");
+ ASSERT_TRUE(*lib->kconfig.CONFIG_BPF_SYSCALL, "CONFIG_BPF_SYSCALL");
+
+ test_subskeleton_lib__destroy(lib);
+ return result;
+}
+
+/* initialize and load through skeleton, then instantiate subskeleton out of it */
+static void subtest_skel_subskeleton(void)
+{
+ int err, result;
+ struct test_subskeleton *skel;
+
+ skel = test_subskeleton__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->rodata->rovar1 = 10;
+ skel->rodata->var1 = 1;
+ subskeleton_lib_setup(skel->obj);
+
+ err = test_subskeleton__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ err = test_subskeleton__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger tracepoint */
+ usleep(1);
+
+ result = subskeleton_lib_subresult(skel->obj) * 10;
+ ASSERT_EQ(skel->bss->out1, result, "unexpected calculation");
+
+cleanup:
+ test_subskeleton__destroy(skel);
+}
+
+/* initialize and load through generic bpf_object API, then instantiate subskeleton out of it */
+static void subtest_obj_subskeleton(void)
+{
+ int err, result;
+ const void *elf_bytes;
+ size_t elf_bytes_sz = 0, rodata_sz = 0, bss_sz = 0;
+ struct bpf_object *obj;
+ const struct bpf_map *map;
+ const struct bpf_program *prog;
+ struct bpf_link *link = NULL;
+ struct test_subskeleton__rodata *rodata;
+ struct test_subskeleton__bss *bss;
+
+ elf_bytes = test_subskeleton__elf_bytes(&elf_bytes_sz);
+ if (!ASSERT_OK_PTR(elf_bytes, "elf_bytes"))
+ return;
+
+ obj = bpf_object__open_mem(elf_bytes, elf_bytes_sz, NULL);
+ if (!ASSERT_OK_PTR(obj, "obj_open_mem"))
+ return;
+
+ map = bpf_object__find_map_by_name(obj, ".rodata");
+ if (!ASSERT_OK_PTR(map, "rodata_map_by_name"))
+ goto cleanup;
+
+ rodata = bpf_map__initial_value(map, &rodata_sz);
+ if (!ASSERT_OK_PTR(rodata, "rodata_get"))
+ goto cleanup;
+
+ rodata->rovar1 = 10;
+ rodata->var1 = 1;
+ subskeleton_lib_setup(obj);
+
+ err = bpf_object__load(obj);
+ if (!ASSERT_OK(err, "obj_load"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(obj, "handler1");
+ if (!ASSERT_OK_PTR(prog, "prog_by_name"))
+ goto cleanup;
+
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "prog_attach"))
+ goto cleanup;
+
+ /* trigger tracepoint */
+ usleep(1);
+
+ map = bpf_object__find_map_by_name(obj, ".bss");
+ if (!ASSERT_OK_PTR(map, "bss_map_by_name"))
+ goto cleanup;
+
+ bss = bpf_map__initial_value(map, &bss_sz);
+ if (!ASSERT_OK_PTR(rodata, "rodata_get"))
+ goto cleanup;
+
+ result = subskeleton_lib_subresult(obj) * 10;
+ ASSERT_EQ(bss->out1, result, "out1");
+
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
+}
+
+void test_subskeleton(void)
+{
+ if (test__start_subtest("skel_subskel"))
+ subtest_skel_subskeleton();
+ if (test__start_subtest("obj_subskel"))
+ subtest_obj_subskeleton();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/summarization.c b/tools/testing/selftests/bpf/prog_tests/summarization.c
new file mode 100644
index 000000000000..5dd6c120a838
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/summarization.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bpf/libbpf.h"
+#include "summarization_freplace.skel.h"
+#include "summarization.skel.h"
+#include <test_progs.h>
+
+static void print_verifier_log(const char *log)
+{
+ if (env.verbosity >= VERBOSE_VERY)
+ fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log);
+}
+
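+/* Load main_prog_name from the summarization skeleton, then freplace its
+ * to_be_replaced subprogram with replacement from the freplace skeleton.
+ * expect_load says whether the second load should succeed; when it should
+ * fail, err_msg is matched against the verifier log.
+ */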
+static void test_aux(const char *main_prog_name,
+ const char *to_be_replaced,
+ const char *replacement,
+ bool expect_load,
+ const char *err_msg)
+{
+ struct summarization_freplace *freplace = NULL;
+ struct bpf_program *freplace_prog = NULL;
+ struct bpf_program *main_prog = NULL;
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct summarization *main = NULL;
+ char log[16*1024];
+ int err;
+
+ opts.kernel_log_buf = log;
+ opts.kernel_log_size = sizeof(log);
+ if (env.verbosity >= VERBOSE_SUPER)
+ opts.kernel_log_level = 1 | 2 | 4;
+ main = summarization__open_opts(&opts);
+ if (!ASSERT_OK_PTR(main, "summarization__open"))
+ goto out;
+ main_prog = bpf_object__find_program_by_name(main->obj, main_prog_name);
+ if (!ASSERT_OK_PTR(main_prog, "main_prog"))
+ goto out;
+ bpf_program__set_autoload(main_prog, true);
+ err = summarization__load(main);
+ print_verifier_log(log);
+ if (!ASSERT_OK(err, "summarization__load"))
+ goto out;
+ freplace = summarization_freplace__open_opts(&opts);
+ if (!ASSERT_OK_PTR(freplace, "summarization_freplace__open"))
+ goto out;
+ freplace_prog = bpf_object__find_program_by_name(freplace->obj, replacement);
+ if (!ASSERT_OK_PTR(freplace_prog, "freplace_prog"))
+ goto out;
+ bpf_program__set_autoload(freplace_prog, true);
+ bpf_program__set_autoattach(freplace_prog, true);
+ bpf_program__set_attach_target(freplace_prog,
+ bpf_program__fd(main_prog),
+ to_be_replaced);
+ err = summarization_freplace__load(freplace);
+ print_verifier_log(log);
+
+ /* The might_sleep extension can't be loaded yet, as sleepable calls are
+ * not allowed in extension programs; keep the check so this combination
+ * can be enabled if such support is added later.
+ */
+ if (!strcmp("might_sleep", replacement) && err) {
+ ASSERT_HAS_SUBSTR(log, "helper call might sleep in a non-sleepable prog", "error log");
+ ASSERT_EQ(err, -EINVAL, "err");
+ test__skip();
+ goto out;
+ }
+
+ if (expect_load) {
+ ASSERT_OK(err, "summarization_freplace__load");
+ } else {
+ ASSERT_ERR(err, "summarization_freplace__load");
+ ASSERT_HAS_SUBSTR(log, err_msg, "error log");
+ }
+
+out:
+ summarization_freplace__destroy(freplace);
+ summarization__destroy(main);
+}
+
+/* There are two global subprograms in both summarization.skel.h and
+ * summarization_freplace.skel.h:
+ * - one changes packet data;
+ * - another does not.
+ * It is ok to freplace a subprogram that changes packet data with one that
+ * either does or does not. It is only ok to freplace a subprogram that does
+ * not change packet data with one that does not either.
+ * The tests below check the outcome for each such freplace combination.
+ * They also cover the case where the main subprogram itself is replaced and
+ * is the only subprogram in the program.
+ *
+ * The same rule holds for might_sleep programs: might_sleep may be replaced
+ * by either might_sleep or does_not_sleep, but does_not_sleep must not be
+ * replaced by might_sleep.
+ */
+void test_summarization_freplace(void)
+{
+ struct {
+ const char *main;
+ const char *to_be_replaced;
+ bool has_side_effect;
+ } mains[2][4] = {
+ {
+ { "main_changes_with_subprogs", "changes_pkt_data", true },
+ { "main_changes_with_subprogs", "does_not_change_pkt_data", false },
+ { "main_changes", "main_changes", true },
+ { "main_does_not_change", "main_does_not_change", false },
+ },
+ {
+ { "main_might_sleep_with_subprogs", "might_sleep", true },
+ { "main_might_sleep_with_subprogs", "does_not_sleep", false },
+ { "main_might_sleep", "main_might_sleep", true },
+ { "main_does_not_sleep", "main_does_not_sleep", false },
+ },
+ };
+ const char *pkt_err = "Extension program changes packet data";
+ const char *slp_err = "Extension program may sleep";
+ struct {
+ const char *func;
+ bool has_side_effect;
+ const char *err_msg;
+ } replacements[2][2] = {
+ {
+ { "changes_pkt_data", true, pkt_err },
+ { "does_not_change_pkt_data", false, pkt_err },
+ },
+ {
+ { "might_sleep", true, slp_err },
+ { "does_not_sleep", false, slp_err },
+ },
+ };
+ char buf[64];
+
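+ /* t selects the test family: 0 = packet data cases, 1 = might_sleep
+ * cases; a replacement is expected to load only when the target has a
+ * side effect or the replacement has none.
+ */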
+ for (int t = 0; t < 2; t++) {
+ for (int i = 0; i < ARRAY_SIZE(mains[t]); ++i) {
+ for (int j = 0; j < ARRAY_SIZE(replacements[t]); ++j) {
+ snprintf(buf, sizeof(buf), "%s_with_%s",
+ mains[t][i].to_be_replaced, replacements[t][j].func);
+ if (!test__start_subtest(buf))
+ continue;
+ test_aux(mains[t][i].main, mains[t][i].to_be_replaced, replacements[t][j].func,
+ mains[t][i].has_side_effect || !replacements[t][j].has_side_effect,
+ replacements[t][j].err_msg);
+ }
+ }
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/syscall.c b/tools/testing/selftests/bpf/prog_tests/syscall.c
new file mode 100644
index 000000000000..0be8301c0ffd
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/syscall.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include "syscall.skel.h"
+
+struct args {
+ __u64 log_buf;
+ __u32 log_size;
+ int max_entries;
+ int map_fd;
+ int prog_fd;
+ int btf_fd;
+};
+
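+/* load_prog runs as a BPF syscall program; judging by the checks below it
+ * is expected to create a map, BTF and a program from inside BPF, hand the
+ * fds back through ctx, and store value 34 at key 12 in the new map.
+ */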
+static void test_syscall_load_prog(void)
+{
+ static char verifier_log[8192];
+ struct args ctx = {
+ .max_entries = 1024,
+ .log_buf = (uintptr_t) verifier_log,
+ .log_size = sizeof(verifier_log),
+ };
+ LIBBPF_OPTS(bpf_test_run_opts, tattr,
+ .ctx_in = &ctx,
+ .ctx_size_in = sizeof(ctx),
+ );
+ struct syscall *skel = NULL;
+ __u64 key = 12, value = 0;
+ int err, prog_fd;
+
+ skel = syscall__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.load_prog);
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ ASSERT_EQ(err, 0, "err");
+ ASSERT_EQ(tattr.retval, 1, "retval");
+ ASSERT_GT(ctx.map_fd, 0, "ctx.map_fd");
+ ASSERT_GT(ctx.prog_fd, 0, "ctx.prog_fd");
+ ASSERT_OK(memcmp(verifier_log, "processed", sizeof("processed") - 1),
+ "verifier_log");
+
+ err = bpf_map_lookup_elem(ctx.map_fd, &key, &value);
+ ASSERT_EQ(err, 0, "map_lookup");
+ ASSERT_EQ(value, 34, "map lookup value");
+cleanup:
+ syscall__destroy(skel);
+ if (ctx.prog_fd > 0)
+ close(ctx.prog_fd);
+ if (ctx.map_fd > 0)
+ close(ctx.map_fd);
+ if (ctx.btf_fd > 0)
+ close(ctx.btf_fd);
+}
+
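+/* update_outer_map presumably updates an outer map-in-map entry from
+ * inside the syscall program; retval 1 signals success.
+ */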
+static void test_syscall_update_outer_map(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct syscall *skel;
+ int err, prog_fd;
+
+ skel = syscall__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.update_outer_map);
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ ASSERT_EQ(err, 0, "err");
+ ASSERT_EQ(opts.retval, 1, "retval");
+cleanup:
+ syscall__destroy(skel);
+}
+
+void test_syscall(void)
+{
+ if (test__start_subtest("load_prog"))
+ test_syscall_load_prog();
+ if (test__start_subtest("update_outer_map"))
+ test_syscall_update_outer_map();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index ee27d68d2a1c..0ab36503c3b2 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -1,6 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
+#include <unistd.h>
#include <test_progs.h>
#include <network_helpers.h>
+#include "tailcall_poke.skel.h"
+#include "tailcall_bpf2bpf_hierarchy2.skel.h"
+#include "tailcall_bpf2bpf_hierarchy3.skel.h"
+#include "tailcall_freplace.skel.h"
+#include "tc_bpf2bpf.skel.h"
+#include "tailcall_fail.skel.h"
/* test_tailcall_1 checks basic functionality by patching multiple locations
* in a single program for a single tail call slot with nop->jmp, jmp->nop
@@ -12,16 +19,20 @@ static void test_tailcall_1(void)
struct bpf_map *prog_array;
struct bpf_program *prog;
struct bpf_object *obj;
- __u32 retval, duration;
char prog_name[32];
char buff[128] = {};
-
- err = bpf_prog_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
- &prog_fd);
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = buff,
+ .data_size_in = sizeof(buff),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &prog_fd);
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -37,10 +48,10 @@ static void test_tailcall_1(void)
if (CHECK_FAIL(map_fd < 0))
goto out;
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -53,26 +64,24 @@ static void test_tailcall_1(void)
goto out;
}
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != i, "tailcall",
- "err %d errno %d retval %d\n", err, errno, retval);
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, i, "tailcall retval");
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
}
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 3, "tailcall retval");
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -85,16 +94,15 @@ static void test_tailcall_1(void)
goto out;
}
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_OK(topts.retval, "tailcall retval");
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- j = bpf_map__def(prog_array)->max_entries - 1 - i;
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", j);
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ j = bpf_map__max_entries(prog_array) - 1 - i;
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -107,33 +115,30 @@ static void test_tailcall_1(void)
goto out;
}
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- j = bpf_map__def(prog_array)->max_entries - 1 - i;
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ j = bpf_map__max_entries(prog_array) - 1 - i;
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != j, "tailcall",
- "err %d errno %d retval %d\n", err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, j, "tailcall retval");
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
}
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 3, "tailcall retval");
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err >= 0 || errno != ENOENT))
goto out;
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != 3, "tailcall",
- "err %d errno %d retval %d\n", err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 3, "tailcall retval");
}
out:
@@ -150,16 +155,20 @@ static void test_tailcall_2(void)
struct bpf_map *prog_array;
struct bpf_program *prog;
struct bpf_object *obj;
- __u32 retval, duration;
char prog_name[32];
char buff[128] = {};
-
- err = bpf_prog_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
- &prog_fd);
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = buff,
+ .data_size_in = sizeof(buff),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &prog_fd);
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -175,10 +184,10 @@ static void test_tailcall_2(void)
if (CHECK_FAIL(map_fd < 0))
goto out;
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -191,52 +200,52 @@ static void test_tailcall_2(void)
goto out;
}
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != 2, "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 2, "tailcall retval");
i = 2;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 1, "tailcall retval");
i = 0;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
bpf_object__close(obj);
}
-/* test_tailcall_3 checks that the count value of the tail call limit
- * enforcement matches with expectations.
- */
-static void test_tailcall_3(void)
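+/* Common helper for the tail call limit tests: load the given object, wire
+ * classifier_0 into slot 0 of jmp_table and optionally trace subprog_tail
+ * with fentry and/or fexit before checking that the counter in tailcall.bss
+ * stopped at 33.
+ */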
+static void test_tailcall_count(const char *which, bool test_fentry,
+ bool test_fexit)
{
+ struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
+ struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
int err, map_fd, prog_fd, main_fd, data_fd, i, val;
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
- struct bpf_object *obj;
- __u32 retval, duration;
char buff[128] = {};
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = buff,
+ .data_size_in = sizeof(buff),
+ .repeat = 1,
+ );
- err = bpf_prog_load("tailcall3.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
&prog_fd);
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -252,7 +261,7 @@ static void test_tailcall_3(void)
if (CHECK_FAIL(map_fd < 0))
goto out;
- prog = bpf_object__find_program_by_title(obj, "classifier/0");
+ prog = bpf_object__find_program_by_name(obj, "classifier_0");
if (CHECK_FAIL(!prog))
goto out;
@@ -265,37 +274,137 @@ static void test_tailcall_3(void)
if (CHECK_FAIL(err))
goto out;
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
+ if (test_fentry) {
+ fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
+ NULL);
+ if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
+ if (!ASSERT_OK_PTR(prog, "find fentry prog"))
+ goto out;
+
+ err = bpf_program__set_attach_target(prog, prog_fd,
+ "subprog_tail");
+ if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
+ goto out;
+
+ err = bpf_object__load(fentry_obj);
+ if (!ASSERT_OK(err, "load fentry_obj"))
+ goto out;
+
+ fentry_link = bpf_program__attach_trace(prog);
+ if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
+ goto out;
+ }
+
+ if (test_fexit) {
+ fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
+ NULL);
+ if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
+ if (!ASSERT_OK_PTR(prog, "find fexit prog"))
+ goto out;
+
+ err = bpf_program__set_attach_target(prog, prog_fd,
+ "subprog_tail");
+ if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
+ goto out;
+
+ err = bpf_object__load(fexit_obj);
+ if (!ASSERT_OK(err, "load fexit_obj"))
+ goto out;
+
+ fexit_link = bpf_program__attach_trace(prog);
+ if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
+ goto out;
+ }
+
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 1, "tailcall retval");
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
- return;
+ goto out;
data_fd = bpf_map__fd(data_map);
- if (CHECK_FAIL(map_fd < 0))
- return;
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
i = 0;
err = bpf_map_lookup_elem(data_fd, &i, &val);
- CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
- err, errno, val);
+ ASSERT_OK(err, "tailcall count");
+ ASSERT_EQ(val, 33, "tailcall count");
+
+ if (test_fentry) {
+ data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall_bpf2bpf_fentry.bss map"))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_FALSE(data_fd < 0,
+ "find tailcall_bpf2bpf_fentry.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ ASSERT_OK(err, "fentry count");
+ ASSERT_EQ(val, 33, "fentry count");
+ }
+
+ if (test_fexit) {
+ data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall_bpf2bpf_fexit.bss map"))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_FALSE(data_fd < 0,
+ "find tailcall_bpf2bpf_fexit.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ ASSERT_OK(err, "fexit count");
+ ASSERT_EQ(val, 33, "fexit count");
+ }
i = 0;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_OK(topts.retval, "tailcall retval");
out:
+ bpf_link__destroy(fentry_link);
+ bpf_link__destroy(fexit_link);
+ bpf_object__close(fentry_obj);
+ bpf_object__close(fexit_obj);
bpf_object__close(obj);
}
+/* test_tailcall_3 checks that the count value of the tail call limit
+ * enforcement matches with expectations. JIT uses direct jump.
+ */
+static void test_tailcall_3(void)
+{
+ test_tailcall_count("tailcall3.bpf.o", false, false);
+}
+
+/* test_tailcall_6 checks that the count value of the tail call limit
+ * enforcement matches with expectations. JIT uses indirect jump.
+ */
+static void test_tailcall_6(void)
+{
+ test_tailcall_count("tailcall6.bpf.o", false, false);
+}
+
/* test_tailcall_4 checks that the kernel properly selects indirect jump
* for the case where the key is not known. Latter is passed via global
* data to select different targets we can compare return value of.
@@ -306,17 +415,21 @@ static void test_tailcall_4(void)
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
- __u32 retval, duration;
static const int zero = 0;
char buff[128] = {};
char prog_name[32];
-
- err = bpf_prog_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
- &prog_fd);
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = buff,
+ .data_size_in = sizeof(buff),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &prog_fd);
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -334,16 +447,16 @@ static void test_tailcall_4(void)
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
- return;
+ goto out;
data_fd = bpf_map__fd(data_map);
- if (CHECK_FAIL(map_fd < 0))
- return;
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -356,18 +469,17 @@ static void test_tailcall_4(void)
goto out;
}
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != i, "tailcall",
- "err %d errno %d retval %d\n", err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, i, "tailcall retval");
}
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
if (CHECK_FAIL(err))
goto out;
@@ -376,10 +488,9 @@ static void test_tailcall_4(void)
if (CHECK_FAIL(err))
goto out;
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != 3, "tailcall",
- "err %d errno %d retval %d\n", err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 3, "tailcall retval");
}
out:
bpf_object__close(obj);
@@ -394,17 +505,21 @@ static void test_tailcall_5(void)
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
- __u32 retval, duration;
static const int zero = 0;
char buff[128] = {};
char prog_name[32];
-
- err = bpf_prog_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
- &prog_fd);
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = buff,
+ .data_size_in = sizeof(buff),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &prog_fd);
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -422,16 +537,16 @@ static void test_tailcall_5(void)
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
- return;
+ goto out;
data_fd = bpf_map__fd(data_map);
- if (CHECK_FAIL(map_fd < 0))
- return;
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -444,18 +559,17 @@ static void test_tailcall_5(void)
goto out;
}
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
if (CHECK_FAIL(err))
goto out;
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != i, "tailcall",
- "err %d errno %d retval %d\n", err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, i, "tailcall retval");
}
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
if (CHECK_FAIL(err))
goto out;
@@ -464,10 +578,9 @@ static void test_tailcall_5(void)
if (CHECK_FAIL(err))
goto out;
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != 3, "tailcall",
- "err %d errno %d retval %d\n", err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 3, "tailcall retval");
}
out:
bpf_object__close(obj);
@@ -482,15 +595,19 @@ static void test_tailcall_bpf2bpf_1(void)
struct bpf_map *prog_array;
struct bpf_program *prog;
struct bpf_object *obj;
- __u32 retval, duration;
char prog_name[32];
-
- err = bpf_prog_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS,
- &obj, &prog_fd);
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -507,10 +624,10 @@ static void test_tailcall_bpf2bpf_1(void)
goto out;
/* nop -> jmp */
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -523,10 +640,9 @@ static void test_tailcall_bpf2bpf_1(void)
goto out;
}
- err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
- 0, &retval, &duration);
- CHECK(err || retval != 1, "tailcall",
- "err %d errno %d retval %d\n", err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 1, "tailcall retval");
/* jmp -> nop, call subprog that will do tailcall */
i = 1;
@@ -534,10 +650,9 @@ static void test_tailcall_bpf2bpf_1(void)
if (CHECK_FAIL(err))
goto out;
- err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
- 0, &retval, &duration);
- CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_OK(topts.retval, "tailcall retval");
/* make sure that subprog can access ctx and entry prog that
* called this subprog can properly return
@@ -547,11 +662,9 @@ static void test_tailcall_bpf2bpf_1(void)
if (CHECK_FAIL(err))
goto out;
- err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
- 0, &retval, &duration);
- CHECK(err || retval != sizeof(pkt_v4) * 2,
- "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
bpf_object__close(obj);
}
@@ -566,15 +679,19 @@ static void test_tailcall_bpf2bpf_2(void)
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
- __u32 retval, duration;
char buff[128] = {};
-
- err = bpf_prog_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS,
- &obj, &prog_fd);
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = buff,
+ .data_size_in = sizeof(buff),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -590,7 +707,7 @@ static void test_tailcall_bpf2bpf_2(void)
if (CHECK_FAIL(map_fd < 0))
goto out;
- prog = bpf_object__find_program_by_title(obj, "classifier/0");
+ prog = bpf_object__find_program_by_name(obj, "classifier_0");
if (CHECK_FAIL(!prog))
goto out;
@@ -603,33 +720,31 @@ static void test_tailcall_bpf2bpf_2(void)
if (CHECK_FAIL(err))
goto out;
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 1, "tailcall retval");
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
- return;
+ goto out;
data_fd = bpf_map__fd(data_map);
- if (CHECK_FAIL(map_fd < 0))
- return;
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
i = 0;
err = bpf_map_lookup_elem(data_fd, &i, &val);
- CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
- err, errno, val);
+ ASSERT_OK(err, "tailcall count");
+ ASSERT_EQ(val, 33, "tailcall count");
i = 0;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
- err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_OK(topts.retval, "tailcall retval");
out:
bpf_object__close(obj);
}
@@ -644,15 +759,19 @@ static void test_tailcall_bpf2bpf_3(void)
struct bpf_map *prog_array;
struct bpf_program *prog;
struct bpf_object *obj;
- __u32 retval, duration;
char prog_name[32];
-
- err = bpf_prog_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS,
- &obj, &prog_fd);
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -668,10 +787,10 @@ static void test_tailcall_bpf2bpf_3(void)
if (CHECK_FAIL(map_fd < 0))
goto out;
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -684,37 +803,33 @@ static void test_tailcall_bpf2bpf_3(void)
goto out;
}
- err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != sizeof(pkt_v4) * 3,
- "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");
i = 1;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
- err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != sizeof(pkt_v4),
- "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");
i = 0;
err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err))
goto out;
- err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != sizeof(pkt_v4) * 2,
- "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
bpf_object__close(obj);
}
+#include "tailcall_bpf2bpf4.skel.h"
+
/* test_tailcall_bpf2bpf_4 checks that tailcall counter is correctly preserved
* across tailcalls combined with bpf2bpf calls. for making sure that tailcall
* counter behaves correctly, bpf program will go through following flow:
@@ -727,22 +842,31 @@ out:
* the loop begins. At the end of the test make sure that the global counter is
* equal to 31, because tailcall counter includes the first two tailcalls
* whereas global counter is incremented only on loop presented on flow above.
+ *
+ * The noise parameter is used to insert bpf_map_update calls into the logic
+ * to force the verifier to patch instructions. This ensures that the jump
+ * logic remains correct in the face of instruction movement.
*/
-static void test_tailcall_bpf2bpf_4(void)
+static void test_tailcall_bpf2bpf_4(bool noise)
{
- int err, map_fd, prog_fd, main_fd, data_fd, i, val;
+ int err, map_fd, prog_fd, main_fd, data_fd, i;
+ struct tailcall_bpf2bpf4__bss val;
struct bpf_map *prog_array, *data_map;
struct bpf_program *prog;
struct bpf_object *obj;
- __u32 retval, duration;
char prog_name[32];
-
- err = bpf_prog_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS,
- &obj, &prog_fd);
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
- prog = bpf_object__find_program_by_title(obj, "classifier");
+ prog = bpf_object__find_program_by_name(obj, "entry");
if (CHECK_FAIL(!prog))
goto out;
@@ -758,10 +882,10 @@ static void test_tailcall_bpf2bpf_4(void)
if (CHECK_FAIL(map_fd < 0))
goto out;
- for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
- prog = bpf_object__find_program_by_title(obj, prog_name);
+ prog = bpf_object__find_program_by_name(obj, prog_name);
if (CHECK_FAIL(!prog))
goto out;
@@ -774,28 +898,761 @@ static void test_tailcall_bpf2bpf_4(void)
goto out;
}
- err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
- &duration, &retval, NULL);
- CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall", "err %d errno %d retval %d\n",
- err, errno, retval);
-
data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
- return;
+ goto out;
data_fd = bpf_map__fd(data_map);
- if (CHECK_FAIL(map_fd < 0))
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
+
+ i = 0;
+ val.noise = noise;
+ val.count = 0;
+ err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ ASSERT_OK(err, "tailcall count");
+ ASSERT_EQ(val.count, 31, "tailcall count");
+
+out:
+ bpf_object__close(obj);
+}
+
+#include "tailcall_bpf2bpf6.skel.h"
+
+/* Tail call counting works even when there is data on the stack which is
+ * not aligned to 8 bytes.
+ */
+static void test_tailcall_bpf2bpf_6(void)
+{
+ struct tailcall_bpf2bpf6 *obj;
+ int err, map_fd, prog_fd, main_fd, data_fd, i, val;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ obj = tailcall_bpf2bpf6__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "open and load"))
return;
+ main_fd = bpf_program__fd(obj->progs.entry);
+ if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
+ goto out;
+
+ map_fd = bpf_map__fd(obj->maps.jmp_table);
+ if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
+ goto out;
+
+ prog_fd = bpf_program__fd(obj->progs.classifier_0);
+ if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "jmp_table map update"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "entry prog test run");
+ ASSERT_EQ(topts.retval, 0, "tailcall retval");
+
+ data_fd = bpf_map__fd(obj->maps.bss);
+ if (!ASSERT_GE(data_fd, 0, "bss map fd"))
+ goto out;
+
i = 0;
err = bpf_map_lookup_elem(data_fd, &i, &val);
- CHECK(err || val != 31, "tailcall count", "err %d errno %d count %d\n",
- err, errno, val);
+ ASSERT_OK(err, "bss map lookup");
+ ASSERT_EQ(val, 1, "done flag is set");
out:
+ tailcall_bpf2bpf6__destroy(obj);
+}
+
+/* test_tailcall_bpf2bpf_fentry checks that the count value of the tail call
+ * limit enforcement matches with expectations when tailcall is preceded with
+ * bpf2bpf call, and the bpf2bpf call is traced by fentry.
+ */
+static void test_tailcall_bpf2bpf_fentry(void)
+{
+ test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
+}
+
+/* test_tailcall_bpf2bpf_fexit checks that the count value of the tail call
+ * limit enforcement matches with expectations when tailcall is preceded with
+ * bpf2bpf call, and the bpf2bpf call is traced by fexit.
+ */
+static void test_tailcall_bpf2bpf_fexit(void)
+{
+ test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
+}
+
+/* test_tailcall_bpf2bpf_fentry_fexit checks that the count value of the tail
+ * call limit enforcement matches with expectations when tailcall is preceded
+ * with bpf2bpf call, and the bpf2bpf call is traced by both fentry and fexit.
+ */
+static void test_tailcall_bpf2bpf_fentry_fexit(void)
+{
+ test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
+}
+
+/* test_tailcall_bpf2bpf_fentry_entry checks that the count value of the tail
+ * call limit enforcement matches with expectations when tailcall is preceded
+ * with bpf2bpf call, and the bpf2bpf caller is traced by fentry.
+ */
+static void test_tailcall_bpf2bpf_fentry_entry(void)
+{
+ struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL;
+ int err, map_fd, prog_fd, data_fd, i, val;
+ struct bpf_map *prog_array, *data_map;
+ struct bpf_link *fentry_link = NULL;
+ struct bpf_program *prog;
+ char buff[128] = {};
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = buff,
+ .data_size_in = sizeof(buff),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o",
+ BPF_PROG_TYPE_SCHED_CLS,
+ &tgt_obj, &prog_fd);
+ if (!ASSERT_OK(err, "load tgt_obj"))
+ return;
+
+ prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table");
+ if (!ASSERT_OK_PTR(prog_array, "find jmp_table map"))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0");
+ if (!ASSERT_OK_PTR(prog, "find classifier_0 prog"))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "update jmp_table"))
+ goto out;
+
+ fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
+ NULL);
+ if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
+ if (!ASSERT_OK_PTR(prog, "find fentry prog"))
+ goto out;
+
+ err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0");
+ if (!ASSERT_OK(err, "set_attach_target classifier_0"))
+ goto out;
+
+ err = bpf_object__load(fentry_obj);
+ if (!ASSERT_OK(err, "load fentry_obj"))
+ goto out;
+
+ fentry_link = bpf_program__attach_trace(prog);
+ if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 1, "tailcall retval");
+
+ data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall.bss map"))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ ASSERT_OK(err, "tailcall count");
+ ASSERT_EQ(val, 34, "tailcall count");
+
+ data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall_bpf2bpf_fentry.bss map"))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_FALSE(data_fd < 0,
+ "find tailcall_bpf2bpf_fentry.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ ASSERT_OK(err, "fentry count");
+ ASSERT_EQ(val, 1, "fentry count");
+
+out:
+ bpf_link__destroy(fentry_link);
+ bpf_object__close(fentry_obj);
+ bpf_object__close(tgt_obj);
+}
+
+#define JMP_TABLE "/sys/fs/bpf/jmp_table"
+
+static int poke_thread_exit;
+
+static void *poke_update(void *arg)
+{
+ __u32 zero = 0, prog1_fd, prog2_fd, map_fd;
+ struct tailcall_poke *call = arg;
+
+ map_fd = bpf_map__fd(call->maps.jmp_table);
+ prog1_fd = bpf_program__fd(call->progs.call1);
+ prog2_fd = bpf_program__fd(call->progs.call2);
+
+ while (!poke_thread_exit) {
+ bpf_map_update_elem(map_fd, &zero, &prog1_fd, BPF_ANY);
+ bpf_map_update_elem(map_fd, &zero, &prog2_fd, BPF_ANY);
+ }
+
+ return NULL;
+}
+
+/*
+ * We are trying to hit prog array update during another program load
+ * that shares the same prog array map.
+ *
+ * For that we share the jmp_table map between two skeleton instances
+ * by pinning the jmp_table to the same path. The first skeleton instance
+ * then updates jmp_table periodically in the 'poke update' thread while
+ * we load the second skeleton instance in the main thread.
+ */
+static void test_tailcall_poke(void)
+{
+ struct tailcall_poke *call, *test;
+ int err, cnt = 10;
+ pthread_t thread;
+
+ unlink(JMP_TABLE);
+
+ call = tailcall_poke__open_and_load();
+ if (!ASSERT_OK_PTR(call, "tailcall_poke__open"))
+ return;
+
+ err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE);
+ if (!ASSERT_OK(err, "bpf_map__pin"))
+ goto out;
+
+ err = pthread_create(&thread, NULL, poke_update, call);
+ if (!ASSERT_OK(err, "new toggler"))
+ goto out;
+
+ while (cnt--) {
+ test = tailcall_poke__open();
+ if (!ASSERT_OK_PTR(test, "tailcall_poke__open"))
+ break;
+
+ err = bpf_map__set_pin_path(test->maps.jmp_table, JMP_TABLE);
+ if (!ASSERT_OK(err, "bpf_map__pin")) {
+ tailcall_poke__destroy(test);
+ break;
+ }
+
+ bpf_program__set_autoload(test->progs.test, true);
+ bpf_program__set_autoload(test->progs.call1, false);
+ bpf_program__set_autoload(test->progs.call2, false);
+
+ err = tailcall_poke__load(test);
+ tailcall_poke__destroy(test);
+ if (!ASSERT_OK(err, "tailcall_poke__load"))
+ break;
+ }
+
+ poke_thread_exit = 1;
+ ASSERT_OK(pthread_join(thread, NULL), "pthread_join");
+
+out:
+ bpf_map__unpin(call->maps.jmp_table, JMP_TABLE);
+ tailcall_poke__destroy(call);
+}
+
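+/* Common helper for the hierarchy tests: wire the entry program (or an
+ * fentry program attached to it) into jmp_table so it tail-calls itself,
+ * optionally trace subprog_tail with fentry/fexit, and compare the counters
+ * in the respective .bss sections against the shared tail call limit.
+ */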
+static void test_tailcall_hierarchy_count(const char *which, bool test_fentry,
+ bool test_fexit,
+ bool test_fentry_entry)
+{
+ int err, map_fd, prog_fd, main_data_fd, fentry_data_fd = 0, fexit_data_fd = 0, i, val;
+ struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
+ struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
+ struct bpf_program *prog, *fentry_prog;
+ struct bpf_map *prog_array, *data_map;
+ int fentry_prog_fd;
+ char buff[128] = {};
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = buff,
+ .data_size_in = sizeof(buff),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &prog_fd);
+ if (!ASSERT_OK(err, "load obj"))
+ return;
+
+ prog = bpf_object__find_program_by_name(obj, "entry");
+ if (!ASSERT_OK_PTR(prog, "find entry prog"))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
+ goto out;
+
+ if (test_fentry_entry) {
+ fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_hierarchy_fentry.bpf.o",
+ NULL);
+ if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
+ goto out;
+
+ fentry_prog = bpf_object__find_program_by_name(fentry_obj,
+ "fentry");
+ if (!ASSERT_OK_PTR(prog, "find fentry prog"))
+ goto out;
+
+ err = bpf_program__set_attach_target(fentry_prog, prog_fd,
+ "entry");
+ if (!ASSERT_OK(err, "set_attach_target entry"))
+ goto out;
+
+ err = bpf_object__load(fentry_obj);
+ if (!ASSERT_OK(err, "load fentry_obj"))
+ goto out;
+
+ fentry_link = bpf_program__attach_trace(fentry_prog);
+ if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
+ goto out;
+
+ fentry_prog_fd = bpf_program__fd(fentry_prog);
+ if (!ASSERT_GE(fentry_prog_fd, 0, "fentry_prog_fd"))
+ goto out;
+
+ prog_array = bpf_object__find_map_by_name(fentry_obj, "jmp_table");
+ if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (!ASSERT_GE(map_fd, 0, "map_fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_update_elem(map_fd, &i, &fentry_prog_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "update jmp_table"))
+ goto out;
+
+ data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find data_map"))
+ goto out;
+
+ } else {
+ prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
+ if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (!ASSERT_GE(map_fd, 0, "map_fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "update jmp_table"))
+ goto out;
+
+ data_map = bpf_object__find_map_by_name(obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find data_map"))
+ goto out;
+ }
+
+ if (test_fentry) {
+ fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
+ NULL);
+ if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
+ if (!ASSERT_OK_PTR(prog, "find fentry prog"))
+ goto out;
+
+ err = bpf_program__set_attach_target(prog, prog_fd,
+ "subprog_tail");
+ if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
+ goto out;
+
+ err = bpf_object__load(fentry_obj);
+ if (!ASSERT_OK(err, "load fentry_obj"))
+ goto out;
+
+ fentry_link = bpf_program__attach_trace(prog);
+ if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
+ goto out;
+ }
+
+ if (test_fexit) {
+ fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
+ NULL);
+ if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
+ if (!ASSERT_OK_PTR(prog, "find fexit prog"))
+ goto out;
+
+ err = bpf_program__set_attach_target(prog, prog_fd,
+ "subprog_tail");
+ if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
+ goto out;
+
+ err = bpf_object__load(fexit_obj);
+ if (!ASSERT_OK(err, "load fexit_obj"))
+ goto out;
+
+ fexit_link = bpf_program__attach_trace(prog);
+ if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
+ goto out;
+ }
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 1, "tailcall retval");
+
+ main_data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_GE(main_data_fd, 0, "main_data_fd"))
+ goto out;
+
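+ /* The asserts below suggest that, with one shared tail call counter,
+ * the entry program runs 34 times (initial run plus 33 tail calls)
+ * and the subprog_tail tracers fire twice per run, hence 68.
+ */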
+ i = 0;
+ err = bpf_map_lookup_elem(main_data_fd, &i, &val);
+ ASSERT_OK(err, "tailcall count");
+ ASSERT_EQ(val, 34, "tailcall count");
+
+ if (test_fentry) {
+ data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall_bpf2bpf_fentry.bss map"))
+ goto out;
+
+ fentry_data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_GE(fentry_data_fd, 0,
+ "find tailcall_bpf2bpf_fentry.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
+ ASSERT_OK(err, "fentry count");
+ ASSERT_EQ(val, 68, "fentry count");
+ }
+
+ if (test_fexit) {
+ data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall_bpf2bpf_fexit.bss map"))
+ goto out;
+
+ fexit_data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_GE(fexit_data_fd, 0,
+ "find tailcall_bpf2bpf_fexit.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
+ ASSERT_OK(err, "fexit count");
+ ASSERT_EQ(val, 68, "fexit count");
+ }
+
+ i = 0;
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (!ASSERT_OK(err, "delete_elem from jmp_table"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 1, "tailcall retval");
+
+ i = 0;
+ err = bpf_map_lookup_elem(main_data_fd, &i, &val);
+ ASSERT_OK(err, "tailcall count");
+ ASSERT_EQ(val, 35, "tailcall count");
+
+ if (test_fentry) {
+ i = 0;
+ err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
+ ASSERT_OK(err, "fentry count");
+ ASSERT_EQ(val, 70, "fentry count");
+ }
+
+ if (test_fexit) {
+ i = 0;
+ err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
+ ASSERT_OK(err, "fexit count");
+ ASSERT_EQ(val, 70, "fexit count");
+ }
+
+out:
+ bpf_link__destroy(fentry_link);
+ bpf_link__destroy(fexit_link);
+ bpf_object__close(fentry_obj);
+ bpf_object__close(fexit_obj);
bpf_object__close(obj);
}
+/* test_tailcall_bpf2bpf_hierarchy_1 checks that the count value of the tail
+ * call limit enforcement matches expectations when tail calls are preceded
+ * by two bpf2bpf calls.
+ *
+ * subprog --tailcall-> entry
+ * entry <
+ * subprog --tailcall-> entry
+ */
+static void test_tailcall_bpf2bpf_hierarchy_1(void)
+{
+ test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
+ false, false, false);
+}
+
+/* test_tailcall_bpf2bpf_hierarchy_fentry checks that the count value of the
+ * tail call limit enforcement matches expectations when tail calls are
+ * preceded by two bpf2bpf calls, and the two subprogs are traced by fentry.
+ */
+static void test_tailcall_bpf2bpf_hierarchy_fentry(void)
+{
+ test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
+ true, false, false);
+}
+
+/* test_tailcall_bpf2bpf_hierarchy_fexit checks that the count value of the
+ * tail call limit enforcement matches expectations when tail calls are
+ * preceded by two bpf2bpf calls, and the two subprogs are traced by fexit.
+ */
+static void test_tailcall_bpf2bpf_hierarchy_fexit(void)
+{
+ test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
+ false, true, false);
+}
+
+/* test_tailcall_bpf2bpf_hierarchy_fentry_fexit checks that the count value of
+ * the tail call limit enforcement matches expectations when tail calls are
+ * preceded by two bpf2bpf calls, and the two subprogs are traced by both
+ * fentry and fexit.
+ */
+static void test_tailcall_bpf2bpf_hierarchy_fentry_fexit(void)
+{
+ test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
+ true, true, false);
+}
+
+/* test_tailcall_bpf2bpf_hierarchy_fentry_entry checks that the count value of
+ * the tail call limit enforcement matches expectations when tail calls are
+ * preceded by two bpf2bpf calls in fentry.
+ */
+static void test_tailcall_bpf2bpf_hierarchy_fentry_entry(void)
+{
+ test_tailcall_hierarchy_count("tc_dummy.bpf.o", false, false, true);
+}
+
+/* test_tailcall_bpf2bpf_hierarchy_2 checks that the count value of the tail
+ * call limit enforcement matches expectations:
+ *
+ * subprog_tail0 --tailcall-> classifier_0 -> subprog_tail0
+ * entry <
+ * subprog_tail1 --tailcall-> classifier_1 -> subprog_tail1
+ */
+static void test_tailcall_bpf2bpf_hierarchy_2(void)
+{
+ RUN_TESTS(tailcall_bpf2bpf_hierarchy2);
+}
+
+/* test_tailcall_bpf2bpf_hierarchy_3 checks that the count value of the tail
+ * call limit enforcement matches expectations:
+ *
+ * subprog with jmp_table0 to classifier_0
+ * entry --tailcall-> classifier_0 <
+ * subprog with jmp_table1 to classifier_0
+ */
+static void test_tailcall_bpf2bpf_hierarchy_3(void)
+{
+ RUN_TESTS(tailcall_bpf2bpf_hierarchy3);
+}
+
+/* test_tailcall_freplace checks that an freplace prog fails to update the
+ * prog_array map, whether or not the freplace prog is attached to its target.
+ */
+static void test_tailcall_freplace(void)
+{
+ struct tailcall_freplace *freplace_skel = NULL;
+ struct bpf_link *freplace_link = NULL;
+ struct bpf_program *freplace_prog;
+ struct tc_bpf2bpf *tc_skel = NULL;
+ int prog_fd, tc_prog_fd, map_fd;
+ char buff[128] = {};
+ int err, key;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = buff,
+ .data_size_in = sizeof(buff),
+ .repeat = 1,
+ );
+
+ freplace_skel = tailcall_freplace__open();
+ if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
+ return;
+
+ tc_skel = tc_bpf2bpf__open_and_load();
+ if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
+ goto out;
+
+ tc_prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
+ freplace_prog = freplace_skel->progs.entry_freplace;
+ err = bpf_program__set_attach_target(freplace_prog, tc_prog_fd,
+ "subprog_tc");
+ if (!ASSERT_OK(err, "set_attach_target"))
+ goto out;
+
+ err = tailcall_freplace__load(freplace_skel);
+ if (!ASSERT_OK(err, "tailcall_freplace__load"))
+ goto out;
+
+ map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
+ prog_fd = bpf_program__fd(freplace_prog);
+ key = 0;
+ err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+ ASSERT_ERR(err, "update jmp_table failure");
+
+ freplace_link = bpf_program__attach_freplace(freplace_prog, tc_prog_fd,
+ "subprog_tc");
+ if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+ ASSERT_ERR(err, "update jmp_table failure");
+
+out:
+ bpf_link__destroy(freplace_link);
+ tailcall_freplace__destroy(freplace_skel);
+ tc_bpf2bpf__destroy(tc_skel);
+}
+
+/* test_tailcall_bpf2bpf_freplace checks that attaching an freplace prog to a
+ * prog that currently sits in a prog_array map fails, and that updating an
+ * already-extended prog into a prog_array map fails as well.
+ */
+static void test_tailcall_bpf2bpf_freplace(void)
+{
+ struct tailcall_freplace *freplace_skel = NULL;
+ struct bpf_link *freplace_link = NULL;
+ struct tc_bpf2bpf *tc_skel = NULL;
+ char buff[128] = {};
+ int prog_fd, map_fd;
+ int err, key;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = buff,
+ .data_size_in = sizeof(buff),
+ .repeat = 1,
+ );
+
+ tc_skel = tc_bpf2bpf__open_and_load();
+ if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
+ goto out;
+
+ prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
+ freplace_skel = tailcall_freplace__open();
+ if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
+ goto out;
+
+ err = bpf_program__set_attach_target(freplace_skel->progs.entry_freplace,
+ prog_fd, "subprog_tc");
+ if (!ASSERT_OK(err, "set_attach_target"))
+ goto out;
+
+ err = tailcall_freplace__load(freplace_skel);
+ if (!ASSERT_OK(err, "tailcall_freplace__load"))
+ goto out;
+
+ /* OK to attach then detach freplace prog. */
+
+ freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
+ prog_fd, "subprog_tc");
+ if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
+ goto out;
+
+ err = bpf_link__destroy(freplace_link);
+ freplace_link = NULL;
+ if (!ASSERT_OK(err, "destroy link"))
+ goto out;
+
+ /* OK to update prog_array map then delete element from the map. */
+
+ key = 0;
+ map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
+ err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "update jmp_table"))
+ goto out;
+
+ err = bpf_map_delete_elem(map_fd, &key);
+ if (!ASSERT_OK(err, "delete_elem from jmp_table"))
+ goto out;
+
+ /* Attaching an freplace prog to a prog that sits in the prog_array must fail. */
+
+ err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "update jmp_table"))
+ goto out;
+
+ freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
+ prog_fd, "subprog_tc");
+ if (!ASSERT_ERR_PTR(freplace_link, "attach_freplace failure"))
+ goto out;
+
+ err = bpf_map_delete_elem(map_fd, &key);
+ if (!ASSERT_OK(err, "delete_elem from jmp_table"))
+ goto out;
+
+ /* Updating an freplace-extended prog into the prog_array must fail. */
+
+ freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
+ prog_fd, "subprog_tc");
+ if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+ if (!ASSERT_ERR(err, "update jmp_table failure"))
+ goto out;
+
+out:
+ bpf_link__destroy(freplace_link);
+ tailcall_freplace__destroy(freplace_skel);
+ tc_bpf2bpf__destroy(tc_skel);
+}
+
+static void test_tailcall_failure(void)
+{
+ RUN_TESTS(tailcall_fail);
+}
+
void test_tailcalls(void)
{
if (test__start_subtest("tailcall_1"))
@@ -808,6 +1665,8 @@ void test_tailcalls(void)
test_tailcall_4();
if (test__start_subtest("tailcall_5"))
test_tailcall_5();
+ if (test__start_subtest("tailcall_6"))
+ test_tailcall_6();
if (test__start_subtest("tailcall_bpf2bpf_1"))
test_tailcall_bpf2bpf_1();
if (test__start_subtest("tailcall_bpf2bpf_2"))
@@ -815,5 +1674,37 @@ void test_tailcalls(void)
if (test__start_subtest("tailcall_bpf2bpf_3"))
test_tailcall_bpf2bpf_3();
if (test__start_subtest("tailcall_bpf2bpf_4"))
- test_tailcall_bpf2bpf_4();
+ test_tailcall_bpf2bpf_4(false);
+ if (test__start_subtest("tailcall_bpf2bpf_5"))
+ test_tailcall_bpf2bpf_4(true);
+ if (test__start_subtest("tailcall_bpf2bpf_6"))
+ test_tailcall_bpf2bpf_6();
+ if (test__start_subtest("tailcall_bpf2bpf_fentry"))
+ test_tailcall_bpf2bpf_fentry();
+ if (test__start_subtest("tailcall_bpf2bpf_fexit"))
+ test_tailcall_bpf2bpf_fexit();
+ if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit"))
+ test_tailcall_bpf2bpf_fentry_fexit();
+ if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
+ test_tailcall_bpf2bpf_fentry_entry();
+ if (test__start_subtest("tailcall_poke"))
+ test_tailcall_poke();
+ if (test__start_subtest("tailcall_bpf2bpf_hierarchy_1"))
+ test_tailcall_bpf2bpf_hierarchy_1();
+ if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry"))
+ test_tailcall_bpf2bpf_hierarchy_fentry();
+ if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fexit"))
+ test_tailcall_bpf2bpf_hierarchy_fexit();
+ if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_fexit"))
+ test_tailcall_bpf2bpf_hierarchy_fentry_fexit();
+ if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_entry"))
+ test_tailcall_bpf2bpf_hierarchy_fentry_entry();
+ if (test__start_subtest("tailcall_bpf2bpf_hierarchy_2"))
+ test_tailcall_bpf2bpf_hierarchy_2();
+ if (test__start_subtest("tailcall_bpf2bpf_hierarchy_3"))
+ test_tailcall_bpf2bpf_hierarchy_3();
+ if (test__start_subtest("tailcall_freplace"))
+ test_tailcall_freplace();
+ if (test__start_subtest("tailcall_bpf2bpf_freplace"))
+ test_tailcall_bpf2bpf_freplace();
+ if (test__start_subtest("tailcall_failure"))
+ test_tailcall_failure();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
index 1bdc1d86a50c..3d34bab01e48 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
@@ -3,7 +3,7 @@
void test_task_fd_query_rawtp(void)
{
- const char *file = "./test_get_stack_rawtp.o";
+ const char *file = "./test_get_stack_rawtp.bpf.o";
__u64 probe_offset, probe_addr;
__u32 len, prog_id, fd_type;
struct bpf_object *obj;
@@ -11,7 +11,7 @@ void test_task_fd_query_rawtp(void)
__u32 duration = 0;
char buf[256];
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
index 3f131b8fe328..c91eda624657 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
@@ -4,7 +4,7 @@
static void test_task_fd_query_tp_core(const char *probe_name,
const char *tp_name)
{
- const char *file = "./test_tracepoint.o";
+ const char *file = "./test_tracepoint.bpf.o";
int err, bytes, efd, prog_fd, pmu_fd;
struct perf_event_attr attr = {};
__u64 probe_offset, probe_addr;
@@ -13,12 +13,17 @@ static void test_task_fd_query_tp_core(const char *probe_name,
__u32 duration = 0;
char buf[256];
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno))
goto close_prog;
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/%s/id", probe_name);
+ if (access("/sys/kernel/tracing/trace", F_OK) == 0) {
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/tracing/events/%s/id", probe_name);
+ } else {
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/%s/id", probe_name);
+ }
efd = open(buf, O_RDONLY, 0);
if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
goto close_prog;
diff --git a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c
new file mode 100644
index 000000000000..83b90335967a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#define _GNU_SOURCE
+#include <sys/wait.h>
+#include <test_progs.h>
+#include <unistd.h>
+
+#include "task_kfunc_failure.skel.h"
+#include "task_kfunc_success.skel.h"
+
+static struct task_kfunc_success *open_load_task_kfunc_skel(void)
+{
+ struct task_kfunc_success *skel;
+ int err;
+
+ skel = task_kfunc_success__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return NULL;
+
+ skel->bss->pid = getpid();
+
+ err = task_kfunc_success__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ return skel;
+
+cleanup:
+ task_kfunc_success__destroy(skel);
+ return NULL;
+}
+
+static void run_success_test(const char *prog_name)
+{
+ struct task_kfunc_success *skel;
+ int status;
+ pid_t child_pid;
+ struct bpf_program *prog;
+ struct bpf_link *link = NULL;
+
+ skel = open_load_task_kfunc_skel();
+ if (!ASSERT_OK_PTR(skel, "open_load_skel"))
+ return;
+
+ if (!ASSERT_OK(skel->bss->err, "pre_spawn_err"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "attached_link"))
+ goto cleanup;
+
+ child_pid = fork();
+ if (!ASSERT_GT(child_pid, -1, "child_pid"))
+ goto cleanup;
+ if (child_pid == 0)
+ _exit(0);
+ waitpid(child_pid, &status, 0);
+
+ ASSERT_OK(skel->bss->err, "post_wait_err");
+
+cleanup:
+ bpf_link__destroy(link);
+ task_kfunc_success__destroy(skel);
+}
+
+static int run_vpid_test(void *prog_name)
+{
+ struct task_kfunc_success *skel;
+ struct bpf_program *prog;
+ int prog_fd, err = 0;
+
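+ /* run_vpid_success_test() clones us with CLONE_NEWPID, so we should be
+ * PID 1 inside the new namespace
+ */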
+ if (getpid() != 1)
+ return 1;
+
+ skel = open_load_task_kfunc_skel();
+ if (!skel)
+ return 2;
+
+ if (skel->bss->err) {
+ err = 3;
+ goto cleanup;
+ }
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!prog) {
+ err = 4;
+ goto cleanup;
+ }
+
+ prog_fd = bpf_program__fd(prog);
+ if (prog_fd < 0) {
+ err = 5;
+ goto cleanup;
+ }
+
+ if (bpf_prog_test_run_opts(prog_fd, NULL)) {
+ err = 6;
+ goto cleanup;
+ }
+
+ if (skel->bss->err)
+ err = 7 + skel->bss->err;
+cleanup:
+ task_kfunc_success__destroy(skel);
+ return err;
+}
+
+static void run_vpid_success_test(const char *prog_name)
+{
+ const int stack_size = 1024 * 1024;
+ int child_pid, wstatus;
+ char *stack;
+
+ stack = (char *)malloc(stack_size);
+ if (!ASSERT_OK_PTR(stack, "clone_stack"))
+ return;
+
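+ /* clone() takes the highest address of the child stack, as the stack
+ * grows down on the architectures this test runs on
+ */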
+ child_pid = clone(run_vpid_test, stack + stack_size,
+ CLONE_NEWPID | SIGCHLD, (void *)prog_name);
+ if (!ASSERT_GT(child_pid, -1, "child_pid"))
+ goto cleanup;
+
+ if (!ASSERT_GT(waitpid(child_pid, &wstatus, 0), -1, "waitpid"))
+ goto cleanup;
+
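+ /* run_vpid_test() encodes BPF-side errors as 7 + skel->bss->err;
+ * smaller exit codes indicate a setup failure
+ */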
+ if (WEXITSTATUS(wstatus) > 7)
+ ASSERT_OK(WEXITSTATUS(wstatus) - 7, "vpid_test_failure");
+ else
+ ASSERT_OK(WEXITSTATUS(wstatus), "run_vpid_test_err");
+cleanup:
+ free(stack);
+}
+
+static const char * const success_tests[] = {
+ "test_task_acquire_release_argument",
+ "test_task_acquire_release_current",
+ "test_task_acquire_leave_in_map",
+ "test_task_xchg_release",
+ "test_task_map_acquire_release",
+ "test_task_current_acquire_release",
+ "test_task_from_pid_arg",
+ "test_task_from_pid_current",
+ "test_task_from_pid_invalid",
+ "task_kfunc_acquire_trusted_walked",
+ "test_task_kfunc_flavor_relo",
+ "test_task_kfunc_flavor_relo_not_found",
+};
+
+static const char * const vpid_success_tests[] = {
+ "test_task_from_vpid_current",
+ "test_task_from_vpid_invalid",
+};
+
+void test_task_kfunc(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
+ if (!test__start_subtest(success_tests[i]))
+ continue;
+
+ run_success_test(success_tests[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vpid_success_tests); i++) {
+ if (!test__start_subtest(vpid_success_tests[i]))
+ continue;
+
+ run_vpid_success_test(vpid_success_tests[i]);
+ }
+
+ RUN_TESTS(task_kfunc_failure);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_data.h b/tools/testing/selftests/bpf/prog_tests/task_local_data.h
new file mode 100644
index 000000000000..2de38776a2d4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_local_data.h
@@ -0,0 +1,386 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __TASK_LOCAL_DATA_H
+#define __TASK_LOCAL_DATA_H
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdatomic.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+
+#ifdef TLD_FREE_DATA_ON_THREAD_EXIT
+#include <pthread.h>
+#endif
+
+#include <bpf/bpf.h>
+
+/*
+ * OPTIONS
+ *
+ * Define the option before including the header
+ *
+ * TLD_FREE_DATA_ON_THREAD_EXIT - Frees memory on thread exit automatically
+ *
+ * Thread-specific memory for storing TLDs is allocated lazily on the first call to
+ * tld_get_data(). Any thread that calls it must also call tld_free() on thread exit
+ * to prevent a memory leak. Pthread will be linked in if the option is defined, and a
+ * pthread key will be registered with a destructor that calls tld_free().
+ *
+ *
+ * TLD_DYN_DATA_SIZE - The maximum size of memory allocated for TLDs created dynamically
+ * (default: 64 bytes)
+ *
+ * A TLD can be defined statically using TLD_DEFINE_KEY() or created on the fly using
+ * tld_create_key(). As the total size of TLDs created with tld_create_key() cannot be
+ * possibly known statically, a memory area of size TLD_DYN_DATA_SIZE will be allocated
+ * for these TLDs. This additional memory is allocated for every thread that calls
+ * tld_get_data() even if no tld_create_key are actually called, so be mindful of
+ * potential memory wastage. Use TLD_DEFINE_KEY() whenever possible as just enough memory
+ * will be allocated for TLDs created with it.
+ *
+ *
+ * TLD_NAME_LEN - The maximum length of the name of a TLD (default: 62)
+ *
+ * Setting TLD_NAME_LEN will affect the maximum number of TLDs a process can store,
+ * TLD_MAX_DATA_CNT.
+ *
+ *
+ * TLD_DATA_USE_ALIGNED_ALLOC - Always use aligned_alloc() instead of malloc()
+ *
+ * When allocating the memory for storing TLDs, we need to make sure there is a memory
+ * region of X bytes within a single page. This is due to the limit posed by UPTR:
+ * memory pinned to the kernel cannot exceed a page nor can it cross the page boundary.
+ * The library normally calls malloc(2*X) given X bytes of total TLDs, and only uses
+ * aligned_alloc(PAGE_SIZE, X) when X >= PAGE_SIZE / 2. This reduces memory waste, as
+ * not every memory allocator can fulfill aligned_alloc() with exactly the requested
+ * amount of memory; some may round the size up to the alignment. Enable the option to
+ * always use aligned_alloc() if the implementation has low memory overhead.
+ */
+
+#define TLD_PAGE_SIZE getpagesize()
+#define TLD_PAGE_MASK (~(TLD_PAGE_SIZE - 1))
+
+#define TLD_ROUND_MASK(x, y) ((__typeof__(x))((y) - 1))
+#define TLD_ROUND_UP(x, y) ((((x) - 1) | TLD_ROUND_MASK(x, y)) + 1)
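+/* e.g. TLD_ROUND_UP(13, 8) == 16; y must be a power of two */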
+
+#define TLD_READ_ONCE(x) (*(volatile typeof(x) *)&(x))
+
+#ifndef TLD_DYN_DATA_SIZE
+#define TLD_DYN_DATA_SIZE 64
+#endif
+
+#define TLD_MAX_DATA_CNT (TLD_PAGE_SIZE / sizeof(struct tld_metadata) - 1)
+
+#ifndef TLD_NAME_LEN
+#define TLD_NAME_LEN 62
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+ __s16 off;
+} tld_key_t;
+
+struct tld_metadata {
+ char name[TLD_NAME_LEN];
+ _Atomic __u16 size;
+};
+
+struct tld_meta_u {
+ _Atomic __u8 cnt;
+ __u16 size;
+ struct tld_metadata metadata[];
+};
+
+struct tld_data_u {
+ __u64 start; /* offset of tld_data_u->data in a page */
+ char data[];
+};
+
+struct tld_map_value {
+ void *data;
+ struct tld_meta_u *meta;
+};
+
+struct tld_meta_u * _Atomic tld_meta_p __attribute__((weak));
+__thread struct tld_data_u *tld_data_p __attribute__((weak));
+__thread void *tld_data_alloc_p __attribute__((weak));
+
+#ifdef TLD_FREE_DATA_ON_THREAD_EXIT
+pthread_key_t tld_pthread_key __attribute__((weak));
+
+static void tld_free(void);
+
+static void __tld_thread_exit_handler(void *unused)
+{
+ tld_free();
+}
+#endif
+
+static int __tld_init_meta_p(void)
+{
+ struct tld_meta_u *meta, *uninit = NULL;
+ int err = 0;
+
+ meta = (struct tld_meta_u *)aligned_alloc(TLD_PAGE_SIZE, TLD_PAGE_SIZE);
+ if (!meta) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ memset(meta, 0, TLD_PAGE_SIZE);
+ meta->size = TLD_DYN_DATA_SIZE;
+
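+ /* another thread won the race to publish the shared metadata page */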
+ if (!atomic_compare_exchange_strong(&tld_meta_p, &uninit, meta)) {
+ free(meta);
+ goto out;
+ }
+
+#ifdef TLD_FREE_DATA_ON_THREAD_EXIT
+ pthread_key_create(&tld_pthread_key, __tld_thread_exit_handler);
+#endif
+out:
+ return err;
+}
+
+static int __tld_init_data_p(int map_fd)
+{
+ bool use_aligned_alloc = false;
+ struct tld_map_value map_val;
+ struct tld_data_u *data;
+ void *data_alloc = NULL;
+ int err, tid_fd = -1;
+
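+ /* O_EXCL doubles as PIDFD_THREAD in the pidfd_open() uapi, so this opens
+ * a pidfd for the calling thread rather than the thread-group leader
+ */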
+ tid_fd = syscall(SYS_pidfd_open, sys_gettid(), O_EXCL);
+ if (tid_fd < 0) {
+ err = -errno;
+ goto out;
+ }
+
+#ifdef TLD_DATA_USE_ALIGNED_ALLOC
+ use_aligned_alloc = true;
+#endif
+
+ /*
+ * tld_meta_p->size = TLD_DYN_DATA_SIZE +
+ * total size of TLDs defined via TLD_DEFINE_KEY()
+ */
+ data_alloc = (use_aligned_alloc || tld_meta_p->size * 2 >= TLD_PAGE_SIZE) ?
+ aligned_alloc(TLD_PAGE_SIZE, tld_meta_p->size) :
+ malloc(tld_meta_p->size * 2);
+ if (!data_alloc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Always pass a page-aligned address to UPTR since the size of tld_map_value::data
+ * is a page in BTF. If data_alloc spans two pages, use the page that contains
+ * enough room for all the TLDs.
+ */
+ if (TLD_PAGE_SIZE - (~TLD_PAGE_MASK & (intptr_t)data_alloc) >= tld_meta_p->size) {
+ map_val.data = (void *)(TLD_PAGE_MASK & (intptr_t)data_alloc);
+ data = data_alloc;
+ data->start = (~TLD_PAGE_MASK & (intptr_t)data_alloc) +
+ offsetof(struct tld_data_u, data);
+ } else {
+ map_val.data = (void *)(TLD_ROUND_UP((intptr_t)data_alloc, TLD_PAGE_SIZE));
+ data = (void *)(TLD_ROUND_UP((intptr_t)data_alloc, TLD_PAGE_SIZE));
+ data->start = offsetof(struct tld_data_u, data);
+ }
+ map_val.meta = TLD_READ_ONCE(tld_meta_p);
+
+ err = bpf_map_update_elem(map_fd, &tid_fd, &map_val, 0);
+ if (err) {
+ free(data_alloc);
+ goto out;
+ }
+
+ tld_data_p = data;
+ tld_data_alloc_p = data_alloc;
+#ifdef TLD_FREE_DATA_ON_THREAD_EXIT
+ pthread_setspecific(tld_pthread_key, (void *)1);
+#endif
+out:
+ if (tid_fd >= 0)
+ close(tid_fd);
+ return err;
+}
+
+static tld_key_t __tld_create_key(const char *name, size_t size, bool dyn_data)
+{
+ int err, i, sz, off = 0;
+ __u8 cnt;
+
+ if (!TLD_READ_ONCE(tld_meta_p)) {
+ err = __tld_init_meta_p();
+ if (err)
+ return (tld_key_t){err};
+ }
+
+ for (i = 0; i < TLD_MAX_DATA_CNT; i++) {
+retry:
+ cnt = atomic_load(&tld_meta_p->cnt);
+ if (i < cnt) {
+ /* A metadata entry is not ready until its size is updated with a non-zero value */
+ while (!(sz = atomic_load(&tld_meta_p->metadata[i].size)))
+ sched_yield();
+
+ if (!strncmp(tld_meta_p->metadata[i].name, name, TLD_NAME_LEN))
+ return (tld_key_t){-EEXIST};
+
+ off += TLD_ROUND_UP(sz, 8);
+ continue;
+ }
+
+ /*
+ * TLD_DEFINE_KEY() is given memory up to a page, while at most
+ * TLD_DYN_DATA_SIZE is allocated for tld_create_key()
+ */
+ if (dyn_data) {
+ if (off + TLD_ROUND_UP(size, 8) > tld_meta_p->size)
+ return (tld_key_t){-E2BIG};
+ } else {
+ if (off + TLD_ROUND_UP(size, 8) > TLD_PAGE_SIZE - sizeof(struct tld_data_u))
+ return (tld_key_t){-E2BIG};
+ tld_meta_p->size += TLD_ROUND_UP(size, 8);
+ }
+
+ /*
+ * Only one tld_create_key() can increase the current cnt by one and
+ * take the newly available slot. Other threads re-check whether a new
+ * TLD can still be added, then compete for the next slot once the
+ * winning thread has updated the size.
+ */
+ if (!atomic_compare_exchange_strong(&tld_meta_p->cnt, &cnt, cnt + 1))
+ goto retry;
+
+ strncpy(tld_meta_p->metadata[i].name, name, TLD_NAME_LEN);
+ atomic_store(&tld_meta_p->metadata[i].size, size);
+ return (tld_key_t){(__s16)off};
+ }
+
+ return (tld_key_t){-ENOSPC};
+}
+
+/**
+ * TLD_DEFINE_KEY() - Define a TLD and a global variable key associated with the TLD.
+ *
+ * @key: The variable name of the key
+ * @name: The name of the TLD. Cannot exceed TLD_NAME_LEN
+ * @size: The size of the TLD
+ *
+ * The macro can only be used in file scope.
+ *
+ * A global variable key of opaque type, tld_key_t, will be declared and initialized before
+ * main() starts. Use tld_key_is_err() or tld_key_err_or_zero() later to check if the key
+ * creation succeeded. Pass the key to tld_get_data() to get a pointer to the TLD.
+ * bpf programs can also fetch the same key by name.
+ *
+ * The total size of TLDs created using TLD_DEFINE_KEY() cannot exceed a page. Just
+ * enough memory will be allocated for each thread on the first call to tld_get_data().
+ */
+#define TLD_DEFINE_KEY(key, name, size) \
+tld_key_t key; \
+ \
+__attribute__((constructor)) \
+void __tld_define_key_##key(void) \
+{ \
+ key = __tld_create_key(name, size, false); \
+}
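+
+/*
+ * A minimal usage sketch (hypothetical key and TLD names; map_fd must refer
+ * to the underlying task local data map):
+ *
+ *	TLD_DEFINE_KEY(prio_key, "priority", sizeof(int));
+ *
+ *	void set_prio(int map_fd, int prio)
+ *	{
+ *		int *p = tld_get_data(map_fd, prio_key);
+ *
+ *		if (p)
+ *			*p = prio;
+ *	}
+ */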
+
+/**
+ * tld_create_key() - Create a TLD and return a key associated with the TLD.
+ *
+ * @name: The name of the TLD
+ * @size: The size of the TLD
+ *
+ * Return an opaque object key. Use tld_key_is_err() or tld_key_err_or_zero() to check
+ * if the key creation succeeded. Pass the key to tld_get_data() to get a pointer to
+ * the TLD. bpf programs can also fetch the same key by name.
+ *
+ * Use tld_create_key() only when a TLD needs to be created dynamically (e.g., @name is
+ * not known statically or a TLD needs to be created conditionally).
+ *
+ * An additional TLD_DYN_DATA_SIZE bytes are allocated per thread to accommodate TLDs
+ * created dynamically with tld_create_key(). Since only one user page is pinned to the
+ * kernel, when TLDs created with TLD_DEFINE_KEY() use more than TLD_PAGE_SIZE -
+ * TLD_DYN_DATA_SIZE, the buffer size will be limited to the rest of the page.
+ */
+__attribute__((unused))
+static tld_key_t tld_create_key(const char *name, size_t size)
+{
+ return __tld_create_key(name, size, true);
+}
+
+__attribute__((unused))
+static inline bool tld_key_is_err(tld_key_t key)
+{
+ return key.off < 0;
+}
+
+__attribute__((unused))
+static inline int tld_key_err_or_zero(tld_key_t key)
+{
+ return tld_key_is_err(key) ? key.off : 0;
+}
+
+/**
+ * tld_get_data() - Get a pointer to the TLD associated with the given key of the
+ * calling thread.
+ *
+ * @map_fd: A file descriptor of tld_data_map, the underlying BPF task local storage map
+ * of task local data.
+ * @key: A key object created by TLD_DEFINE_KEY() or tld_create_key().
+ *
+ * Return a pointer to the TLD if the key is valid; NULL if memory could not be
+ * allocated for this thread or the key is invalid. The returned pointer is
+ * guaranteed to be 8-byte aligned.
+ *
+ * Threads that call tld_get_data() must call tld_free() on exit to prevent a
+ * memory leak if TLD_FREE_DATA_ON_THREAD_EXIT is not defined.
+ */
+__attribute__((unused))
+static void *tld_get_data(int map_fd, tld_key_t key)
+{
+ if (!TLD_READ_ONCE(tld_meta_p))
+ return NULL;
+
+ /* tld_data_p is allocated on the first invocation of tld_get_data() */
+ if (!tld_data_p && __tld_init_data_p(map_fd))
+ return NULL;
+
+ return tld_data_p->data + key.off;
+}
+
+/**
+ * tld_free() - Free task local data memory of the calling thread
+ *
+ * For the calling thread, all previously acquired pointers to TLDs become invalid.
+ *
+ * Users must call tld_free() on thread exit to prevent a memory leak. Alternatively,
+ * define TLD_FREE_DATA_ON_THREAD_EXIT and a thread exit handler will be registered
+ * to free the memory automatically.
+ */
+__attribute__((unused))
+static void tld_free(void)
+{
+ if (tld_data_alloc_p) {
+ free(tld_data_alloc_p);
+ tld_data_alloc_p = NULL;
+ tld_data_p = NULL;
+ }
+}
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* __TASK_LOCAL_DATA_H */
diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
new file mode 100644
index 000000000000..42e822ea352f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#define _GNU_SOURCE /* See feature_test_macros(7) */
+#include <unistd.h>
+#include <sched.h>
+#include <pthread.h>
+#include <sys/syscall.h> /* For SYS_xxx definitions */
+#include <sys/types.h>
+#include <sys/eventfd.h>
+#include <sys/mman.h>
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "task_local_storage_helpers.h"
+#include "task_local_storage.skel.h"
+#include "task_local_storage_exit_creds.skel.h"
+#include "task_ls_recursion.skel.h"
+#include "task_storage_nodeadlock.skel.h"
+#include "uptr_test_common.h"
+#include "task_ls_uptr.skel.h"
+#include "uptr_update_failure.skel.h"
+#include "uptr_failure.skel.h"
+#include "uptr_map_failure.skel.h"
+
+static void test_sys_enter_exit(void)
+{
+ struct task_local_storage *skel;
+ int err;
+
+ skel = task_local_storage__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ skel->bss->target_pid = sys_gettid();
+
+ err = task_local_storage__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ sys_gettid();
+ sys_gettid();
+
+ /* 3x syscalls: 1x attach and 2x gettid */
+ ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt");
+ ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt");
+ ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt");
+out:
+ task_local_storage__destroy(skel);
+}
+
+static void test_exit_creds(void)
+{
+ struct task_local_storage_exit_creds *skel;
+ int err, run_count, sync_rcu_calls = 0;
+ const int MAX_SYNC_RCU_CALLS = 1000;
+
+ skel = task_local_storage_exit_creds__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ err = task_local_storage_exit_creds__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ /* trigger at least one exit_creds() */
+ if (CHECK_FAIL(system("ls > /dev/null")))
+ goto out;
+
+ /* kern_sync_rcu is not enough on its own as the read section we want
+ * to wait for may start after we enter synchronize_rcu, so our call
+ * won't wait for the section to finish. Loop on the run counter
+ * as well to ensure the program has run.
+ */
+ do {
+ kern_sync_rcu();
+ run_count = __atomic_load_n(&skel->bss->run_count, __ATOMIC_SEQ_CST);
+ } while (run_count == 0 && ++sync_rcu_calls < MAX_SYNC_RCU_CALLS);
+
+ ASSERT_NEQ(sync_rcu_calls, MAX_SYNC_RCU_CALLS,
+ "sync_rcu count too high");
+ ASSERT_NEQ(run_count, 0, "run_count");
+ ASSERT_EQ(skel->bss->valid_ptr_count, 0, "valid_ptr_count");
+ ASSERT_NEQ(skel->bss->null_ptr_count, 0, "null_ptr_count");
+out:
+ task_local_storage_exit_creds__destroy(skel);
+}
+
+static void test_recursion(void)
+{
+ int err, map_fd, prog_fd, task_fd;
+ struct task_ls_recursion *skel;
+ struct bpf_prog_info info;
+ __u32 info_len = sizeof(info);
+ long value;
+
+ task_fd = sys_pidfd_open(getpid(), 0);
+ if (!ASSERT_NEQ(task_fd, -1, "sys_pidfd_open"))
+ return;
+
+ skel = task_ls_recursion__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ goto out;
+
+ err = task_ls_recursion__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ /* trigger sys_enter, make sure it does not cause deadlock */
+ skel->bss->test_pid = getpid();
+ sys_gettid();
+ skel->bss->test_pid = 0;
+ task_ls_recursion__detach(skel);
+
+ /* Refer to the comment in BPF_PROG(on_update) for
+ * the explanation on the value 201 and 100.
+ */
+ map_fd = bpf_map__fd(skel->maps.map_a);
+ err = bpf_map_lookup_elem(map_fd, &task_fd, &value);
+ ASSERT_OK(err, "lookup map_a");
+ ASSERT_EQ(value, 201, "map_a value");
+ ASSERT_EQ(skel->bss->nr_del_errs, 1, "bpf_task_storage_delete busy");
+
+ map_fd = bpf_map__fd(skel->maps.map_b);
+ err = bpf_map_lookup_elem(map_fd, &task_fd, &value);
+ ASSERT_OK(err, "lookup map_b");
+ ASSERT_EQ(value, 100, "map_b value");
+
+ prog_fd = bpf_program__fd(skel->progs.on_update);
+ memset(&info, 0, sizeof(info));
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
+ ASSERT_OK(err, "get prog info");
+ ASSERT_EQ(info.recursion_misses, 0, "on_update prog recursion");
+
+ prog_fd = bpf_program__fd(skel->progs.on_enter);
+ memset(&info, 0, sizeof(info));
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
+ ASSERT_OK(err, "get prog info");
+ ASSERT_EQ(info.recursion_misses, 0, "on_enter prog recursion");
+
+out:
+ close(task_fd);
+ task_ls_recursion__destroy(skel);
+}
+
+static bool stop;
+
+static void waitall(const pthread_t *tids, int nr)
+{
+ int i;
+
+ stop = true;
+ for (i = 0; i < nr; i++)
+ pthread_join(tids[i], NULL);
+}
+
+static void *sock_create_loop(void *arg)
+{
+ struct task_storage_nodeadlock *skel = arg;
+ int fd;
+
+ while (!stop) {
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ close(fd);
+ if (skel->bss->nr_get_errs || skel->bss->nr_del_errs)
+ stop = true;
+ }
+
+ return NULL;
+}
+
+static void test_nodeadlock(void)
+{
+ struct task_storage_nodeadlock *skel;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ const int nr_threads = 32;
+ pthread_t tids[nr_threads];
+ int i, prog_fd, err;
+ cpu_set_t old, new;
+
+ /* Pin all threads to one cpu to increase the chance of preemption
+ * in a sleepable bpf prog.
+ */
+ CPU_ZERO(&new);
+ CPU_SET(0, &new);
+ err = sched_getaffinity(getpid(), sizeof(old), &old);
+ if (!ASSERT_OK(err, "getaffinity"))
+ return;
+ err = sched_setaffinity(getpid(), sizeof(new), &new);
+ if (!ASSERT_OK(err, "setaffinity"))
+ return;
+
+ skel = task_storage_nodeadlock__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ goto done;
+
+ /* Spurious recursion and deadlock detection are reproducible
+ * on a preemptible kernel.
+ */
+ if (!skel->kconfig->CONFIG_PREEMPTION) {
+ test__skip();
+ goto done;
+ }
+
+ err = task_storage_nodeadlock__attach(skel);
+ ASSERT_OK(err, "attach prog");
+
+ for (i = 0; i < nr_threads; i++) {
+ err = pthread_create(&tids[i], NULL, sock_create_loop, skel);
+ if (err) {
+ /* Only assert once here to avoid excessive
+ * PASS printing during test failure.
+ */
+ ASSERT_OK(err, "pthread_create");
+ waitall(tids, i);
+ goto done;
+ }
+ }
+
+ /* With 32 threads, 1s is enough to reproduce the issue */
+ sleep(1);
+ waitall(tids, nr_threads);
+
+ info_len = sizeof(info);
+ prog_fd = bpf_program__fd(skel->progs.socket_post_create);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
+ ASSERT_OK(err, "get prog info");
+ ASSERT_EQ(info.recursion_misses, 0, "prog recursion");
+
+ ASSERT_EQ(skel->bss->nr_get_errs, 0, "bpf_task_storage_get busy");
+ ASSERT_EQ(skel->bss->nr_del_errs, 0, "bpf_task_storage_delete busy");
+
+done:
+ task_storage_nodeadlock__destroy(skel);
+ sched_setaffinity(getpid(), sizeof(old), &old);
+}
+
+static struct user_data udata __attribute__((aligned(16))) = {
+ .a = 1,
+ .b = 2,
+};
+
+static struct user_data udata2 __attribute__((aligned(16))) = {
+ .a = 3,
+ .b = 4,
+};
+
+static void check_udata2(int expected)
+{
+ udata2.result = udata2.nested_result = 0;
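+ /* any syscall from this thread should trigger the attached prog, which
+ * is expected to write 'expected' through the uptr fields checked below
+ */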
+ usleep(1);
+ ASSERT_EQ(udata2.result, expected, "udata2.result");
+ ASSERT_EQ(udata2.nested_result, expected, "udata2.nested_result");
+}
+
+static void test_uptr_basic(void)
+{
+ int map_fd, parent_task_fd, ev_fd;
+ struct value_type value = {};
+ struct task_ls_uptr *skel;
+ pid_t child_pid, my_tid;
+ __u64 ev_dummy_data = 1;
+ int err;
+
+ my_tid = sys_gettid();
+ parent_task_fd = sys_pidfd_open(my_tid, 0);
+ if (!ASSERT_OK_FD(parent_task_fd, "parent_task_fd"))
+ return;
+
+ ev_fd = eventfd(0, 0);
+ if (!ASSERT_OK_FD(ev_fd, "ev_fd")) {
+ close(parent_task_fd);
+ return;
+ }
+
+ skel = task_ls_uptr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ goto out;
+
+ map_fd = bpf_map__fd(skel->maps.datamap);
+ value.udata = &udata;
+ value.nested.udata = &udata;
+ err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "update_elem(udata)"))
+ goto out;
+
+ err = task_ls_uptr__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ child_pid = fork();
+ if (!ASSERT_NEQ(child_pid, -1, "fork"))
+ goto out;
+
+ /* Call syscall in the child process, but access the map value of
+ * the parent process in the BPF program to check if the user kptr
+ * is translated/mapped correctly.
+ */
+ if (child_pid == 0) {
+ /* child */
+
+ /* Overwrite the user_data in the child process to check if
+ * the BPF program accesses the user_data of the parent.
+ */
+ udata.a = 0;
+ udata.b = 0;
+
+ /* Wait for the parent to set child_pid */
+ read(ev_fd, &ev_dummy_data, sizeof(ev_dummy_data));
+ exit(0);
+ }
+
+ skel->bss->parent_pid = my_tid;
+ skel->bss->target_pid = child_pid;
+
+ write(ev_fd, &ev_dummy_data, sizeof(ev_dummy_data));
+
+ err = waitpid(child_pid, NULL, 0);
+ ASSERT_EQ(err, child_pid, "waitpid");
+ ASSERT_EQ(udata.result, MAGIC_VALUE + udata.a + udata.b, "udata.result");
+ ASSERT_EQ(udata.nested_result, MAGIC_VALUE + udata.a + udata.b, "udata.nested_result");
+
+ skel->bss->target_pid = my_tid;
+
+ /* update_elem: uptr changes from udata to udata2 */
+ value.udata = &udata2;
+ value.nested.udata = &udata2;
+ err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_EXIST);
+ if (!ASSERT_OK(err, "update_elem(udata2)"))
+ goto out;
+ check_udata2(MAGIC_VALUE + udata2.a + udata2.b);
+
+ /* update_elem: uptr changes from udata2 to NULL */
+ memset(&value, 0, sizeof(value));
+ err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_EXIST);
+ if (!ASSERT_OK(err, "update_elem(NULL)"))
+ goto out;
+ check_udata2(0);
+
+ /* update_elem: uptr changes from NULL to udata2 */
+ value.udata = &udata2;
+ value.nested.udata = &udata2;
+ err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_EXIST);
+ if (!ASSERT_OK(err, "update_elem(udata2)"))
+ goto out;
+ check_udata2(MAGIC_VALUE + udata2.a + udata2.b);
+
+ /* Check if user programs can access the value of user kptrs
+ * through bpf_map_lookup_elem(). Make sure the kernel value is not
+ * leaked.
+ */
+ err = bpf_map_lookup_elem(map_fd, &parent_task_fd, &value);
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
+ goto out;
+ ASSERT_EQ(value.udata, NULL, "value.udata");
+ ASSERT_EQ(value.nested.udata, NULL, "value.nested.udata");
+
+ /* delete_elem */
+ err = bpf_map_delete_elem(map_fd, &parent_task_fd);
+ ASSERT_OK(err, "delete_elem(udata2)");
+ check_udata2(0);
+
+ /* update_elem: add uptr back to test map_free */
+ value.udata = &udata2;
+ value.nested.udata = &udata2;
+ err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_NOEXIST);
+ ASSERT_OK(err, "update_elem(udata2)");
+
+out:
+ task_ls_uptr__destroy(skel);
+ close(ev_fd);
+ close(parent_task_fd);
+}
+
+static void test_uptr_across_pages(void)
+{
+ int page_size = getpagesize();
+ struct value_type value = {};
+ struct task_ls_uptr *skel;
+ int err, task_fd, map_fd;
+ void *mem;
+
+ task_fd = sys_pidfd_open(getpid(), 0);
+ if (!ASSERT_OK_FD(task_fd, "task_fd"))
+ return;
+
+ mem = mmap(NULL, page_size * 2, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (!ASSERT_OK_PTR(mem, "mmap(page_size * 2)")) {
+ close(task_fd);
+ return;
+ }
+
+ skel = task_ls_uptr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ goto out;
+
+ map_fd = bpf_map__fd(skel->maps.datamap);
+ value.udata = mem + page_size - offsetof(struct user_data, b);
+ err = bpf_map_update_elem(map_fd, &task_fd, &value, 0);
+ if (!ASSERT_ERR(err, "update_elem(udata)"))
+ goto out;
+ ASSERT_EQ(errno, EOPNOTSUPP, "errno");
+
+ value.udata = mem + page_size - sizeof(struct user_data);
+ err = bpf_map_update_elem(map_fd, &task_fd, &value, 0);
+ ASSERT_OK(err, "update_elem(udata)");
+
+out:
+ task_ls_uptr__destroy(skel);
+ close(task_fd);
+ munmap(mem, page_size * 2);
+}
+
+static void test_uptr_update_failure(void)
+{
+ struct value_lock_type value = {};
+ struct uptr_update_failure *skel;
+ int err, task_fd, map_fd;
+
+ task_fd = sys_pidfd_open(getpid(), 0);
+ if (!ASSERT_OK_FD(task_fd, "task_fd"))
+ return;
+
+ skel = uptr_update_failure__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ goto out;
+
+ map_fd = bpf_map__fd(skel->maps.datamap);
+
+ value.udata = &udata;
+ err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_F_LOCK);
+ if (!ASSERT_ERR(err, "update_elem(udata, BPF_F_LOCK)"))
+ goto out;
+ ASSERT_EQ(errno, EOPNOTSUPP, "errno");
+
+ err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_EXIST);
+ if (!ASSERT_ERR(err, "update_elem(udata, BPF_EXIST)"))
+ goto out;
+ ASSERT_EQ(errno, ENOENT, "errno");
+
+ err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "update_elem(udata, BPF_NOEXIST)"))
+ goto out;
+
+ value.udata = &udata2;
+ err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_NOEXIST);
+ if (!ASSERT_ERR(err, "update_elem(udata2, BPF_NOEXIST)"))
+ goto out;
+ ASSERT_EQ(errno, EEXIST, "errno");
+
+out:
+ uptr_update_failure__destroy(skel);
+ close(task_fd);
+}
+
+static void test_uptr_map_failure(const char *map_name, int expected_errno)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, create_attr);
+ struct uptr_map_failure *skel;
+ struct bpf_map *map;
+ struct btf *btf;
+ int map_fd, err;
+
+ skel = uptr_map_failure__open();
+ if (!ASSERT_OK_PTR(skel, "uptr_map_failure__open"))
+ return;
+
+ map = bpf_object__find_map_by_name(skel->obj, map_name);
+ btf = bpf_object__btf(skel->obj);
+ err = btf__load_into_kernel(btf);
+ if (!ASSERT_OK(err, "btf__load_into_kernel"))
+ goto done;
+
+ create_attr.map_flags = bpf_map__map_flags(map);
+ create_attr.btf_fd = btf__fd(btf);
+ create_attr.btf_key_type_id = bpf_map__btf_key_type_id(map);
+ create_attr.btf_value_type_id = bpf_map__btf_value_type_id(map);
+ map_fd = bpf_map_create(bpf_map__type(map), map_name,
+ bpf_map__key_size(map), bpf_map__value_size(map),
+ 0, &create_attr);
+ if (ASSERT_ERR_FD(map_fd, "map_create"))
+ ASSERT_EQ(errno, expected_errno, "errno");
+ else
+ close(map_fd);
+
+done:
+ uptr_map_failure__destroy(skel);
+}
+
+void test_task_local_storage(void)
+{
+ if (test__start_subtest("sys_enter_exit"))
+ test_sys_enter_exit();
+ if (test__start_subtest("exit_creds"))
+ test_exit_creds();
+ if (test__start_subtest("recursion"))
+ test_recursion();
+ if (test__start_subtest("nodeadlock"))
+ test_nodeadlock();
+ if (test__start_subtest("uptr_basic"))
+ test_uptr_basic();
+ if (test__start_subtest("uptr_across_pages"))
+ test_uptr_across_pages();
+ if (test__start_subtest("uptr_update_failure"))
+ test_uptr_update_failure();
+ if (test__start_subtest("uptr_map_failure_e2big")) {
+ if (getpagesize() == PAGE_SIZE)
+ test_uptr_map_failure("large_uptr_map", E2BIG);
+ else
+ test__skip();
+ }
+ if (test__start_subtest("uptr_map_failure_size0"))
+ test_uptr_map_failure("empty_uptr_map", EINVAL);
+ if (test__start_subtest("uptr_map_failure_kstruct"))
+ test_uptr_map_failure("kstruct_uptr_map", EINVAL);
+ RUN_TESTS(uptr_failure);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c
new file mode 100644
index 000000000000..f000734a3d1f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <test_progs.h>
+#include "test_task_pt_regs.skel.h"
+
+/* uprobe attach point */
+static noinline void trigger_func(void)
+{
+ asm volatile ("");
+}
+
+void test_task_pt_regs(void)
+{
+ struct test_task_pt_regs *skel;
+ struct bpf_link *uprobe_link;
+ ssize_t uprobe_offset;
+ bool match;
+
+ uprobe_offset = get_uprobe_offset(&trigger_func);
+ if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
+ return;
+
+ skel = test_task_pt_regs__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+ if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
+ goto cleanup;
+
+ uprobe_link = bpf_program__attach_uprobe(skel->progs.handle_uprobe,
+ false /* retprobe */,
+ 0 /* self pid */,
+ "/proc/self/exe",
+ uprobe_offset);
+ if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe"))
+ goto cleanup;
+ skel->links.handle_uprobe = uprobe_link;
+
+ /* trigger & validate uprobe */
+ trigger_func();
+
+ if (!ASSERT_EQ(skel->bss->uprobe_res, 1, "check_uprobe_res"))
+ goto cleanup;
+
+ match = !memcmp(&skel->bss->current_regs, &skel->bss->ctx_regs,
+ sizeof(skel->bss->current_regs));
+ ASSERT_TRUE(match, "check_regs_match");
+
+cleanup:
+ test_task_pt_regs__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c
new file mode 100644
index 000000000000..626d76fe43a2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Bytedance */
+
+#include <sys/syscall.h>
+#include <test_progs.h>
+#include <cgroup_helpers.h>
+#include "test_task_under_cgroup.skel.h"
+
+#define FOO "/foo"
+
+void test_task_under_cgroup(void)
+{
+ struct test_task_under_cgroup *skel;
+ int ret, foo;
+ pid_t pid;
+
+ foo = test__join_cgroup(FOO);
+ if (!ASSERT_OK(foo < 0, "cgroup_join_foo"))
+ return;
+
+ skel = test_task_under_cgroup__open();
+ if (!ASSERT_OK_PTR(skel, "test_task_under_cgroup__open"))
+ goto cleanup;
+
+ skel->rodata->local_pid = getpid();
+ skel->bss->remote_pid = getpid();
+ skel->rodata->cgid = get_cgroup_id(FOO);
+
+ ret = test_task_under_cgroup__load(skel);
+ if (!ASSERT_OK(ret, "test_task_under_cgroup__load"))
+ goto cleanup;
+
+ /* First, attach the LSM program, and then it will be triggered when the
+ * TP_BTF program is attached.
+ */
+ skel->links.lsm_run = bpf_program__attach_lsm(skel->progs.lsm_run);
+ if (!ASSERT_OK_PTR(skel->links.lsm_run, "attach_lsm"))
+ goto cleanup;
+
+ skel->links.tp_btf_run = bpf_program__attach_trace(skel->progs.tp_btf_run);
+ if (!ASSERT_OK_PTR(skel->links.tp_btf_run, "attach_tp_btf"))
+ goto cleanup;
+
+ pid = fork();
+ if (pid == 0)
+ exit(0);
+
+ ret = (pid == -1);
+ if (ASSERT_OK(ret, "fork process"))
+ wait(NULL);
+
+ test_task_under_cgroup__detach(skel);
+
+ ASSERT_NEQ(skel->bss->remote_pid, skel->rodata->local_pid,
+ "test task_under_cgroup");
+
+cleanup:
+ test_task_under_cgroup__destroy(skel);
+ close(foo);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_work_stress.c b/tools/testing/selftests/bpf/prog_tests/task_work_stress.c
new file mode 100644
index 000000000000..450d17d91a56
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_work_stress.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <string.h>
+#include <stdio.h>
+#include "task_work_stress.skel.h"
+#include <linux/bpf.h>
+#include <linux/perf_event.h>
+#include <sys/syscall.h>
+#include <time.h>
+#include <stdlib.h>
+#include <stdatomic.h>
+
+struct test_data {
+ int prog_fd;
+ atomic_int exit;
+};
+
+static void *runner(void *test_data)
+{
+ struct test_data *td = test_data;
+ int err = 0;
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+
+ while (!err && !atomic_load(&td->exit))
+ err = bpf_prog_test_run_opts(td->prog_fd, &opts);
+
+ return NULL;
+}
+
+static int get_env_int(const char *str, int def)
+{
+ const char *s = getenv(str);
+ char *end;
+ int retval;
+
+ if (!s || !*s)
+ return def;
+ errno = 0;
+ retval = strtol(s, &end, 10);
+ if (errno || *end || retval < 0)
+ return def;
+ return retval;
+}
+
+static void task_work_run(bool enable_delete)
+{
+ struct task_work_stress *skel;
+ struct bpf_program *scheduler, *deleter;
+ int nthreads = 16;
+ int test_time_s = get_env_int("BPF_TASK_WORK_TEST_TIME", 1);
+ pthread_t tid[nthreads], tid_del;
+ bool started[nthreads], started_del = false;
+ struct test_data td_sched = { .exit = 0 }, td_del = { .exit = 1 };
+ int i, err;
+
+ skel = task_work_stress__open();
+ if (!ASSERT_OK_PTR(skel, "task_work__open"))
+ return;
+
+ scheduler = bpf_object__find_program_by_name(skel->obj, "schedule_task_work");
+ bpf_program__set_autoload(scheduler, true);
+
+ deleter = bpf_object__find_program_by_name(skel->obj, "delete_task_work");
+ bpf_program__set_autoload(deleter, true);
+
+ err = task_work_stress__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ for (i = 0; i < nthreads; ++i)
+ started[i] = false;
+
+ td_sched.prog_fd = bpf_program__fd(scheduler);
+ for (i = 0; i < nthreads; ++i) {
+ if (pthread_create(&tid[i], NULL, runner, &td_sched) != 0) {
+ fprintf(stderr, "could not start thread");
+ goto cancel;
+ }
+ started[i] = true;
+ }
+
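+ /* td_del.exit starts at 1, so the deleter thread is a no-op unless
+ * delete mode is enabled here
+ */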
+ if (enable_delete)
+ atomic_store(&td_del.exit, 0);
+
+ td_del.prog_fd = bpf_program__fd(deleter);
+ if (pthread_create(&tid_del, NULL, runner, &td_del) != 0) {
+ fprintf(stderr, "could not start thread");
+ goto cancel;
+ }
+ started_del = true;
+
+ /* Run stress test for some time */
+ sleep(test_time_s);
+
+cancel:
+ atomic_store(&td_sched.exit, 1);
+ atomic_store(&td_del.exit, 1);
+ for (i = 0; i < nthreads; ++i) {
+ if (started[i])
+ pthread_join(tid[i], NULL);
+ }
+
+ if (started_del)
+ pthread_join(tid_del, NULL);
+
+ ASSERT_GT(skel->bss->callback_scheduled, 0, "work scheduled");
+ /* Some scheduling attempts should have failed due to contention */
+ ASSERT_GT(skel->bss->schedule_error, 0, "schedule error");
+
+ if (enable_delete) {
+ /* If the delete thread is enabled, it should have cancelled some callbacks */
+ ASSERT_GT(skel->bss->delete_success, 0, "delete success");
+ ASSERT_LT(skel->bss->callback_success, skel->bss->callback_scheduled, "callbacks");
+ } else {
+ /* Without the delete thread, every scheduled callback also fires */
+ ASSERT_EQ(skel->bss->callback_success, skel->bss->callback_scheduled, "callbacks");
+ }
+
+cleanup:
+ task_work_stress__destroy(skel);
+}
+
+void test_task_work_stress(void)
+{
+ if (test__start_subtest("no_delete"))
+ task_work_run(false);
+ if (test__start_subtest("with_delete"))
+ task_work_run(true);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c
new file mode 100644
index 000000000000..48b55539331e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include <linux/pkt_cls.h>
+
+#include "cap_helpers.h"
+#include "test_tc_bpf.skel.h"
+
+#define LO_IFINDEX 1
+
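+/* opts_* suffixes: h = handle, p = priority, f = prog_fd, i = prog_id,
+ * r = BPF_TC_F_REPLACE
+ */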
+#define TEST_DECLARE_OPTS(__fd) \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_h, .handle = 1); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_p, .priority = 1); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_f, .prog_fd = __fd); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hp, .handle = 1, .priority = 1); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hf, .handle = 1, .prog_fd = __fd); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_pf, .priority = 1, .prog_fd = __fd); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpf, .handle = 1, .priority = 1, .prog_fd = __fd); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpi, .handle = 1, .priority = 1, .prog_id = 42); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpr, .handle = 1, .priority = 1, \
+ .flags = BPF_TC_F_REPLACE); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpfi, .handle = 1, .priority = 1, .prog_fd = __fd, \
+ .prog_id = 42); \
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_prio_max, .handle = 1, .priority = UINT16_MAX + 1);
+
+static int test_tc_bpf_basic(const struct bpf_tc_hook *hook, int fd)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, .prog_fd = fd);
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int ret;
+
+ ret = bpf_prog_get_info_by_fd(fd, &info, &info_len);
+ if (!ASSERT_OK(ret, "bpf_prog_get_info_by_fd"))
+ return ret;
+
+ ret = bpf_tc_attach(hook, &opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach"))
+ return ret;
+
+ if (!ASSERT_EQ(opts.handle, 1, "handle set") ||
+ !ASSERT_EQ(opts.priority, 1, "priority set") ||
+ !ASSERT_EQ(opts.prog_id, info.id, "prog_id set"))
+ goto end;
+
+ opts.prog_id = 0;
+ opts.flags = BPF_TC_F_REPLACE;
+ ret = bpf_tc_attach(hook, &opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach replace mode"))
+ goto end;
+
+ opts.flags = opts.prog_fd = opts.prog_id = 0;
+ ret = bpf_tc_query(hook, &opts);
+ if (!ASSERT_OK(ret, "bpf_tc_query"))
+ goto end;
+
+ if (!ASSERT_EQ(opts.handle, 1, "handle set") ||
+ !ASSERT_EQ(opts.priority, 1, "priority set") ||
+ !ASSERT_EQ(opts.prog_id, info.id, "prog_id set"))
+ goto end;
+
+end:
+ opts.flags = opts.prog_fd = opts.prog_id = 0;
+ ret = bpf_tc_detach(hook, &opts);
+ ASSERT_OK(ret, "bpf_tc_detach");
+ return ret;
+}
+
+static int test_tc_bpf_api(struct bpf_tc_hook *hook, int fd)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, attach_opts, .handle = 1, .priority = 1, .prog_fd = fd);
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, inv_hook, .attach_point = BPF_TC_INGRESS);
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1);
+ int ret;
+
+ ret = bpf_tc_hook_create(NULL);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook = NULL"))
+ return -EINVAL;
+
+ /* hook ifindex = 0 */
+ ret = bpf_tc_hook_create(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook ifindex == 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_hook_destroy(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook ifindex == 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(&inv_hook, &attach_opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook ifindex == 0"))
+ return -EINVAL;
+ attach_opts.prog_id = 0;
+
+ ret = bpf_tc_detach(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook ifindex == 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook ifindex == 0"))
+ return -EINVAL;
+
+ /* hook ifindex < 0 */
+ inv_hook.ifindex = -1;
+
+ ret = bpf_tc_hook_create(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook ifindex < 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_hook_destroy(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook ifindex < 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(&inv_hook, &attach_opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook ifindex < 0"))
+ return -EINVAL;
+ attach_opts.prog_id = 0;
+
+ ret = bpf_tc_detach(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook ifindex < 0"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook ifindex < 0"))
+ return -EINVAL;
+
+ inv_hook.ifindex = LO_IFINDEX;
+
+ /* hook.attach_point invalid */
+ inv_hook.attach_point = 0xabcd;
+ ret = bpf_tc_hook_create(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook.attach_point"))
+ return -EINVAL;
+
+ ret = bpf_tc_hook_destroy(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook.attach_point"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(&inv_hook, &attach_opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook.attach_point"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook.attach_point"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook.attach_point"))
+ return -EINVAL;
+
+ inv_hook.attach_point = BPF_TC_INGRESS;
+
+ /* hook.attach_point valid, but parent invalid */
+ inv_hook.parent = TC_H_MAKE(1UL << 16, 10);
+ ret = bpf_tc_hook_create(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_hook_destroy(&inv_hook);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(&inv_hook, &attach_opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook parent"))
+ return -EINVAL;
+
+ inv_hook.attach_point = BPF_TC_CUSTOM;
+ inv_hook.parent = 0;
+	/* These return -EOPNOTSUPP instead of -EINVAL because attach_point is
+	 * validated before parent, and BPF_TC_CUSTOM is unsupported here.
+	 */
+ ret = bpf_tc_hook_create(&inv_hook);
+ if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_create invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_hook_destroy(&inv_hook);
+ if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_destroy invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(&inv_hook, &attach_opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook parent"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(&inv_hook, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook parent"))
+ return -EINVAL;
+
+ inv_hook.attach_point = BPF_TC_INGRESS;
+
+ /* detach */
+ {
+ TEST_DECLARE_OPTS(fd);
+
+ ret = bpf_tc_detach(NULL, &opts_hp);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook = NULL"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, NULL);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid opts = NULL"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_hpr);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid flags set"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_hpf);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid prog_fd set"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_hpi);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid prog_id set"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_p);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid handle unset"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_h);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid priority unset"))
+ return -EINVAL;
+
+ ret = bpf_tc_detach(hook, &opts_prio_max);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid priority > UINT16_MAX"))
+ return -EINVAL;
+ }
+
+ /* query */
+ {
+ TEST_DECLARE_OPTS(fd);
+
+ ret = bpf_tc_query(NULL, &opts);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook = NULL"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, NULL);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid opts = NULL"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_hpr);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid flags set"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_hpf);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid prog_fd set"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_hpi);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid prog_id set"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_p);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid handle unset"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_h);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid priority unset"))
+ return -EINVAL;
+
+ ret = bpf_tc_query(hook, &opts_prio_max);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid priority > UINT16_MAX"))
+ return -EINVAL;
+
+		/* When no filter chain is present, the kernel returns -EINVAL. */
+ ret = bpf_tc_query(hook, &opts_hp);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query valid handle, priority set"))
+ return -EINVAL;
+ }
+
+ /* attach */
+ {
+ TEST_DECLARE_OPTS(fd);
+
+ ret = bpf_tc_attach(NULL, &opts_hp);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook = NULL"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(hook, NULL);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid opts = NULL"))
+ return -EINVAL;
+
+ opts_hp.flags = 42;
+ ret = bpf_tc_attach(hook, &opts_hp);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid flags"))
+ return -EINVAL;
+
+		opts_hp.flags = 0;
+		ret = bpf_tc_attach(hook, &opts_hp);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid prog_fd unset"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(hook, &opts_hpi);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid prog_id set"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(hook, &opts_pf);
+ if (!ASSERT_OK(ret, "bpf_tc_attach valid handle unset"))
+ return -EINVAL;
+ opts_pf.prog_fd = opts_pf.prog_id = 0;
+ ASSERT_OK(bpf_tc_detach(hook, &opts_pf), "bpf_tc_detach");
+
+ ret = bpf_tc_attach(hook, &opts_hf);
+ if (!ASSERT_OK(ret, "bpf_tc_attach valid priority unset"))
+ return -EINVAL;
+ opts_hf.prog_fd = opts_hf.prog_id = 0;
+ ASSERT_OK(bpf_tc_detach(hook, &opts_hf), "bpf_tc_detach");
+
+ ret = bpf_tc_attach(hook, &opts_prio_max);
+ if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid priority > UINT16_MAX"))
+ return -EINVAL;
+
+ ret = bpf_tc_attach(hook, &opts_f);
+ if (!ASSERT_OK(ret, "bpf_tc_attach valid both handle and priority unset"))
+ return -EINVAL;
+ opts_f.prog_fd = opts_f.prog_id = 0;
+ ASSERT_OK(bpf_tc_detach(hook, &opts_f), "bpf_tc_detach");
+ }
+
+ return 0;
+}
+
+static void tc_bpf_root(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = LO_IFINDEX,
+ .attach_point = BPF_TC_INGRESS);
+ struct test_tc_bpf *skel = NULL;
+ bool hook_created = false;
+ int cls_fd, ret;
+
+ skel = test_tc_bpf__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tc_bpf__open_and_load"))
+ return;
+
+ cls_fd = bpf_program__fd(skel->progs.cls);
+
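+	/* A clsact qdisc may already exist on loopback, so tolerate -EEXIST
+	 * from hook creation.
+	 */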
+ ret = bpf_tc_hook_create(&hook);
+ if (ret == 0)
+ hook_created = true;
+
+ ret = ret == -EEXIST ? 0 : ret;
+ if (!ASSERT_OK(ret, "bpf_tc_hook_create(BPF_TC_INGRESS)"))
+ goto end;
+
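+	/* Creating or destroying a BPF_TC_CUSTOM hook is unsupported and must
+	 * fail with -EOPNOTSUPP.
+	 */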
+ hook.attach_point = BPF_TC_CUSTOM;
+ hook.parent = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
+ ret = bpf_tc_hook_create(&hook);
+ if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_create invalid hook.attach_point"))
+ goto end;
+
+ ret = test_tc_bpf_basic(&hook, cls_fd);
+	if (!ASSERT_OK(ret, "test_tc_bpf_basic ingress"))
+ goto end;
+
+ ret = bpf_tc_hook_destroy(&hook);
+ if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_destroy invalid hook.attach_point"))
+ goto end;
+
+ hook.attach_point = BPF_TC_INGRESS;
+ hook.parent = 0;
+ bpf_tc_hook_destroy(&hook);
+
+ ret = test_tc_bpf_basic(&hook, cls_fd);
+	if (!ASSERT_OK(ret, "test_tc_bpf_basic ingress"))
+ goto end;
+
+ bpf_tc_hook_destroy(&hook);
+
+ hook.attach_point = BPF_TC_EGRESS;
+ ret = test_tc_bpf_basic(&hook, cls_fd);
+	if (!ASSERT_OK(ret, "test_tc_bpf_basic egress"))
+ goto end;
+
+ bpf_tc_hook_destroy(&hook);
+
+ ret = test_tc_bpf_api(&hook, cls_fd);
+ if (!ASSERT_OK(ret, "test_tc_bpf_api"))
+ goto end;
+
+ bpf_tc_hook_destroy(&hook);
+
+end:
+ if (hook_created) {
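+		/* INGRESS | EGRESS destroys the whole clsact qdisc. */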
+ hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
+ bpf_tc_hook_destroy(&hook);
+ }
+ test_tc_bpf__destroy(skel);
+}
+
+static void tc_bpf_non_root(void)
+{
+ struct test_tc_bpf *skel = NULL;
+ __u64 caps = 0;
+ int ret;
+
+	/* In case CAP_BPF and CAP_NET_ADMIN are not set */
+ ret = cap_enable_effective(1ULL << CAP_BPF | 1ULL << CAP_NET_ADMIN, &caps);
+ if (!ASSERT_OK(ret, "set_cap_bpf_cap_net_admin"))
+ return;
+ ret = cap_disable_effective(1ULL << CAP_SYS_ADMIN | 1ULL << CAP_PERFMON, NULL);
+ if (!ASSERT_OK(ret, "disable_cap_sys_admin"))
+ goto restore_cap;
+
+ skel = test_tc_bpf__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tc_bpf__open_and_load"))
+ goto restore_cap;
+
+ test_tc_bpf__destroy(skel);
+
+restore_cap:
+ if (caps)
+ cap_enable_effective(caps, NULL);
+}
+
+void test_tc_bpf(void)
+{
+ if (test__start_subtest("tc_bpf_root"))
+ tc_bpf_root();
+ if (test__start_subtest("tc_bpf_non_root"))
+ tc_bpf_non_root();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_change_tail.c b/tools/testing/selftests/bpf/prog_tests/tc_change_tail.c
new file mode 100644
index 000000000000..74752233e779
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tc_change_tail.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <error.h>
+#include <test_progs.h>
+#include <linux/pkt_cls.h>
+
+#include "test_tc_change_tail.skel.h"
+#include "socket_helpers.h"
+
+#define LO_IFINDEX 1
+
+void test_tc_change_tail(void)
+{
+ LIBBPF_OPTS(bpf_tcx_opts, tcx_opts);
+ struct test_tc_change_tail *skel = NULL;
+ struct bpf_link *link;
+ int c1, p1;
+ char buf[2];
+ int ret;
+
+ skel = test_tc_change_tail__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tc_change_tail__open_and_load"))
+ return;
+
+ link = bpf_program__attach_tcx(skel->progs.change_tail, LO_IFINDEX,
+ &tcx_opts);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_tcx"))
+ goto destroy;
+
+ skel->links.change_tail = link;
+ ret = create_pair(AF_INET, SOCK_DGRAM, &c1, &p1);
+ if (!ASSERT_OK(ret, "create_pair"))
+ goto destroy;
+
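+	/* The first byte of each payload selects a bpf_skb_change_tail() case
+	 * in the BPF program; change_tail_ret records the helper's return value.
+	 */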
+ ret = xsend(p1, "Tr", 2, 0);
+ ASSERT_EQ(ret, 2, "xsend(p1)");
+ ret = recv(c1, buf, 2, 0);
+ ASSERT_EQ(ret, 2, "recv(c1)");
+ ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret");
+
+ ret = xsend(p1, "G", 1, 0);
+ ASSERT_EQ(ret, 1, "xsend(p1)");
+ ret = recv(c1, buf, 2, 0);
+ ASSERT_EQ(ret, 1, "recv(c1)");
+ ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret");
+
+ ret = xsend(p1, "E", 1, 0);
+ ASSERT_EQ(ret, 1, "xsend(p1)");
+ ret = recv(c1, buf, 1, 0);
+ ASSERT_EQ(ret, 1, "recv(c1)");
+ ASSERT_EQ(skel->data->change_tail_ret, -EINVAL, "change_tail_ret");
+
+ ret = xsend(p1, "Z", 1, 0);
+ ASSERT_EQ(ret, 1, "xsend(p1)");
+ ret = recv(c1, buf, 1, 0);
+ ASSERT_EQ(ret, 1, "recv(c1)");
+ ASSERT_EQ(skel->data->change_tail_ret, -EINVAL, "change_tail_ret");
+
+ close(c1);
+ close(p1);
+destroy:
+ test_tc_change_tail__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_helpers.h b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h
new file mode 100644
index 000000000000..d52a62af77bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Isovalent */
+#ifndef TC_HELPERS
+#define TC_HELPERS
+#include <test_progs.h>
+
+#ifndef loopback
+# define loopback 1
+#endif
+
+static inline __u32 ifindex_from_link_fd(int fd)
+{
+ struct bpf_link_info link_info = {};
+ __u32 link_info_len = sizeof(link_info);
+ int err;
+
+ err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len);
+	if (!ASSERT_OK(err, "ifindex_from_link_fd"))
+ return 0;
+
+ return link_info.tcx.ifindex;
+}
+
+static inline void __assert_mprog_count(int target, int expected, int ifindex)
+{
+ __u32 count = 0, attach_flags = 0;
+ int err;
+
+ err = bpf_prog_query(ifindex, target, 0, &attach_flags,
+ NULL, &count);
+ ASSERT_EQ(count, expected, "count");
+ ASSERT_EQ(err, 0, "prog_query");
+}
+
+static inline void assert_mprog_count(int target, int expected)
+{
+ __assert_mprog_count(target, expected, loopback);
+}
+
+static inline void assert_mprog_count_ifindex(int ifindex, int target, int expected)
+{
+ __assert_mprog_count(target, expected, ifindex);
+}
+
+static inline void tc_skel_reset_all_seen(struct test_tc_link *skel)
+{
+ memset(skel->bss, 0, sizeof(*skel->bss));
+}
+
+#endif /* TC_HELPERS */
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_links.c b/tools/testing/selftests/bpf/prog_tests/tc_links.c
new file mode 100644
index 000000000000..2186a24e7d8a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tc_links.c
@@ -0,0 +1,1962 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+#include <uapi/linux/if_link.h>
+#include <uapi/linux/pkt_sched.h>
+#include <net/if.h>
+#include <test_progs.h>
+
+#define loopback 1
+#define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null"
+
+#include "test_tc_link.skel.h"
+
+#include "netlink_helpers.h"
+#include "tc_helpers.h"
+
+void test_ns_tc_links_basic(void)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_tcx_opts, optl);
+ __u32 prog_ids[2], link_ids[2];
+ __u32 pid1, pid2, lid1, lid2;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+
+ assert_mprog_count(BPF_TCX_INGRESS, 0);
+ assert_mprog_count(BPF_TCX_EGRESS, 0);
+
+ ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count(BPF_TCX_INGRESS, 1);
+ assert_mprog_count(BPF_TCX_EGRESS, 0);
+
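+	/* The hook's revision starts at 1 and is bumped on every change, so
+	 * the first attach is reported as revision 2 below.
+	 */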
+ optq.prog_ids = prog_ids;
+ optq.link_ids = link_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, BPF_TCX_INGRESS, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
+ ASSERT_NEQ(lid1, lid2, "link_ids_1_2");
+
+ assert_mprog_count(BPF_TCX_INGRESS, 1);
+ assert_mprog_count(BPF_TCX_EGRESS, 1);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, BPF_TCX_EGRESS, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+cleanup:
+ test_tc_link__destroy(skel);
+
+ assert_mprog_count(BPF_TCX_INGRESS, 0);
+ assert_mprog_count(BPF_TCX_EGRESS, 0);
+}
+
+static void test_tc_links_before_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_tcx_opts, optl);
+ __u32 prog_ids[5], link_ids[5];
+ __u32 pid1, pid2, pid3, pid4;
+ __u32 lid1, lid2, lid3, lid4;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
+ 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
+ 0, "tc2_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
+ 0, "tc3_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
+ 0, "tc4_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+ pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
+ pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+ ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
+ ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count(target, 1);
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
+
+ assert_mprog_count(target, 2);
+
+ optq.prog_ids = prog_ids;
+ optq.link_ids = link_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = bpf_program__fd(skel->progs.tc2),
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc3 = link;
+
+ lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3));
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE | BPF_F_LINK,
+ .relative_id = lid1,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc4 = link;
+
+ lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4));
+
+ assert_mprog_count(target, 4);
+
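+	/* Expected order after the two relative inserts: tc4, tc1, tc3, tc2. */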
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid4, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid4, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], pid3, "prog_ids[2]");
+ ASSERT_EQ(optq.link_ids[2], lid3, "link_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], pid2, "prog_ids[3]");
+ ASSERT_EQ(optq.link_ids[3], lid2, "link_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
+cleanup:
+ test_tc_link__destroy(skel);
+ assert_mprog_count(target, 0);
+}
+
+void test_ns_tc_links_before(void)
+{
+ test_tc_links_before_target(BPF_TCX_INGRESS);
+ test_tc_links_before_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_links_after_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_tcx_opts, optl);
+ __u32 prog_ids[5], link_ids[5];
+ __u32 pid1, pid2, pid3, pid4;
+ __u32 lid1, lid2, lid3, lid4;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
+ 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
+ 0, "tc2_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
+ 0, "tc3_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
+ 0, "tc4_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+ pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
+ pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+ ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
+ ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count(target, 1);
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
+
+ assert_mprog_count(target, 2);
+
+ optq.prog_ids = prog_ids;
+ optq.link_ids = link_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_AFTER,
+ .relative_fd = bpf_program__fd(skel->progs.tc1),
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc3 = link;
+
+ lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3));
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_AFTER | BPF_F_LINK,
+ .relative_fd = bpf_link__fd(skel->links.tc2),
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc4 = link;
+
+ lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4));
+
+ assert_mprog_count(target, 4);
+
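+	/* Expected order after the two relative inserts: tc1, tc3, tc2, tc4. */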
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid3, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], lid3, "link_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], pid2, "prog_ids[2]");
+ ASSERT_EQ(optq.link_ids[2], lid2, "link_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], pid4, "prog_ids[3]");
+ ASSERT_EQ(optq.link_ids[3], lid4, "link_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
+cleanup:
+ test_tc_link__destroy(skel);
+ assert_mprog_count(target, 0);
+}
+
+void test_ns_tc_links_after(void)
+{
+ test_tc_links_after_target(BPF_TCX_INGRESS);
+ test_tc_links_after_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_links_revision_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_tcx_opts, optl);
+ __u32 prog_ids[3], link_ids[3];
+ __u32 pid1, pid2, lid1, lid2;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
+ 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
+ 0, "tc2_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+
+ assert_mprog_count(target, 0);
+
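+	/* An attach succeeds only if expected_revision matches the hook's
+	 * current revision; an empty hook starts at revision 1.
+	 */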
+ optl.expected_revision = 1;
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count(target, 1);
+
+ optl.expected_revision = 1;
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 1);
+
+ optl.expected_revision = 2;
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
+
+ assert_mprog_count(target, 2);
+
+ optq.prog_ids = prog_ids;
+ optq.link_ids = link_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+	ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+cleanup:
+ test_tc_link__destroy(skel);
+ assert_mprog_count(target, 0);
+}
+
+void test_ns_tc_links_revision(void)
+{
+ test_tc_links_revision_target(BPF_TCX_INGRESS);
+ test_tc_links_revision_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_chain_classic(int target, bool chain_tc_old)
+{
+ LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
+ LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
+ bool hook_created = false, tc_attached = false;
+ LIBBPF_OPTS(bpf_tcx_opts, optl);
+ __u32 pid1, pid2, pid3;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
+ 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
+ 0, "tc2_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+ pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+ ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ if (chain_tc_old) {
+ tc_hook.attach_point = target == BPF_TCX_INGRESS ?
+ BPF_TC_INGRESS : BPF_TC_EGRESS;
+ err = bpf_tc_hook_create(&tc_hook);
+ if (err == 0)
+ hook_created = true;
+ err = err == -EEXIST ? 0 : err;
+ if (!ASSERT_OK(err, "bpf_tc_hook_create"))
+ goto cleanup;
+
+ tc_opts.prog_fd = bpf_program__fd(skel->progs.tc3);
+ err = bpf_tc_attach(&tc_hook, &tc_opts);
+ if (!ASSERT_OK(err, "bpf_tc_attach"))
+ goto cleanup;
+ tc_attached = true;
+ }
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ assert_mprog_count(target, 2);
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
+
+ err = bpf_link__detach(skel->links.tc2);
+	if (!ASSERT_OK(err, "link_detach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
+cleanup:
+ if (tc_attached) {
+ tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
+ err = bpf_tc_detach(&tc_hook, &tc_opts);
+ ASSERT_OK(err, "bpf_tc_detach");
+ }
+ if (hook_created) {
+ tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
+ bpf_tc_hook_destroy(&tc_hook);
+ }
+ assert_mprog_count(target, 1);
+ test_tc_link__destroy(skel);
+ assert_mprog_count(target, 0);
+}
+
+void test_ns_tc_links_chain_classic(void)
+{
+ test_tc_chain_classic(BPF_TCX_INGRESS, false);
+ test_tc_chain_classic(BPF_TCX_EGRESS, false);
+ test_tc_chain_classic(BPF_TCX_INGRESS, true);
+ test_tc_chain_classic(BPF_TCX_EGRESS, true);
+}
+
+static void test_tc_links_replace_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_tcx_opts, optl);
+ __u32 pid1, pid2, pid3, lid1, lid2;
+ __u32 prog_ids[4], link_ids[4];
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
+ 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
+ 0, "tc2_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
+ 0, "tc3_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+ pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+ ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ optl.expected_revision = 1;
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE,
+ .relative_id = pid1,
+ .expected_revision = 2,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
+
+ assert_mprog_count(target, 2);
+
+ optq.prog_ids = prog_ids;
+ optq.link_ids = link_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_REPLACE,
+ .relative_fd = bpf_program__fd(skel->progs.tc2),
+ .expected_revision = 3,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 2);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_REPLACE | BPF_F_LINK,
+ .relative_fd = bpf_link__fd(skel->links.tc2),
+ .expected_revision = 3,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 2);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_REPLACE | BPF_F_LINK | BPF_F_AFTER,
+ .relative_id = lid2,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 2);
+
+ err = bpf_link__update_program(skel->links.tc2, skel->progs.tc3);
+ if (!ASSERT_OK(err, "link_update"))
+ goto cleanup;
+
+ assert_mprog_count(target, 2);
+
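+	/* The update swapped the program in place: link lid2 now runs tc3. */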
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 4, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid3, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
+
+ err = bpf_link__detach(skel->links.tc2);
+ if (!ASSERT_OK(err, "link_detach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
+
+ err = bpf_link__update_program(skel->links.tc1, skel->progs.tc1);
+ if (!ASSERT_OK(err, "link_update_self"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
+cleanup:
+ test_tc_link__destroy(skel);
+ assert_mprog_count(target, 0);
+}
+
+void test_ns_tc_links_replace(void)
+{
+ test_tc_links_replace_target(BPF_TCX_INGRESS);
+ test_tc_links_replace_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_links_invalid_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_tcx_opts, optl);
+ __u32 pid1, pid2, lid1;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
+ 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
+ 0, "tc2_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+
+ assert_mprog_count(target, 0);
+
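+	/* Each invalid flag/anchor combination below must fail cleanly and
+	 * leave nothing attached.
+	 */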
+ optl.flags = BPF_F_BEFORE | BPF_F_AFTER;
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE | BPF_F_ID,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_AFTER | BPF_F_ID,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_ID,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_LINK,
+ .relative_fd = bpf_program__fd(skel->progs.tc2),
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_LINK,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl,
+ .relative_fd = bpf_program__fd(skel->progs.tc2),
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE | BPF_F_AFTER,
+ .relative_fd = bpf_program__fd(skel->progs.tc2),
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = bpf_program__fd(skel->progs.tc1),
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_ID,
+ .relative_id = pid2,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_ID,
+ .relative_id = 42,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = bpf_program__fd(skel->progs.tc1),
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE | BPF_F_LINK,
+ .relative_fd = bpf_program__fd(skel->progs.tc1),
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_AFTER,
+ .relative_fd = bpf_program__fd(skel->progs.tc1),
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl);
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, 0, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_AFTER | BPF_F_LINK,
+ .relative_fd = bpf_program__fd(skel->progs.tc1),
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optl);
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_AFTER | BPF_F_LINK,
+ .relative_fd = bpf_program__fd(skel->progs.tc1),
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE | BPF_F_LINK | BPF_F_ID,
+ .relative_id = ~0,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE | BPF_F_LINK | BPF_F_ID,
+ .relative_id = lid1,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE | BPF_F_ID,
+ .relative_id = pid1,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE | BPF_F_LINK | BPF_F_ID,
+ .relative_id = lid1,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ assert_mprog_count(target, 2);
+cleanup:
+ test_tc_link__destroy(skel);
+ assert_mprog_count(target, 0);
+}
+
+void test_ns_tc_links_invalid(void)
+{
+ test_tc_links_invalid_target(BPF_TCX_INGRESS);
+ test_tc_links_invalid_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_links_prepend_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_tcx_opts, optl);
+ __u32 prog_ids[5], link_ids[5];
+ __u32 pid1, pid2, pid3, pid4;
+ __u32 lid1, lid2, lid3, lid4;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
+ 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
+ 0, "tc2_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
+ 0, "tc3_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
+ 0, "tc4_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+ pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
+ pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+ ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
+ ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
+
+ assert_mprog_count(target, 2);
+
+ optq.prog_ids = prog_ids;
+ optq.link_ids = link_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc3 = link;
+
+ lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3));
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc4 = link;
+
+ lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4));
+
+ assert_mprog_count(target, 4);
+
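+	/* BPF_F_BEFORE with no anchor prepends: expected order is tc4, tc3,
+	 * tc2, tc1.
+	 */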
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid4, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid4, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid3, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], lid3, "link_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], pid2, "prog_ids[2]");
+ ASSERT_EQ(optq.link_ids[2], lid2, "link_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], pid1, "prog_ids[3]");
+ ASSERT_EQ(optq.link_ids[3], lid1, "link_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
+cleanup:
+ test_tc_link__destroy(skel);
+ assert_mprog_count(target, 0);
+}
+
+void test_ns_tc_links_prepend(void)
+{
+ test_tc_links_prepend_target(BPF_TCX_INGRESS);
+ test_tc_links_prepend_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_links_append_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_tcx_opts, optl);
+ __u32 prog_ids[5], link_ids[5];
+ __u32 pid1, pid2, pid3, pid4;
+ __u32 lid1, lid2, lid3, lid4;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
+ 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
+ 0, "tc2_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
+ 0, "tc3_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
+ 0, "tc4_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+ pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
+ pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+ ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
+ ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_AFTER,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
+
+ assert_mprog_count(target, 2);
+
+ optq.prog_ids = prog_ids;
+ optq.link_ids = link_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_AFTER,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc3 = link;
+
+ lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3));
+
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_AFTER,
+ );
+
+ link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc4 = link;
+
+ lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4));
+
+ assert_mprog_count(target, 4);
+
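+	/* BPF_F_AFTER with no anchor appends: expected order is tc1, tc2,
+	 * tc3, tc4.
+	 */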
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], pid3, "prog_ids[2]");
+ ASSERT_EQ(optq.link_ids[2], lid3, "link_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], pid4, "prog_ids[3]");
+ ASSERT_EQ(optq.link_ids[3], lid4, "link_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
+cleanup:
+ test_tc_link__destroy(skel);
+ assert_mprog_count(target, 0);
+}
+
+void test_ns_tc_links_append(void)
+{
+ test_tc_links_append_target(BPF_TCX_INGRESS);
+ test_tc_links_append_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_links_dev_cleanup_target(int target)
+{
+ LIBBPF_OPTS(bpf_tcx_opts, optl);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 pid1, pid2, pid3, pid4;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err, ifindex;
+
+ ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth");
+ ifindex = if_nametoindex("tcx_opts1");
+ ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
+ 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
+ 0, "tc2_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
+ 0, "tc3_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
+ 0, "tc4_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+ pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
+ pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+ ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
+ ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ assert_mprog_count_ifindex(ifindex, target, 1);
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ assert_mprog_count_ifindex(ifindex, target, 2);
+
+ link = bpf_program__attach_tcx(skel->progs.tc3, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc3 = link;
+
+ assert_mprog_count_ifindex(ifindex, target, 3);
+
+ link = bpf_program__attach_tcx(skel->progs.tc4, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc4 = link;
+
+ assert_mprog_count_ifindex(ifindex, target, 4);
+
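+	/* Deleting the device must auto-release all tcx links; each link's
+	 * reported ifindex then reads back as 0.
+	 */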
+ ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
+ ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
+ ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
+
+ ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc1)), 0, "tc1_ifindex");
+ ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc2)), 0, "tc2_ifindex");
+ ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc3)), 0, "tc3_ifindex");
+ ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc4)), 0, "tc4_ifindex");
+
+ test_tc_link__destroy(skel);
+ return;
+cleanup:
+ test_tc_link__destroy(skel);
+
+ ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
+ ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
+ ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
+}
+
+void test_ns_tc_links_dev_cleanup(void)
+{
+ test_tc_links_dev_cleanup_target(BPF_TCX_INGRESS);
+ test_tc_links_dev_cleanup_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_chain_mixed(int target)
+{
+ LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
+ LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
+ LIBBPF_OPTS(bpf_tcx_opts, optl);
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ __u32 pid1, pid2, pid3;
+ int err;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
+ 0, "tc4_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc5, target),
+ 0, "tc5_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc6, target),
+ 0, "tc6_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc5));
+ pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc6));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+ ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ tc_hook.attach_point = target == BPF_TCX_INGRESS ?
+ BPF_TC_INGRESS : BPF_TC_EGRESS;
+ err = bpf_tc_hook_create(&tc_hook);
+ err = err == -EEXIST ? 0 : err;
+ if (!ASSERT_OK(err, "bpf_tc_hook_create"))
+ goto cleanup;
+
+ tc_opts.prog_fd = bpf_program__fd(skel->progs.tc5);
+ err = bpf_tc_attach(&tc_hook, &tc_opts);
+ if (!ASSERT_OK(err, "bpf_tc_attach"))
+ goto cleanup;
+
+ link = bpf_program__attach_tcx(skel->progs.tc6, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc6 = link;
+
+ assert_mprog_count(target, 1);
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
+ ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5");
+ ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6");
+
+ err = bpf_link__update_program(skel->links.tc6, skel->progs.tc4);
+ if (!ASSERT_OK(err, "link_update"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
+ ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
+ ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
+
+ err = bpf_link__detach(skel->links.tc6);
+ if (!ASSERT_OK(err, "prog_detach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 0);
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
+ ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
+ ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
+
+cleanup:
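+	/* bpf_tc_attach() fills in prog_id, so the opts must be zeroed
+	 * again before they can be reused for bpf_tc_detach().
+	 */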
+ tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
+ err = bpf_tc_detach(&tc_hook, &tc_opts);
+ ASSERT_OK(err, "bpf_tc_detach");
+
+ tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
+ bpf_tc_hook_destroy(&tc_hook);
+
+ test_tc_link__destroy(skel);
+}
+
+void test_ns_tc_links_chain_mixed(void)
+{
+ test_tc_chain_mixed(BPF_TCX_INGRESS);
+ test_tc_chain_mixed(BPF_TCX_EGRESS);
+}
+
+static void test_tc_links_ingress(int target, bool chain_tc_old,
+ bool tcx_teardown_first)
+{
+ LIBBPF_OPTS(bpf_tc_opts, tc_opts,
+ .handle = 1,
+ .priority = 1,
+ );
+ LIBBPF_OPTS(bpf_tc_hook, tc_hook,
+ .ifindex = loopback,
+ .attach_point = BPF_TC_CUSTOM,
+ .parent = TC_H_INGRESS,
+ );
+ bool hook_created = false, tc_attached = false;
+ LIBBPF_OPTS(bpf_tcx_opts, optl);
+ __u32 pid1, pid2, pid3;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
+ 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
+ 0, "tc2_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+ pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+ ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ if (chain_tc_old) {
+ ASSERT_OK(system("tc qdisc add dev lo ingress"), "add_ingress");
+ hook_created = true;
+
+ tc_opts.prog_fd = bpf_program__fd(skel->progs.tc3);
+ err = bpf_tc_attach(&tc_hook, &tc_opts);
+ if (!ASSERT_OK(err, "bpf_tc_attach"))
+ goto cleanup;
+ tc_attached = true;
+ }
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ assert_mprog_count(target, 2);
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
+
+ err = bpf_link__detach(skel->links.tc2);
+ if (!ASSERT_OK(err, "prog_detach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
+cleanup:
+ if (tc_attached) {
+ tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
+ err = bpf_tc_detach(&tc_hook, &tc_opts);
+ ASSERT_OK(err, "bpf_tc_detach");
+ }
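+	/* Ping between the teardown steps: whether the tcx entry or the
+	 * ingress qdisc goes away first, the data path must stay intact.
+	 */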
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+ assert_mprog_count(target, 1);
+ if (hook_created && tcx_teardown_first)
+ ASSERT_OK(system("tc qdisc del dev lo ingress"), "del_ingress");
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+ test_tc_link__destroy(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+ if (hook_created && !tcx_teardown_first)
+ ASSERT_OK(system("tc qdisc del dev lo ingress"), "del_ingress");
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+ assert_mprog_count(target, 0);
+}
+
+void test_ns_tc_links_ingress(void)
+{
+ test_tc_links_ingress(BPF_TCX_INGRESS, true, true);
+ test_tc_links_ingress(BPF_TCX_INGRESS, true, false);
+ test_tc_links_ingress(BPF_TCX_INGRESS, false, false);
+}
+
+struct qdisc_req {
+ struct nlmsghdr n;
+ struct tcmsg t;
+ char buf[1024];
+};
+
+static int qdisc_replace(int ifindex, const char *kind, bool block)
+{
+ struct rtnl_handle rth = { .fd = -1 };
+ struct qdisc_req req;
+ int err;
+
+ err = rtnl_open(&rth, 0);
+ if (!ASSERT_OK(err, "open_rtnetlink"))
+ return err;
+
+ memset(&req, 0, sizeof(req));
+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ req.n.nlmsg_flags = NLM_F_CREATE | NLM_F_REPLACE | NLM_F_REQUEST;
+ req.n.nlmsg_type = RTM_NEWQDISC;
+ req.t.tcm_family = AF_UNSPEC;
+ req.t.tcm_ifindex = ifindex;
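+	/* 0xfffffff1 == TC_H_INGRESS (== TC_H_CLSACT) */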
+ req.t.tcm_parent = 0xfffffff1;
+
+ addattr_l(&req.n, sizeof(req), TCA_KIND, kind, strlen(kind) + 1);
+ if (block)
+ addattr32(&req.n, sizeof(req), TCA_INGRESS_BLOCK, 1);
+
+ err = rtnl_talk(&rth, &req.n, NULL);
+ ASSERT_OK(err, "talk_rtnetlink");
+ rtnl_close(&rth);
+ return err;
+}
+
+void test_ns_tc_links_dev_chain0(void)
+{
+ int err, ifindex;
+
+ ASSERT_OK(system("ip link add dev foo type veth peer name bar"), "add veth");
+ ifindex = if_nametoindex("foo");
+ ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
+ err = qdisc_replace(ifindex, "ingress", true);
+ if (!ASSERT_OK(err, "attaching ingress"))
+ goto cleanup;
+ ASSERT_OK(system("tc filter add block 1 matchall action skbmod swap mac"), "add block");
+ err = qdisc_replace(ifindex, "clsact", false);
+ if (!ASSERT_OK(err, "attaching clsact"))
+ goto cleanup;
+	/* Heuristic: kern_sync_rcu() alone does not work; a wait time of
+	 * ~5s reliably reproduced the issue (100% of the time) on kernels
+	 * without the fix.
+	 */
+ sleep(5);
+ ASSERT_OK(system("tc filter add dev foo ingress matchall action skbmod swap mac"), "add filter");
+cleanup:
+ ASSERT_OK(system("ip link del dev foo"), "del veth");
+ ASSERT_EQ(if_nametoindex("foo"), 0, "foo removed");
+ ASSERT_EQ(if_nametoindex("bar"), 0, "bar removed");
+}
+
+static void test_tc_links_dev_mixed(int target)
+{
+ LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
+ LIBBPF_OPTS(bpf_tc_hook, tc_hook);
+ LIBBPF_OPTS(bpf_tcx_opts, optl);
+ __u32 pid1, pid2, pid3, pid4;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err, ifindex;
+
+ ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth");
+ ifindex = if_nametoindex("tcx_opts1");
+ ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
+ 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
+ 0, "tc2_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
+ 0, "tc3_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
+ 0, "tc4_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+ pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
+ pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+ ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
+ ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ link = bpf_program__attach_tcx(skel->progs.tc1, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ assert_mprog_count_ifindex(ifindex, target, 1);
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ assert_mprog_count_ifindex(ifindex, target, 2);
+
+ link = bpf_program__attach_tcx(skel->progs.tc3, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc3 = link;
+
+ assert_mprog_count_ifindex(ifindex, target, 3);
+
+ link = bpf_program__attach_tcx(skel->progs.tc4, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc4 = link;
+
+ assert_mprog_count_ifindex(ifindex, target, 4);
+
+ tc_hook.ifindex = ifindex;
+ tc_hook.attach_point = target == BPF_TCX_INGRESS ?
+ BPF_TC_INGRESS : BPF_TC_EGRESS;
+
+ err = bpf_tc_hook_create(&tc_hook);
+ err = err == -EEXIST ? 0 : err;
+ if (!ASSERT_OK(err, "bpf_tc_hook_create"))
+ goto cleanup;
+
+ tc_opts.prog_fd = bpf_program__fd(skel->progs.tc5);
+ err = bpf_tc_attach(&tc_hook, &tc_opts);
+ if (!ASSERT_OK(err, "bpf_tc_attach"))
+ goto cleanup;
+
+ ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
+ ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
+ ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
+
+ ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc1)), 0, "tc1_ifindex");
+ ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc2)), 0, "tc2_ifindex");
+ ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc3)), 0, "tc3_ifindex");
+ ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc4)), 0, "tc4_ifindex");
+
+ test_tc_link__destroy(skel);
+ return;
+cleanup:
+ test_tc_link__destroy(skel);
+
+ ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
+ ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
+ ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
+}
+
+void test_ns_tc_links_dev_mixed(void)
+{
+ test_tc_links_dev_mixed(BPF_TCX_INGRESS);
+ test_tc_links_dev_mixed(BPF_TCX_EGRESS);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_netkit.c b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c
new file mode 100644
index 000000000000..2461d183dee5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c
@@ -0,0 +1,870 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+#include <uapi/linux/if_link.h>
+#include <net/if.h>
+#include <test_progs.h>
+
+#define netkit_peer "nk0"
+#define netkit_name "nk1"
+
+#define ping_addr_neigh 0x0a000002 /* 10.0.0.2 */
+#define ping_addr_noneigh 0x0a000003 /* 10.0.0.3 */
+
+#include "test_tc_link.skel.h"
+#include "netlink_helpers.h"
+#include "tc_helpers.h"
+
+#define NETKIT_HEADROOM 32
+#define NETKIT_TAILROOM 8
+
+#define MARK 42
+#define PRIO 0xeb9f
+#define ICMP_ECHO 8
+
+#define FLAG_ADJUST_ROOM (1 << 0)
+#define FLAG_SAME_NETNS (1 << 1)
+
+struct icmphdr {
+ __u8 type;
+ __u8 code;
+ __sum16 checksum;
+ struct {
+ __be16 id;
+ __be16 sequence;
+ } echo;
+};
+
+struct iplink_req {
+ struct nlmsghdr n;
+ struct ifinfomsg i;
+ char buf[1024];
+};
+
+static int create_netkit(int mode, int policy, int peer_policy, int *ifindex,
+ int scrub, int peer_scrub, __u32 flags)
+{
+ struct rtnl_handle rth = { .fd = -1 };
+ struct iplink_req req = {};
+ struct rtattr *linkinfo, *data;
+ const char *type = "netkit";
+ int err;
+
+ err = rtnl_open(&rth, 0);
+ if (!ASSERT_OK(err, "open_rtnetlink"))
+ return err;
+
+ memset(&req, 0, sizeof(req));
+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+ req.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
+ req.n.nlmsg_type = RTM_NEWLINK;
+ req.i.ifi_family = AF_UNSPEC;
+
+ addattr_l(&req.n, sizeof(req), IFLA_IFNAME, netkit_name,
+ strlen(netkit_name));
+ linkinfo = addattr_nest(&req.n, sizeof(req), IFLA_LINKINFO);
+ addattr_l(&req.n, sizeof(req), IFLA_INFO_KIND, type, strlen(type));
+ data = addattr_nest(&req.n, sizeof(req), IFLA_INFO_DATA);
+ addattr32(&req.n, sizeof(req), IFLA_NETKIT_POLICY, policy);
+ addattr32(&req.n, sizeof(req), IFLA_NETKIT_PEER_POLICY, peer_policy);
+ addattr32(&req.n, sizeof(req), IFLA_NETKIT_SCRUB, scrub);
+ addattr32(&req.n, sizeof(req), IFLA_NETKIT_PEER_SCRUB, peer_scrub);
+ addattr32(&req.n, sizeof(req), IFLA_NETKIT_MODE, mode);
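+	/* Optionally request extra skb headroom/tailroom on the device,
+	 * which the BPF programs can later observe.
+	 */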
+ if (flags & FLAG_ADJUST_ROOM) {
+ addattr16(&req.n, sizeof(req), IFLA_NETKIT_HEADROOM, NETKIT_HEADROOM);
+ addattr16(&req.n, sizeof(req), IFLA_NETKIT_TAILROOM, NETKIT_TAILROOM);
+ }
+ addattr_nest_end(&req.n, data);
+ addattr_nest_end(&req.n, linkinfo);
+
+ err = rtnl_talk(&rth, &req.n, NULL);
+ ASSERT_OK(err, "talk_rtnetlink");
+ rtnl_close(&rth);
+ *ifindex = if_nametoindex(netkit_name);
+
+ ASSERT_GT(*ifindex, 0, "retrieve_ifindex");
+ ASSERT_OK(system("ip netns add foo"), "create netns");
+ ASSERT_OK(system("ip link set dev " netkit_name " up"),
+ "up primary");
+ ASSERT_OK(system("ip addr add dev " netkit_name " 10.0.0.1/24"),
+ "addr primary");
+
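+	/* netkit in L3 mode has no L2 address to set, so ip(8) fails with
+	 * exit code 2, which system() reports as 2 << 8 == 512.
+	 */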
+ if (mode == NETKIT_L3) {
+ ASSERT_EQ(system("ip link set dev " netkit_name
+ " addr ee:ff:bb:cc:aa:dd 2> /dev/null"), 512,
+ "set hwaddress");
+ } else {
+ ASSERT_OK(system("ip link set dev " netkit_name
+ " addr ee:ff:bb:cc:aa:dd"),
+ "set hwaddress");
+ }
+ if (flags & FLAG_SAME_NETNS) {
+ ASSERT_OK(system("ip link set dev " netkit_peer " up"),
+ "up peer");
+ ASSERT_OK(system("ip addr add dev " netkit_peer " 10.0.0.2/24"),
+ "addr peer");
+ } else {
+ ASSERT_OK(system("ip link set " netkit_peer " netns foo"),
+ "move peer");
+ ASSERT_OK(system("ip netns exec foo ip link set dev "
+ netkit_peer " up"), "up peer");
+ ASSERT_OK(system("ip netns exec foo ip addr add dev "
+ netkit_peer " 10.0.0.2/24"), "addr peer");
+ }
+ return err;
+}
+
+static void move_netkit(void)
+{
+ ASSERT_OK(system("ip link set " netkit_peer " netns foo"),
+ "move peer");
+ ASSERT_OK(system("ip netns exec foo ip link set dev "
+ netkit_peer " up"), "up peer");
+ ASSERT_OK(system("ip netns exec foo ip addr add dev "
+ netkit_peer " 10.0.0.2/24"), "addr peer");
+}
+
+static void destroy_netkit(void)
+{
+ ASSERT_OK(system("ip link del dev " netkit_name), "del primary");
+ ASSERT_OK(system("ip netns del foo"), "delete netns");
+ ASSERT_EQ(if_nametoindex(netkit_name), 0, netkit_name "_ifindex");
+}
+
+static int __send_icmp(__u32 dest)
+{
+ int sock, ret, mark = MARK, prio = PRIO;
+ struct sockaddr_in addr;
+ struct icmphdr icmp;
+
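+	/* Allow gid 0 to open SOCK_DGRAM ICMP sockets so echo requests
+	 * can be crafted without a raw socket.
+	 */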
+ ret = write_sysctl("/proc/sys/net/ipv4/ping_group_range", "0 0");
+ if (!ASSERT_OK(ret, "write_sysctl(net.ipv4.ping_group_range)"))
+ return ret;
+
+ sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
+ if (!ASSERT_GE(sock, 0, "icmp_socket"))
+ return -errno;
+
+ ret = setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE,
+ netkit_name, strlen(netkit_name) + 1);
+ if (!ASSERT_OK(ret, "setsockopt(SO_BINDTODEVICE)"))
+ goto out;
+
+ ret = setsockopt(sock, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
+ if (!ASSERT_OK(ret, "setsockopt(SO_MARK)"))
+ goto out;
+
+ ret = setsockopt(sock, SOL_SOCKET, SO_PRIORITY,
+ &prio, sizeof(prio));
+ if (!ASSERT_OK(ret, "setsockopt(SO_PRIORITY)"))
+ goto out;
+
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = htonl(dest);
+
+ memset(&icmp, 0, sizeof(icmp));
+ icmp.type = ICMP_ECHO;
+ icmp.echo.id = 1234;
+ icmp.echo.sequence = 1;
+
+ ret = sendto(sock, &icmp, sizeof(icmp), 0,
+ (struct sockaddr *)&addr, sizeof(addr));
+ if (!ASSERT_GE(ret, 0, "icmp_sendto"))
+ ret = -errno;
+ else
+ ret = 0;
+out:
+ close(sock);
+ return ret;
+}
+
+static int send_icmp(void)
+{
+ return __send_icmp(ping_addr_neigh);
+}
+
+void serial_test_tc_netkit_basic(void)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_netkit_opts, optl);
+ __u32 prog_ids[2], link_ids[2];
+ __u32 pid1, pid2, lid1, lid2;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err, ifindex;
+
+ err = create_netkit(NETKIT_L2, NETKIT_PASS, NETKIT_PASS,
+ &ifindex, NETKIT_SCRUB_DEFAULT,
+ NETKIT_SCRUB_DEFAULT, 0);
+ if (err)
+ return;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
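+	/* tc1 hooks into the primary device, tc2 into its peer; note that
+	 * both program lists are managed through the primary's ifindex.
+	 */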
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1,
+ BPF_NETKIT_PRIMARY), 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2,
+ BPF_NETKIT_PEER), 0, "tc2_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+
+ ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+
+ optq.prog_ids = prog_ids;
+ optq.link_ids = link_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, BPF_NETKIT_PRIMARY, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
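+	/* The mprog revision starts out at 1 for an empty chain and is
+	 * bumped on every attach/detach, hence 2 after a single attach.
+	 */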
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ link = bpf_program__attach_netkit(skel->progs.tc2, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
+ ASSERT_NEQ(lid1, lid2, "link_ids_1_2");
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 1);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, BPF_NETKIT_PEER, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+cleanup:
+ test_tc_link__destroy(skel);
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+ destroy_netkit();
+}
+
+static void serial_test_tc_netkit_multi_links_target(int mode, int target)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_netkit_opts, optl);
+ __u32 prog_ids[3], link_ids[3];
+ __u32 pid1, pid2, lid1, lid2;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err, ifindex;
+
+ err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS,
+ &ifindex, NETKIT_SCRUB_DEFAULT,
+ NETKIT_SCRUB_DEFAULT, 0);
+ if (err)
+ return;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1,
+ target), 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2,
+ target), 0, "tc2_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+
+ ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, false, "seen_eth");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count_ifindex(ifindex, target, 1);
+
+ optq.prog_ids = prog_ids;
+ optq.link_ids = link_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, true, "seen_eth");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
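+	/* Insert tc2 directly in front of tc1 in the mprog chain. */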
+ LIBBPF_OPTS_RESET(optl,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = bpf_program__fd(skel->progs.tc1),
+ );
+
+ link = bpf_program__attach_netkit(skel->progs.tc2, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc2 = link;
+
+ lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
+ ASSERT_NEQ(lid1, lid2, "link_ids_1_2");
+
+ assert_mprog_count_ifindex(ifindex, target, 2);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, true, "seen_eth");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+cleanup:
+ test_tc_link__destroy(skel);
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+ destroy_netkit();
+}
+
+void serial_test_tc_netkit_multi_links(void)
+{
+ serial_test_tc_netkit_multi_links_target(NETKIT_L2, BPF_NETKIT_PRIMARY);
+ serial_test_tc_netkit_multi_links_target(NETKIT_L3, BPF_NETKIT_PRIMARY);
+ serial_test_tc_netkit_multi_links_target(NETKIT_L2, BPF_NETKIT_PEER);
+ serial_test_tc_netkit_multi_links_target(NETKIT_L3, BPF_NETKIT_PEER);
+}
+
+static void serial_test_tc_netkit_multi_opts_target(int mode, int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 pid1, pid2, fd1, fd2;
+ __u32 prog_ids[3];
+ struct test_tc_link *skel;
+ int err, ifindex;
+
+ err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS,
+ &ifindex, NETKIT_SCRUB_DEFAULT,
+ NETKIT_SCRUB_DEFAULT, 0);
+ if (err)
+ return;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+
+ pid1 = id_from_prog_fd(fd1);
+ pid2 = id_from_prog_fd(fd2);
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+
+ ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, false, "seen_eth");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ err = bpf_prog_attach_opts(fd1, ifindex, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count_ifindex(ifindex, target, 1);
+
+ optq.prog_ids = prog_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_fd1;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, true, "seen_eth");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_attach_opts(fd2, ifindex, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_fd1;
+
+ assert_mprog_count_ifindex(ifindex, target, 2);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_fd2;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, true, "seen_eth");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+
+cleanup_fd2:
+ err = bpf_prog_detach_opts(fd2, ifindex, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count_ifindex(ifindex, target, 1);
+cleanup_fd1:
+ err = bpf_prog_detach_opts(fd1, ifindex, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count_ifindex(ifindex, target, 0);
+cleanup:
+ test_tc_link__destroy(skel);
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+ destroy_netkit();
+}
+
+void serial_test_tc_netkit_multi_opts(void)
+{
+ serial_test_tc_netkit_multi_opts_target(NETKIT_L2, BPF_NETKIT_PRIMARY);
+ serial_test_tc_netkit_multi_opts_target(NETKIT_L3, BPF_NETKIT_PRIMARY);
+ serial_test_tc_netkit_multi_opts_target(NETKIT_L2, BPF_NETKIT_PEER);
+ serial_test_tc_netkit_multi_opts_target(NETKIT_L3, BPF_NETKIT_PEER);
+}
+
+void serial_test_tc_netkit_device(void)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_netkit_opts, optl);
+ __u32 prog_ids[2], link_ids[2];
+ __u32 pid1, pid2, lid1;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err, ifindex, ifindex2;
+
+ err = create_netkit(NETKIT_L3, NETKIT_PASS, NETKIT_PASS,
+ &ifindex, NETKIT_SCRUB_DEFAULT,
+ NETKIT_SCRUB_DEFAULT, FLAG_SAME_NETNS);
+ if (err)
+ return;
+
+ ifindex2 = if_nametoindex(netkit_peer);
+ ASSERT_NEQ(ifindex, ifindex2, "ifindex_1_2");
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1,
+ BPF_NETKIT_PRIMARY), 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2,
+ BPF_NETKIT_PEER), 0, "tc2_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3,
+ BPF_NETKIT_PRIMARY), 0, "tc3_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+
+ ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+
+ optq.prog_ids = prog_ids;
+ optq.link_ids = link_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, BPF_NETKIT_PRIMARY, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
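+	/* Attach/query through the peer's ifindex must be rejected; netkit
+	 * only accepts management operations via the primary device.
+	 */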
+ err = bpf_prog_query_opts(ifindex2, BPF_NETKIT_PRIMARY, &optq);
+ ASSERT_EQ(err, -EACCES, "prog_query_should_fail");
+
+ err = bpf_prog_query_opts(ifindex2, BPF_NETKIT_PEER, &optq);
+ ASSERT_EQ(err, -EACCES, "prog_query_should_fail");
+
+ link = bpf_program__attach_netkit(skel->progs.tc2, ifindex2, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ link = bpf_program__attach_netkit(skel->progs.tc3, ifindex2, &optl);
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
+ bpf_link__destroy(link);
+ goto cleanup;
+ }
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+cleanup:
+ test_tc_link__destroy(skel);
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+ destroy_netkit();
+}
+
+static void serial_test_tc_netkit_neigh_links_target(int mode, int target)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_netkit_opts, optl);
+ __u32 prog_ids[2], link_ids[2];
+ __u32 pid1, lid1;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err, ifindex;
+
+ err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS,
+ &ifindex, NETKIT_SCRUB_DEFAULT,
+ NETKIT_SCRUB_DEFAULT, 0);
+ if (err)
+ return;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1,
+ BPF_NETKIT_PRIMARY), 0, "tc1_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+
+ ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, false, "seen_eth");
+
+ link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1));
+
+ assert_mprog_count_ifindex(ifindex, target, 1);
+
+ optq.prog_ids = prog_ids;
+ optq.link_ids = link_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(link_ids, 0, sizeof(link_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(ifindex, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]");
+ ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(__send_icmp(ping_addr_noneigh), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true /* L2: ARP */, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_eth, mode == NETKIT_L3, "seen_eth");
+cleanup:
+ test_tc_link__destroy(skel);
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+ destroy_netkit();
+}
+
+void serial_test_tc_netkit_neigh_links(void)
+{
+ serial_test_tc_netkit_neigh_links_target(NETKIT_L2, BPF_NETKIT_PRIMARY);
+ serial_test_tc_netkit_neigh_links_target(NETKIT_L3, BPF_NETKIT_PRIMARY);
+}
+
+static void serial_test_tc_netkit_pkt_type_mode(int mode)
+{
+ LIBBPF_OPTS(bpf_netkit_opts, optl_nk);
+ LIBBPF_OPTS(bpf_tcx_opts, optl_tcx);
+ int err, ifindex, ifindex2;
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+
+ err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS,
+ &ifindex, NETKIT_SCRUB_DEFAULT,
+ NETKIT_SCRUB_DEFAULT, FLAG_SAME_NETNS);
+ if (err)
+ return;
+
+ ifindex2 = if_nametoindex(netkit_peer);
+ ASSERT_NEQ(ifindex, ifindex2, "ifindex_1_2");
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1,
+ BPF_NETKIT_PRIMARY), 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc7,
+ BPF_TCX_INGRESS), 0, "tc7_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ assert_mprog_count_ifindex(ifindex2, BPF_TCX_INGRESS, 0);
+
+ link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl_nk);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc1 = link;
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+ assert_mprog_count_ifindex(ifindex2, BPF_TCX_INGRESS, 0);
+
+ link = bpf_program__attach_tcx(skel->progs.tc7, ifindex2, &optl_tcx);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc7 = link;
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+ assert_mprog_count_ifindex(ifindex2, BPF_TCX_INGRESS, 1);
+
+ move_netkit();
+
+ tc_skel_reset_all_seen(skel);
+ skel->bss->set_type = true;
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc7, true, "seen_tc7");
+
+ ASSERT_EQ(skel->bss->seen_host, true, "seen_host");
+ ASSERT_EQ(skel->bss->seen_mcast, true, "seen_mcast");
+cleanup:
+ test_tc_link__destroy(skel);
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ destroy_netkit();
+}
+
+void serial_test_tc_netkit_pkt_type(void)
+{
+ serial_test_tc_netkit_pkt_type_mode(NETKIT_L2);
+ serial_test_tc_netkit_pkt_type_mode(NETKIT_L3);
+}
+
+static void serial_test_tc_netkit_scrub_type(int scrub, bool room)
+{
+ LIBBPF_OPTS(bpf_netkit_opts, optl);
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err, ifindex;
+
+ err = create_netkit(NETKIT_L2, NETKIT_PASS, NETKIT_PASS,
+ &ifindex, scrub, scrub,
+ room ? FLAG_ADJUST_ROOM : 0);
+ if (err)
+ return;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc8,
+ BPF_NETKIT_PRIMARY), 0, "tc8_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+
+ ASSERT_EQ(skel->bss->seen_tc8, false, "seen_tc8");
+
+ link = bpf_program__attach_netkit(skel->progs.tc8, ifindex, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+
+ skel->links.tc8 = link;
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+ ASSERT_EQ(skel->bss->seen_tc8, true, "seen_tc8");
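+	/* With the default scrub policy, skb mark and priority are cleared
+	 * when crossing the device; NETKIT_SCRUB_NONE preserves them.
+	 */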
+ ASSERT_EQ(skel->bss->mark, scrub == NETKIT_SCRUB_NONE ? MARK : 0, "mark");
+ ASSERT_EQ(skel->bss->prio, scrub == NETKIT_SCRUB_NONE ? PRIO : 0, "prio");
+ ASSERT_EQ(skel->bss->headroom, room ? NETKIT_HEADROOM : 0, "headroom");
+ ASSERT_EQ(skel->bss->tailroom, room ? NETKIT_TAILROOM : 0, "tailroom");
+cleanup:
+ test_tc_link__destroy(skel);
+
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+ assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0);
+ destroy_netkit();
+}
+
+void serial_test_tc_netkit_scrub(void)
+{
+ serial_test_tc_netkit_scrub_type(NETKIT_SCRUB_DEFAULT, false);
+ serial_test_tc_netkit_scrub_type(NETKIT_SCRUB_NONE, true);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
new file mode 100644
index 000000000000..dd7a138d8c3d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
@@ -0,0 +1,2814 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+#include <uapi/linux/if_link.h>
+#include <net/if.h>
+#include <test_progs.h>
+
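+/* ifindex of the loopback device in the test's fresh network namespace */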
+#define loopback 1
+#define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null"
+
+#include "test_tc_link.skel.h"
+#include "tc_helpers.h"
+
+void test_ns_tc_opts_basic(void)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, id1, id2;
+ struct test_tc_link *skel;
+ __u32 prog_ids[2];
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+
+ ASSERT_NEQ(id1, id2, "prog_ids_1_2");
+
+ assert_mprog_count(BPF_TCX_INGRESS, 0);
+ assert_mprog_count(BPF_TCX_EGRESS, 0);
+
+ ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ err = bpf_prog_attach_opts(fd1, loopback, BPF_TCX_INGRESS, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(BPF_TCX_INGRESS, 1);
+ assert_mprog_count(BPF_TCX_EGRESS, 0);
+
+ optq.prog_ids = prog_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, BPF_TCX_INGRESS, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_in;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+
+ err = bpf_prog_attach_opts(fd2, loopback, BPF_TCX_EGRESS, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_in;
+
+ assert_mprog_count(BPF_TCX_INGRESS, 1);
+ assert_mprog_count(BPF_TCX_EGRESS, 1);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, BPF_TCX_EGRESS, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_eg;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+
+cleanup_eg:
+ err = bpf_prog_detach_opts(fd2, loopback, BPF_TCX_EGRESS, &optd);
+ ASSERT_OK(err, "prog_detach_eg");
+
+ assert_mprog_count(BPF_TCX_INGRESS, 1);
+ assert_mprog_count(BPF_TCX_EGRESS, 0);
+
+cleanup_in:
+ err = bpf_prog_detach_opts(fd1, loopback, BPF_TCX_INGRESS, &optd);
+ ASSERT_OK(err, "prog_detach_in");
+
+ assert_mprog_count(BPF_TCX_INGRESS, 0);
+ assert_mprog_count(BPF_TCX_EGRESS, 0);
+
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+static void test_tc_opts_before_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+ struct test_tc_link *skel;
+ __u32 prog_ids[5];
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+ fd3 = bpf_program__fd(skel->progs.tc3);
+ fd4 = bpf_program__fd(skel->progs.tc4);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+ id4 = id_from_prog_fd(fd4);
+
+ ASSERT_NEQ(id1, id2, "prog_ids_1_2");
+ ASSERT_NEQ(id3, id4, "prog_ids_3_4");
+ ASSERT_NEQ(id2, id3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target;
+
+ assert_mprog_count(target, 2);
+
+ optq.prog_ids = prog_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target2;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = fd2,
+ );
+
+ err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target2;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target3;
+
+ ASSERT_EQ(optq.count, 3, "count");
+ ASSERT_EQ(optq.revision, 4, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE,
+ .relative_id = id1,
+ );
+
+ err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target3;
+
+ assert_mprog_count(target, 4);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], id2, "prog_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
+
+cleanup_target4:
+ err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 3);
+
+cleanup_target3:
+ err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 2);
+
+cleanup_target2:
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 1);
+
+cleanup_target:
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void test_ns_tc_opts_before(void)
+{
+ test_tc_opts_before_target(BPF_TCX_INGRESS);
+ test_tc_opts_before_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_opts_after_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+ struct test_tc_link *skel;
+ __u32 prog_ids[5];
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+ fd3 = bpf_program__fd(skel->progs.tc3);
+ fd4 = bpf_program__fd(skel->progs.tc4);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+ id4 = id_from_prog_fd(fd4);
+
+ ASSERT_NEQ(id1, id2, "prog_ids_1_2");
+ ASSERT_NEQ(id3, id4, "prog_ids_3_4");
+ ASSERT_NEQ(id2, id3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target;
+
+ assert_mprog_count(target, 2);
+
+ optq.prog_ids = prog_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target2;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target2;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target3;
+
+ ASSERT_EQ(optq.count, 3, "count");
+ ASSERT_EQ(optq.revision, 4, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER,
+ .relative_id = id2,
+ );
+
+ err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target3;
+
+ assert_mprog_count(target, 4);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
+
+cleanup_target4:
+ err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 3);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target3;
+
+ ASSERT_EQ(optq.count, 3, "count");
+ ASSERT_EQ(optq.revision, 6, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
+
+cleanup_target3:
+ err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 2);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target2;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 7, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+
+cleanup_target2:
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 1);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 8, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+
+cleanup_target:
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void test_ns_tc_opts_after(void)
+{
+ test_tc_opts_after_target(BPF_TCX_INGRESS);
+ test_tc_opts_after_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_opts_revision_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, id1, id2;
+ struct test_tc_link *skel;
+ __u32 prog_ids[3];
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+
+ ASSERT_NEQ(id1, id2, "prog_ids_1_2");
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
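+	/* The revision moved to 2 after the first attach, so replaying
+	 * with expected_revision 1 must fail with -ESTALE.
+	 */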
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 1,
+ );
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ if (!ASSERT_EQ(err, -ESTALE, "prog_attach"))
+ goto cleanup_target;
+
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 2,
+ );
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target;
+
+ assert_mprog_count(target, 2);
+
+ optq.prog_ids = prog_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target2;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+
+ LIBBPF_OPTS_RESET(optd,
+ .expected_revision = 2,
+ );
+
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_EQ(err, -ESTALE, "prog_detach");
+ assert_mprog_count(target, 2);
+
+cleanup_target2:
+ LIBBPF_OPTS_RESET(optd,
+ .expected_revision = 3,
+ );
+
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 1);
+
+cleanup_target:
+ LIBBPF_OPTS_RESET(optd);
+
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void test_ns_tc_opts_revision(void)
+{
+ test_tc_opts_revision_target(BPF_TCX_INGRESS);
+ test_tc_opts_revision_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_chain_classic(int target, bool chain_tc_old)
+{
+ LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
+ LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ bool hook_created = false, tc_attached = false;
+ __u32 fd1, fd2, fd3, id1, id2, id3;
+ struct test_tc_link *skel;
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+ fd3 = bpf_program__fd(skel->progs.tc3);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+
+ ASSERT_NEQ(id1, id2, "prog_ids_1_2");
+ ASSERT_NEQ(id2, id3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ if (chain_tc_old) {
+ tc_hook.attach_point = target == BPF_TCX_INGRESS ?
+ BPF_TC_INGRESS : BPF_TC_EGRESS;
+ err = bpf_tc_hook_create(&tc_hook);
+ if (err == 0)
+ hook_created = true;
+ err = err == -EEXIST ? 0 : err;
+ if (!ASSERT_OK(err, "bpf_tc_hook_create"))
+ goto cleanup;
+
+ tc_opts.prog_fd = fd3;
+ err = bpf_tc_attach(&tc_hook, &tc_opts);
+ if (!ASSERT_OK(err, "bpf_tc_attach"))
+ goto cleanup;
+ tc_attached = true;
+ }
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_detach;
+
+ assert_mprog_count(target, 2);
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
+
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ if (!ASSERT_OK(err, "prog_detach"))
+ goto cleanup_detach;
+
+ assert_mprog_count(target, 1);
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
+
+cleanup_detach:
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ if (!ASSERT_OK(err, "prog_detach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 0);
+cleanup:
+ if (tc_attached) {
+ tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
+ err = bpf_tc_detach(&tc_hook, &tc_opts);
+ ASSERT_OK(err, "bpf_tc_detach");
+ }
+ if (hook_created) {
+ tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
+ bpf_tc_hook_destroy(&tc_hook);
+ }
+ test_tc_link__destroy(skel);
+ assert_mprog_count(target, 0);
+}
+
+void test_ns_tc_opts_chain_classic(void)
+{
+ test_tc_chain_classic(BPF_TCX_INGRESS, false);
+ test_tc_chain_classic(BPF_TCX_EGRESS, false);
+ test_tc_chain_classic(BPF_TCX_INGRESS, true);
+ test_tc_chain_classic(BPF_TCX_EGRESS, true);
+}
+
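+/* Exercise BPF_F_REPLACE: swap fd2 for fd3 (and back) in place while the
+ * revision advances and the chain length stays at two, then verify the
+ * invalid combinations: replacing with an already-attached program fails
+ * with -EEXIST, and pairing BPF_F_REPLACE with a relative constraint
+ * that does not match the replaced slot fails with -ERANGE.
+ */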
+static void test_tc_opts_replace_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, fd3, id1, id2, id3, detach_fd;
+ __u32 prog_ids[4], prog_flags[4];
+ struct test_tc_link *skel;
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+ fd3 = bpf_program__fd(skel->progs.tc3);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+
+ ASSERT_NEQ(id1, id2, "prog_ids_1_2");
+ ASSERT_NEQ(id2, id3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE,
+ .relative_id = id1,
+ .expected_revision = 2,
+ );
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target;
+
+ detach_fd = fd2;
+
+ assert_mprog_count(target, 2);
+
+ optq.prog_attach_flags = prog_flags;
+ optq.prog_ids = prog_ids;
+
+ memset(prog_flags, 0, sizeof(prog_flags));
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target2;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+
+ ASSERT_EQ(optq.prog_attach_flags[0], 0, "prog_flags[0]");
+ ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]");
+ ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_REPLACE,
+ .replace_prog_fd = fd2,
+ .expected_revision = 3,
+ );
+
+ err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target2;
+
+ detach_fd = fd3;
+
+ assert_mprog_count(target, 2);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target2;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 4, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id3, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_REPLACE | BPF_F_BEFORE,
+ .replace_prog_fd = fd3,
+ .relative_fd = fd1,
+ .expected_revision = 4,
+ );
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target2;
+
+ detach_fd = fd2;
+
+ assert_mprog_count(target, 2);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target2;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_REPLACE,
+ .replace_prog_fd = fd2,
+ );
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ ASSERT_EQ(err, -EEXIST, "prog_attach");
+ assert_mprog_count(target, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_REPLACE | BPF_F_AFTER,
+ .replace_prog_fd = fd2,
+ .relative_fd = fd1,
+ .expected_revision = 5,
+ );
+
+ err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+ ASSERT_EQ(err, -ERANGE, "prog_attach");
+ assert_mprog_count(target, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE | BPF_F_AFTER | BPF_F_REPLACE,
+ .replace_prog_fd = fd2,
+ .relative_fd = fd1,
+ .expected_revision = 5,
+ );
+
+ err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+ ASSERT_EQ(err, -ERANGE, "prog_attach");
+ assert_mprog_count(target, 2);
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_BEFORE,
+ .relative_id = id1,
+ .expected_revision = 5,
+ );
+
+cleanup_target2:
+ err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 1);
+
+cleanup_target:
+ LIBBPF_OPTS_RESET(optd);
+
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void test_ns_tc_opts_replace(void)
+{
+ test_tc_opts_replace_target(BPF_TCX_INGRESS);
+ test_tc_opts_replace_target(BPF_TCX_EGRESS);
+}
+
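+/* Probe invalid flag/fd/id combinations on an empty and on a
+ * single-entry chain: conflicting BPF_F_BEFORE | BPF_F_AFTER, BPF_F_ID
+ * without a valid id, relative fds that are not attached, re-attaching
+ * an already-attached program, and BPF_F_REPLACE misuse; each must fail
+ * with the asserted errno and leave the mprog count unchanged.
+ */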
+static void test_tc_opts_invalid_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ __u32 fd1, fd2, id1, id2;
+ struct test_tc_link *skel;
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+
+ ASSERT_NEQ(id1, id2, "prog_ids_1_2");
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE | BPF_F_AFTER,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ ASSERT_EQ(err, -ERANGE, "prog_attach");
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE | BPF_F_ID,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ ASSERT_EQ(err, -ENOENT, "prog_attach");
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER | BPF_F_ID,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ ASSERT_EQ(err, -ENOENT, "prog_attach");
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .relative_fd = fd2,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach");
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE | BPF_F_AFTER,
+ .relative_fd = fd2,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ ASSERT_EQ(err, -ENOENT, "prog_attach");
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ID,
+ .relative_id = id2,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach");
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ ASSERT_EQ(err, -ENOENT, "prog_attach");
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ ASSERT_EQ(err, -ENOENT, "prog_attach");
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(opta);
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(opta);
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ ASSERT_EQ(err, -EEXIST, "prog_attach");
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ ASSERT_EQ(err, -EEXIST, "prog_attach");
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ ASSERT_EQ(err, -EEXIST, "prog_attach");
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_REPLACE,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach_x1");
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_REPLACE,
+ .replace_prog_fd = fd1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ ASSERT_EQ(err, -EEXIST, "prog_attach");
+ assert_mprog_count(target, 1);
+
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void test_ns_tc_opts_invalid(void)
+{
+ test_tc_opts_invalid_target(BPF_TCX_INGRESS);
+ test_tc_opts_invalid_target(BPF_TCX_EGRESS);
+}
+
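+/* BPF_F_BEFORE with no relative anchor prepends to the head of the
+ * chain: the query must report the programs in reverse attach order
+ * (fd4..fd1) and all four must fire on a ping.
+ */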
+static void test_tc_opts_prepend_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+ struct test_tc_link *skel;
+ __u32 prog_ids[5];
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+ fd3 = bpf_program__fd(skel->progs.tc3);
+ fd4 = bpf_program__fd(skel->progs.tc4);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+ id4 = id_from_prog_fd(fd4);
+
+ ASSERT_NEQ(id1, id2, "prog_ids_1_2");
+ ASSERT_NEQ(id3, id4, "prog_ids_3_4");
+ ASSERT_NEQ(id2, id3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE,
+ );
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target;
+
+ assert_mprog_count(target, 2);
+
+ optq.prog_ids = prog_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target2;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE,
+ );
+
+ err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target2;
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE,
+ );
+
+ err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target3;
+
+ assert_mprog_count(target, 4);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], id1, "prog_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
+
+cleanup_target4:
+ err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 3);
+
+cleanup_target3:
+ err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 2);
+
+cleanup_target2:
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 1);
+
+cleanup_target:
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void test_ns_tc_opts_prepend(void)
+{
+ test_tc_opts_prepend_target(BPF_TCX_INGRESS);
+ test_tc_opts_prepend_target(BPF_TCX_EGRESS);
+}
+
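+/* Mirror of the prepend test: BPF_F_AFTER with no relative anchor
+ * appends to the tail, so the query must report attach order (fd1..fd4)
+ * and all four programs must fire on a ping.
+ */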
+static void test_tc_opts_append_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+ struct test_tc_link *skel;
+ __u32 prog_ids[5];
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+ fd3 = bpf_program__fd(skel->progs.tc3);
+ fd4 = bpf_program__fd(skel->progs.tc4);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+ id4 = id_from_prog_fd(fd4);
+
+ ASSERT_NEQ(id1, id2, "prog_ids_1_2");
+ ASSERT_NEQ(id3, id4, "prog_ids_3_4");
+ ASSERT_NEQ(id2, id3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER,
+ );
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target;
+
+ assert_mprog_count(target, 2);
+
+ optq.prog_ids = prog_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target2;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 3, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER,
+ );
+
+ err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target2;
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER,
+ );
+
+ err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_target3;
+
+ assert_mprog_count(target, 4);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_target4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+ ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
+ ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
+ ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
+
+cleanup_target4:
+ err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 3);
+
+cleanup_target3:
+ err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 2);
+
+cleanup_target2:
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 1);
+
+cleanup_target:
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void test_ns_tc_opts_append(void)
+{
+ test_tc_opts_append_target(BPF_TCX_INGRESS);
+ test_tc_opts_append_target(BPF_TCX_EGRESS);
+}
+
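+/* Attach four programs to a freshly created veth and delete the device
+ * while everything is still attached: the kernel must tear down the tcx
+ * entries together with the netdev.
+ */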
+static void test_tc_opts_dev_cleanup_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+ struct test_tc_link *skel;
+ int err, ifindex;
+
+ ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth");
+ ifindex = if_nametoindex("tcx_opts1");
+ ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+ fd3 = bpf_program__fd(skel->progs.tc3);
+ fd4 = bpf_program__fd(skel->progs.tc4);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+ id4 = id_from_prog_fd(fd4);
+
+ ASSERT_NEQ(id1, id2, "prog_ids_1_2");
+ ASSERT_NEQ(id3, id4, "prog_ids_3_4");
+ ASSERT_NEQ(id2, id3, "prog_ids_2_3");
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+
+ err = bpf_prog_attach_opts(fd1, ifindex, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count_ifindex(ifindex, target, 1);
+
+ err = bpf_prog_attach_opts(fd2, ifindex, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup1;
+
+ assert_mprog_count_ifindex(ifindex, target, 2);
+
+ err = bpf_prog_attach_opts(fd3, ifindex, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup2;
+
+ assert_mprog_count_ifindex(ifindex, target, 3);
+
+ err = bpf_prog_attach_opts(fd4, ifindex, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup3;
+
+ assert_mprog_count_ifindex(ifindex, target, 4);
+
+	ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
+	ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
+	ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
+	test_tc_link__destroy(skel);
+	return;
+cleanup3:
+	err = bpf_prog_detach_opts(fd3, ifindex, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+
+ assert_mprog_count_ifindex(ifindex, target, 2);
+cleanup2:
+	err = bpf_prog_detach_opts(fd2, ifindex, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+
+ assert_mprog_count_ifindex(ifindex, target, 1);
+cleanup1:
+	err = bpf_prog_detach_opts(fd1, ifindex, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+cleanup:
+ test_tc_link__destroy(skel);
+
+ ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
+ ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
+ ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
+}
+
+void test_ns_tc_opts_dev_cleanup(void)
+{
+ test_tc_opts_dev_cleanup_target(BPF_TCX_INGRESS);
+ test_tc_opts_dev_cleanup_target(BPF_TCX_EGRESS);
+}
+
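+/* Mix opts-based attachments with bpf_link-based ones (tc2/tc4 attached
+ * via bpf_program__attach_tcx): replacing a link-owned slot through the
+ * opts API must fail with -EBUSY, attaching a program that is already
+ * attached fails with -EEXIST, and the query must report link ids only
+ * for the link-managed entries.
+ */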
+static void test_tc_opts_mixed_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ LIBBPF_OPTS(bpf_tcx_opts, optl);
+ __u32 pid1, pid2, pid3, pid4, lid2, lid4;
+ __u32 prog_flags[4], link_flags[4];
+ __u32 prog_ids[4], link_ids[4];
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ int err, detach_fd;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
+ 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
+ 0, "tc2_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
+ 0, "tc3_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
+ 0, "tc4_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+ pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
+ pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));
+
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+ ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
+ ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1),
+ loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ detach_fd = bpf_program__fd(skel->progs.tc1);
+
+ assert_mprog_count(target, 1);
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup1;
+ skel->links.tc2 = link;
+
+ lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));
+
+ assert_mprog_count(target, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_REPLACE,
+ .replace_prog_fd = bpf_program__fd(skel->progs.tc1),
+ );
+
+ err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc2),
+ loopback, target, &opta);
+ ASSERT_EQ(err, -EEXIST, "prog_attach");
+
+ assert_mprog_count(target, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_REPLACE,
+ .replace_prog_fd = bpf_program__fd(skel->progs.tc2),
+ );
+
+ err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1),
+ loopback, target, &opta);
+ ASSERT_EQ(err, -EEXIST, "prog_attach");
+
+ assert_mprog_count(target, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_REPLACE,
+ .replace_prog_fd = bpf_program__fd(skel->progs.tc2),
+ );
+
+ err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc3),
+ loopback, target, &opta);
+ ASSERT_EQ(err, -EBUSY, "prog_attach");
+
+ assert_mprog_count(target, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_REPLACE,
+ .replace_prog_fd = bpf_program__fd(skel->progs.tc1),
+ );
+
+ err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc3),
+ loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup1;
+
+ detach_fd = bpf_program__fd(skel->progs.tc3);
+
+ assert_mprog_count(target, 2);
+
+ link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup1;
+ skel->links.tc4 = link;
+
+ lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4));
+
+ assert_mprog_count(target, 3);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_REPLACE,
+ .replace_prog_fd = bpf_program__fd(skel->progs.tc4),
+ );
+
+ err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc2),
+ loopback, target, &opta);
+ ASSERT_EQ(err, -EEXIST, "prog_attach");
+
+ optq.prog_ids = prog_ids;
+ optq.prog_attach_flags = prog_flags;
+ optq.link_ids = link_ids;
+ optq.link_attach_flags = link_flags;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ memset(prog_flags, 0, sizeof(prog_flags));
+ memset(link_ids, 0, sizeof(link_ids));
+ memset(link_flags, 0, sizeof(link_flags));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup1;
+
+ ASSERT_EQ(optq.count, 3, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], pid3, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_attach_flags[0], 0, "prog_flags[0]");
+ ASSERT_EQ(optq.link_ids[0], 0, "link_ids[0]");
+ ASSERT_EQ(optq.link_attach_flags[0], 0, "link_flags[0]");
+ ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]");
+ ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]");
+ ASSERT_EQ(optq.link_attach_flags[1], 0, "link_flags[1]");
+ ASSERT_EQ(optq.prog_ids[2], pid4, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]");
+ ASSERT_EQ(optq.link_ids[2], lid4, "link_ids[2]");
+ ASSERT_EQ(optq.link_attach_flags[2], 0, "link_flags[2]");
+ ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
+ ASSERT_EQ(optq.prog_attach_flags[3], 0, "prog_flags[3]");
+ ASSERT_EQ(optq.link_ids[3], 0, "link_ids[3]");
+ ASSERT_EQ(optq.link_attach_flags[3], 0, "link_flags[3]");
+
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+cleanup1:
+ err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 2);
+
+cleanup:
+ test_tc_link__destroy(skel);
+ assert_mprog_count(target, 0);
+}
+
+void test_ns_tc_opts_mixed(void)
+{
+ test_tc_opts_mixed_target(BPF_TCX_INGRESS);
+ test_tc_opts_mixed_target(BPF_TCX_EGRESS);
+}
+
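+/* One opts-attached program followed by one link-attached program:
+ * detaching by position with BPF_F_AFTER would hit the link-owned tail
+ * and must fail with -EBUSY, while BPF_F_BEFORE removes the
+ * opts-attached head.
+ */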
+static void test_tc_opts_demixed_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_tcx_opts, optl);
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+ __u32 pid1, pid2;
+ int err;
+
+ skel = test_tc_link__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
+ 0, "tc1_attach_type");
+ ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
+ 0, "tc2_attach_type");
+
+ err = test_tc_link__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
+ pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
+ ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
+
+ assert_mprog_count(target, 0);
+
+ err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1),
+ loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup1;
+ skel->links.tc2 = link;
+
+ assert_mprog_count(target, 2);
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_AFTER,
+ );
+
+ err = bpf_prog_detach_opts(0, loopback, target, &optd);
+ ASSERT_EQ(err, -EBUSY, "prog_detach");
+
+ assert_mprog_count(target, 2);
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_BEFORE,
+ );
+
+ err = bpf_prog_detach_opts(0, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+
+ assert_mprog_count(target, 1);
+ goto cleanup;
+
+cleanup1:
+ err = bpf_prog_detach_opts(bpf_program__fd(skel->progs.tc1),
+ loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 2);
+
+cleanup:
+ test_tc_link__destroy(skel);
+ assert_mprog_count(target, 0);
+}
+
+void test_ns_tc_opts_demixed(void)
+{
+ test_tc_opts_demixed_target(BPF_TCX_INGRESS);
+ test_tc_opts_demixed_target(BPF_TCX_EGRESS);
+}
+
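+/* Detach purely by position: with four programs attached, BPF_F_BEFORE
+ * with fd 0 pops the head and BPF_F_AFTER pops the tail; on an empty
+ * chain both must return -ENOENT.
+ */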
+static void test_tc_opts_detach_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+ struct test_tc_link *skel;
+ __u32 prog_ids[5];
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+ fd3 = bpf_program__fd(skel->progs.tc3);
+ fd4 = bpf_program__fd(skel->progs.tc4);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+ id4 = id_from_prog_fd(fd4);
+
+ ASSERT_NEQ(id1, id2, "prog_ids_1_2");
+ ASSERT_NEQ(id3, id4, "prog_ids_3_4");
+ ASSERT_NEQ(id2, id3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(target, 2);
+
+ err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(target, 3);
+
+ err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(target, 4);
+
+ optq.prog_ids = prog_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_BEFORE,
+ );
+
+ err = bpf_prog_detach_opts(0, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+
+ assert_mprog_count(target, 3);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 3, "count");
+ ASSERT_EQ(optq.revision, 6, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_AFTER,
+ );
+
+ err = bpf_prog_detach_opts(0, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+
+ assert_mprog_count(target, 2);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 7, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+
+ LIBBPF_OPTS_RESET(optd);
+
+ err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 1);
+
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_BEFORE,
+ );
+
+ err = bpf_prog_detach_opts(0, loopback, target, &optd);
+ ASSERT_EQ(err, -ENOENT, "prog_detach");
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_AFTER,
+ );
+
+ err = bpf_prog_detach_opts(0, loopback, target, &optd);
+ ASSERT_EQ(err, -ENOENT, "prog_detach");
+ goto cleanup;
+
+cleanup4:
+ err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 3);
+
+cleanup3:
+ err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 2);
+
+cleanup2:
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 1);
+
+cleanup1:
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void test_ns_tc_opts_detach(void)
+{
+ test_tc_opts_detach_target(BPF_TCX_INGRESS);
+ test_tc_opts_detach_target(BPF_TCX_EGRESS);
+}
+
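+/* Detach with BPF_F_BEFORE plus a relative anchor: the program being
+ * detached must sit directly in front of relative_fd, otherwise the
+ * kernel rejects the request with -ENOENT or -ERANGE; with fd 0 the
+ * entry just before the anchor (or the head, without an anchor) goes.
+ */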
+static void test_tc_opts_detach_before_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+ struct test_tc_link *skel;
+ __u32 prog_ids[5];
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+ fd3 = bpf_program__fd(skel->progs.tc3);
+ fd4 = bpf_program__fd(skel->progs.tc4);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+ id4 = id_from_prog_fd(fd4);
+
+ ASSERT_NEQ(id1, id2, "prog_ids_1_2");
+ ASSERT_NEQ(id3, id4, "prog_ids_3_4");
+ ASSERT_NEQ(id2, id3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(target, 2);
+
+ err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(target, 3);
+
+ err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(target, 4);
+
+ optq.prog_ids = prog_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = fd2,
+ );
+
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+
+ assert_mprog_count(target, 3);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 3, "count");
+ ASSERT_EQ(optq.revision, 6, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = fd2,
+ );
+
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_EQ(err, -ENOENT, "prog_detach");
+ assert_mprog_count(target, 3);
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = fd4,
+ );
+
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_EQ(err, -ERANGE, "prog_detach");
+ assert_mprog_count(target, 3);
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_EQ(err, -ENOENT, "prog_detach");
+ assert_mprog_count(target, 3);
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = fd3,
+ );
+
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+
+ assert_mprog_count(target, 2);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 7, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id3, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id4, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_BEFORE,
+ .relative_fd = fd4,
+ );
+
+ err = bpf_prog_detach_opts(0, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+
+ assert_mprog_count(target, 1);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 8, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_BEFORE,
+ );
+
+ err = bpf_prog_detach_opts(0, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+
+ assert_mprog_count(target, 0);
+ goto cleanup;
+
+cleanup4:
+ err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 3);
+
+cleanup3:
+ err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 2);
+
+cleanup2:
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 1);
+
+cleanup1:
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void test_ns_tc_opts_detach_before(void)
+{
+ test_tc_opts_detach_before_target(BPF_TCX_INGRESS);
+ test_tc_opts_detach_before_target(BPF_TCX_EGRESS);
+}
+
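+/* Counterpart using BPF_F_AFTER: the program being detached must sit
+ * directly behind relative_fd (-ENOENT/-ERANGE otherwise); with fd 0
+ * the entry right after the anchor, or the tail when no anchor is
+ * given, is removed.
+ */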
+static void test_tc_opts_detach_after_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+ struct test_tc_link *skel;
+ __u32 prog_ids[5];
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+ fd3 = bpf_program__fd(skel->progs.tc3);
+ fd4 = bpf_program__fd(skel->progs.tc4);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+ id4 = id_from_prog_fd(fd4);
+
+ ASSERT_NEQ(id1, id2, "prog_ids_1_2");
+ ASSERT_NEQ(id3, id4, "prog_ids_3_4");
+ ASSERT_NEQ(id2, id3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(target, 2);
+
+ err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(target, 3);
+
+ err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(target, 4);
+
+ optq.prog_ids = prog_ids;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_AFTER,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+
+ assert_mprog_count(target, 3);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 3, "count");
+ ASSERT_EQ(optq.revision, 6, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_AFTER,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_EQ(err, -ENOENT, "prog_detach");
+ assert_mprog_count(target, 3);
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_AFTER,
+ .relative_fd = fd4,
+ );
+
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_EQ(err, -ERANGE, "prog_detach");
+ assert_mprog_count(target, 3);
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_AFTER,
+ .relative_fd = fd3,
+ );
+
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_EQ(err, -ERANGE, "prog_detach");
+ assert_mprog_count(target, 3);
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_AFTER,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_EQ(err, -ERANGE, "prog_detach");
+ assert_mprog_count(target, 3);
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_AFTER,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+
+ assert_mprog_count(target, 2);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 2, "count");
+ ASSERT_EQ(optq.revision, 7, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id4, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_AFTER,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_detach_opts(0, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+
+ assert_mprog_count(target, 1);
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 8, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+
+ LIBBPF_OPTS_RESET(optd,
+ .flags = BPF_F_AFTER,
+ );
+
+ err = bpf_prog_detach_opts(0, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+
+ assert_mprog_count(target, 0);
+ goto cleanup;
+
+cleanup4:
+ err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 3);
+
+cleanup3:
+ err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 2);
+
+cleanup2:
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 1);
+
+cleanup1:
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void test_ns_tc_opts_detach_after(void)
+{
+ test_tc_opts_detach_after_target(BPF_TCX_INGRESS);
+ test_tc_opts_detach_after_target(BPF_TCX_EGRESS);
+}
+
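+/* Detaching from an empty chain must return -ENOENT, regardless of
+ * whether a classic tc qdisc hook exists on the device.
+ */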
+static void test_tc_opts_delete_empty(int target, bool chain_tc_old)
+{
+ LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ int err;
+
+ assert_mprog_count(target, 0);
+ if (chain_tc_old) {
+ tc_hook.attach_point = target == BPF_TCX_INGRESS ?
+ BPF_TC_INGRESS : BPF_TC_EGRESS;
+ err = bpf_tc_hook_create(&tc_hook);
+ ASSERT_OK(err, "bpf_tc_hook_create");
+ assert_mprog_count(target, 0);
+ }
+ err = bpf_prog_detach_opts(0, loopback, target, &optd);
+ ASSERT_EQ(err, -ENOENT, "prog_detach");
+ if (chain_tc_old) {
+ tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
+ bpf_tc_hook_destroy(&tc_hook);
+ }
+ assert_mprog_count(target, 0);
+}
+
+void test_ns_tc_opts_delete_empty(void)
+{
+ test_tc_opts_delete_empty(BPF_TCX_INGRESS, false);
+ test_tc_opts_delete_empty(BPF_TCX_EGRESS, false);
+ test_tc_opts_delete_empty(BPF_TCX_INGRESS, true);
+ test_tc_opts_delete_empty(BPF_TCX_EGRESS, true);
+}
+
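+/* Chain a classic cls_bpf filter (tc5) behind a single tcx entry: while
+ * tc6 is attached the classic filter stays unseen, replacing tc6 with
+ * tc4 lets execution fall through to tc5, and once the tcx entry is
+ * detached only tc5 fires.
+ */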
+static void test_tc_chain_mixed(int target)
+{
+ LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
+ LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ __u32 fd1, fd2, fd3, id1, id2, id3;
+ struct test_tc_link *skel;
+ int err, detach_fd;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc4);
+ fd2 = bpf_program__fd(skel->progs.tc5);
+ fd3 = bpf_program__fd(skel->progs.tc6);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+
+ ASSERT_NEQ(id1, id2, "prog_ids_1_2");
+ ASSERT_NEQ(id2, id3, "prog_ids_2_3");
+
+ assert_mprog_count(target, 0);
+
+ tc_hook.attach_point = target == BPF_TCX_INGRESS ?
+ BPF_TC_INGRESS : BPF_TC_EGRESS;
+ err = bpf_tc_hook_create(&tc_hook);
+ err = err == -EEXIST ? 0 : err;
+ if (!ASSERT_OK(err, "bpf_tc_hook_create"))
+ goto cleanup;
+
+ tc_opts.prog_fd = fd2;
+ err = bpf_tc_attach(&tc_hook, &tc_opts);
+ if (!ASSERT_OK(err, "bpf_tc_attach"))
+ goto cleanup_hook;
+
+ err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_filter;
+
+ detach_fd = fd3;
+
+ assert_mprog_count(target, 1);
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
+ ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5");
+ ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6");
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_REPLACE,
+ .replace_prog_fd = fd3,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup_opts;
+
+ detach_fd = fd1;
+
+ assert_mprog_count(target, 1);
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
+ ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
+ ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
+
+cleanup_opts:
+ err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+
+ tc_skel_reset_all_seen(skel);
+ ASSERT_OK(system(ping_cmd), ping_cmd);
+
+ ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
+ ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
+ ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
+
+cleanup_filter:
+ tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
+ err = bpf_tc_detach(&tc_hook, &tc_opts);
+ ASSERT_OK(err, "bpf_tc_detach");
+
+cleanup_hook:
+ tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
+ bpf_tc_hook_destroy(&tc_hook);
+
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void test_ns_tc_opts_chain_mixed(void)
+{
+ test_tc_chain_mixed(BPF_TCX_INGRESS);
+ test_tc_chain_mixed(BPF_TCX_EGRESS);
+}
+
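+/* Load a minimal "return 0" SCHED_CLS program for the mass-attach test
+ * and check that a successful load with log_level 0 leaves the log
+ * buffer empty.
+ */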
+static int generate_dummy_prog(void)
+{
+ const struct bpf_insn prog_insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ const size_t prog_insn_cnt = ARRAY_SIZE(prog_insns);
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+ const size_t log_buf_sz = 256;
+ char log_buf[log_buf_sz];
+ int fd = -1;
+
+ opts.log_buf = log_buf;
+ opts.log_size = log_buf_sz;
+
+ log_buf[0] = '\0';
+ opts.log_level = 0;
+ fd = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, "tcx_prog", "GPL",
+ prog_insns, prog_insn_cnt, &opts);
+ ASSERT_STREQ(log_buf, "", "log_0");
+ ASSERT_GE(fd, 0, "prog_fd");
+ return fd;
+}
+
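+/* Fill the chain to its maximum of 63 entries on a scratch veth; attach
+ * number 64 must fail with -ERANGE for plain, BPF_F_BEFORE and
+ * BPF_F_AFTER attachments, with and without a relative anchor.
+ */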
+static void test_tc_opts_max_target(int target, int flags, bool relative)
+{
+ int err, ifindex, i, prog_fd, last_fd = -1;
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ const int max_progs = 63;
+
+ ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth");
+ ifindex = if_nametoindex("tcx_opts1");
+ ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+
+ for (i = 0; i < max_progs; i++) {
+ prog_fd = generate_dummy_prog();
+ if (!ASSERT_GE(prog_fd, 0, "dummy_prog"))
+ goto cleanup;
+ err = bpf_prog_attach_opts(prog_fd, ifindex, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+ assert_mprog_count_ifindex(ifindex, target, i + 1);
+ if (i == max_progs - 1 && relative)
+ last_fd = prog_fd;
+ else
+ close(prog_fd);
+ }
+
+ prog_fd = generate_dummy_prog();
+ if (!ASSERT_GE(prog_fd, 0, "dummy_prog"))
+ goto cleanup;
+ opta.flags = flags;
+ if (last_fd > 0)
+ opta.relative_fd = last_fd;
+ err = bpf_prog_attach_opts(prog_fd, ifindex, target, &opta);
+ ASSERT_EQ(err, -ERANGE, "prog_64_attach");
+ assert_mprog_count_ifindex(ifindex, target, max_progs);
+ close(prog_fd);
+cleanup:
+ if (last_fd > 0)
+ close(last_fd);
+ ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
+ ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
+ ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
+}
+
+void test_ns_tc_opts_max(void)
+{
+ test_tc_opts_max_target(BPF_TCX_INGRESS, 0, false);
+ test_tc_opts_max_target(BPF_TCX_EGRESS, 0, false);
+
+ test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_BEFORE, false);
+ test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_BEFORE, true);
+
+ test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_AFTER, true);
+ test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_AFTER, false);
+}
+
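+/* Walk the BPF_PROG_QUERY uapi corner cases directly: two-step queries
+ * via libbpf and via bpf(2), an undersized prog_ids buffer (-ENOSPC
+ * with a truncated copy), an oversized buffer, a NULL array with
+ * nonzero count, a non-NULL array with zero count, and invalid
+ * query/attach flags (-EINVAL).
+ */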
+static void test_tc_opts_query_target(int target)
+{
+ const size_t attr_size = offsetofend(union bpf_attr, query);
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+ struct test_tc_link *skel;
+ union bpf_attr attr;
+ __u32 prog_ids[10];
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+ fd3 = bpf_program__fd(skel->progs.tc3);
+ fd4 = bpf_program__fd(skel->progs.tc4);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+ id4 = id_from_prog_fd(fd4);
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 2,
+ );
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(target, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 3,
+ );
+
+ err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(target, 3);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 4,
+ );
+
+ err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(target, 4);
+
+	/* Test 1: Query twice via libbpf API: count only, then with prog_ids */
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids, NULL, "prog_ids");
+ ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.prog_ids = prog_ids;
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+	/* Test 2: Same two-step query via bpf_attr & bpf(2) directly */
+ memset(&attr, 0, attr_size);
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = target;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(attr.query.prog_ids, 0, "prog_ids");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ attr.query.prog_ids = ptr_to_u64(prog_ids);
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
+ ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(prog_ids[2], id3, "prog_ids[2]");
+ ASSERT_EQ(prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ /* Test 3: Query with smaller prog_ids array */
+ memset(&attr, 0, attr_size);
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = target;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ attr.query.prog_ids = ptr_to_u64(prog_ids);
+ attr.query.count = 2;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ ASSERT_EQ(err, -1, "prog_query_should_fail");
+ ASSERT_EQ(errno, ENOSPC, "prog_query_should_fail");
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
+ ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]");
+ ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ /* Test 4: Query with larger prog_ids array */
+ memset(&attr, 0, attr_size);
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = target;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ attr.query.prog_ids = ptr_to_u64(prog_ids);
+ attr.query.count = 10;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
+ ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(prog_ids[2], id3, "prog_ids[2]");
+ ASSERT_EQ(prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ /* Test 5: Query with NULL prog_ids array but with count > 0 */
+ memset(&attr, 0, attr_size);
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = target;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
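+	/* attr.query.prog_ids stays NULL here; any non-zero count works, and
+	 * sizeof() of the (unused) local array is just a convenient value.
+	 */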
+ attr.query.count = sizeof(prog_ids);
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(prog_ids[0], 0, "prog_ids[0]");
+ ASSERT_EQ(prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]");
+ ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(attr.query.prog_ids, 0, "prog_ids");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ /* Test 6: Query with non-NULL prog_ids array but with count == 0 */
+ memset(&attr, 0, attr_size);
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = target;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ attr.query.prog_ids = ptr_to_u64(prog_ids);
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(prog_ids[0], 0, "prog_ids[0]");
+ ASSERT_EQ(prog_ids[1], 0, "prog_ids[1]");
+ ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]");
+ ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]");
+ ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ /* Test 7: Query with invalid flags */
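+	/* attr is deliberately reused from Test 6; only the flag under test
+	 * changes between the two queries below.
+	 */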
+ attr.query.attach_flags = 0;
+ attr.query.query_flags = 1;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ ASSERT_EQ(err, -1, "prog_query_should_fail");
+ ASSERT_EQ(errno, EINVAL, "prog_query_should_fail");
+
+ attr.query.attach_flags = 1;
+ attr.query.query_flags = 0;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ ASSERT_EQ(err, -1, "prog_query_should_fail");
+ ASSERT_EQ(errno, EINVAL, "prog_query_should_fail");
+
+cleanup4:
+ err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 3);
+
+cleanup3:
+ err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 2);
+
+cleanup2:
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 1);
+
+cleanup1:
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void test_ns_tc_opts_query(void)
+{
+ test_tc_opts_query_target(BPF_TCX_INGRESS);
+ test_tc_opts_query_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_opts_query_attach_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ struct test_tc_link *skel;
+ __u32 prog_ids[2];
+ __u32 fd1, id1;
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ id1 = id_from_prog_fd(fd1);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 0, "count");
+ ASSERT_EQ(optq.revision, 1, "revision");
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = optq.revision,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.prog_ids = prog_ids;
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup1;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+
+cleanup1:
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void test_ns_tc_opts_query_attach(void)
+{
+ test_tc_opts_query_attach_target(BPF_TCX_INGRESS);
+ test_tc_opts_query_attach_target(BPF_TCX_EGRESS);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
new file mode 100644
index 000000000000..76d72a59365e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
@@ -0,0 +1,1303 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * This test sets up 3 netns (src <-> fwd <-> dst). There is no direct veth
+ * link between src and dst; the fwd netns has a veth link to each of them.
+ * The client lives in src and the server in dst. The test installs a TC BPF
+ * program on each host-facing veth in fwd which calls into i)
+ * bpf_redirect_neigh() to populate the neighbor address and redirect, or
+ * ii) bpf_redirect_peer() to switch namespaces from the ingress side; it
+ * also installs a checker prog on the egress side to drop unexpected
+ * traffic.
+ */
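+
+/* For orientation, the redirecting ingress programs installed below boil
+ * down to a one-liner along the lines of the following sketch; the real
+ * programs live in progs/test_tc_peer.c and progs/test_tc_neigh.c:
+ *
+ *	SEC("tc")
+ *	int tc_src(struct __sk_buff *skb)
+ *	{
+ *		return bpf_redirect_peer(IFINDEX_DST, 0);
+ *	}
+ */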
+
+#include <arpa/inet.h>
+#include <linux/if_tun.h>
+#include <linux/limits.h>
+#include <linux/sysctl.h>
+#include <linux/time_types.h>
+#include <linux/net_tstamp.h>
+#include <net/if.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "netlink_helpers.h"
+#include "test_tc_neigh_fib.skel.h"
+#include "test_tc_neigh.skel.h"
+#include "test_tc_peer.skel.h"
+#include "test_tc_dtime.skel.h"
+
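+/* Older uapi headers may lack TCP_TX_DELAY; 37 matches
+ * include/uapi/linux/tcp.h.
+ */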
+#ifndef TCP_TX_DELAY
+#define TCP_TX_DELAY 37
+#endif
+
+#define NS_SRC "ns_src"
+#define NS_FWD "ns_fwd"
+#define NS_DST "ns_dst"
+
+#define IP4_SRC "172.16.1.100"
+#define IP4_DST "172.16.2.100"
+#define IP4_TUN_SRC "172.17.1.100"
+#define IP4_TUN_FWD "172.17.1.200"
+#define IP4_PORT 9004
+
+#define IP6_SRC "0::1:dead:beef:cafe"
+#define IP6_DST "0::2:dead:beef:cafe"
+#define IP6_TUN_SRC "1::1:dead:beef:cafe"
+#define IP6_TUN_FWD "1::2:dead:beef:cafe"
+#define IP6_PORT 9006
+
+#define IP4_SLL "169.254.0.1"
+#define IP4_DLL "169.254.0.2"
+#define IP4_NET "169.254.0.0"
+
+#define MAC_DST_FWD "00:11:22:33:44:55"
+#define MAC_DST "00:22:33:44:55:66"
+#define MAC_SRC_FWD "00:33:44:55:66:77"
+#define MAC_SRC "00:44:55:66:77:88"
+
+#define IFADDR_STR_LEN 18
+#define PING_ARGS "-i 0.2 -c 3 -w 10 -q"
+
+#define TIMEOUT_MILLIS 10000
+#define NSEC_PER_SEC 1000000000ULL
+
+#define log_err(MSG, ...) \
+ fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
+ __FILE__, __LINE__, strerror(errno), ##__VA_ARGS__)
+
+static const char * const namespaces[] = {NS_SRC, NS_FWD, NS_DST, NULL};
+static struct netns_obj *netns_objs[3];
+
+static int write_file(const char *path, const char *newval)
+{
+ FILE *f;
+
+ f = fopen(path, "r+");
+ if (!f)
+ return -1;
+ if (fwrite(newval, strlen(newval), 1, f) != 1) {
+ log_err("writing to %s failed", path);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
+
+static int netns_setup_namespaces(const char *verb)
+{
+ struct netns_obj **ns_obj = netns_objs;
+ const char * const *ns = namespaces;
+
+ while (*ns) {
+ if (strcmp(verb, "add") == 0) {
+ *ns_obj = netns_new(*ns, false);
+ if (!ASSERT_OK_PTR(*ns_obj, "netns_new"))
+ return -1;
+ } else {
+ if (!ASSERT_OK_PTR(*ns_obj, "netns_obj is NULL"))
+ return -1;
+ netns_free(*ns_obj);
+ *ns_obj = NULL;
+ }
+ ns++;
+ ns_obj++;
+ }
+ return 0;
+}
+
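+/* Best-effort variant of the above, used to clean up any namespaces left
+ * over from a previous (possibly crashed) run before the real setup starts.
+ */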
+static void netns_setup_namespaces_nofail(const char *verb)
+{
+ struct netns_obj **ns_obj = netns_objs;
+ const char * const *ns = namespaces;
+
+ while (*ns) {
+ if (strcmp(verb, "add") == 0) {
+ *ns_obj = netns_new(*ns, false);
+ } else {
+ if (*ns_obj)
+ netns_free(*ns_obj);
+ *ns_obj = NULL;
+ }
+ ns++;
+ ns_obj++;
+ }
+}
+
+enum dev_mode {
+ MODE_VETH,
+ MODE_NETKIT,
+};
+
+struct netns_setup_result {
+ enum dev_mode dev_mode;
+ int ifindex_src;
+ int ifindex_src_fwd;
+ int ifindex_dst;
+ int ifindex_dst_fwd;
+};
+
+static int get_ifaddr(const char *name, char *ifaddr)
+{
+ char path[PATH_MAX];
+ FILE *f;
+ int ret;
+
+ snprintf(path, PATH_MAX, "/sys/class/net/%s/address", name);
+ f = fopen(path, "r");
+ if (!ASSERT_OK_PTR(f, path))
+ return -1;
+
+ ret = fread(ifaddr, 1, IFADDR_STR_LEN, f);
+ if (!ASSERT_EQ(ret, IFADDR_STR_LEN, "fread ifaddr")) {
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
+
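+/* Create a netkit device pair via rtnetlink; roughly the equivalent of:
+ *   ip link add <prim> type netkit mode <mode> peer name <peer>
+ */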
+static int create_netkit(int mode, char *prim, char *peer)
+{
+ struct rtattr *linkinfo, *data, *peer_info;
+ struct rtnl_handle rth = { .fd = -1 };
+ const char *type = "netkit";
+ struct {
+ struct nlmsghdr n;
+ struct ifinfomsg i;
+ char buf[1024];
+ } req = {};
+ int err;
+
+ err = rtnl_open(&rth, 0);
+ if (!ASSERT_OK(err, "open_rtnetlink"))
+ return err;
+
+ memset(&req, 0, sizeof(req));
+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+ req.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
+ req.n.nlmsg_type = RTM_NEWLINK;
+ req.i.ifi_family = AF_UNSPEC;
+
+ addattr_l(&req.n, sizeof(req), IFLA_IFNAME, prim, strlen(prim));
+ linkinfo = addattr_nest(&req.n, sizeof(req), IFLA_LINKINFO);
+ addattr_l(&req.n, sizeof(req), IFLA_INFO_KIND, type, strlen(type));
+ data = addattr_nest(&req.n, sizeof(req), IFLA_INFO_DATA);
+ addattr32(&req.n, sizeof(req), IFLA_NETKIT_MODE, mode);
+ peer_info = addattr_nest(&req.n, sizeof(req), IFLA_NETKIT_PEER_INFO);
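+	/* The peer nest starts with the peer's own struct ifinfomsg (all
+	 * zeroes here since req was zero-initialized); reserve room for it
+	 * before appending the peer's attributes.
+	 */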
+ req.n.nlmsg_len += sizeof(struct ifinfomsg);
+ addattr_l(&req.n, sizeof(req), IFLA_IFNAME, peer, strlen(peer));
+ addattr_nest_end(&req.n, peer_info);
+ addattr_nest_end(&req.n, data);
+ addattr_nest_end(&req.n, linkinfo);
+
+ err = rtnl_talk(&rth, &req.n, NULL);
+ ASSERT_OK(err, "talk_rtnetlink");
+ rtnl_close(&rth);
+ return err;
+}
+
+static int netns_setup_links_and_routes(struct netns_setup_result *result)
+{
+ struct nstoken *nstoken = NULL;
+	char src_fwd_addr[IFADDR_STR_LEN + 1] = {};
+ char src_addr[IFADDR_STR_LEN + 1] = {};
+ int err;
+
+ if (result->dev_mode == MODE_VETH) {
+ SYS(fail, "ip link add src address " MAC_SRC " type veth "
+ "peer name src_fwd address " MAC_SRC_FWD);
+ SYS(fail, "ip link add dst address " MAC_DST " type veth "
+ "peer name dst_fwd address " MAC_DST_FWD);
+ } else if (result->dev_mode == MODE_NETKIT) {
+ err = create_netkit(NETKIT_L3, "src", "src_fwd");
+ if (!ASSERT_OK(err, "create_ifindex_src"))
+ goto fail;
+ err = create_netkit(NETKIT_L3, "dst", "dst_fwd");
+ if (!ASSERT_OK(err, "create_ifindex_dst"))
+ goto fail;
+ }
+
+ if (get_ifaddr("src_fwd", src_fwd_addr))
+ goto fail;
+
+ if (get_ifaddr("src", src_addr))
+ goto fail;
+
+ result->ifindex_src = if_nametoindex("src");
+ if (!ASSERT_GT(result->ifindex_src, 0, "ifindex_src"))
+ goto fail;
+
+ result->ifindex_src_fwd = if_nametoindex("src_fwd");
+ if (!ASSERT_GT(result->ifindex_src_fwd, 0, "ifindex_src_fwd"))
+ goto fail;
+
+ result->ifindex_dst = if_nametoindex("dst");
+ if (!ASSERT_GT(result->ifindex_dst, 0, "ifindex_dst"))
+ goto fail;
+
+ result->ifindex_dst_fwd = if_nametoindex("dst_fwd");
+ if (!ASSERT_GT(result->ifindex_dst_fwd, 0, "ifindex_dst_fwd"))
+ goto fail;
+
+ SYS(fail, "ip link set src netns " NS_SRC);
+ SYS(fail, "ip link set src_fwd netns " NS_FWD);
+ SYS(fail, "ip link set dst_fwd netns " NS_FWD);
+ SYS(fail, "ip link set dst netns " NS_DST);
+
+	/* setup in 'src' namespace */
+ nstoken = open_netns(NS_SRC);
+ if (!ASSERT_OK_PTR(nstoken, "setns src"))
+ goto fail;
+
+ SYS(fail, "ip addr add " IP4_SRC "/32 dev src");
+ SYS(fail, "ip addr add " IP6_SRC "/128 dev src nodad");
+ SYS(fail, "ip link set dev src up");
+
+ SYS(fail, "ip route add " IP4_DST "/32 dev src scope global");
+ SYS(fail, "ip route add " IP4_NET "/16 dev src scope global");
+ SYS(fail, "ip route add " IP6_DST "/128 dev src scope global");
+
+ if (result->dev_mode == MODE_VETH) {
+ SYS(fail, "ip neigh add " IP4_DST " dev src lladdr %s",
+ src_fwd_addr);
+ SYS(fail, "ip neigh add " IP6_DST " dev src lladdr %s",
+ src_fwd_addr);
+ }
+
+ close_netns(nstoken);
+
+	/* setup in 'fwd' namespace */
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+ goto fail;
+
+	/* The fwd netns automatically gets a v6 LL address / routes, but also
+	 * needs a v4 one in order to start ARP probing. The IP4_NET route is
+	 * added to the endpoints so that the ARP processing will reply.
+	 */
+ SYS(fail, "ip addr add " IP4_SLL "/32 dev src_fwd");
+ SYS(fail, "ip addr add " IP4_DLL "/32 dev dst_fwd");
+ SYS(fail, "ip link set dev src_fwd up");
+ SYS(fail, "ip link set dev dst_fwd up");
+
+ SYS(fail, "ip route add " IP4_SRC "/32 dev src_fwd scope global");
+ SYS(fail, "ip route add " IP6_SRC "/128 dev src_fwd scope global");
+ SYS(fail, "ip route add " IP4_DST "/32 dev dst_fwd scope global");
+ SYS(fail, "ip route add " IP6_DST "/128 dev dst_fwd scope global");
+
+ if (result->dev_mode == MODE_VETH) {
+ SYS(fail, "ip neigh add " IP4_SRC " dev src_fwd lladdr %s", src_addr);
+ SYS(fail, "ip neigh add " IP6_SRC " dev src_fwd lladdr %s", src_addr);
+ SYS(fail, "ip neigh add " IP4_DST " dev dst_fwd lladdr %s", MAC_DST);
+ SYS(fail, "ip neigh add " IP6_DST " dev dst_fwd lladdr %s", MAC_DST);
+ }
+
+ close_netns(nstoken);
+
+	/* setup in 'dst' namespace */
+ nstoken = open_netns(NS_DST);
+ if (!ASSERT_OK_PTR(nstoken, "setns dst"))
+ goto fail;
+
+ SYS(fail, "ip addr add " IP4_DST "/32 dev dst");
+ SYS(fail, "ip addr add " IP6_DST "/128 dev dst nodad");
+ SYS(fail, "ip link set dev dst up");
+ SYS(fail, "ip link set dev lo up");
+
+ SYS(fail, "ip route add " IP4_SRC "/32 dev dst scope global");
+ SYS(fail, "ip route add " IP4_NET "/16 dev dst scope global");
+ SYS(fail, "ip route add " IP6_SRC "/128 dev dst scope global");
+
+ if (result->dev_mode == MODE_VETH) {
+ SYS(fail, "ip neigh add " IP4_SRC " dev dst lladdr " MAC_DST_FWD);
+ SYS(fail, "ip neigh add " IP6_SRC " dev dst lladdr " MAC_DST_FWD);
+ }
+
+ close_netns(nstoken);
+
+ return 0;
+fail:
+ if (nstoken)
+ close_netns(nstoken);
+ return -1;
+}
+
+static int qdisc_clsact_create(struct bpf_tc_hook *qdisc_hook, int ifindex)
+{
+ char err_str[128], ifname[16];
+ int err;
+
+ qdisc_hook->ifindex = ifindex;
+ qdisc_hook->attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
+ err = bpf_tc_hook_create(qdisc_hook);
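+	/* Build an error string mirroring the equivalent tc command. */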
+ snprintf(err_str, sizeof(err_str),
+ "qdisc add dev %s clsact",
+ if_indextoname(qdisc_hook->ifindex, ifname) ? : "<unknown_iface>");
+ err_str[sizeof(err_str) - 1] = 0;
+ ASSERT_OK(err, err_str);
+
+ return err;
+}
+
+static int xgress_filter_add(struct bpf_tc_hook *qdisc_hook,
+ enum bpf_tc_attach_point xgress,
+ const struct bpf_program *prog, int priority)
+{
+ LIBBPF_OPTS(bpf_tc_opts, tc_attach);
+ char err_str[128], ifname[16];
+ int err;
+
+ qdisc_hook->attach_point = xgress;
+ tc_attach.prog_fd = bpf_program__fd(prog);
+ tc_attach.priority = priority;
+ err = bpf_tc_attach(qdisc_hook, &tc_attach);
+ snprintf(err_str, sizeof(err_str),
+ "filter add dev %s %s prio %d bpf da %s",
+ if_indextoname(qdisc_hook->ifindex, ifname) ? : "<unknown_iface>",
+ xgress == BPF_TC_INGRESS ? "ingress" : "egress",
+ priority, bpf_program__name(prog));
+ err_str[sizeof(err_str) - 1] = 0;
+ ASSERT_OK(err, err_str);
+
+ return err;
+}
+
+#define QDISC_CLSACT_CREATE(qdisc_hook, ifindex) ({ \
+ if ((err = qdisc_clsact_create(qdisc_hook, ifindex))) \
+ goto fail; \
+})
+
+#define XGRESS_FILTER_ADD(qdisc_hook, xgress, prog, priority) ({ \
+ if ((err = xgress_filter_add(qdisc_hook, xgress, prog, priority))) \
+ goto fail; \
+})
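+
+/* Both macros rely on an "err" variable and a "fail" label being in scope
+ * at the call site; any failing setup step jumps straight to cleanup.
+ */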
+
+static int netns_load_bpf(const struct bpf_program *src_prog,
+ const struct bpf_program *dst_prog,
+ const struct bpf_program *chk_prog,
+ const struct netns_setup_result *setup_result)
+{
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_src_fwd);
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_dst_fwd);
+ int err;
+
+ /* tc qdisc add dev src_fwd clsact */
+ QDISC_CLSACT_CREATE(&qdisc_src_fwd, setup_result->ifindex_src_fwd);
+ /* tc filter add dev src_fwd ingress bpf da src_prog */
+ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_INGRESS, src_prog, 0);
+ /* tc filter add dev src_fwd egress bpf da chk_prog */
+ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_EGRESS, chk_prog, 0);
+
+ /* tc qdisc add dev dst_fwd clsact */
+ QDISC_CLSACT_CREATE(&qdisc_dst_fwd, setup_result->ifindex_dst_fwd);
+ /* tc filter add dev dst_fwd ingress bpf da dst_prog */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS, dst_prog, 0);
+ /* tc filter add dev dst_fwd egress bpf da chk_prog */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS, chk_prog, 0);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static void test_tcp(int family, const char *addr, __u16 port)
+{
+ int listen_fd = -1, accept_fd = -1, client_fd = -1;
+ char buf[] = "testing testing";
+ int n;
+ struct nstoken *nstoken;
+
+ nstoken = open_netns(NS_DST);
+ if (!ASSERT_OK_PTR(nstoken, "setns dst"))
+ return;
+
+ listen_fd = start_server(family, SOCK_STREAM, addr, port, 0);
+ if (!ASSERT_GE(listen_fd, 0, "listen"))
+ goto done;
+
+ close_netns(nstoken);
+ nstoken = open_netns(NS_SRC);
+ if (!ASSERT_OK_PTR(nstoken, "setns src"))
+ goto done;
+
+ client_fd = connect_to_fd(listen_fd, TIMEOUT_MILLIS);
+ if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+ goto done;
+
+ accept_fd = accept(listen_fd, NULL, NULL);
+ if (!ASSERT_GE(accept_fd, 0, "accept"))
+ goto done;
+
+ if (!ASSERT_OK(settimeo(accept_fd, TIMEOUT_MILLIS), "settimeo"))
+ goto done;
+
+ n = write(client_fd, buf, sizeof(buf));
+ if (!ASSERT_EQ(n, sizeof(buf), "send to server"))
+ goto done;
+
+ n = read(accept_fd, buf, sizeof(buf));
+ ASSERT_EQ(n, sizeof(buf), "recv from server");
+
+done:
+ if (nstoken)
+ close_netns(nstoken);
+ if (listen_fd >= 0)
+ close(listen_fd);
+ if (accept_fd >= 0)
+ close(accept_fd);
+ if (client_fd >= 0)
+ close(client_fd);
+}
+
+static int test_ping(int family, const char *addr)
+{
+ SYS(fail, "ip netns exec " NS_SRC " %s " PING_ARGS " %s > /dev/null", ping_command(family), addr);
+ return 0;
+fail:
+ return -1;
+}
+
+static void test_connectivity(void)
+{
+ test_tcp(AF_INET, IP4_DST, IP4_PORT);
+ test_ping(AF_INET, IP4_DST);
+ test_tcp(AF_INET6, IP6_DST, IP6_PORT);
+ test_ping(AF_INET6, IP6_DST);
+}
+
+static int set_forwarding(bool enable)
+{
+ int err;
+
+ err = write_file("/proc/sys/net/ipv4/ip_forward", enable ? "1" : "0");
+	if (!ASSERT_OK(err, "set ipv4.ip_forward"))
+ return err;
+
+ err = write_file("/proc/sys/net/ipv6/conf/all/forwarding", enable ? "1" : "0");
+	if (!ASSERT_OK(err, "set ipv6.forwarding"))
+ return err;
+
+ return 0;
+}
+
+static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp)
+{
+ struct timespec pkt_ts = {};
+ char ctl[CMSG_SPACE(sizeof(pkt_ts))];
+ struct timespec now_ts;
+ struct msghdr msg = {};
+ __u64 now_ns, pkt_ns;
+ struct cmsghdr *cmsg;
+ struct iovec iov;
+ char data[32];
+ int ret;
+
+ iov.iov_base = data;
+ iov.iov_len = sizeof(data);
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = &ctl;
+ msg.msg_controllen = sizeof(ctl);
+
+ ret = recvmsg(fd, &msg, 0);
+ if (!ASSERT_EQ(ret, s, "recvmsg"))
+ return -1;
+ ASSERT_STRNEQ(data, expected, s, "expected rcv data");
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ if (cmsg && cmsg->cmsg_level == SOL_SOCKET &&
+ cmsg->cmsg_type == SO_TIMESTAMPNS)
+ memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts));
+
+ pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec;
+ if (tstamp) {
+ /* caller will check the tstamp itself */
+ *tstamp = pkt_ns;
+ return 0;
+ }
+
+ ASSERT_NEQ(pkt_ns, 0, "pkt rcv tstamp");
+
+ ret = clock_gettime(CLOCK_REALTIME, &now_ts);
+ ASSERT_OK(ret, "clock_gettime");
+ now_ns = now_ts.tv_sec * NSEC_PER_SEC + now_ts.tv_nsec;
+
+ if (ASSERT_GE(now_ns, pkt_ns, "check rcv tstamp"))
+ ASSERT_LT(now_ns - pkt_ns, 5 * NSEC_PER_SEC,
+ "check rcv tstamp");
+ return 0;
+}
+
+static void rcv_tstamp(int fd, const char *expected, size_t s)
+{
+ __rcv_tstamp(fd, expected, s, NULL);
+}
+
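+/* Return a UDP server fd (in NS_DST) with SO_TIMESTAMPNS enabled that has
+ * already received a timestamped packet, i.e. the kernel's
+ * netstamp_needed_key static key is known to be active. The caller keeps
+ * the fd open for the duration of the tests and closes it afterwards.
+ */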
+static int wait_netstamp_needed_key(void)
+{
+ int opt = 1, srv_fd = -1, cli_fd = -1, nretries = 0, err, n;
+ char buf[] = "testing testing";
+ struct nstoken *nstoken;
+ __u64 tstamp = 0;
+
+ nstoken = open_netns(NS_DST);
+ if (!ASSERT_OK_PTR(nstoken, "setns dst"))
+ return -1;
+
+ srv_fd = start_server(AF_INET6, SOCK_DGRAM, "::1", 0, 0);
+ if (!ASSERT_GE(srv_fd, 0, "start_server"))
+ goto done;
+
+ err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS,
+ &opt, sizeof(opt));
+ if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS)"))
+ goto done;
+
+ cli_fd = connect_to_fd(srv_fd, TIMEOUT_MILLIS);
+ if (!ASSERT_GE(cli_fd, 0, "connect_to_fd"))
+ goto done;
+
+again:
+ n = write(cli_fd, buf, sizeof(buf));
+ if (!ASSERT_EQ(n, sizeof(buf), "send to server"))
+ goto done;
+ err = __rcv_tstamp(srv_fd, buf, sizeof(buf), &tstamp);
+ if (!ASSERT_OK(err, "__rcv_tstamp"))
+ goto done;
+ if (!tstamp && nretries++ < 5) {
+ sleep(1);
+ printf("netstamp_needed_key retry#%d\n", nretries);
+ goto again;
+ }
+
+done:
+ if (!tstamp && srv_fd != -1) {
+ close(srv_fd);
+ srv_fd = -1;
+ }
+ if (cli_fd != -1)
+ close(cli_fd);
+ close_netns(nstoken);
+ return srv_fd;
+}
+
+static void snd_tstamp(int fd, char *b, size_t s)
+{
+ struct sock_txtime opt = { .clockid = CLOCK_TAI };
+ char ctl[CMSG_SPACE(sizeof(__u64))];
+ struct timespec now_ts;
+ struct msghdr msg = {};
+ struct cmsghdr *cmsg;
+ struct iovec iov;
+ __u64 now_ns;
+ int ret;
+
+ ret = clock_gettime(CLOCK_TAI, &now_ts);
+	ASSERT_OK(ret, "clock_gettime(CLOCK_TAI)");
+ now_ns = now_ts.tv_sec * NSEC_PER_SEC + now_ts.tv_nsec;
+
+ iov.iov_base = b;
+ iov.iov_len = s;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = &ctl;
+ msg.msg_controllen = sizeof(ctl);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_TXTIME;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(now_ns));
+ *(__u64 *)CMSG_DATA(cmsg) = now_ns;
+
+ ret = setsockopt(fd, SOL_SOCKET, SO_TXTIME, &opt, sizeof(opt));
+ ASSERT_OK(ret, "setsockopt(SO_TXTIME)");
+
+ ret = sendmsg(fd, &msg, 0);
+ ASSERT_EQ(ret, s, "sendmsg");
+}
+
+static void test_inet_dtime(int family, int type, const char *addr, __u16 port)
+{
+ int opt = 1, accept_fd = -1, client_fd = -1, listen_fd, err;
+ char buf[] = "testing testing";
+ struct nstoken *nstoken;
+
+ nstoken = open_netns(NS_DST);
+ if (!ASSERT_OK_PTR(nstoken, "setns dst"))
+ return;
+ listen_fd = start_server(family, type, addr, port, 0);
+ close_netns(nstoken);
+
+ if (!ASSERT_GE(listen_fd, 0, "listen"))
+ return;
+
+	/* Ensure the kernel puts the (rcv) timestamp on all skbs */
+ err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS,
+ &opt, sizeof(opt));
+ if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS)"))
+ goto done;
+
+ if (type == SOCK_STREAM) {
+		/* Ensure the kernel sets EDT when sending out rst/ack
+		 * from the kernel's ctl_sk.
+		 */
+ err = setsockopt(listen_fd, SOL_TCP, TCP_TX_DELAY, &opt,
+ sizeof(opt));
+ if (!ASSERT_OK(err, "setsockopt(TCP_TX_DELAY)"))
+ goto done;
+ }
+
+ nstoken = open_netns(NS_SRC);
+ if (!ASSERT_OK_PTR(nstoken, "setns src"))
+ goto done;
+ client_fd = connect_to_fd(listen_fd, TIMEOUT_MILLIS);
+ close_netns(nstoken);
+
+ if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+ goto done;
+
+ if (type == SOCK_STREAM) {
+ int n;
+
+ accept_fd = accept(listen_fd, NULL, NULL);
+ if (!ASSERT_GE(accept_fd, 0, "accept"))
+ goto done;
+
+ n = write(client_fd, buf, sizeof(buf));
+ if (!ASSERT_EQ(n, sizeof(buf), "send to server"))
+ goto done;
+ rcv_tstamp(accept_fd, buf, sizeof(buf));
+ } else {
+ snd_tstamp(client_fd, buf, sizeof(buf));
+ rcv_tstamp(listen_fd, buf, sizeof(buf));
+ }
+
+done:
+ close(listen_fd);
+ if (accept_fd != -1)
+ close(accept_fd);
+ if (client_fd != -1)
+ close(client_fd);
+}
+
+static int netns_load_dtime_bpf(struct test_tc_dtime *skel,
+ const struct netns_setup_result *setup_result)
+{
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_src_fwd);
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_dst_fwd);
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_src);
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_dst);
+ struct nstoken *nstoken;
+ int err;
+
+ /* setup ns_src tc progs */
+ nstoken = open_netns(NS_SRC);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC))
+ return -1;
+ /* tc qdisc add dev src clsact */
+ QDISC_CLSACT_CREATE(&qdisc_src, setup_result->ifindex_src);
+ /* tc filter add dev src ingress bpf da ingress_host */
+ XGRESS_FILTER_ADD(&qdisc_src, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
+ /* tc filter add dev src egress bpf da egress_host */
+ XGRESS_FILTER_ADD(&qdisc_src, BPF_TC_EGRESS, skel->progs.egress_host, 0);
+ close_netns(nstoken);
+
+ /* setup ns_dst tc progs */
+ nstoken = open_netns(NS_DST);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS_DST))
+ return -1;
+ /* tc qdisc add dev dst clsact */
+ QDISC_CLSACT_CREATE(&qdisc_dst, setup_result->ifindex_dst);
+ /* tc filter add dev dst ingress bpf da ingress_host */
+ XGRESS_FILTER_ADD(&qdisc_dst, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
+ /* tc filter add dev dst egress bpf da egress_host */
+ XGRESS_FILTER_ADD(&qdisc_dst, BPF_TC_EGRESS, skel->progs.egress_host, 0);
+ close_netns(nstoken);
+
+ /* setup ns_fwd tc progs */
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD))
+ return -1;
+ /* tc qdisc add dev dst_fwd clsact */
+ QDISC_CLSACT_CREATE(&qdisc_dst_fwd, setup_result->ifindex_dst_fwd);
+ /* tc filter add dev dst_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS,
+ skel->progs.ingress_fwdns_prio100, 100);
+ /* tc filter add dev dst_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS,
+ skel->progs.ingress_fwdns_prio101, 101);
+ /* tc filter add dev dst_fwd egress prio 100 bpf da egress_fwdns_prio100 */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS,
+ skel->progs.egress_fwdns_prio100, 100);
+ /* tc filter add dev dst_fwd egress prio 101 bpf da egress_fwdns_prio101 */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS,
+ skel->progs.egress_fwdns_prio101, 101);
+
+ /* tc qdisc add dev src_fwd clsact */
+ QDISC_CLSACT_CREATE(&qdisc_src_fwd, setup_result->ifindex_src_fwd);
+ /* tc filter add dev src_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
+ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_INGRESS,
+ skel->progs.ingress_fwdns_prio100, 100);
+ /* tc filter add dev src_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
+ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_INGRESS,
+ skel->progs.ingress_fwdns_prio101, 101);
+ /* tc filter add dev src_fwd egress prio 100 bpf da egress_fwdns_prio100 */
+ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_EGRESS,
+ skel->progs.egress_fwdns_prio100, 100);
+ /* tc filter add dev src_fwd egress prio 101 bpf da egress_fwdns_prio101 */
+ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_EGRESS,
+ skel->progs.egress_fwdns_prio101, 101);
+ close_netns(nstoken);
+ return 0;
+
+fail:
+ close_netns(nstoken);
+ return err;
+}
+
+enum {
+ INGRESS_FWDNS_P100,
+ INGRESS_FWDNS_P101,
+ EGRESS_FWDNS_P100,
+ EGRESS_FWDNS_P101,
+ INGRESS_ENDHOST,
+ EGRESS_ENDHOST,
+ SET_DTIME,
+ __MAX_CNT,
+};
+
+const char *cnt_names[] = {
+ "ingress_fwdns_p100",
+ "ingress_fwdns_p101",
+ "egress_fwdns_p100",
+ "egress_fwdns_p101",
+ "ingress_endhost",
+ "egress_endhost",
+ "set_dtime",
+};
+
+enum {
+ TCP_IP6_CLEAR_DTIME,
+ TCP_IP4,
+ TCP_IP6,
+ UDP_IP4,
+ UDP_IP6,
+ TCP_IP4_RT_FWD,
+ TCP_IP6_RT_FWD,
+ UDP_IP4_RT_FWD,
+ UDP_IP6_RT_FWD,
+ UKN_TEST,
+ __NR_TESTS,
+};
+
+const char *test_names[] = {
+ "tcp ip6 clear dtime",
+ "tcp ip4",
+ "tcp ip6",
+ "udp ip4",
+ "udp ip6",
+ "tcp ip4 rt fwd",
+ "tcp ip6 rt fwd",
+ "udp ip4 rt fwd",
+ "udp ip6 rt fwd",
+};
+
+static const char *dtime_cnt_str(int test, int cnt)
+{
+ static char name[64];
+
+ snprintf(name, sizeof(name), "%s %s", test_names[test], cnt_names[cnt]);
+
+ return name;
+}
+
+static const char *dtime_err_str(int test, int cnt)
+{
+ static char name[64];
+
+ snprintf(name, sizeof(name), "%s %s errs", test_names[test],
+ cnt_names[cnt]);
+
+ return name;
+}
+
+static void test_tcp_clear_dtime(struct test_tc_dtime *skel)
+{
+ int i, t = TCP_IP6_CLEAR_DTIME;
+ __u32 *dtimes = skel->bss->dtimes[t];
+ __u32 *errs = skel->bss->errs[t];
+
+ skel->bss->test = t;
+ test_inet_dtime(AF_INET6, SOCK_STREAM, IP6_DST, 50000 + t);
+
+ ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
+ dtime_cnt_str(t, INGRESS_FWDNS_P100));
+ ASSERT_EQ(dtimes[INGRESS_FWDNS_P101], 0,
+ dtime_cnt_str(t, INGRESS_FWDNS_P101));
+ ASSERT_GT(dtimes[EGRESS_FWDNS_P100], 0,
+ dtime_cnt_str(t, EGRESS_FWDNS_P100));
+ ASSERT_EQ(dtimes[EGRESS_FWDNS_P101], 0,
+ dtime_cnt_str(t, EGRESS_FWDNS_P101));
+ ASSERT_GT(dtimes[EGRESS_ENDHOST], 0,
+ dtime_cnt_str(t, EGRESS_ENDHOST));
+ ASSERT_GT(dtimes[INGRESS_ENDHOST], 0,
+ dtime_cnt_str(t, INGRESS_ENDHOST));
+
+ for (i = INGRESS_FWDNS_P100; i < __MAX_CNT; i++)
+ ASSERT_EQ(errs[i], 0, dtime_err_str(t, i));
+}
+
+static void test_tcp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd)
+{
+ __u32 *dtimes, *errs;
+ const char *addr;
+ int i, t;
+
+ if (family == AF_INET) {
+ t = bpf_fwd ? TCP_IP4 : TCP_IP4_RT_FWD;
+ addr = IP4_DST;
+ } else {
+ t = bpf_fwd ? TCP_IP6 : TCP_IP6_RT_FWD;
+ addr = IP6_DST;
+ }
+
+ dtimes = skel->bss->dtimes[t];
+ errs = skel->bss->errs[t];
+
+ skel->bss->test = t;
+ test_inet_dtime(family, SOCK_STREAM, addr, 50000 + t);
+
+	/* fwdns_prio100 prog does not read delivery_time_type, so
+	 * the kernel puts the (rcv) timestamp in __sk_buff->tstamp
+	 */
+ ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
+ dtime_cnt_str(t, INGRESS_FWDNS_P100));
+ for (i = INGRESS_FWDNS_P101; i < SET_DTIME; i++)
+ ASSERT_GT(dtimes[i], 0, dtime_cnt_str(t, i));
+
+ for (i = INGRESS_FWDNS_P100; i < __MAX_CNT; i++)
+ ASSERT_EQ(errs[i], 0, dtime_err_str(t, i));
+}
+
+static void test_udp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd)
+{
+ __u32 *dtimes, *errs;
+ const char *addr;
+ int i, t;
+
+ if (family == AF_INET) {
+ t = bpf_fwd ? UDP_IP4 : UDP_IP4_RT_FWD;
+ addr = IP4_DST;
+ } else {
+ t = bpf_fwd ? UDP_IP6 : UDP_IP6_RT_FWD;
+ addr = IP6_DST;
+ }
+
+ dtimes = skel->bss->dtimes[t];
+ errs = skel->bss->errs[t];
+
+ skel->bss->test = t;
+ test_inet_dtime(family, SOCK_DGRAM, addr, 50000 + t);
+
+ ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
+ dtime_cnt_str(t, INGRESS_FWDNS_P100));
+ for (i = EGRESS_FWDNS_P100; i < SET_DTIME; i++)
+ ASSERT_GT(dtimes[i], 0, dtime_cnt_str(t, i));
+
+ for (i = INGRESS_FWDNS_P100; i < __MAX_CNT; i++)
+ ASSERT_EQ(errs[i], 0, dtime_err_str(t, i));
+}
+
+static void test_tc_redirect_dtime(struct netns_setup_result *setup_result)
+{
+ struct test_tc_dtime *skel;
+ struct nstoken *nstoken;
+ int hold_tstamp_fd, err;
+
+	/* Hold a sk with SOCK_TIMESTAMP set to ensure there
+	 * is no delay in the kernel net_enable_timestamp().
+	 * This guarantees that the following tests see a
+	 * non-zero rcv tstamp in recvmsg().
+	 */
+ hold_tstamp_fd = wait_netstamp_needed_key();
+ if (!ASSERT_GE(hold_tstamp_fd, 0, "wait_netstamp_needed_key"))
+ return;
+
+ skel = test_tc_dtime__open();
+ if (!ASSERT_OK_PTR(skel, "test_tc_dtime__open"))
+ goto done;
+
+ skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;
+ skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
+
+ err = test_tc_dtime__load(skel);
+ if (!ASSERT_OK(err, "test_tc_dtime__load"))
+ goto done;
+
+ if (netns_load_dtime_bpf(skel, setup_result))
+ goto done;
+
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+ goto done;
+ err = set_forwarding(false);
+ close_netns(nstoken);
+ if (!ASSERT_OK(err, "disable forwarding"))
+ goto done;
+
+ test_tcp_clear_dtime(skel);
+
+ test_tcp_dtime(skel, AF_INET, true);
+ test_tcp_dtime(skel, AF_INET6, true);
+ test_udp_dtime(skel, AF_INET, true);
+ test_udp_dtime(skel, AF_INET6, true);
+
+ /* Test the kernel ip[6]_forward path instead
+ * of bpf_redirect_neigh().
+ */
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+ goto done;
+ err = set_forwarding(true);
+ close_netns(nstoken);
+ if (!ASSERT_OK(err, "enable forwarding"))
+ goto done;
+
+ test_tcp_dtime(skel, AF_INET, false);
+ test_tcp_dtime(skel, AF_INET6, false);
+ test_udp_dtime(skel, AF_INET, false);
+ test_udp_dtime(skel, AF_INET6, false);
+
+done:
+ test_tc_dtime__destroy(skel);
+ close(hold_tstamp_fd);
+}
+
+static void test_tc_redirect_neigh_fib(struct netns_setup_result *setup_result)
+{
+ struct nstoken *nstoken = NULL;
+ struct test_tc_neigh_fib *skel = NULL;
+
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+ return;
+
+ skel = test_tc_neigh_fib__open();
+ if (!ASSERT_OK_PTR(skel, "test_tc_neigh_fib__open"))
+ goto done;
+
+ if (!ASSERT_OK(test_tc_neigh_fib__load(skel), "test_tc_neigh_fib__load"))
+ goto done;
+
+ if (netns_load_bpf(skel->progs.tc_src, skel->progs.tc_dst,
+ skel->progs.tc_chk, setup_result))
+ goto done;
+
+ /* bpf_fib_lookup() checks if forwarding is enabled */
+ if (!ASSERT_OK(set_forwarding(true), "enable forwarding"))
+ goto done;
+
+ test_connectivity();
+
+done:
+ if (skel)
+ test_tc_neigh_fib__destroy(skel);
+ close_netns(nstoken);
+}
+
+static void test_tc_redirect_neigh(struct netns_setup_result *setup_result)
+{
+ struct nstoken *nstoken = NULL;
+ struct test_tc_neigh *skel = NULL;
+ int err;
+
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+ return;
+
+ skel = test_tc_neigh__open();
+ if (!ASSERT_OK_PTR(skel, "test_tc_neigh__open"))
+ goto done;
+
+ skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;
+ skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
+
+ err = test_tc_neigh__load(skel);
+ if (!ASSERT_OK(err, "test_tc_neigh__load"))
+ goto done;
+
+ if (netns_load_bpf(skel->progs.tc_src, skel->progs.tc_dst,
+ skel->progs.tc_chk, setup_result))
+ goto done;
+
+ if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
+ goto done;
+
+ test_connectivity();
+
+done:
+ if (skel)
+ test_tc_neigh__destroy(skel);
+ close_netns(nstoken);
+}
+
+static void test_tc_redirect_peer(struct netns_setup_result *setup_result)
+{
+ struct nstoken *nstoken;
+ struct test_tc_peer *skel;
+ int err;
+
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+ return;
+
+ skel = test_tc_peer__open();
+ if (!ASSERT_OK_PTR(skel, "test_tc_peer__open"))
+ goto done;
+
+ skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;
+ skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
+
+ err = test_tc_peer__load(skel);
+ if (!ASSERT_OK(err, "test_tc_peer__load"))
+ goto done;
+
+ if (netns_load_bpf(skel->progs.tc_src, skel->progs.tc_dst,
+ skel->progs.tc_chk, setup_result))
+ goto done;
+
+ if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
+ goto done;
+
+ test_connectivity();
+
+done:
+ if (skel)
+ test_tc_peer__destroy(skel);
+ close_netns(nstoken);
+}
+
+static int tun_open(char *name)
+{
+ struct ifreq ifr;
+ int fd, err;
+
+ fd = open("/dev/net/tun", O_RDWR);
+ if (!ASSERT_GE(fd, 0, "open /dev/net/tun"))
+ return -1;
+
+ memset(&ifr, 0, sizeof(ifr));
+
+ ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
+ if (*name)
+ strncpy(ifr.ifr_name, name, IFNAMSIZ);
+
+ err = ioctl(fd, TUNSETIFF, &ifr);
+ if (!ASSERT_OK(err, "ioctl TUNSETIFF"))
+ goto fail;
+
+ SYS(fail, "ip link set dev %s up", name);
+
+ return fd;
+fail:
+ close(fd);
+ return -1;
+}
+
+enum {
+ SRC_TO_TARGET = 0,
+ TARGET_TO_SRC = 1,
+};
+
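+/* Shuttle raw L3 packets between the two tun fds in both directions. This
+ * runs in a forked child for the lifetime of the test and is terminated
+ * with SIGTERM during cleanup. If both fds are readable at once, only the
+ * src->target direction is serviced in that iteration; the reverse side is
+ * picked up on the next loop, which is fine at this test's traffic volume.
+ */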
+static int tun_relay_loop(int src_fd, int target_fd)
+{
+ fd_set rfds, wfds;
+
+ FD_ZERO(&rfds);
+ FD_ZERO(&wfds);
+
+ for (;;) {
+ char buf[1500];
+ int direction, nread, nwrite;
+
+ FD_SET(src_fd, &rfds);
+ FD_SET(target_fd, &rfds);
+
+ if (select(1 + MAX(src_fd, target_fd), &rfds, NULL, NULL, NULL) < 0) {
+ log_err("select failed");
+ return 1;
+ }
+
+ direction = FD_ISSET(src_fd, &rfds) ? SRC_TO_TARGET : TARGET_TO_SRC;
+
+ nread = read(direction == SRC_TO_TARGET ? src_fd : target_fd, buf, sizeof(buf));
+ if (nread < 0) {
+ log_err("read failed");
+ return 1;
+ }
+
+ nwrite = write(direction == SRC_TO_TARGET ? target_fd : src_fd, buf, nread);
+ if (nwrite != nread) {
+ log_err("write failed");
+ return 1;
+ }
+ }
+}
+
+static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
+{
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_tun_fwd);
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_dst_fwd);
+ struct test_tc_peer *skel = NULL;
+ struct nstoken *nstoken = NULL;
+ int err;
+ int tunnel_pid = -1;
+ int src_fd, target_fd = -1;
+ int ifindex;
+
+	/* Start an L3 TUN/TAP tunnel between the src and dst namespaces.
+	 * This test uses TUN/TAP instead of e.g. an IPIP or GRE tunnel as those
+	 * expose the L2 headers encapsulating the IP packet to BPF and hence
+	 * don't have the skb in a suitable state for this test. An alternative
+	 * to TUN/TAP would be e.g. WireGuard, which would appear as a pure L3
+	 * device to BPF, but that requires a much more complicated setup.
+	 */
+ nstoken = open_netns(NS_SRC);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC))
+ return;
+
+ src_fd = tun_open("tun_src");
+ if (!ASSERT_GE(src_fd, 0, "tun_open tun_src"))
+ goto fail;
+
+ close_netns(nstoken);
+
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD))
+ goto fail;
+
+ target_fd = tun_open("tun_fwd");
+ if (!ASSERT_GE(target_fd, 0, "tun_open tun_fwd"))
+ goto fail;
+
+ tunnel_pid = fork();
+ if (!ASSERT_GE(tunnel_pid, 0, "fork tun_relay_loop"))
+ goto fail;
+
+ if (tunnel_pid == 0)
+ exit(tun_relay_loop(src_fd, target_fd));
+
+ skel = test_tc_peer__open();
+ if (!ASSERT_OK_PTR(skel, "test_tc_peer__open"))
+ goto fail;
+
+ ifindex = if_nametoindex("tun_fwd");
+	if (!ASSERT_GT(ifindex, 0, "if_nametoindex tun_fwd"))
+ goto fail;
+
+ skel->rodata->IFINDEX_SRC = ifindex;
+ skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
+
+ err = test_tc_peer__load(skel);
+ if (!ASSERT_OK(err, "test_tc_peer__load"))
+ goto fail;
+
+	/* Load "tc_src_l3" on the tun_fwd interface to redirect packets
+	 * towards dst, "tc_dst_l3" on dst_fwd to redirect them into the
+	 * dst netns, and "tc_chk" on dst_fwd egress to drop non-redirected
+	 * packets.
+	 */
+ /* tc qdisc add dev tun_fwd clsact */
+ QDISC_CLSACT_CREATE(&qdisc_tun_fwd, ifindex);
+ /* tc filter add dev tun_fwd ingress bpf da tc_src_l3 */
+ XGRESS_FILTER_ADD(&qdisc_tun_fwd, BPF_TC_INGRESS, skel->progs.tc_src_l3, 0);
+
+ /* tc qdisc add dev dst_fwd clsact */
+ QDISC_CLSACT_CREATE(&qdisc_dst_fwd, setup_result->ifindex_dst_fwd);
+ /* tc filter add dev dst_fwd ingress bpf da tc_dst_l3 */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS, skel->progs.tc_dst_l3, 0);
+ /* tc filter add dev dst_fwd egress bpf da tc_chk */
+ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS, skel->progs.tc_chk, 0);
+
+ /* Setup route and neigh tables */
+ SYS(fail, "ip -netns " NS_SRC " addr add dev tun_src " IP4_TUN_SRC "/24");
+ SYS(fail, "ip -netns " NS_FWD " addr add dev tun_fwd " IP4_TUN_FWD "/24");
+
+ SYS(fail, "ip -netns " NS_SRC " addr add dev tun_src " IP6_TUN_SRC "/64 nodad");
+ SYS(fail, "ip -netns " NS_FWD " addr add dev tun_fwd " IP6_TUN_FWD "/64 nodad");
+
+ SYS(fail, "ip -netns " NS_SRC " route del " IP4_DST "/32 dev src scope global");
+ SYS(fail, "ip -netns " NS_SRC " route add " IP4_DST "/32 via " IP4_TUN_FWD
+ " dev tun_src scope global");
+ SYS(fail, "ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev dst scope global");
+ SYS(fail, "ip -netns " NS_SRC " route del " IP6_DST "/128 dev src scope global");
+ SYS(fail, "ip -netns " NS_SRC " route add " IP6_DST "/128 via " IP6_TUN_FWD
+ " dev tun_src scope global");
+ SYS(fail, "ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev dst scope global");
+
+ SYS(fail, "ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev dst lladdr " MAC_DST_FWD);
+ SYS(fail, "ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev dst lladdr " MAC_DST_FWD);
+
+ if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
+ goto fail;
+
+ test_connectivity();
+
+fail:
+ if (tunnel_pid > 0) {
+ kill(tunnel_pid, SIGTERM);
+ waitpid(tunnel_pid, NULL, 0);
+ }
+ if (src_fd >= 0)
+ close(src_fd);
+ if (target_fd >= 0)
+ close(target_fd);
+ if (skel)
+ test_tc_peer__destroy(skel);
+ if (nstoken)
+ close_netns(nstoken);
+}
+
+#define RUN_TEST(name, mode) \
+ ({ \
+ struct netns_setup_result setup_result = { .dev_mode = mode, }; \
+ if (test__start_subtest(#name)) \
+ if (ASSERT_OK(netns_setup_namespaces("add"), "setup namespaces")) { \
+ if (ASSERT_OK(netns_setup_links_and_routes(&setup_result), \
+ "setup links and routes")) \
+ test_ ## name(&setup_result); \
+ netns_setup_namespaces("delete"); \
+ } \
+ })
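+
+/* RUN_TEST is a statement expression so it reads like a call; note the
+ * namespaces are torn down again even if link/route setup fails midway.
+ */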
+
+static void *test_tc_redirect_run_tests(void *arg)
+{
+ netns_setup_namespaces_nofail("delete");
+
+ RUN_TEST(tc_redirect_peer, MODE_VETH);
+ RUN_TEST(tc_redirect_peer, MODE_NETKIT);
+ RUN_TEST(tc_redirect_peer_l3, MODE_VETH);
+ RUN_TEST(tc_redirect_peer_l3, MODE_NETKIT);
+ RUN_TEST(tc_redirect_neigh, MODE_VETH);
+ RUN_TEST(tc_redirect_neigh_fib, MODE_VETH);
+ RUN_TEST(tc_redirect_dtime, MODE_VETH);
+ return NULL;
+}
+
+void test_tc_redirect(void)
+{
+ pthread_t test_thread;
+ int err;
+
+ /* Run the tests in their own thread to isolate the namespace changes
+ * so they do not affect the environment of other tests.
+ * (specifically needed because of unshare(CLONE_NEWNS) in open_netns())
+ */
+ err = pthread_create(&test_thread, NULL, &test_tc_redirect_run_tests, NULL);
+ if (ASSERT_OK(err, "pthread_create"))
+ ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c b/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c
new file mode 100644
index 000000000000..eaf441dc7e79
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdlib.h>
+#include <net/if.h>
+
+#include "test_progs.h"
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+#include "test_tcp_custom_syncookie.skel.h"
+
+static struct test_tcp_custom_syncookie_case {
+ int family, type;
+ char addr[16];
+ char name[10];
+} test_cases[] = {
+ {
+ .name = "IPv4 TCP",
+ .family = AF_INET,
+ .type = SOCK_STREAM,
+ .addr = "127.0.0.1",
+ },
+ {
+ .name = "IPv6 TCP",
+ .family = AF_INET6,
+ .type = SOCK_STREAM,
+ .addr = "::1",
+ },
+};
+
+static int setup_netns(void)
+{
+ if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
+ return -1;
+
+	if (!ASSERT_OK(system("ip link set dev lo up"), "ip link set lo up"))
+ goto err;
+
+ if (!ASSERT_OK(write_sysctl("/proc/sys/net/ipv4/tcp_ecn", "1"),
+ "write_sysctl"))
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static int setup_tc(struct test_tcp_custom_syncookie *skel)
+{
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_lo, .attach_point = BPF_TC_INGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, tc_attach,
+ .prog_fd = bpf_program__fd(skel->progs.tcp_custom_syncookie));
+
+ qdisc_lo.ifindex = if_nametoindex("lo");
+ if (!ASSERT_OK(bpf_tc_hook_create(&qdisc_lo), "qdisc add dev lo clsact"))
+ goto err;
+
+ if (!ASSERT_OK(bpf_tc_attach(&qdisc_lo, &tc_attach),
+ "filter add dev lo ingress"))
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+#define msg "Hello World"
+#define msglen 11
+
+static void transfer_message(int sender, int receiver)
+{
+ char buf[msglen];
+ int ret;
+
+ ret = send(sender, msg, msglen, 0);
+ if (!ASSERT_EQ(ret, msglen, "send"))
+ return;
+
+ memset(buf, 0, sizeof(buf));
+
+ ret = recv(receiver, buf, msglen, 0);
+ if (!ASSERT_EQ(ret, msglen, "recv"))
+ return;
+
+ ret = strncmp(buf, msg, msglen);
+ if (!ASSERT_EQ(ret, 0, "strncmp"))
+ return;
+}
+
+static void create_connection(struct test_tcp_custom_syncookie_case *test_case)
+{
+ int server, client, child;
+
+ server = start_server(test_case->family, test_case->type, test_case->addr, 0, 0);
+ if (!ASSERT_NEQ(server, -1, "start_server"))
+ return;
+
+ client = connect_to_fd(server, 0);
+ if (!ASSERT_NEQ(client, -1, "connect_to_fd"))
+ goto close_server;
+
+ child = accept(server, NULL, 0);
+ if (!ASSERT_NEQ(child, -1, "accept"))
+ goto close_client;
+
+ transfer_message(client, child);
+ transfer_message(child, client);
+
+ close(child);
+close_client:
+ close(client);
+close_server:
+ close(server);
+}
+
+void test_tcp_custom_syncookie(void)
+{
+ struct test_tcp_custom_syncookie *skel;
+ int i;
+
+ if (setup_netns())
+ return;
+
+ skel = test_tcp_custom_syncookie__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ if (setup_tc(skel))
+ goto destroy_skel;
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ if (!test__start_subtest(test_cases[i].name))
+ continue;
+
+ skel->bss->handled_syn = false;
+ skel->bss->handled_ack = false;
+
+ create_connection(&test_cases[i]);
+
+		ASSERT_EQ(skel->bss->handled_syn, true, "SYN is not handled at tc");
+		ASSERT_EQ(skel->bss->handled_ack, true, "ACK is not handled at tc");
+ }
+
+destroy_skel:
+ system("tc qdisc del dev lo clsact");
+
+ test_tcp_custom_syncookie__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c
index 594307dffd13..e070bca2b764 100644
--- a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c
@@ -3,14 +3,12 @@
void test_tcp_estats(void)
{
- const char *file = "./test_tcp_estats.o";
+ const char *file = "./test_tcp_estats.bpf.o";
int err, prog_fd;
struct bpf_object *obj;
- __u32 duration = 0;
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- CHECK(err, "", "err %d errno %d\n", err, errno);
- if (err)
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ if (!ASSERT_OK(err, ""))
return;
bpf_object__close(obj);
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
index 08d19cafd5e8..56685fc03c7e 100644
--- a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
@@ -42,33 +42,10 @@ struct sk_fds {
static int create_netns(void)
{
- if (CHECK(unshare(CLONE_NEWNET), "create netns",
- "unshare(CLONE_NEWNET): %s (%d)",
- strerror(errno), errno))
+ if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
return -1;
- if (CHECK(system("ip link set dev lo up"), "run ip cmd",
- "failed to bring lo link up\n"))
- return -1;
-
- return 0;
-}
-
-static int write_sysctl(const char *sysctl, const char *value)
-{
- int fd, err, len;
-
- fd = open(sysctl, O_WRONLY);
- if (CHECK(fd == -1, "open sysctl", "open(%s): %s (%d)\n",
- sysctl, strerror(errno), errno))
- return -1;
-
- len = strlen(value);
- err = write(fd, value, len);
- close(fd);
- if (CHECK(err != len, "write sysctl",
- "write(%s, %s): err:%d %s (%d)\n",
- sysctl, value, err, strerror(errno), errno))
+ if (!ASSERT_OK(system("ip link set dev lo up"), "run ip cmd"))
return -1;
return 0;
@@ -100,16 +77,12 @@ static int sk_fds_shutdown(struct sk_fds *sk_fds)
shutdown(sk_fds->active_fd, SHUT_WR);
ret = read(sk_fds->passive_fd, &abyte, sizeof(abyte));
- if (CHECK(ret != 0, "read-after-shutdown(passive_fd):",
- "ret:%d %s (%d)\n",
- ret, strerror(errno), errno))
+ if (!ASSERT_EQ(ret, 0, "read-after-shutdown(passive_fd):"))
return -1;
shutdown(sk_fds->passive_fd, SHUT_WR);
ret = read(sk_fds->active_fd, &abyte, sizeof(abyte));
- if (CHECK(ret != 0, "read-after-shutdown(active_fd):",
- "ret:%d %s (%d)\n",
- ret, strerror(errno), errno))
+ if (!ASSERT_EQ(ret, 0, "read-after-shutdown(active_fd):"))
return -1;
return 0;
@@ -122,8 +95,7 @@ static int sk_fds_connect(struct sk_fds *sk_fds, bool fast_open)
socklen_t len;
sk_fds->srv_fd = start_server(AF_INET6, SOCK_STREAM, LO_ADDR6, 0, 0);
- if (CHECK(sk_fds->srv_fd == -1, "start_server", "%s (%d)\n",
- strerror(errno), errno))
+ if (!ASSERT_NEQ(sk_fds->srv_fd, -1, "start_server"))
goto error;
if (fast_open)
@@ -132,28 +104,25 @@ static int sk_fds_connect(struct sk_fds *sk_fds, bool fast_open)
else
sk_fds->active_fd = connect_to_fd(sk_fds->srv_fd, 0);
- if (CHECK_FAIL(sk_fds->active_fd == -1)) {
+ if (!ASSERT_NEQ(sk_fds->active_fd, -1, "")) {
close(sk_fds->srv_fd);
goto error;
}
len = sizeof(addr6);
- if (CHECK(getsockname(sk_fds->srv_fd, (struct sockaddr *)&addr6,
- &len), "getsockname(srv_fd)", "%s (%d)\n",
- strerror(errno), errno))
+ if (!ASSERT_OK(getsockname(sk_fds->srv_fd, (struct sockaddr *)&addr6,
+ &len), "getsockname(srv_fd)"))
goto error_close;
sk_fds->passive_lport = ntohs(addr6.sin6_port);
len = sizeof(addr6);
- if (CHECK(getsockname(sk_fds->active_fd, (struct sockaddr *)&addr6,
- &len), "getsockname(active_fd)", "%s (%d)\n",
- strerror(errno), errno))
+ if (!ASSERT_OK(getsockname(sk_fds->active_fd, (struct sockaddr *)&addr6,
+ &len), "getsockname(active_fd)"))
goto error_close;
sk_fds->active_lport = ntohs(addr6.sin6_port);
sk_fds->passive_fd = accept(sk_fds->srv_fd, NULL, 0);
- if (CHECK(sk_fds->passive_fd == -1, "accept(srv_fd)", "%s (%d)\n",
- strerror(errno), errno))
+ if (!ASSERT_NEQ(sk_fds->passive_fd, -1, "accept(srv_fd)"))
goto error_close;
if (fast_open) {
@@ -161,8 +130,7 @@ static int sk_fds_connect(struct sk_fds *sk_fds, bool fast_open)
int ret;
ret = read(sk_fds->passive_fd, bytes_in, sizeof(bytes_in));
- if (CHECK(ret != sizeof(fast), "read fastopen syn data",
- "expected=%lu actual=%d\n", sizeof(fast), ret)) {
+ if (!ASSERT_EQ(ret, sizeof(fast), "read fastopen syn data")) {
close(sk_fds->passive_fd);
goto error_close;
}
@@ -183,8 +151,7 @@ static int check_hdr_opt(const struct bpf_test_option *exp,
const struct bpf_test_option *act,
const char *hdr_desc)
{
- if (CHECK(memcmp(exp, act, sizeof(*exp)),
- "expected-vs-actual", "unexpected %s\n", hdr_desc)) {
+ if (!ASSERT_EQ(memcmp(exp, act, sizeof(*exp)), 0, hdr_desc)) {
print_option(exp, "expected: ");
print_option(act, " actual: ");
return -1;
@@ -198,13 +165,11 @@ static int check_hdr_stg(const struct hdr_stg *exp, int fd,
{
struct hdr_stg act;
- if (CHECK(bpf_map_lookup_elem(hdr_stg_map_fd, &fd, &act),
- "map_lookup(hdr_stg_map_fd)", "%s %s (%d)\n",
- stg_desc, strerror(errno), errno))
+ if (!ASSERT_OK(bpf_map_lookup_elem(hdr_stg_map_fd, &fd, &act),
+ "map_lookup(hdr_stg_map_fd)"))
return -1;
- if (CHECK(memcmp(exp, &act, sizeof(*exp)),
- "expected-vs-actual", "unexpected %s\n", stg_desc)) {
+ if (!ASSERT_EQ(memcmp(exp, &act, sizeof(*exp)), 0, stg_desc)) {
print_hdr_stg(exp, "expected: ");
print_hdr_stg(&act, " actual: ");
return -1;
@@ -248,9 +213,8 @@ static void check_hdr_and_close_fds(struct sk_fds *sk_fds)
if (sk_fds_shutdown(sk_fds))
goto check_linum;
- if (CHECK(expected_inherit_cb_flags != skel->bss->inherit_cb_flags,
- "Unexpected inherit_cb_flags", "0x%x != 0x%x\n",
- skel->bss->inherit_cb_flags, expected_inherit_cb_flags))
+ if (!ASSERT_EQ(expected_inherit_cb_flags, skel->bss->inherit_cb_flags,
+ "inherit_cb_flags"))
goto check_linum;
if (check_hdr_stg(&exp_passive_hdr_stg, sk_fds->passive_fd,
@@ -277,7 +241,7 @@ static void check_hdr_and_close_fds(struct sk_fds *sk_fds)
"active_fin_in");
check_linum:
- CHECK_FAIL(check_error_linum(sk_fds));
+ ASSERT_FALSE(check_error_linum(sk_fds), "check_error_linum");
sk_fds_close(sk_fds);
}
@@ -353,8 +317,7 @@ static void fastopen_estab(void)
return;
link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
- if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
- PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)"))
return;
if (sk_fds_connect(&sk_fds, true)) {
@@ -384,7 +347,7 @@ static void syncookie_estab(void)
exp_active_estab_in.max_delack_ms = 22;
exp_passive_hdr_stg.syncookie = true;
- exp_active_hdr_stg.resend_syn = true,
+ exp_active_hdr_stg.resend_syn = true;
prepare_out();
@@ -398,8 +361,7 @@ static void syncookie_estab(void)
return;
link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
- if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
- PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)"))
return;
if (sk_fds_connect(&sk_fds, false)) {
@@ -431,8 +393,7 @@ static void fin(void)
return;
link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
- if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
- PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)"))
return;
if (sk_fds_connect(&sk_fds, false)) {
@@ -471,8 +432,7 @@ static void __simple_estab(bool exprm)
return;
link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
- if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
- PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)"))
return;
if (sk_fds_connect(&sk_fds, false)) {
@@ -509,8 +469,7 @@ static void misc(void)
return;
link = bpf_program__attach_cgroup(misc_skel->progs.misc_estab, cg_fd);
- if (CHECK(IS_ERR(link), "attach_cgroup(misc_estab)", "err: %ld\n",
- PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_cgroup(misc_estab)"))
return;
if (sk_fds_connect(&sk_fds, false)) {
@@ -522,26 +481,20 @@ static void misc(void)
/* MSG_EOR to ensure skb will not be combined */
ret = send(sk_fds.active_fd, send_msg, sizeof(send_msg),
MSG_EOR);
- if (CHECK(ret != sizeof(send_msg), "send(msg)", "ret:%d\n",
- ret))
+ if (!ASSERT_EQ(ret, sizeof(send_msg), "send(msg)"))
goto check_linum;
ret = read(sk_fds.passive_fd, recv_msg, sizeof(recv_msg));
- if (CHECK(ret != sizeof(send_msg), "read(msg)", "ret:%d\n",
- ret))
+ if (!ASSERT_EQ(ret, sizeof(send_msg), "read(msg)"))
goto check_linum;
}
if (sk_fds_shutdown(&sk_fds))
goto check_linum;
- CHECK(misc_skel->bss->nr_syn != 1, "unexpected nr_syn",
- "expected (1) != actual (%u)\n",
- misc_skel->bss->nr_syn);
+ ASSERT_EQ(misc_skel->bss->nr_syn, 1, "unexpected nr_syn");
- CHECK(misc_skel->bss->nr_data != nr_data, "unexpected nr_data",
- "expected (%u) != actual (%u)\n",
- nr_data, misc_skel->bss->nr_data);
+ ASSERT_EQ(misc_skel->bss->nr_data, nr_data, "unexpected nr_data");
/* The last ACK may have been delayed, so it is either 1 or 2. */
CHECK(misc_skel->bss->nr_pure_ack != 1 &&
@@ -550,12 +503,12 @@ static void misc(void)
"expected (1 or 2) != actual (%u)\n",
misc_skel->bss->nr_pure_ack);
- CHECK(misc_skel->bss->nr_fin != 1, "unexpected nr_fin",
- "expected (1) != actual (%u)\n",
- misc_skel->bss->nr_fin);
+ ASSERT_EQ(misc_skel->bss->nr_fin, 1, "unexpected nr_fin");
+
+ ASSERT_EQ(misc_skel->bss->nr_hwtstamp, 0, "nr_hwtstamp");
check_linum:
- CHECK_FAIL(check_error_linum(&sk_fds));
+ ASSERT_FALSE(check_error_linum(&sk_fds), "check_error_linum");
sk_fds_close(&sk_fds);
bpf_link__destroy(link);
}
@@ -580,15 +533,15 @@ void test_tcp_hdr_options(void)
int i;
skel = test_tcp_hdr_options__open_and_load();
- if (CHECK(!skel, "open and load skel", "failed"))
+ if (!ASSERT_OK_PTR(skel, "open and load skel"))
return;
misc_skel = test_misc_tcp_hdr_options__open_and_load();
- if (CHECK(!misc_skel, "open and load misc test skel", "failed"))
+ if (!ASSERT_OK_PTR(misc_skel, "open and load misc test skel"))
goto skel_destroy;
cg_fd = test__join_cgroup(CG_NAME);
- if (CHECK_FAIL(cg_fd < 0))
+ if (!ASSERT_GE(cg_fd, 0, "join_cgroup"))
goto skel_destroy;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
index d207e968e6b1..c38784c1c066 100644
--- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "network_helpers.h"
+#include "tcp_rtt.skel.h"
struct tcp_rtt_storage {
__u32 invoked;
@@ -9,14 +11,16 @@ struct tcp_rtt_storage {
__u32 delivered;
__u32 delivered_ce;
__u32 icsk_retransmits;
+
+ __u32 mrtt_us; /* args[0] */
+ __u32 srtt; /* args[1] */
};
static void send_byte(int fd)
{
char b = 0x55;
- if (CHECK_FAIL(write(fd, &b, sizeof(b)) != 1))
- perror("Failed to send single byte");
+ ASSERT_EQ(write(fd, &b, sizeof(b)), 1, "send single byte");
}
static int wait_for_ack(int fd, int retries)
@@ -50,10 +54,8 @@ static int verify_sk(int map_fd, int client_fd, const char *msg, __u32 invoked,
int err = 0;
struct tcp_rtt_storage val;
- if (CHECK_FAIL(bpf_map_lookup_elem(map_fd, &client_fd, &val) < 0)) {
- perror("Failed to read socket storage");
+ if (!ASSERT_GE(bpf_map_lookup_elem(map_fd, &client_fd, &val), 0, "read socket storage"))
return -1;
- }
if (val.invoked != invoked) {
log_err("%s: unexpected bpf_tcp_sock.invoked %d != %d",
@@ -85,32 +87,35 @@ static int verify_sk(int map_fd, int client_fd, const char *msg, __u32 invoked,
err++;
}
+ /* Precise values of mrtt and srtt are unavailable; just make sure they are nonzero */
+ if (val.mrtt_us == 0) {
+ log_err("%s: unexpected bpf_tcp_sock.args[0] (mrtt_us) %u == 0", msg, val.mrtt_us);
+ err++;
+ }
+
+ if (val.srtt == 0) {
+ log_err("%s: unexpected bpf_tcp_sock.args[1] (srtt) %u == 0", msg, val.srtt);
+ err++;
+ }
+
return err;
}
static int run_test(int cgroup_fd, int server_fd)
{
- struct bpf_prog_load_attr attr = {
- .prog_type = BPF_PROG_TYPE_SOCK_OPS,
- .file = "./tcp_rtt.o",
- .expected_attach_type = BPF_CGROUP_SOCK_OPS,
- };
- struct bpf_object *obj;
- struct bpf_map *map;
+ struct tcp_rtt *skel;
int client_fd;
int prog_fd;
int map_fd;
int err;
- err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
- if (err) {
- log_err("Failed to load BPF object");
+ skel = tcp_rtt__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_load"))
return -1;
- }
- map = bpf_map__next(NULL, obj);
- map_fd = bpf_map__fd(map);
+ map_fd = bpf_map__fd(skel->maps.socket_storage_map);
+ prog_fd = bpf_program__fd(skel->progs._sockops);
err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0);
if (err) {
@@ -149,7 +154,7 @@ close_client_fd:
close(client_fd);
close_bpf_object:
- bpf_object__close(obj);
+ tcp_rtt__destroy(skel);
return err;
}
@@ -158,14 +163,14 @@ void test_tcp_rtt(void)
int server_fd, cgroup_fd;
cgroup_fd = test__join_cgroup("/tcp_rtt");
- if (CHECK_FAIL(cgroup_fd < 0))
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /tcp_rtt"))
return;
server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
- if (CHECK_FAIL(server_fd < 0))
+ if (!ASSERT_GE(server_fd, 0, "start_server"))
goto close_cgroup_fd;
- CHECK_FAIL(run_test(cgroup_fd, server_fd));
+ ASSERT_OK(run_test(cgroup_fd, server_fd), "run_test");
close(server_fd);
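
As a minimal sketch, the generated-skeleton lifecycle this hunk migrates to looks like the following (names follow the tcp_rtt example above; error handling is trimmed, and the snippet is illustrative rather than part of the patch):

#include "tcp_rtt.skel.h"

/* Sketch only: open+load the generated skeleton, attach by prog fd,
 * then destroy, which detaches links and frees every object. */
static int skeleton_lifecycle(int cgroup_fd)
{
	struct tcp_rtt *skel;
	int err;

	skel = tcp_rtt__open_and_load();
	if (!skel)
		return -1;

	err = bpf_prog_attach(bpf_program__fd(skel->progs._sockops),
			      cgroup_fd, BPF_CGROUP_SOCK_OPS, 0);
	/* ... drive traffic, read skel->maps.socket_storage_map ... */

	tcp_rtt__destroy(skel);
	return err;
}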
diff --git a/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c b/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c
index 87923d2865b7..7e8fe1bad03f 100644
--- a/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c
+++ b/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c
@@ -8,8 +8,6 @@
#define LO_ADDR6 "::1"
#define CG_NAME "/tcpbpf-user-test"
-static __u32 duration;
-
static void verify_result(struct tcpbpf_globals *result)
{
__u32 expected_events = ((1 << BPF_SOCK_OPS_TIMEOUT_INIT) |
@@ -22,9 +20,7 @@ static void verify_result(struct tcpbpf_globals *result)
(1 << BPF_SOCK_OPS_TCP_LISTEN_CB));
/* check global map */
- CHECK(expected_events != result->event_map, "event_map",
- "unexpected event_map: actual 0x%08x != expected 0x%08x\n",
- result->event_map, expected_events);
+ ASSERT_EQ(expected_events, result->event_map, "event_map");
ASSERT_EQ(result->bytes_received, 501, "bytes_received");
ASSERT_EQ(result->bytes_acked, 1002, "bytes_acked");
@@ -56,18 +52,15 @@ static void run_test(struct tcpbpf_globals *result)
int i, rv;
listen_fd = start_server(AF_INET6, SOCK_STREAM, LO_ADDR6, 0, 0);
- if (CHECK(listen_fd == -1, "start_server", "listen_fd:%d errno:%d\n",
- listen_fd, errno))
+ if (!ASSERT_NEQ(listen_fd, -1, "start_server"))
goto done;
cli_fd = connect_to_fd(listen_fd, 0);
- if (CHECK(cli_fd == -1, "connect_to_fd(listen_fd)",
- "cli_fd:%d errno:%d\n", cli_fd, errno))
+ if (!ASSERT_NEQ(cli_fd, -1, "connect_to_fd(listen_fd)"))
goto done;
accept_fd = accept(listen_fd, NULL, NULL);
- if (CHECK(accept_fd == -1, "accept(listen_fd)",
- "accept_fd:%d errno:%d\n", accept_fd, errno))
+ if (!ASSERT_NEQ(accept_fd, -1, "accept(listen_fd)"))
goto done;
/* Send 1000B of '+'s from cli_fd -> accept_fd */
@@ -75,11 +68,11 @@ static void run_test(struct tcpbpf_globals *result)
buf[i] = '+';
rv = send(cli_fd, buf, 1000, 0);
- if (CHECK(rv != 1000, "send(cli_fd)", "rv:%d errno:%d\n", rv, errno))
+ if (!ASSERT_EQ(rv, 1000, "send(cli_fd)"))
goto done;
rv = recv(accept_fd, buf, 1000, 0);
- if (CHECK(rv != 1000, "recv(accept_fd)", "rv:%d errno:%d\n", rv, errno))
+ if (!ASSERT_EQ(rv, 1000, "recv(accept_fd)"))
goto done;
/* Send 500B of '.'s from accept_fd ->cli_fd */
@@ -87,11 +80,11 @@ static void run_test(struct tcpbpf_globals *result)
buf[i] = '.';
rv = send(accept_fd, buf, 500, 0);
- if (CHECK(rv != 500, "send(accept_fd)", "rv:%d errno:%d\n", rv, errno))
+ if (!ASSERT_EQ(rv, 500, "send(accept_fd)"))
goto done;
rv = recv(cli_fd, buf, 500, 0);
- if (CHECK(rv != 500, "recv(cli_fd)", "rv:%d errno:%d\n", rv, errno))
+ if (!ASSERT_EQ(rv, 500, "recv(cli_fd)"))
goto done;
/*
@@ -100,12 +93,12 @@ static void run_test(struct tcpbpf_globals *result)
*/
shutdown(accept_fd, SHUT_WR);
err = recv(cli_fd, buf, 1, 0);
- if (CHECK(err, "recv(cli_fd) for fin", "err:%d errno:%d\n", err, errno))
+ if (!ASSERT_OK(err, "recv(cli_fd) for fin"))
goto done;
shutdown(cli_fd, SHUT_WR);
err = recv(accept_fd, buf, 1, 0);
- CHECK(err, "recv(accept_fd) for fin", "err:%d errno:%d\n", err, errno);
+ ASSERT_OK(err, "recv(accept_fd) for fin");
done:
if (accept_fd != -1)
close(accept_fd);
@@ -124,12 +117,11 @@ void test_tcpbpf_user(void)
int cg_fd = -1;
skel = test_tcpbpf_kern__open_and_load();
- if (CHECK(!skel, "open and load skel", "failed"))
+ if (!ASSERT_OK_PTR(skel, "open and load skel"))
return;
cg_fd = test__join_cgroup(CG_NAME);
- if (CHECK(cg_fd < 0, "test__join_cgroup(" CG_NAME ")",
- "cg_fd:%d errno:%d", cg_fd, errno))
+ if (!ASSERT_GE(cg_fd, 0, "test__join_cgroup(" CG_NAME ")"))
goto err;
skel->links.bpf_testcb = bpf_program__attach_cgroup(skel->progs.bpf_testcb, cg_fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c b/tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c
new file mode 100644
index 000000000000..ccae0b31ac6c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <bpf/btf.h>
+#include <test_progs.h>
+
+#include "test_bpf_ma.skel.h"
+
+static void do_bpf_ma_test(const char *name)
+{
+ struct test_bpf_ma *skel;
+ struct bpf_program *prog;
+ struct btf *btf;
+ int i, err, id;
+ char tname[32];
+
+ skel = test_bpf_ma__open();
+ if (!ASSERT_OK_PTR(skel, "open"))
+ return;
+
+ btf = bpf_object__btf(skel->obj);
+ if (!ASSERT_OK_PTR(btf, "btf"))
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(skel->rodata->data_sizes); i++) {
+ snprintf(tname, sizeof(tname), "bin_data_%u", skel->rodata->data_sizes[i]);
+ id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
+ if (!ASSERT_GT(id, 0, tname))
+ goto out;
+ skel->rodata->data_btf_ids[i] = id;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(skel->rodata->percpu_data_sizes); i++) {
+ snprintf(tname, sizeof(tname), "percpu_bin_data_%u", skel->rodata->percpu_data_sizes[i]);
+ id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
+ if (!ASSERT_GT(id, 0, tname))
+ goto out;
+ skel->rodata->percpu_data_btf_ids[i] = id;
+ }
+
+ prog = bpf_object__find_program_by_name(skel->obj, name);
+ if (!ASSERT_OK_PTR(prog, "invalid prog name"))
+ goto out;
+ bpf_program__set_autoload(prog, true);
+
+ err = test_bpf_ma__load(skel);
+ if (!ASSERT_OK(err, "load"))
+ goto out;
+
+ err = test_bpf_ma__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto out;
+
+ skel->bss->pid = getpid();
+ usleep(1);
+ ASSERT_OK(skel->bss->err, "test error");
+out:
+ test_bpf_ma__destroy(skel);
+}
+
+void test_test_bpf_ma(void)
+{
+ if (test__start_subtest("batch_alloc_free"))
+ do_bpf_ma_test("test_batch_alloc_free");
+ if (test__start_subtest("free_through_map_free"))
+ do_bpf_ma_test("test_free_through_map_free");
+ if (test__start_subtest("batch_percpu_alloc_free"))
+ do_bpf_ma_test("test_batch_percpu_alloc_free");
+ if (test__start_subtest("percpu_free_through_map_free"))
+ do_bpf_ma_test("test_percpu_free_through_map_free");
+}
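
A note on ordering, with a hedged sketch: skeleton rodata is writable only between __open() and __load(); once loaded, the backing map is frozen read-only, which is why the BTF ids above are resolved into skel->rodata before test_bpf_ma__load(). The value below is illustrative, not from the patch:

static int patch_then_load(void)
{
	struct test_bpf_ma *skel = test_bpf_ma__open();
	int err;

	if (!skel)
		return -1;
	skel->rodata->data_btf_ids[0] = 42;	/* writable pre-load only */
	err = test_bpf_ma__load(skel);		/* rodata frozen after this */
	test_bpf_ma__destroy(skel);
	return err;
}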
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpf_smc.c b/tools/testing/selftests/bpf/prog_tests/test_bpf_smc.c
new file mode 100644
index 000000000000..de22734abc4d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_bpf_smc.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <linux/genetlink.h>
+#include "network_helpers.h"
+#include "bpf_smc.skel.h"
+
+#ifndef IPPROTO_SMC
+#define IPPROTO_SMC 256
+#endif
+
+#define CLIENT_IP "127.0.0.1"
+#define SERVER_IP "127.0.1.0"
+#define SERVER_IP_VIA_RISK_PATH "127.0.2.0"
+
+#define SERVICE_1 80
+#define SERVICE_2 443
+#define SERVICE_3 8443
+
+#define TEST_NS "bpf_smc_netns"
+
+static struct netns_obj *test_netns;
+
+struct smc_policy_ip_key {
+ __u32 sip;
+ __u32 dip;
+};
+
+struct smc_policy_ip_value {
+ __u8 mode;
+};
+
+#if defined(__s390x__)
+/* s390x has default seid */
+static bool setup_ueid(void) { return true; }
+static void cleanup_ueid(void) {}
+#else
+enum {
+ SMC_NETLINK_ADD_UEID = 10,
+ SMC_NETLINK_REMOVE_UEID
+};
+
+enum {
+ SMC_NLA_EID_TABLE_UNSPEC,
+ SMC_NLA_EID_TABLE_ENTRY, /* string */
+};
+
+struct msgtemplate {
+ struct nlmsghdr n;
+ struct genlmsghdr g;
+ char buf[1024];
+};
+
+#define GENLMSG_DATA(glh) ((void *)(NLMSG_DATA(glh) + GENL_HDRLEN))
+#define GENLMSG_PAYLOAD(glh) (NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN)
+#define NLA_DATA(na) ((void *)((char *)(na) + NLA_HDRLEN))
+#define NLA_PAYLOAD(len) ((len) - NLA_HDRLEN)
+
+#define SMC_GENL_FAMILY_NAME "SMC_GEN_NETLINK"
+#define SMC_BPFTEST_UEID "SMC-BPFTEST-UEID"
+
+static uint16_t smc_nl_family_id = -1;
+
+static int send_cmd(int fd, __u16 nlmsg_type, __u32 nlmsg_pid,
+ __u16 nlmsg_flags, __u8 genl_cmd, __u16 nla_type,
+ void *nla_data, int nla_len)
+{
+ struct nlattr *na;
+ struct sockaddr_nl nladdr;
+ int r, buflen;
+ char *buf;
+
+ struct msgtemplate msg = {0};
+
+ msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
+ msg.n.nlmsg_type = nlmsg_type;
+ msg.n.nlmsg_flags = nlmsg_flags;
+ msg.n.nlmsg_seq = 0;
+ msg.n.nlmsg_pid = nlmsg_pid;
+ msg.g.cmd = genl_cmd;
+ msg.g.version = 1;
+ na = (struct nlattr *)GENLMSG_DATA(&msg);
+ na->nla_type = nla_type;
+ na->nla_len = nla_len + 1 + NLA_HDRLEN;
+ memcpy(NLA_DATA(na), nla_data, nla_len);
+ msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
+
+ buf = (char *)&msg;
+ buflen = msg.n.nlmsg_len;
+ memset(&nladdr, 0, sizeof(nladdr));
+ nladdr.nl_family = AF_NETLINK;
+
+ while ((r = sendto(fd, buf, buflen, 0, (struct sockaddr *)&nladdr,
+ sizeof(nladdr))) < buflen) {
+ if (r > 0) {
+ buf += r;
+ buflen -= r;
+ } else if (errno != EAGAIN) {
+ return -1;
+ }
+ }
+ return 0;
+}
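
For orientation, here is a sketch of the wire layout send_cmd() assembles; it mirrors struct msgtemplate above and the standard netlink headers, and is illustrative rather than part of the patch:

#include <linux/netlink.h>
#include <linux/genetlink.h>

/* One request: netlink header, then genetlink header, then a single
 * attribute (header plus payload). NLMSG_LENGTH() and NLA_HDRLEN
 * account for the alignment padding between the pieces. */
struct genl_request_layout {
	struct nlmsghdr nlh;	/* nlmsg_type = family id or GENL_ID_CTRL */
	struct genlmsghdr genl;	/* cmd, e.g. CTRL_CMD_GETFAMILY */
	struct nlattr attr;	/* nla_type, nla_len */
	char payload[];		/* e.g. the "SMC_GEN_NETLINK" family name */
};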
+
+static bool get_smc_nl_family_id(void)
+{
+ struct sockaddr_nl nl_src;
+ struct msgtemplate msg;
+ struct nlattr *nl;
+ int fd, ret;
+ pid_t pid;
+
+ fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
+ if (!ASSERT_OK_FD(fd, "nl_family socket"))
+ return false;
+
+ pid = getpid();
+
+ memset(&nl_src, 0, sizeof(nl_src));
+ nl_src.nl_family = AF_NETLINK;
+ nl_src.nl_pid = pid;
+
+ ret = bind(fd, (struct sockaddr *)&nl_src, sizeof(nl_src));
+ if (!ASSERT_OK(ret, "nl_family bind"))
+ goto fail;
+
+ ret = send_cmd(fd, GENL_ID_CTRL, pid,
+ NLM_F_REQUEST, CTRL_CMD_GETFAMILY,
+ CTRL_ATTR_FAMILY_NAME, (void *)SMC_GENL_FAMILY_NAME,
+ strlen(SMC_GENL_FAMILY_NAME));
+ if (!ASSERT_OK(ret, "nl_family query"))
+ goto fail;
+
+ ret = recv(fd, &msg, sizeof(msg), 0);
+ if (!ASSERT_FALSE(msg.n.nlmsg_type == NLMSG_ERROR || ret < 0 ||
+ !NLMSG_OK(&msg.n, ret), "nl_family response"))
+ goto fail;
+
+ nl = (struct nlattr *)GENLMSG_DATA(&msg);
+ nl = (struct nlattr *)((char *)nl + NLA_ALIGN(nl->nla_len));
+ if (!ASSERT_EQ(nl->nla_type, CTRL_ATTR_FAMILY_ID, "nl_family nla type"))
+ goto fail;
+
+ smc_nl_family_id = *(uint16_t *)NLA_DATA(nl);
+ close(fd);
+ return true;
+fail:
+ close(fd);
+ return false;
+}
+
+static bool smc_ueid(int op)
+{
+ struct sockaddr_nl nl_src;
+ struct msgtemplate msg;
+ struct nlmsgerr *err;
+ char test_ueid[32];
+ int fd, ret;
+ pid_t pid;
+
+ /* UEID required */
+ memset(test_ueid, '\x20', sizeof(test_ueid));
+ memcpy(test_ueid, SMC_BPFTEST_UEID, strlen(SMC_BPFTEST_UEID));
+ fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
+ if (!ASSERT_OK_FD(fd, "ueid socket"))
+ return false;
+
+ pid = getpid();
+ memset(&nl_src, 0, sizeof(nl_src));
+ nl_src.nl_family = AF_NETLINK;
+ nl_src.nl_pid = pid;
+
+ ret = bind(fd, (struct sockaddr *)&nl_src, sizeof(nl_src));
+ if (!ASSERT_OK(ret, "ueid bind"))
+ goto fail;
+
+ ret = send_cmd(fd, smc_nl_family_id, pid,
+ NLM_F_REQUEST | NLM_F_ACK, op, SMC_NLA_EID_TABLE_ENTRY,
+ (void *)test_ueid, sizeof(test_ueid));
+ if (!ASSERT_OK(ret, "ueid cmd"))
+ goto fail;
+
+ ret = recv(fd, &msg, sizeof(msg), 0);
+ if (!ASSERT_FALSE(ret < 0 ||
+ !NLMSG_OK(&msg.n, ret), "ueid response"))
+ goto fail;
+
+ if (msg.n.nlmsg_type == NLMSG_ERROR) {
+ err = NLMSG_DATA(&msg);
+ switch (op) {
+ case SMC_NETLINK_REMOVE_UEID:
+ if (!ASSERT_FALSE((err->error && err->error != -ENOENT),
+ "ueid remove"))
+ goto fail;
+ break;
+ case SMC_NETLINK_ADD_UEID:
+ if (!ASSERT_OK(err->error, "ueid add"))
+ goto fail;
+ break;
+ default:
+ break;
+ }
+ }
+ close(fd);
+ return true;
+fail:
+ close(fd);
+ return false;
+}
+
+static bool setup_ueid(void)
+{
+ /* get smc nl id */
+ if (!get_smc_nl_family_id())
+ return false;
+ /* clear old ueid for bpftest */
+ smc_ueid(SMC_NETLINK_REMOVE_UEID);
+ /* smc-loopback requires a UEID */
+ return smc_ueid(SMC_NETLINK_ADD_UEID);
+}
+
+static void cleanup_ueid(void)
+{
+ smc_ueid(SMC_NETLINK_REMOVE_UEID);
+}
+#endif /* __s390x__ */
+
+static bool setup_netns(void)
+{
+ test_netns = netns_new(TEST_NS, true);
+ if (!ASSERT_OK_PTR(test_netns, "open net namespace"))
+ goto fail_netns;
+
+ SYS(fail_ip, "ip addr add 127.0.1.0/8 dev lo");
+ SYS(fail_ip, "ip addr add 127.0.2.0/8 dev lo");
+
+ return true;
+fail_ip:
+ netns_free(test_netns);
+fail_netns:
+ return false;
+}
+
+static void cleanup_netns(void)
+{
+ netns_free(test_netns);
+}
+
+static bool setup_smc(void)
+{
+ if (!setup_ueid())
+ return false;
+
+ if (!setup_netns())
+ goto fail_netns;
+
+ return true;
+fail_netns:
+ cleanup_ueid();
+ return false;
+}
+
+static int set_client_addr_cb(int fd, void *opts)
+{
+ const char *src = (const char *)opts;
+ struct sockaddr_in localaddr;
+
+ localaddr.sin_family = AF_INET;
+ localaddr.sin_port = htons(0);
+ localaddr.sin_addr.s_addr = inet_addr(src);
+ return !ASSERT_OK(bind(fd, (struct sockaddr *)&localaddr,
+ sizeof(localaddr)), "client bind");
+}
+
+static void run_link(const char *src, const char *dst, int port)
+{
+ struct network_helper_opts opts = {0};
+ int server, client;
+
+ server = start_server_str(AF_INET, SOCK_STREAM, dst, port, NULL);
+ if (!ASSERT_OK_FD(server, "start service_1"))
+ return;
+
+ opts.proto = IPPROTO_TCP;
+ opts.post_socket_cb = set_client_addr_cb;
+ opts.cb_opts = (void *)src;
+
+ client = connect_to_fd_opts(server, &opts);
+ if (!ASSERT_OK_FD(client, "start connect"))
+ goto fail_client;
+
+ close(client);
+fail_client:
+ close(server);
+}
+
+static void block_link(int map_fd, const char *src, const char *dst)
+{
+ struct smc_policy_ip_value val = { .mode = /* block */ 0 };
+ struct smc_policy_ip_key key = {
+ .sip = inet_addr(src),
+ .dip = inet_addr(dst),
+ };
+
+ bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
+}
+
+/*
+ * This test describes a real-life service topology as follows:
+ *
+ * +-------------> service_1
+ * link 1 | |
+ * +--------------------> server | link 2
+ * | | V
+ * | +-------------> service_2
+ * | link 3
+ * client -------------------> server_via_unsafe_path -> service_3
+ *
+ * Among them,
+ * 1. link-1 is well suited to SMC.
+ * 2. link-2 is not suited to SMC, because it mostly carries short-lived
+ * connections.
+ * 3. link-3 is also not suited to SMC, because the RDMA link is
+ * unavailable and the connection would sit through a long timeout before
+ * it could fall back to TCP.
+ * To achieve this goal, we use a customized SMC IP strategy via smc_hs_ctrl.
+ */
+static void test_topo(void)
+{
+ struct bpf_smc *skel;
+ int rc, map_fd;
+
+ skel = bpf_smc__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_smc__open_and_load"))
+ return;
+
+ rc = bpf_smc__attach(skel);
+ if (!ASSERT_OK(rc, "bpf_smc__attach"))
+ goto fail;
+
+ map_fd = bpf_map__fd(skel->maps.smc_policy_ip);
+ if (!ASSERT_OK_FD(map_fd, "bpf_map__fd"))
+ goto fail;
+
+ /* Mock the process of transparent replacement, since we will modify the
+ * protocol to IPPROTO_SMC accordingly via
+ * fmod_ret/update_socket_protocol.
+ */
+ write_sysctl("/proc/sys/net/smc/hs_ctrl", "linkcheck");
+
+ /* Configure the IP strategy */
+ block_link(map_fd, CLIENT_IP, SERVER_IP_VIA_RISK_PATH);
+ block_link(map_fd, SERVER_IP, SERVER_IP);
+
+ /* should go with smc */
+ run_link(CLIENT_IP, SERVER_IP, SERVICE_1);
+ /* should go with smc fallback */
+ run_link(SERVER_IP, SERVER_IP, SERVICE_2);
+
+ ASSERT_EQ(skel->bss->smc_cnt, 2, "smc count");
+ ASSERT_EQ(skel->bss->fallback_cnt, 1, "fallback count");
+
+ /* should go with smc */
+ run_link(CLIENT_IP, SERVER_IP, SERVICE_2);
+
+ ASSERT_EQ(skel->bss->smc_cnt, 3, "smc count");
+ ASSERT_EQ(skel->bss->fallback_cnt, 1, "fallback count");
+
+ /* should go with smc fallback */
+ run_link(CLIENT_IP, SERVER_IP_VIA_RISK_PATH, SERVICE_3);
+
+ ASSERT_EQ(skel->bss->smc_cnt, 4, "smc count");
+ ASSERT_EQ(skel->bss->fallback_cnt, 2, "fallback count");
+
+fail:
+ bpf_smc__destroy(skel);
+}
+
+void test_bpf_smc(void)
+{
+ if (!setup_smc()) {
+ printf("setup for smc test failed, test SKIP\n");
+ test__skip();
+ return;
+ }
+
+ if (test__start_subtest("topo"))
+ test_topo();
+
+ cleanup_ueid();
+ cleanup_netns();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c b/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c
new file mode 100644
index 000000000000..1750c29b94f8
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2022 Sony Group Corporation */
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <sys/prctl.h>
+#include <test_progs.h>
+#include "bpf_syscall_macro.skel.h"
+
+void test_bpf_syscall_macro(void)
+{
+ struct bpf_syscall_macro *skel = NULL;
+ int err;
+ int exp_arg1 = 1001;
+ unsigned long exp_arg2 = 12;
+ unsigned long exp_arg3 = 13;
+ unsigned long exp_arg4 = 14;
+ unsigned long exp_arg5 = 15;
+ loff_t off_in, off_out;
+ ssize_t r;
+
+ /* check whether the program can be opened */
+ skel = bpf_syscall_macro__open();
+ if (!ASSERT_OK_PTR(skel, "bpf_syscall_macro__open"))
+ return;
+
+ skel->rodata->filter_pid = getpid();
+
+ /* check whether the program can be loaded */
+ err = bpf_syscall_macro__load(skel);
+ if (!ASSERT_OK(err, "bpf_syscall_macro__load"))
+ goto cleanup;
+
+ /* check whether the kprobe can be attached */
+ err = bpf_syscall_macro__attach(skel);
+ if (!ASSERT_OK(err, "bpf_syscall_macro__attach"))
+ goto cleanup;
+
+ /* check whether args of syscall are copied correctly */
+ prctl(exp_arg1, exp_arg2, exp_arg3, exp_arg4, exp_arg5);
+
+ ASSERT_EQ(skel->bss->arg1, exp_arg1, "syscall_arg1");
+ ASSERT_EQ(skel->bss->arg2, exp_arg2, "syscall_arg2");
+ ASSERT_EQ(skel->bss->arg3, exp_arg3, "syscall_arg3");
+ /* PT_REGS_PARM4 cannot fetch arg4 on x86_64: the syscall ABI passes it in %r10, not %rcx */
+#ifdef __x86_64__
+ ASSERT_NEQ(skel->bss->arg4_cx, exp_arg4, "syscall_arg4_from_cx");
+#else
+ ASSERT_EQ(skel->bss->arg4_cx, exp_arg4, "syscall_arg4_from_cx");
+#endif
+ ASSERT_EQ(skel->bss->arg4, exp_arg4, "syscall_arg4");
+ ASSERT_EQ(skel->bss->arg5, exp_arg5, "syscall_arg5");
+
+ /* check whether args of syscall are copied correctly for CORE variants */
+ ASSERT_EQ(skel->bss->arg1_core, exp_arg1, "syscall_arg1_core_variant");
+ ASSERT_EQ(skel->bss->arg2_core, exp_arg2, "syscall_arg2_core_variant");
+ ASSERT_EQ(skel->bss->arg3_core, exp_arg3, "syscall_arg3_core_variant");
+ /* likewise, PT_REGS_PARM4_CORE cannot fetch arg4 on x86_64 (%r10, not %rcx) */
+#ifdef __x86_64__
+ ASSERT_NEQ(skel->bss->arg4_core_cx, exp_arg4, "syscall_arg4_from_cx_core_variant");
+#else
+ ASSERT_EQ(skel->bss->arg4_core_cx, exp_arg4, "syscall_arg4_from_cx_core_variant");
+#endif
+ ASSERT_EQ(skel->bss->arg4_core, exp_arg4, "syscall_arg4_core_variant");
+ ASSERT_EQ(skel->bss->arg5_core, exp_arg5, "syscall_arg5_core_variant");
+
+ ASSERT_EQ(skel->bss->option_syscall, exp_arg1, "BPF_KPROBE_SYSCALL_option");
+ ASSERT_EQ(skel->bss->arg2_syscall, exp_arg2, "BPF_KPROBE_SYSCALL_arg2");
+ ASSERT_EQ(skel->bss->arg3_syscall, exp_arg3, "BPF_KPROBE_SYSCALL_arg3");
+ ASSERT_EQ(skel->bss->arg4_syscall, exp_arg4, "BPF_KPROBE_SYSCALL_arg4");
+ ASSERT_EQ(skel->bss->arg5_syscall, exp_arg5, "BPF_KPROBE_SYSCALL_arg5");
+
+ r = splice(-42, &off_in, 42, &off_out, 0x12340000, SPLICE_F_NONBLOCK);
+ err = -errno;
+ ASSERT_EQ(r, -1, "splice_res");
+ ASSERT_EQ(err, -EBADF, "splice_err");
+
+ ASSERT_EQ(skel->bss->splice_fd_in, -42, "splice_arg1");
+ ASSERT_EQ(skel->bss->splice_off_in, (__u64)&off_in, "splice_arg2");
+ ASSERT_EQ(skel->bss->splice_fd_out, 42, "splice_arg3");
+ ASSERT_EQ(skel->bss->splice_off_out, (__u64)&off_out, "splice_arg4");
+ ASSERT_EQ(skel->bss->splice_len, 0x12340000, "splice_arg5");
+ ASSERT_EQ(skel->bss->splice_flags, SPLICE_F_NONBLOCK, "splice_arg6");
+
+cleanup:
+ bpf_syscall_macro__destroy(skel);
+}
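
A short aside on the x86_64 asymmetry that the two #ifdef blocks above encode; the register table below follows the x86_64 calling conventions, and the comment block is explanatory rather than part of the patch:

/* x86_64 argument registers:
 *   function call ABI: rdi, rsi, rdx, rcx, r8, r9
 *   syscall ABI:       rdi, rsi, rdx, r10, r8, r9
 * PT_REGS_PARM4() reads %rcx, so for a syscall kprobe it sees a
 * clobbered register; BPF_KPROBE_SYSCALL and the *_SYSCALL variants
 * of the PT_REGS macros read %r10 instead, which is why arg4 and
 * arg4_syscall match the expected value while arg4_cx does not. */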
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpffs.c b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c
index 172c999e523c..ea933fd151c3 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_bpffs.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c
@@ -1,13 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define _GNU_SOURCE
+#include <stdio.h>
#include <sched.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <test_progs.h>
-#define TDIR "/sys/kernel/debug"
+/* TDIR must be in a location we can create a directory in. */
+#define TDIR "/tmp/test_bpffs_testdir"
static int read_iter(char *file)
{
@@ -18,61 +20,129 @@ static int read_iter(char *file)
fd = open(file, 0);
if (fd < 0)
return -1;
- while ((len = read(fd, buf, sizeof(buf))) > 0)
+ while ((len = read(fd, buf, sizeof(buf))) > 0) {
+ buf[sizeof(buf) - 1] = '\0';
if (strstr(buf, "iter")) {
close(fd);
return 0;
}
+ }
close(fd);
return -1;
}
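
As a hedged alternative to the termination fix above, read_iter() could read one byte less than the buffer and terminate at the actual length, so strstr() never scans stale bytes. This sketch assumes <string.h> and <unistd.h> and keeps the original one-read-window limitation:

static int read_iter_alt(int fd)
{
	char buf[64];
	ssize_t len;

	while ((len = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[len] = '\0';	/* terminate at the data end */
		if (strstr(buf, "iter"))
			return 0;
	}
	return -1;
}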
static int fn(void)
{
- int err, duration = 0;
+ struct stat a, b, c;
+ int err, map;
err = unshare(CLONE_NEWNS);
- if (CHECK(err, "unshare", "failed: %d\n", errno))
+ if (!ASSERT_OK(err, "unshare"))
goto out;
err = mount("", "/", "", MS_REC | MS_PRIVATE, NULL);
- if (CHECK(err, "mount /", "failed: %d\n", errno))
+ if (!ASSERT_OK(err, "mount /"))
goto out;
- err = umount(TDIR);
- if (CHECK(err, "umount " TDIR, "failed: %d\n", errno))
+ err = mkdir(TDIR, 0777);
+ /* If the directory already exists, we can carry on. It may be left
+ * over from a previous run.
+ */
+ if ((err && errno != EEXIST) && !ASSERT_OK(err, "mkdir " TDIR))
goto out;
err = mount("none", TDIR, "tmpfs", 0, NULL);
- if (CHECK(err, "mount", "mount root failed: %d\n", errno))
+ if (!ASSERT_OK(err, "mount tmpfs"))
goto out;
err = mkdir(TDIR "/fs1", 0777);
- if (CHECK(err, "mkdir "TDIR"/fs1", "failed: %d\n", errno))
+ if (!ASSERT_OK(err, "mkdir " TDIR "/fs1"))
goto out;
err = mkdir(TDIR "/fs2", 0777);
- if (CHECK(err, "mkdir "TDIR"/fs2", "failed: %d\n", errno))
+ if (!ASSERT_OK(err, "mkdir " TDIR "/fs2"))
goto out;
err = mount("bpf", TDIR "/fs1", "bpf", 0, NULL);
- if (CHECK(err, "mount bpffs "TDIR"/fs1", "failed: %d\n", errno))
+ if (!ASSERT_OK(err, "mount bpffs " TDIR "/fs1"))
goto out;
err = mount("bpf", TDIR "/fs2", "bpf", 0, NULL);
- if (CHECK(err, "mount bpffs " TDIR "/fs2", "failed: %d\n", errno))
+ if (!ASSERT_OK(err, "mount bpffs " TDIR "/fs2"))
goto out;
err = read_iter(TDIR "/fs1/maps.debug");
- if (CHECK(err, "reading " TDIR "/fs1/maps.debug", "failed\n"))
+ if (!ASSERT_OK(err, "reading " TDIR "/fs1/maps.debug"))
goto out;
err = read_iter(TDIR "/fs2/progs.debug");
- if (CHECK(err, "reading " TDIR "/fs2/progs.debug", "failed\n"))
+ if (!ASSERT_OK(err, "reading " TDIR "/fs2/progs.debug"))
goto out;
+
+ err = mkdir(TDIR "/fs1/a", 0777);
+ if (!ASSERT_OK(err, "creating " TDIR "/fs1/a"))
+ goto out;
+ err = mkdir(TDIR "/fs1/a/1", 0777);
+ if (!ASSERT_OK(err, "creating " TDIR "/fs1/a/1"))
+ goto out;
+ err = mkdir(TDIR "/fs1/b", 0777);
+ if (!ASSERT_OK(err, "creating " TDIR "/fs1/b"))
+ goto out;
+
+ map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL);
+ if (!ASSERT_GT(map, 0, "create_map(ARRAY)"))
+ goto out;
+ err = bpf_obj_pin(map, TDIR "/fs1/c");
+ if (!ASSERT_OK(err, "pin map"))
+ goto out;
+ close(map);
+
+ /* Check that RENAME_EXCHANGE works for directories. */
+ err = stat(TDIR "/fs1/a", &a);
+ if (!ASSERT_OK(err, "stat(" TDIR "/fs1/a)"))
+ goto out;
+ err = renameat2(0, TDIR "/fs1/a", 0, TDIR "/fs1/b", RENAME_EXCHANGE);
+ if (!ASSERT_OK(err, "renameat2(/fs1/a, /fs1/b, RENAME_EXCHANGE)"))
+ goto out;
+ err = stat(TDIR "/fs1/b", &b);
+ if (!ASSERT_OK(err, "stat(" TDIR "/fs1/b)"))
+ goto out;
+ if (!ASSERT_EQ(a.st_ino, b.st_ino, "b should have a's inode"))
+ goto out;
+ err = access(TDIR "/fs1/b/1", F_OK);
+ if (!ASSERT_OK(err, "access(" TDIR "/fs1/b/1)"))
+ goto out;
+
+ /* Check that RENAME_EXCHANGE works for mixed file types. */
+ err = stat(TDIR "/fs1/c", &c);
+ if (!ASSERT_OK(err, "stat(" TDIR "/fs1/c)"))
+ goto out;
+ err = renameat2(0, TDIR "/fs1/c", 0, TDIR "/fs1/b", RENAME_EXCHANGE);
+ if (!ASSERT_OK(err, "renameat2(/fs1/c, /fs1/b, RENAME_EXCHANGE)"))
+ goto out;
+ err = stat(TDIR "/fs1/b", &b);
+ if (!ASSERT_OK(err, "stat(" TDIR "/fs1/b)"))
+ goto out;
+ if (!ASSERT_EQ(c.st_ino, b.st_ino, "b should have c's inode"))
+ goto out;
+ err = access(TDIR "/fs1/c/1", F_OK);
+ if (!ASSERT_OK(err, "access(" TDIR "/fs1/c/1)"))
+ goto out;
+
+ /* Check that RENAME_NOREPLACE works. */
+ err = renameat2(0, TDIR "/fs1/b", 0, TDIR "/fs1/a", RENAME_NOREPLACE);
+ if (!ASSERT_ERR(err, "renameat2(RENAME_NOREPLACE)")) {
+ err = -EINVAL;
+ goto out;
+ }
+ err = access(TDIR "/fs1/b", F_OK);
+ if (!ASSERT_OK(err, "access(" TDIR "/fs1/b)"))
+ goto out;
+
out:
umount(TDIR "/fs1");
umount(TDIR "/fs2");
rmdir(TDIR "/fs1");
rmdir(TDIR "/fs2");
umount(TDIR);
+ rmdir(TDIR);
exit(err);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c b/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c
index 2559bb775762..9c0200c132d9 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c
@@ -9,18 +9,10 @@
#include "bprm_opts.skel.h"
#include "network_helpers.h"
-
-#ifndef __NR_pidfd_open
-#define __NR_pidfd_open 434
-#endif
+#include "task_local_storage_helpers.h"
static const char * const bash_envp[] = { "TMPDIR=shouldnotbeset", NULL };
-static inline int sys_pidfd_open(pid_t pid, unsigned int flags)
-{
- return syscall(__NR_pidfd_open, pid, flags);
-}
-
static int update_storage(int map_fd, int secureexec)
{
int task_fd, ret = 0;
@@ -59,7 +51,7 @@ static int run_set_secureexec(int map_fd, int secureexec)
exit(ret);
/* If the binary is executed with securexec=1, the dynamic
- * loader ingores and unsets certain variables like LD_PRELOAD,
+ * loader ignores and unsets certain variables like LD_PRELOAD,
* TMPDIR etc. TMPDIR is used here to simplify the example, as
* LD_PRELOAD requires a real .so file.
*
diff --git a/tools/testing/selftests/bpf/prog_tests/test_btf_ext.c b/tools/testing/selftests/bpf/prog_tests/test_btf_ext.c
new file mode 100644
index 000000000000..7d1b478c99a0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_btf_ext.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms Inc. */
+#include <test_progs.h>
+#include "test_btf_ext.skel.h"
+#include "btf_helpers.h"
+
+static void subtest_line_func_info(void)
+{
+ struct test_btf_ext *skel;
+ struct bpf_prog_info info;
+ struct bpf_line_info line_info[128], *libbpf_line_info;
+ struct bpf_func_info func_info[128], *libbpf_func_info;
+ __u32 info_len = sizeof(info), libbpf_line_info_cnt, libbpf_func_info_cnt;
+ int err, fd;
+
+ skel = test_btf_ext__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ fd = bpf_program__fd(skel->progs.global_func);
+
+ memset(&info, 0, sizeof(info));
+ info.line_info = ptr_to_u64(&line_info);
+ info.nr_line_info = ARRAY_SIZE(line_info);
+ info.line_info_rec_size = sizeof(*line_info);
+ err = bpf_prog_get_info_by_fd(fd, &info, &info_len);
+ if (!ASSERT_OK(err, "prog_line_info"))
+ goto out;
+
+ libbpf_line_info = bpf_program__line_info(skel->progs.global_func);
+ libbpf_line_info_cnt = bpf_program__line_info_cnt(skel->progs.global_func);
+
+ memset(&info, 0, sizeof(info));
+ info.func_info = ptr_to_u64(&func_info);
+ info.nr_func_info = ARRAY_SIZE(func_info);
+ info.func_info_rec_size = sizeof(*func_info);
+ err = bpf_prog_get_info_by_fd(fd, &info, &info_len);
+ if (!ASSERT_OK(err, "prog_func_info"))
+ goto out;
+
+ libbpf_func_info = bpf_program__func_info(skel->progs.global_func);
+ libbpf_func_info_cnt = bpf_program__func_info_cnt(skel->progs.global_func);
+
+ if (!ASSERT_OK_PTR(libbpf_line_info, "bpf_program__line_info"))
+ goto out;
+ if (!ASSERT_EQ(libbpf_line_info_cnt, info.nr_line_info, "line_info_cnt"))
+ goto out;
+ if (!ASSERT_OK_PTR(libbpf_func_info, "bpf_program__func_info"))
+ goto out;
+ if (!ASSERT_EQ(libbpf_func_info_cnt, info.nr_func_info, "func_info_cnt"))
+ goto out;
+ ASSERT_MEMEQ(libbpf_line_info, line_info, libbpf_line_info_cnt * sizeof(*line_info),
+ "line_info");
+ ASSERT_MEMEQ(libbpf_func_info, func_info, libbpf_func_info_cnt * sizeof(*func_info),
+ "func_info");
+out:
+ test_btf_ext__destroy(skel);
+}
+
+void test_btf_ext(void)
+{
+ if (test__start_subtest("line_func_info"))
+ subtest_line_func_info();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_csum_diff.c b/tools/testing/selftests/bpf/prog_tests/test_csum_diff.c
new file mode 100644
index 000000000000..107b20d43e83
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_csum_diff.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates */
+#include <test_progs.h>
+#include "csum_diff_test.skel.h"
+
+#define BUFF_SZ 512
+
+struct testcase {
+ unsigned long long to_buff[BUFF_SZ / 8];
+ unsigned int to_buff_len;
+ unsigned long long from_buff[BUFF_SZ / 8];
+ unsigned int from_buff_len;
+ unsigned short seed;
+ unsigned short result;
+};
+
+#define NUM_PUSH_TESTS 4
+
+struct testcase push_tests[NUM_PUSH_TESTS] = {
+ {
+ .to_buff = {
+ 0xdeadbeefdeadbeef,
+ },
+ .to_buff_len = 8,
+ .from_buff = {},
+ .from_buff_len = 0,
+ .seed = 0,
+ .result = 0x3b3b
+ },
+ {
+ .to_buff = {
+ 0xdeadbeefdeadbeef,
+ 0xbeefdeadbeefdead,
+ },
+ .to_buff_len = 16,
+ .from_buff = {},
+ .from_buff_len = 0,
+ .seed = 0x1234,
+ .result = 0x88aa
+ },
+ {
+ .to_buff = {
+ 0xdeadbeefdeadbeef,
+ 0xbeefdeadbeefdead,
+ },
+ .to_buff_len = 15,
+ .from_buff = {},
+ .from_buff_len = 0,
+ .seed = 0x1234,
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ .result = 0xcaa9
+#else
+ .result = 0x87fd
+#endif
+ },
+ {
+ .to_buff = {
+ 0x327b23c66b8b4567,
+ 0x66334873643c9869,
+ 0x19495cff74b0dc51,
+ 0x625558ec2ae8944a,
+ 0x46e87ccd238e1f29,
+ 0x507ed7ab3d1b58ba,
+ 0x41b71efb2eb141f2,
+ 0x7545e14679e2a9e3,
+ 0x5bd062c2515f007c,
+ 0x4db127f812200854,
+ 0x1f16e9e80216231b,
+ 0x66ef438d1190cde7,
+ 0x3352255a140e0f76,
+ 0x0ded7263109cf92e,
+ 0x1befd79f7fdcc233,
+ 0x6b68079a41a7c4c9,
+ 0x25e45d324e6afb66,
+ 0x431bd7b7519b500d,
+ 0x7c83e4583f2dba31,
+ 0x62bbd95a257130a3,
+ 0x628c895d436c6125,
+ 0x721da317333ab105,
+ 0x2d1d5ae92443a858,
+ 0x75a2a8d46763845e,
+ 0x79838cb208edbdab,
+ 0x0b03e0c64353d0cd,
+ 0x54e49eb4189a769b,
+ 0x2ca8861171f32454,
+ 0x02901d820836c40e,
+ 0x081386413a95f874,
+ 0x7c3dbd3d1e7ff521,
+ 0x6ceaf087737b8ddc,
+ 0x4516dde922221a70,
+ 0x614fd4a13006c83e,
+ 0x5577f8e1419ac241,
+ 0x05072367440badfc,
+ 0x77465f013804823e,
+ 0x5c482a977724c67e,
+ 0x5e884adc2463b9ea,
+ 0x2d51779651ead36b,
+ 0x153ea438580bd78f,
+ 0x70a64e2a3855585c,
+ 0x2a487cb06a2342ec,
+ 0x725a06fb1d4ed43b,
+ 0x57e4ccaf2cd89a32,
+ 0x4b588f547a6d8d3c,
+ 0x6de91b18542289ec,
+ 0x7644a45c38437fdb,
+ 0x684a481a32fff902,
+ 0x749abb43579478fe,
+ 0x1ba026fa3dc240fb,
+ 0x75c6c33a79a1deaa,
+ 0x70c6a52912e685fb,
+ 0x374a3fe6520eedd1,
+ 0x23f9c13c4f4ef005,
+ 0x275ac794649bb77c,
+ 0x1cf10fd839386575,
+ 0x235ba861180115be,
+ 0x354fe9f947398c89,
+ 0x741226bb15b5af5c,
+ 0x10233c990d34b6a8,
+ 0x615740953f6ab60f,
+ 0x77ae35eb7e0c57b1,
+ 0x310c50b3579be4f1,
+ },
+ .to_buff_len = 512,
+ .from_buff = {},
+ .from_buff_len = 0,
+ .seed = 0xffff,
+ .result = 0xca45
+ },
+};
+
+#define NUM_PULL_TESTS 4
+
+struct testcase pull_tests[NUM_PULL_TESTS] = {
+ {
+ .from_buff = {
+ 0xdeadbeefdeadbeef,
+ },
+ .from_buff_len = 8,
+ .to_buff = {},
+ .to_buff_len = 0,
+ .seed = 0,
+ .result = 0xc4c4
+ },
+ {
+ .from_buff = {
+ 0xdeadbeefdeadbeef,
+ 0xbeefdeadbeefdead,
+ },
+ .from_buff_len = 16,
+ .to_buff = {},
+ .to_buff_len = 0,
+ .seed = 0x1234,
+ .result = 0x9bbd
+ },
+ {
+ .from_buff = {
+ 0xdeadbeefdeadbeef,
+ 0xbeefdeadbeefdead,
+ },
+ .from_buff_len = 15,
+ .to_buff = {},
+ .to_buff_len = 0,
+ .seed = 0x1234,
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ .result = 0x59be
+#else
+ .result = 0x9c6a
+#endif
+ },
+ {
+ .from_buff = {
+ 0x327b23c66b8b4567,
+ 0x66334873643c9869,
+ 0x19495cff74b0dc51,
+ 0x625558ec2ae8944a,
+ 0x46e87ccd238e1f29,
+ 0x507ed7ab3d1b58ba,
+ 0x41b71efb2eb141f2,
+ 0x7545e14679e2a9e3,
+ 0x5bd062c2515f007c,
+ 0x4db127f812200854,
+ 0x1f16e9e80216231b,
+ 0x66ef438d1190cde7,
+ 0x3352255a140e0f76,
+ 0x0ded7263109cf92e,
+ 0x1befd79f7fdcc233,
+ 0x6b68079a41a7c4c9,
+ 0x25e45d324e6afb66,
+ 0x431bd7b7519b500d,
+ 0x7c83e4583f2dba31,
+ 0x62bbd95a257130a3,
+ 0x628c895d436c6125,
+ 0x721da317333ab105,
+ 0x2d1d5ae92443a858,
+ 0x75a2a8d46763845e,
+ 0x79838cb208edbdab,
+ 0x0b03e0c64353d0cd,
+ 0x54e49eb4189a769b,
+ 0x2ca8861171f32454,
+ 0x02901d820836c40e,
+ 0x081386413a95f874,
+ 0x7c3dbd3d1e7ff521,
+ 0x6ceaf087737b8ddc,
+ 0x4516dde922221a70,
+ 0x614fd4a13006c83e,
+ 0x5577f8e1419ac241,
+ 0x05072367440badfc,
+ 0x77465f013804823e,
+ 0x5c482a977724c67e,
+ 0x5e884adc2463b9ea,
+ 0x2d51779651ead36b,
+ 0x153ea438580bd78f,
+ 0x70a64e2a3855585c,
+ 0x2a487cb06a2342ec,
+ 0x725a06fb1d4ed43b,
+ 0x57e4ccaf2cd89a32,
+ 0x4b588f547a6d8d3c,
+ 0x6de91b18542289ec,
+ 0x7644a45c38437fdb,
+ 0x684a481a32fff902,
+ 0x749abb43579478fe,
+ 0x1ba026fa3dc240fb,
+ 0x75c6c33a79a1deaa,
+ 0x70c6a52912e685fb,
+ 0x374a3fe6520eedd1,
+ 0x23f9c13c4f4ef005,
+ 0x275ac794649bb77c,
+ 0x1cf10fd839386575,
+ 0x235ba861180115be,
+ 0x354fe9f947398c89,
+ 0x741226bb15b5af5c,
+ 0x10233c990d34b6a8,
+ 0x615740953f6ab60f,
+ 0x77ae35eb7e0c57b1,
+ 0x310c50b3579be4f1,
+ },
+ .from_buff_len = 512,
+ .to_buff = {},
+ .to_buff_len = 0,
+ .seed = 0xffff,
+ .result = 0x35ba
+ },
+};
+
+#define NUM_DIFF_TESTS 4
+
+struct testcase diff_tests[NUM_DIFF_TESTS] = {
+ {
+ .from_buff = {
+ 0xdeadbeefdeadbeef,
+ },
+ .from_buff_len = 8,
+ .to_buff = {
+ 0xabababababababab,
+ },
+ .to_buff_len = 8,
+ .seed = 0,
+ .result = 0x7373
+ },
+ {
+ .from_buff = {
+ 0xdeadbeefdeadbeef,
+ },
+ .from_buff_len = 7,
+ .to_buff = {
+ 0xabababababababab,
+ },
+ .to_buff_len = 7,
+ .seed = 0,
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ .result = 0xa673
+#else
+ .result = 0x73b7
+#endif
+ },
+ {
+ .from_buff = {
+ 0,
+ },
+ .from_buff_len = 8,
+ .to_buff = {
+ 0xabababababababab,
+ },
+ .to_buff_len = 8,
+ .seed = 0,
+ .result = 0xaeae
+ },
+ {
+ .from_buff = {
+ 0xdeadbeefdeadbeef
+ },
+ .from_buff_len = 8,
+ .to_buff = {
+ 0,
+ },
+ .to_buff_len = 8,
+ .seed = 0xffff,
+ .result = 0xc4c4
+ },
+};
+
+#define NUM_EDGE_TESTS 4
+
+struct testcase edge_tests[NUM_EDGE_TESTS] = {
+ {
+ .from_buff = {},
+ .from_buff_len = 0,
+ .to_buff = {},
+ .to_buff_len = 0,
+ .seed = 0,
+ .result = 0
+ },
+ {
+ .from_buff = {
+ 0x1234
+ },
+ .from_buff_len = 0,
+ .to_buff = {
+ 0x1234
+ },
+ .to_buff_len = 0,
+ .seed = 0,
+ .result = 0
+ },
+ {
+ .from_buff = {},
+ .from_buff_len = 0,
+ .to_buff = {},
+ .to_buff_len = 0,
+ .seed = 0x1234,
+ .result = 0x1234
+ },
+ {
+ .from_buff = {},
+ .from_buff_len = 512,
+ .to_buff = {},
+ .to_buff_len = 0,
+ .seed = 0xffff,
+ .result = 0xffff
+ },
+};
+
+static unsigned short trigger_csum_diff(const struct csum_diff_test *skel)
+{
+ u8 tmp_out[64 << 2] = {};
+ u8 tmp_in[64] = {};
+ int err;
+ int pfd;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = tmp_in,
+ .data_size_in = sizeof(tmp_in),
+ .data_out = tmp_out,
+ .data_size_out = sizeof(tmp_out),
+ .repeat = 1,
+ );
+ pfd = bpf_program__fd(skel->progs.compute_checksum);
+ err = bpf_prog_test_run_opts(pfd, &topts);
+ if (err)
+ return -1;
+
+ return skel->bss->result;
+}
+
+static void test_csum_diff(struct testcase *tests, int num_tests)
+{
+ struct csum_diff_test *skel;
+ unsigned short got;
+ int err;
+
+ for (int i = 0; i < num_tests; i++) {
+ skel = csum_diff_test__open();
+ if (!ASSERT_OK_PTR(skel, "csum_diff_test open"))
+ return;
+
+ skel->rodata->to_buff_len = tests[i].to_buff_len;
+ skel->rodata->from_buff_len = tests[i].from_buff_len;
+
+ err = csum_diff_test__load(skel);
+ if (!ASSERT_EQ(err, 0, "csum_diff_test load"))
+ goto out;
+
+ memcpy(skel->bss->to_buff, tests[i].to_buff, tests[i].to_buff_len);
+ memcpy(skel->bss->from_buff, tests[i].from_buff, tests[i].from_buff_len);
+ skel->bss->seed = tests[i].seed;
+
+ got = trigger_csum_diff(skel);
+ ASSERT_EQ(got, tests[i].result, "csum_diff result");
+
+ csum_diff_test__destroy(skel);
+ }
+
+ return;
+out:
+ csum_diff_test__destroy(skel);
+}
+
+void test_test_csum_diff(void)
+{
+ if (test__start_subtest("csum_diff_push"))
+ test_csum_diff(push_tests, NUM_PUSH_TESTS);
+ if (test__start_subtest("csum_diff_pull"))
+ test_csum_diff(pull_tests, NUM_PULL_TESTS);
+ if (test__start_subtest("csum_diff_diff"))
+ test_csum_diff(diff_tests, NUM_DIFF_TESTS);
+ if (test__start_subtest("csum_diff_edge"))
+ test_csum_diff(edge_tests, NUM_EDGE_TESTS);
+}
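
The .result values follow ordinary Internet-checksum arithmetic. For the push cases (empty from_buff) they can be reproduced in plain C by summing 16-bit little-endian words and folding the carries; this is a hedged userspace model, not the in-kernel csum implementation:

#include <stdint.h>
#include <stddef.h>

/* Fold a 32-bit one's-complement accumulator into 16 bits. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Push case: fold(seed + sum of 16-bit LE words of to_buff). For the
 * first push test, the words beef,dead,beef,dead sum to 0x33b38, which
 * folds to 0x3b3b, matching .result above. */
static uint16_t csum_push(uint16_t seed, const uint8_t *buf, size_t len)
{
	uint32_t sum = seed;

	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8);
	if (len & 1)
		sum += buf[len - 1];	/* trailing odd byte */
	return csum_fold(sum);
}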
diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
index 32e4348b714b..e905cbaf6b3d 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
@@ -1,83 +1,162 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
+#include "test_global_func1.skel.h"
+#include "test_global_func2.skel.h"
+#include "test_global_func3.skel.h"
+#include "test_global_func4.skel.h"
+#include "test_global_func5.skel.h"
+#include "test_global_func6.skel.h"
+#include "test_global_func7.skel.h"
+#include "test_global_func8.skel.h"
+#include "test_global_func9.skel.h"
+#include "test_global_func10.skel.h"
+#include "test_global_func11.skel.h"
+#include "test_global_func12.skel.h"
+#include "test_global_func13.skel.h"
+#include "test_global_func14.skel.h"
+#include "test_global_func15.skel.h"
+#include "test_global_func16.skel.h"
+#include "test_global_func17.skel.h"
+#include "test_global_func_ctx_args.skel.h"
-const char *err_str;
-bool found;
+#include "bpf/libbpf_internal.h"
+#include "btf_helpers.h"
-static int libbpf_debug_print(enum libbpf_print_level level,
- const char *format, va_list args)
+static void check_ctx_arg_type(const struct btf *btf, const struct btf_param *p)
{
- char *log_buf;
+ const struct btf_type *t;
+ const char *s;
- if (level != LIBBPF_WARN ||
- strcmp(format, "libbpf: \n%s\n")) {
- vprintf(format, args);
- return 0;
+ t = btf__type_by_id(btf, p->type);
+ if (!ASSERT_EQ(btf_kind(t), BTF_KIND_PTR, "ptr_t"))
+ return;
+
+ s = btf_type_raw_dump(btf, t->type);
+ if (!ASSERT_HAS_SUBSTR(s, "STRUCT 'bpf_perf_event_data' size=0 vlen=0",
+ "ctx_struct_t"))
+ return;
+}
+
+static void subtest_ctx_arg_rewrite(void)
+{
+ struct test_global_func_ctx_args *skel = NULL;
+ struct bpf_prog_info info;
+ char func_info_buf[1024] __attribute__((aligned(8)));
+ struct bpf_func_info_min *rec;
+ struct btf *btf = NULL;
+ __u32 info_len = sizeof(info);
+ int err, fd, i;
+ struct btf *kern_btf = NULL;
+
+ kern_btf = btf__load_vmlinux_btf();
+ if (!ASSERT_OK_PTR(kern_btf, "kern_btf_load"))
+ return;
+
+ /* simple detection of kernel native arg:ctx tag support */
+ if (btf__find_by_name_kind(kern_btf, "bpf_subprog_arg_info", BTF_KIND_STRUCT) > 0) {
+ test__skip();
+ btf__free(kern_btf);
+ return;
}
+ btf__free(kern_btf);
+
+ skel = test_global_func_ctx_args__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
- log_buf = va_arg(args, char *);
- if (!log_buf)
+ bpf_program__set_autoload(skel->progs.arg_tag_ctx_perf, true);
+
+ err = test_global_func_ctx_args__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
goto out;
- if (err_str && strstr(log_buf, err_str) == 0)
- found = true;
-out:
- printf(format, log_buf);
- return 0;
-}
-extern int extra_prog_load_log_flags;
+ memset(&info, 0, sizeof(info));
+ info.func_info = ptr_to_u64(&func_info_buf);
+ info.nr_func_info = 3;
+ info.func_info_rec_size = sizeof(struct bpf_func_info_min);
-static int check_load(const char *file)
-{
- struct bpf_prog_load_attr attr;
- struct bpf_object *obj = NULL;
- int err, prog_fd;
-
- memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
- attr.file = file;
- attr.prog_type = BPF_PROG_TYPE_UNSPEC;
- attr.log_level = extra_prog_load_log_flags;
- attr.prog_flags = BPF_F_TEST_RND_HI32;
- found = false;
- err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
- bpf_object__close(obj);
- return err;
-}
+ fd = bpf_program__fd(skel->progs.arg_tag_ctx_perf);
+ err = bpf_prog_get_info_by_fd(fd, &info, &info_len);
+ if (!ASSERT_OK(err, "prog_info"))
+ goto out;
+
+ if (!ASSERT_EQ(info.nr_func_info, 3, "nr_func_info"))
+ goto out;
+
+ btf = btf__load_from_kernel_by_id(info.btf_id);
+ if (!ASSERT_OK_PTR(btf, "obj_kern_btf"))
+ goto out;
+
+ rec = (struct bpf_func_info_min *)func_info_buf;
+ for (i = 0; i < info.nr_func_info; i++, rec = (void *)rec + info.func_info_rec_size) {
+ const struct btf_type *fn_t, *proto_t;
+ const char *name;
+
+ if (rec->insn_off == 0)
+ continue; /* main prog, skip */
+
+ fn_t = btf__type_by_id(btf, rec->type_id);
+ if (!ASSERT_OK_PTR(fn_t, "fn_type"))
+ goto out;
+ if (!ASSERT_EQ(btf_kind(fn_t), BTF_KIND_FUNC, "fn_type_kind"))
+ goto out;
+ proto_t = btf__type_by_id(btf, fn_t->type);
+ if (!ASSERT_OK_PTR(proto_t, "proto_type"))
+ goto out;
+
+ name = btf__name_by_offset(btf, fn_t->name_off);
+ if (strcmp(name, "subprog_ctx_tag") == 0) {
+ /* int subprog_ctx_tag(void *ctx __arg_ctx) */
+ if (!ASSERT_EQ(btf_vlen(proto_t), 1, "arg_cnt"))
+ goto out;
-struct test_def {
- const char *file;
- const char *err_str;
-};
+ /* arg 0 is PTR -> STRUCT bpf_perf_event_data */
+ check_ctx_arg_type(btf, &btf_params(proto_t)[0]);
+ } else if (strcmp(name, "subprog_multi_ctx_tags") == 0) {
+ /* int subprog_multi_ctx_tags(void *ctx1 __arg_ctx,
+ * struct my_struct *mem,
+ * void *ctx2 __arg_ctx)
+ */
+ if (!ASSERT_EQ(btf_vlen(proto_t), 3, "arg_cnt"))
+ goto out;
+
+ /* arg 0 is PTR -> STRUCT bpf_perf_event_data */
+ check_ctx_arg_type(btf, &btf_params(proto_t)[0]);
+ /* arg 2 is PTR -> STRUCT bpf_perf_event_data */
+ check_ctx_arg_type(btf, &btf_params(proto_t)[2]);
+ } else {
+ ASSERT_FAIL("unexpected subprog %s", name);
+ goto out;
+ }
+ }
+
+out:
+ btf__free(btf);
+ test_global_func_ctx_args__destroy(skel);
+}
void test_test_global_funcs(void)
{
- struct test_def tests[] = {
- { "test_global_func1.o", "combined stack size of 4 calls is 544" },
- { "test_global_func2.o" },
- { "test_global_func3.o" , "the call stack of 8 frames" },
- { "test_global_func4.o" },
- { "test_global_func5.o" , "expected pointer to ctx, but got PTR" },
- { "test_global_func6.o" , "modified ctx ptr R2" },
- { "test_global_func7.o" , "foo() doesn't return scalar" },
- { "test_global_func8.o" },
- };
- libbpf_print_fn_t old_print_fn = NULL;
- int err, i, duration = 0;
-
- old_print_fn = libbpf_set_print(libbpf_debug_print);
-
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- const struct test_def *test = &tests[i];
-
- if (!test__start_subtest(test->file))
- continue;
-
- err_str = test->err_str;
- err = check_load(test->file);
- CHECK_FAIL(!!err ^ !!err_str);
- if (err_str)
- CHECK(found, "", "expected string '%s'", err_str);
- }
- libbpf_set_print(old_print_fn);
+ RUN_TESTS(test_global_func1);
+ RUN_TESTS(test_global_func2);
+ RUN_TESTS(test_global_func3);
+ RUN_TESTS(test_global_func4);
+ RUN_TESTS(test_global_func5);
+ RUN_TESTS(test_global_func6);
+ RUN_TESTS(test_global_func7);
+ RUN_TESTS(test_global_func8);
+ RUN_TESTS(test_global_func9);
+ RUN_TESTS(test_global_func10);
+ RUN_TESTS(test_global_func11);
+ RUN_TESTS(test_global_func12);
+ RUN_TESTS(test_global_func13);
+ RUN_TESTS(test_global_func14);
+ RUN_TESTS(test_global_func15);
+ RUN_TESTS(test_global_func16);
+ RUN_TESTS(test_global_func17);
+ RUN_TESTS(test_global_func_ctx_args);
+
+ if (test__start_subtest("ctx_arg_rewrite"))
+ subtest_ctx_arg_rewrite();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_ima.c b/tools/testing/selftests/bpf/prog_tests/test_ima.c
index 61fca681d524..810b14981c2e 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_ima.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_ima.c
@@ -9,17 +9,21 @@
#include <unistd.h>
#include <sys/wait.h>
#include <test_progs.h>
+#include <linux/ring_buffer.h>
#include "ima.skel.h"
-static int run_measured_process(const char *measured_dir, u32 *monitored_pid)
+#define MAX_SAMPLES 4
+
+static int _run_measured_process(const char *measured_dir, u32 *monitored_pid,
+ const char *cmd)
{
int child_pid, child_status;
child_pid = fork();
if (child_pid == 0) {
*monitored_pid = getpid();
- execlp("./ima_setup.sh", "./ima_setup.sh", "run", measured_dir,
+ execlp("./ima_setup.sh", "./ima_setup.sh", cmd, measured_dir,
NULL);
exit(errno);
@@ -31,19 +35,53 @@ static int run_measured_process(const char *measured_dir, u32 *monitored_pid)
return -EINVAL;
}
+static int run_measured_process(const char *measured_dir, u32 *monitored_pid)
+{
+ return _run_measured_process(measured_dir, monitored_pid, "run");
+}
+
+static u64 ima_hash_from_bpf[MAX_SAMPLES];
+static int ima_hash_from_bpf_idx;
+
+static int process_sample(void *ctx, void *data, size_t len)
+{
+ if (ima_hash_from_bpf_idx >= MAX_SAMPLES)
+ return -ENOSPC;
+
+ ima_hash_from_bpf[ima_hash_from_bpf_idx++] = *((u64 *)data);
+ return 0;
+}
+
+static void test_init(struct ima__bss *bss)
+{
+ ima_hash_from_bpf_idx = 0;
+
+ bss->use_ima_file_hash = false;
+ bss->enable_bprm_creds_for_exec = false;
+ bss->enable_kernel_read_file = false;
+ bss->test_deny = false;
+}
+
void test_test_ima(void)
{
char measured_dir_template[] = "/tmp/ima_measuredXXXXXX";
+ struct ring_buffer *ringbuf = NULL;
const char *measured_dir;
+ u64 bin_true_sample;
char cmd[256];
- int err, duration = 0;
+ int err, duration = 0, fresh_digest_idx = 0;
struct ima *skel = NULL;
skel = ima__open_and_load();
if (CHECK(!skel, "skel_load", "skeleton failed\n"))
goto close_prog;
+ ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf),
+ process_sample, NULL, NULL);
+ if (!ASSERT_OK_PTR(ringbuf, "ringbuf"))
+ goto close_prog;
+
err = ima__attach(skel);
if (CHECK(err, "attach", "attach failed: %d\n", err))
goto close_prog;
@@ -53,22 +91,150 @@ void test_test_ima(void)
goto close_prog;
snprintf(cmd, sizeof(cmd), "./ima_setup.sh setup %s", measured_dir);
- if (CHECK_FAIL(system(cmd)))
+ err = system(cmd);
+ if (CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno))
goto close_clean;
+ /*
+ * Test #1
+ * - Goal: obtain a sample with the bpf_ima_inode_hash() helper
+ * - Expected result: 1 sample (/bin/true)
+ */
+ test_init(skel->bss);
err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
- if (CHECK(err, "run_measured_process", "err = %d\n", err))
+ if (CHECK(err, "run_measured_process #1", "err = %d\n", err))
goto close_clean;
- CHECK(skel->data->ima_hash_ret < 0, "ima_hash_ret",
- "ima_hash_ret = %ld\n", skel->data->ima_hash_ret);
+ err = ring_buffer__consume(ringbuf);
+ ASSERT_EQ(err, 1, "num_samples_or_err");
+ ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
+
+ /*
+ * Test #2
+ * - Goal: obtain samples with the bpf_ima_file_hash() helper
+ * - Expected result: 2 samples (./ima_setup.sh, /bin/true)
+ */
+ test_init(skel->bss);
+ skel->bss->use_ima_file_hash = true;
+ err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
+ if (CHECK(err, "run_measured_process #2", "err = %d\n", err))
+ goto close_clean;
+
+ err = ring_buffer__consume(ringbuf);
+ ASSERT_EQ(err, 2, "num_samples_or_err");
+ ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
+ ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash");
+ bin_true_sample = ima_hash_from_bpf[1];
+
+ /*
+ * Test #3
+ * - Goal: confirm that bpf_ima_inode_hash() returns a non-fresh digest
+ * - Expected result:
+ * 1 sample (/bin/true: fresh) if commit 62622dab0a28 applied
+ * 2 samples (/bin/true: non-fresh, fresh) if commit 62622dab0a28 is
+ * not applied
+ *
+ * If commit 62622dab0a28 ("ima: return IMA digest value only when
+ * IMA_COLLECTED flag is set") is applied, bpf_ima_inode_hash() refuses
+ * to give a non-fresh digest, hence the correct result is 1 instead of
+ * 2.
+ */
+ test_init(skel->bss);
+
+ err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
+ "modify-bin");
+ if (CHECK(err, "modify-bin #3", "err = %d\n", err))
+ goto close_clean;
+
+ skel->bss->enable_bprm_creds_for_exec = true;
+ err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
+ if (CHECK(err, "run_measured_process #3", "err = %d\n", err))
+ goto close_clean;
+
+ err = ring_buffer__consume(ringbuf);
+ ASSERT_GE(err, 1, "num_samples_or_err");
+ if (err == 2) {
+ ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
+ ASSERT_EQ(ima_hash_from_bpf[0], bin_true_sample,
+ "sample_equal_or_err");
+ fresh_digest_idx = 1;
+ }
+
+ ASSERT_NEQ(ima_hash_from_bpf[fresh_digest_idx], 0, "ima_hash");
+ /* IMA refreshed the digest. */
+ ASSERT_NEQ(ima_hash_from_bpf[fresh_digest_idx], bin_true_sample,
+ "sample_equal_or_err");
+
+ /*
+ * Test #4
+ * - Goal: verify that bpf_ima_file_hash() returns a fresh digest
+ * - Expected result: 4 samples (./ima_setup.sh: fresh, fresh;
+ * /bin/true: fresh, fresh)
+ */
+ test_init(skel->bss);
+ skel->bss->use_ima_file_hash = true;
+ skel->bss->enable_bprm_creds_for_exec = true;
+ err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
+ if (CHECK(err, "run_measured_process #4", "err = %d\n", err))
+ goto close_clean;
+
+ err = ring_buffer__consume(ringbuf);
+ ASSERT_EQ(err, 4, "num_samples_or_err");
+ ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
+ ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash");
+ ASSERT_NEQ(ima_hash_from_bpf[2], 0, "ima_hash");
+ ASSERT_NEQ(ima_hash_from_bpf[3], 0, "ima_hash");
+ ASSERT_NEQ(ima_hash_from_bpf[2], bin_true_sample,
+ "sample_different_or_err");
+ ASSERT_EQ(ima_hash_from_bpf[3], ima_hash_from_bpf[2],
+ "sample_equal_or_err");
+
+ skel->bss->use_ima_file_hash = false;
+ skel->bss->enable_bprm_creds_for_exec = false;
+ err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
+ "restore-bin");
+ if (CHECK(err, "restore-bin #3", "err = %d\n", err))
+ goto close_clean;
+
+ /*
+ * Test #5
+ * - Goal: obtain a sample from the kernel_read_file hook
+ * - Expected result: 2 samples (./ima_setup.sh, policy_test)
+ */
+ test_init(skel->bss);
+ skel->bss->use_ima_file_hash = true;
+ skel->bss->enable_kernel_read_file = true;
+ err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
+ "load-policy");
+ if (CHECK(err, "run_measured_process #5", "err = %d\n", err))
+ goto close_clean;
+
+ err = ring_buffer__consume(ringbuf);
+ ASSERT_EQ(err, 2, "num_samples_or_err");
+ ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
+ ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash");
+
+ /*
+ * Test #6
+ * - Goal: ensure that the kernel_read_file hook denies an operation
+ * - Expected result: 0 samples
+ */
+ test_init(skel->bss);
+ skel->bss->enable_kernel_read_file = true;
+ skel->bss->test_deny = true;
+ err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
+ "load-policy");
+ if (CHECK(!err, "run_measured_process #6", "err = %d\n", err))
+ goto close_clean;
- CHECK(skel->bss->ima_hash == 0, "ima_hash",
- "ima_hash = %lu\n", skel->bss->ima_hash);
+ err = ring_buffer__consume(ringbuf);
+ ASSERT_EQ(err, 0, "num_samples_or_err");
close_clean:
snprintf(cmd, sizeof(cmd), "./ima_setup.sh cleanup %s", measured_dir);
- CHECK_FAIL(system(cmd));
+ err = system(cmd);
+ CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno);
close_prog:
+ ring_buffer__free(ringbuf);
ima__destroy(skel);
}
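
The counting assertions in these tests lean on the libbpf ring buffer contract: ring_buffer__consume() invokes the sample callback once per pending record and returns the number of records handled, while a negative callback return (like process_sample()'s -ENOSPC) stops the drain and is propagated. A minimal sketch with illustrative names:

#include <bpf/libbpf.h>

static int on_sample(void *ctx, void *data, size_t len)
{
	unsigned long long *out = ctx;

	*out = *(unsigned long long *)data;	/* copy one u64 hash */
	return 0;				/* 0 means keep draining */
}

static int drain_one(int ringbuf_map_fd, unsigned long long *out)
{
	struct ring_buffer *rb;
	int n;

	rb = ring_buffer__new(ringbuf_map_fd, on_sample, out, NULL);
	if (!rb)
		return -1;
	n = ring_buffer__consume(rb);	/* #records consumed, or <0 */
	ring_buffer__free(rb);
	return n;
}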
diff --git a/tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c b/tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c
new file mode 100644
index 000000000000..375677c19146
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates.*/
+
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "test_ldsx_insn.skel.h"
+
+static void test_map_val_and_probed_memory(void)
+{
+ struct test_ldsx_insn *skel;
+ int err;
+
+ skel = test_ldsx_insn__open();
+ if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open"))
+ return;
+
+ if (skel->rodata->skip) {
+ test__skip();
+ goto out;
+ }
+
+ bpf_program__set_autoload(skel->progs.rdonly_map_prog, true);
+ bpf_program__set_autoload(skel->progs.map_val_prog, true);
+ bpf_program__set_autoload(skel->progs.test_ptr_struct_arg, true);
+
+ err = test_ldsx_insn__load(skel);
+ if (!ASSERT_OK(err, "test_ldsx_insn__load"))
+ goto out;
+
+ err = test_ldsx_insn__attach(skel);
+ if (!ASSERT_OK(err, "test_ldsx_insn__attach"))
+ goto out;
+
+ ASSERT_OK(trigger_module_test_read(256), "trigger_read");
+
+ ASSERT_EQ(skel->bss->done1, 1, "done1");
+ ASSERT_EQ(skel->bss->ret1, 1, "ret1");
+ ASSERT_EQ(skel->bss->done2, 1, "done2");
+ ASSERT_EQ(skel->bss->ret2, 1, "ret2");
+ ASSERT_EQ(skel->bss->int_member, -1, "int_member");
+
+out:
+ test_ldsx_insn__destroy(skel);
+}
+
+static void test_ctx_member_sign_ext(void)
+{
+ struct test_ldsx_insn *skel;
+ int err, fd, cgroup_fd;
+ char buf[16] = {0};
+ socklen_t optlen;
+
+ cgroup_fd = test__join_cgroup("/ldsx_test");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /ldsx_test"))
+ return;
+
+ skel = test_ldsx_insn__open();
+ if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open"))
+ goto close_cgroup_fd;
+
+ if (skel->rodata->skip) {
+ test__skip();
+ goto destroy_skel;
+ }
+
+ bpf_program__set_autoload(skel->progs._getsockopt, true);
+
+ err = test_ldsx_insn__load(skel);
+ if (!ASSERT_OK(err, "test_ldsx_insn__load"))
+ goto destroy_skel;
+
+ skel->links._getsockopt =
+ bpf_program__attach_cgroup(skel->progs._getsockopt, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links._getsockopt, "getsockopt_link"))
+ goto destroy_skel;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ if (!ASSERT_GE(fd, 0, "socket"))
+ goto destroy_skel;
+
+ optlen = sizeof(buf);
+ (void)getsockopt(fd, SOL_IP, IP_TTL, buf, &optlen);
+
+ ASSERT_EQ(skel->bss->set_optlen, -1, "optlen");
+ ASSERT_EQ(skel->bss->set_retval, -1, "retval");
+
+ close(fd);
+destroy_skel:
+ test_ldsx_insn__destroy(skel);
+close_cgroup_fd:
+ close(cgroup_fd);
+}
+
+static void test_ctx_member_narrow_sign_ext(void)
+{
+ struct test_ldsx_insn *skel;
+ struct __sk_buff skb = {};
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ );
+ int err, prog_fd;
+
+ skel = test_ldsx_insn__open();
+ if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open"))
+ return;
+
+ if (skel->rodata->skip) {
+ test__skip();
+ goto out;
+ }
+
+ bpf_program__set_autoload(skel->progs._tc, true);
+
+ err = test_ldsx_insn__load(skel);
+ if (!ASSERT_OK(err, "test_ldsx_insn__load"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs._tc);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+
+ ASSERT_EQ(skel->bss->set_mark, -2, "set_mark");
+
+out:
+ test_ldsx_insn__destroy(skel);
+}
+
+void test_ldsx_insn(void)
+{
+ if (test__start_subtest("map_val and probed_memory"))
+ test_map_val_and_probed_memory();
+ if (test__start_subtest("ctx_member_sign_ext"))
+ test_ctx_member_sign_ext();
+ if (test__start_subtest("ctx_member_narrow_sign_ext"))
+ test_ctx_member_narrow_sign_ext();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_local_storage.c b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c
index c0fe73a17ed1..bcf2e1905ed7 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_local_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c
@@ -11,88 +11,19 @@
#include "local_storage.skel.h"
#include "network_helpers.h"
-
-#ifndef __NR_pidfd_open
-#define __NR_pidfd_open 434
-#endif
-
-static inline int sys_pidfd_open(pid_t pid, unsigned int flags)
-{
- return syscall(__NR_pidfd_open, pid, flags);
-}
-
-static unsigned int duration;
+#include "task_local_storage_helpers.h"
#define TEST_STORAGE_VALUE 0xbeefdead
struct storage {
void *inode;
unsigned int value;
- /* Lock ensures that spin locked versions of local stoage operations
- * also work, most operations in this tests are still single threaded
- */
- struct bpf_spin_lock lock;
};
-/* Copies an rm binary to a temp file. dest is a mkstemp template */
-static int copy_rm(char *dest)
-{
- int fd_in, fd_out = -1, ret = 0;
- struct stat stat;
- char *buf = NULL;
-
- fd_in = open("/bin/rm", O_RDONLY);
- if (fd_in < 0)
- return -errno;
-
- fd_out = mkstemp(dest);
- if (fd_out < 0) {
- ret = -errno;
- goto out;
- }
-
- ret = fstat(fd_in, &stat);
- if (ret == -1) {
- ret = -errno;
- goto out;
- }
-
- buf = malloc(stat.st_blksize);
- if (!buf) {
- ret = -errno;
- goto out;
- }
-
- while (ret = read(fd_in, buf, stat.st_blksize), ret > 0) {
- ret = write(fd_out, buf, ret);
- if (ret < 0) {
- ret = -errno;
- goto out;
-
- }
- }
- if (ret < 0) {
- ret = -errno;
- goto out;
-
- }
-
- /* Set executable permission on the copied file */
- ret = chmod(dest, 0100);
- if (ret == -1)
- ret = -errno;
-
-out:
- free(buf);
- close(fd_in);
- close(fd_out);
- return ret;
-}
-
/* Fork and exec the provided rm binary and return the exit code of the
* forked process and its pid.
*/
-static int run_self_unlink(int *monitored_pid, const char *rm_path)
+static int run_self_unlink(struct local_storage *skel, const char *rm_path)
{
int child_pid, child_status, ret;
int null_fd;
@@ -104,7 +35,7 @@ static int run_self_unlink(int *monitored_pid, const char *rm_path)
dup2(null_fd, STDERR_FILENO);
close(null_fd);
- *monitored_pid = getpid();
+ skel->bss->monitored_pid = getpid();
/* Use the copied /usr/bin/rm to delete itself
* /tmp/copy_of_rm /tmp/copy_of_rm.
*/
@@ -113,6 +44,7 @@ static int run_self_unlink(int *monitored_pid, const char *rm_path)
exit(errno);
} else if (child_pid > 0) {
waitpid(child_pid, &child_status, 0);
+ ASSERT_EQ(skel->data->task_storage_result, 0, "task_storage_result");
return WEXITSTATUS(child_status);
}
@@ -121,46 +53,36 @@ static int run_self_unlink(int *monitored_pid, const char *rm_path)
static bool check_syscall_operations(int map_fd, int obj_fd)
{
- struct storage val = { .value = TEST_STORAGE_VALUE, .lock = { 0 } },
- lookup_val = { .value = 0, .lock = { 0 } };
+ struct storage val = { .value = TEST_STORAGE_VALUE },
+ lookup_val = { .value = 0 };
int err;
/* Looking up an existing element should fail initially */
- err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val,
- BPF_F_LOCK);
- if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
- "err:%d errno:%d\n", err, errno))
+ err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val, 0);
+ if (!ASSERT_EQ(err, -ENOENT, "bpf_map_lookup_elem"))
return false;
/* Create a new element */
- err = bpf_map_update_elem(map_fd, &obj_fd, &val,
- BPF_NOEXIST | BPF_F_LOCK);
- if (CHECK(err < 0, "bpf_map_update_elem", "err:%d errno:%d\n", err,
- errno))
+ err = bpf_map_update_elem(map_fd, &obj_fd, &val, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
return false;
/* Lookup the newly created element */
- err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val,
- BPF_F_LOCK);
- if (CHECK(err < 0, "bpf_map_lookup_elem", "err:%d errno:%d", err,
- errno))
+ err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val, 0);
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
return false;
/* Check the value of the newly created element */
- if (CHECK(lookup_val.value != val.value, "bpf_map_lookup_elem",
- "value got = %x errno:%d", lookup_val.value, val.value))
+ if (!ASSERT_EQ(lookup_val.value, val.value, "bpf_map_lookup_elem"))
return false;
err = bpf_map_delete_elem(map_fd, &obj_fd);
- if (CHECK(err, "bpf_map_delete_elem()", "err:%d errno:%d\n", err,
- errno))
+ if (!ASSERT_OK(err, "bpf_map_delete_elem()"))
return false;
/* The lookup should fail, now that the element has been deleted */
- err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val,
- BPF_F_LOCK);
- if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
- "err:%d errno:%d\n", err, errno))
+ err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val, 0);
+ if (!ASSERT_EQ(err, -ENOENT, "bpf_map_lookup_elem"))
return false;
return true;
@@ -168,74 +90,80 @@ static bool check_syscall_operations(int map_fd, int obj_fd)
void test_test_local_storage(void)
{
- char tmp_exec_path[PATH_MAX] = "/tmp/copy_of_rmXXXXXX";
+ char tmp_dir_path[] = "/tmp/local_storageXXXXXX";
int err, serv_sk = -1, task_fd = -1, rm_fd = -1;
struct local_storage *skel = NULL;
+ char tmp_exec_path[64];
+ char cmd[256];
skel = local_storage__open_and_load();
- if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
goto close_prog;
err = local_storage__attach(skel);
- if (CHECK(err, "attach", "lsm attach failed: %d\n", err))
+ if (!ASSERT_OK(err, "attach"))
goto close_prog;
task_fd = sys_pidfd_open(getpid(), 0);
- if (CHECK(task_fd < 0, "pidfd_open",
- "failed to get pidfd err:%d, errno:%d", task_fd, errno))
+ if (!ASSERT_GE(task_fd, 0, "pidfd_open"))
goto close_prog;
if (!check_syscall_operations(bpf_map__fd(skel->maps.task_storage_map),
task_fd))
goto close_prog;
- err = copy_rm(tmp_exec_path);
- if (CHECK(err < 0, "copy_rm", "err %d errno %d\n", err, errno))
+ if (!ASSERT_OK_PTR(mkdtemp(tmp_dir_path), "mkdtemp"))
goto close_prog;
+ snprintf(tmp_exec_path, sizeof(tmp_exec_path), "%s/copy_of_rm",
+ tmp_dir_path);
+ snprintf(cmd, sizeof(cmd), "cp /bin/rm %s", tmp_exec_path);
+ if (!ASSERT_OK(system(cmd), "system(cp)"))
+ goto close_prog_rmdir;
+
rm_fd = open(tmp_exec_path, O_RDONLY);
- if (CHECK(rm_fd < 0, "open", "failed to open %s err:%d, errno:%d",
- tmp_exec_path, rm_fd, errno))
- goto close_prog;
+ if (!ASSERT_GE(rm_fd, 0, "open(tmp_exec_path)"))
+ goto close_prog_rmdir;
if (!check_syscall_operations(bpf_map__fd(skel->maps.inode_storage_map),
rm_fd))
- goto close_prog;
+ goto close_prog_rmdir;
/* Sets skel->bss->monitored_pid to the pid of the forked child
* forks a child process that executes tmp_exec_path and tries to
* unlink its executable. This operation should be denied by the loaded
* LSM program.
*/
- err = run_self_unlink(&skel->bss->monitored_pid, tmp_exec_path);
- if (CHECK(err != EPERM, "run_self_unlink", "err %d want EPERM\n", err))
- goto close_prog_unlink;
+ err = run_self_unlink(skel, tmp_exec_path);
+ if (!ASSERT_EQ(err, EPERM, "run_self_unlink"))
+ goto close_prog_rmdir;
/* Set the process being monitored to be the current process */
skel->bss->monitored_pid = getpid();
- /* Remove the temporary created executable */
- err = unlink(tmp_exec_path);
- if (CHECK(err != 0, "unlink", "unable to unlink %s: %d", tmp_exec_path,
- errno))
- goto close_prog_unlink;
+ /* Move copy_of_rm to a new location so that it triggers the
+ * inode_rename LSM hook with a new_dentry that has a NULL inode ptr.
+ */
+ snprintf(cmd, sizeof(cmd), "mv %s/copy_of_rm %s/check_null_ptr",
+ tmp_dir_path, tmp_dir_path);
+ if (!ASSERT_OK(system(cmd), "system(mv)"))
+ goto close_prog_rmdir;
- CHECK(skel->data->inode_storage_result != 0, "inode_storage_result",
- "inode_local_storage not set\n");
+ ASSERT_EQ(skel->data->inode_storage_result, 0, "inode_storage_result");
serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
- if (CHECK(serv_sk < 0, "start_server", "failed to start server\n"))
- goto close_prog;
+ if (!ASSERT_GE(serv_sk, 0, "start_server"))
+ goto close_prog_rmdir;
- CHECK(skel->data->sk_storage_result != 0, "sk_storage_result",
- "sk_local_storage not set\n");
+ ASSERT_EQ(skel->data->sk_storage_result, 0, "sk_storage_result");
if (!check_syscall_operations(bpf_map__fd(skel->maps.sk_storage_map),
serv_sk))
- goto close_prog;
+ goto close_prog_rmdir;
-close_prog_unlink:
- unlink(tmp_exec_path);
+close_prog_rmdir:
+ snprintf(cmd, sizeof(cmd), "rm -rf %s", tmp_dir_path);
+ system(cmd);
close_prog:
close(serv_sk);
close(rm_fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/test_lsm.c b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
index 6ab29226c99b..bdc4fc06bc5a 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_lsm.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
@@ -10,9 +10,9 @@
#include <unistd.h>
#include <malloc.h>
#include <stdlib.h>
-#include <unistd.h>
#include "lsm.skel.h"
+#include "lsm_tailcall.skel.h"
char *CMD_ARGS[] = {"true", NULL};
@@ -52,44 +52,109 @@ int exec_cmd(int *monitored_pid)
return -EINVAL;
}
-void test_test_lsm(void)
+static int test_lsm(struct lsm *skel)
{
- struct lsm *skel = NULL;
- int err, duration = 0;
+ struct bpf_link *link;
int buf = 1234;
-
- skel = lsm__open_and_load();
- if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
- goto close_prog;
+ int err;
err = lsm__attach(skel);
- if (CHECK(err, "attach", "lsm attach failed: %d\n", err))
- goto close_prog;
+ if (!ASSERT_OK(err, "attach"))
+ return err;
+
+ /* Check that already linked program can't be attached again. */
+ link = bpf_program__attach(skel->progs.test_int_hook);
+ if (!ASSERT_ERR_PTR(link, "attach_link"))
+ return -1;
err = exec_cmd(&skel->bss->monitored_pid);
- if (CHECK(err < 0, "exec_cmd", "err %d errno %d\n", err, errno))
- goto close_prog;
+ if (!ASSERT_OK(err, "exec_cmd"))
+ return err;
- CHECK(skel->bss->bprm_count != 1, "bprm_count", "bprm_count = %d\n",
- skel->bss->bprm_count);
+ ASSERT_EQ(skel->bss->bprm_count, 1, "bprm_count");
skel->bss->monitored_pid = getpid();
err = stack_mprotect();
- if (CHECK(errno != EPERM, "stack_mprotect", "want err=EPERM, got %d\n",
- errno))
- goto close_prog;
+ if (!ASSERT_EQ(err, -1, "stack_mprotect") ||
+ !ASSERT_EQ(errno, EPERM, "stack_mprotect"))
+ return err;
- CHECK(skel->bss->mprotect_count != 1, "mprotect_count",
- "mprotect_count = %d\n", skel->bss->mprotect_count);
+ ASSERT_EQ(skel->bss->mprotect_count, 1, "mprotect_count");
syscall(__NR_setdomainname, &buf, -2L);
syscall(__NR_setdomainname, 0, -3L);
syscall(__NR_setdomainname, ~0L, -4L);
- CHECK(skel->bss->copy_test != 3, "copy_test",
- "copy_test = %d\n", skel->bss->copy_test);
+ ASSERT_EQ(skel->bss->copy_test, 3, "copy_test");
+
+ lsm__detach(skel);
+
+ skel->bss->copy_test = 0;
+ skel->bss->bprm_count = 0;
+ skel->bss->mprotect_count = 0;
+ return 0;
+}
+
+static void test_lsm_basic(void)
+{
+ struct lsm *skel = NULL;
+ int err;
+
+ skel = lsm__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "lsm_skel_load"))
+ goto close_prog;
+
+ err = test_lsm(skel);
+ if (!ASSERT_OK(err, "test_lsm_first_attach"))
+ goto close_prog;
+
+ err = test_lsm(skel);
+ ASSERT_OK(err, "test_lsm_second_attach");
close_prog:
lsm__destroy(skel);
}
+
+static void test_lsm_tailcall(void)
+{
+ struct lsm_tailcall *skel = NULL;
+ int map_fd, prog_fd;
+ int err, key;
+
+ skel = lsm_tailcall__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "lsm_tailcall__skel_load"))
+ goto close_prog;
+
+ map_fd = bpf_map__fd(skel->maps.jmp_table);
+ if (CHECK_FAIL(map_fd < 0))
+ goto close_prog;
+
+ prog_fd = bpf_program__fd(skel->progs.lsm_file_permission_prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto close_prog;
+
+ key = 0;
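+	/* Updating with the file_permission prog is expected to fail */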
+ err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(!err))
+ goto close_prog;
+
+ prog_fd = bpf_program__fd(skel->progs.lsm_kernfs_init_security_prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto close_prog;
+
+ err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto close_prog;
+
+close_prog:
+ lsm_tailcall__destroy(skel);
+}
+
+void test_test_lsm(void)
+{
+ if (test__start_subtest("lsm_basic"))
+ test_lsm_basic();
+ if (test__start_subtest("lsm_tailcall"))
+ test_lsm_tailcall();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_mmap_inner_array.c b/tools/testing/selftests/bpf/prog_tests/test_mmap_inner_array.c
new file mode 100644
index 000000000000..ce745776ed18
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_mmap_inner_array.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <sys/mman.h>
+#include "mmap_inner_array.skel.h"
+
+void test_mmap_inner_array(void)
+{
+ const long page_size = sysconf(_SC_PAGE_SIZE);
+ struct mmap_inner_array *skel;
+ int inner_array_fd, err;
+ void *tmp;
+ __u64 *val;
+
+ skel = mmap_inner_array__open_and_load();
+
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ inner_array_fd = bpf_map__fd(skel->maps.inner_array);
+ tmp = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, inner_array_fd, 0);
+ if (!ASSERT_OK_PTR(tmp, "inner array mmap"))
+ goto out;
+ val = (void *)tmp;
+
+ err = mmap_inner_array__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto out_unmap;
+
+ skel->bss->pid = getpid();
+ usleep(1);
+
+ /* pid is set, pid_match == true and outer_map_match == false */
+ ASSERT_TRUE(skel->bss->pid_match, "pid match 1");
+ ASSERT_FALSE(skel->bss->outer_map_match, "outer map match 1");
+ ASSERT_FALSE(skel->bss->done, "done 1");
+ ASSERT_EQ(*val, 0, "value match 1");
+
+ err = bpf_map__update_elem(skel->maps.outer_map,
+ &skel->bss->pid, sizeof(skel->bss->pid),
+ &inner_array_fd, sizeof(inner_array_fd),
+ BPF_ANY);
+ if (!ASSERT_OK(err, "update elem"))
+ goto out_unmap;
+ usleep(1);
+
+ /* outer map key is set, outer_map_match == true */
+ ASSERT_TRUE(skel->bss->pid_match, "pid match 2");
+ ASSERT_TRUE(skel->bss->outer_map_match, "outer map match 2");
+ ASSERT_TRUE(skel->bss->done, "done 2");
+ ASSERT_EQ(*val, skel->data->match_value, "value match 2");
+
+out_unmap:
+ munmap(tmp, page_size);
+out:
+ mmap_inner_array__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_overhead.c b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
index 9966685866fd..f27013e38d03 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_overhead.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
@@ -56,11 +56,11 @@ static void setaffinity(void)
void test_test_overhead(void)
{
- const char *kprobe_name = "kprobe/__set_task_comm";
- const char *kretprobe_name = "kretprobe/__set_task_comm";
- const char *raw_tp_name = "raw_tp/task_rename";
- const char *fentry_name = "fentry/__set_task_comm";
- const char *fexit_name = "fexit/__set_task_comm";
+ const char *kprobe_name = "prog1";
+ const char *kretprobe_name = "prog2";
+ const char *raw_tp_name = "prog3";
+ const char *fentry_name = "prog4";
+ const char *fexit_name = "prog5";
const char *kprobe_func = "__set_task_comm";
struct bpf_program *kprobe_prog, *kretprobe_prog, *raw_tp_prog;
struct bpf_program *fentry_prog, *fexit_prog;
@@ -72,27 +72,27 @@ void test_test_overhead(void)
if (CHECK_FAIL(prctl(PR_GET_NAME, comm, 0L, 0L, 0L)))
return;
- obj = bpf_object__open_file("./test_overhead.o", NULL);
- if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj)))
+ obj = bpf_object__open_file("./test_overhead.bpf.o", NULL);
+ if (!ASSERT_OK_PTR(obj, "obj_open_file"))
return;
- kprobe_prog = bpf_object__find_program_by_title(obj, kprobe_name);
+ kprobe_prog = bpf_object__find_program_by_name(obj, kprobe_name);
if (CHECK(!kprobe_prog, "find_probe",
"prog '%s' not found\n", kprobe_name))
goto cleanup;
- kretprobe_prog = bpf_object__find_program_by_title(obj, kretprobe_name);
+ kretprobe_prog = bpf_object__find_program_by_name(obj, kretprobe_name);
if (CHECK(!kretprobe_prog, "find_probe",
"prog '%s' not found\n", kretprobe_name))
goto cleanup;
- raw_tp_prog = bpf_object__find_program_by_title(obj, raw_tp_name);
+ raw_tp_prog = bpf_object__find_program_by_name(obj, raw_tp_name);
if (CHECK(!raw_tp_prog, "find_probe",
"prog '%s' not found\n", raw_tp_name))
goto cleanup;
- fentry_prog = bpf_object__find_program_by_title(obj, fentry_name);
+ fentry_prog = bpf_object__find_program_by_name(obj, fentry_name);
if (CHECK(!fentry_prog, "find_probe",
"prog '%s' not found\n", fentry_name))
goto cleanup;
- fexit_prog = bpf_object__find_program_by_title(obj, fexit_name);
+ fexit_prog = bpf_object__find_program_by_name(obj, fexit_name);
if (CHECK(!fexit_prog, "find_probe",
"prog '%s' not found\n", fexit_name))
goto cleanup;
@@ -108,7 +108,7 @@ void test_test_overhead(void)
/* attach kprobe */
link = bpf_program__attach_kprobe(kprobe_prog, false /* retprobe */,
kprobe_func);
- if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_kprobe"))
goto cleanup;
test_run("kprobe");
bpf_link__destroy(link);
@@ -116,28 +116,28 @@ void test_test_overhead(void)
/* attach kretprobe */
link = bpf_program__attach_kprobe(kretprobe_prog, true /* retprobe */,
kprobe_func);
- if (CHECK(IS_ERR(link), "attach kretprobe", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_kretprobe"))
goto cleanup;
test_run("kretprobe");
bpf_link__destroy(link);
/* attach raw_tp */
link = bpf_program__attach_raw_tracepoint(raw_tp_prog, "task_rename");
- if (CHECK(IS_ERR(link), "attach fentry", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto cleanup;
test_run("raw_tp");
bpf_link__destroy(link);
/* attach fentry */
link = bpf_program__attach_trace(fentry_prog);
- if (CHECK(IS_ERR(link), "attach fentry", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_fentry"))
goto cleanup;
test_run("fentry");
bpf_link__destroy(link);
/* attach fexit */
link = bpf_program__attach_trace(fexit_prog);
- if (CHECK(IS_ERR(link), "attach fexit", "err %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "attach_fexit"))
goto cleanup;
test_run("fexit");
bpf_link__destroy(link);
diff --git a/tools/testing/selftests/bpf/prog_tests/test_profiler.c b/tools/testing/selftests/bpf/prog_tests/test_profiler.c
index 4ca275101ee0..de24e8f0e738 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_profiler.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_profiler.c
@@ -8,20 +8,20 @@
static int sanity_run(struct bpf_program *prog)
{
- struct bpf_prog_test_run_attr test_attr = {};
+ LIBBPF_OPTS(bpf_test_run_opts, test_attr);
__u64 args[] = {1, 2, 3};
- __u32 duration = 0;
int err, prog_fd;
prog_fd = bpf_program__fd(prog);
- test_attr.prog_fd = prog_fd;
test_attr.ctx_in = args;
test_attr.ctx_size_in = sizeof(args);
- err = bpf_prog_test_run_xattr(&test_attr);
- if (CHECK(err || test_attr.retval, "test_run",
- "err %d errno %d retval %d duration %d\n",
- err, errno, test_attr.retval, duration))
+ err = bpf_prog_test_run_opts(prog_fd, &test_attr);
+ if (!ASSERT_OK(err, "test_run"))
+ return -1;
+
+ if (!ASSERT_OK(test_attr.retval, "test_run retval"))
return -1;
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
index cf1215531920..09ca13bdf6ca 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
@@ -6,15 +6,19 @@
static int sanity_run(struct bpf_program *prog)
{
- __u32 duration, retval;
int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ .flags = BPF_F_TEST_SKB_CHECKSUM_COMPLETE,
+ );
prog_fd = bpf_program__fd(prog);
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, &retval, &duration);
- if (CHECK(err || retval != 123, "test_run",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration))
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run"))
+ return -1;
+ if (!ASSERT_EQ(topts.retval, 123, "test_run retval"))
return -1;
return 0;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c
new file mode 100644
index 000000000000..baceb0de9d49
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
+#include <test_progs.h>
+#include "strncmp_test.skel.h"
+
+static int trigger_strncmp(const struct strncmp_test *skel)
+{
+ int cmp;
+
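+	/* usleep() triggers the attached prog, which updates cmp_ret */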
+ usleep(1);
+
+ cmp = skel->bss->cmp_ret;
+ if (cmp > 0)
+ return 1;
+ if (cmp < 0)
+ return -1;
+ return 0;
+}
+
+/*
+ * Compare str and target after making str[i] != target[i].
+ * When exp is -1, delta is -1 so str[i] < target[i]; when exp is 1,
+ * delta is 1 so str[i] > target[i].
+ */
+static void strncmp_full_str_cmp(struct strncmp_test *skel, const char *name,
+ int exp)
+{
+ size_t nr = sizeof(skel->bss->str);
+ char *str = skel->bss->str;
+ int delta = exp;
+ int got;
+ size_t i;
+
+ memcpy(str, skel->rodata->target, nr);
+ for (i = 0; i < nr - 1; i++) {
+ str[i] += delta;
+
+ got = trigger_strncmp(skel);
+ ASSERT_EQ(got, exp, name);
+
+ str[i] -= delta;
+ }
+}
+
+static void test_strncmp_ret(void)
+{
+ struct strncmp_test *skel;
+ int err, got;
+
+ skel = strncmp_test__open();
+ if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.do_strncmp, true);
+
+ err = strncmp_test__load(skel);
+ if (!ASSERT_EQ(err, 0, "strncmp_test load"))
+ goto out;
+
+ err = strncmp_test__attach(skel);
+ if (!ASSERT_EQ(err, 0, "strncmp_test attach"))
+ goto out;
+
+ skel->bss->target_pid = getpid();
+
+ /* Empty str */
+ skel->bss->str[0] = '\0';
+ got = trigger_strncmp(skel);
+ ASSERT_EQ(got, -1, "strncmp: empty str");
+
+ /* Same string */
+ memcpy(skel->bss->str, skel->rodata->target, sizeof(skel->bss->str));
+ got = trigger_strncmp(skel);
+ ASSERT_EQ(got, 0, "strncmp: same str");
+
+ /* Not-null-terminated string */
+ memcpy(skel->bss->str, skel->rodata->target, sizeof(skel->bss->str));
+ skel->bss->str[sizeof(skel->bss->str) - 1] = 'A';
+ got = trigger_strncmp(skel);
+ ASSERT_EQ(got, 1, "strncmp: not-null-term str");
+
+ strncmp_full_str_cmp(skel, "strncmp: less than", -1);
+ strncmp_full_str_cmp(skel, "strncmp: greater than", 1);
+out:
+ strncmp_test__destroy(skel);
+}
+
+static void test_strncmp_bad_not_const_str_size(void)
+{
+ struct strncmp_test *skel;
+ int err;
+
+ skel = strncmp_test__open();
+ if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.strncmp_bad_not_const_str_size, true);
+
+ err = strncmp_test__load(skel);
+ ASSERT_ERR(err, "strncmp_test load bad_not_const_str_size");
+
+ strncmp_test__destroy(skel);
+}
+
+static void test_strncmp_bad_writable_target(void)
+{
+ struct strncmp_test *skel;
+ int err;
+
+ skel = strncmp_test__open();
+ if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.strncmp_bad_writable_target, true);
+
+ err = strncmp_test__load(skel);
+ ASSERT_ERR(err, "strncmp_test load bad_writable_target");
+
+ strncmp_test__destroy(skel);
+}
+
+static void test_strncmp_bad_not_null_term_target(void)
+{
+ struct strncmp_test *skel;
+ int err;
+
+ skel = strncmp_test__open();
+ if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.strncmp_bad_not_null_term_target, true);
+
+ err = strncmp_test__load(skel);
+ ASSERT_ERR(err, "strncmp_test load bad_not_null_term_target");
+
+ strncmp_test__destroy(skel);
+}
+
+void test_test_strncmp(void)
+{
+ if (test__start_subtest("strncmp_ret"))
+ test_strncmp_ret();
+ if (test__start_subtest("strncmp_bad_not_const_str_size"))
+ test_strncmp_bad_not_const_str_size();
+ if (test__start_subtest("strncmp_bad_writable_target"))
+ test_strncmp_bad_writable_target();
+ if (test__start_subtest("strncmp_bad_not_null_term_target"))
+ test_strncmp_bad_not_null_term_target();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_id_ops_mapping.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_id_ops_mapping.c
new file mode 100644
index 000000000000..fd8762ba4b67
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_id_ops_mapping.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "struct_ops_id_ops_mapping1.skel.h"
+#include "struct_ops_id_ops_mapping2.skel.h"
+
+static void test_st_ops_id_ops_mapping(void)
+{
+ struct struct_ops_id_ops_mapping1 *skel1 = NULL;
+ struct struct_ops_id_ops_mapping2 *skel2 = NULL;
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ int err, pid, prog1_fd, prog2_fd;
+
+ skel1 = struct_ops_id_ops_mapping1__open_and_load();
+ if (!ASSERT_OK_PTR(skel1, "struct_ops_id_ops_mapping1__open"))
+ goto out;
+
+ skel2 = struct_ops_id_ops_mapping2__open_and_load();
+ if (!ASSERT_OK_PTR(skel2, "struct_ops_id_ops_mapping2__open"))
+ goto out;
+
+ err = bpf_map_get_info_by_fd(bpf_map__fd(skel1->maps.st_ops_map),
+ &info, &len);
+ if (!ASSERT_OK(err, "bpf_map_get_info_by_fd"))
+ goto out;
+
+ skel1->bss->st_ops_id = info.id;
+
+ err = bpf_map_get_info_by_fd(bpf_map__fd(skel2->maps.st_ops_map),
+ &info, &len);
+ if (!ASSERT_OK(err, "bpf_map_get_info_by_fd"))
+ goto out;
+
+ skel2->bss->st_ops_id = info.id;
+
+ err = struct_ops_id_ops_mapping1__attach(skel1);
+ if (!ASSERT_OK(err, "struct_ops_id_ops_mapping1__attach"))
+ goto out;
+
+ err = struct_ops_id_ops_mapping2__attach(skel2);
+ if (!ASSERT_OK(err, "struct_ops_id_ops_mapping2__attach"))
+ goto out;
+
+ /* run tracing prog that calls .test_1 and checks return */
+ pid = getpid();
+ skel1->bss->test_pid = pid;
+ skel2->bss->test_pid = pid;
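+	/* sys_gettid() triggers the tracing programs of both skeletons */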
+ sys_gettid();
+ skel1->bss->test_pid = 0;
+ skel2->bss->test_pid = 0;
+
+ /* run syscall_prog that calls .test_1 and checks return */
+ prog1_fd = bpf_program__fd(skel1->progs.syscall_prog);
+ err = bpf_prog_test_run_opts(prog1_fd, NULL);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+
+ prog2_fd = bpf_program__fd(skel2->progs.syscall_prog);
+ err = bpf_prog_test_run_opts(prog2_fd, NULL);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+
+ ASSERT_EQ(skel1->bss->test_err, 0, "skel1->bss->test_err");
+ ASSERT_EQ(skel2->bss->test_err, 0, "skel2->bss->test_err");
+
+out:
+ struct_ops_id_ops_mapping1__destroy(skel1);
+ struct_ops_id_ops_mapping2__destroy(skel2);
+}
+
+void test_struct_ops_id_ops_mapping(void)
+{
+ if (test__start_subtest("st_ops_id_ops_mapping"))
+ test_st_ops_id_ops_mapping();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_kptr_return.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_kptr_return.c
new file mode 100644
index 000000000000..467cc72a3588
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_kptr_return.c
@@ -0,0 +1,16 @@
+#include <test_progs.h>
+
+#include "struct_ops_kptr_return.skel.h"
+#include "struct_ops_kptr_return_fail__wrong_type.skel.h"
+#include "struct_ops_kptr_return_fail__invalid_scalar.skel.h"
+#include "struct_ops_kptr_return_fail__nonzero_offset.skel.h"
+#include "struct_ops_kptr_return_fail__local_kptr.skel.h"
+
+void test_struct_ops_kptr_return(void)
+{
+ RUN_TESTS(struct_ops_kptr_return);
+ RUN_TESTS(struct_ops_kptr_return_fail__wrong_type);
+ RUN_TESTS(struct_ops_kptr_return_fail__invalid_scalar);
+ RUN_TESTS(struct_ops_kptr_return_fail__nonzero_offset);
+ RUN_TESTS(struct_ops_kptr_return_fail__local_kptr);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c
new file mode 100644
index 000000000000..01dc2613c8a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+
+#include "struct_ops_maybe_null.skel.h"
+#include "struct_ops_maybe_null_fail.skel.h"
+
+/* Test that the verifier accepts a program that accesses a nullable pointer
+ * with a proper check.
+ */
+static void maybe_null(void)
+{
+ struct struct_ops_maybe_null *skel;
+
+ skel = struct_ops_maybe_null__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_module_open_and_load"))
+ return;
+
+ struct_ops_maybe_null__destroy(skel);
+}
+
+/* Test that the verifier rejects a program that accesses a nullable pointer
+ * without a check beforehand.
+ */
+static void maybe_null_fail(void)
+{
+ struct struct_ops_maybe_null_fail *skel;
+
+ skel = struct_ops_maybe_null_fail__open_and_load();
+ if (ASSERT_ERR_PTR(skel, "struct_ops_module_fail__open_and_load"))
+ return;
+
+ struct_ops_maybe_null_fail__destroy(skel);
+}
+
+void test_struct_ops_maybe_null(void)
+{
+ /* The verifier verifies the programs at load time, so testing both
+ * programs in the same compile-unit is complicated. We run them in
+ * separate objects to simplify the testing.
+ */
+ if (test__start_subtest("maybe_null"))
+ maybe_null();
+ if (test__start_subtest("maybe_null_fail"))
+ maybe_null_fail();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
new file mode 100644
index 000000000000..75a0dea511b3
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <time.h>
+
+#include <sys/epoll.h>
+
+#include "struct_ops_module.skel.h"
+#include "struct_ops_nulled_out_cb.skel.h"
+#include "struct_ops_forgotten_cb.skel.h"
+#include "struct_ops_detach.skel.h"
+#include "unsupported_ops.skel.h"
+
+static void check_map_info(struct bpf_map_info *info)
+{
+ struct bpf_btf_info btf_info;
+ char btf_name[256];
+ u32 btf_info_len = sizeof(btf_info);
+ int err, fd;
+
+ fd = bpf_btf_get_fd_by_id(info->btf_vmlinux_id);
+ if (!ASSERT_GE(fd, 0, "get_value_type_btf_obj_fd"))
+ return;
+
+ memset(&btf_info, 0, sizeof(btf_info));
+ btf_info.name = ptr_to_u64(btf_name);
+ btf_info.name_len = sizeof(btf_name);
+ err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_info_len);
+ if (!ASSERT_OK(err, "get_value_type_btf_obj_info"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(strcmp(btf_name, "bpf_testmod"), 0, "get_value_type_btf_obj_name"))
+ goto cleanup;
+
+cleanup:
+ close(fd);
+}
+
+static int attach_ops_and_check(struct struct_ops_module *skel,
+ struct bpf_map *map,
+ int expected_test_2_result)
+{
+ struct bpf_link *link;
+
+ link = bpf_map__attach_struct_ops(map);
+ ASSERT_OK_PTR(link, "attach_test_mod_1");
+ if (!link)
+ return -1;
+
+ /* test_{1,2}() would be called from bpf_dummy_reg() in bpf_testmod.c */
+ ASSERT_EQ(skel->bss->test_1_result, 0xdeadbeef, "test_1_result");
+ ASSERT_EQ(skel->bss->test_2_result, expected_test_2_result, "test_2_result");
+
+ bpf_link__destroy(link);
+ return 0;
+}
+
+static void test_struct_ops_load(void)
+{
+ struct struct_ops_module *skel;
+ struct bpf_map_info info = {};
+ int err;
+ u32 len;
+
+ skel = struct_ops_module__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
+ return;
+
+ skel->struct_ops.testmod_1->data = 13;
+ skel->struct_ops.testmod_1->test_2 = skel->progs.test_3;
+ /* Since test_2() is not being used, it should be disabled from
+ * auto-loading, or it will fail to load.
+ */
+ bpf_program__set_autoload(skel->progs.test_2, false);
+ bpf_map__set_autocreate(skel->maps.testmod_zeroed, false);
+
+ err = struct_ops_module__load(skel);
+ if (!ASSERT_OK(err, "struct_ops_module_load"))
+ goto cleanup;
+
+ len = sizeof(info);
+ err = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.testmod_1), &info,
+ &len);
+ if (!ASSERT_OK(err, "bpf_map_get_info_by_fd"))
+ goto cleanup;
+
+ check_map_info(&info);
+ /* test_3() will be called from bpf_dummy_reg() in bpf_testmod.c
+ *
+ * In bpf_testmod.c it will pass 4 and 13 (the value of data) to
+ * .test_2. So, the value of test_2_result should be 20 (4 + 13 +
+ * 3).
+ */
+	if (attach_ops_and_check(skel, skel->maps.testmod_1, 20))
+		goto cleanup;
+	if (attach_ops_and_check(skel, skel->maps.testmod_2, 12))
+ goto cleanup;
+
+cleanup:
+ struct_ops_module__destroy(skel);
+}
+
+static void test_struct_ops_not_zeroed(void)
+{
+ struct struct_ops_module *skel;
+ int err;
+
+ /* zeroed is 0, and zeroed_op is null */
+ skel = struct_ops_module__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
+ return;
+
+ skel->struct_ops.testmod_zeroed->zeroed = 0;
+	/* zeroed_op prog should not be loaded automatically now */
+ skel->struct_ops.testmod_zeroed->zeroed_op = NULL;
+
+ err = struct_ops_module__load(skel);
+ ASSERT_OK(err, "struct_ops_module_load");
+
+ struct_ops_module__destroy(skel);
+
+ /* zeroed is not 0 */
+ skel = struct_ops_module__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_module_open_not_zeroed"))
+ return;
+
+ /* libbpf should reject the testmod_zeroed since struct
+ * bpf_testmod_ops in the kernel has no "zeroed" field and the
+ * value of "zeroed" is non-zero.
+ */
+ skel->struct_ops.testmod_zeroed->zeroed = 0xdeadbeef;
+ skel->struct_ops.testmod_zeroed->zeroed_op = NULL;
+ err = struct_ops_module__load(skel);
+ ASSERT_ERR(err, "struct_ops_module_load_not_zeroed");
+
+ struct_ops_module__destroy(skel);
+
+ /* zeroed_op is not null */
+ skel = struct_ops_module__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_module_open_not_zeroed_op"))
+ return;
+
+ /* libbpf should reject the testmod_zeroed since the value of its
+ * "zeroed_op" is not null.
+ */
+ skel->struct_ops.testmod_zeroed->zeroed_op = skel->progs.test_3;
+ err = struct_ops_module__load(skel);
+ ASSERT_ERR(err, "struct_ops_module_load_not_zeroed_op");
+
+ struct_ops_module__destroy(skel);
+}
+
+/* The signature of an implementation might not match the signature of the
+ * function pointer prototype defined in the BPF program. This mismatch
+ * should be allowed as long as the behavior of the operator program
+ * adheres to the signature in the kernel. Libbpf should not enforce the
+ * signature; rather, let the kernel verifier handle the enforcement.
+ */
+static void test_struct_ops_incompatible(void)
+{
+ struct struct_ops_module *skel;
+ struct bpf_link *link;
+ int err;
+
+ skel = struct_ops_module__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
+ return;
+
+ bpf_map__set_autocreate(skel->maps.testmod_zeroed, false);
+
+ err = struct_ops_module__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ link = bpf_map__attach_struct_ops(skel->maps.testmod_incompatible);
+ if (ASSERT_OK_PTR(link, "attach_struct_ops"))
+ bpf_link__destroy(link);
+
+cleanup:
+ struct_ops_module__destroy(skel);
+}
+
+/* validate that it's ok to "turn off" a callback that the kernel supports */
+static void test_struct_ops_nulled_out_cb(void)
+{
+ struct struct_ops_nulled_out_cb *skel;
+ int err;
+
+ skel = struct_ops_nulled_out_cb__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ /* kernel knows about test_1, but we still null it out */
+ skel->struct_ops.ops->test_1 = NULL;
+
+ err = struct_ops_nulled_out_cb__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ ASSERT_FALSE(bpf_program__autoload(skel->progs.test_1_turn_off), "prog_autoload");
+ ASSERT_LT(bpf_program__fd(skel->progs.test_1_turn_off), 0, "prog_fd");
+
+cleanup:
+ struct_ops_nulled_out_cb__destroy(skel);
+}
+
+/* validate that libbpf generates a reasonable error message if a struct_ops
+ * program is not referenced in any struct_ops map
+ */
+static void test_struct_ops_forgotten_cb(void)
+{
+ struct struct_ops_forgotten_cb *skel;
+ char *log;
+ int err;
+
+ skel = struct_ops_forgotten_cb__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ start_libbpf_log_capture();
+
+ err = struct_ops_forgotten_cb__load(skel);
+ if (!ASSERT_ERR(err, "skel_load"))
+ goto cleanup;
+
+ log = stop_libbpf_log_capture();
+ ASSERT_HAS_SUBSTR(log,
+ "prog 'test_1_forgotten': SEC(\"struct_ops\") program isn't referenced anywhere, did you forget to use it?",
+ "libbpf_log");
+ free(log);
+
+ struct_ops_forgotten_cb__destroy(skel);
+
+ /* now let's programmatically use it, we should be fine now */
+ skel = struct_ops_forgotten_cb__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->struct_ops.ops->test_1 = skel->progs.test_1_forgotten; /* not anymore */
+
+ err = struct_ops_forgotten_cb__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+cleanup:
+ struct_ops_forgotten_cb__destroy(skel);
+}
+
+/* Detach a link from a user space program */
+static void test_detach_link(void)
+{
+ struct epoll_event ev, events[2];
+ struct struct_ops_detach *skel;
+ struct bpf_link *link = NULL;
+ int fd, epollfd = -1, nfds;
+ int err;
+
+ skel = struct_ops_detach__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_detach__open_and_load"))
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.testmod_do_detach);
+ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+ goto cleanup;
+
+ fd = bpf_link__fd(link);
+ if (!ASSERT_GE(fd, 0, "link_fd"))
+ goto cleanup;
+
+ epollfd = epoll_create1(0);
+ if (!ASSERT_GE(epollfd, 0, "epoll_create1"))
+ goto cleanup;
+
+ ev.events = EPOLLHUP;
+ ev.data.fd = fd;
+ err = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev);
+ if (!ASSERT_OK(err, "epoll_ctl"))
+ goto cleanup;
+
+ err = bpf_link__detach(link);
+ if (!ASSERT_OK(err, "detach_link"))
+ goto cleanup;
+
+ /* Wait for EPOLLHUP */
+ nfds = epoll_wait(epollfd, events, 2, 500);
+ if (!ASSERT_EQ(nfds, 1, "epoll_wait"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(events[0].data.fd, fd, "epoll_wait_fd"))
+ goto cleanup;
+ if (!ASSERT_TRUE(events[0].events & EPOLLHUP, "events[0].events"))
+ goto cleanup;
+
+cleanup:
+ if (epollfd >= 0)
+ close(epollfd);
+ bpf_link__destroy(link);
+ struct_ops_detach__destroy(skel);
+}
+
+void serial_test_struct_ops_module(void)
+{
+ if (test__start_subtest("struct_ops_load"))
+ test_struct_ops_load();
+ if (test__start_subtest("struct_ops_not_zeroed"))
+ test_struct_ops_not_zeroed();
+ if (test__start_subtest("struct_ops_incompatible"))
+ test_struct_ops_incompatible();
+ if (test__start_subtest("struct_ops_null_out_cb"))
+ test_struct_ops_nulled_out_cb();
+ if (test__start_subtest("struct_ops_forgotten_cb"))
+ test_struct_ops_forgotten_cb();
+ if (test__start_subtest("test_detach_link"))
+ test_detach_link();
+ RUN_TESTS(unsupported_ops);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_pages.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_pages.c
new file mode 100644
index 000000000000..645d32b5160c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_pages.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+
+#include "struct_ops_multi_pages.skel.h"
+
+static void do_struct_ops_multi_pages(void)
+{
+ struct struct_ops_multi_pages *skel;
+ struct bpf_link *link;
+
+	/* The total size of the trampolines for skel->maps.multi_pages should
+	 * exceed one page (at least on x86).
+ */
+ skel = struct_ops_multi_pages__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_multi_pages_open_and_load"))
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.multi_pages);
+ ASSERT_OK_PTR(link, "attach_multi_pages");
+
+ bpf_link__destroy(link);
+ struct_ops_multi_pages__destroy(skel);
+}
+
+void test_struct_ops_multi_pages(void)
+{
+ if (test__start_subtest("multi_pages"))
+ do_struct_ops_multi_pages();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c
new file mode 100644
index 000000000000..106ea447965a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <testing_helpers.h>
+
+static void load_bpf_test_no_cfi(void)
+{
+ int fd;
+ int err;
+
+ fd = open("bpf_test_no_cfi.ko", O_RDONLY);
+ if (!ASSERT_GE(fd, 0, "open"))
+ return;
+
+ /* The module will try to register a struct_ops type without
+ * cfi_stubs and with cfi_stubs.
+ *
+	 * The one without cfi_stubs should fail. The module loads
+	 * successfully only if both registrations produce the expected
+	 * result; otherwise its init fails.
+ */
+ err = finit_module(fd, "", 0);
+ close(fd);
+ if (!ASSERT_OK(err, "finit_module"))
+ return;
+
+ err = delete_module("bpf_test_no_cfi", 0);
+ ASSERT_OK(err, "delete_module");
+}
+
+void test_struct_ops_no_cfi(void)
+{
+ if (test__start_subtest("load_bpf_test_no_cfi"))
+ load_bpf_test_no_cfi();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_refcounted.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_refcounted.c
new file mode 100644
index 000000000000..da60c715fc59
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_refcounted.c
@@ -0,0 +1,14 @@
+#include <test_progs.h>
+
+#include "struct_ops_refcounted.skel.h"
+#include "struct_ops_refcounted_fail__ref_leak.skel.h"
+#include "struct_ops_refcounted_fail__global_subprog.skel.h"
+#include "struct_ops_refcounted_fail__tail_call.skel.h"
+
+void test_struct_ops_refcounted(void)
+{
+ RUN_TESTS(struct_ops_refcounted);
+ RUN_TESTS(struct_ops_refcounted_fail__ref_leak);
+ RUN_TESTS(struct_ops_refcounted_fail__global_subprog);
+ RUN_TESTS(struct_ops_refcounted_fail__tail_call);
+}
diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/prog_tests/test_sysctl.c
index a20a919244c0..273dd41ca09e 100644
--- a/tools/testing/selftests/bpf/test_sysctl.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_sysctl.c
@@ -1,21 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
-#include <fcntl.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <linux/filter.h>
-
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-
-#include <bpf/bpf_endian.h>
-#include "bpf_rlimit.h"
-#include "bpf_util.h"
+#include "test_progs.h"
#include "cgroup_helpers.h"
#define CG_PATH "/foo"
@@ -124,7 +110,7 @@ static struct sysctl_test tests[] = {
.descr = "ctx:write sysctl:write read ok narrow",
.insns = {
/* u64 w = (u16)write & 1; */
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1,
offsetof(struct bpf_sysctl, write)),
#else
@@ -184,7 +170,7 @@ static struct sysctl_test tests[] = {
.descr = "ctx:file_pos sysctl:read read ok narrow",
.insns = {
/* If (file_pos == X) */
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
offsetof(struct bpf_sysctl, file_pos)),
#else
@@ -1372,7 +1358,7 @@ static struct sysctl_test tests[] = {
},
{
"C prog: deny all writes",
- .prog_file = "./test_sysctl_prog.o",
+ .prog_file = "./test_sysctl_prog.bpf.o",
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/tcp_mem",
.open_flags = O_WRONLY,
@@ -1381,7 +1367,7 @@ static struct sysctl_test tests[] = {
},
{
"C prog: deny access by name",
- .prog_file = "./test_sysctl_prog.o",
+ .prog_file = "./test_sysctl_prog.bpf.o",
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/route/mtu_expires",
.open_flags = O_RDONLY,
@@ -1389,7 +1375,7 @@ static struct sysctl_test tests[] = {
},
{
"C prog: read tcp_mem",
- .prog_file = "./test_sysctl_prog.o",
+ .prog_file = "./test_sysctl_prog.bpf.o",
.attach_type = BPF_CGROUP_SYSCTL,
.sysctl = "net/ipv4/tcp_mem",
.open_flags = O_RDONLY,
@@ -1435,14 +1421,10 @@ static int load_sysctl_prog_insns(struct sysctl_test *test,
const char *sysctl_path)
{
struct bpf_insn *prog = test->insns;
- struct bpf_load_program_attr attr;
- int ret;
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+ int ret, insn_cnt;
- memset(&attr, 0, sizeof(struct bpf_load_program_attr));
- attr.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL;
- attr.insns = prog;
- attr.insns_cnt = probe_prog_length(attr.insns);
- attr.license = "GPL";
+ insn_cnt = probe_prog_length(prog);
if (test->fixup_value_insn) {
char buf[128];
@@ -1465,7 +1447,10 @@ static int load_sysctl_prog_insns(struct sysctl_test *test,
return -1;
}
- ret = bpf_load_program_xattr(&attr, bpf_log_buf, BPF_LOG_BUF_SIZE);
+ opts.log_buf = bpf_log_buf;
+ opts.log_size = BPF_LOG_BUF_SIZE;
+
+ ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SYSCTL, NULL, "GPL", prog, insn_cnt, &opts);
if (ret < 0 && test->result != LOAD_REJECT) {
log_err(">>> Loading program error.\n"
">>> Verifier output:\n%s\n-------\n", bpf_log_buf);
@@ -1476,15 +1461,10 @@ static int load_sysctl_prog_insns(struct sysctl_test *test,
static int load_sysctl_prog_file(struct sysctl_test *test)
{
- struct bpf_prog_load_attr attr;
struct bpf_object *obj;
int prog_fd;
- memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
- attr.file = test->prog_file;
- attr.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL;
-
- if (bpf_prog_load_xattr(&attr, &obj, &prog_fd)) {
+ if (bpf_prog_test_load(test->prog_file, BPF_PROG_TYPE_CGROUP_SYSCTL, &obj, &prog_fd)) {
if (test->result != LOAD_REJECT)
log_err(">>> Loading program (%s) error.\n",
test->prog_file);
@@ -1566,7 +1546,7 @@ static int run_test_case(int cgfd, struct sysctl_test *test)
goto err;
}
- if (bpf_prog_attach(progfd, cgfd, atype, BPF_F_ALLOW_OVERRIDE) == -1) {
+ if (bpf_prog_attach(progfd, cgfd, atype, BPF_F_ALLOW_OVERRIDE) < 0) {
if (test->result == ATTACH_REJECT)
goto out;
else
@@ -1614,23 +1594,19 @@ static int run_tests(int cgfd)
return fails ? -1 : 0;
}
-int main(int argc, char **argv)
+void test_sysctl(void)
{
- int cgfd = -1;
- int err = 0;
+ int cgfd;
cgfd = cgroup_setup_and_join(CG_PATH);
- if (cgfd < 0)
- goto err;
+	if (!ASSERT_OK_FD(cgfd, "create_cgroup"))
+ goto out;
- if (run_tests(cgfd))
- goto err;
+ if (!ASSERT_OK(run_tests(cgfd), "run_tests"))
+ goto out;
- goto out;
-err:
- err = -1;
out:
close(cgfd);
cleanup_cgroup_environment();
- return err;
+ return;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_task_local_data.c b/tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
new file mode 100644
index 000000000000..9fd6306b455c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <pthread.h>
+#include <bpf/btf.h>
+#include <test_progs.h>
+
+#define TLD_FREE_DATA_ON_THREAD_EXIT
+#define TLD_DYN_DATA_SIZE 4096
+#include "task_local_data.h"
+
+struct test_tld_struct {
+ __u64 a;
+ __u64 b;
+ __u64 c;
+ __u64 d;
+};
+
+#include "test_task_local_data.skel.h"
+
+TLD_DEFINE_KEY(value0_key, "value0", sizeof(int));
+
+/*
+ * Reset task local data between subtests by clearing metadata other
+ * than the statically defined value0. This is safe as subtests run
+ * sequentially. Users of the task local data library should not touch
+ * library internals.
+ */
+static void reset_tld(void)
+{
+ if (TLD_READ_ONCE(tld_meta_p)) {
+ /* Remove TLDs created by tld_create_key() */
+ tld_meta_p->cnt = 1;
+ tld_meta_p->size = TLD_DYN_DATA_SIZE;
+ memset(&tld_meta_p->metadata[1], 0,
+ (TLD_MAX_DATA_CNT - 1) * sizeof(struct tld_metadata));
+ }
+}
+
+/* Serialize access to bpf program's global variables */
+static pthread_mutex_t global_mutex;
+
+static tld_key_t *tld_keys;
+
+#define TEST_BASIC_THREAD_NUM 32
+
+void *test_task_local_data_basic_thread(void *arg)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct test_task_local_data *skel = (struct test_task_local_data *)arg;
+ int fd, err, tid, *value0, *value1;
+ struct test_tld_struct *value2;
+
+ fd = bpf_map__fd(skel->maps.tld_data_map);
+
+ value0 = tld_get_data(fd, value0_key);
+ if (!ASSERT_OK_PTR(value0, "tld_get_data"))
+ goto out;
+
+ value1 = tld_get_data(fd, tld_keys[1]);
+ if (!ASSERT_OK_PTR(value1, "tld_get_data"))
+ goto out;
+
+ value2 = tld_get_data(fd, tld_keys[2]);
+ if (!ASSERT_OK_PTR(value2, "tld_get_data"))
+ goto out;
+
+ tid = sys_gettid();
+
+ *value0 = tid + 0;
+ *value1 = tid + 1;
+ value2->a = tid + 2;
+ value2->b = tid + 3;
+ value2->c = tid + 4;
+ value2->d = tid + 5;
+
+ pthread_mutex_lock(&global_mutex);
+	/* Run task_main, which reads task local data and saves it to global variables */
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.task_main), &opts);
+ ASSERT_OK(err, "run task_main");
+ ASSERT_OK(opts.retval, "task_main retval");
+
+ ASSERT_EQ(skel->bss->test_value0, tid + 0, "tld_get_data value0");
+ ASSERT_EQ(skel->bss->test_value1, tid + 1, "tld_get_data value1");
+ ASSERT_EQ(skel->bss->test_value2.a, tid + 2, "tld_get_data value2.a");
+ ASSERT_EQ(skel->bss->test_value2.b, tid + 3, "tld_get_data value2.b");
+ ASSERT_EQ(skel->bss->test_value2.c, tid + 4, "tld_get_data value2.c");
+ ASSERT_EQ(skel->bss->test_value2.d, tid + 5, "tld_get_data value2.d");
+ pthread_mutex_unlock(&global_mutex);
+
+ /* Make sure valueX are indeed local to threads */
+ ASSERT_EQ(*value0, tid + 0, "value0");
+ ASSERT_EQ(*value1, tid + 1, "value1");
+ ASSERT_EQ(value2->a, tid + 2, "value2.a");
+ ASSERT_EQ(value2->b, tid + 3, "value2.b");
+ ASSERT_EQ(value2->c, tid + 4, "value2.c");
+ ASSERT_EQ(value2->d, tid + 5, "value2.d");
+
+ *value0 = tid + 5;
+ *value1 = tid + 4;
+ value2->a = tid + 3;
+ value2->b = tid + 2;
+ value2->c = tid + 1;
+ value2->d = tid + 0;
+
+ /* Run task_main again */
+ pthread_mutex_lock(&global_mutex);
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.task_main), &opts);
+ ASSERT_OK(err, "run task_main");
+ ASSERT_OK(opts.retval, "task_main retval");
+
+ ASSERT_EQ(skel->bss->test_value0, tid + 5, "tld_get_data value0");
+ ASSERT_EQ(skel->bss->test_value1, tid + 4, "tld_get_data value1");
+ ASSERT_EQ(skel->bss->test_value2.a, tid + 3, "tld_get_data value2.a");
+ ASSERT_EQ(skel->bss->test_value2.b, tid + 2, "tld_get_data value2.b");
+ ASSERT_EQ(skel->bss->test_value2.c, tid + 1, "tld_get_data value2.c");
+ ASSERT_EQ(skel->bss->test_value2.d, tid + 0, "tld_get_data value2.d");
+ pthread_mutex_unlock(&global_mutex);
+
+out:
+ pthread_exit(NULL);
+}
+
+static void test_task_local_data_basic(void)
+{
+ struct test_task_local_data *skel;
+ pthread_t thread[TEST_BASIC_THREAD_NUM];
+ char dummy_key_name[TLD_NAME_LEN];
+ tld_key_t key;
+ int i, err;
+
+ reset_tld();
+
+ ASSERT_OK(pthread_mutex_init(&global_mutex, NULL), "pthread_mutex_init");
+
+ skel = test_task_local_data__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ tld_keys = calloc(TLD_MAX_DATA_CNT, sizeof(tld_key_t));
+ if (!ASSERT_OK_PTR(tld_keys, "calloc tld_keys"))
+ goto out;
+
+ ASSERT_FALSE(tld_key_is_err(value0_key), "TLD_DEFINE_KEY");
+ tld_keys[1] = tld_create_key("value1", sizeof(int));
+ ASSERT_FALSE(tld_key_is_err(tld_keys[1]), "tld_create_key");
+ tld_keys[2] = tld_create_key("value2", sizeof(struct test_tld_struct));
+ ASSERT_FALSE(tld_key_is_err(tld_keys[2]), "tld_create_key");
+
+ /*
+	 * Shouldn't be able to store data exceeding a page. Create a TLD just big
+ * enough to exceed a page. TLDs already created are int value0, int
+ * value1, and struct test_tld_struct value2.
+ */
+ key = tld_create_key("value_not_exist",
+ TLD_PAGE_SIZE - 2 * sizeof(int) - sizeof(struct test_tld_struct) + 1);
+ ASSERT_EQ(tld_key_err_or_zero(key), -E2BIG, "tld_create_key");
+
+ key = tld_create_key("value2", sizeof(struct test_tld_struct));
+ ASSERT_EQ(tld_key_err_or_zero(key), -EEXIST, "tld_create_key");
+
+ /* Shouldn't be able to create the (TLD_MAX_DATA_CNT+1)-th TLD */
+ for (i = 3; i < TLD_MAX_DATA_CNT; i++) {
+ snprintf(dummy_key_name, TLD_NAME_LEN, "dummy_value%d", i);
+ tld_keys[i] = tld_create_key(dummy_key_name, sizeof(int));
+ ASSERT_FALSE(tld_key_is_err(tld_keys[i]), "tld_create_key");
+ }
+ key = tld_create_key("value_not_exist", sizeof(struct test_tld_struct));
+ ASSERT_EQ(tld_key_err_or_zero(key), -ENOSPC, "tld_create_key");
+
+ /* Access TLDs from multiple threads and check if they are thread-specific */
+ for (i = 0; i < TEST_BASIC_THREAD_NUM; i++) {
+ err = pthread_create(&thread[i], NULL, test_task_local_data_basic_thread, skel);
+ if (!ASSERT_OK(err, "pthread_create"))
+ goto out;
+ }
+
+out:
+ for (i = 0; i < TEST_BASIC_THREAD_NUM; i++)
+ pthread_join(thread[i], NULL);
+
+ if (tld_keys) {
+ free(tld_keys);
+ tld_keys = NULL;
+ }
+ tld_free();
+ test_task_local_data__destroy(skel);
+}
+
+#define TEST_RACE_THREAD_NUM (TLD_MAX_DATA_CNT - 3)
+
+void *test_task_local_data_race_thread(void *arg)
+{
+ int err = 0, id = (intptr_t)arg;
+ char key_name[32];
+ tld_key_t key;
+
+ key = tld_create_key("value_not_exist", TLD_PAGE_SIZE + 1);
+ if (tld_key_err_or_zero(key) != -E2BIG) {
+ err = 1;
+ goto out;
+ }
+
+ /* Only one thread will succeed in creating value1 */
+ key = tld_create_key("value1", sizeof(int));
+ if (!tld_key_is_err(key))
+ tld_keys[1] = key;
+
+ /* Only one thread will succeed in creating value2 */
+ key = tld_create_key("value2", sizeof(struct test_tld_struct));
+ if (!tld_key_is_err(key))
+ tld_keys[2] = key;
+
+ snprintf(key_name, 32, "thread_%d", id);
+ tld_keys[id] = tld_create_key(key_name, sizeof(int));
+ if (tld_key_is_err(tld_keys[id]))
+ err = 2;
+out:
+ return (void *)(intptr_t)err;
+}
+
+static void test_task_local_data_race(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ pthread_t thread[TEST_RACE_THREAD_NUM];
+ struct test_task_local_data *skel;
+ int fd, i, j, err, *data;
+ void *ret = NULL;
+
+ skel = test_task_local_data__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ tld_keys = calloc(TLD_MAX_DATA_CNT, sizeof(tld_key_t));
+ if (!ASSERT_OK_PTR(tld_keys, "calloc tld_keys"))
+ goto out;
+
+ fd = bpf_map__fd(skel->maps.tld_data_map);
+
+ ASSERT_FALSE(tld_key_is_err(value0_key), "TLD_DEFINE_KEY");
+ tld_keys[0] = value0_key;
+
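+	/* Repeat many times to increase the chance of hitting a race */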
+ for (j = 0; j < 100; j++) {
+ reset_tld();
+
+ for (i = 0; i < TEST_RACE_THREAD_NUM; i++) {
+ /*
+ * Try to make tld_create_key() race with each other. Call
+ * tld_create_key(), both valid and invalid, from different threads.
+ */
+ err = pthread_create(&thread[i], NULL, test_task_local_data_race_thread,
+ (void *)(intptr_t)(i + 3));
+ if (CHECK_FAIL(err))
+ break;
+ }
+
+ /* Wait for all tld_create_key() to return */
+ for (i = 0; i < TEST_RACE_THREAD_NUM; i++) {
+ pthread_join(thread[i], &ret);
+ if (CHECK_FAIL(ret))
+ break;
+ }
+
+ /* Write a unique number to each TLD */
+ for (i = 0; i < TLD_MAX_DATA_CNT; i++) {
+ data = tld_get_data(fd, tld_keys[i]);
+ if (CHECK_FAIL(!data))
+ break;
+ *data = i;
+ }
+
+ /* Read TLDs and check the value to see if any address collides with another */
+ for (i = 0; i < TLD_MAX_DATA_CNT; i++) {
+ data = tld_get_data(fd, tld_keys[i]);
+			if (CHECK_FAIL(!data || *data != i))
+ break;
+ }
+
+ /* Run task_main to make sure no invalid TLDs are added */
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.task_main), &opts);
+ ASSERT_OK(err, "run task_main");
+ ASSERT_OK(opts.retval, "task_main retval");
+ }
+out:
+ if (tld_keys) {
+ free(tld_keys);
+ tld_keys = NULL;
+ }
+ tld_free();
+ test_task_local_data__destroy(skel);
+}
+
+void test_task_local_data(void)
+{
+ if (test__start_subtest("task_local_data_basic"))
+ test_task_local_data_basic();
+ if (test__start_subtest("task_local_data_race"))
+ test_task_local_data_race();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_task_work.c b/tools/testing/selftests/bpf/prog_tests/test_task_work.c
new file mode 100644
index 000000000000..774b31a5f6ca
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_task_work.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <string.h>
+#include <stdio.h>
+#include "task_work.skel.h"
+#include "task_work_fail.skel.h"
+#include <linux/bpf.h>
+#include <linux/perf_event.h>
+#include <sys/syscall.h>
+#include <time.h>
+
+static int perf_event_open(__u32 type, __u64 config, int pid)
+{
+ struct perf_event_attr attr = {
+ .type = type,
+ .config = config,
+ .size = sizeof(struct perf_event_attr),
+ .sample_period = 100000,
+ };
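+	/* pid > 0, cpu == -1: profile the given process on any CPU */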
+
+ return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
+}
+
+struct elem {
+ char data[128];
+ struct bpf_task_work tw;
+};
+
+static int verify_map(struct bpf_map *map, const char *expected_data)
+{
+ int err;
+ struct elem value;
+ int processed_values = 0;
+ int k, sz;
+
+ sz = bpf_map__max_entries(map);
+ for (k = 0; k < sz; ++k) {
+ err = bpf_map__lookup_elem(map, &k, sizeof(int), &value, sizeof(struct elem), 0);
+ if (err)
+ continue;
+ if (!ASSERT_EQ(strcmp(expected_data, value.data), 0, "map data")) {
+ fprintf(stderr, "expected '%s', found '%s' in %s map", expected_data,
+ value.data, bpf_map__name(map));
+ return 2;
+ }
+ processed_values++;
+ }
+
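+	/* Non-zero return (no entries processed) is treated as failure */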
+ return processed_values == 0;
+}
+
+static void task_work_run(const char *prog_name, const char *map_name)
+{
+ struct task_work *skel;
+ struct bpf_program *prog;
+ struct bpf_map *map;
+ struct bpf_link *link = NULL;
+ int err, pe_fd = -1, pid, status, pipefd[2];
+ char user_string[] = "hello world";
+
+ if (!ASSERT_NEQ(pipe(pipefd), -1, "pipe"))
+ return;
+
+ pid = fork();
+ if (pid == 0) {
+ __u64 num = 1;
+ int i;
+ char buf;
+
+ close(pipefd[1]);
+ read(pipefd[0], &buf, sizeof(buf));
+ close(pipefd[0]);
+
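+		/* Busy-loop so the child generates enough CPU-cycle samples */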
+ for (i = 0; i < 10000; ++i)
+ num *= time(0) % 7;
+ (void)num;
+ exit(0);
+ }
+	if (!ASSERT_GT(pid, 0, "fork")) {
+ close(pipefd[0]);
+ close(pipefd[1]);
+ return;
+ }
+
+ skel = task_work__open();
+ if (!ASSERT_OK_PTR(skel, "task_work__open"))
+ return;
+
+ bpf_object__for_each_program(prog, skel->obj) {
+ bpf_program__set_autoload(prog, false);
+ }
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "prog_name"))
+ goto cleanup;
+ bpf_program__set_autoload(prog, true);
+ skel->bss->user_ptr = (char *)user_string;
+
+ err = task_work__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pe_fd = perf_event_open(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES, pid);
+ if (pe_fd == -1 && (errno == ENOENT || errno == EOPNOTSUPP)) {
+ printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
+ test__skip();
+ goto cleanup;
+ }
+ if (!ASSERT_NEQ(pe_fd, -1, "pe_fd")) {
+ fprintf(stderr, "perf_event_open errno: %d, pid: %d\n", errno, pid);
+ goto cleanup;
+ }
+
+ link = bpf_program__attach_perf_event(prog, pe_fd);
+ if (!ASSERT_OK_PTR(link, "attach_perf_event"))
+ goto cleanup;
+
+ /* perf event fd ownership is passed to bpf_link */
+ pe_fd = -1;
+ close(pipefd[0]);
+ write(pipefd[1], user_string, 1);
+ close(pipefd[1]);
+ /* Wait to collect some samples */
+ waitpid(pid, &status, 0);
+ pid = 0;
+ map = bpf_object__find_map_by_name(skel->obj, map_name);
+ if (!ASSERT_OK_PTR(map, "find map_name"))
+ goto cleanup;
+ if (!ASSERT_OK(verify_map(map, user_string), "verify map"))
+ goto cleanup;
+cleanup:
+ if (pe_fd >= 0)
+ close(pe_fd);
+ bpf_link__destroy(link);
+ task_work__destroy(skel);
+ if (pid > 0) {
+ close(pipefd[0]);
+ write(pipefd[1], user_string, 1);
+ close(pipefd[1]);
+ waitpid(pid, &status, 0);
+ }
+}
+
+void test_task_work(void)
+{
+ if (test__start_subtest("test_task_work_hash_map"))
+ task_work_run("oncpu_hash_map", "hmap");
+
+ if (test__start_subtest("test_task_work_array_map"))
+ task_work_run("oncpu_array_map", "arrmap");
+
+ if (test__start_subtest("test_task_work_lru_map"))
+ task_work_run("oncpu_lru_map", "lrumap");
+
+ RUN_TESTS(task_work_fail);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_tc_edt.c b/tools/testing/selftests/bpf/prog_tests/test_tc_edt.c
new file mode 100644
index 000000000000..462512fb191f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_tc_edt.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * BPF-based flow shaping
+ *
+ * The test brings up a veth pair in two isolated namespaces, attaches a
+ * flow-shaping program to it, and ensures that the maximum throughput
+ * measured by a manual speed test matches the rate set in the BPF shaper.
+ */
+
+#include <asm-generic/socket.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <math.h>
+#include <sys/time.h>
+#include <sys/socket.h>
+#include <bpf/libbpf.h>
+#include <pthread.h>
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "test_tc_edt.skel.h"
+
+#define SERVER_NS "tc-edt-server-ns"
+#define CLIENT_NS "tc-edt-client-ns"
+#define IP4_ADDR_VETH1 "192.168.1.1"
+#define IP4_ADDR_VETH2 "192.168.1.2"
+#define IP4_ADDR_VETH2_HEX 0xC0A80102
+
+#define TIMEOUT_MS 2000
+#define TEST_PORT 9000
+#define TARGET_RATE_MBPS 5.0
+#define TX_BYTES_COUNT (1 * 1000 * 1000)
+#define RATE_ERROR_PERCENT 2.0
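+/* Worked example of the acceptance window: with TARGET_RATE_MBPS = 5.0 and
+ * RATE_ERROR_PERCENT = 2.0, the measured rate must land within [4.9, 5.1]
+ * for the subtest to pass.
+ */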
+
+struct connection {
+ int server_listen_fd;
+ int server_conn_fd;
+ int client_conn_fd;
+};
+
+static int setup(struct test_tc_edt *skel)
+{
+ struct nstoken *nstoken_client, *nstoken_server;
+ int ret;
+
+ if (!ASSERT_OK(make_netns(CLIENT_NS), "create client ns"))
+ goto fail;
+ if (!ASSERT_OK(make_netns(SERVER_NS), "create server ns"))
+ goto fail_delete_client_ns;
+
+ nstoken_client = open_netns(CLIENT_NS);
+ if (!ASSERT_OK_PTR(nstoken_client, "open client ns"))
+ goto fail_delete_server_ns;
+ SYS(fail_close_client_ns, "ip link add veth1 type veth peer name %s",
+ "veth2 netns " SERVER_NS);
+ SYS(fail_close_client_ns, "ip -4 addr add " IP4_ADDR_VETH1 "/24 dev veth1");
+ SYS(fail_close_client_ns, "ip link set veth1 up");
+
+ nstoken_server = open_netns(SERVER_NS);
+ if (!ASSERT_OK_PTR(nstoken_server, "enter server ns"))
+ goto fail_close_client_ns;
+ SYS(fail_close_server_ns, "ip -4 addr add " IP4_ADDR_VETH2 "/24 dev veth2");
+ SYS(fail_close_server_ns, "ip link set veth2 up");
+ SYS(fail_close_server_ns, "tc qdisc add dev veth2 root fq");
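+	/* Judging by its uses in this file, tc_prog_attach() takes
+	 * (ifname, ingress_prog_fd, egress_prog_fd), with -1 leaving a
+	 * direction unattached; the shaper is therefore egress-only here.
+	 */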
+ ret = tc_prog_attach("veth2", -1, bpf_program__fd(skel->progs.tc_prog));
+ if (!ASSERT_OK(ret, "attach bpf prog"))
+ goto fail_close_server_ns;
+ skel->bss->target_rate = TARGET_RATE_MBPS * 1000 * 1000;
+ close_netns(nstoken_server);
+ close_netns(nstoken_client);
+
+ return 0;
+
+fail_close_server_ns:
+ close_netns(nstoken_server);
+fail_close_client_ns:
+ close_netns(nstoken_client);
+fail_delete_server_ns:
+ remove_netns(SERVER_NS);
+fail_delete_client_ns:
+ remove_netns(CLIENT_NS);
+fail:
+ return -1;
+}
+
+static void cleanup(void)
+{
+ remove_netns(CLIENT_NS);
+ remove_netns(SERVER_NS);
+}
+
+static void run_test(void)
+{
+ int server_fd, client_fd, err;
+ double rate_mbps, rate_error;
+ struct nstoken *nstoken;
+ __u64 ts_start, ts_end;
+
+ nstoken = open_netns(SERVER_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open server ns"))
+ return;
+ server_fd = start_server(AF_INET, SOCK_STREAM, IP4_ADDR_VETH2,
+ TEST_PORT, TIMEOUT_MS);
+ if (!ASSERT_OK_FD(server_fd, "start server"))
+ return;
+
+ close_netns(nstoken);
+ nstoken = open_netns(CLIENT_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open client ns"))
+ return;
+ client_fd = connect_to_fd(server_fd, 0);
+ if (!ASSERT_OK_FD(client_fd, "connect client"))
+ return;
+
+ ts_start = get_time_ns();
+ err = send_recv_data(server_fd, client_fd, TX_BYTES_COUNT);
+ ts_end = get_time_ns();
+ close_netns(nstoken);
+ ASSERT_OK(err, "send_recv_data");
+
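+	/* get_time_ns() timestamps are in nanoseconds, so dividing the delta
+	 * by 1000.0 yields microseconds, and bytes per microsecond below
+	 * comes out in decimal megabytes per second.
+	 */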
+ rate_mbps = TX_BYTES_COUNT / ((ts_end - ts_start) / 1000.0);
+ rate_error =
+ fabs((rate_mbps - TARGET_RATE_MBPS) * 100.0 / TARGET_RATE_MBPS);
+
+ ASSERT_LE(rate_error, RATE_ERROR_PERCENT,
+ "rate error is lower than threshold");
+}
+
+void test_tc_edt(void)
+{
+ struct test_tc_edt *skel;
+
+ skel = test_tc_edt__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel open and load"))
+ return;
+
+ if (!ASSERT_OK(setup(skel), "global setup"))
+ return;
+
+ run_test();
+
+ cleanup();
+ test_tc_edt__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c
new file mode 100644
index 000000000000..0fe0a8f62486
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c
@@ -0,0 +1,714 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * End-to-end eBPF tunnel test suite
+ * This file tests the BPF network tunnel implementations. For each tunnel
+ * type, the test validates that:
+ * - basic communication can first be established between the two veths
+ * - once a BPF-based encapsulation is attached on client egress, the
+ *   client fails to communicate with the server
+ * - once a kernel-based decapsulation is added on server ingress, the
+ *   client can connect again
+ * - once the kernel-based decapsulation is replaced with a BPF-based one,
+ *   the client can still connect
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+#include <bpf/libbpf.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "test_tc_tunnel.skel.h"
+
+#define SERVER_NS "tc-tunnel-server-ns"
+#define CLIENT_NS "tc-tunnel-client-ns"
+#define MAC_ADDR_VETH1 "00:11:22:33:44:55"
+#define IP4_ADDR_VETH1 "192.168.1.1"
+#define IP6_ADDR_VETH1 "fd::1"
+#define MAC_ADDR_VETH2 "66:77:88:99:AA:BB"
+#define IP4_ADDR_VETH2 "192.168.1.2"
+#define IP6_ADDR_VETH2 "fd::2"
+
+#define TEST_NAME_MAX_LEN 64
+#define PROG_NAME_MAX_LEN 64
+#define TUNNEL_ARGS_MAX_LEN 128
+#define BUFFER_LEN 2000
+#define DEFAULT_TEST_DATA_SIZE 100
+#define GSO_TEST_DATA_SIZE BUFFER_LEN
+
+#define TIMEOUT_MS 1000
+#define TEST_PORT 8000
+#define UDP_PORT 5555
+#define MPLS_UDP_PORT 6635
+#define FOU_MPLS_PROTO 137
+#define VXLAN_ID 1
+#define VXLAN_PORT 8472
+#define MPLS_TABLE_ENTRIES_COUNT 65536
+
+static char tx_buffer[BUFFER_LEN], rx_buffer[BUFFER_LEN];
+
+struct subtest_cfg {
+ char *ebpf_tun_type;
+ char *iproute_tun_type;
+ char *mac_tun_type;
+ int ipproto;
+ void (*extra_decap_mod_args_cb)(struct subtest_cfg *cfg, char *dst);
+ bool tunnel_need_veth_mac;
+ bool configure_fou_rx_port;
+ char *tmode;
+ bool expect_kern_decap_failure;
+ bool configure_mpls;
+ bool test_gso;
+ char *tunnel_client_addr;
+ char *tunnel_server_addr;
+ char name[TEST_NAME_MAX_LEN];
+ char *server_addr;
+ int client_egress_prog_fd;
+ int server_ingress_prog_fd;
+ char extra_decap_mod_args[TUNNEL_ARGS_MAX_LEN];
+ int server_fd;
+};
+
+struct connection {
+ int client_fd;
+ int server_fd;
+};
+
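+/* Subtest names are "<ebpf_tun_type>_<mac_tun_type>", e.g. "gre_mpls";
+ * set_subtest_progs() below prepends "__encap_" to that same string to look
+ * up the matching encapsulation program in the skeleton.
+ */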
+static int build_subtest_name(struct subtest_cfg *cfg, char *dst, size_t size)
+{
+ int ret;
+
+ ret = snprintf(dst, size, "%s_%s", cfg->ebpf_tun_type,
+ cfg->mac_tun_type);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int set_subtest_progs(struct subtest_cfg *cfg, struct test_tc_tunnel *skel)
+{
+ char prog_name[PROG_NAME_MAX_LEN];
+ struct bpf_program *prog;
+ int ret;
+
+ ret = snprintf(prog_name, PROG_NAME_MAX_LEN, "__encap_");
+ if (ret < 0)
+ return ret;
+ ret = build_subtest_name(cfg, prog_name + ret, PROG_NAME_MAX_LEN - ret);
+ if (ret < 0)
+ return ret;
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!prog)
+ return -1;
+
+ cfg->client_egress_prog_fd = bpf_program__fd(prog);
+ cfg->server_ingress_prog_fd = bpf_program__fd(skel->progs.decap_f);
+ return 0;
+}
+
+static void set_subtest_addresses(struct subtest_cfg *cfg)
+{
+ if (cfg->ipproto == 6)
+ cfg->server_addr = IP6_ADDR_VETH2;
+ else
+ cfg->server_addr = IP4_ADDR_VETH2;
+
+	/* Some tunnel types need specific addressing, in which case it is
+	 * already set in the configuration table. Otherwise, deduce the
+	 * relevant addressing from the ipproto.
+	 */
+ if (cfg->tunnel_client_addr && cfg->tunnel_server_addr)
+ return;
+
+ if (cfg->ipproto == 6) {
+ cfg->tunnel_client_addr = IP6_ADDR_VETH1;
+ cfg->tunnel_server_addr = IP6_ADDR_VETH2;
+ } else {
+ cfg->tunnel_client_addr = IP4_ADDR_VETH1;
+ cfg->tunnel_server_addr = IP4_ADDR_VETH2;
+ }
+}
+
+static int run_server(struct subtest_cfg *cfg)
+{
+ int family = cfg->ipproto == 6 ? AF_INET6 : AF_INET;
+ struct nstoken *nstoken;
+ struct network_helper_opts opts = {
+ .timeout_ms = TIMEOUT_MS
+ };
+
+ nstoken = open_netns(SERVER_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open server ns"))
+ return -1;
+
+ cfg->server_fd = start_server_str(family, SOCK_STREAM, cfg->server_addr,
+ TEST_PORT, &opts);
+ close_netns(nstoken);
+ if (!ASSERT_OK_FD(cfg->server_fd, "start server"))
+ return -1;
+
+ return 0;
+}
+
+static int check_server_rx_data(struct subtest_cfg *cfg,
+ struct connection *conn, int len)
+{
+ int err;
+
+ memset(rx_buffer, 0, BUFFER_LEN);
+ err = recv(conn->server_fd, rx_buffer, len, 0);
+ if (!ASSERT_EQ(err, len, "check rx data len"))
+ return 1;
+ if (!ASSERT_MEMEQ(tx_buffer, rx_buffer, len, "check received data"))
+ return 1;
+ return 0;
+}
+
+static struct connection *connect_client_to_server(struct subtest_cfg *cfg)
+{
+ struct network_helper_opts opts = {.timeout_ms = 500};
+ int family = cfg->ipproto == 6 ? AF_INET6 : AF_INET;
+ struct connection *conn = NULL;
+ int client_fd, server_fd;
+
+ conn = malloc(sizeof(struct connection));
+ if (!conn)
+ return conn;
+
+ client_fd = connect_to_addr_str(family, SOCK_STREAM, cfg->server_addr,
+ TEST_PORT, &opts);
+
+ if (client_fd < 0) {
+ free(conn);
+ return NULL;
+ }
+
+ server_fd = accept(cfg->server_fd, NULL, NULL);
+ if (server_fd < 0) {
+ close(client_fd);
+ free(conn);
+ return NULL;
+ }
+
+ conn->server_fd = server_fd;
+ conn->client_fd = client_fd;
+
+ return conn;
+}
+
+static void disconnect_client_from_server(struct subtest_cfg *cfg,
+ struct connection *conn)
+{
+ close(conn->server_fd);
+ close(conn->client_fd);
+ free(conn);
+}
+
+static int send_and_test_data(struct subtest_cfg *cfg, bool must_succeed)
+{
+ struct connection *conn;
+ int err, res = -1;
+
+ conn = connect_client_to_server(cfg);
+ if (!must_succeed && !ASSERT_ERR_PTR(conn, "connection that must fail"))
+ goto end;
+ else if (!must_succeed)
+ return 0;
+
+ if (!ASSERT_OK_PTR(conn, "connection that must succeed"))
+ return -1;
+
+ err = send(conn->client_fd, tx_buffer, DEFAULT_TEST_DATA_SIZE, 0);
+ if (!ASSERT_EQ(err, DEFAULT_TEST_DATA_SIZE, "send data from client"))
+ goto end;
+ if (check_server_rx_data(cfg, conn, DEFAULT_TEST_DATA_SIZE))
+ goto end;
+
+ if (!cfg->test_gso) {
+ res = 0;
+ goto end;
+ }
+
+ err = send(conn->client_fd, tx_buffer, GSO_TEST_DATA_SIZE, 0);
+ if (!ASSERT_EQ(err, GSO_TEST_DATA_SIZE, "send (large) data from client"))
+ goto end;
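+	/* Presumably only the first DEFAULT_TEST_DATA_SIZE bytes of the large
+	 * send are verified, since a single recv() on a stream socket may
+	 * return fewer bytes than requested.
+	 */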
+ if (check_server_rx_data(cfg, conn, DEFAULT_TEST_DATA_SIZE))
+ goto end;
+
+ res = 0;
+end:
+ disconnect_client_from_server(cfg, conn);
+ return res;
+}
+
+static void vxlan_decap_mod_args_cb(struct subtest_cfg *cfg, char *dst)
+{
+ snprintf(dst, TUNNEL_ARGS_MAX_LEN, "id %d dstport %d udp6zerocsumrx",
+ VXLAN_ID, VXLAN_PORT);
+}
+
+static void udp_decap_mod_args_cb(struct subtest_cfg *cfg, char *dst)
+{
+ bool is_mpls = !strcmp(cfg->mac_tun_type, "mpls");
+
+ snprintf(dst, TUNNEL_ARGS_MAX_LEN,
+ "encap fou encap-sport auto encap-dport %d",
+ is_mpls ? MPLS_UDP_PORT : UDP_PORT);
+}
+
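+/* The FOU RX port must be told the inner protocol, using IANA protocol
+ * numbers: 4 (IPIP), 41 (IPv6) or, for MPLS, 137 (MPLS-in-IP).
+ */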
+static int configure_fou_rx_port(struct subtest_cfg *cfg, bool add)
+{
+ bool is_mpls = strcmp(cfg->mac_tun_type, "mpls") == 0;
+ int fou_proto;
+
+ if (is_mpls)
+ fou_proto = FOU_MPLS_PROTO;
+ else
+ fou_proto = cfg->ipproto == 6 ? 41 : 4;
+
+ SYS(fail, "ip fou %s port %d ipproto %d%s", add ? "add" : "del",
+ is_mpls ? MPLS_UDP_PORT : UDP_PORT, fou_proto,
+ cfg->ipproto == 6 ? " -6" : "");
+
+ return 0;
+fail:
+ return 1;
+}
+
+static int add_fou_rx_port(struct subtest_cfg *cfg)
+{
+ return configure_fou_rx_port(cfg, true);
+}
+
+static int del_fou_rx_port(struct subtest_cfg *cfg)
+{
+ return configure_fou_rx_port(cfg, false);
+}
+
+static int update_tunnel_intf_addr(struct subtest_cfg *cfg)
+{
+ SYS(fail, "ip link set dev testtun0 address " MAC_ADDR_VETH2);
+ return 0;
+fail:
+ return -1;
+}
+
+static int configure_kernel_for_mpls(struct subtest_cfg *cfg)
+{
+ SYS(fail, "sysctl -qw net.mpls.platform_labels=%d",
+ MPLS_TABLE_ENTRIES_COUNT);
+ SYS(fail, "ip -f mpls route add 1000 dev lo");
+ SYS(fail, "ip link set lo up");
+ SYS(fail, "sysctl -qw net.mpls.conf.testtun0.input=1");
+ SYS(fail, "sysctl -qw net.ipv4.conf.lo.rp_filter=0");
+ return 0;
+fail:
+ return -1;
+}
+
+static int configure_encapsulation(struct subtest_cfg *cfg)
+{
+ int ret;
+
+ ret = tc_prog_attach("veth1", -1, cfg->client_egress_prog_fd);
+
+ return ret;
+}
+
+static int configure_kernel_decapsulation(struct subtest_cfg *cfg)
+{
+ struct nstoken *nstoken = open_netns(SERVER_NS);
+ int ret = -1;
+
+ if (!ASSERT_OK_PTR(nstoken, "open server ns"))
+ return ret;
+
+ if (cfg->configure_fou_rx_port &&
+ !ASSERT_OK(add_fou_rx_port(cfg), "configure FOU RX port"))
+ goto fail;
+ SYS(fail, "ip link add name testtun0 type %s %s remote %s local %s %s",
+ cfg->iproute_tun_type, cfg->tmode ? cfg->tmode : "",
+ cfg->tunnel_client_addr, cfg->tunnel_server_addr,
+ cfg->extra_decap_mod_args);
+ if (cfg->tunnel_need_veth_mac &&
+ !ASSERT_OK(update_tunnel_intf_addr(cfg), "update testtun0 mac"))
+ goto fail;
+ if (cfg->configure_mpls &&
+ (!ASSERT_OK(configure_kernel_for_mpls(cfg),
+ "configure MPLS decap")))
+ goto fail;
+ SYS(fail, "sysctl -qw net.ipv4.conf.all.rp_filter=0");
+ SYS(fail, "sysctl -qw net.ipv4.conf.testtun0.rp_filter=0");
+ SYS(fail, "ip link set dev testtun0 up");
+
+ ret = 0;
+fail:
+ close_netns(nstoken);
+ return ret;
+}
+
+static void remove_kernel_decapsulation(struct subtest_cfg *cfg)
+{
+ SYS_NOFAIL("ip link del testtun0");
+ if (cfg->configure_mpls)
+ SYS_NOFAIL("ip -f mpls route del 1000 dev lo");
+ if (cfg->configure_fou_rx_port)
+ del_fou_rx_port(cfg);
+}
+
+static int configure_ebpf_decapsulation(struct subtest_cfg *cfg)
+{
+ struct nstoken *nstoken = open_netns(SERVER_NS);
+ int ret = -1;
+
+ if (!ASSERT_OK_PTR(nstoken, "open server ns"))
+ return ret;
+
+ if (!cfg->expect_kern_decap_failure)
+ SYS(fail, "ip link del testtun0");
+
+ if (!ASSERT_OK(tc_prog_attach("veth2", cfg->server_ingress_prog_fd, -1),
+ "attach_program"))
+ goto fail;
+
+ ret = 0;
+fail:
+ close_netns(nstoken);
+ return ret;
+}
+
+static void run_test(struct subtest_cfg *cfg)
+{
+ struct nstoken *nstoken;
+
+ if (!ASSERT_OK(run_server(cfg), "run server"))
+ return;
+
+ nstoken = open_netns(CLIENT_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open client ns"))
+ goto fail;
+
+ /* Basic communication must work */
+ if (!ASSERT_OK(send_and_test_data(cfg, true), "connect without any encap"))
+ goto fail;
+
+ /* Attach encapsulation program to client */
+ if (!ASSERT_OK(configure_encapsulation(cfg), "configure encapsulation"))
+ goto fail;
+
+	/* If supported, insert the kernel decapsulation; the connection must succeed */
+ if (!cfg->expect_kern_decap_failure) {
+ if (!ASSERT_OK(configure_kernel_decapsulation(cfg),
+ "configure kernel decapsulation"))
+ goto fail;
+ if (!ASSERT_OK(send_and_test_data(cfg, true),
+ "connect with encap prog and kern decap"))
+ goto fail;
+ }
+
+ /* Replace kernel decapsulation with BPF decapsulation, test must pass */
+ if (!ASSERT_OK(configure_ebpf_decapsulation(cfg), "configure ebpf decapsulation"))
+ goto fail;
+ ASSERT_OK(send_and_test_data(cfg, true), "connect with encap and decap progs");
+
+fail:
+ close_netns(nstoken);
+ close(cfg->server_fd);
+}
+
+static int setup(void)
+{
+ struct nstoken *nstoken_client, *nstoken_server;
+ int fd, err;
+
+ fd = open("/dev/urandom", O_RDONLY);
+ if (!ASSERT_OK_FD(fd, "open urandom"))
+ goto fail;
+ err = read(fd, tx_buffer, BUFFER_LEN);
+ close(fd);
+
+ if (!ASSERT_EQ(err, BUFFER_LEN, "read random bytes"))
+ goto fail;
+
+ /* Configure the testing network */
+ if (!ASSERT_OK(make_netns(CLIENT_NS), "create client ns") ||
+ !ASSERT_OK(make_netns(SERVER_NS), "create server ns"))
+ goto fail;
+
+ nstoken_client = open_netns(CLIENT_NS);
+ if (!ASSERT_OK_PTR(nstoken_client, "open client ns"))
+ goto fail_delete_ns;
+ SYS(fail_close_ns_client, "ip link add %s type veth peer name %s",
+ "veth1 mtu 1500 netns " CLIENT_NS " address " MAC_ADDR_VETH1,
+ "veth2 mtu 1500 netns " SERVER_NS " address " MAC_ADDR_VETH2);
+ SYS(fail_close_ns_client, "ethtool -K veth1 tso off");
+ SYS(fail_close_ns_client, "ip link set veth1 up");
+ nstoken_server = open_netns(SERVER_NS);
+ if (!ASSERT_OK_PTR(nstoken_server, "open server ns"))
+ goto fail_close_ns_client;
+ SYS(fail_close_ns_server, "ip link set veth2 up");
+
+ close_netns(nstoken_server);
+ close_netns(nstoken_client);
+ return 0;
+
+fail_close_ns_server:
+ close_netns(nstoken_server);
+fail_close_ns_client:
+ close_netns(nstoken_client);
+fail_delete_ns:
+ SYS_NOFAIL("ip netns del " CLIENT_NS);
+ SYS_NOFAIL("ip netns del " SERVER_NS);
+fail:
+ return -1;
+}
+
+static int subtest_setup(struct test_tc_tunnel *skel, struct subtest_cfg *cfg)
+{
+ struct nstoken *nstoken_client, *nstoken_server;
+ int ret = -1;
+
+ set_subtest_addresses(cfg);
+ if (!ASSERT_OK(set_subtest_progs(cfg, skel),
+ "find subtest progs"))
+ goto fail;
+ if (cfg->extra_decap_mod_args_cb)
+ cfg->extra_decap_mod_args_cb(cfg, cfg->extra_decap_mod_args);
+
+ nstoken_client = open_netns(CLIENT_NS);
+ if (!ASSERT_OK_PTR(nstoken_client, "open client ns"))
+ goto fail;
+ SYS(fail_close_client_ns,
+ "ip -4 addr add " IP4_ADDR_VETH1 "/24 dev veth1");
+ SYS(fail_close_client_ns, "ip -4 route flush table main");
+ SYS(fail_close_client_ns,
+ "ip -4 route add " IP4_ADDR_VETH2 " mtu 1450 dev veth1");
+ SYS(fail_close_client_ns,
+ "ip -6 addr add " IP6_ADDR_VETH1 "/64 dev veth1 nodad");
+ SYS(fail_close_client_ns, "ip -6 route flush table main");
+ SYS(fail_close_client_ns,
+ "ip -6 route add " IP6_ADDR_VETH2 " mtu 1430 dev veth1");
+ nstoken_server = open_netns(SERVER_NS);
+ if (!ASSERT_OK_PTR(nstoken_server, "open server ns"))
+ goto fail_close_client_ns;
+ SYS(fail_close_server_ns,
+ "ip -4 addr add " IP4_ADDR_VETH2 "/24 dev veth2");
+ SYS(fail_close_server_ns,
+ "ip -6 addr add " IP6_ADDR_VETH2 "/64 dev veth2 nodad");
+
+ ret = 0;
+
+fail_close_server_ns:
+ close_netns(nstoken_server);
+fail_close_client_ns:
+ close_netns(nstoken_client);
+fail:
+ return ret;
+}
+
+static void subtest_cleanup(struct subtest_cfg *cfg)
+{
+ struct nstoken *nstoken;
+
+ nstoken = open_netns(CLIENT_NS);
+	if (ASSERT_OK_PTR(nstoken, "open client ns")) {
+ SYS_NOFAIL("tc qdisc delete dev veth1 parent ffff:fff1");
+ SYS_NOFAIL("ip a flush veth1");
+ close_netns(nstoken);
+ }
+ nstoken = open_netns(SERVER_NS);
+	if (ASSERT_OK_PTR(nstoken, "open server ns")) {
+ SYS_NOFAIL("tc qdisc delete dev veth2 parent ffff:fff1");
+ SYS_NOFAIL("ip a flush veth2");
+ if (!cfg->expect_kern_decap_failure)
+ remove_kernel_decapsulation(cfg);
+ close_netns(nstoken);
+ }
+}
+
+static void cleanup(void)
+{
+ remove_netns(CLIENT_NS);
+ remove_netns(SERVER_NS);
+}
+
+static struct subtest_cfg subtests_cfg[] = {
+ {
+ .ebpf_tun_type = "ipip",
+ .mac_tun_type = "none",
+ .iproute_tun_type = "ipip",
+ .ipproto = 4,
+ },
+ {
+ .ebpf_tun_type = "ipip6",
+ .mac_tun_type = "none",
+ .iproute_tun_type = "ip6tnl",
+ .ipproto = 4,
+ .tunnel_client_addr = IP6_ADDR_VETH1,
+ .tunnel_server_addr = IP6_ADDR_VETH2,
+ },
+ {
+ .ebpf_tun_type = "ip6tnl",
+ .iproute_tun_type = "ip6tnl",
+ .mac_tun_type = "none",
+ .ipproto = 6,
+ },
+ {
+ .mac_tun_type = "none",
+ .ebpf_tun_type = "sit",
+ .iproute_tun_type = "sit",
+ .ipproto = 6,
+ .tunnel_client_addr = IP4_ADDR_VETH1,
+ .tunnel_server_addr = IP4_ADDR_VETH2,
+ },
+ {
+ .ebpf_tun_type = "vxlan",
+ .mac_tun_type = "eth",
+ .iproute_tun_type = "vxlan",
+ .ipproto = 4,
+ .extra_decap_mod_args_cb = vxlan_decap_mod_args_cb,
+ .tunnel_need_veth_mac = true
+ },
+ {
+ .ebpf_tun_type = "ip6vxlan",
+ .mac_tun_type = "eth",
+ .iproute_tun_type = "vxlan",
+ .ipproto = 6,
+ .extra_decap_mod_args_cb = vxlan_decap_mod_args_cb,
+ .tunnel_need_veth_mac = true
+ },
+ {
+ .ebpf_tun_type = "gre",
+ .mac_tun_type = "none",
+ .iproute_tun_type = "gre",
+ .ipproto = 4,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "gre",
+ .mac_tun_type = "eth",
+ .iproute_tun_type = "gretap",
+ .ipproto = 4,
+ .tunnel_need_veth_mac = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "gre",
+ .mac_tun_type = "mpls",
+ .iproute_tun_type = "gre",
+ .ipproto = 4,
+ .configure_mpls = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "ip6gre",
+ .mac_tun_type = "none",
+ .iproute_tun_type = "ip6gre",
+ .ipproto = 6,
+ .test_gso = true,
+ },
+ {
+ .ebpf_tun_type = "ip6gre",
+ .mac_tun_type = "eth",
+ .iproute_tun_type = "ip6gretap",
+ .ipproto = 6,
+ .tunnel_need_veth_mac = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "ip6gre",
+ .mac_tun_type = "mpls",
+ .iproute_tun_type = "ip6gre",
+ .ipproto = 6,
+ .configure_mpls = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "udp",
+ .mac_tun_type = "none",
+ .iproute_tun_type = "ipip",
+ .ipproto = 4,
+ .extra_decap_mod_args_cb = udp_decap_mod_args_cb,
+ .configure_fou_rx_port = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "udp",
+ .mac_tun_type = "eth",
+ .iproute_tun_type = "ipip",
+ .ipproto = 4,
+ .extra_decap_mod_args_cb = udp_decap_mod_args_cb,
+ .configure_fou_rx_port = true,
+ .expect_kern_decap_failure = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "udp",
+ .mac_tun_type = "mpls",
+ .iproute_tun_type = "ipip",
+ .ipproto = 4,
+ .extra_decap_mod_args_cb = udp_decap_mod_args_cb,
+ .configure_fou_rx_port = true,
+ .tmode = "mode any ttl 255",
+ .configure_mpls = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "ip6udp",
+ .mac_tun_type = "none",
+ .iproute_tun_type = "ip6tnl",
+ .ipproto = 6,
+ .extra_decap_mod_args_cb = udp_decap_mod_args_cb,
+ .configure_fou_rx_port = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "ip6udp",
+ .mac_tun_type = "eth",
+ .iproute_tun_type = "ip6tnl",
+ .ipproto = 6,
+ .extra_decap_mod_args_cb = udp_decap_mod_args_cb,
+ .configure_fou_rx_port = true,
+ .expect_kern_decap_failure = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "ip6udp",
+ .mac_tun_type = "mpls",
+ .iproute_tun_type = "ip6tnl",
+ .ipproto = 6,
+ .extra_decap_mod_args_cb = udp_decap_mod_args_cb,
+ .configure_fou_rx_port = true,
+ .tmode = "mode any ttl 255",
+ .expect_kern_decap_failure = true,
+ .test_gso = true
+ },
+};
+
+void test_tc_tunnel(void)
+{
+ struct test_tc_tunnel *skel;
+ struct subtest_cfg *cfg;
+ int i, ret;
+
+ skel = test_tc_tunnel__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel open and load"))
+ return;
+
+ if (!ASSERT_OK(setup(), "global setup"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(subtests_cfg); i++) {
+ cfg = &subtests_cfg[i];
+ ret = build_subtest_name(cfg, cfg->name, TEST_NAME_MAX_LEN);
+ if (ret < 0 || !test__start_subtest(cfg->name))
+ continue;
+ if (subtest_setup(skel, cfg) == 0)
+ run_test(cfg);
+ subtest_cleanup(cfg);
+ }
+ cleanup();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
new file mode 100644
index 000000000000..eb9309931272
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
@@ -0,0 +1,1074 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * End-to-end eBPF tunnel test suite
+ * This file tests the BPF network tunnel implementation.
+ *
+ * Topology:
+ * ---------
+ * root namespace | at_ns0 namespace
+ * |
+ * ----------- | -----------
+ * | tnl dev | | | tnl dev | (overlay network)
+ * ----------- | -----------
+ * metadata-mode | metadata-mode
+ * with bpf | with bpf
+ * |
+ * ---------- | ----------
+ * | veth1 | --------- | veth0 | (underlay network)
+ * ---------- peer ----------
+ *
+ *
+ * Device Configuration
+ * --------------------
+ * root namespace with metadata-mode tunnel + BPF
+ * Device names and addresses:
+ * veth1 IP 1: 172.16.1.200, IPv6: 00::22 (underlay)
+ * IP 2: 172.16.1.20, IPv6: 00::bb (underlay)
+ * tunnel dev <type>11, ex: gre11, IPv4: 10.1.1.200, IPv6: 1::22 (overlay)
+ *
+ * Namespace at_ns0 with native tunnel
+ * Device names and addresses:
+ * veth0 IPv4: 172.16.1.100, IPv6: 00::11 (underlay)
+ * tunnel dev <type>00, ex: gre00, IPv4: 10.1.1.100, IPv6: 1::11 (overlay)
+ *
+ *
+ * End-to-end ping packet flow
+ * ---------------------------
+ * Most of the tests start with namespace creation and device configuration,
+ * then ping the underlay and overlay networks. When running 'ping 10.1.1.100'
+ * from the root namespace, the following operations happen:
+ * 1) Route lookup shows 10.1.1.100/24 belongs to the tnl dev; forward to it.
+ * 2) The tnl device's egress BPF program is triggered and sets the tunnel
+ *    metadata, with local_ip=172.16.1.200, remote_ip=172.16.1.100. The BPF
+ *    program chooses either the primary or the secondary IP of veth1 as the
+ *    tunnel's local IP, based on the value in the BPF map local_ip_map.
+ * 3) The outer tunnel header is prepended and the packet is routed to
+ *    veth1's egress.
+ * 4) veth0's ingress queue receives the tunneled packet in namespace at_ns0.
+ * 5) The tunnel protocol handler, e.g. vxlan_rcv, decapsulates the packet.
+ * 6) The packet is forwarded to the overlay tnl dev.
+ */
+
+#include <arpa/inet.h>
+#include <linux/if_link.h>
+#include <linux/if_tun.h>
+#include <linux/limits.h>
+#include <linux/sysctl.h>
+#include <linux/time_types.h>
+#include <linux/net_tstamp.h>
+#include <net/if.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "test_tunnel_kern.skel.h"
+
+#define IP4_ADDR_VETH0 "172.16.1.100"
+#define IP4_ADDR1_VETH1 "172.16.1.200"
+#define IP4_ADDR2_VETH1 "172.16.1.20"
+#define IP4_ADDR_TUNL_DEV0 "10.1.1.100"
+#define IP4_ADDR_TUNL_DEV1 "10.1.1.200"
+#define IP6_ADDR_TUNL_DEV0 "fc80::100"
+#define IP6_ADDR_TUNL_DEV1 "fc80::200"
+
+#define IP6_ADDR_VETH0 "::11"
+#define IP6_ADDR1_VETH1 "::22"
+#define IP6_ADDR2_VETH1 "::bb"
+
+#define IP4_ADDR1_HEX_VETH1 0xac1001c8
+#define IP4_ADDR2_HEX_VETH1 0xac100114
+#define IP6_ADDR1_HEX_VETH1 0x22
+#define IP6_ADDR2_HEX_VETH1 0xbb
+
+#define MAC_TUNL_DEV0 "52:54:00:d9:01:00"
+#define MAC_TUNL_DEV1 "52:54:00:d9:02:00"
+#define MAC_VETH1 "52:54:00:d9:03:00"
+
+#define VXLAN_TUNL_DEV0 "vxlan00"
+#define VXLAN_TUNL_DEV1 "vxlan11"
+#define IP6VXLAN_TUNL_DEV0 "ip6vxlan00"
+#define IP6VXLAN_TUNL_DEV1 "ip6vxlan11"
+
+#define IPIP_TUNL_DEV0 "ipip00"
+#define IPIP_TUNL_DEV1 "ipip11"
+
+#define XFRM_AUTH "0x1111111111111111111111111111111111111111"
+#define XFRM_ENC "0x22222222222222222222222222222222"
+#define XFRM_SPI_IN_TO_OUT 0x1
+#define XFRM_SPI_OUT_TO_IN 0x2
+
+#define GRE_TUNL_DEV0 "gre00"
+#define GRE_TUNL_DEV1 "gre11"
+
+#define IP6GRE_TUNL_DEV0 "ip6gre00"
+#define IP6GRE_TUNL_DEV1 "ip6gre11"
+
+#define ERSPAN_TUNL_DEV0 "erspan00"
+#define ERSPAN_TUNL_DEV1 "erspan11"
+
+#define IP6ERSPAN_TUNL_DEV0 "ip6erspan00"
+#define IP6ERSPAN_TUNL_DEV1 "ip6erspan11"
+
+#define GENEVE_TUNL_DEV0 "geneve00"
+#define GENEVE_TUNL_DEV1 "geneve11"
+
+#define IP6GENEVE_TUNL_DEV0 "ip6geneve00"
+#define IP6GENEVE_TUNL_DEV1 "ip6geneve11"
+
+#define IP6TNL_TUNL_DEV0 "ip6tnl00"
+#define IP6TNL_TUNL_DEV1 "ip6tnl11"
+
+#define PING_ARGS "-i 0.01 -c 3 -w 10 -q"
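+/* Assuming ping_command() returns the family-appropriate binary, a call
+ * like test_ping(AF_INET, IP4_ADDR_TUNL_DEV0) below expands to:
+ *   ping -i 0.01 -c 3 -w 10 -q 10.1.1.100 > /dev/null
+ */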
+
+static int config_device(void)
+{
+ SYS(fail, "ip netns add at_ns0");
+ SYS(fail, "ip link add veth0 address " MAC_VETH1 " type veth peer name veth1");
+ SYS(fail, "ip link set veth0 netns at_ns0");
+ SYS(fail, "ip addr add " IP4_ADDR1_VETH1 "/24 dev veth1");
+ SYS(fail, "ip link set dev veth1 up mtu 1500");
+ SYS(fail, "ip netns exec at_ns0 ip addr add " IP4_ADDR_VETH0 "/24 dev veth0");
+ SYS(fail, "ip netns exec at_ns0 ip link set dev veth0 up mtu 1500");
+
+ return 0;
+fail:
+ return -1;
+}
+
+static void cleanup(void)
+{
+ SYS_NOFAIL("test -f /var/run/netns/at_ns0 && ip netns delete at_ns0");
+ SYS_NOFAIL("ip link del veth1");
+ SYS_NOFAIL("ip link del %s", VXLAN_TUNL_DEV1);
+ SYS_NOFAIL("ip link del %s", IP6VXLAN_TUNL_DEV1);
+}
+
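+/* The 'external' flag creates the device in metadata (collect_md) mode, so
+ * tunnel key/endpoint information comes from the BPF-set metadata rather
+ * than from static device configuration; 'gbp' additionally enables the
+ * VXLAN Group Based Policy extension.
+ */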
+static int add_vxlan_tunnel(void)
+{
+ /* at_ns0 namespace */
+ SYS(fail, "ip netns exec at_ns0 ip link add dev %s type vxlan external gbp dstport 4789",
+ VXLAN_TUNL_DEV0);
+ SYS(fail, "ip netns exec at_ns0 ip link set dev %s address %s up",
+ VXLAN_TUNL_DEV0, MAC_TUNL_DEV0);
+ SYS(fail, "ip netns exec at_ns0 ip addr add dev %s %s/24",
+ VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0);
+ SYS(fail, "ip netns exec at_ns0 ip neigh add %s lladdr %s dev %s",
+ IP4_ADDR_TUNL_DEV1, MAC_TUNL_DEV1, VXLAN_TUNL_DEV0);
+ SYS(fail, "ip netns exec at_ns0 ip neigh add %s lladdr %s dev veth0",
+ IP4_ADDR2_VETH1, MAC_VETH1);
+
+ /* root namespace */
+ SYS(fail, "ip link add dev %s type vxlan external gbp dstport 4789",
+ VXLAN_TUNL_DEV1);
+ SYS(fail, "ip link set dev %s address %s up", VXLAN_TUNL_DEV1, MAC_TUNL_DEV1);
+ SYS(fail, "ip addr add dev %s %s/24", VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1);
+ SYS(fail, "ip neigh add %s lladdr %s dev %s",
+ IP4_ADDR_TUNL_DEV0, MAC_TUNL_DEV0, VXLAN_TUNL_DEV1);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static void delete_vxlan_tunnel(void)
+{
+ SYS_NOFAIL("ip netns exec at_ns0 ip link delete dev %s",
+ VXLAN_TUNL_DEV0);
+ SYS_NOFAIL("ip link delete dev %s", VXLAN_TUNL_DEV1);
+}
+
+static int add_ip6vxlan_tunnel(void)
+{
+ SYS(fail, "ip netns exec at_ns0 ip -6 addr add %s/96 dev veth0",
+ IP6_ADDR_VETH0);
+ SYS(fail, "ip netns exec at_ns0 ip link set dev veth0 up");
+ SYS(fail, "ip -6 addr add %s/96 dev veth1", IP6_ADDR1_VETH1);
+ SYS(fail, "ip -6 addr add %s/96 dev veth1", IP6_ADDR2_VETH1);
+ SYS(fail, "ip link set dev veth1 up");
+
+ /* at_ns0 namespace */
+ SYS(fail, "ip netns exec at_ns0 ip link add dev %s type vxlan external dstport 4789",
+ IP6VXLAN_TUNL_DEV0);
+ SYS(fail, "ip netns exec at_ns0 ip addr add dev %s %s/24",
+ IP6VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0);
+ SYS(fail, "ip netns exec at_ns0 ip link set dev %s address %s up",
+ IP6VXLAN_TUNL_DEV0, MAC_TUNL_DEV0);
+
+ /* root namespace */
+ SYS(fail, "ip link add dev %s type vxlan external dstport 4789",
+ IP6VXLAN_TUNL_DEV1);
+ SYS(fail, "ip addr add dev %s %s/24", IP6VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1);
+ SYS(fail, "ip link set dev %s address %s up",
+ IP6VXLAN_TUNL_DEV1, MAC_TUNL_DEV1);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static void delete_ip6vxlan_tunnel(void)
+{
+ SYS_NOFAIL("ip netns exec at_ns0 ip -6 addr delete %s/96 dev veth0",
+ IP6_ADDR_VETH0);
+ SYS_NOFAIL("ip -6 addr delete %s/96 dev veth1", IP6_ADDR1_VETH1);
+ SYS_NOFAIL("ip -6 addr delete %s/96 dev veth1", IP6_ADDR2_VETH1);
+ SYS_NOFAIL("ip netns exec at_ns0 ip link delete dev %s",
+ IP6VXLAN_TUNL_DEV0);
+ SYS_NOFAIL("ip link delete dev %s", IP6VXLAN_TUNL_DEV1);
+}
+
+enum ipip_encap {
+ NONE = 0,
+ FOU = 1,
+ GUE = 2,
+};
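+/* FOU (Foo-over-UDP) wraps the inner packet directly in UDP, so the
+ * receiver must be told the inner ipproto; GUE (Generic UDP Encapsulation)
+ * carries its own header that encodes the inner protocol.
+ */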
+
+static int set_ipip_encap(const char *ipproto, const char *type)
+{
+ SYS(fail, "ip -n at_ns0 fou add port 5555 %s", ipproto);
+ SYS(fail, "ip -n at_ns0 link set dev %s type ipip encap %s",
+ IPIP_TUNL_DEV0, type);
+ SYS(fail, "ip -n at_ns0 link set dev %s type ipip encap-dport 5555",
+ IPIP_TUNL_DEV0);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static int set_ipv4_addr(const char *dev0, const char *dev1)
+{
+ SYS(fail, "ip -n at_ns0 link set dev %s up", dev0);
+ SYS(fail, "ip -n at_ns0 addr add dev %s %s/24", dev0, IP4_ADDR_TUNL_DEV0);
+ SYS(fail, "ip link set dev %s up", dev1);
+ SYS(fail, "ip addr add dev %s %s/24", dev1, IP4_ADDR_TUNL_DEV1);
+
+ return 0;
+fail:
+ return 1;
+}
+
+static int add_ipip_tunnel(enum ipip_encap encap)
+{
+ int err;
+ const char *ipproto, *type;
+
+ switch (encap) {
+ case FOU:
+ ipproto = "ipproto 4";
+ type = "fou";
+ break;
+ case GUE:
+ ipproto = "gue";
+ type = ipproto;
+ break;
+ default:
+ ipproto = NULL;
+ type = ipproto;
+ }
+
+ /* at_ns0 namespace */
+ SYS(fail, "ip -n at_ns0 link add dev %s type ipip local %s remote %s",
+ IPIP_TUNL_DEV0, IP4_ADDR_VETH0, IP4_ADDR1_VETH1);
+
+ if (type && ipproto) {
+ err = set_ipip_encap(ipproto, type);
+ if (!ASSERT_OK(err, "set_ipip_encap"))
+ goto fail;
+ }
+
+ SYS(fail, "ip -n at_ns0 link set dev %s up", IPIP_TUNL_DEV0);
+ SYS(fail, "ip -n at_ns0 addr add dev %s %s/24",
+ IPIP_TUNL_DEV0, IP4_ADDR_TUNL_DEV0);
+
+ /* root namespace */
+ if (type && ipproto)
+ SYS(fail, "ip fou add port 5555 %s", ipproto);
+ SYS(fail, "ip link add dev %s type ipip external", IPIP_TUNL_DEV1);
+ SYS(fail, "ip link set dev %s up", IPIP_TUNL_DEV1);
+ SYS(fail, "ip addr add dev %s %s/24", IPIP_TUNL_DEV1,
+ IP4_ADDR_TUNL_DEV1);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static void delete_ipip_tunnel(void)
+{
+ SYS_NOFAIL("ip -n at_ns0 link delete dev %s", IPIP_TUNL_DEV0);
+ SYS_NOFAIL("ip -n at_ns0 fou del port 5555");
+ SYS_NOFAIL("ip link delete dev %s", IPIP_TUNL_DEV1);
+ SYS_NOFAIL("ip fou del port 5555");
+}
+
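+/* IPsec plumbing sketch: each direction needs an xfrm state (the SA: SPI,
+ * keys, tunnel mode) plus a matching policy selecting which traffic uses
+ * it. The same two SAs are installed in both namespaces, with the in/out
+ * policy directions mirrored.
+ */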
+static int add_xfrm_tunnel(void)
+{
+ /* at_ns0 namespace
+ * at_ns0 -> root
+ */
+ SYS(fail,
+ "ip netns exec at_ns0 "
+ "ip xfrm state add src %s dst %s proto esp "
+ "spi %d reqid 1 mode tunnel replay-window 42 "
+ "auth-trunc 'hmac(sha1)' %s 96 enc 'cbc(aes)' %s",
+ IP4_ADDR_VETH0, IP4_ADDR1_VETH1, XFRM_SPI_IN_TO_OUT, XFRM_AUTH, XFRM_ENC);
+ SYS(fail,
+ "ip netns exec at_ns0 "
+ "ip xfrm policy add src %s/32 dst %s/32 dir out "
+ "tmpl src %s dst %s proto esp reqid 1 "
+ "mode tunnel",
+ IP4_ADDR_TUNL_DEV0, IP4_ADDR_TUNL_DEV1, IP4_ADDR_VETH0, IP4_ADDR1_VETH1);
+
+ /* root -> at_ns0 */
+ SYS(fail,
+ "ip netns exec at_ns0 "
+ "ip xfrm state add src %s dst %s proto esp "
+ "spi %d reqid 2 mode tunnel "
+ "auth-trunc 'hmac(sha1)' %s 96 enc 'cbc(aes)' %s",
+ IP4_ADDR1_VETH1, IP4_ADDR_VETH0, XFRM_SPI_OUT_TO_IN, XFRM_AUTH, XFRM_ENC);
+ SYS(fail,
+ "ip netns exec at_ns0 "
+ "ip xfrm policy add src %s/32 dst %s/32 dir in "
+ "tmpl src %s dst %s proto esp reqid 2 "
+ "mode tunnel",
+ IP4_ADDR_TUNL_DEV1, IP4_ADDR_TUNL_DEV0, IP4_ADDR1_VETH1, IP4_ADDR_VETH0);
+
+ /* address & route */
+ SYS(fail, "ip netns exec at_ns0 ip addr add dev veth0 %s/32",
+ IP4_ADDR_TUNL_DEV0);
+ SYS(fail, "ip netns exec at_ns0 ip route add %s dev veth0 via %s src %s",
+ IP4_ADDR_TUNL_DEV1, IP4_ADDR1_VETH1, IP4_ADDR_TUNL_DEV0);
+
+ /* root namespace
+ * at_ns0 -> root
+ */
+ SYS(fail,
+ "ip xfrm state add src %s dst %s proto esp "
+ "spi %d reqid 1 mode tunnel replay-window 42 "
+ "auth-trunc 'hmac(sha1)' %s 96 enc 'cbc(aes)' %s",
+ IP4_ADDR_VETH0, IP4_ADDR1_VETH1, XFRM_SPI_IN_TO_OUT, XFRM_AUTH, XFRM_ENC);
+ SYS(fail,
+ "ip xfrm policy add src %s/32 dst %s/32 dir in "
+ "tmpl src %s dst %s proto esp reqid 1 "
+ "mode tunnel",
+ IP4_ADDR_TUNL_DEV0, IP4_ADDR_TUNL_DEV1, IP4_ADDR_VETH0, IP4_ADDR1_VETH1);
+
+ /* root -> at_ns0 */
+ SYS(fail,
+ "ip xfrm state add src %s dst %s proto esp "
+ "spi %d reqid 2 mode tunnel "
+ "auth-trunc 'hmac(sha1)' %s 96 enc 'cbc(aes)' %s",
+ IP4_ADDR1_VETH1, IP4_ADDR_VETH0, XFRM_SPI_OUT_TO_IN, XFRM_AUTH, XFRM_ENC);
+ SYS(fail,
+ "ip xfrm policy add src %s/32 dst %s/32 dir out "
+ "tmpl src %s dst %s proto esp reqid 2 "
+ "mode tunnel",
+ IP4_ADDR_TUNL_DEV1, IP4_ADDR_TUNL_DEV0, IP4_ADDR1_VETH1, IP4_ADDR_VETH0);
+
+ /* address & route */
+ SYS(fail, "ip addr add dev veth1 %s/32", IP4_ADDR_TUNL_DEV1);
+ SYS(fail, "ip route add %s dev veth1 via %s src %s",
+ IP4_ADDR_TUNL_DEV0, IP4_ADDR_VETH0, IP4_ADDR_TUNL_DEV1);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static void delete_xfrm_tunnel(void)
+{
+ SYS_NOFAIL("ip xfrm policy delete dir out src %s/32 dst %s/32",
+ IP4_ADDR_TUNL_DEV1, IP4_ADDR_TUNL_DEV0);
+ SYS_NOFAIL("ip xfrm policy delete dir in src %s/32 dst %s/32",
+ IP4_ADDR_TUNL_DEV0, IP4_ADDR_TUNL_DEV1);
+ SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d",
+ IP4_ADDR_VETH0, IP4_ADDR1_VETH1, XFRM_SPI_IN_TO_OUT);
+ SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d",
+ IP4_ADDR1_VETH1, IP4_ADDR_VETH0, XFRM_SPI_OUT_TO_IN);
+}
+
+static int add_ipv4_tunnel(const char *dev0, const char *dev1,
+ const char *type, const char *opt)
+{
+ if (!type || !opt || !dev0 || !dev1)
+ return -1;
+
+ SYS(fail, "ip -n at_ns0 link add dev %s type %s %s local %s remote %s",
+ dev0, type, opt, IP4_ADDR_VETH0, IP4_ADDR1_VETH1);
+
+ SYS(fail, "ip link add dev %s type %s external", dev1, type);
+
+ return set_ipv4_addr(dev0, dev1);
+fail:
+ return -1;
+}
+
+static void delete_tunnel(const char *dev0, const char *dev1)
+{
+ if (!dev0 || !dev1)
+ return;
+
+ SYS_NOFAIL("ip netns exec at_ns0 ip link delete dev %s", dev0);
+ SYS_NOFAIL("ip link delete dev %s", dev1);
+}
+
+static int set_ipv6_addr(const char *dev0, const char *dev1)
+{
+ /* disable IPv6 DAD because it might take too long and fail tests */
+ SYS(fail, "ip -n at_ns0 addr add %s/96 dev veth0 nodad", IP6_ADDR_VETH0);
+ SYS(fail, "ip -n at_ns0 link set dev veth0 up");
+ SYS(fail, "ip addr add %s/96 dev veth1 nodad", IP6_ADDR1_VETH1);
+ SYS(fail, "ip link set dev veth1 up");
+
+ SYS(fail, "ip -n at_ns0 addr add dev %s %s/24", dev0, IP4_ADDR_TUNL_DEV0);
+ SYS(fail, "ip -n at_ns0 addr add dev %s %s/96 nodad", dev0, IP6_ADDR_TUNL_DEV0);
+ SYS(fail, "ip -n at_ns0 link set dev %s up", dev0);
+
+ SYS(fail, "ip addr add dev %s %s/24", dev1, IP4_ADDR_TUNL_DEV1);
+ SYS(fail, "ip addr add dev %s %s/96 nodad", dev1, IP6_ADDR_TUNL_DEV1);
+ SYS(fail, "ip link set dev %s up", dev1);
+ return 0;
+fail:
+ return 1;
+}
+
+static int add_ipv6_tunnel(const char *dev0, const char *dev1,
+ const char *type, const char *opt)
+{
+ if (!type || !opt || !dev0 || !dev1)
+ return -1;
+
+ SYS(fail, "ip -n at_ns0 link add dev %s type %s %s local %s remote %s",
+ dev0, type, opt, IP6_ADDR_VETH0, IP6_ADDR1_VETH1);
+
+ SYS(fail, "ip link add dev %s type %s external", dev1, type);
+
+ return set_ipv6_addr(dev0, dev1);
+fail:
+ return -1;
+}
+
+static int add_geneve_tunnel(const char *dev0, const char *dev1,
+ const char *type, const char *opt)
+{
+ if (!type || !opt || !dev0 || !dev1)
+ return -1;
+
+ SYS(fail, "ip -n at_ns0 link add dev %s type %s id 2 %s remote %s",
+ dev0, type, opt, IP4_ADDR1_VETH1);
+
+ SYS(fail, "ip link add dev %s type %s %s external", dev1, type, opt);
+
+ return set_ipv4_addr(dev0, dev1);
+fail:
+ return -1;
+}
+
+static int add_ip6geneve_tunnel(const char *dev0, const char *dev1,
+ const char *type, const char *opt)
+{
+ if (!type || !opt || !dev0 || !dev1)
+ return -1;
+
+ SYS(fail, "ip -n at_ns0 link add dev %s type %s id 22 %s remote %s",
+ dev0, type, opt, IP6_ADDR1_VETH1);
+
+ SYS(fail, "ip link add dev %s type %s %s external", dev1, type, opt);
+
+ return set_ipv6_addr(dev0, dev1);
+fail:
+ return -1;
+}
+
+static int test_ping(int family, const char *addr)
+{
+ SYS(fail, "%s %s %s > /dev/null", ping_command(family), PING_ARGS, addr);
+ return 0;
+fail:
+ return -1;
+}
+
+static void ping_dev0(void)
+{
+ /* ping from root namespace test */
+ test_ping(AF_INET, IP4_ADDR_TUNL_DEV0);
+}
+
+static void ping_dev1(void)
+{
+ struct nstoken *nstoken;
+
+ /* ping from at_ns0 namespace test */
+ nstoken = open_netns("at_ns0");
+ if (!ASSERT_OK_PTR(nstoken, "setns"))
+ return;
+
+ test_ping(AF_INET, IP4_ADDR_TUNL_DEV1);
+ close_netns(nstoken);
+}
+
+static void ping6_veth0(void)
+{
+ test_ping(AF_INET6, IP6_ADDR_VETH0);
+}
+
+static void ping6_dev0(void)
+{
+ test_ping(AF_INET6, IP6_ADDR_TUNL_DEV0);
+}
+
+static void ping6_dev1(void)
+{
+ struct nstoken *nstoken;
+
+ /* ping from at_ns0 namespace test */
+ nstoken = open_netns("at_ns0");
+ if (!ASSERT_OK_PTR(nstoken, "setns"))
+ return;
+
+	test_ping(AF_INET6, IP6_ADDR_TUNL_DEV1);
+ close_netns(nstoken);
+}
+
+static void test_vxlan_tunnel(void)
+{
+ struct test_tunnel_kern *skel = NULL;
+ struct nstoken *nstoken;
+ int local_ip_map_fd = -1;
+ int set_src_prog_fd, get_src_prog_fd;
+ int set_dst_prog_fd;
+ int key = 0;
+ uint local_ip;
+ int err;
+
+ /* add vxlan tunnel */
+ err = add_vxlan_tunnel();
+ if (!ASSERT_OK(err, "add vxlan tunnel"))
+ goto done;
+
+ /* load and attach bpf prog to tunnel dev tc hook point */
+ skel = test_tunnel_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
+ goto done;
+ get_src_prog_fd = bpf_program__fd(skel->progs.vxlan_get_tunnel_src);
+ set_src_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_src);
+ if (tc_prog_attach(VXLAN_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd))
+ goto done;
+
+ /* load and attach bpf prog to veth dev tc hook point */
+ set_dst_prog_fd = bpf_program__fd(skel->progs.veth_set_outer_dst);
+ if (tc_prog_attach("veth1", set_dst_prog_fd, -1))
+ goto done;
+
+ /* load and attach prog set_md to tunnel dev tc hook point at_ns0 */
+ nstoken = open_netns("at_ns0");
+ if (!ASSERT_OK_PTR(nstoken, "setns src"))
+ goto done;
+ set_dst_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_dst);
+ if (tc_prog_attach(VXLAN_TUNL_DEV0, -1, set_dst_prog_fd))
+ goto done;
+ close_netns(nstoken);
+
+ /* use veth1 ip 2 as tunnel source ip */
+ local_ip_map_fd = bpf_map__fd(skel->maps.local_ip_map);
+ if (!ASSERT_GE(local_ip_map_fd, 0, "bpf_map__fd"))
+ goto done;
+ local_ip = IP4_ADDR2_HEX_VETH1;
+ err = bpf_map_update_elem(local_ip_map_fd, &key, &local_ip, BPF_ANY);
+ if (!ASSERT_OK(err, "update bpf local_ip_map"))
+ goto done;
+
+ /* ping test */
+ ping_dev0();
+
+done:
+ /* delete vxlan tunnel */
+ delete_vxlan_tunnel();
+ if (local_ip_map_fd >= 0)
+ close(local_ip_map_fd);
+ if (skel)
+ test_tunnel_kern__destroy(skel);
+}
+
+static void test_ip6vxlan_tunnel(void)
+{
+ struct test_tunnel_kern *skel = NULL;
+ struct nstoken *nstoken;
+ int local_ip_map_fd = -1;
+ int set_src_prog_fd, get_src_prog_fd;
+ int set_dst_prog_fd;
+ int key = 0;
+ uint local_ip;
+ int err;
+
+ /* add vxlan tunnel */
+ err = add_ip6vxlan_tunnel();
+ if (!ASSERT_OK(err, "add_ip6vxlan_tunnel"))
+ goto done;
+
+ /* load and attach bpf prog to tunnel dev tc hook point */
+ skel = test_tunnel_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
+ goto done;
+ get_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_get_tunnel_src);
+ set_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_src);
+ if (tc_prog_attach(IP6VXLAN_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd))
+ goto done;
+
+ /* load and attach prog set_md to tunnel dev tc hook point at_ns0 */
+ nstoken = open_netns("at_ns0");
+ if (!ASSERT_OK_PTR(nstoken, "setns src"))
+ goto done;
+ set_dst_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_dst);
+ if (tc_prog_attach(IP6VXLAN_TUNL_DEV0, -1, set_dst_prog_fd))
+ goto done;
+ close_netns(nstoken);
+
+ /* use veth1 ip 2 as tunnel source ip */
+ local_ip_map_fd = bpf_map__fd(skel->maps.local_ip_map);
+ if (!ASSERT_GE(local_ip_map_fd, 0, "get local_ip_map fd"))
+ goto done;
+ local_ip = IP6_ADDR2_HEX_VETH1;
+ err = bpf_map_update_elem(local_ip_map_fd, &key, &local_ip, BPF_ANY);
+ if (!ASSERT_OK(err, "update bpf local_ip_map"))
+ goto done;
+
+ /* ping test */
+ ping_dev0();
+
+done:
+ /* delete ipv6 vxlan tunnel */
+ delete_ip6vxlan_tunnel();
+ if (local_ip_map_fd >= 0)
+ close(local_ip_map_fd);
+ if (skel)
+ test_tunnel_kern__destroy(skel);
+}
+
+static void test_ipip_tunnel(enum ipip_encap encap)
+{
+ struct test_tunnel_kern *skel = NULL;
+ int set_src_prog_fd, get_src_prog_fd;
+ int err;
+
+ /* add ipip tunnel */
+ err = add_ipip_tunnel(encap);
+ if (!ASSERT_OK(err, "add_ipip_tunnel"))
+ goto done;
+
+ /* load and attach bpf prog to tunnel dev tc hook point */
+ skel = test_tunnel_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
+ goto done;
+
+ switch (encap) {
+ case FOU:
+ get_src_prog_fd = bpf_program__fd(
+ skel->progs.ipip_encap_get_tunnel);
+ set_src_prog_fd = bpf_program__fd(
+ skel->progs.ipip_fou_set_tunnel);
+ break;
+ case GUE:
+ get_src_prog_fd = bpf_program__fd(
+ skel->progs.ipip_encap_get_tunnel);
+ set_src_prog_fd = bpf_program__fd(
+ skel->progs.ipip_gue_set_tunnel);
+ break;
+ default:
+ get_src_prog_fd = bpf_program__fd(
+ skel->progs.ipip_get_tunnel);
+ set_src_prog_fd = bpf_program__fd(
+ skel->progs.ipip_set_tunnel);
+ }
+
+ if (tc_prog_attach(IPIP_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd))
+ goto done;
+
+ ping_dev0();
+ ping_dev1();
+
+done:
+ /* delete ipip tunnel */
+ delete_ipip_tunnel();
+ if (skel)
+ test_tunnel_kern__destroy(skel);
+}
+
+static void test_xfrm_tunnel(void)
+{
+ LIBBPF_OPTS(bpf_xdp_attach_opts, opts);
+ struct test_tunnel_kern *skel = NULL;
+ int xdp_prog_fd;
+ int tc_prog_fd;
+ int ifindex;
+ int err;
+
+ err = add_xfrm_tunnel();
+ if (!ASSERT_OK(err, "add_xfrm_tunnel"))
+ return;
+
+ skel = test_tunnel_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
+ goto done;
+
+ /* attach tc prog to tunnel dev */
+ tc_prog_fd = bpf_program__fd(skel->progs.xfrm_get_state);
+ if (tc_prog_attach("veth1", tc_prog_fd, -1))
+ goto done;
+
+ /* attach xdp prog to tunnel dev */
+ ifindex = if_nametoindex("veth1");
+ if (!ASSERT_NEQ(ifindex, 0, "veth1 ifindex"))
+ goto done;
+ xdp_prog_fd = bpf_program__fd(skel->progs.xfrm_get_state_xdp);
+ if (!ASSERT_GE(xdp_prog_fd, 0, "bpf_program__fd"))
+ goto done;
+ err = bpf_xdp_attach(ifindex, xdp_prog_fd, XDP_FLAGS_REPLACE, &opts);
+ if (!ASSERT_OK(err, "bpf_xdp_attach"))
+ goto done;
+
+ ping_dev1();
+
+ if (!ASSERT_EQ(skel->bss->xfrm_reqid, 1, "req_id"))
+ goto done;
+ if (!ASSERT_EQ(skel->bss->xfrm_spi, XFRM_SPI_IN_TO_OUT, "spi"))
+ goto done;
+ if (!ASSERT_EQ(skel->bss->xfrm_remote_ip, 0xac100164, "remote_ip"))
+ goto done;
+ if (!ASSERT_EQ(skel->bss->xfrm_replay_window, 42, "replay_window"))
+ goto done;
+
+done:
+ delete_xfrm_tunnel();
+ if (skel)
+ test_tunnel_kern__destroy(skel);
+}
+
+enum gre_test {
+ GRE,
+ GRE_NOKEY,
+ GRETAP,
+ GRETAP_NOKEY,
+};
+
+static void test_gre_tunnel(enum gre_test test)
+{
+ struct test_tunnel_kern *skel;
+ int set_fd, get_fd;
+ int err;
+
+ skel = test_tunnel_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
+ return;
+
+ switch (test) {
+	case GRE:
+		err = add_ipv4_tunnel(GRE_TUNL_DEV0, GRE_TUNL_DEV1, "gre", "seq key 2");
+		set_fd = bpf_program__fd(skel->progs.gre_set_tunnel);
+		get_fd = bpf_program__fd(skel->progs.gre_get_tunnel);
+		break;
+	case GRE_NOKEY:
+		err = add_ipv4_tunnel(GRE_TUNL_DEV0, GRE_TUNL_DEV1, "gre", "seq");
+		set_fd = bpf_program__fd(skel->progs.gre_set_tunnel_no_key);
+		get_fd = bpf_program__fd(skel->progs.gre_get_tunnel);
+		break;
+	case GRETAP:
+		err = add_ipv4_tunnel(GRE_TUNL_DEV0, GRE_TUNL_DEV1, "gretap", "seq key 2");
+		set_fd = bpf_program__fd(skel->progs.gre_set_tunnel);
+		get_fd = bpf_program__fd(skel->progs.gre_get_tunnel);
+		break;
+	case GRETAP_NOKEY:
+		err = add_ipv4_tunnel(GRE_TUNL_DEV0, GRE_TUNL_DEV1, "gretap", "seq");
+		set_fd = bpf_program__fd(skel->progs.gre_set_tunnel_no_key);
+		get_fd = bpf_program__fd(skel->progs.gre_get_tunnel);
+		break;
+ }
+ if (!ASSERT_OK(err, "add tunnel"))
+ goto done;
+
+ if (tc_prog_attach(GRE_TUNL_DEV1, get_fd, set_fd))
+ goto done;
+
+ ping_dev0();
+ ping_dev1();
+
+done:
+ delete_tunnel(GRE_TUNL_DEV0, GRE_TUNL_DEV1);
+ test_tunnel_kern__destroy(skel);
+}
+
+enum ip6gre_test {
+ IP6GRE,
+ IP6GRETAP
+};
+
+static void test_ip6gre_tunnel(enum ip6gre_test test)
+{
+ struct test_tunnel_kern *skel;
+ int set_fd, get_fd;
+ int err;
+
+ skel = test_tunnel_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
+ return;
+
+ switch (test) {
+ case IP6GRE:
+ err = add_ipv6_tunnel(IP6GRE_TUNL_DEV0, IP6GRE_TUNL_DEV1,
+ "ip6gre", "flowlabel 0xbcdef key 2");
+ break;
+ case IP6GRETAP:
+ err = add_ipv6_tunnel(IP6GRE_TUNL_DEV0, IP6GRE_TUNL_DEV1,
+ "ip6gretap", "flowlabel 0xbcdef key 2");
+ break;
+ }
+ if (!ASSERT_OK(err, "add tunnel"))
+ goto done;
+
+ set_fd = bpf_program__fd(skel->progs.ip6gretap_set_tunnel);
+ get_fd = bpf_program__fd(skel->progs.ip6gretap_get_tunnel);
+ if (tc_prog_attach(IP6GRE_TUNL_DEV1, get_fd, set_fd))
+ goto done;
+
+ ping6_veth0();
+ ping6_dev1();
+ ping_dev0();
+ ping_dev1();
+done:
+ delete_tunnel(IP6GRE_TUNL_DEV0, IP6GRE_TUNL_DEV1);
+ test_tunnel_kern__destroy(skel);
+}
+
+enum erspan_test {
+ V1,
+ V2
+};
+
+static void test_erspan_tunnel(enum erspan_test test)
+{
+ struct test_tunnel_kern *skel;
+ int set_fd, get_fd;
+ int err;
+
+ skel = test_tunnel_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
+ return;
+
+ switch (test) {
+ case V1:
+ err = add_ipv4_tunnel(ERSPAN_TUNL_DEV0, ERSPAN_TUNL_DEV1,
+ "erspan", "seq key 2 erspan_ver 1 erspan 123");
+ break;
+ case V2:
+ err = add_ipv4_tunnel(ERSPAN_TUNL_DEV0, ERSPAN_TUNL_DEV1,
+ "erspan",
+ "seq key 2 erspan_ver 2 erspan_dir egress erspan_hwid 3");
+ break;
+ }
+ if (!ASSERT_OK(err, "add tunnel"))
+ goto done;
+
+ set_fd = bpf_program__fd(skel->progs.erspan_set_tunnel);
+ get_fd = bpf_program__fd(skel->progs.erspan_get_tunnel);
+ if (tc_prog_attach(ERSPAN_TUNL_DEV1, get_fd, set_fd))
+ goto done;
+
+ ping_dev0();
+ ping_dev1();
+done:
+ delete_tunnel(ERSPAN_TUNL_DEV0, ERSPAN_TUNL_DEV1);
+ test_tunnel_kern__destroy(skel);
+}
+
+static void test_ip6erspan_tunnel(enum erspan_test test)
+{
+ struct test_tunnel_kern *skel;
+ int set_fd, get_fd;
+ int err;
+
+ skel = test_tunnel_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
+ return;
+
+ switch (test) {
+ case V1:
+ err = add_ipv6_tunnel(IP6ERSPAN_TUNL_DEV0, IP6ERSPAN_TUNL_DEV1,
+ "ip6erspan", "seq key 2 erspan_ver 1 erspan 123");
+ break;
+ case V2:
+ err = add_ipv6_tunnel(IP6ERSPAN_TUNL_DEV0, IP6ERSPAN_TUNL_DEV1,
+ "ip6erspan",
+ "seq key 2 erspan_ver 2 erspan_dir egress erspan_hwid 7");
+ break;
+ }
+ if (!ASSERT_OK(err, "add tunnel"))
+ goto done;
+
+ set_fd = bpf_program__fd(skel->progs.ip4ip6erspan_set_tunnel);
+ get_fd = bpf_program__fd(skel->progs.ip4ip6erspan_get_tunnel);
+ if (tc_prog_attach(IP6ERSPAN_TUNL_DEV1, get_fd, set_fd))
+ goto done;
+
+ ping6_veth0();
+ ping_dev1();
+done:
+ delete_tunnel(IP6ERSPAN_TUNL_DEV0, IP6ERSPAN_TUNL_DEV1);
+ test_tunnel_kern__destroy(skel);
+}
+
+static void test_geneve_tunnel(void)
+{
+ struct test_tunnel_kern *skel;
+ int set_fd, get_fd;
+ int err;
+
+ skel = test_tunnel_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
+ return;
+
+ err = add_geneve_tunnel(GENEVE_TUNL_DEV0, GENEVE_TUNL_DEV1,
+ "geneve", "dstport 6081");
+ if (!ASSERT_OK(err, "add tunnel"))
+ goto done;
+
+ set_fd = bpf_program__fd(skel->progs.geneve_set_tunnel);
+ get_fd = bpf_program__fd(skel->progs.geneve_get_tunnel);
+ if (tc_prog_attach(GENEVE_TUNL_DEV1, get_fd, set_fd))
+ goto done;
+
+ ping_dev0();
+ ping_dev1();
+done:
+ delete_tunnel(GENEVE_TUNL_DEV0, GENEVE_TUNL_DEV1);
+ test_tunnel_kern__destroy(skel);
+}
+
+static void test_ip6geneve_tunnel(void)
+{
+ struct test_tunnel_kern *skel;
+ int set_fd, get_fd;
+ int err;
+
+ skel = test_tunnel_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
+ return;
+
+ err = add_ip6geneve_tunnel(IP6GENEVE_TUNL_DEV0, IP6GENEVE_TUNL_DEV1,
+ "geneve", "");
+ if (!ASSERT_OK(err, "add tunnel"))
+ goto done;
+
+ set_fd = bpf_program__fd(skel->progs.ip6geneve_set_tunnel);
+ get_fd = bpf_program__fd(skel->progs.ip6geneve_get_tunnel);
+ if (tc_prog_attach(IP6GENEVE_TUNL_DEV1, get_fd, set_fd))
+ goto done;
+
+ ping_dev0();
+ ping_dev1();
+done:
+ delete_tunnel(IP6GENEVE_TUNL_DEV0, IP6GENEVE_TUNL_DEV1);
+ test_tunnel_kern__destroy(skel);
+}
+
+enum ip6tnl_test {
+ IPIP6,
+ IP6IP6
+};
+
+static void test_ip6tnl_tunnel(enum ip6tnl_test test)
+{
+ struct test_tunnel_kern *skel;
+ int set_fd, get_fd;
+ int err;
+
+ skel = test_tunnel_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
+ return;
+
+ err = add_ipv6_tunnel(IP6TNL_TUNL_DEV0, IP6TNL_TUNL_DEV1, "ip6tnl", "");
+ if (!ASSERT_OK(err, "add tunnel"))
+ goto done;
+
+ switch (test) {
+ case IPIP6:
+ set_fd = bpf_program__fd(skel->progs.ipip6_set_tunnel);
+ get_fd = bpf_program__fd(skel->progs.ipip6_get_tunnel);
+ break;
+ case IP6IP6:
+ set_fd = bpf_program__fd(skel->progs.ip6ip6_set_tunnel);
+ get_fd = bpf_program__fd(skel->progs.ip6ip6_get_tunnel);
+ break;
+ }
+ if (tc_prog_attach(IP6TNL_TUNL_DEV1, get_fd, set_fd))
+ goto done;
+
+ ping6_veth0();
+ switch (test) {
+ case IPIP6:
+ ping_dev0();
+ ping_dev1();
+ break;
+ case IP6IP6:
+ ping6_dev0();
+ ping6_dev1();
+ break;
+ }
+
+done:
+ delete_tunnel(IP6TNL_TUNL_DEV0, IP6TNL_TUNL_DEV1);
+ test_tunnel_kern__destroy(skel);
+}
+
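+/* Each subtest gets a fresh veth/namespace configuration and a full
+ * cleanup, so tunnel state cannot leak from one case into the next.
+ */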
+#define RUN_TEST(name, ...) \
+ ({ \
+ if (test__start_subtest(#name)) { \
+ config_device(); \
+ test_ ## name(__VA_ARGS__); \
+ cleanup(); \
+ } \
+ })
+
+static void *test_tunnel_run_tests(void *arg)
+{
+ RUN_TEST(vxlan_tunnel);
+ RUN_TEST(ip6vxlan_tunnel);
+ RUN_TEST(ipip_tunnel, NONE);
+ RUN_TEST(ipip_tunnel, FOU);
+ RUN_TEST(ipip_tunnel, GUE);
+ RUN_TEST(xfrm_tunnel);
+ RUN_TEST(gre_tunnel, GRE);
+ RUN_TEST(gre_tunnel, GRE_NOKEY);
+ RUN_TEST(gre_tunnel, GRETAP);
+ RUN_TEST(gre_tunnel, GRETAP_NOKEY);
+ RUN_TEST(ip6gre_tunnel, IP6GRE);
+ RUN_TEST(ip6gre_tunnel, IP6GRETAP);
+ RUN_TEST(erspan_tunnel, V1);
+ RUN_TEST(erspan_tunnel, V2);
+ RUN_TEST(ip6erspan_tunnel, V1);
+ RUN_TEST(ip6erspan_tunnel, V2);
+ RUN_TEST(geneve_tunnel);
+ RUN_TEST(ip6geneve_tunnel);
+ RUN_TEST(ip6tnl_tunnel, IPIP6);
+ RUN_TEST(ip6tnl_tunnel, IP6IP6);
+
+ return NULL;
+}
+
+void test_tunnel(void)
+{
+ pthread_t test_thread;
+ int err;
+
+ /* Run the tests in their own thread to isolate the namespace changes
+ * so they do not affect the environment of other tests.
+ * (specifically needed because of unshare(CLONE_NEWNS) in open_netns())
+ */
+ err = pthread_create(&test_thread, NULL, &test_tunnel_run_tests, NULL);
+ if (ASSERT_OK(err, "pthread_create"))
+ ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_veristat.c b/tools/testing/selftests/bpf/prog_tests/test_veristat.c
new file mode 100644
index 000000000000..b38c16b4247f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_veristat.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <string.h>
+#include <stdio.h>
+
+#define __CHECK_STR(str, name) \
+ do { \
+ if (!ASSERT_HAS_SUBSTR(fix->output, (str), (name))) \
+ goto out; \
+ } while (0)
+
+struct fixture {
+ char tmpfile[80];
+ int fd;
+ char *output;
+ size_t sz;
+ char veristat[80];
+};
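+/* The fixture redirects veristat's output into a tempfile and reads it
+ * back so the __CHECK_STR() assertions can grep for expected substrings.
+ */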
+
+static struct fixture *init_fixture(void)
+{
+ struct fixture *fix = malloc(sizeof(struct fixture));
+
+	/* for the no_alu32 and cpuv4 flavors, veristat is in the parent folder */
+ if (access("./veristat", F_OK) == 0)
+ strcpy(fix->veristat, "./veristat");
+ else if (access("../veristat", F_OK) == 0)
+ strcpy(fix->veristat, "../veristat");
+ else
+ PRINT_FAIL("Can't find veristat binary");
+
+ snprintf(fix->tmpfile, sizeof(fix->tmpfile), "/tmp/test_veristat.XXXXXX");
+ fix->fd = mkstemp(fix->tmpfile);
+ fix->sz = 1000000;
+ fix->output = malloc(fix->sz);
+ return fix;
+}
+
+static void teardown_fixture(struct fixture *fix)
+{
+ free(fix->output);
+ close(fix->fd);
+ remove(fix->tmpfile);
+ free(fix);
+}
+
+static void test_set_global_vars_succeeds(void)
+{
+ struct fixture *fix = init_fixture();
+
+ SYS(out,
+ "%s set_global_vars.bpf.o"\
+ " -G \"var_s64 = 0xf000000000000001\" "\
+ " -G \"var_u64 = 0xfedcba9876543210\" "\
+ " -G \"var_s32 = -0x80000000\" "\
+ " -G \"var_u32 = 0x76543210\" "\
+ " -G \"var_s16 = -32768\" "\
+ " -G \"var_u16 = 60652\" "\
+ " -G \"var_s8 = -128\" "\
+ " -G \"var_u8 = 255\" "\
+ " -G \"var_ea = EA2\" "\
+ " -G \"var_eb = EB2\" "\
+ " -G \"var_ec=EC2\" "\
+ " -G \"var_b = 1\" "\
+ " -G \"struct1[2].struct2[1][2].u.var_u8[2]=170\" "\
+ " -G \"union1.struct3.var_u8_l = 0xaa\" "\
+ " -G \"union1.struct3.var_u8_h = 0xaa\" "\
+ " -G \"arr[3]= 171\" " \
+ " -G \"arr[EA2] =172\" " \
+ " -G \"enum_arr[EC2]=EA3\" " \
+ " -G \"three_d[31][7][EA2]=173\"" \
+ " -G \"struct1[2].struct2[1][2].u.mat[5][3]=174\" " \
+ " -G \"struct11 [ 7 ] [ 5 ] .struct2[0][1].u.mat[3][0] = 175\" " \
+ " -vl2 > %s", fix->veristat, fix->tmpfile);
+
+ read(fix->fd, fix->output, fix->sz);
+ __CHECK_STR("=0xf000000000000001 ", "var_s64 = 0xf000000000000001");
+ __CHECK_STR("=0xfedcba9876543210 ", "var_u64 = 0xfedcba9876543210");
+ __CHECK_STR("=0x80000000 ", "var_s32 = -0x80000000");
+ __CHECK_STR("=0x76543210 ", "var_u32 = 0x76543210");
+ __CHECK_STR("=0x8000 ", "var_s16 = -32768");
+ __CHECK_STR("=0xecec ", "var_u16 = 60652");
+ __CHECK_STR("=128 ", "var_s8 = -128");
+ __CHECK_STR("=255 ", "var_u8 = 255");
+ __CHECK_STR("=11 ", "var_ea = EA2");
+ __CHECK_STR("=12 ", "var_eb = EB2");
+ __CHECK_STR("=13 ", "var_ec = EC2");
+ __CHECK_STR("=1 ", "var_b = 1");
+ __CHECK_STR("=170 ", "struct1[2].struct2[1][2].u.var_u8[2]=170");
+ __CHECK_STR("=0xaaaa ", "union1.var_u16 = 0xaaaa");
+ __CHECK_STR("=171 ", "arr[3]= 171");
+ __CHECK_STR("=172 ", "arr[EA2] =172");
+ __CHECK_STR("=10 ", "enum_arr[EC2]=EA3");
+	__CHECK_STR("=173 ", "three_d[31][7][EA2]=173");
+ __CHECK_STR("=174 ", "struct1[2].struct2[1][2].u.mat[5][3]=174");
+ __CHECK_STR("=175 ", "struct11[7][5].struct2[0][1].u.mat[3][0]=175");
+
+out:
+ teardown_fixture(fix);
+}
+
+static void test_set_global_vars_from_file_succeeds(void)
+{
+ struct fixture *fix = init_fixture();
+ char input_file[80];
+ const char *vars = "var_s16 = -32768\nvar_u16 = 60652";
+ int fd;
+
+ snprintf(input_file, sizeof(input_file), "/tmp/veristat_input.XXXXXX");
+ fd = mkstemp(input_file);
+ if (!ASSERT_GE(fd, 0, "valid fd"))
+ goto out;
+
+ write(fd, vars, strlen(vars));
+ syncfs(fd);
+ SYS(out, "%s set_global_vars.bpf.o -G \"@%s\" -vl2 > %s",
+ fix->veristat, input_file, fix->tmpfile);
+ read(fix->fd, fix->output, fix->sz);
+ __CHECK_STR("=0x8000 ", "var_s16 = -32768");
+ __CHECK_STR("=0xecec ", "var_u16 = 60652");
+
+out:
+ close(fd);
+ remove(input_file);
+ teardown_fixture(fix);
+}
+
+static void test_set_global_vars_out_of_range(void)
+{
+ struct fixture *fix = init_fixture();
+
+ SYS_FAIL(out,
+ "%s set_global_vars.bpf.o -G \"var_s32 = 2147483648\" -vl2 2> %s",
+ fix->veristat, fix->tmpfile);
+
+ read(fix->fd, fix->output, fix->sz);
+ __CHECK_STR("is out of range [-2147483648; 2147483647]", "out of range");
+
+out:
+ teardown_fixture(fix);
+}
+
+static void test_unsupported_ptr_array_type(void)
+{
+ struct fixture *fix = init_fixture();
+
+ SYS_FAIL(out,
+ "%s set_global_vars.bpf.o -G \"ptr_arr[0] = 0\" -vl2 2> %s",
+ fix->veristat, fix->tmpfile);
+
+ read(fix->fd, fix->output, fix->sz);
+ __CHECK_STR("Can't set ptr_arr[0]. Only ints and enums are supported", "ptr_arr");
+
+out:
+ teardown_fixture(fix);
+}
+
+static void test_array_out_of_bounds(void)
+{
+ struct fixture *fix = init_fixture();
+
+ SYS_FAIL(out,
+ "%s set_global_vars.bpf.o -G \"arr[99] = 0\" -vl2 2> %s",
+ fix->veristat, fix->tmpfile);
+
+ read(fix->fd, fix->output, fix->sz);
+ __CHECK_STR("Array index 99 is out of bounds", "arr[99]");
+
+out:
+ teardown_fixture(fix);
+}
+
+static void test_array_index_not_found(void)
+{
+ struct fixture *fix = init_fixture();
+
+ SYS_FAIL(out,
+ "%s set_global_vars.bpf.o -G \"arr[EG2] = 0\" -vl2 2> %s",
+ fix->veristat, fix->tmpfile);
+
+ read(fix->fd, fix->output, fix->sz);
+ __CHECK_STR("Can't resolve enum value EG2", "arr[EG2]");
+
+out:
+ teardown_fixture(fix);
+}
+
+static void test_array_index_for_non_array(void)
+{
+ struct fixture *fix = init_fixture();
+
+ SYS_FAIL(out,
+ "%s set_global_vars.bpf.o -G \"var_b[0] = 1\" -vl2 2> %s",
+ fix->veristat, fix->tmpfile);
+
+ pread(fix->fd, fix->output, fix->sz, 0);
+ __CHECK_STR("Array index is not expected for var_b", "var_b[0] = 1");
+
+ SYS_FAIL(out,
+ "%s set_global_vars.bpf.o -G \"union1.struct3[0].var_u8_l=1\" -vl2 2> %s",
+ fix->veristat, fix->tmpfile);
+
+ pread(fix->fd, fix->output, fix->sz, 0);
+ __CHECK_STR("Array index is not expected for struct3", "union1.struct3[0].var_u8_l=1");
+
+out:
+ teardown_fixture(fix);
+}
+
+static void test_no_array_index_for_array(void)
+{
+ struct fixture *fix = init_fixture();
+
+ SYS_FAIL(out,
+ "%s set_global_vars.bpf.o -G \"arr = 1\" -vl2 2> %s",
+ fix->veristat, fix->tmpfile);
+
+ pread(fix->fd, fix->output, fix->sz, 0);
+ __CHECK_STR("Can't set arr. Only ints and enums are supported", "arr = 1");
+
+ SYS_FAIL(out,
+ "%s set_global_vars.bpf.o -G \"struct1[0].struct2.u.var_u8[2]=1\" -vl2 2> %s",
+ fix->veristat, fix->tmpfile);
+
+ pread(fix->fd, fix->output, fix->sz, 0);
+ __CHECK_STR("Can't resolve field u for non-composite type", "struct1[0].struct2.u.var_u8[2]=1");
+
+out:
+ teardown_fixture(fix);
+}
+
+void test_veristat(void)
+{
+ if (test__start_subtest("set_global_vars_succeeds"))
+ test_set_global_vars_succeeds();
+
+ if (test__start_subtest("set_global_vars_out_of_range"))
+ test_set_global_vars_out_of_range();
+
+ if (test__start_subtest("set_global_vars_from_file_succeeds"))
+ test_set_global_vars_from_file_succeeds();
+
+ if (test__start_subtest("test_unsupported_ptr_array_type"))
+ test_unsupported_ptr_array_type();
+
+ if (test__start_subtest("test_array_out_of_bounds"))
+ test_array_out_of_bounds();
+
+ if (test__start_subtest("test_array_index_not_found"))
+ test_array_index_not_found();
+
+ if (test__start_subtest("test_array_index_for_non_array"))
+ test_array_index_for_non_array();
+
+ if (test__start_subtest("test_no_array_index_for_array"))
+ test_no_array_index_for_array();
+}
+
+#undef __CHECK_STR
diff --git a/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c b/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c
new file mode 100644
index 000000000000..3e98a1665936
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Create 3 namespaces, each holding one end of a veth pair whose other end
+ * lives in a central namespace (NS0), and forward packets in between using
+ * native XDP
+ *
+ * Network topology:
+ *  ----------        ----------       ----------
+ *  |  NS1   |        |  NS2   |       |  NS3   |
+ *  | veth11 |        | veth22 |       | veth33 |
+ *  ----|-----        -----|----       -----|----
+ *      |                  |                |
+ *    ----|------------------|----------------|----
+ *    | veth1             veth2            veth3  |
+ *    |                                           |
+ *    |                    NS0                    |
+ *    ---------------------------------------------
+ *
+ * Test cases:
+ * - [test_xdp_veth_redirect]: ping veth33 from veth11
+ *
+ *    veth11             veth22              veth33
+ *  (XDP_PASS)          (XDP_TX)           (XDP_PASS)
+ *       |                  |                  |
+ *       |                  |                  |
+ *     veth1              veth2              veth3
+ * (XDP_REDIRECT)     (XDP_REDIRECT)     (XDP_REDIRECT)
+ *       ^ |                ^ |                ^ |
+ *       | |                | |                | |
+ *       | ------------------ ------------------ |
+ *       -----------------------------------------
+ *
+ * - [test_xdp_veth_broadcast_redirect]: broadcast from veth11
+ * - IPv4 ping : BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS
+ * -> echo request received by all except veth11
+ * - IPv4 ping : BPF_F_BROADCAST
+ * -> echo request received by all veth
+ * - [test_xdp_veth_egress]:
+ * - all src mac should be the magic mac
+ *
+ *    veth11             veth22              veth33
+ *  (XDP_PASS)         (XDP_PASS)          (XDP_PASS)
+ *       |                  |                  |
+ *       |                  |                  |
+ *     veth1              veth2              veth3
+ * (XDP_REDIRECT)     (XDP_REDIRECT)     (XDP_REDIRECT)
+ *       |                  ^                  ^
+ *       |                  |                  |
+ *       ----------------------------------------
+ *
+ */
+
+#define _GNU_SOURCE
+#include <net/if.h>
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "xdp_dummy.skel.h"
+#include "xdp_redirect_map.skel.h"
+#include "xdp_redirect_multi_kern.skel.h"
+#include "xdp_tx.skel.h"
+#include <uapi/linux/if_link.h>
+
+#define VETH_PAIRS_COUNT 3
+#define VETH_NAME_MAX_LEN 32
+#define IP_MAX_LEN 16
+#define IP_SRC "10.1.1.11"
+#define IP_DST "10.1.1.33"
+#define IP_NEIGH "10.1.1.253"
+#define PROG_NAME_MAX_LEN 128
+#define NS_NAME_MAX_LEN 32
+
+struct veth_configuration {
+ char local_veth[VETH_NAME_MAX_LEN]; /* Interface in main namespace */
+	char remote_veth[VETH_NAME_MAX_LEN]; /* Peer interface in dedicated namespace */
+ char namespace[NS_NAME_MAX_LEN]; /* Namespace for the remote veth */
+ int next_veth; /* Local interface to redirect traffic to */
+ char remote_addr[IP_MAX_LEN]; /* IP address of the remote veth */
+};
+
+struct net_configuration {
+ char ns0_name[NS_NAME_MAX_LEN];
+ struct veth_configuration veth_cfg[VETH_PAIRS_COUNT];
+};
+
+static const struct net_configuration default_config = {
+ .ns0_name = "ns0-",
+ {
+ {
+ .local_veth = "veth1-",
+ .remote_veth = "veth11",
+ .next_veth = 1,
+ .remote_addr = IP_SRC,
+ .namespace = "ns-veth11-"
+ },
+ {
+ .local_veth = "veth2-",
+ .remote_veth = "veth22",
+ .next_veth = 2,
+ .remote_addr = "",
+ .namespace = "ns-veth22-"
+ },
+ {
+ .local_veth = "veth3-",
+ .remote_veth = "veth33",
+ .next_veth = 0,
+ .remote_addr = IP_DST,
+ .namespace = "ns-veth33-"
+ }
+ }
+};
+
+struct prog_configuration {
+ char local_name[PROG_NAME_MAX_LEN]; /* BPF prog to attach to local_veth */
+ char remote_name[PROG_NAME_MAX_LEN]; /* BPF prog to attach to remote_veth */
+ u32 local_flags; /* XDP flags to use on local_veth */
+ u32 remote_flags; /* XDP flags to use on remote_veth */
+};
+
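+/* Look up the pair's local and remote XDP programs by name across the
+ * given BPF objects, then attach them: the local program in the caller's
+ * namespace (NS0), the remote one inside the pair's dedicated namespace.
+ */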
+static int attach_programs_to_veth_pair(struct bpf_object **objs, size_t nb_obj,
+ struct net_configuration *net_config,
+ struct prog_configuration *prog, int index)
+{
+ struct bpf_program *local_prog, *remote_prog;
+ struct nstoken *nstoken;
+ int interface, ret, i;
+
+ for (i = 0; i < nb_obj; i++) {
+ local_prog = bpf_object__find_program_by_name(objs[i], prog[index].local_name);
+ if (local_prog)
+ break;
+ }
+ if (!ASSERT_OK_PTR(local_prog, "find local program"))
+ return -1;
+
+ for (i = 0; i < nb_obj; i++) {
+ remote_prog = bpf_object__find_program_by_name(objs[i], prog[index].remote_name);
+ if (remote_prog)
+ break;
+ }
+ if (!ASSERT_OK_PTR(remote_prog, "find remote program"))
+ return -1;
+
+ interface = if_nametoindex(net_config->veth_cfg[index].local_veth);
+	if (!ASSERT_NEQ(interface, 0, "non-zero interface index"))
+ return -1;
+
+ ret = bpf_xdp_attach(interface, bpf_program__fd(local_prog),
+ prog[index].local_flags, NULL);
+ if (!ASSERT_OK(ret, "attach xdp program to local veth"))
+ return -1;
+
+ nstoken = open_netns(net_config->veth_cfg[index].namespace);
+ if (!ASSERT_OK_PTR(nstoken, "switch to remote veth namespace"))
+ return -1;
+
+ interface = if_nametoindex(net_config->veth_cfg[index].remote_veth);
+	if (!ASSERT_NEQ(interface, 0, "non-zero interface index")) {
+ close_netns(nstoken);
+ return -1;
+ }
+
+ ret = bpf_xdp_attach(interface, bpf_program__fd(remote_prog),
+ prog[index].remote_flags, NULL);
+ if (!ASSERT_OK(ret, "attach xdp program to remote veth")) {
+ close_netns(nstoken);
+ return -1;
+ }
+
+ close_netns(nstoken);
+ return 0;
+}
+
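+/* Build the topology from default_config: one central namespace (NS0)
+ * plus one dedicated namespace per veth pair, with every name made
+ * unique by appending the thread ID.
+ */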
+static int create_network(struct net_configuration *net_config)
+{
+ struct nstoken *nstoken = NULL;
+ int i, err;
+
+ memcpy(net_config, &default_config, sizeof(struct net_configuration));
+
+ /* Create unique namespaces */
+ err = append_tid(net_config->ns0_name, NS_NAME_MAX_LEN);
+ if (!ASSERT_OK(err, "append TID to ns0 name"))
+ goto fail;
+ SYS(fail, "ip netns add %s", net_config->ns0_name);
+
+ for (i = 0; i < VETH_PAIRS_COUNT; i++) {
+ err = append_tid(net_config->veth_cfg[i].namespace, NS_NAME_MAX_LEN);
+ if (!ASSERT_OK(err, "append TID to ns name"))
+ goto fail;
+ SYS(fail, "ip netns add %s", net_config->veth_cfg[i].namespace);
+ }
+
+ /* Create interfaces */
+ nstoken = open_netns(net_config->ns0_name);
+ if (!nstoken)
+ goto fail;
+
+ for (i = 0; i < VETH_PAIRS_COUNT; i++) {
+ SYS(fail, "ip link add %s type veth peer name %s netns %s",
+ net_config->veth_cfg[i].local_veth, net_config->veth_cfg[i].remote_veth,
+ net_config->veth_cfg[i].namespace);
+ SYS(fail, "ip link set dev %s up", net_config->veth_cfg[i].local_veth);
+ if (net_config->veth_cfg[i].remote_addr[0])
+ SYS(fail, "ip -n %s addr add %s/24 dev %s",
+ net_config->veth_cfg[i].namespace,
+ net_config->veth_cfg[i].remote_addr,
+ net_config->veth_cfg[i].remote_veth);
+ SYS(fail, "ip -n %s link set dev %s up", net_config->veth_cfg[i].namespace,
+ net_config->veth_cfg[i].remote_veth);
+ }
+
+ close_netns(nstoken);
+ return 0;
+
+fail:
+ close_netns(nstoken);
+ return -1;
+}
+
+static void cleanup_network(struct net_configuration *net_config)
+{
+ int i;
+
+ SYS_NOFAIL("ip netns del %s", net_config->ns0_name);
+ for (i = 0; i < VETH_PAIRS_COUNT; i++)
+ SYS_NOFAIL("ip netns del %s", net_config->veth_cfg[i].namespace);
+}
+
+#define VETH_REDIRECT_SKEL_NB 3
+static void xdp_veth_redirect(u32 flags)
+{
+ struct prog_configuration ping_config[VETH_PAIRS_COUNT] = {
+ {
+ .local_name = "xdp_redirect_map_0",
+ .remote_name = "xdp_dummy_prog",
+ .local_flags = flags,
+ .remote_flags = flags,
+ },
+ {
+ .local_name = "xdp_redirect_map_1",
+ .remote_name = "xdp_tx",
+ .local_flags = flags,
+ .remote_flags = flags,
+ },
+ {
+ .local_name = "xdp_redirect_map_2",
+ .remote_name = "xdp_dummy_prog",
+ .local_flags = flags,
+ .remote_flags = flags,
+ }
+ };
+ struct bpf_object *bpf_objs[VETH_REDIRECT_SKEL_NB];
+ struct xdp_redirect_map *xdp_redirect_map;
+	struct net_configuration net_config = {};
+ struct nstoken *nstoken = NULL;
+ struct xdp_dummy *xdp_dummy;
+ struct xdp_tx *xdp_tx;
+ int map_fd;
+ int i;
+
+ xdp_dummy = xdp_dummy__open_and_load();
+ if (!ASSERT_OK_PTR(xdp_dummy, "xdp_dummy__open_and_load"))
+ return;
+
+ xdp_tx = xdp_tx__open_and_load();
+ if (!ASSERT_OK_PTR(xdp_tx, "xdp_tx__open_and_load"))
+ goto destroy_xdp_dummy;
+
+ xdp_redirect_map = xdp_redirect_map__open_and_load();
+ if (!ASSERT_OK_PTR(xdp_redirect_map, "xdp_redirect_map__open_and_load"))
+ goto destroy_xdp_tx;
+
+ if (!ASSERT_OK(create_network(&net_config), "create network"))
+ goto destroy_xdp_redirect_map;
+
+ /* Then configure the redirect map and attach programs to interfaces */
+ map_fd = bpf_map__fd(xdp_redirect_map->maps.tx_port);
+ if (!ASSERT_OK_FD(map_fd, "open redirect map"))
+ goto destroy_xdp_redirect_map;
+
+ bpf_objs[0] = xdp_dummy->obj;
+ bpf_objs[1] = xdp_tx->obj;
+ bpf_objs[2] = xdp_redirect_map->obj;
+
+ nstoken = open_netns(net_config.ns0_name);
+ if (!ASSERT_OK_PTR(nstoken, "open NS0"))
+ goto destroy_xdp_redirect_map;
+
+ for (i = 0; i < VETH_PAIRS_COUNT; i++) {
+ int next_veth = net_config.veth_cfg[i].next_veth;
+ int interface_id;
+ int err;
+
+ interface_id = if_nametoindex(net_config.veth_cfg[next_veth].local_veth);
+		if (!ASSERT_NEQ(interface_id, 0, "non-zero interface index"))
+ goto destroy_xdp_redirect_map;
+ err = bpf_map_update_elem(map_fd, &i, &interface_id, BPF_ANY);
+ if (!ASSERT_OK(err, "configure interface redirection through map"))
+ goto destroy_xdp_redirect_map;
+ if (attach_programs_to_veth_pair(bpf_objs, VETH_REDIRECT_SKEL_NB,
+ &net_config, ping_config, i))
+ goto destroy_xdp_redirect_map;
+ }
+
+ /* Test: if all interfaces are properly configured, we must be able to ping
+ * veth33 from veth11
+ */
+ ASSERT_OK(SYS_NOFAIL("ip netns exec %s ping -c 1 -W 1 %s > /dev/null",
+ net_config.veth_cfg[0].namespace, IP_DST), "ping");
+
+destroy_xdp_redirect_map:
+ close_netns(nstoken);
+ xdp_redirect_map__destroy(xdp_redirect_map);
+destroy_xdp_tx:
+ xdp_tx__destroy(xdp_tx);
+destroy_xdp_dummy:
+ xdp_dummy__destroy(xdp_dummy);
+
+ cleanup_network(&net_config);
+}
+
+#define BROADCAST_REDIRECT_SKEL_NB 2
+static void xdp_veth_broadcast_redirect(u32 attach_flags, u64 redirect_flags)
+{
+ struct prog_configuration prog_cfg[VETH_PAIRS_COUNT] = {
+ {
+ .local_name = "xdp_redirect_map_multi_prog",
+ .remote_name = "xdp_count_0",
+ .local_flags = attach_flags,
+ .remote_flags = attach_flags,
+ },
+ {
+ .local_name = "xdp_redirect_map_multi_prog",
+ .remote_name = "xdp_count_1",
+ .local_flags = attach_flags,
+ .remote_flags = attach_flags,
+ },
+ {
+ .local_name = "xdp_redirect_map_multi_prog",
+ .remote_name = "xdp_count_2",
+ .local_flags = attach_flags,
+ .remote_flags = attach_flags,
+ }
+ };
+ struct bpf_object *bpf_objs[BROADCAST_REDIRECT_SKEL_NB];
+ struct xdp_redirect_multi_kern *xdp_redirect_multi_kern;
+ struct xdp_redirect_map *xdp_redirect_map;
+ struct bpf_devmap_val devmap_val = {};
+	struct net_configuration net_config = {};
+ struct nstoken *nstoken = NULL;
+ u16 protocol = ETH_P_IP;
+ int group_map;
+ int flags_map;
+ int cnt_map;
+ u64 cnt = 0;
+ int i, err;
+
+ xdp_redirect_multi_kern = xdp_redirect_multi_kern__open_and_load();
+ if (!ASSERT_OK_PTR(xdp_redirect_multi_kern, "xdp_redirect_multi_kern__open_and_load"))
+ return;
+
+ xdp_redirect_map = xdp_redirect_map__open_and_load();
+ if (!ASSERT_OK_PTR(xdp_redirect_map, "xdp_redirect_map__open_and_load"))
+ goto destroy_xdp_redirect_multi_kern;
+
+ if (!ASSERT_OK(create_network(&net_config), "create network"))
+ goto destroy_xdp_redirect_map;
+
+ group_map = bpf_map__fd(xdp_redirect_multi_kern->maps.map_all);
+ if (!ASSERT_OK_FD(group_map, "open map_all"))
+ goto destroy_xdp_redirect_map;
+
+ flags_map = bpf_map__fd(xdp_redirect_multi_kern->maps.redirect_flags);
+	if (!ASSERT_OK_FD(flags_map, "open redirect_flags"))
+ goto destroy_xdp_redirect_map;
+
+ err = bpf_map_update_elem(flags_map, &protocol, &redirect_flags, BPF_NOEXIST);
+	if (!ASSERT_OK(err, "set IPv4 redirect flags"))
+ goto destroy_xdp_redirect_map;
+
+ cnt_map = bpf_map__fd(xdp_redirect_map->maps.rxcnt);
+ if (!ASSERT_OK_FD(cnt_map, "open rxcnt map"))
+ goto destroy_xdp_redirect_map;
+
+ bpf_objs[0] = xdp_redirect_multi_kern->obj;
+ bpf_objs[1] = xdp_redirect_map->obj;
+
+ nstoken = open_netns(net_config.ns0_name);
+ if (!ASSERT_OK_PTR(nstoken, "open NS0"))
+ goto destroy_xdp_redirect_map;
+
+ for (i = 0; i < VETH_PAIRS_COUNT; i++) {
+ int ifindex = if_nametoindex(net_config.veth_cfg[i].local_veth);
+
+ if (attach_programs_to_veth_pair(bpf_objs, BROADCAST_REDIRECT_SKEL_NB,
+ &net_config, prog_cfg, i))
+ goto destroy_xdp_redirect_map;
+
+ SYS(destroy_xdp_redirect_map,
+ "ip -n %s neigh add %s lladdr 00:00:00:00:00:01 dev %s",
+ net_config.veth_cfg[i].namespace, IP_NEIGH, net_config.veth_cfg[i].remote_veth);
+
+ devmap_val.ifindex = ifindex;
+ err = bpf_map_update_elem(group_map, &ifindex, &devmap_val, 0);
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
+ goto destroy_xdp_redirect_map;
+
+ }
+
+ SYS_NOFAIL("ip netns exec %s ping %s -i 0.1 -c 4 -W1 > /dev/null ",
+ net_config.veth_cfg[0].namespace, IP_NEIGH);
+
+ for (i = 0; i < VETH_PAIRS_COUNT; i++) {
+ err = bpf_map_lookup_elem(cnt_map, &i, &cnt);
+ if (!ASSERT_OK(err, "get IP cnt"))
+ goto destroy_xdp_redirect_map;
+
+ if (redirect_flags & BPF_F_EXCLUDE_INGRESS)
+ /* veth11 shouldn't receive the ICMP requests;
+ * others should
+ */
+ ASSERT_EQ(cnt, i ? 4 : 0, "compare IP cnt");
+ else
+ /* All remote veth should receive the ICMP requests */
+ ASSERT_EQ(cnt, 4, "compare IP cnt");
+ }
+
+destroy_xdp_redirect_map:
+ close_netns(nstoken);
+ xdp_redirect_map__destroy(xdp_redirect_map);
+destroy_xdp_redirect_multi_kern:
+ xdp_redirect_multi_kern__destroy(xdp_redirect_multi_kern);
+
+ cleanup_network(&net_config);
+}
+
+#define VETH_EGRESS_SKEL_NB 3
+static void xdp_veth_egress(u32 flags)
+{
+ struct prog_configuration prog_cfg[VETH_PAIRS_COUNT] = {
+ {
+ .local_name = "xdp_redirect_map_all_prog",
+ .remote_name = "xdp_dummy_prog",
+ .local_flags = flags,
+ .remote_flags = flags,
+ },
+ {
+ .local_name = "xdp_redirect_map_all_prog",
+ .remote_name = "store_mac_1",
+ .local_flags = flags,
+ .remote_flags = flags,
+ },
+ {
+ .local_name = "xdp_redirect_map_all_prog",
+ .remote_name = "store_mac_2",
+ .local_flags = flags,
+ .remote_flags = flags,
+ }
+ };
+ const char magic_mac[6] = { 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF};
+ struct xdp_redirect_multi_kern *xdp_redirect_multi_kern;
+ struct bpf_object *bpf_objs[VETH_EGRESS_SKEL_NB];
+ struct xdp_redirect_map *xdp_redirect_map;
+ struct bpf_devmap_val devmap_val = {};
+	struct net_configuration net_config = {};
+ int mac_map, egress_map, res_map;
+ struct nstoken *nstoken = NULL;
+ struct xdp_dummy *xdp_dummy;
+ int err;
+ int i;
+
+ xdp_dummy = xdp_dummy__open_and_load();
+ if (!ASSERT_OK_PTR(xdp_dummy, "xdp_dummy__open_and_load"))
+ return;
+
+ xdp_redirect_multi_kern = xdp_redirect_multi_kern__open_and_load();
+ if (!ASSERT_OK_PTR(xdp_redirect_multi_kern, "xdp_redirect_multi_kern__open_and_load"))
+ goto destroy_xdp_dummy;
+
+ xdp_redirect_map = xdp_redirect_map__open_and_load();
+ if (!ASSERT_OK_PTR(xdp_redirect_map, "xdp_redirect_map__open_and_load"))
+ goto destroy_xdp_redirect_multi_kern;
+
+ if (!ASSERT_OK(create_network(&net_config), "create network"))
+ goto destroy_xdp_redirect_map;
+
+ mac_map = bpf_map__fd(xdp_redirect_multi_kern->maps.mac_map);
+ if (!ASSERT_OK_FD(mac_map, "open mac_map"))
+ goto destroy_xdp_redirect_map;
+
+ egress_map = bpf_map__fd(xdp_redirect_multi_kern->maps.map_egress);
+ if (!ASSERT_OK_FD(egress_map, "open map_egress"))
+ goto destroy_xdp_redirect_map;
+
+ devmap_val.bpf_prog.fd = bpf_program__fd(xdp_redirect_multi_kern->progs.xdp_devmap_prog);
+
+ bpf_objs[0] = xdp_dummy->obj;
+ bpf_objs[1] = xdp_redirect_multi_kern->obj;
+ bpf_objs[2] = xdp_redirect_map->obj;
+
+ nstoken = open_netns(net_config.ns0_name);
+ if (!ASSERT_OK_PTR(nstoken, "open NS0"))
+ goto destroy_xdp_redirect_map;
+
+ for (i = 0; i < VETH_PAIRS_COUNT; i++) {
+ int ifindex = if_nametoindex(net_config.veth_cfg[i].local_veth);
+
+ SYS(destroy_xdp_redirect_map,
+ "ip -n %s neigh add %s lladdr 00:00:00:00:00:01 dev %s",
+ net_config.veth_cfg[i].namespace, IP_NEIGH, net_config.veth_cfg[i].remote_veth);
+
+		if (attach_programs_to_veth_pair(bpf_objs, VETH_EGRESS_SKEL_NB,
+ &net_config, prog_cfg, i))
+ goto destroy_xdp_redirect_map;
+
+ err = bpf_map_update_elem(mac_map, &ifindex, magic_mac, 0);
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
+ goto destroy_xdp_redirect_map;
+
+ devmap_val.ifindex = ifindex;
+ err = bpf_map_update_elem(egress_map, &ifindex, &devmap_val, 0);
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
+ goto destroy_xdp_redirect_map;
+ }
+
+ SYS_NOFAIL("ip netns exec %s ping %s -i 0.1 -c 4 -W1 > /dev/null ",
+ net_config.veth_cfg[0].namespace, IP_NEIGH);
+
+ res_map = bpf_map__fd(xdp_redirect_map->maps.rx_mac);
+ if (!ASSERT_OK_FD(res_map, "open rx_map"))
+ goto destroy_xdp_redirect_map;
+
+ for (i = 0; i < 2; i++) {
+ u32 key = i;
+ u64 res;
+
+ err = bpf_map_lookup_elem(res_map, &key, &res);
+ if (!ASSERT_OK(err, "get MAC res"))
+ goto destroy_xdp_redirect_map;
+
+ ASSERT_STRNEQ((const char *)&res, magic_mac, ETH_ALEN, "compare mac");
+ }
+
+destroy_xdp_redirect_map:
+ close_netns(nstoken);
+ xdp_redirect_map__destroy(xdp_redirect_map);
+destroy_xdp_redirect_multi_kern:
+ xdp_redirect_multi_kern__destroy(xdp_redirect_multi_kern);
+destroy_xdp_dummy:
+ xdp_dummy__destroy(xdp_dummy);
+
+ cleanup_network(&net_config);
+}
+
+void test_xdp_veth_redirect(void)
+{
+ if (test__start_subtest("0"))
+ xdp_veth_redirect(0);
+
+ if (test__start_subtest("DRV_MODE"))
+ xdp_veth_redirect(XDP_FLAGS_DRV_MODE);
+
+ if (test__start_subtest("SKB_MODE"))
+ xdp_veth_redirect(XDP_FLAGS_SKB_MODE);
+}
+
+void test_xdp_veth_broadcast_redirect(void)
+{
+ if (test__start_subtest("0/BROADCAST"))
+ xdp_veth_broadcast_redirect(0, BPF_F_BROADCAST);
+
+ if (test__start_subtest("0/(BROADCAST | EXCLUDE_INGRESS)"))
+ xdp_veth_broadcast_redirect(0, BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
+
+ if (test__start_subtest("DRV_MODE/BROADCAST"))
+ xdp_veth_broadcast_redirect(XDP_FLAGS_DRV_MODE, BPF_F_BROADCAST);
+
+ if (test__start_subtest("DRV_MODE/(BROADCAST | EXCLUDE_INGRESS)"))
+ xdp_veth_broadcast_redirect(XDP_FLAGS_DRV_MODE,
+ BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
+
+ if (test__start_subtest("SKB_MODE/BROADCAST"))
+ xdp_veth_broadcast_redirect(XDP_FLAGS_SKB_MODE, BPF_F_BROADCAST);
+
+ if (test__start_subtest("SKB_MODE/(BROADCAST | EXCLUDE_INGRESS)"))
+ xdp_veth_broadcast_redirect(XDP_FLAGS_SKB_MODE,
+ BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
+}
+
+void test_xdp_veth_egress(void)
+{
+ if (test__start_subtest("0/egress"))
+ xdp_veth_egress(0);
+
+ if (test__start_subtest("DRV_MODE/egress"))
+ xdp_veth_egress(XDP_FLAGS_DRV_MODE);
+
+ if (test__start_subtest("SKB_MODE/egress"))
+ xdp_veth_egress(XDP_FLAGS_SKB_MODE);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_xsk.c b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
new file mode 100644
index 000000000000..5af28f359cfd
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
@@ -0,0 +1,2596 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/bpf.h>
+#include <errno.h>
+#include <linux/bitmap.h>
+#include <linux/if_link.h>
+#include <linux/mman.h>
+#include <linux/netdev.h>
+#include <poll.h>
+#include <pthread.h>
+#include <signal.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "network_helpers.h"
+#include "test_xsk.h"
+#include "xsk_xdp_common.h"
+#include "xsk_xdp_progs.skel.h"
+
+#define DEFAULT_BATCH_SIZE 64
+#define MIN_PKT_SIZE 64
+#define MAX_ETH_JUMBO_SIZE 9000
+#define MAX_INTERFACES 2
+#define MAX_TEARDOWN_ITER 10
+#define MAX_TX_BUDGET_DEFAULT 32
+#define PKT_DUMP_NB_TO_PRINT 16
+/* Just to align the data in the packet */
+#define PKT_HDR_SIZE (sizeof(struct ethhdr) + 2)
+#define POLL_TMOUT 1000
+#define THREAD_TMOUT 3
+#define UMEM_HEADROOM_TEST_SIZE 128
+#define XSK_DESC__INVALID_OPTION (0xffff)
+#define XSK_UMEM__INVALID_FRAME_SIZE (MAX_ETH_JUMBO_SIZE + 1)
+#define XSK_UMEM__LARGE_FRAME_SIZE (3 * 1024)
+#define XSK_UMEM__MAX_FRAME_SIZE (4 * 1024)
+
+static const u8 g_mac[ETH_ALEN] = {0x55, 0x44, 0x33, 0x22, 0x11, 0x00};
+
+bool opt_verbose;
+pthread_barrier_t barr;
+pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+int pkts_in_flight;
+
+/* The payload consists of 32-bit words, each carrying the packet sequence
+ * number in the upper 16 bits and an intra-packet data sequence number in
+ * the lower 16 bits. So the 3rd packet's 5th word of data contains the
+ * number (2 << 16) | 4, as both are numbered from 0.
+ */
+static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size)
+{
+ u32 *ptr = (u32 *)dest, i;
+
+ start /= sizeof(*ptr);
+ size /= sizeof(*ptr);
+ for (i = 0; i < size; i++)
+ ptr[i] = htonl(pkt_nb << 16 | (i + start));
+}
+
+static void gen_eth_hdr(struct xsk_socket_info *xsk, struct ethhdr *eth_hdr)
+{
+ memcpy(eth_hdr->h_dest, xsk->dst_mac, ETH_ALEN);
+ memcpy(eth_hdr->h_source, xsk->src_mac, ETH_ALEN);
+ eth_hdr->h_proto = htons(ETH_P_LOOPBACK);
+}
+
+static bool is_umem_valid(struct ifobject *ifobj)
+{
+ return !!ifobj->umem->umem;
+}
+
+static u32 mode_to_xdp_flags(enum test_mode mode)
+{
+ return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE;
+}
+
+static u64 umem_size(struct xsk_umem_info *umem)
+{
+ return umem->num_frames * umem->frame_size;
+}
+
+int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer,
+ u64 size)
+{
+ struct xsk_umem_config cfg = {
+ .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
+ .frame_size = umem->frame_size,
+ .frame_headroom = umem->frame_headroom,
+ .flags = XSK_UMEM__DEFAULT_FLAGS
+ };
+ int ret;
+
+ if (umem->fill_size)
+ cfg.fill_size = umem->fill_size;
+
+ if (umem->comp_size)
+ cfg.comp_size = umem->comp_size;
+
+ if (umem->unaligned_mode)
+ cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;
+
+ ret = xsk_umem__create(&umem->umem, buffer, size,
+ &umem->fq, &umem->cq, &cfg);
+ if (ret)
+ return ret;
+
+ umem->buffer = buffer;
+ if (ifobj->shared_umem && ifobj->rx_on) {
+ umem->base_addr = umem_size(umem);
+ umem->next_buffer = umem_size(umem);
+ }
+
+ return 0;
+}
+
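+/* Hand out frame-sized buffers from the UMEM in round-robin order,
+ * wrapping back to base_addr once the end of the region is reached.
+ */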
+static u64 umem_alloc_buffer(struct xsk_umem_info *umem)
+{
+ u64 addr;
+
+ addr = umem->next_buffer;
+ umem->next_buffer += umem->frame_size;
+ if (umem->next_buffer >= umem->base_addr + umem_size(umem))
+ umem->next_buffer = umem->base_addr;
+
+ return addr;
+}
+
+static void umem_reset_alloc(struct xsk_umem_info *umem)
+{
+ umem->next_buffer = 0;
+}
+
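+/* Turn on preferred busy polling for the socket: poll for up to 20 usecs
+ * per syscall, with a packet budget equal to the socket's batch size.
+ */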
+static int enable_busy_poll(struct xsk_socket_info *xsk)
+{
+ int sock_opt;
+
+ sock_opt = 1;
+ if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
+ (void *)&sock_opt, sizeof(sock_opt)) < 0)
+ return -errno;
+
+ sock_opt = 20;
+ if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
+ (void *)&sock_opt, sizeof(sock_opt)) < 0)
+ return -errno;
+
+ sock_opt = xsk->batch_size;
+ if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
+ (void *)&sock_opt, sizeof(sock_opt)) < 0)
+ return -errno;
+
+ return 0;
+}
+
+int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
+ struct ifobject *ifobject, bool shared)
+{
+ struct xsk_socket_config cfg = {};
+ struct xsk_ring_cons *rxr;
+ struct xsk_ring_prod *txr;
+
+ xsk->umem = umem;
+ cfg.rx_size = xsk->rxqsize;
+ cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+ cfg.bind_flags = ifobject->bind_flags;
+ if (shared)
+ cfg.bind_flags |= XDP_SHARED_UMEM;
+ if (ifobject->mtu > MAX_ETH_PKT_SIZE)
+ cfg.bind_flags |= XDP_USE_SG;
+ if (umem->comp_size)
+ cfg.tx_size = umem->comp_size;
+ if (umem->fill_size)
+ cfg.rx_size = umem->fill_size;
+
+ txr = ifobject->tx_on ? &xsk->tx : NULL;
+ rxr = ifobject->rx_on ? &xsk->rx : NULL;
+ return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);
+}
+
+#define MAX_SKB_FRAGS_PATH "/proc/sys/net/core/max_skb_frags"
+static unsigned int get_max_skb_frags(void)
+{
+ unsigned int max_skb_frags = 0;
+ FILE *file;
+
+ file = fopen(MAX_SKB_FRAGS_PATH, "r");
+ if (!file) {
+ ksft_print_msg("Error opening %s\n", MAX_SKB_FRAGS_PATH);
+ return 0;
+ }
+
+ if (fscanf(file, "%u", &max_skb_frags) != 1)
+ ksft_print_msg("Error reading %s\n", MAX_SKB_FRAGS_PATH);
+
+ fclose(file);
+ return max_skb_frags;
+}
+
+static int set_ring_size(struct ifobject *ifobj)
+{
+ int ret;
+ u32 ctr = 0;
+
+ while (ctr++ < SOCK_RECONF_CTR) {
+ ret = set_hw_ring_size(ifobj->ifname, &ifobj->ring);
+ if (!ret)
+ break;
+
+ /* Retry if it fails */
+ if (ctr >= SOCK_RECONF_CTR || errno != EBUSY)
+ return -errno;
+
+ usleep(USLEEP_MAX);
+ }
+
+ return ret;
+}
+
+int hw_ring_size_reset(struct ifobject *ifobj)
+{
+ ifobj->ring.tx_pending = ifobj->set_ring.default_tx;
+ ifobj->ring.rx_pending = ifobj->set_ring.default_rx;
+ return set_ring_size(ifobj);
+}
+
+static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
+ struct ifobject *ifobj_rx)
+{
+ u32 i, j;
+
+ for (i = 0; i < MAX_INTERFACES; i++) {
+ struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
+
+ ifobj->xsk = &ifobj->xsk_arr[0];
+ ifobj->use_poll = false;
+ ifobj->use_fill_ring = true;
+ ifobj->release_rx = true;
+ ifobj->validation_func = NULL;
+ ifobj->use_metadata = false;
+
+ if (i == 0) {
+ ifobj->rx_on = false;
+ ifobj->tx_on = true;
+ } else {
+ ifobj->rx_on = true;
+ ifobj->tx_on = false;
+ }
+
+ memset(ifobj->umem, 0, sizeof(*ifobj->umem));
+ ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
+ ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
+
+ for (j = 0; j < MAX_SOCKETS; j++) {
+ memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
+ ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ ifobj->xsk_arr[j].batch_size = DEFAULT_BATCH_SIZE;
+ if (i == 0)
+ ifobj->xsk_arr[j].pkt_stream = test->tx_pkt_stream_default;
+ else
+ ifobj->xsk_arr[j].pkt_stream = test->rx_pkt_stream_default;
+
+ memcpy(ifobj->xsk_arr[j].src_mac, g_mac, ETH_ALEN);
+ memcpy(ifobj->xsk_arr[j].dst_mac, g_mac, ETH_ALEN);
+ ifobj->xsk_arr[j].src_mac[5] += ((j * 2) + 0);
+ ifobj->xsk_arr[j].dst_mac[5] += ((j * 2) + 1);
+ }
+ }
+
+ if (ifobj_tx->hw_ring_size_supp)
+ hw_ring_size_reset(ifobj_tx);
+
+ test->ifobj_tx = ifobj_tx;
+ test->ifobj_rx = ifobj_rx;
+ test->current_step = 0;
+ test->total_steps = 1;
+ test->nb_sockets = 1;
+ test->fail = false;
+ test->set_ring = false;
+ test->adjust_tail = false;
+ test->adjust_tail_support = false;
+ test->mtu = MAX_ETH_PKT_SIZE;
+ test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog;
+ test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk;
+ test->xdp_prog_tx = ifobj_tx->xdp_progs->progs.xsk_def_prog;
+ test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk;
+}
+
+void test_init(struct test_spec *test, struct ifobject *ifobj_tx,
+ struct ifobject *ifobj_rx, enum test_mode mode,
+ const struct test_spec *test_to_run)
+{
+ struct pkt_stream *tx_pkt_stream;
+ struct pkt_stream *rx_pkt_stream;
+ u32 i;
+
+ tx_pkt_stream = test->tx_pkt_stream_default;
+ rx_pkt_stream = test->rx_pkt_stream_default;
+ memset(test, 0, sizeof(*test));
+ test->tx_pkt_stream_default = tx_pkt_stream;
+ test->rx_pkt_stream_default = rx_pkt_stream;
+
+ for (i = 0; i < MAX_INTERFACES; i++) {
+ struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
+
+ ifobj->bind_flags = XDP_USE_NEED_WAKEUP;
+ if (mode == TEST_MODE_ZC)
+ ifobj->bind_flags |= XDP_ZEROCOPY;
+ else
+ ifobj->bind_flags |= XDP_COPY;
+ }
+
+ memcpy(test->name, test_to_run->name, MAX_TEST_NAME_SIZE);
+ test->test_func = test_to_run->test_func;
+ test->mode = mode;
+ __test_spec_init(test, ifobj_tx, ifobj_rx);
+}
+
+static void test_spec_reset(struct test_spec *test)
+{
+ __test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
+}
+
+static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx,
+ struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx,
+ struct bpf_map *xskmap_tx)
+{
+ test->xdp_prog_rx = xdp_prog_rx;
+ test->xdp_prog_tx = xdp_prog_tx;
+ test->xskmap_rx = xskmap_rx;
+ test->xskmap_tx = xskmap_tx;
+}
+
+static int test_spec_set_mtu(struct test_spec *test, int mtu)
+{
+ int err;
+
+ if (test->ifobj_rx->mtu != mtu) {
+ err = xsk_set_mtu(test->ifobj_rx->ifindex, mtu);
+ if (err)
+ return err;
+ test->ifobj_rx->mtu = mtu;
+ }
+ if (test->ifobj_tx->mtu != mtu) {
+ err = xsk_set_mtu(test->ifobj_tx->ifindex, mtu);
+ if (err)
+ return err;
+ test->ifobj_tx->mtu = mtu;
+ }
+
+ return 0;
+}
+
+void pkt_stream_reset(struct pkt_stream *pkt_stream)
+{
+ if (pkt_stream) {
+ pkt_stream->current_pkt_nb = 0;
+ pkt_stream->nb_rx_pkts = 0;
+ }
+}
+
+static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream)
+{
+ if (pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts)
+ return NULL;
+
+ return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
+}
+
+static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
+{
+ while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
+ (*pkts_sent)++;
+ if (pkt_stream->pkts[pkt_stream->current_pkt_nb].valid)
+ return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
+ pkt_stream->current_pkt_nb++;
+ }
+ return NULL;
+}
+
+void pkt_stream_delete(struct pkt_stream *pkt_stream)
+{
+ free(pkt_stream->pkts);
+ free(pkt_stream);
+}
+
+void pkt_stream_restore_default(struct test_spec *test)
+{
+ struct pkt_stream *tx_pkt_stream = test->ifobj_tx->xsk->pkt_stream;
+ struct pkt_stream *rx_pkt_stream = test->ifobj_rx->xsk->pkt_stream;
+
+ if (tx_pkt_stream != test->tx_pkt_stream_default) {
+ pkt_stream_delete(test->ifobj_tx->xsk->pkt_stream);
+ test->ifobj_tx->xsk->pkt_stream = test->tx_pkt_stream_default;
+ }
+
+ if (rx_pkt_stream != test->rx_pkt_stream_default) {
+ pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);
+ test->ifobj_rx->xsk->pkt_stream = test->rx_pkt_stream_default;
+ }
+}
+
+static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
+{
+ struct pkt_stream *pkt_stream;
+
+ pkt_stream = calloc(1, sizeof(*pkt_stream));
+ if (!pkt_stream)
+ return NULL;
+
+ pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts));
+ if (!pkt_stream->pkts) {
+ free(pkt_stream);
+ return NULL;
+ }
+
+ pkt_stream->nb_pkts = nb_pkts;
+ return pkt_stream;
+}
+
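+/* Return the number of frame-sized fragments @pkt occupies. In verbatim
+ * mode each stream entry is a descriptor, so walk forward until an entry
+ * without the continuation option ends the packet.
+ */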
+static u32 pkt_nb_frags(u32 frame_size, struct pkt_stream *pkt_stream, struct pkt *pkt)
+{
+ u32 nb_frags = 1, next_frag;
+
+ if (!pkt)
+ return 1;
+
+ if (!pkt_stream->verbatim) {
+ if (!pkt->valid || !pkt->len)
+ return 1;
+ return ceil_u32(pkt->len, frame_size);
+ }
+
+ /* Search for the end of the packet in verbatim mode */
+ if (!pkt_continues(pkt->options))
+ return nb_frags;
+
+ next_frag = pkt_stream->current_pkt_nb;
+ pkt++;
+ while (next_frag++ < pkt_stream->nb_pkts) {
+ nb_frags++;
+ if (!pkt_continues(pkt->options) || !pkt->valid)
+ break;
+ pkt++;
+ }
+ return nb_frags;
+}
+
+static bool set_pkt_valid(int offset, u32 len)
+{
+ return len <= MAX_ETH_JUMBO_SIZE;
+}
+
+static void pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
+{
+ pkt->offset = offset;
+ pkt->len = len;
+ pkt->valid = set_pkt_valid(offset, len);
+}
+
+static void pkt_stream_pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
+{
+ bool prev_pkt_valid = pkt->valid;
+
+ pkt_set(pkt_stream, pkt, offset, len);
+ pkt_stream->nb_valid_entries += pkt->valid - prev_pkt_valid;
+}
+
+static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
+{
+ return ceil_u32(len, umem->frame_size) * umem->frame_size;
+}
+
+static struct pkt_stream *__pkt_stream_generate(u32 nb_pkts, u32 pkt_len, u32 nb_start, u32 nb_off)
+{
+ struct pkt_stream *pkt_stream;
+ u32 i;
+
+ pkt_stream = __pkt_stream_alloc(nb_pkts);
+ if (!pkt_stream)
+ return NULL;
+
+ pkt_stream->nb_pkts = nb_pkts;
+ pkt_stream->max_pkt_len = pkt_len;
+ for (i = 0; i < nb_pkts; i++) {
+ struct pkt *pkt = &pkt_stream->pkts[i];
+
+ pkt_stream_pkt_set(pkt_stream, pkt, 0, pkt_len);
+ pkt->pkt_nb = nb_start + i * nb_off;
+ }
+
+ return pkt_stream;
+}
+
+struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len)
+{
+ return __pkt_stream_generate(nb_pkts, pkt_len, 0, 1);
+}
+
+static struct pkt_stream *pkt_stream_clone(struct pkt_stream *pkt_stream)
+{
+ return pkt_stream_generate(pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
+}
+
+static int pkt_stream_replace_ifobject(struct ifobject *ifobj, u32 nb_pkts, u32 pkt_len)
+{
+ ifobj->xsk->pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);
+
+ if (!ifobj->xsk->pkt_stream)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
+{
+ int ret;
+
+ ret = pkt_stream_replace_ifobject(test->ifobj_tx, nb_pkts, pkt_len);
+ if (ret)
+ return ret;
+
+ return pkt_stream_replace_ifobject(test->ifobj_rx, nb_pkts, pkt_len);
+}
+
+static int __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
+ int offset)
+{
+ struct pkt_stream *pkt_stream;
+ u32 i;
+
+ pkt_stream = pkt_stream_clone(ifobj->xsk->pkt_stream);
+ if (!pkt_stream)
+ return -ENOMEM;
+
+ for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2)
+ pkt_stream_pkt_set(pkt_stream, &pkt_stream->pkts[i], offset, pkt_len);
+
+ ifobj->xsk->pkt_stream = pkt_stream;
+
+ return 0;
+}
+
+static int pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
+{
+ int ret = __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset);
+
+ if (ret)
+ return ret;
+
+ return __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset);
+}
+
+static int pkt_stream_receive_half(struct test_spec *test)
+{
+ struct pkt_stream *pkt_stream = test->ifobj_tx->xsk->pkt_stream;
+ u32 i;
+
+ if (test->ifobj_rx->xsk->pkt_stream != test->rx_pkt_stream_default)
+		/* The packet stream has already been replaced, so we have to
+		 * release this one. The newly created one will be freed by
+		 * restore_default() at the end of the test.
+		 */
+ pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);
+
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(pkt_stream->nb_pkts,
+ pkt_stream->pkts[0].len);
+ if (!test->ifobj_rx->xsk->pkt_stream)
+ return -ENOMEM;
+
+ pkt_stream = test->ifobj_rx->xsk->pkt_stream;
+ for (i = 1; i < pkt_stream->nb_pkts; i += 2)
+ pkt_stream->pkts[i].valid = false;
+
+ pkt_stream->nb_valid_entries /= 2;
+
+ return 0;
+}
+
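+/* Split the default streams between the sockets so that socket i carries
+ * packet numbers i, i + 2, i + 4, ...: with two sockets this is the
+ * even/odd split the name implies.
+ */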
+static int pkt_stream_even_odd_sequence(struct test_spec *test)
+{
+ struct pkt_stream *pkt_stream;
+ u32 i;
+
+ for (i = 0; i < test->nb_sockets; i++) {
+ pkt_stream = test->ifobj_tx->xsk_arr[i].pkt_stream;
+ pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
+ pkt_stream->pkts[0].len, i, 2);
+ if (!pkt_stream)
+ return -ENOMEM;
+ test->ifobj_tx->xsk_arr[i].pkt_stream = pkt_stream;
+
+ pkt_stream = test->ifobj_rx->xsk_arr[i].pkt_stream;
+ pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
+ pkt_stream->pkts[0].len, i, 2);
+ if (!pkt_stream)
+ return -ENOMEM;
+ test->ifobj_rx->xsk_arr[i].pkt_stream = pkt_stream;
+ }
+
+ return 0;
+}
+
+static void release_even_odd_sequence(struct test_spec *test)
+{
+ struct pkt_stream *later_free_tx = test->ifobj_tx->xsk->pkt_stream;
+ struct pkt_stream *later_free_rx = test->ifobj_rx->xsk->pkt_stream;
+ int i;
+
+ for (i = 0; i < test->nb_sockets; i++) {
+ /* later_free_{rx/tx} will be freed by restore_default() */
+ if (test->ifobj_tx->xsk_arr[i].pkt_stream != later_free_tx)
+ pkt_stream_delete(test->ifobj_tx->xsk_arr[i].pkt_stream);
+ if (test->ifobj_rx->xsk_arr[i].pkt_stream != later_free_rx)
+ pkt_stream_delete(test->ifobj_rx->xsk_arr[i].pkt_stream);
+	}
+}
+
+static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem)
+{
+ if (!pkt->valid)
+ return pkt->offset;
+ return pkt->offset + umem_alloc_buffer(umem);
+}
+
+static void pkt_stream_cancel(struct pkt_stream *pkt_stream)
+{
+ pkt_stream->current_pkt_nb--;
+}
+
+static void pkt_generate(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, u64 addr, u32 len,
+ u32 pkt_nb, u32 bytes_written)
+{
+ void *data = xsk_umem__get_data(umem->buffer, addr);
+
+ if (len < MIN_PKT_SIZE)
+ return;
+
+ if (!bytes_written) {
+ gen_eth_hdr(xsk, data);
+
+ len -= PKT_HDR_SIZE;
+ data += PKT_HDR_SIZE;
+ } else {
+ bytes_written -= PKT_HDR_SIZE;
+ }
+
+ write_payload(data, pkt_nb, bytes_written, len);
+}
+
+static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, struct pkt *frames,
+ u32 nb_frames, bool verbatim)
+{
+ u32 i, len = 0, pkt_nb = 0, payload = 0;
+ struct pkt_stream *pkt_stream;
+
+ pkt_stream = __pkt_stream_alloc(nb_frames);
+ if (!pkt_stream)
+ return NULL;
+
+ for (i = 0; i < nb_frames; i++) {
+ struct pkt *pkt = &pkt_stream->pkts[pkt_nb];
+ struct pkt *frame = &frames[i];
+
+ pkt->offset = frame->offset;
+ if (verbatim) {
+ *pkt = *frame;
+ pkt->pkt_nb = payload;
+ if (!frame->valid || !pkt_continues(frame->options))
+ payload++;
+ } else {
+ if (frame->valid)
+ len += frame->len;
+ if (frame->valid && pkt_continues(frame->options))
+ continue;
+
+ pkt->pkt_nb = pkt_nb;
+ pkt->len = len;
+ pkt->valid = frame->valid;
+ pkt->options = 0;
+
+ len = 0;
+ }
+
+ print_verbose("offset: %d len: %u valid: %u options: %u pkt_nb: %u\n",
+ pkt->offset, pkt->len, pkt->valid, pkt->options, pkt->pkt_nb);
+
+ if (pkt->valid && pkt->len > pkt_stream->max_pkt_len)
+ pkt_stream->max_pkt_len = pkt->len;
+
+ if (pkt->valid)
+ pkt_stream->nb_valid_entries++;
+
+ pkt_nb++;
+ }
+
+ pkt_stream->nb_pkts = pkt_nb;
+ pkt_stream->verbatim = verbatim;
+ return pkt_stream;
+}
+
+static int pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
+{
+ struct pkt_stream *pkt_stream;
+
+ pkt_stream = __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts, true);
+ if (!pkt_stream)
+ return -ENOMEM;
+ test->ifobj_tx->xsk->pkt_stream = pkt_stream;
+
+ pkt_stream = __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts, false);
+ if (!pkt_stream)
+ return -ENOMEM;
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream;
+
+ return 0;
+}
+
+static void pkt_print_data(u32 *data, u32 cnt)
+{
+ u32 i;
+
+ for (i = 0; i < cnt; i++) {
+ u32 seqnum, pkt_nb;
+
+ seqnum = ntohl(*data) & 0xffff;
+ pkt_nb = ntohl(*data) >> 16;
+ ksft_print_msg("%u:%u ", pkt_nb, seqnum);
+ data++;
+ }
+}
+
+static void pkt_dump(void *pkt, u32 len, bool eth_header)
+{
+ struct ethhdr *ethhdr = pkt;
+ u32 i, *data;
+
+ if (eth_header) {
+		/* extract L2 frame */
+ ksft_print_msg("DEBUG>> L2: dst mac: ");
+ for (i = 0; i < ETH_ALEN; i++)
+ ksft_print_msg("%02X", ethhdr->h_dest[i]);
+
+ ksft_print_msg("\nDEBUG>> L2: src mac: ");
+ for (i = 0; i < ETH_ALEN; i++)
+ ksft_print_msg("%02X", ethhdr->h_source[i]);
+
+ data = pkt + PKT_HDR_SIZE;
+ } else {
+ data = pkt;
+ }
+
+	/* extract L5 frame */
+ ksft_print_msg("\nDEBUG>> L5: seqnum: ");
+ pkt_print_data(data, PKT_DUMP_NB_TO_PRINT);
+ ksft_print_msg("....");
+ if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) {
+ ksft_print_msg("\n.... ");
+ pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT,
+ PKT_DUMP_NB_TO_PRINT);
+ }
+ ksft_print_msg("\n---------------------------------------\n");
+}
+
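+/* Validate where the packet landed in its UMEM frame. In aligned mode the
+ * expected offset is XDP_PACKET_HEADROOM plus the configured frame headroom;
+ * in unaligned mode it is XDP_PACKET_HEADROOM plus the packet's own offset.
+ */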
+static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr)
+{
+ u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
+ u32 offset = addr % umem->frame_size, expected_offset;
+ int pkt_offset = pkt->valid ? pkt->offset : 0;
+
+ if (!umem->unaligned_mode)
+ pkt_offset = 0;
+
+ expected_offset = (pkt_offset + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;
+
+ if (offset == expected_offset)
+ return true;
+
+ ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset);
+ return false;
+}
+
+static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr)
+{
+ void *data = xsk_umem__get_data(buffer, addr);
+ struct xdp_info *meta = data - sizeof(struct xdp_info);
+
+ if (meta->count != pkt->pkt_nb) {
+ ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%llu]\n",
+ __func__, pkt->pkt_nb,
+ (unsigned long long)meta->count);
+ return false;
+ }
+
+ return true;
+}
+
+static int is_adjust_tail_supported(struct xsk_xdp_progs *skel_rx, bool *supported)
+{
+ struct bpf_map *data_map;
+ int adjust_value = 0;
+ int key = 0;
+ int ret;
+
+ data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
+ if (!data_map || !bpf_map__is_internal(data_map)) {
+ ksft_print_msg("Error: could not find bss section of XDP program\n");
+ return -EINVAL;
+ }
+
+ ret = bpf_map_lookup_elem(bpf_map__fd(data_map), &key, &adjust_value);
+ if (ret) {
+ ksft_print_msg("Error: bpf_map_lookup_elem failed with error %d\n", ret);
+ return ret;
+ }
+
+ /* Set the 'adjust_value' variable to -EOPNOTSUPP in the XDP program if the adjust_tail
+ * helper is not supported. Skip the adjust_tail test case in this scenario.
+ */
+ *supported = adjust_value != -EOPNOTSUPP;
+
+ return 0;
+}
+
+static bool is_frag_valid(struct xsk_umem_info *umem, u64 addr, u32 len, u32 expected_pkt_nb,
+ u32 bytes_processed)
+{
+ u32 seqnum, pkt_nb, *pkt_data, words_to_end, expected_seqnum;
+ void *data = xsk_umem__get_data(umem->buffer, addr);
+
+ addr -= umem->base_addr;
+
+ if (addr >= umem->num_frames * umem->frame_size ||
+ addr + len > umem->num_frames * umem->frame_size) {
+ ksft_print_msg("Frag invalid addr: %llx len: %u\n",
+ (unsigned long long)addr, len);
+ return false;
+ }
+ if (!umem->unaligned_mode && addr % umem->frame_size + len > umem->frame_size) {
+ ksft_print_msg("Frag crosses frame boundary addr: %llx len: %u\n",
+ (unsigned long long)addr, len);
+ return false;
+ }
+
+ pkt_data = data;
+ if (!bytes_processed) {
+ pkt_data += PKT_HDR_SIZE / sizeof(*pkt_data);
+ len -= PKT_HDR_SIZE;
+ } else {
+ bytes_processed -= PKT_HDR_SIZE;
+ }
+
+ expected_seqnum = bytes_processed / sizeof(*pkt_data);
+ seqnum = ntohl(*pkt_data) & 0xffff;
+ pkt_nb = ntohl(*pkt_data) >> 16;
+
+ if (expected_pkt_nb != pkt_nb) {
+ ksft_print_msg("[%s] expected pkt_nb [%u], got pkt_nb [%u]\n",
+ __func__, expected_pkt_nb, pkt_nb);
+ goto error;
+ }
+ if (expected_seqnum != seqnum) {
+ ksft_print_msg("[%s] expected seqnum at start [%u], got seqnum [%u]\n",
+ __func__, expected_seqnum, seqnum);
+ goto error;
+ }
+
+ words_to_end = len / sizeof(*pkt_data) - 1;
+ pkt_data += words_to_end;
+ seqnum = ntohl(*pkt_data) & 0xffff;
+ expected_seqnum += words_to_end;
+ if (expected_seqnum != seqnum) {
+ ksft_print_msg("[%s] expected seqnum at end [%u], got seqnum [%u]\n",
+ __func__, expected_seqnum, seqnum);
+ goto error;
+ }
+
+ return true;
+
+error:
+ pkt_dump(data, len, !bytes_processed);
+ return false;
+}
+
+static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
+{
+ if (pkt->len != len) {
+ ksft_print_msg("[%s] expected packet length [%d], got length [%d]\n",
+ __func__, pkt->len, len);
+ pkt_dump(xsk_umem__get_data(buffer, addr), len, true);
+ return false;
+ }
+
+ return true;
+}
+
+static u32 load_value(u32 *counter)
+{
+ return __atomic_load_n(counter, __ATOMIC_ACQUIRE);
+}
+
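+/* Trigger a send and check that the generic xmit path honors its TX
+ * budget: with more than @max_budget descriptors ready, a single
+ * sendto() must consume exactly @max_budget of them.
+ */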
+static bool kick_tx_with_check(struct xsk_socket_info *xsk, int *ret)
+{
+ u32 max_budget = MAX_TX_BUDGET_DEFAULT;
+ u32 cons, ready_to_send;
+ int delta;
+
+ cons = load_value(xsk->tx.consumer);
+ ready_to_send = load_value(xsk->tx.producer) - cons;
+ *ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
+
+ delta = load_value(xsk->tx.consumer) - cons;
+	/* By default, xsk should consume exactly @max_budget descs in one
+	 * send once the max-budget limit of the while loop in
+	 * __xsk_generic_xmit() is hit. Make sure the number of descs to be
+	 * sent is larger than @max_budget, or else tx.consumer gets updated
+	 * in time by xskq_cons_peek_desc(), which hides the issue we are
+	 * trying to verify.
+	 */
+ if (ready_to_send > max_budget && delta != max_budget)
+ return false;
+
+ return true;
+}
+
+int kick_tx(struct xsk_socket_info *xsk)
+{
+ int ret;
+
+ if (xsk->check_consumer) {
+ if (!kick_tx_with_check(xsk, &ret))
+ return TEST_FAILURE;
+ } else {
+ ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
+ }
+ if (ret >= 0)
+ return TEST_PASS;
+ if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
+ usleep(100);
+ return TEST_PASS;
+ }
+ return TEST_FAILURE;
+}
+
+int kick_rx(struct xsk_socket_info *xsk)
+{
+ int ret;
+
+ ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
+ if (ret < 0)
+ return TEST_FAILURE;
+
+ return TEST_PASS;
+}
+
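+/* Reap up to @batch_size entries from the completion ring, kicking the
+ * kernel first if the TX ring asked for a wakeup. Receiving more
+ * completions than there are outstanding TX descriptors is an error.
+ */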
+static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
+{
+ unsigned int rcvd;
+ u32 idx;
+ int ret;
+
+ if (xsk_ring_prod__needs_wakeup(&xsk->tx)) {
+ ret = kick_tx(xsk);
+ if (ret)
+ return TEST_FAILURE;
+ }
+
+ rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
+ if (rcvd) {
+ if (rcvd > xsk->outstanding_tx) {
+ u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);
+
+ ksft_print_msg("[%s] Too many packets completed\n", __func__);
+ ksft_print_msg("Last completion address: %llx\n",
+ (unsigned long long)addr);
+ return TEST_FAILURE;
+ }
+
+ xsk_ring_cons__release(&xsk->umem->cq, rcvd);
+ xsk->outstanding_tx -= rcvd;
+ }
+
+ return TEST_PASS;
+}
+
+static int __receive_pkts(struct test_spec *test, struct xsk_socket_info *xsk)
+{
+ u32 frags_processed = 0, nb_frags = 0, pkt_len = 0;
+ u32 idx_rx = 0, idx_fq = 0, rcvd, pkts_sent = 0;
+ struct pkt_stream *pkt_stream = xsk->pkt_stream;
+ struct ifobject *ifobj = test->ifobj_rx;
+ struct xsk_umem_info *umem = xsk->umem;
+ struct pollfd fds = { };
+ struct pkt *pkt;
+ u64 first_addr = 0;
+ int ret;
+
+ fds.fd = xsk_socket__fd(xsk->xsk);
+ fds.events = POLLIN;
+
+ ret = kick_rx(xsk);
+ if (ret)
+ return TEST_FAILURE;
+
+ if (ifobj->use_poll) {
+ ret = poll(&fds, 1, POLL_TMOUT);
+ if (ret < 0)
+ return TEST_FAILURE;
+
+ if (!ret) {
+ if (!is_umem_valid(test->ifobj_tx))
+ return TEST_PASS;
+
+ ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
+ return TEST_CONTINUE;
+ }
+
+ if (!(fds.revents & POLLIN))
+ return TEST_CONTINUE;
+ }
+
+ rcvd = xsk_ring_cons__peek(&xsk->rx, xsk->batch_size, &idx_rx);
+ if (!rcvd)
+ return TEST_CONTINUE;
+
+ if (ifobj->use_fill_ring) {
+ ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
+ while (ret != rcvd) {
+ if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
+ ret = poll(&fds, 1, POLL_TMOUT);
+ if (ret < 0)
+ return TEST_FAILURE;
+ }
+ ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
+ }
+ }
+
+ while (frags_processed < rcvd) {
+ const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
+ u64 addr = desc->addr, orig;
+
+ orig = xsk_umem__extract_addr(addr);
+ addr = xsk_umem__add_offset_to_addr(addr);
+
+ if (!nb_frags) {
+ pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
+ if (!pkt) {
+ ksft_print_msg("[%s] received too many packets addr: %lx len %u\n",
+ __func__, addr, desc->len);
+ return TEST_FAILURE;
+ }
+ }
+
+ print_verbose("Rx: addr: %lx len: %u options: %u pkt_nb: %u valid: %u\n",
+ addr, desc->len, desc->options, pkt->pkt_nb, pkt->valid);
+
+ if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) ||
+ !is_offset_correct(umem, pkt, addr) || (ifobj->use_metadata &&
+ !is_metadata_correct(pkt, umem->buffer, addr)))
+ return TEST_FAILURE;
+
+ if (!nb_frags++)
+ first_addr = addr;
+ frags_processed++;
+ pkt_len += desc->len;
+ if (ifobj->use_fill_ring)
+ *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
+
+ if (pkt_continues(desc->options))
+ continue;
+
+ /* The complete packet has been received */
+ if (!is_pkt_valid(pkt, umem->buffer, first_addr, pkt_len) ||
+ !is_offset_correct(umem, pkt, addr))
+ return TEST_FAILURE;
+
+ pkt_stream->nb_rx_pkts++;
+ nb_frags = 0;
+ pkt_len = 0;
+ }
+
+ if (nb_frags) {
+ /* In the middle of a packet. Start over from beginning of packet. */
+ idx_rx -= nb_frags;
+ xsk_ring_cons__cancel(&xsk->rx, nb_frags);
+ if (ifobj->use_fill_ring) {
+ idx_fq -= nb_frags;
+ xsk_ring_prod__cancel(&umem->fq, nb_frags);
+ }
+ frags_processed -= nb_frags;
+ }
+
+ if (ifobj->use_fill_ring)
+ xsk_ring_prod__submit(&umem->fq, frags_processed);
+ if (ifobj->release_rx)
+ xsk_ring_cons__release(&xsk->rx, frags_processed);
+
+ pthread_mutex_lock(&pacing_mutex);
+ pkts_in_flight -= pkts_sent;
+ pthread_mutex_unlock(&pacing_mutex);
+ pkts_sent = 0;
+
+ return TEST_CONTINUE;
+}
+
+bool all_packets_received(struct test_spec *test, struct xsk_socket_info *xsk, u32 sock_num,
+ unsigned long *bitmap)
+{
+ struct pkt_stream *pkt_stream = xsk->pkt_stream;
+
+ if (!pkt_stream) {
+ __set_bit(sock_num, bitmap);
+ return false;
+ }
+
+ if (pkt_stream->nb_rx_pkts == pkt_stream->nb_valid_entries) {
+ __set_bit(sock_num, bitmap);
+ if (bitmap_full(bitmap, test->nb_sockets))
+ return true;
+ }
+
+ return false;
+}
+
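+/* Round-robin over all RX sockets until each one has received every
+ * valid packet in its stream, or the THREAD_TMOUT deadline expires.
+ */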
+static int receive_pkts(struct test_spec *test)
+{
+ struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
+ DECLARE_BITMAP(bitmap, test->nb_sockets);
+ struct xsk_socket_info *xsk;
+ u32 sock_num = 0;
+ int res, ret;
+
+ bitmap_zero(bitmap, test->nb_sockets);
+
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ return TEST_FAILURE;
+
+ timeradd(&tv_now, &tv_timeout, &tv_end);
+
+ while (1) {
+ xsk = &test->ifobj_rx->xsk_arr[sock_num];
+
+ if ((all_packets_received(test, xsk, sock_num, bitmap)))
+ break;
+
+ res = __receive_pkts(test, xsk);
+ if (!(res == TEST_PASS || res == TEST_CONTINUE))
+ return res;
+
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ return TEST_FAILURE;
+
+ if (timercmp(&tv_now, &tv_end, >)) {
+ ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
+ return TEST_FAILURE;
+ }
+ sock_num = (sock_num + 1) % test->nb_sockets;
+ }
+
+ return TEST_PASS;
+}
+
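+/* Queue up to one batch of descriptors on the TX ring. Transmission is
+ * paced against the receiver: if more packets are in flight than the
+ * UMEM can absorb, just kick TX and ask the caller to retry.
+ */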
+static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, bool timeout)
+{
+ u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
+ struct pkt_stream *pkt_stream = xsk->pkt_stream;
+ struct xsk_umem_info *umem = ifobject->umem;
+ bool use_poll = ifobject->use_poll;
+ struct pollfd fds = { };
+ int ret;
+
+ buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
+ /* pkts_in_flight might be negative if many invalid packets are sent */
+ if (pkts_in_flight >= (int)((umem_size(umem) - xsk->batch_size * buffer_len) /
+ buffer_len)) {
+ ret = kick_tx(xsk);
+ if (ret)
+ return TEST_FAILURE;
+ return TEST_CONTINUE;
+ }
+
+ fds.fd = xsk_socket__fd(xsk->xsk);
+ fds.events = POLLOUT;
+
+ while (xsk_ring_prod__reserve(&xsk->tx, xsk->batch_size, &idx) < xsk->batch_size) {
+ if (use_poll) {
+ ret = poll(&fds, 1, POLL_TMOUT);
+ if (timeout) {
+ if (ret < 0) {
+ ksft_print_msg("ERROR: [%s] Poll error %d\n",
+ __func__, errno);
+ return TEST_FAILURE;
+ }
+ if (ret == 0)
+ return TEST_PASS;
+ break;
+ }
+ if (ret <= 0) {
+ ksft_print_msg("ERROR: [%s] Poll error %d\n",
+ __func__, errno);
+ return TEST_FAILURE;
+ }
+ }
+
+ complete_pkts(xsk, xsk->batch_size);
+ }
+
+ for (i = 0; i < xsk->batch_size; i++) {
+ struct pkt *pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
+ u32 nb_frags_left, nb_frags, bytes_written = 0;
+
+ if (!pkt)
+ break;
+
+ nb_frags = pkt_nb_frags(umem->frame_size, pkt_stream, pkt);
+ if (nb_frags > xsk->batch_size - i) {
+ pkt_stream_cancel(pkt_stream);
+ xsk_ring_prod__cancel(&xsk->tx, xsk->batch_size - i);
+ break;
+ }
+ nb_frags_left = nb_frags;
+
+ while (nb_frags_left--) {
+ struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
+
+ tx_desc->addr = pkt_get_addr(pkt, ifobject->umem);
+ if (pkt_stream->verbatim) {
+ tx_desc->len = pkt->len;
+ tx_desc->options = pkt->options;
+ } else if (nb_frags_left) {
+ tx_desc->len = umem->frame_size;
+ tx_desc->options = XDP_PKT_CONTD;
+ } else {
+ tx_desc->len = pkt->len - bytes_written;
+ tx_desc->options = 0;
+ }
+ if (pkt->valid)
+ pkt_generate(xsk, umem, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
+ bytes_written);
+ bytes_written += tx_desc->len;
+
+ print_verbose("Tx addr: %llx len: %u options: %u pkt_nb: %u\n",
+ tx_desc->addr, tx_desc->len, tx_desc->options, pkt->pkt_nb);
+
+ if (nb_frags_left) {
+ i++;
+ if (pkt_stream->verbatim)
+ pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
+ }
+ }
+
+ if (pkt && pkt->valid) {
+ valid_pkts++;
+ valid_frags += nb_frags;
+ }
+ }
+
+ pthread_mutex_lock(&pacing_mutex);
+ pkts_in_flight += valid_pkts;
+ pthread_mutex_unlock(&pacing_mutex);
+
+ xsk_ring_prod__submit(&xsk->tx, i);
+ xsk->outstanding_tx += valid_frags;
+
+ if (use_poll) {
+ ret = poll(&fds, 1, POLL_TMOUT);
+ if (ret <= 0) {
+ if (ret == 0 && timeout)
+ return TEST_PASS;
+
+ ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret);
+ return TEST_FAILURE;
+ }
+ }
+
+ if (!timeout) {
+ if (complete_pkts(xsk, i))
+ return TEST_FAILURE;
+
+ usleep(10);
+ return TEST_PASS;
+ }
+
+ return TEST_CONTINUE;
+}
+
+static int wait_for_tx_completion(struct xsk_socket_info *xsk)
+{
+ struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
+ int ret;
+
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ return TEST_FAILURE;
+ timeradd(&tv_now, &tv_timeout, &tv_end);
+
+ while (xsk->outstanding_tx) {
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ return TEST_FAILURE;
+ if (timercmp(&tv_now, &tv_end, >)) {
+ ksft_print_msg("ERROR: [%s] Transmission loop timed out\n", __func__);
+ return TEST_FAILURE;
+ }
+
+ complete_pkts(xsk, xsk->batch_size);
+ }
+
+ return TEST_PASS;
+}
+
+bool all_packets_sent(struct test_spec *test, unsigned long *bitmap)
+{
+ return bitmap_full(bitmap, test->nb_sockets);
+}
+
+static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
+{
+ bool timeout = !is_umem_valid(test->ifobj_rx);
+ DECLARE_BITMAP(bitmap, test->nb_sockets);
+ u32 i, ret;
+
+ bitmap_zero(bitmap, test->nb_sockets);
+
+ while (!(all_packets_sent(test, bitmap))) {
+ for (i = 0; i < test->nb_sockets; i++) {
+ struct pkt_stream *pkt_stream;
+
+ pkt_stream = ifobject->xsk_arr[i].pkt_stream;
+ if (!pkt_stream || pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) {
+ __set_bit(i, bitmap);
+ continue;
+ }
+ ret = __send_pkts(ifobject, &ifobject->xsk_arr[i], timeout);
+ if (ret == TEST_CONTINUE && !test->fail)
+ continue;
+
+ if ((ret || test->fail) && !timeout)
+ return TEST_FAILURE;
+
+ if (ret == TEST_PASS && timeout)
+ return ret;
+
+ ret = wait_for_tx_completion(&ifobject->xsk_arr[i]);
+ if (ret)
+ return TEST_FAILURE;
+ }
+ }
+
+ return TEST_PASS;
+}
+
+static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
+{
+ int fd = xsk_socket__fd(xsk), err;
+ socklen_t optlen, expected_len;
+
+ optlen = sizeof(*stats);
+ err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen);
+ if (err) {
+ ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
+ __func__, -err, strerror(-err));
+ return TEST_FAILURE;
+ }
+
+ expected_len = sizeof(struct xdp_statistics);
+ if (optlen != expected_len) {
+ ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n",
+ __func__, expected_len, optlen);
+ return TEST_FAILURE;
+ }
+
+ return TEST_PASS;
+}
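
get_xsk_stats() is a thin wrapper around getsockopt(SOL_XDP, XDP_STATISTICS). A standalone sketch that opens a bare AF_XDP socket just to query its (all-zero) statistics; it assumes CAP_NET_RAW and a kernel with AF_XDP support, and the fallback constants below match the current uapi values:

#include <linux/if_xdp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_XDP
#define AF_XDP 44
#endif
#ifndef SOL_XDP
#define SOL_XDP 283
#endif

int main(void)
{
	struct xdp_statistics stats;
	socklen_t optlen = sizeof(stats);
	int fd = socket(AF_XDP, SOCK_RAW, 0);

	if (fd < 0) {
		perror("socket(AF_XDP)");
		return 1;
	}
	if (getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen)) {
		perror("getsockopt(XDP_STATISTICS)");
		close(fd);
		return 1;
	}
	printf("rx_dropped: %llu rx_ring_full: %llu\n",
	       (unsigned long long)stats.rx_dropped,
	       (unsigned long long)stats.rx_ring_full);
	close(fd);
	return 0;
}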
+
+static int validate_rx_dropped(struct ifobject *ifobject)
+{
+ struct xsk_socket *xsk = ifobject->xsk->xsk;
+ struct xdp_statistics stats;
+ int err;
+
+ err = kick_rx(ifobject->xsk);
+ if (err)
+ return TEST_FAILURE;
+
+ err = get_xsk_stats(xsk, &stats);
+ if (err)
+ return TEST_FAILURE;
+
+ /* The receiver calls getsockopt after receiving the last (valid)
+ * packet which is not the final packet sent in this test (valid and
+ * invalid packets are sent in alternating fashion with the final
+ * packet being invalid). Since the last packet may or may not have
+ * been dropped already, both outcomes must be allowed.
+ */
+ if (stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 ||
+ stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 - 1)
+ return TEST_PASS;
+
+ return TEST_FAILURE;
+}
+
+static int validate_rx_full(struct ifobject *ifobject)
+{
+ struct xsk_socket *xsk = ifobject->xsk->xsk;
+ struct xdp_statistics stats;
+ int err;
+
+ usleep(1000);
+ err = kick_rx(ifobject->xsk);
+ if (err)
+ return TEST_FAILURE;
+
+ err = get_xsk_stats(xsk, &stats);
+ if (err)
+ return TEST_FAILURE;
+
+ if (stats.rx_ring_full)
+ return TEST_PASS;
+
+ return TEST_FAILURE;
+}
+
+static int validate_fill_empty(struct ifobject *ifobject)
+{
+ struct xsk_socket *xsk = ifobject->xsk->xsk;
+ struct xdp_statistics stats;
+ int err;
+
+ usleep(1000);
+ err = kick_rx(ifobject->xsk);
+ if (err)
+ return TEST_FAILURE;
+
+ err = get_xsk_stats(xsk, &stats);
+ if (err)
+ return TEST_FAILURE;
+
+ if (stats.rx_fill_ring_empty_descs)
+ return TEST_PASS;
+
+ return TEST_FAILURE;
+}
+
+static int validate_tx_invalid_descs(struct ifobject *ifobject)
+{
+ struct xsk_socket *xsk = ifobject->xsk->xsk;
+ int fd = xsk_socket__fd(xsk);
+ struct xdp_statistics stats;
+ socklen_t optlen;
+ int err;
+
+ optlen = sizeof(stats);
+ err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
+ if (err) {
+ ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
+ __func__, -err, strerror(-err));
+ return TEST_FAILURE;
+ }
+
+ if (stats.tx_invalid_descs != ifobject->xsk->pkt_stream->nb_pkts / 2) {
+ ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%llu] expected [%u]\n",
+ __func__,
+ (unsigned long long)stats.tx_invalid_descs,
+ ifobject->xsk->pkt_stream->nb_pkts / 2);
+ return TEST_FAILURE;
+ }
+
+ return TEST_PASS;
+}
+
+static int xsk_configure(struct test_spec *test, struct ifobject *ifobject,
+ struct xsk_umem_info *umem, bool tx)
+{
+ int i, ret;
+
+ for (i = 0; i < test->nb_sockets; i++) {
+ bool shared = (ifobject->shared_umem && tx) ? true : !!i;
+ u32 ctr = 0;
+
+ while (ctr++ < SOCK_RECONF_CTR) {
+ ret = xsk_configure_socket(&ifobject->xsk_arr[i], umem,
+ ifobject, shared);
+ if (!ret)
+ break;
+
+ /* Retry if it fails as xsk_socket__create() is asynchronous */
+ if (ctr >= SOCK_RECONF_CTR)
+ return ret;
+ usleep(USLEEP_MAX);
+ }
+ if (ifobject->busy_poll) {
+ ret = enable_busy_poll(&ifobject->xsk_arr[i]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject)
+{
+ int ret = xsk_configure(test, ifobject, test->ifobj_rx->umem, true);
+
+ if (ret)
+ return ret;
+ ifobject->xsk = &ifobject->xsk_arr[0];
+ ifobject->xskmap = test->ifobj_rx->xskmap;
+ memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
+ ifobject->umem->base_addr = 0;
+
+ return 0;
+}
+
+static int xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream,
+ bool fill_up)
+{
+ u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM;
+ u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts;
+ int ret;
+
+ if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
+ buffers_to_fill = umem->num_frames;
+ else
+ buffers_to_fill = umem->fill_size;
+
+ ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
+ if (ret != buffers_to_fill)
+ return -ENOSPC;
+
+ while (filled < buffers_to_fill) {
+ struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts);
+ u64 addr;
+ u32 i;
+
+ for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt_stream, pkt); i++) {
+ if (!pkt) {
+ if (!fill_up)
+ break;
+ addr = filled * umem->frame_size + umem->base_addr;
+ } else if (pkt->offset >= 0) {
+ addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem);
+ } else {
+ addr = pkt->offset + umem_alloc_buffer(umem);
+ }
+
+ *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
+ if (++filled >= buffers_to_fill)
+ break;
+ }
+ }
+ xsk_ring_prod__submit(&umem->fq, filled);
+ xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled);
+
+ pkt_stream_reset(pkt_stream);
+ umem_reset_alloc(umem);
+
+ return 0;
+}
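
Stripped of the packet-stream bookkeeping, xsk_populate_fill_ring() is the standard producer-ring sequence from the bundled xsk.h: reserve slots, write one buffer address per slot, submit. A hedged sketch of just that core, assuming the xsk.h helpers, <errno.h>, and the u32/u64 typedefs used throughout this file (the function name is illustrative):

/* Hand 'count' sequential frames to the kernel via the fill ring.
 * Returns 0 on success, -ENOSPC if the ring cannot hold 'count' entries.
 */
static int fill_ring_populate(struct xsk_ring_prod *fq, u32 frame_size, u32 count)
{
	u32 idx = 0, i;

	if (xsk_ring_prod__reserve(fq, count, &idx) != count)
		return -ENOSPC;

	for (i = 0; i < count; i++)
		*xsk_ring_prod__fill_addr(fq, idx + i) = (u64)i * frame_size;

	xsk_ring_prod__submit(fq, count);
	return 0;
}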
+
+static int thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
+{
+ LIBBPF_OPTS(bpf_xdp_query_opts, opts);
+ int mmap_flags;
+ u64 umem_sz;
+ void *bufs;
+ int ret;
+ u32 i;
+
+ umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
+ mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
+
+ if (ifobject->umem->unaligned_mode)
+ mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
+
+ if (ifobject->shared_umem)
+ umem_sz *= 2;
+
+ bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
+ if (bufs == MAP_FAILED)
+ return -errno;
+
+ ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz);
+ if (ret)
+ return ret;
+
+ ret = xsk_configure(test, ifobject, ifobject->umem, false);
+ if (ret)
+ return ret;
+
+ ifobject->xsk = &ifobject->xsk_arr[0];
+
+ if (!ifobject->rx_on)
+ return 0;
+
+ ret = xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream,
+ ifobject->use_fill_ring);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < test->nb_sockets; i++) {
+ ifobject->xsk = &ifobject->xsk_arr[i];
+ ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void *worker_testapp_validate_tx(void *arg)
+{
+ struct test_spec *test = (struct test_spec *)arg;
+ struct ifobject *ifobject = test->ifobj_tx;
+ int err;
+
+ if (test->current_step == 1) {
+ if (!ifobject->shared_umem) {
+ if (thread_common_ops(test, ifobject)) {
+ test->fail = true;
+ pthread_exit(NULL);
+ }
+ } else {
+ if (thread_common_ops_tx(test, ifobject)) {
+ test->fail = true;
+ pthread_exit(NULL);
+ }
+ }
+ }
+
+ err = send_pkts(test, ifobject);
+
+ if (!err && ifobject->validation_func)
+ err = ifobject->validation_func(ifobject);
+ if (err)
+ test->fail = true;
+
+ pthread_exit(NULL);
+}
+
+void *worker_testapp_validate_rx(void *arg)
+{
+ struct test_spec *test = (struct test_spec *)arg;
+ struct ifobject *ifobject = test->ifobj_rx;
+ int err;
+
+ if (test->current_step == 1) {
+ err = thread_common_ops(test, ifobject);
+ } else {
+ xsk_clear_xskmap(ifobject->xskmap);
+ err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, 0);
+ if (err)
+ ksft_print_msg("Error: Failed to update xskmap, error %s\n",
+ strerror(-err));
+ }
+
+ pthread_barrier_wait(&barr);
+
+ /* Exit only after the barrier on error so the other thread is not left stuck in it */
+ if (err) {
+ test->fail = true;
+ pthread_exit(NULL);
+ }
+
+ err = receive_pkts(test);
+
+ if (!err && ifobject->validation_func)
+ err = ifobject->validation_func(ifobject);
+
+ if (err) {
+ if (!test->adjust_tail) {
+ test->fail = true;
+ } else {
+ bool supported;
+
+ if (is_adjust_tail_supported(ifobject->xdp_progs, &supported))
+ test->fail = true;
+ else if (!supported)
+ test->adjust_tail_support = false;
+ else
+ test->fail = true;
+ }
+ }
+
+ pthread_exit(NULL);
+}
+
+static void testapp_clean_xsk_umem(struct ifobject *ifobj)
+{
+ u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;
+
+ if (ifobj->shared_umem)
+ umem_sz *= 2;
+
+ umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
+ xsk_umem__delete(ifobj->umem->umem);
+ munmap(ifobj->umem->buffer, umem_sz);
+}
+
+static void handler(int signum)
+{
+ pthread_exit(NULL);
+}
+
+static bool xdp_prog_changed_rx(struct test_spec *test)
+{
+ struct ifobject *ifobj = test->ifobj_rx;
+
+ return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode;
+}
+
+static bool xdp_prog_changed_tx(struct test_spec *test)
+{
+ struct ifobject *ifobj = test->ifobj_tx;
+
+ return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode;
+}
+
+static int xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog,
+ struct bpf_map *xskmap, enum test_mode mode)
+{
+ int err;
+
+ xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode));
+ err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode));
+ if (err) {
+ ksft_print_msg("Error attaching XDP program\n");
+ return err;
+ }
+
+ if (ifobj->mode != mode && (mode == TEST_MODE_DRV || mode == TEST_MODE_ZC))
+ if (!xsk_is_in_mode(ifobj->ifindex, XDP_FLAGS_DRV_MODE)) {
+ ksft_print_msg("ERROR: XDP prog not in DRV mode\n");
+ return -EINVAL;
+ }
+
+ ifobj->xdp_prog = xdp_prog;
+ ifobj->xskmap = xskmap;
+ ifobj->mode = mode;
+
+ return 0;
+}
+
+static int xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx,
+ struct ifobject *ifobj_tx)
+{
+ int err = 0;
+
+ if (xdp_prog_changed_rx(test)) {
+ err = xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode);
+ if (err)
+ return err;
+ }
+
+ if (!ifobj_tx || ifobj_tx->shared_umem)
+ return 0;
+
+ if (xdp_prog_changed_tx(test))
+ err = xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode);
+
+ return err;
+}
+
+static void clean_sockets(struct test_spec *test, struct ifobject *ifobj)
+{
+ u32 i;
+
+ if (!ifobj || !test)
+ return;
+
+ for (i = 0; i < test->nb_sockets; i++)
+ xsk_socket__delete(ifobj->xsk_arr[i].xsk);
+}
+
+static void clean_umem(struct test_spec *test, struct ifobject *ifobj1, struct ifobject *ifobj2)
+{
+ if (!ifobj1)
+ return;
+
+ testapp_clean_xsk_umem(ifobj1);
+ if (ifobj2 && !ifobj2->shared_umem)
+ testapp_clean_xsk_umem(ifobj2);
+}
+
+static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *ifobj1,
+ struct ifobject *ifobj2)
+{
+ pthread_t t0, t1;
+ int err;
+
+ if (test->mtu > MAX_ETH_PKT_SIZE) {
+ if (test->mode == TEST_MODE_ZC && (!ifobj1->multi_buff_zc_supp ||
+ (ifobj2 && !ifobj2->multi_buff_zc_supp))) {
+ ksft_print_msg("Multi buffer for zero-copy not supported.\n");
+ return TEST_SKIP;
+ }
+ if (test->mode != TEST_MODE_ZC && (!ifobj1->multi_buff_supp ||
+ (ifobj2 && !ifobj2->multi_buff_supp))) {
+ ksft_print_msg("Multi buffer not supported.\n");
+ return TEST_SKIP;
+ }
+ }
+ err = test_spec_set_mtu(test, test->mtu);
+ if (err) {
+ ksft_print_msg("Error, could not set mtu.\n");
+ return TEST_FAILURE;
+ }
+
+ if (ifobj2) {
+ if (pthread_barrier_init(&barr, NULL, 2))
+ return TEST_FAILURE;
+ pkt_stream_reset(ifobj2->xsk->pkt_stream);
+ }
+
+ test->current_step++;
+ pkt_stream_reset(ifobj1->xsk->pkt_stream);
+ pkts_in_flight = 0;
+
+ signal(SIGUSR1, handler);
+ /* Spawn RX thread */
+ pthread_create(&t0, NULL, ifobj1->func_ptr, test);
+
+ if (ifobj2) {
+ pthread_barrier_wait(&barr);
+ if (pthread_barrier_destroy(&barr)) {
+ pthread_kill(t0, SIGUSR1);
+ clean_sockets(test, ifobj1);
+ clean_umem(test, ifobj1, NULL);
+ return TEST_FAILURE;
+ }
+
+ /* Spawn TX thread */
+ pthread_create(&t1, NULL, ifobj2->func_ptr, test);
+
+ pthread_join(t1, NULL);
+ }
+
+ if (!ifobj2)
+ pthread_kill(t0, SIGUSR1);
+ else
+ pthread_join(t0, NULL);
+
+ if (test->total_steps == test->current_step || test->fail) {
+ clean_sockets(test, ifobj1);
+ clean_sockets(test, ifobj2);
+ clean_umem(test, ifobj1, ifobj2);
+ }
+
+ if (test->fail)
+ return TEST_FAILURE;
+
+ return TEST_PASS;
+}
+
+static int testapp_validate_traffic(struct test_spec *test)
+{
+ struct ifobject *ifobj_rx = test->ifobj_rx;
+ struct ifobject *ifobj_tx = test->ifobj_tx;
+
+ if ((ifobj_rx->umem->unaligned_mode && !ifobj_rx->unaligned_supp) ||
+ (ifobj_tx->umem->unaligned_mode && !ifobj_tx->unaligned_supp)) {
+ ksft_print_msg("No huge pages present.\n");
+ return TEST_SKIP;
+ }
+
+ if (test->set_ring) {
+ if (ifobj_tx->hw_ring_size_supp) {
+ if (set_ring_size(ifobj_tx)) {
+ ksft_print_msg("Failed to change HW ring size.\n");
+ return TEST_FAILURE;
+ }
+ } else {
+ ksft_print_msg("Changing HW ring size not supported.\n");
+ return TEST_SKIP;
+ }
+ }
+
+ if (xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx))
+ return TEST_FAILURE;
+ return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx);
+}
+
+static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj)
+{
+ return __testapp_validate_traffic(test, ifobj, NULL);
+}
+
+int testapp_teardown(struct test_spec *test)
+{
+ int i;
+
+ for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
+ if (testapp_validate_traffic(test))
+ return TEST_FAILURE;
+ test_spec_reset(test);
+ }
+
+ return TEST_PASS;
+}
+
+static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
+{
+ thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr;
+ struct ifobject *tmp_ifobj = (*ifobj1);
+
+ (*ifobj1)->func_ptr = (*ifobj2)->func_ptr;
+ (*ifobj2)->func_ptr = tmp_func_ptr;
+
+ *ifobj1 = *ifobj2;
+ *ifobj2 = tmp_ifobj;
+}
+
+int testapp_bidirectional(struct test_spec *test)
+{
+ int res;
+
+ test->ifobj_tx->rx_on = true;
+ test->ifobj_rx->tx_on = true;
+ test->total_steps = 2;
+ if (testapp_validate_traffic(test))
+ return TEST_FAILURE;
+
+ print_verbose("Switching Tx/Rx direction\n");
+ swap_directions(&test->ifobj_rx, &test->ifobj_tx);
+ res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);
+
+ swap_directions(&test->ifobj_rx, &test->ifobj_tx);
+ return res;
+}
+
+static int swap_xsk_resources(struct test_spec *test)
+{
+ int ret;
+
+ test->ifobj_tx->xsk_arr[0].pkt_stream = NULL;
+ test->ifobj_rx->xsk_arr[0].pkt_stream = NULL;
+ test->ifobj_tx->xsk_arr[1].pkt_stream = test->tx_pkt_stream_default;
+ test->ifobj_rx->xsk_arr[1].pkt_stream = test->rx_pkt_stream_default;
+ test->ifobj_tx->xsk = &test->ifobj_tx->xsk_arr[1];
+ test->ifobj_rx->xsk = &test->ifobj_rx->xsk_arr[1];
+
+ ret = xsk_update_xskmap(test->ifobj_rx->xskmap, test->ifobj_rx->xsk->xsk, 0);
+ if (ret)
+ return TEST_FAILURE;
+
+ return TEST_PASS;
+}
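
xsk_update_xskmap(), used here and defined elsewhere in the suite, boils down to a bpf_map_update_elem() keyed by queue index with the socket fd as value. A hedged sketch of that underlying libbpf call (the helper name is illustrative):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Steer traffic for 'queue_id' to the AF_XDP socket behind 'xsk_fd'. */
static int xskmap_add_socket(struct bpf_map *xskmap, __u32 queue_id, int xsk_fd)
{
	return bpf_map_update_elem(bpf_map__fd(xskmap), &queue_id, &xsk_fd, BPF_ANY);
}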
+
+int testapp_xdp_prog_cleanup(struct test_spec *test)
+{
+ test->total_steps = 2;
+ test->nb_sockets = 2;
+ if (testapp_validate_traffic(test))
+ return TEST_FAILURE;
+
+ if (swap_xsk_resources(test)) {
+ clean_sockets(test, test->ifobj_rx);
+ clean_sockets(test, test->ifobj_tx);
+ clean_umem(test, test->ifobj_rx, test->ifobj_tx);
+ return TEST_FAILURE;
+ }
+
+ return testapp_validate_traffic(test);
+}
+
+int testapp_headroom(struct test_spec *test)
+{
+ test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_stats_rx_dropped(struct test_spec *test)
+{
+ if (test->mode == TEST_MODE_ZC) {
+ ksft_print_msg("Can not run RX_DROPPED test for ZC mode\n");
+ return TEST_SKIP;
+ }
+
+ if (pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0))
+ return TEST_FAILURE;
+ test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
+ XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
+ if (pkt_stream_receive_half(test))
+ return TEST_FAILURE;
+ test->ifobj_rx->validation_func = validate_rx_dropped;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_stats_tx_invalid_descs(struct test_spec *test)
+{
+ if (pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0))
+ return TEST_FAILURE;
+ test->ifobj_tx->validation_func = validate_tx_invalid_descs;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_stats_rx_full(struct test_spec *test)
+{
+ if (pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE))
+ return TEST_FAILURE;
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
+
+ test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
+ test->ifobj_rx->release_rx = false;
+ test->ifobj_rx->validation_func = validate_rx_full;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_stats_fill_empty(struct test_spec *test)
+{
+ if (pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE))
+ return TEST_FAILURE;
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
+
+ test->ifobj_rx->use_fill_ring = false;
+ test->ifobj_rx->validation_func = validate_fill_empty;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_send_receive_unaligned(struct test_spec *test)
+{
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ /* Let half of the packets straddle a 4K buffer boundary */
+ if (pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2))
+ return TEST_FAILURE;
+
+ return testapp_validate_traffic(test);
+}
+
+int testapp_send_receive_unaligned_mb(struct test_spec *test)
+{
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE))
+ return TEST_FAILURE;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_single_pkt(struct test_spec *test)
+{
+ struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}};
+
+ if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
+ return TEST_FAILURE;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_send_receive_mb(struct test_spec *test)
+{
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+ if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE))
+ return TEST_FAILURE;
+
+ return testapp_validate_traffic(test);
+}
+
+int testapp_invalid_desc_mb(struct test_spec *test)
+{
+ struct xsk_umem_info *umem = test->ifobj_tx->umem;
+ u64 umem_size = umem->num_frames * umem->frame_size;
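+ /* struct pkt initializer order: {offset, len, pkt_nb, valid, options} */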
+ struct pkt pkts[] = {
+ /* Valid packet for synch to start with */
+ {0, MIN_PKT_SIZE, 0, true, 0},
+ /* Zero frame len is not legal */
+ {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ {0, 0, 0, false, 0},
+ /* Invalid address in the second frame */
+ {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ {umem_size, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ /* Invalid len in the middle */
+ {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ /* Invalid options in the middle */
+ {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XSK_DESC__INVALID_OPTION},
+ /* Transmit 2 frags, receive 3 */
+ {0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, XDP_PKT_CONTD},
+ {0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, 0},
+ /* Middle frame crosses chunk boundary with small length */
+ {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ {-MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false, 0},
+ /* Valid packet for synch so that something is received */
+ {0, MIN_PKT_SIZE, 0, true, 0}};
+
+ if (umem->unaligned_mode) {
+ /* Crossing a chunk boundary allowed */
+ pkts[12].valid = true;
+ pkts[13].valid = true;
+ }
+
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+ if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
+ return TEST_FAILURE;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_invalid_desc(struct test_spec *test)
+{
+ struct xsk_umem_info *umem = test->ifobj_tx->umem;
+ u64 umem_size = umem->num_frames * umem->frame_size;
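+ /* struct pkt initializer order: {offset, len, pkt_nb, valid} */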
+ struct pkt pkts[] = {
+ /* Zero packet address allowed */
+ {0, MIN_PKT_SIZE, 0, true},
+ /* Allowed packet */
+ {0, MIN_PKT_SIZE, 0, true},
+ /* Straddling the start of umem */
+ {-2, MIN_PKT_SIZE, 0, false},
+ /* Packet too large */
+ {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
+ /* Up to end of umem allowed */
+ {umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true},
+ /* After umem ends */
+ {umem_size, MIN_PKT_SIZE, 0, false},
+ /* Straddle the end of umem */
+ {umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
+ /* Straddle a 4K boundary */
+ {0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
+ /* Straddle a 2K boundary */
+ {0x800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true},
+ /* Valid packet for synch so that something is received */
+ {0, MIN_PKT_SIZE, 0, true}};
+
+ if (umem->unaligned_mode) {
+ /* Crossing a page boundary allowed */
+ pkts[7].valid = true;
+ }
+ if (umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
+ /* Crossing a 2K frame size boundary not allowed */
+ pkts[8].valid = false;
+ }
+
+ if (test->ifobj_tx->shared_umem) {
+ pkts[4].offset += umem_size;
+ pkts[5].offset += umem_size;
+ pkts[6].offset += umem_size;
+ }
+
+ if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
+ return TEST_FAILURE;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_xdp_drop(struct test_spec *test)
+{
+ struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
+ struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
+
+ test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
+ skel_rx->maps.xsk, skel_tx->maps.xsk);
+
+ if (pkt_stream_receive_half(test))
+ return TEST_FAILURE;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_xdp_metadata_copy(struct test_spec *test)
+{
+ struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
+ struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
+
+ test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata,
+ skel_tx->progs.xsk_xdp_populate_metadata,
+ skel_rx->maps.xsk, skel_tx->maps.xsk);
+ test->ifobj_rx->use_metadata = true;
+
+ skel_rx->bss->count = 0;
+
+ return testapp_validate_traffic(test);
+}
+
+int testapp_xdp_shared_umem(struct test_spec *test)
+{
+ struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
+ struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
+ int ret;
+
+ test->total_steps = 1;
+ test->nb_sockets = 2;
+
+ test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_shared_umem,
+ skel_tx->progs.xsk_xdp_shared_umem,
+ skel_rx->maps.xsk, skel_tx->maps.xsk);
+
+ if (pkt_stream_even_odd_sequence(test))
+ return TEST_FAILURE;
+
+ ret = testapp_validate_traffic(test);
+
+ release_even_odd_sequence(test);
+
+ return ret;
+}
+
+int testapp_poll_txq_tmout(struct test_spec *test)
+{
+ test->ifobj_tx->use_poll = true;
+ /* Create invalid frames by setting both the umem frame_size and the pkt length to 2048 */
+ test->ifobj_tx->umem->frame_size = 2048;
+ if (pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048))
+ return TEST_FAILURE;
+ return testapp_validate_traffic_single_thread(test, test->ifobj_tx);
+}
+
+int testapp_poll_rxq_tmout(struct test_spec *test)
+{
+ test->ifobj_rx->use_poll = true;
+ return testapp_validate_traffic_single_thread(test, test->ifobj_rx);
+}
+
+int testapp_too_many_frags(struct test_spec *test)
+{
+ struct pkt *pkts;
+ u32 max_frags, i;
+ int ret = TEST_FAILURE;
+
+ if (test->mode == TEST_MODE_ZC) {
+ max_frags = test->ifobj_tx->xdp_zc_max_segs;
+ } else {
+ max_frags = get_max_skb_frags();
+ if (!max_frags) {
+ ksft_print_msg("Can't get MAX_SKB_FRAGS from system, using default (17)\n");
+ max_frags = 17;
+ }
+ max_frags += 1;
+ }
+
+ pkts = calloc(2 * max_frags + 2, sizeof(struct pkt));
+ if (!pkts)
+ return TEST_FAILURE;
+
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+
+ /* Valid packet for synch */
+ pkts[0].len = MIN_PKT_SIZE;
+ pkts[0].valid = true;
+
+ /* One valid packet with the max amount of frags */
+ for (i = 1; i < max_frags + 1; i++) {
+ pkts[i].len = MIN_PKT_SIZE;
+ pkts[i].options = XDP_PKT_CONTD;
+ pkts[i].valid = true;
+ }
+ pkts[max_frags].options = 0;
+
+ /* An invalid packet with the max amount of frags but signals packet
+ * continues on the last frag
+ */
+ for (i = max_frags + 1; i < 2 * max_frags + 1; i++) {
+ pkts[i].len = MIN_PKT_SIZE;
+ pkts[i].options = XDP_PKT_CONTD;
+ pkts[i].valid = false;
+ }
+
+ /* Valid packet for synch */
+ pkts[2 * max_frags + 1].len = MIN_PKT_SIZE;
+ pkts[2 * max_frags + 1].valid = true;
+
+ if (pkt_stream_generate_custom(test, pkts, 2 * max_frags + 2)) {
+ free(pkts);
+ return TEST_FAILURE;
+ }
+
+ ret = testapp_validate_traffic(test);
+ free(pkts);
+ return ret;
+}
+
+static int xsk_load_xdp_programs(struct ifobject *ifobj)
+{
+ ifobj->xdp_progs = xsk_xdp_progs__open_and_load();
+ if (libbpf_get_error(ifobj->xdp_progs))
+ return libbpf_get_error(ifobj->xdp_progs);
+
+ return 0;
+}
+
+/* Probe whether 2 MB huge pages can be mapped */
+static bool hugepages_present(void)
+{
+ size_t mmap_sz = 2 * DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
+ void *bufs;
+
+ bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
+ if (bufs == MAP_FAILED)
+ return false;
+
+ mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
+ munmap(bufs, mmap_sz);
+ return true;
+}
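
The same MAP_HUGETLB probe works as a standalone program for checking whether 2 MB huge pages are available on the current system (the MAP_HUGE_2MB fallback matches the uapi encoding: page-size log2 shifted by MAP_HUGE_SHIFT):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB (21 << 26)	/* 26 == MAP_HUGE_SHIFT */
#endif

int main(void)
{
	size_t sz = 2 * 1024 * 1024;	/* one 2 MB huge page */
	void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB,
		       -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	munmap(p, sz);
	puts("2 MB huge pages available");
	return 0;
}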
+
+int init_iface(struct ifobject *ifobj, thread_func_t func_ptr)
+{
+ LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
+ int err;
+
+ ifobj->func_ptr = func_ptr;
+
+ err = xsk_load_xdp_programs(ifobj);
+ if (err) {
+ ksft_print_msg("Error loading XDP program\n");
+ return err;
+ }
+
+ if (hugepages_present())
+ ifobj->unaligned_supp = true;
+
+ err = bpf_xdp_query(ifobj->ifindex, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (err) {
+ ksft_print_msg("Error querying XDP capabilities\n");
+ return err;
+ }
+ if (query_opts.feature_flags & NETDEV_XDP_ACT_RX_SG)
+ ifobj->multi_buff_supp = true;
+ if (query_opts.feature_flags & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
+ if (query_opts.xdp_zc_max_segs > 1) {
+ ifobj->multi_buff_zc_supp = true;
+ ifobj->xdp_zc_max_segs = query_opts.xdp_zc_max_segs;
+ } else {
+ ifobj->xdp_zc_max_segs = 0;
+ }
+ }
+
+ return 0;
+}
+
+int testapp_send_receive(struct test_spec *test)
+{
+ return testapp_validate_traffic(test);
+}
+
+int testapp_send_receive_2k_frame(struct test_spec *test)
+{
+ test->ifobj_tx->umem->frame_size = 2048;
+ test->ifobj_rx->umem->frame_size = 2048;
+ if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE))
+ return TEST_FAILURE;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_poll_rx(struct test_spec *test)
+{
+ test->ifobj_rx->use_poll = true;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_poll_tx(struct test_spec *test)
+{
+ test->ifobj_tx->use_poll = true;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_aligned_inv_desc(struct test_spec *test)
+{
+ return testapp_invalid_desc(test);
+}
+
+int testapp_aligned_inv_desc_2k_frame(struct test_spec *test)
+{
+ test->ifobj_tx->umem->frame_size = 2048;
+ test->ifobj_rx->umem->frame_size = 2048;
+ return testapp_invalid_desc(test);
+}
+
+int testapp_unaligned_inv_desc(struct test_spec *test)
+{
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ return testapp_invalid_desc(test);
+}
+
+int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test)
+{
+ u64 page_size, umem_size;
+
+ /* Odd frame size so the UMEM doesn't end near a page boundary. */
+ test->ifobj_tx->umem->frame_size = 4001;
+ test->ifobj_rx->umem->frame_size = 4001;
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ /* This test exercises descriptors that straddle the end of
+ * the UMEM but not a page.
+ */
+ page_size = sysconf(_SC_PAGESIZE);
+ umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
+ assert(umem_size % page_size > MIN_PKT_SIZE);
+ assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
+
+ return testapp_invalid_desc(test);
+}
+
+int testapp_aligned_inv_desc_mb(struct test_spec *test)
+{
+ return testapp_invalid_desc_mb(test);
+}
+
+int testapp_unaligned_inv_desc_mb(struct test_spec *test)
+{
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ return testapp_invalid_desc_mb(test);
+}
+
+int testapp_xdp_metadata(struct test_spec *test)
+{
+ return testapp_xdp_metadata_copy(test);
+}
+
+int testapp_xdp_metadata_mb(struct test_spec *test)
+{
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+ return testapp_xdp_metadata_copy(test);
+}
+
+int testapp_hw_sw_min_ring_size(struct test_spec *test)
+{
+ int ret;
+
+ test->set_ring = true;
+ test->total_steps = 2;
+ test->ifobj_tx->ring.tx_pending = DEFAULT_BATCH_SIZE;
+ test->ifobj_tx->ring.rx_pending = DEFAULT_BATCH_SIZE * 2;
+ test->ifobj_tx->xsk->batch_size = 1;
+ test->ifobj_rx->xsk->batch_size = 1;
+ ret = testapp_validate_traffic(test);
+ if (ret)
+ return ret;
+
+ /* Set batch size to hw_ring_size - 1 */
+ test->ifobj_tx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
+ test->ifobj_rx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_hw_sw_max_ring_size(struct test_spec *test)
+{
+ u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 4;
+ int ret;
+
+ test->set_ring = true;
+ test->total_steps = 2;
+ test->ifobj_tx->ring.tx_pending = test->ifobj_tx->ring.tx_max_pending;
+ test->ifobj_tx->ring.rx_pending = test->ifobj_tx->ring.rx_max_pending;
+ test->ifobj_rx->umem->num_frames = max_descs;
+ test->ifobj_rx->umem->fill_size = max_descs;
+ test->ifobj_rx->umem->comp_size = max_descs;
+ test->ifobj_tx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+ test->ifobj_rx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+
+ ret = testapp_validate_traffic(test);
+ if (ret)
+ return ret;
+
+ /* Set batch_size to tx_max_pending - 8 (8152 with the ice default of 8160), as the
+ * ice HW ignores the 3 lowest bits when updating the Rx HW tail register.
+ */
+ test->ifobj_tx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
+ test->ifobj_rx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
+ if (pkt_stream_replace(test, max_descs, MIN_PKT_SIZE)) {
+ clean_sockets(test, test->ifobj_tx);
+ clean_sockets(test, test->ifobj_rx);
+ clean_umem(test, test->ifobj_rx, test->ifobj_tx);
+ return TEST_FAILURE;
+ }
+
+ return testapp_validate_traffic(test);
+}
+
+static int testapp_xdp_adjust_tail(struct test_spec *test, int adjust_value)
+{
+ struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
+ struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
+
+ test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_adjust_tail,
+ skel_tx->progs.xsk_xdp_adjust_tail,
+ skel_rx->maps.xsk, skel_tx->maps.xsk);
+
+ skel_rx->bss->adjust_value = adjust_value;
+
+ return testapp_validate_traffic(test);
+}
+
+static int testapp_adjust_tail(struct test_spec *test, u32 value, u32 pkt_len)
+{
+ int ret;
+
+ test->adjust_tail_support = true;
+ test->adjust_tail = true;
+ test->total_steps = 1;
+
+ ret = pkt_stream_replace_ifobject(test->ifobj_tx, DEFAULT_BATCH_SIZE, pkt_len);
+ if (ret)
+ return TEST_FAILURE;
+
+ ret = pkt_stream_replace_ifobject(test->ifobj_rx, DEFAULT_BATCH_SIZE, pkt_len + value);
+ if (ret)
+ return TEST_FAILURE;
+
+ ret = testapp_xdp_adjust_tail(test, value);
+ if (ret)
+ return ret;
+
+ if (!test->adjust_tail_support) {
+ ksft_print_msg("%s %sResize pkt with bpf_xdp_adjust_tail() not supported\n",
+ mode_string(test), busy_poll_string(test));
+ return TEST_SKIP;
+ }
+
+ return 0;
+}
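
The BPF side of this test, xsk_xdp_adjust_tail, is loaded from the test skeleton and is not part of this hunk. A minimal sketch of what an XDP program resizing packets with bpf_xdp_adjust_tail() can look like; the real test program additionally redirects matching packets into an XSKMAP:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

volatile int adjust_value;	/* written from userspace via the skeleton's bss */

SEC("xdp")
int xdp_resize(struct xdp_md *ctx)
{
	/* Negative delta shrinks the packet, positive grows it. */
	if (bpf_xdp_adjust_tail(ctx, adjust_value))
		return XDP_DROP;	/* resize failed, e.g. grow past tailroom */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";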
+
+int testapp_adjust_tail_shrink(struct test_spec *test)
+{
+ /* Shrink by 4 bytes for testing purpose */
+ return testapp_adjust_tail(test, -4, MIN_PKT_SIZE * 2);
+}
+
+int testapp_adjust_tail_shrink_mb(struct test_spec *test)
+{
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+ /* Shrink by the frag size */
+ return testapp_adjust_tail(test, -XSK_UMEM__MAX_FRAME_SIZE, XSK_UMEM__LARGE_FRAME_SIZE * 2);
+}
+
+int testapp_adjust_tail_grow(struct test_spec *test)
+{
+ /* Grow by 4 bytes for testing purpose */
+ return testapp_adjust_tail(test, 4, MIN_PKT_SIZE * 2);
+}
+
+int testapp_adjust_tail_grow_mb(struct test_spec *test)
+{
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+ /* Grow by (frag_size - last_frag_size) - 1 to stay inside the last fragment */
+ return testapp_adjust_tail(test, (XSK_UMEM__MAX_FRAME_SIZE / 2) - 1,
+ XSK_UMEM__LARGE_FRAME_SIZE * 2);
+}
+
+int testapp_tx_queue_consumer(struct test_spec *test)
+{
+ int nr_packets;
+
+ if (test->mode == TEST_MODE_ZC) {
+ ksft_print_msg("Can not run TX_QUEUE_CONSUMER test for ZC mode\n");
+ return TEST_SKIP;
+ }
+
+ nr_packets = MAX_TX_BUDGET_DEFAULT + 1;
+ if (pkt_stream_replace(test, nr_packets, MIN_PKT_SIZE))
+ return TEST_FAILURE;
+ test->ifobj_tx->xsk->batch_size = nr_packets;
+ test->ifobj_tx->xsk->check_consumer = true;
+
+ return testapp_validate_traffic(test);
+}
+
+struct ifobject *ifobject_create(void)
+{
+ struct ifobject *ifobj;
+
+ ifobj = calloc(1, sizeof(struct ifobject));
+ if (!ifobj)
+ return NULL;
+
+ ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
+ if (!ifobj->xsk_arr)
+ goto out_xsk_arr;
+
+ ifobj->umem = calloc(1, sizeof(*ifobj->umem));
+ if (!ifobj->umem)
+ goto out_umem;
+
+ return ifobj;
+
+out_umem:
+ free(ifobj->xsk_arr);
+out_xsk_arr:
+ free(ifobj);
+ return NULL;
+}
+
+void ifobject_delete(struct ifobject *ifobj)
+{
+ free(ifobj->umem);
+ free(ifobj->xsk_arr);
+ free(ifobj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_xsk.h b/tools/testing/selftests/bpf/prog_tests/test_xsk.h
new file mode 100644
index 000000000000..8fc78a057de0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_xsk.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef TEST_XSK_H_
+#define TEST_XSK_H_
+
+#include <linux/ethtool.h>
+#include <linux/if_xdp.h>
+
+#include "../kselftest.h"
+#include "xsk.h"
+
+#ifndef SO_PREFER_BUSY_POLL
+#define SO_PREFER_BUSY_POLL 69
+#endif
+
+#ifndef SO_BUSY_POLL_BUDGET
+#define SO_BUSY_POLL_BUDGET 70
+#endif
+
+#define TEST_PASS 0
+#define TEST_FAILURE -1
+#define TEST_CONTINUE 1
+#define TEST_SKIP 2
+
+#define DEFAULT_PKT_CNT (4 * 1024)
+#define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4)
+#define HUGEPAGE_SIZE (2 * 1024 * 1024)
+#define MIN_PKT_SIZE 64
+#define MAX_ETH_PKT_SIZE 1518
+#define MAX_INTERFACE_NAME_CHARS 16
+#define MAX_TEST_NAME_SIZE 48
+#define SOCK_RECONF_CTR 10
+#define USLEEP_MAX 10000
+
+extern bool opt_verbose;
+#define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0)
+
+
+static inline u32 ceil_u32(u32 a, u32 b)
+{
+ return (a + b - 1) / b;
+}
+
+static inline u64 ceil_u64(u64 a, u64 b)
+{
+ return (a + b - 1) / b;
+}
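
These helpers round up to whole units; the suite uses ceil_u64() to round a UMEM size up to whole 2 MB huge pages before mmap()/munmap(). A short worked example using the HUGEPAGE_SIZE constant above:

/* A UMEM of 5 MiB + 1 byte needs three 2 MiB huge pages: */
u64 pages  = ceil_u64(5 * 1024 * 1024 + 1, HUGEPAGE_SIZE);	/* = 3 */
u64 mapped = pages * HUGEPAGE_SIZE;				/* = 6 MiB */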
+
+/* Modes every test can be run in */
+enum test_mode {
+ TEST_MODE_SKB,
+ TEST_MODE_DRV,
+ TEST_MODE_ZC,
+ TEST_MODE_ALL
+};
+
+struct ifobject;
+struct test_spec;
+typedef int (*validation_func_t)(struct ifobject *ifobj);
+typedef void *(*thread_func_t)(void *arg);
+typedef int (*test_func_t)(struct test_spec *test);
+
+struct xsk_socket_info {
+ struct xsk_ring_cons rx;
+ struct xsk_ring_prod tx;
+ struct xsk_umem_info *umem;
+ struct xsk_socket *xsk;
+ struct pkt_stream *pkt_stream;
+ u32 outstanding_tx;
+ u32 rxqsize;
+ u32 batch_size;
+ u8 dst_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ bool check_consumer;
+};
+
+int kick_rx(struct xsk_socket_info *xsk);
+int kick_tx(struct xsk_socket_info *xsk);
+
+struct xsk_umem_info {
+ struct xsk_ring_prod fq;
+ struct xsk_ring_cons cq;
+ struct xsk_umem *umem;
+ u64 next_buffer;
+ u32 num_frames;
+ u32 frame_headroom;
+ void *buffer;
+ u32 frame_size;
+ u32 base_addr;
+ u32 fill_size;
+ u32 comp_size;
+ bool unaligned_mode;
+};
+
+struct set_hw_ring {
+ u32 default_tx;
+ u32 default_rx;
+};
+
+int hw_ring_size_reset(struct ifobject *ifobj);
+
+struct ifobject {
+ char ifname[MAX_INTERFACE_NAME_CHARS];
+ struct xsk_socket_info *xsk;
+ struct xsk_socket_info *xsk_arr;
+ struct xsk_umem_info *umem;
+ thread_func_t func_ptr;
+ validation_func_t validation_func;
+ struct xsk_xdp_progs *xdp_progs;
+ struct bpf_map *xskmap;
+ struct bpf_program *xdp_prog;
+ struct ethtool_ringparam ring;
+ struct set_hw_ring set_ring;
+ enum test_mode mode;
+ int ifindex;
+ int mtu;
+ u32 bind_flags;
+ u32 xdp_zc_max_segs;
+ bool tx_on;
+ bool rx_on;
+ bool use_poll;
+ bool busy_poll;
+ bool use_fill_ring;
+ bool release_rx;
+ bool shared_umem;
+ bool use_metadata;
+ bool unaligned_supp;
+ bool multi_buff_supp;
+ bool multi_buff_zc_supp;
+ bool hw_ring_size_supp;
+};
+struct ifobject *ifobject_create(void);
+void ifobject_delete(struct ifobject *ifobj);
+int init_iface(struct ifobject *ifobj, thread_func_t func_ptr);
+
+int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer, u64 size);
+int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
+ struct ifobject *ifobject, bool shared);
+
+
+struct pkt {
+ int offset;
+ u32 len;
+ u32 pkt_nb;
+ bool valid;
+ u16 options;
+};
+
+struct pkt_stream {
+ u32 nb_pkts;
+ u32 current_pkt_nb;
+ struct pkt *pkts;
+ u32 max_pkt_len;
+ u32 nb_rx_pkts;
+ u32 nb_valid_entries;
+ bool verbatim;
+};
+
+static inline bool pkt_continues(u32 options)
+{
+ return options & XDP_PKT_CONTD;
+}
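
pkt_continues() drives the multi-buffer paths: every frame of a packet except the last carries XDP_PKT_CONTD in its descriptor options. A hedged sketch of consuming one complete packet's descriptors from the Rx ring, assuming the caller already peeked enough entries starting at 'idx' (the function name is illustrative):

/* Returns the number of frags making up one packet; *pkt_len gets its total length. */
static u32 consume_one_pkt(struct xsk_ring_cons *rx, u32 idx, u32 *pkt_len)
{
	u32 frags = 0;

	*pkt_len = 0;
	while (1) {
		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(rx, idx + frags);

		*pkt_len += desc->len;
		frags++;
		if (!pkt_continues(desc->options))
			break;
	}
	return frags;
}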
+
+struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len);
+void pkt_stream_delete(struct pkt_stream *pkt_stream);
+void pkt_stream_reset(struct pkt_stream *pkt_stream);
+void pkt_stream_restore_default(struct test_spec *test);
+
+struct test_spec {
+ struct ifobject *ifobj_tx;
+ struct ifobject *ifobj_rx;
+ struct pkt_stream *tx_pkt_stream_default;
+ struct pkt_stream *rx_pkt_stream_default;
+ struct bpf_program *xdp_prog_rx;
+ struct bpf_program *xdp_prog_tx;
+ struct bpf_map *xskmap_rx;
+ struct bpf_map *xskmap_tx;
+ test_func_t test_func;
+ int mtu;
+ u16 total_steps;
+ u16 current_step;
+ u16 nb_sockets;
+ bool fail;
+ bool set_ring;
+ bool adjust_tail;
+ bool adjust_tail_support;
+ enum test_mode mode;
+ char name[MAX_TEST_NAME_SIZE];
+};
+
+#define busy_poll_string(test) ((test)->ifobj_tx->busy_poll ? "BUSY-POLL " : "")
+static inline char *mode_string(struct test_spec *test)
+{
+ switch (test->mode) {
+ case TEST_MODE_SKB:
+ return "SKB";
+ case TEST_MODE_DRV:
+ return "DRV";
+ case TEST_MODE_ZC:
+ return "ZC";
+ default:
+ return "BOGUS";
+ }
+}
+
+void test_init(struct test_spec *test, struct ifobject *ifobj_tx,
+ struct ifobject *ifobj_rx, enum test_mode mode,
+ const struct test_spec *test_to_run);
+
+int testapp_adjust_tail_grow(struct test_spec *test);
+int testapp_adjust_tail_grow_mb(struct test_spec *test);
+int testapp_adjust_tail_shrink(struct test_spec *test);
+int testapp_adjust_tail_shrink_mb(struct test_spec *test);
+int testapp_aligned_inv_desc(struct test_spec *test);
+int testapp_aligned_inv_desc_2k_frame(struct test_spec *test);
+int testapp_aligned_inv_desc_mb(struct test_spec *test);
+int testapp_bidirectional(struct test_spec *test);
+int testapp_headroom(struct test_spec *test);
+int testapp_hw_sw_max_ring_size(struct test_spec *test);
+int testapp_hw_sw_min_ring_size(struct test_spec *test);
+int testapp_poll_rx(struct test_spec *test);
+int testapp_poll_rxq_tmout(struct test_spec *test);
+int testapp_poll_tx(struct test_spec *test);
+int testapp_poll_txq_tmout(struct test_spec *test);
+int testapp_send_receive(struct test_spec *test);
+int testapp_send_receive_2k_frame(struct test_spec *test);
+int testapp_send_receive_mb(struct test_spec *test);
+int testapp_send_receive_unaligned(struct test_spec *test);
+int testapp_send_receive_unaligned_mb(struct test_spec *test);
+int testapp_single_pkt(struct test_spec *test);
+int testapp_stats_fill_empty(struct test_spec *test);
+int testapp_stats_rx_dropped(struct test_spec *test);
+int testapp_stats_tx_invalid_descs(struct test_spec *test);
+int testapp_stats_rx_full(struct test_spec *test);
+int testapp_teardown(struct test_spec *test);
+int testapp_too_many_frags(struct test_spec *test);
+int testapp_tx_queue_consumer(struct test_spec *test);
+int testapp_unaligned_inv_desc(struct test_spec *test);
+int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test);
+int testapp_unaligned_inv_desc_mb(struct test_spec *test);
+int testapp_xdp_drop(struct test_spec *test);
+int testapp_xdp_metadata(struct test_spec *test);
+int testapp_xdp_metadata_mb(struct test_spec *test);
+int testapp_xdp_prog_cleanup(struct test_spec *test);
+int testapp_xdp_shared_umem(struct test_spec *test);
+
+void *worker_testapp_validate_rx(void *arg);
+void *worker_testapp_validate_tx(void *arg);
+
+static const struct test_spec tests[] = {
+ {.name = "SEND_RECEIVE", .test_func = testapp_send_receive},
+ {.name = "SEND_RECEIVE_2K_FRAME", .test_func = testapp_send_receive_2k_frame},
+ {.name = "SEND_RECEIVE_SINGLE_PKT", .test_func = testapp_single_pkt},
+ {.name = "POLL_RX", .test_func = testapp_poll_rx},
+ {.name = "POLL_TX", .test_func = testapp_poll_tx},
+ {.name = "POLL_RXQ_FULL", .test_func = testapp_poll_rxq_tmout},
+ {.name = "POLL_TXQ_FULL", .test_func = testapp_poll_txq_tmout},
+ {.name = "ALIGNED_INV_DESC", .test_func = testapp_aligned_inv_desc},
+ {.name = "ALIGNED_INV_DESC_2K_FRAME_SIZE", .test_func = testapp_aligned_inv_desc_2k_frame},
+ {.name = "UMEM_HEADROOM", .test_func = testapp_headroom},
+ {.name = "BIDIRECTIONAL", .test_func = testapp_bidirectional},
+ {.name = "STAT_RX_DROPPED", .test_func = testapp_stats_rx_dropped},
+ {.name = "STAT_TX_INVALID", .test_func = testapp_stats_tx_invalid_descs},
+ {.name = "STAT_RX_FULL", .test_func = testapp_stats_rx_full},
+ {.name = "STAT_FILL_EMPTY", .test_func = testapp_stats_fill_empty},
+ {.name = "XDP_PROG_CLEANUP", .test_func = testapp_xdp_prog_cleanup},
+ {.name = "XDP_DROP_HALF", .test_func = testapp_xdp_drop},
+ {.name = "XDP_SHARED_UMEM", .test_func = testapp_xdp_shared_umem},
+ {.name = "XDP_METADATA_COPY", .test_func = testapp_xdp_metadata},
+ {.name = "XDP_METADATA_COPY_MULTI_BUFF", .test_func = testapp_xdp_metadata_mb},
+ {.name = "ALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_aligned_inv_desc_mb},
+ {.name = "TOO_MANY_FRAGS", .test_func = testapp_too_many_frags},
+ {.name = "XDP_ADJUST_TAIL_SHRINK", .test_func = testapp_adjust_tail_shrink},
+ {.name = "TX_QUEUE_CONSUMER", .test_func = testapp_tx_queue_consumer},
+ };
+
+static const struct test_spec ci_skip_tests[] = {
+ /* Flaky tests */
+ {.name = "XDP_ADJUST_TAIL_SHRINK_MULTI_BUFF", .test_func = testapp_adjust_tail_shrink_mb},
+ {.name = "XDP_ADJUST_TAIL_GROW", .test_func = testapp_adjust_tail_grow},
+ {.name = "XDP_ADJUST_TAIL_GROW_MULTI_BUFF", .test_func = testapp_adjust_tail_grow_mb},
+ {.name = "SEND_RECEIVE_9K_PACKETS", .test_func = testapp_send_receive_mb},
+ /* Tests with huge page dependency */
+ {.name = "SEND_RECEIVE_UNALIGNED", .test_func = testapp_send_receive_unaligned},
+ {.name = "UNALIGNED_INV_DESC", .test_func = testapp_unaligned_inv_desc},
+ {.name = "UNALIGNED_INV_DESC_4001_FRAME_SIZE",
+ .test_func = testapp_unaligned_inv_desc_4001_frame},
+ {.name = "SEND_RECEIVE_UNALIGNED_9K_PACKETS",
+ .test_func = testapp_send_receive_unaligned_mb},
+ {.name = "UNALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_unaligned_inv_desc_mb},
+ /* Test with HW ring size dependency */
+ {.name = "HW_SW_MIN_RING_SIZE", .test_func = testapp_hw_sw_min_ring_size},
+ {.name = "HW_SW_MAX_RING_SIZE", .test_func = testapp_hw_sw_max_ring_size},
+ /* Too long test */
+ {.name = "TEARDOWN", .test_func = testapp_teardown},
+};
+
+
+#endif /* TEST_XSK_H_ */
diff --git a/tools/testing/selftests/bpf/prog_tests/time_tai.c b/tools/testing/selftests/bpf/prog_tests/time_tai.c
new file mode 100644
index 000000000000..f45af1b0ef2c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/time_tai.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Linutronix GmbH */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "test_time_tai.skel.h"
+
+#include <time.h>
+#include <stdint.h>
+
+#define TAI_THRESHOLD 1000000000ULL /* 1s */
+#define NSEC_PER_SEC 1000000000ULL
+
+static __u64 ts_to_ns(const struct timespec *ts)
+{
+ return ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;
+}
+
+void test_time_tai(void)
+{
+ struct __sk_buff skb = {
+ .cb[0] = 0,
+ .cb[1] = 0,
+ .tstamp = 0,
+ };
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ .ctx_out = &skb,
+ .ctx_size_out = sizeof(skb),
+ );
+ struct test_time_tai *skel;
+ struct timespec now_tai;
+ __u64 ts1, ts2, now;
+ int ret, prog_fd;
+
+ /* Open and load */
+ skel = test_time_tai__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "tai_open"))
+ return;
+
+ /* Run test program */
+ prog_fd = bpf_program__fd(skel->progs.time_tai);
+ ret = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(ret, "test_run");
+
+ /* Retrieve generated TAI timestamps */
+ ts1 = skb.tstamp;
+ ts2 = skb.cb[0] | ((__u64)skb.cb[1] << 32);
+
+ /* TAI != 0 */
+ ASSERT_NEQ(ts1, 0, "tai_ts1");
+ ASSERT_NEQ(ts2, 0, "tai_ts2");
+
+ /* TAI is moving forward only */
+ ASSERT_GE(ts2, ts1, "tai_forward");
+
+ /* Check for future */
+ ret = clock_gettime(CLOCK_TAI, &now_tai);
+ ASSERT_EQ(ret, 0, "tai_gettime");
+ now = ts_to_ns(&now_tai);
+
+ ASSERT_TRUE(now > ts1, "tai_future_ts1");
+ ASSERT_TRUE(now > ts2, "tai_future_ts2");
+
+ /* Check for reasonable range */
+ ASSERT_TRUE(now - ts1 < TAI_THRESHOLD, "tai_range_ts1");
+ ASSERT_TRUE(now - ts2 < TAI_THRESHOLD, "tai_range_ts2");
+
+ test_time_tai__destroy(skel);
+}
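
CLOCK_TAI runs ahead of CLOCK_REALTIME by the kernel's configured TAI offset (normally maintained via NTP/adjtimex(); 37 s since the 2017 leap second, or 0 on systems where it was never set). A quick runnable check:

#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec tai, real;

	clock_gettime(CLOCK_TAI, &tai);
	clock_gettime(CLOCK_REALTIME, &real);
	printf("TAI - REALTIME = %ld s\n", (long)(tai.tv_sec - real.tv_sec));
	return 0;
}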
diff --git a/tools/testing/selftests/bpf/prog_tests/timer.c b/tools/testing/selftests/bpf/prog_tests/timer.c
new file mode 100644
index 000000000000..34f9ccce2602
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/timer.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include "timer.skel.h"
+#include "timer_failure.skel.h"
+#include "timer_interrupt.skel.h"
+
+#define NUM_THR 8
+
+static void *spin_lock_thread(void *arg)
+{
+ int i, err, prog_fd = *(int *)arg;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ for (i = 0; i < 10000; i++) {
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err") ||
+ !ASSERT_OK(topts.retval, "test_run_opts retval"))
+ break;
+ }
+
+ pthread_exit(arg);
+}
+
+static int timer(struct timer *timer_skel)
+{
+ int i, err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ pthread_t thread_id[NUM_THR];
+ void *ret;
+
+ err = timer__attach(timer_skel);
+ if (!ASSERT_OK(err, "timer_attach"))
+ return err;
+
+ ASSERT_EQ(timer_skel->data->callback_check, 52, "callback_check1");
+ ASSERT_EQ(timer_skel->data->callback2_check, 52, "callback2_check1");
+ ASSERT_EQ(timer_skel->bss->pinned_callback_check, 0, "pinned_callback_check1");
+
+ prog_fd = bpf_program__fd(timer_skel->progs.test1);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+ timer__detach(timer_skel);
+
+ usleep(50); /* 10 usecs should be enough, but give it extra */
+ /* check that timer_cb1() was executed 10+10 times */
+ ASSERT_EQ(timer_skel->data->callback_check, 42, "callback_check2");
+ ASSERT_EQ(timer_skel->data->callback2_check, 42, "callback2_check2");
+
+ /* check that timer_cb2() was executed twice */
+ ASSERT_EQ(timer_skel->bss->bss_data, 10, "bss_data");
+
+ /* check that timer_cb3() was executed twice */
+ ASSERT_EQ(timer_skel->bss->abs_data, 12, "abs_data");
+
+ /* check that timer_cb_pinned() was executed twice */
+ ASSERT_EQ(timer_skel->bss->pinned_callback_check, 2, "pinned_callback_check");
+
+ /* check that there were no errors in timer execution */
+ ASSERT_EQ(timer_skel->bss->err, 0, "err");
+
+ /* check that code paths completed */
+ ASSERT_EQ(timer_skel->bss->ok, 1 | 2 | 4, "ok");
+
+ prog_fd = bpf_program__fd(timer_skel->progs.race);
+ for (i = 0; i < NUM_THR; i++) {
+ err = pthread_create(&thread_id[i], NULL,
+ &spin_lock_thread, &prog_fd);
+ if (!ASSERT_OK(err, "pthread_create"))
+ break;
+ }
+
+ while (i) {
+ err = pthread_join(thread_id[--i], &ret);
+ if (ASSERT_OK(err, "pthread_join"))
+ ASSERT_EQ(ret, (void *)&prog_fd, "pthread_join");
+ }
+
+ return 0;
+}
+
+/* TODO: use pid filtering */
+void serial_test_timer(void)
+{
+ struct timer *timer_skel = NULL;
+ int err;
+
+ timer_skel = timer__open_and_load();
+ if (!timer_skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
+ if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
+ return;
+
+ err = timer(timer_skel);
+ ASSERT_OK(err, "timer");
+ timer__destroy(timer_skel);
+
+ RUN_TESTS(timer_failure);
+}
+
+void test_timer_interrupt(void)
+{
+ struct timer_interrupt *skel = NULL;
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+
+ skel = timer_interrupt__open_and_load();
+ if (!skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
+ if (!ASSERT_OK_PTR(skel, "timer_interrupt__open_and_load"))
+ return;
+
+ err = timer_interrupt__attach(skel);
+ if (!ASSERT_OK(err, "timer_interrupt__attach"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs.test_timer_interrupt);
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ goto out;
+
+ usleep(50);
+
+ ASSERT_EQ(skel->bss->in_interrupt, 0, "in_interrupt");
+ if (skel->bss->preempt_count)
+ ASSERT_NEQ(skel->bss->in_interrupt_cb, 0, "in_interrupt_cb");
+
+out:
+ timer_interrupt__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/timer_crash.c b/tools/testing/selftests/bpf/prog_tests/timer_crash.c
new file mode 100644
index 000000000000..b841597c8a3a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/timer_crash.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "timer_crash.skel.h"
+
+enum {
+ MODE_ARRAY,
+ MODE_HASH,
+};
+
+static void test_timer_crash_mode(int mode)
+{
+ struct timer_crash *skel;
+
+ skel = timer_crash__open_and_load();
+ if (!skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
+ if (!ASSERT_OK_PTR(skel, "timer_crash__open_and_load"))
+ return;
+ skel->bss->pid = getpid();
+ skel->bss->crash_map = mode;
+ if (!ASSERT_OK(timer_crash__attach(skel), "timer_crash__attach"))
+ goto end;
+ usleep(1);
+end:
+ timer_crash__destroy(skel);
+}
+
+void test_timer_crash(void)
+{
+ if (test__start_subtest("array"))
+ test_timer_crash_mode(MODE_ARRAY);
+ if (test__start_subtest("hash"))
+ test_timer_crash_mode(MODE_HASH);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/timer_lockup.c b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c
new file mode 100644
index 000000000000..eb303fa1e09a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <test_progs.h>
+#include <pthread.h>
+#include <network_helpers.h>
+#include <sys/sysinfo.h>
+
+#include "timer_lockup.skel.h"
+
+static long cpu;
+static int *timer1_err;
+static int *timer2_err;
+static bool skip;
+
+volatile int k = 0;
+
+static void *timer_lockup_thread(void *arg)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1000,
+ );
+ int i, prog_fd = *(int *)arg;
+ cpu_set_t cpuset;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset);
+ ASSERT_OK(pthread_setaffinity_np(pthread_self(), sizeof(cpuset),
+ &cpuset),
+ "cpu affinity");
+
+ for (i = 0; !READ_ONCE(*timer1_err) && !READ_ONCE(*timer2_err); i++) {
+ bpf_prog_test_run_opts(prog_fd, &opts);
+ /* Skip the test if we can't reproduce the race in a reasonable
+ * amount of time.
+ */
+ if (i > 50) {
+ WRITE_ONCE(skip, true);
+ break;
+ }
+ }
+
+ return NULL;
+}
+
+void test_timer_lockup(void)
+{
+ int timer1_prog, timer2_prog;
+ struct timer_lockup *skel;
+ pthread_t thrds[2];
+ void *ret;
+
+ if (get_nprocs() < 2) {
+ test__skip();
+ return;
+ }
+
+ skel = timer_lockup__open_and_load();
+ if (!skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
+ if (!ASSERT_OK_PTR(skel, "timer_lockup__open_and_load"))
+ return;
+
+ timer1_prog = bpf_program__fd(skel->progs.timer1_prog);
+ timer2_prog = bpf_program__fd(skel->progs.timer2_prog);
+
+ timer1_err = &skel->bss->timer1_err;
+ timer2_err = &skel->bss->timer2_err;
+
+ if (!ASSERT_OK(pthread_create(&thrds[0], NULL, timer_lockup_thread,
+ &timer1_prog),
+ "pthread_create thread1"))
+ goto out;
+ if (!ASSERT_OK(pthread_create(&thrds[1], NULL, timer_lockup_thread,
+ &timer2_prog),
+ "pthread_create thread2")) {
+ pthread_join(thrds[0], &ret);
+ goto out;
+ }
+
+ pthread_join(thrds[1], &ret);
+ pthread_join(thrds[0], &ret);
+
+ if (skip) {
+ test__skip();
+ goto out;
+ }
+
+ if (*timer1_err != -EDEADLK && *timer1_err != 0)
+ ASSERT_FAIL("timer1_err bad value");
+ if (*timer2_err != -EDEADLK && *timer2_err != 0)
+ ASSERT_FAIL("timer2_err bad value");
+out:
+ timer_lockup__destroy(skel);
+ return;
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/timer_mim.c b/tools/testing/selftests/bpf/prog_tests/timer_mim.c
new file mode 100644
index 000000000000..c930c7d7105b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/timer_mim.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include "timer_mim.skel.h"
+#include "timer_mim_reject.skel.h"
+
+static int timer_mim(struct timer_mim *timer_skel)
+{
+ __u64 cnt1, cnt2;
+ int err, prog_fd, key1 = 1;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ err = timer_mim__attach(timer_skel);
+ if (!ASSERT_OK(err, "timer_attach"))
+ return err;
+
+ prog_fd = bpf_program__fd(timer_skel->progs.test1);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+ timer_mim__detach(timer_skel);
+
+ /* check that timer_cb[12] are incrementing 'cnt' */
+ cnt1 = READ_ONCE(timer_skel->bss->cnt);
+ for (int i = 0; i < 100; i++) {
+ cnt2 = READ_ONCE(timer_skel->bss->cnt);
+ if (cnt2 != cnt1)
+ break;
+ usleep(200); /* 100 times more than interval */
+ }
+ ASSERT_GT(cnt2, cnt1, "cnt");
+
+ ASSERT_EQ(timer_skel->bss->err, 0, "err");
+ /* check that code paths completed */
+ ASSERT_EQ(timer_skel->bss->ok, 1 | 2, "ok");
+
+ close(bpf_map__fd(timer_skel->maps.inner_htab));
+ err = bpf_map__delete_elem(timer_skel->maps.outer_arr, &key1, sizeof(key1), 0);
+ ASSERT_EQ(err, 0, "delete inner map");
+
+ /* check that timer_cb[12] are no longer running */
+ cnt1 = READ_ONCE(timer_skel->bss->cnt);
+ for (int i = 0; i < 100; i++) {
+ usleep(200); /* 100 times more than interval */
+ cnt2 = READ_ONCE(timer_skel->bss->cnt);
+ if (cnt2 == cnt1)
+ break;
+ }
+ ASSERT_EQ(cnt2, cnt1, "cnt");
+
+ return 0;
+}
+
+void serial_test_timer_mim(void)
+{
+ struct timer_mim_reject *timer_reject_skel = NULL;
+ libbpf_print_fn_t old_print_fn = NULL;
+ struct timer_mim *timer_skel = NULL;
+ int err;
+
+ old_print_fn = libbpf_set_print(NULL);
+ timer_reject_skel = timer_mim_reject__open_and_load();
+ libbpf_set_print(old_print_fn);
+ if (!ASSERT_ERR_PTR(timer_reject_skel, "timer_reject_skel_load"))
+ goto cleanup;
+
+ timer_skel = timer_mim__open_and_load();
+ if (!timer_skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
+ if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
+ goto cleanup;
+
+ err = timer_mim(timer_skel);
+ ASSERT_OK(err, "timer_mim");
+cleanup:
+ timer_mim__destroy(timer_skel);
+ timer_mim_reject__destroy(timer_reject_skel);
+}
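
For reference, the outer_arr/inner_htab pair exercised above is an ordinary map-in-map; a minimal user-space sketch of the same setup and teardown with libbpf (names and sizes are illustrative, error handling elided):

#include <unistd.h>
#include <bpf/bpf.h>

/* Create an array-of-maps whose values are hash maps, install one inner
 * map, then delete the slot; once the last reference is gone the kernel
 * tears the inner map (including any timers armed in it) down.
 */
static int map_in_map_sketch(void)
{
	int inner_fd, outer_fd, key = 1;

	inner_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "inner", 4, 8, 16, NULL);

	LIBBPF_OPTS(bpf_map_create_opts, opts, .inner_map_fd = inner_fd);
	outer_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer",
				  4, 4, 2, &opts);

	bpf_map_update_elem(outer_fd, &key, &inner_fd, 0);
	bpf_map_delete_elem(outer_fd, &key);

	close(inner_fd);
	return outer_fd;
}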
diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c
new file mode 100644
index 000000000000..b81dde283052
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/token.c
@@ -0,0 +1,1197 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#define _GNU_SOURCE
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "cap_helpers.h"
+#include <fcntl.h>
+#include <sched.h>
+#include <signal.h>
+#include <unistd.h>
+#include <linux/filter.h>
+#include <linux/unistd.h>
+#include <linux/mount.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/un.h>
+#include "priv_map.skel.h"
+#include "priv_prog.skel.h"
+#include "dummy_st_ops_success.skel.h"
+#include "token_lsm.skel.h"
+#include "priv_freplace_prog.skel.h"
+
+static inline int sys_mount(const char *dev_name, const char *dir_name,
+ const char *type, unsigned long flags,
+ const void *data)
+{
+ return syscall(__NR_mount, dev_name, dir_name, type, flags, data);
+}
+
+static inline int sys_fsopen(const char *fsname, unsigned flags)
+{
+ return syscall(__NR_fsopen, fsname, flags);
+}
+
+static inline int sys_fspick(int dfd, const char *path, unsigned flags)
+{
+ return syscall(__NR_fspick, dfd, path, flags);
+}
+
+static inline int sys_fsconfig(int fs_fd, unsigned cmd, const char *key, const void *val, int aux)
+{
+ return syscall(__NR_fsconfig, fs_fd, cmd, key, val, aux);
+}
+
+static inline int sys_fsmount(int fs_fd, unsigned flags, unsigned ms_flags)
+{
+ return syscall(__NR_fsmount, fs_fd, flags, ms_flags);
+}
+
+static inline int sys_move_mount(int from_dfd, const char *from_path,
+ int to_dfd, const char *to_path,
+ unsigned flags)
+{
+ return syscall(__NR_move_mount, from_dfd, from_path, to_dfd, to_path, flags);
+}
+
+static int drop_priv_caps(__u64 *old_caps)
+{
+ return cap_disable_effective((1ULL << CAP_BPF) |
+ (1ULL << CAP_PERFMON) |
+ (1ULL << CAP_NET_ADMIN) |
+ (1ULL << CAP_SYS_ADMIN), old_caps);
+}
+
+static int restore_priv_caps(__u64 old_caps)
+{
+ return cap_enable_effective(old_caps, NULL);
+}
+
+static int set_delegate_mask(int fs_fd, const char *key, __u64 mask, const char *mask_str)
+{
+ char buf[32];
+ int err;
+
+ if (!mask_str) {
+ if (mask == ~0ULL) {
+ mask_str = "any";
+ } else {
+ snprintf(buf, sizeof(buf), "0x%llx", (unsigned long long)mask);
+ mask_str = buf;
+ }
+ }
+
+ err = sys_fsconfig(fs_fd, FSCONFIG_SET_STRING, key,
+ mask_str, 0);
+ if (err < 0)
+ err = -errno;
+ return err;
+}
+
+#define zclose(fd) do { if (fd >= 0) close(fd); fd = -1; } while (0)
+
+struct bpffs_opts {
+ __u64 cmds;
+ __u64 maps;
+ __u64 progs;
+ __u64 attachs;
+ const char *cmds_str;
+ const char *maps_str;
+ const char *progs_str;
+ const char *attachs_str;
+};
+
+static int create_bpffs_fd(void)
+{
+ int fs_fd;
+
+ /* create VFS context */
+ fs_fd = sys_fsopen("bpf", 0);
+ ASSERT_GE(fs_fd, 0, "fs_fd");
+
+ return fs_fd;
+}
+
+static int materialize_bpffs_fd(int fs_fd, struct bpffs_opts *opts)
+{
+ int err;
+
+ /* set up token delegation mount options */
+ err = set_delegate_mask(fs_fd, "delegate_cmds", opts->cmds, opts->cmds_str);
+ if (!ASSERT_OK(err, "fs_cfg_cmds"))
+ return err;
+ err = set_delegate_mask(fs_fd, "delegate_maps", opts->maps, opts->maps_str);
+ if (!ASSERT_OK(err, "fs_cfg_maps"))
+ return err;
+ err = set_delegate_mask(fs_fd, "delegate_progs", opts->progs, opts->progs_str);
+ if (!ASSERT_OK(err, "fs_cfg_progs"))
+ return err;
+ err = set_delegate_mask(fs_fd, "delegate_attachs", opts->attachs, opts->attachs_str);
+ if (!ASSERT_OK(err, "fs_cfg_attachs"))
+ return err;
+
+ /* instantiate FS object */
+ err = sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
+ if (err < 0)
+ return -errno;
+
+ return 0;
+}
+
+/* send FD over Unix domain (AF_UNIX) socket */
+static int sendfd(int sockfd, int fd)
+{
+ struct msghdr msg = {};
+ struct cmsghdr *cmsg;
+ int fds[1] = { fd }, err;
+ char iobuf[1];
+ struct iovec io = {
+ .iov_base = iobuf,
+ .iov_len = sizeof(iobuf),
+ };
+ union {
+ char buf[CMSG_SPACE(sizeof(fds))];
+ struct cmsghdr align;
+ } u;
+
+ msg.msg_iov = &io;
+ msg.msg_iovlen = 1;
+ msg.msg_control = u.buf;
+ msg.msg_controllen = sizeof(u.buf);
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(fds));
+ memcpy(CMSG_DATA(cmsg), fds, sizeof(fds));
+
+ err = sendmsg(sockfd, &msg, 0);
+ if (err < 0)
+ err = -errno;
+ if (!ASSERT_EQ(err, 1, "sendmsg"))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* receive FD over Unix domain (AF_UNIX) socket */
+static int recvfd(int sockfd, int *fd)
+{
+ struct msghdr msg = {};
+ struct cmsghdr *cmsg;
+ int fds[1], err;
+ char iobuf[1];
+ struct iovec io = {
+ .iov_base = iobuf,
+ .iov_len = sizeof(iobuf),
+ };
+ union {
+ char buf[CMSG_SPACE(sizeof(fds))];
+ struct cmsghdr align;
+ } u;
+
+ msg.msg_iov = &io;
+ msg.msg_iovlen = 1;
+ msg.msg_control = u.buf;
+ msg.msg_controllen = sizeof(u.buf);
+
+ err = recvmsg(sockfd, &msg, 0);
+ if (err < 0)
+ err = -errno;
+ if (!ASSERT_EQ(err, 1, "recvmsg"))
+ return -EINVAL;
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ if (!ASSERT_OK_PTR(cmsg, "cmsg_null") ||
+ !ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(fds)), "cmsg_len") ||
+ !ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET, "cmsg_level") ||
+ !ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS, "cmsg_type"))
+ return -EINVAL;
+
+ memcpy(fds, CMSG_DATA(cmsg), sizeof(fds));
+ *fd = fds[0];
+
+ return 0;
+}
+
+static ssize_t write_nointr(int fd, const void *buf, size_t count)
+{
+ ssize_t ret;
+
+ do {
+ ret = write(fd, buf, count);
+ } while (ret < 0 && errno == EINTR);
+
+ return ret;
+}
+
+static int write_file(const char *path, const void *buf, size_t count)
+{
+ int fd;
+ ssize_t ret;
+
+ fd = open(path, O_WRONLY | O_CLOEXEC | O_NOCTTY | O_NOFOLLOW);
+ if (fd < 0)
+ return -1;
+
+ ret = write_nointr(fd, buf, count);
+ close(fd);
+ if (ret < 0 || (size_t)ret != count)
+ return -1;
+
+ return 0;
+}
+
+static int create_and_enter_userns(void)
+{
+ uid_t uid;
+ gid_t gid;
+ char map[100];
+
+ uid = getuid();
+ gid = getgid();
+
+ if (unshare(CLONE_NEWUSER))
+ return -1;
+
+ if (write_file("/proc/self/setgroups", "deny", sizeof("deny") - 1) &&
+ errno != ENOENT)
+ return -1;
+
+ snprintf(map, sizeof(map), "0 %d 1", uid);
+ if (write_file("/proc/self/uid_map", map, strlen(map)))
+ return -1;
+
+ snprintf(map, sizeof(map), "0 %d 1", gid);
+ if (write_file("/proc/self/gid_map", map, strlen(map)))
+ return -1;
+
+ if (setgid(0))
+ return -1;
+
+ if (setuid(0))
+ return -1;
+
+ return 0;
+}
+
+typedef int (*child_callback_fn)(int bpffs_fd, struct token_lsm *lsm_skel);
+
+static void child(int sock_fd, struct bpffs_opts *opts, child_callback_fn callback)
+{
+ int mnt_fd = -1, fs_fd = -1, err = 0, bpffs_fd = -1, token_fd = -1;
+ struct token_lsm *lsm_skel = NULL;
+ char one;
+
+ /* load and attach LSM "policy" before we go into unpriv userns */
+ lsm_skel = token_lsm__open_and_load();
+ if (!ASSERT_OK_PTR(lsm_skel, "lsm_skel_load")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ lsm_skel->bss->my_pid = getpid();
+ err = token_lsm__attach(lsm_skel);
+ if (!ASSERT_OK(err, "lsm_skel_attach"))
+ goto cleanup;
+
+ /* setup userns with root mappings */
+ err = create_and_enter_userns();
+ if (!ASSERT_OK(err, "create_and_enter_userns"))
+ goto cleanup;
+
+ /* setup mountns to allow creating BPF FS (fsopen("bpf")) from unpriv process */
+ err = unshare(CLONE_NEWNS);
+ if (!ASSERT_OK(err, "create_mountns"))
+ goto cleanup;
+
+ err = sys_mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
+ if (!ASSERT_OK(err, "remount_root"))
+ goto cleanup;
+
+ fs_fd = create_bpffs_fd();
+ if (!ASSERT_GE(fs_fd, 0, "create_bpffs_fd")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* ensure unprivileged child cannot set delegation options */
+ err = set_delegate_mask(fs_fd, "delegate_cmds", 0x1, NULL);
+ ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm");
+ err = set_delegate_mask(fs_fd, "delegate_maps", 0x1, NULL);
+ ASSERT_EQ(err, -EPERM, "delegate_maps_eperm");
+ err = set_delegate_mask(fs_fd, "delegate_progs", 0x1, NULL);
+ ASSERT_EQ(err, -EPERM, "delegate_progs_eperm");
+ err = set_delegate_mask(fs_fd, "delegate_attachs", 0x1, NULL);
+ ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm");
+
+ /* pass BPF FS context object to parent */
+ err = sendfd(sock_fd, fs_fd);
+ if (!ASSERT_OK(err, "send_fs_fd"))
+ goto cleanup;
+
+ /* wait for the parent to read the fd, do the fsconfig() calls,
+ * and signal us that it is done
+ */
+ err = read(sock_fd, &one, sizeof(one));
+ if (!ASSERT_GE(err, 0, "read_one"))
+ goto cleanup;
+
+ /* avoid mucking around with mount namespaces and mounting at a
+ * well-known path, just create an O_PATH fd for the detached mount
+ */
+ mnt_fd = sys_fsmount(fs_fd, 0, 0);
+ if (!ASSERT_OK_FD(mnt_fd, "mnt_fd"))
+ goto cleanup;
+
+ /* try to fspick() BPF FS and try to add some delegation options */
+ fs_fd = sys_fspick(mnt_fd, "", FSPICK_EMPTY_PATH);
+ if (!ASSERT_GE(fs_fd, 0, "bpffs_fspick")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* ensure unprivileged child cannot reconfigure to set delegation options */
+ err = set_delegate_mask(fs_fd, "delegate_cmds", 0, "any");
+ if (!ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm_reconfig")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ err = set_delegate_mask(fs_fd, "delegate_maps", 0, "any");
+ if (!ASSERT_EQ(err, -EPERM, "delegate_maps_eperm_reconfig")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ err = set_delegate_mask(fs_fd, "delegate_progs", 0, "any");
+ if (!ASSERT_EQ(err, -EPERM, "delegate_progs_eperm_reconfig")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ err = set_delegate_mask(fs_fd, "delegate_attachs", 0, "any");
+ if (!ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm_reconfig")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ zclose(fs_fd);
+
+ bpffs_fd = openat(mnt_fd, ".", O_RDWR);
+ if (!ASSERT_GE(bpffs_fd, 0, "bpffs_open")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* create BPF token FD and pass it to parent for some extra checks */
+ token_fd = bpf_token_create(bpffs_fd, NULL);
+ if (!ASSERT_GT(token_fd, 0, "child_token_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ err = sendfd(sock_fd, token_fd);
+ if (!ASSERT_OK(err, "send_token_fd"))
+ goto cleanup;
+ zclose(token_fd);
+
+ /* run custom test logic against the specially configured BPF FS instance */
+ err = callback(bpffs_fd, lsm_skel);
+ if (!ASSERT_OK(err, "test_callback"))
+ goto cleanup;
+
+ err = 0;
+cleanup:
+ zclose(sock_fd);
+ zclose(mnt_fd);
+ zclose(fs_fd);
+ zclose(bpffs_fd);
+ zclose(token_fd);
+
+ if (lsm_skel)
+ lsm_skel->bss->my_pid = 0;
+ token_lsm__destroy(lsm_skel);
+
+ exit(-err);
+}
+
+static int wait_for_pid(pid_t pid)
+{
+ int status, ret;
+
+again:
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ if (errno == EINTR)
+ goto again;
+
+ return -1;
+ }
+
+ if (!WIFEXITED(status))
+ return -1;
+
+ return WEXITSTATUS(status);
+}
+
+static void parent(int child_pid, struct bpffs_opts *bpffs_opts, int sock_fd)
+{
+ int fs_fd = -1, token_fd = -1, err;
+ char one = 1;
+
+ err = recvfd(sock_fd, &fs_fd);
+ if (!ASSERT_OK(err, "recv_bpffs_fd"))
+ goto cleanup;
+
+ err = materialize_bpffs_fd(fs_fd, bpffs_opts);
+ if (!ASSERT_GE(err, 0, "materialize_bpffs_fd")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* notify the child that we did the fsconfig() calls and it can proceed. */
+ err = write(sock_fd, &one, sizeof(one));
+ if (!ASSERT_EQ(err, sizeof(one), "send_one"))
+ goto cleanup;
+ zclose(fs_fd);
+
+ /* receive BPF token FD back from child for some extra tests */
+ err = recvfd(sock_fd, &token_fd);
+ if (!ASSERT_OK(err, "recv_token_fd"))
+ goto cleanup;
+
+ err = wait_for_pid(child_pid);
+ ASSERT_OK(err, "waitpid_child");
+
+cleanup:
+ zclose(sock_fd);
+ zclose(fs_fd);
+ zclose(token_fd);
+
+ if (child_pid > 0)
+ (void)kill(child_pid, SIGKILL);
+}
+
+static void subtest_userns(struct bpffs_opts *bpffs_opts,
+ child_callback_fn child_cb)
+{
+ int sock_fds[2] = { -1, -1 };
+ int child_pid = 0, err;
+
+ err = socketpair(AF_UNIX, SOCK_STREAM, 0, sock_fds);
+ if (!ASSERT_OK(err, "socketpair"))
+ goto cleanup;
+
+ child_pid = fork();
+ if (!ASSERT_GE(child_pid, 0, "fork"))
+ goto cleanup;
+
+ if (child_pid == 0) {
+ zclose(sock_fds[0]);
+ return child(sock_fds[1], bpffs_opts, child_cb);
+ } else {
+ zclose(sock_fds[1]);
+ return parent(child_pid, bpffs_opts, sock_fds[0]);
+ }
+
+cleanup:
+ zclose(sock_fds[0]);
+ zclose(sock_fds[1]);
+ if (child_pid > 0)
+ (void)kill(child_pid, SIGKILL);
+}
+
+static int userns_map_create(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts);
+ int err, token_fd = -1, map_fd = -1;
+ __u64 old_caps = 0;
+
+ /* create BPF token from BPF FS mount */
+ token_fd = bpf_token_create(mnt_fd, NULL);
+ if (!ASSERT_GT(token_fd, 0, "token_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* while inside a non-init userns, we need both a BPF token *and*
+ * CAP_BPF inside the current userns to create a privileged map;
+ * test that neither the BPF token alone nor namespaced CAP_BPF is
+ * sufficient
+ */
+ err = drop_priv_caps(&old_caps);
+ if (!ASSERT_OK(err, "drop_caps"))
+ goto cleanup;
+
+ /* no token, no CAP_BPF -> fail */
+ map_opts.map_flags = 0;
+ map_opts.token_fd = 0;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_wo_bpf", 0, 8, 1, &map_opts);
+ if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_wo_cap_bpf_should_fail")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* token without CAP_BPF -> fail */
+ map_opts.map_flags = BPF_F_TOKEN_FD;
+ map_opts.token_fd = token_fd;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_wo_bpf", 0, 8, 1, &map_opts);
+ if (!ASSERT_LT(map_fd, 0, "stack_map_w_token_wo_cap_bpf_should_fail")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */
+ err = restore_priv_caps(old_caps);
+ if (!ASSERT_OK(err, "restore_caps"))
+ goto cleanup;
+
+ /* CAP_BPF without token -> fail */
+ map_opts.map_flags = 0;
+ map_opts.token_fd = 0;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_w_bpf", 0, 8, 1, &map_opts);
+ if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_w_cap_bpf_should_fail")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* finally, namespaced CAP_BPF + token -> success */
+ map_opts.map_flags = BPF_F_TOKEN_FD;
+ map_opts.token_fd = token_fd;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_w_bpf", 0, 8, 1, &map_opts);
+ if (!ASSERT_GT(map_fd, 0, "stack_map_w_token_w_cap_bpf")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+cleanup:
+ zclose(token_fd);
+ zclose(map_fd);
+ return err;
+}
+
+static int userns_btf_load(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_btf_load_opts, btf_opts);
+ int err, token_fd = -1, btf_fd = -1;
+ const void *raw_btf_data;
+ struct btf *btf = NULL;
+ __u32 raw_btf_size;
+ __u64 old_caps = 0;
+
+ /* create BPF token from BPF FS mount */
+ token_fd = bpf_token_create(mnt_fd, NULL);
+ if (!ASSERT_GT(token_fd, 0, "token_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* while inside a non-init userns, we need both a BPF token *and*
+ * CAP_BPF inside the current userns to load BTF; test that neither
+ * the BPF token alone nor namespaced CAP_BPF is sufficient
+ */
+ err = drop_priv_caps(&old_caps);
+ if (!ASSERT_OK(err, "drop_caps"))
+ goto cleanup;
+
+ /* setup a trivial BTF data to load to the kernel */
+ btf = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf, "empty_btf"))
+ goto cleanup;
+
+ ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type");
+
+ raw_btf_data = btf__raw_data(btf, &raw_btf_size);
+ if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data"))
+ goto cleanup;
+
+ /* no token + no CAP_BPF -> failure */
+ btf_opts.btf_flags = 0;
+ btf_opts.token_fd = 0;
+ btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts);
+ if (!ASSERT_LT(btf_fd, 0, "no_token_no_cap_should_fail"))
+ goto cleanup;
+
+ /* token + no CAP_BPF -> failure */
+ btf_opts.btf_flags = BPF_F_TOKEN_FD;
+ btf_opts.token_fd = token_fd;
+ btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts);
+ if (!ASSERT_LT(btf_fd, 0, "token_no_cap_should_fail"))
+ goto cleanup;
+
+ /* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */
+ err = restore_priv_caps(old_caps);
+ if (!ASSERT_OK(err, "restore_caps"))
+ goto cleanup;
+
+ /* token + CAP_BPF -> success */
+ btf_opts.btf_flags = BPF_F_TOKEN_FD;
+ btf_opts.token_fd = token_fd;
+ btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts);
+ if (!ASSERT_GT(btf_fd, 0, "token_and_cap_success"))
+ goto cleanup;
+
+ err = 0;
+cleanup:
+ btf__free(btf);
+ zclose(btf_fd);
+ zclose(token_fd);
+ return err;
+}
+
+static int userns_prog_load(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, prog_opts);
+ int err, token_fd = -1, prog_fd = -1;
+ struct bpf_insn insns[] = {
+ /* bpf_jiffies64() requires CAP_BPF */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
+ /* bpf_get_current_task() requires CAP_PERFMON */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_current_task),
+ /* r0 = 0; exit; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ size_t insn_cnt = ARRAY_SIZE(insns);
+ __u64 old_caps = 0;
+
+ /* create BPF token from BPF FS mount */
+ token_fd = bpf_token_create(mnt_fd, NULL);
+ if (!ASSERT_GT(token_fd, 0, "token_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* validate we can successfully load a BPF program with a token;
+ * being an XDP program (CAP_NET_ADMIN) that uses bpf_jiffies64()
+ * (CAP_BPF) and bpf_get_current_task() (CAP_PERFMON), it validates
+ * that the BPF token is wired properly throughout the kernel
+ */
+ prog_opts.prog_flags = BPF_F_TOKEN_FD;
+ prog_opts.token_fd = token_fd;
+ prog_opts.expected_attach_type = BPF_XDP;
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
+ insns, insn_cnt, &prog_opts);
+ if (!ASSERT_GT(prog_fd, 0, "prog_fd")) {
+ err = -EPERM;
+ goto cleanup;
+ }
+
+ /* no token + caps -> failure */
+ prog_opts.prog_flags = 0;
+ prog_opts.token_fd = 0;
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
+ insns, insn_cnt, &prog_opts);
+ if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) {
+ err = -EPERM;
+ goto cleanup;
+ }
+
+ err = drop_priv_caps(&old_caps);
+ if (!ASSERT_OK(err, "drop_caps"))
+ goto cleanup;
+
+ /* no caps + token -> failure */
+ prog_opts.prog_flags = BPF_F_TOKEN_FD;
+ prog_opts.token_fd = token_fd;
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
+ insns, insn_cnt, &prog_opts);
+ if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) {
+ err = -EPERM;
+ goto cleanup;
+ }
+
+ /* no caps + no token -> definitely a failure */
+ prog_opts.prog_flags = 0;
+ prog_opts.token_fd = 0;
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
+ insns, insn_cnt, &prog_opts);
+ if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) {
+ err = -EPERM;
+ goto cleanup;
+ }
+
+ err = 0;
+cleanup:
+ zclose(prog_fd);
+ zclose(token_fd);
+ return err;
+}
+
+static int userns_obj_priv_map(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ char buf[256];
+ struct priv_map *skel;
+ int err;
+
+ skel = priv_map__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+ priv_map__destroy(skel);
+ return -EINVAL;
+ }
+
+ /* use bpf_token_path to provide BPF FS path */
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+ opts.bpf_token_path = buf;
+ skel = priv_map__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+ return -EINVAL;
+
+ err = priv_map__load(skel);
+ priv_map__destroy(skel);
+ if (!ASSERT_OK(err, "obj_token_path_load"))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int userns_obj_priv_prog(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ char buf[256];
+ struct priv_prog *skel;
+ int err;
+
+ skel = priv_prog__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+ priv_prog__destroy(skel);
+ return -EINVAL;
+ }
+
+ /* use bpf_token_path to provide BPF FS path */
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+ opts.bpf_token_path = buf;
+ skel = priv_prog__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+ return -EINVAL;
+ err = priv_prog__load(skel);
+ priv_prog__destroy(skel);
+ if (!ASSERT_OK(err, "obj_token_path_load"))
+ return -EINVAL;
+
+ /* provide BPF token, but reject bpf_token_capable() with LSM */
+ lsm_skel->bss->reject_capable = true;
+ lsm_skel->bss->reject_cmd = false;
+ skel = priv_prog__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cap_open"))
+ return -EINVAL;
+ err = priv_prog__load(skel);
+ priv_prog__destroy(skel);
+ if (!ASSERT_ERR(err, "obj_token_lsm_reject_cap_load"))
+ return -EINVAL;
+
+ /* provide BPF token, but reject bpf_token_cmd() with LSM */
+ lsm_skel->bss->reject_capable = false;
+ lsm_skel->bss->reject_cmd = true;
+ skel = priv_prog__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cmd_open"))
+ return -EINVAL;
+ err = priv_prog__load(skel);
+ priv_prog__destroy(skel);
+ if (!ASSERT_ERR(err, "obj_token_lsm_reject_cmd_load"))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int userns_obj_priv_freplace_setup(int mnt_fd, struct priv_freplace_prog **fr_skel,
+ struct priv_prog **skel, int *tgt_fd)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ int err;
+ char buf[256];
+
+ /* use bpf_token_path to provide BPF FS path */
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+ opts.bpf_token_path = buf;
+ *skel = priv_prog__open_opts(&opts);
+ if (!ASSERT_OK_PTR(*skel, "priv_prog__open_opts"))
+ return -EINVAL;
+ err = priv_prog__load(*skel);
+ if (!ASSERT_OK(err, "priv_prog__load"))
+ return -EINVAL;
+
+ *fr_skel = priv_freplace_prog__open_opts(&opts);
+ if (!ASSERT_OK_PTR(*skel, "priv_freplace_prog__open_opts"))
+ return -EINVAL;
+
+ *tgt_fd = bpf_program__fd((*skel)->progs.xdp_prog1);
+ return 0;
+}
+
+/* Verify that freplace works from a user namespace, because the BPF
+ * token is loaded in bpf_object__prepare()
+ */
+static int userns_obj_priv_freplace_prog(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ struct priv_freplace_prog *fr_skel = NULL;
+ struct priv_prog *skel = NULL;
+ int err, tgt_fd;
+
+ err = userns_obj_priv_freplace_setup(mnt_fd, &fr_skel, &skel, &tgt_fd);
+ if (!ASSERT_OK(err, "setup"))
+ goto out;
+
+ err = bpf_object__prepare(fr_skel->obj);
+ if (!ASSERT_OK(err, "freplace__prepare"))
+ goto out;
+
+ err = bpf_program__set_attach_target(fr_skel->progs.new_xdp_prog2, tgt_fd, "xdp_prog1");
+ if (!ASSERT_OK(err, "set_attach_target"))
+ goto out;
+
+ err = priv_freplace_prog__load(fr_skel);
+ ASSERT_OK(err, "priv_freplace_prog__load");
+
+out:
+ priv_freplace_prog__destroy(fr_skel);
+ priv_prog__destroy(skel);
+ return err;
+}
+
+/* Verify that freplace fails to set its attach target from a user namespace without a BPF token */
+static int userns_obj_priv_freplace_prog_fail(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ struct priv_freplace_prog *fr_skel = NULL;
+ struct priv_prog *skel = NULL;
+ int err, tgt_fd;
+
+ err = userns_obj_priv_freplace_setup(mnt_fd, &fr_skel, &skel, &tgt_fd);
+ if (!ASSERT_OK(err, "setup"))
+ goto out;
+
+ err = bpf_program__set_attach_target(fr_skel->progs.new_xdp_prog2, tgt_fd, "xdp_prog1");
+ if (ASSERT_ERR(err, "attach fails"))
+ err = 0;
+ else
+ err = -EINVAL;
+
+out:
+ priv_freplace_prog__destroy(fr_skel);
+ priv_prog__destroy(skel);
+ return err;
+}
+
+/* this test is called with BPF FS that doesn't delegate BPF_BTF_LOAD command,
+ * which should cause struct_ops application to fail, as BTF won't be uploaded
+ * into the kernel, even if STRUCT_OPS programs themselves are allowed
+ */
+static int validate_struct_ops_load(int mnt_fd, bool expect_success)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ char buf[256];
+ struct dummy_st_ops_success *skel;
+ int err;
+
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+ opts.bpf_token_path = buf;
+ skel = dummy_st_ops_success__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+ return -EINVAL;
+
+ err = dummy_st_ops_success__load(skel);
+ dummy_st_ops_success__destroy(skel);
+ if (expect_success) {
+ if (!ASSERT_OK(err, "obj_token_path_load"))
+ return -EINVAL;
+ } else /* expect failure */ {
+ if (!ASSERT_ERR(err, "obj_token_path_load"))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int userns_obj_priv_btf_fail(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ return validate_struct_ops_load(mnt_fd, false /* should fail */);
+}
+
+static int userns_obj_priv_btf_success(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ return validate_struct_ops_load(mnt_fd, true /* should succeed */);
+}
+
+static const char *token_bpffs_custom_dir(void)
+{
+ return getenv("BPF_SELFTESTS_BPF_TOKEN_DIR") ?: "/tmp/bpf-token-fs";
+}
+
+#define TOKEN_ENVVAR "LIBBPF_BPF_TOKEN_PATH"
+
+static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct dummy_st_ops_success *skel;
+ int err;
+
+ /* before we mount BPF FS with token delegation, struct_ops skeleton
+ * should fail to load
+ */
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+ dummy_st_ops_success__destroy(skel);
+ return -EINVAL;
+ }
+
+ /* mount custom BPF FS over /sys/fs/bpf so that libbpf can create BPF
+ * token automatically and implicitly
+ */
+ err = sys_move_mount(mnt_fd, "", AT_FDCWD, "/sys/fs/bpf", MOVE_MOUNT_F_EMPTY_PATH);
+ if (!ASSERT_OK(err, "move_mount_bpffs"))
+ return -EINVAL;
+
+ /* disable implicit BPF token creation by setting
+ * LIBBPF_BPF_TOKEN_PATH envvar to empty value, load should fail
+ */
+ err = setenv(TOKEN_ENVVAR, "", 1 /*overwrite*/);
+ if (!ASSERT_OK(err, "setenv_token_path"))
+ return -EINVAL;
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_token_envvar_disabled_load")) {
+ unsetenv(TOKEN_ENVVAR);
+ dummy_st_ops_success__destroy(skel);
+ return -EINVAL;
+ }
+ unsetenv(TOKEN_ENVVAR);
+
+ /* now the same struct_ops skeleton should succeed thanks to libbpf
+ * creating BPF token from /sys/fs/bpf mount point
+ */
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load"))
+ return -EINVAL;
+
+ dummy_st_ops_success__destroy(skel);
+
+ /* now disable implicit token through empty bpf_token_path, should fail */
+ opts.bpf_token_path = "";
+ skel = dummy_st_ops_success__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open"))
+ return -EINVAL;
+
+ err = dummy_st_ops_success__load(skel);
+ dummy_st_ops_success__destroy(skel);
+ if (!ASSERT_ERR(err, "obj_empty_token_path_load"))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ const char *custom_dir = token_bpffs_custom_dir();
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct dummy_st_ops_success *skel;
+ int err;
+
+ /* before we mount BPF FS with token delegation, struct_ops skeleton
+ * should fail to load
+ */
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+ dummy_st_ops_success__destroy(skel);
+ return -EINVAL;
+ }
+
+ /* mount custom BPF FS over custom location, so libbpf can't create
+ * BPF token implicitly, unless pointed to it through
+ * LIBBPF_BPF_TOKEN_PATH envvar
+ */
+ rmdir(custom_dir);
+ if (!ASSERT_OK(mkdir(custom_dir, 0777), "mkdir_bpffs_custom"))
+ goto err_out;
+ err = sys_move_mount(mnt_fd, "", AT_FDCWD, custom_dir, MOVE_MOUNT_F_EMPTY_PATH);
+ if (!ASSERT_OK(err, "move_mount_bpffs"))
+ goto err_out;
+
+ /* even though we have BPF FS with delegation, it's not at default
+ * /sys/fs/bpf location, so we still fail to load until envvar is set up
+ */
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load2")) {
+ dummy_st_ops_success__destroy(skel);
+ goto err_out;
+ }
+
+ err = setenv(TOKEN_ENVVAR, custom_dir, 1 /*overwrite*/);
+ if (!ASSERT_OK(err, "setenv_token_path"))
+ goto err_out;
+
+ /* now the same struct_ops skeleton should succeed thanks to libbpf
+ * creating BPF token from custom mount point
+ */
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load"))
+ goto err_out;
+
+ dummy_st_ops_success__destroy(skel);
+
+ /* now disable implicit token through empty bpf_token_path, envvar
+ * will be ignored, should fail
+ */
+ opts.bpf_token_path = "";
+ skel = dummy_st_ops_success__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open"))
+ goto err_out;
+
+ err = dummy_st_ops_success__load(skel);
+ dummy_st_ops_success__destroy(skel);
+ if (!ASSERT_ERR(err, "obj_empty_token_path_load"))
+ goto err_out;
+
+ rmdir(custom_dir);
+ unsetenv(TOKEN_ENVVAR);
+ return 0;
+err_out:
+ rmdir(custom_dir);
+ unsetenv(TOKEN_ENVVAR);
+ return -EINVAL;
+}
+
+#define bit(n) (1ULL << (n))
+
+static int userns_bpf_token_info(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ int err, token_fd = -1;
+ struct bpf_token_info info;
+ __u32 len = sizeof(struct bpf_token_info);
+
+ /* create BPF token from BPF FS mount */
+ token_fd = bpf_token_create(mnt_fd, NULL);
+ if (!ASSERT_GT(token_fd, 0, "token_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ memset(&info, 0, len);
+ err = bpf_obj_get_info_by_fd(token_fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_obj_get_token_info"))
+ goto cleanup;
+ if (!ASSERT_EQ(info.allowed_cmds, bit(BPF_MAP_CREATE), "token_info_cmds_map_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ if (!ASSERT_EQ(info.allowed_progs, bit(BPF_PROG_TYPE_XDP), "token_info_progs_xdp")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* BPF_PROG_TYPE_EXT was not delegated, so it must not show up */
+ if (!ASSERT_EQ(info.allowed_progs & bit(BPF_PROG_TYPE_EXT), 0,
+ "token_info_progs_ext"))
+ err = -EINVAL;
+
+cleanup:
+ zclose(token_fd);
+ return err;
+}
+
+void test_token(void)
+{
+ if (test__start_subtest("map_token")) {
+ struct bpffs_opts opts = {
+ .cmds_str = "map_create",
+ .maps_str = "stack",
+ };
+
+ subtest_userns(&opts, userns_map_create);
+ }
+ if (test__start_subtest("btf_token")) {
+ struct bpffs_opts opts = {
+ .cmds = bit(BPF_BTF_LOAD),
+ };
+
+ subtest_userns(&opts, userns_btf_load);
+ }
+ if (test__start_subtest("prog_token")) {
+ struct bpffs_opts opts = {
+ .cmds_str = "PROG_LOAD",
+ .progs_str = "XDP",
+ .attachs_str = "xdp",
+ };
+
+ subtest_userns(&opts, userns_prog_load);
+ }
+ if (test__start_subtest("obj_priv_map")) {
+ struct bpffs_opts opts = {
+ .cmds = bit(BPF_MAP_CREATE),
+ .maps = bit(BPF_MAP_TYPE_QUEUE),
+ };
+
+ subtest_userns(&opts, userns_obj_priv_map);
+ }
+ if (test__start_subtest("obj_priv_prog")) {
+ struct bpffs_opts opts = {
+ .cmds = bit(BPF_PROG_LOAD),
+ .progs = bit(BPF_PROG_TYPE_XDP),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_obj_priv_prog);
+ }
+ if (test__start_subtest("obj_priv_freplace_prog")) {
+ struct bpffs_opts opts = {
+ .cmds = bit(BPF_BTF_LOAD) | bit(BPF_PROG_LOAD) | bit(BPF_BTF_GET_FD_BY_ID),
+ .progs = bit(BPF_PROG_TYPE_EXT) | bit(BPF_PROG_TYPE_XDP),
+ .attachs = ~0ULL,
+ };
+ subtest_userns(&opts, userns_obj_priv_freplace_prog);
+ }
+ if (test__start_subtest("obj_priv_freplace_prog_fail")) {
+ struct bpffs_opts opts = {
+ .cmds = bit(BPF_BTF_LOAD) | bit(BPF_PROG_LOAD) | bit(BPF_BTF_GET_FD_BY_ID),
+ .progs = bit(BPF_PROG_TYPE_EXT) | bit(BPF_PROG_TYPE_XDP),
+ .attachs = ~0ULL,
+ };
+ subtest_userns(&opts, userns_obj_priv_freplace_prog_fail);
+ }
+ if (test__start_subtest("obj_priv_btf_fail")) {
+ struct bpffs_opts opts = {
+ /* disallow BTF loading */
+ .cmds = bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+ .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+ .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_obj_priv_btf_fail);
+ }
+ if (test__start_subtest("obj_priv_btf_success")) {
+ struct bpffs_opts opts = {
+ /* allow BTF loading */
+ .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+ .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+ .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_obj_priv_btf_success);
+ }
+ if (test__start_subtest("obj_priv_implicit_token")) {
+ struct bpffs_opts opts = {
+ /* allow BTF loading */
+ .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+ .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+ .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_obj_priv_implicit_token);
+ }
+ if (test__start_subtest("obj_priv_implicit_token_envvar")) {
+ struct bpffs_opts opts = {
+ /* allow BTF loading */
+ .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+ .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+ .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_obj_priv_implicit_token_envvar);
+ }
+ if (test__start_subtest("bpf_token_info")) {
+ struct bpffs_opts opts = {
+ .cmds = bit(BPF_MAP_CREATE),
+ .progs = bit(BPF_PROG_TYPE_XDP),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_bpf_token_info);
+ }
+}
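
For reference, the delegation flow these subtests exercise condenses to fsopen + fsconfig + fsmount + bpf_token_create; a sketch that reuses the syscall wrappers defined above and assumes a privileged caller (error handling elided):

#include <unistd.h>
#include <fcntl.h>
#include <bpf/bpf.h>

/* Create a BPF FS instance that delegates all commands, then mint a
 * BPF token from it; the delegate_* options require CAP_SYS_ADMIN in
 * the userns owning the superblock.
 */
static int make_token(void)
{
	int fs_fd, mnt_fd, bpffs_fd, token_fd;

	fs_fd = sys_fsopen("bpf", 0);
	sys_fsconfig(fs_fd, FSCONFIG_SET_STRING, "delegate_cmds", "any", 0);
	sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
	mnt_fd = sys_fsmount(fs_fd, 0, 0);	/* detached mount, no path needed */

	bpffs_fd = openat(mnt_fd, ".", O_RDWR);
	token_fd = bpf_token_create(bpffs_fd, NULL);

	close(bpffs_fd);
	close(mnt_fd);
	close(fs_fd);
	return token_fd;	/* pass via BPF_F_TOKEN_FD + .token_fd in *_opts */
}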
diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
index fb095e5cd9af..655d69f0ff0b 100644
--- a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
+++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
@@ -1,12 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
-void test_tp_attach_query(void)
+void serial_test_tp_attach_query(void)
{
const int num_progs = 3;
int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
__u32 duration = 0, info_len, saved_prog_ids[num_progs];
- const char *file = "./test_tracepoint.o";
+ const char *file = "./test_tracepoint.bpf.o";
struct perf_event_query_bpf *query;
struct perf_event_attr attr = {};
struct bpf_object *obj[num_progs];
@@ -16,8 +16,13 @@ void test_tp_attach_query(void)
for (i = 0; i < num_progs; i++)
obj[i] = NULL;
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+ if (access("/sys/kernel/tracing/trace", F_OK) == 0) {
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/tracing/events/sched/sched_switch/id");
+ } else {
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+ }
efd = open(buf, O_RDONLY, 0);
if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
return;
@@ -35,7 +40,7 @@ void test_tp_attach_query(void)
query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
for (i = 0; i < num_progs; i++) {
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
&prog_fd[i]);
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
goto cleanup1;
@@ -45,8 +50,9 @@ void test_tp_attach_query(void)
prog_info.xlated_prog_len = 0;
prog_info.nr_map_ids = 0;
info_len = sizeof(prog_info);
- err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
- if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
+ err = bpf_prog_get_info_by_fd(prog_fd[i], &prog_info,
+ &info_len);
+ if (CHECK(err, "bpf_prog_get_info_by_fd", "err %d errno %d\n",
err, errno))
goto cleanup1;
saved_prog_ids[i] = prog_info.id;
diff --git a/tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c b/tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c
new file mode 100644
index 000000000000..accc42e01f8a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "test_tp_btf_nullable.skel.h"
+
+void test_tp_btf_nullable(void)
+{
+ if (!env.has_testmod) {
+ test__skip();
+ return;
+ }
+
+ RUN_TESTS(test_tp_btf_nullable);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/trace_ext.c b/tools/testing/selftests/bpf/prog_tests/trace_ext.c
index 924441d4362d..aabdff7bea3e 100644
--- a/tools/testing/selftests/bpf/prog_tests/trace_ext.c
+++ b/tools/testing/selftests/bpf/prog_tests/trace_ext.c
@@ -23,8 +23,12 @@ void test_trace_ext(void)
int err, pkt_fd, ext_fd;
struct bpf_program *prog;
char buf[100];
- __u32 retval;
__u64 len;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
/* open/load/attach test_pkt_md_access */
skel_pkt = test_pkt_md_access__open_and_load();
@@ -77,32 +81,32 @@ void test_trace_ext(void)
/* load/attach tracing */
err = test_trace_ext_tracing__load(skel_trace);
- if (CHECK(err, "setup", "tracing/test_pkt_md_access_new load failed\n")) {
+ if (!ASSERT_OK(err, "tracing/test_pkt_md_access_new load")) {
libbpf_strerror(err, buf, sizeof(buf));
fprintf(stderr, "%s\n", buf);
goto cleanup;
}
err = test_trace_ext_tracing__attach(skel_trace);
- if (CHECK(err, "setup", "tracing/test_pkt_md_access_new attach failed: %d\n", err))
+ if (!ASSERT_OK(err, "tracing/test_pkt_md_access_new attach"))
goto cleanup;
/* trigger the test */
- err = bpf_prog_test_run(pkt_fd, 1, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "run", "err %d errno %d retval %d\n", err, errno, retval);
+ err = bpf_prog_test_run_opts(pkt_fd, &topts);
+ ASSERT_OK(err, "test_run_opts err");
+ ASSERT_OK(topts.retval, "test_run_opts retval");
bss_ext = skel_ext->bss;
bss_trace = skel_trace->bss;
len = bss_ext->ext_called;
- CHECK(bss_ext->ext_called == 0,
- "check", "failed to trigger freplace/test_pkt_md_access\n");
- CHECK(bss_trace->fentry_called != len,
- "check", "failed to trigger fentry/test_pkt_md_access_new\n");
- CHECK(bss_trace->fexit_called != len,
- "check", "failed to trigger fexit/test_pkt_md_access_new\n");
+ ASSERT_NEQ(bss_ext->ext_called, 0,
+ "failed to trigger freplace/test_pkt_md_access");
+ ASSERT_EQ(bss_trace->fentry_called, len,
+ "failed to trigger fentry/test_pkt_md_access_new");
+ ASSERT_EQ(bss_trace->fexit_called, len,
+ "failed to trigger fexit/test_pkt_md_access_new");
cleanup:
test_trace_ext_tracing__destroy(skel_trace);
diff --git a/tools/testing/selftests/bpf/prog_tests/trace_printk.c b/tools/testing/selftests/bpf/prog_tests/trace_printk.c
index 39b0decb1bb2..e56e88596d64 100644
--- a/tools/testing/selftests/bpf/prog_tests/trace_printk.c
+++ b/tools/testing/selftests/bpf/prog_tests/trace_printk.c
@@ -3,73 +3,56 @@
#include <test_progs.h>
-#include "trace_printk.skel.h"
+#include "trace_printk.lskel.h"
-#define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe"
#define SEARCHMSG "testing,testing"
-void test_trace_printk(void)
+static void trace_pipe_cb(const char *str, void *data)
{
- int err, iter = 0, duration = 0, found = 0;
- struct trace_printk__bss *bss;
- struct trace_printk *skel;
- char *buf = NULL;
- FILE *fp = NULL;
- size_t buflen;
-
- skel = trace_printk__open();
- if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ if (strstr(str, SEARCHMSG) != NULL)
+ (*(int *)data)++;
+}
+
+void serial_test_trace_printk(void)
+{
+ struct trace_printk_lskel__bss *bss;
+ struct trace_printk_lskel *skel;
+ int err = 0, found = 0;
+
+ skel = trace_printk_lskel__open();
+ if (!ASSERT_OK_PTR(skel, "trace_printk__open"))
return;
- err = trace_printk__load(skel);
- if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
+ ASSERT_EQ(skel->rodata->fmt[0], 'T', "skel->rodata->fmt[0]");
+ skel->rodata->fmt[0] = 't';
+
+ err = trace_printk_lskel__load(skel);
+ if (!ASSERT_OK(err, "trace_printk__load"))
goto cleanup;
bss = skel->bss;
- err = trace_printk__attach(skel);
- if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ err = trace_printk_lskel__attach(skel);
+ if (!ASSERT_OK(err, "trace_printk__attach"))
goto cleanup;
- fp = fopen(TRACEBUF, "r");
- if (CHECK(fp == NULL, "could not open trace buffer",
- "error %d opening %s", errno, TRACEBUF))
- goto cleanup;
-
- /* We do not want to wait forever if this test fails... */
- fcntl(fileno(fp), F_SETFL, O_NONBLOCK);
-
/* wait for tracepoint to trigger */
usleep(1);
- trace_printk__detach(skel);
+ trace_printk_lskel__detach(skel);
- if (CHECK(bss->trace_printk_ran == 0,
- "bpf_trace_printk never ran",
- "ran == %d", bss->trace_printk_ran))
+ if (!ASSERT_GT(bss->trace_printk_ran, 0, "bss->trace_printk_ran"))
goto cleanup;
- if (CHECK(bss->trace_printk_ret <= 0,
- "bpf_trace_printk returned <= 0 value",
- "got %d", bss->trace_printk_ret))
+ if (!ASSERT_GT(bss->trace_printk_ret, 0, "bss->trace_printk_ret"))
goto cleanup;
/* verify our search string is in the trace buffer */
- while (getline(&buf, &buflen, fp) >= 0 || errno == EAGAIN) {
- if (strstr(buf, SEARCHMSG) != NULL)
- found++;
- if (found == bss->trace_printk_ran)
- break;
- if (++iter > 1000)
- break;
- }
-
- if (CHECK(!found, "message from bpf_trace_printk not found",
- "no instance of %s in %s", SEARCHMSG, TRACEBUF))
+ ASSERT_OK(read_trace_pipe_iter(trace_pipe_cb, &found, 1000),
+ "read_trace_pipe_iter");
+
+ if (!ASSERT_EQ(found, bss->trace_printk_ran, "found"))
goto cleanup;
cleanup:
- trace_printk__destroy(skel);
- free(buf);
- if (fp)
- fclose(fp);
+ trace_printk_lskel__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c
new file mode 100644
index 000000000000..2af6a6f2096a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+
+#include "trace_vprintk.lskel.h"
+
+#define SEARCHMSG "1,2,3,4,5,6,7,8,9,10"
+
+static void trace_pipe_cb(const char *str, void *data)
+{
+ if (strstr(str, SEARCHMSG) != NULL)
+ (*(int *)data)++;
+}
+
+void serial_test_trace_vprintk(void)
+{
+ struct trace_vprintk_lskel__bss *bss;
+ struct trace_vprintk_lskel *skel;
+ int err = 0, found = 0;
+
+ skel = trace_vprintk_lskel__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load"))
+ goto cleanup;
+
+ bss = skel->bss;
+
+ err = trace_vprintk_lskel__attach(skel);
+ if (!ASSERT_OK(err, "trace_vprintk__attach"))
+ goto cleanup;
+
+ /* wait for tracepoint to trigger */
+ usleep(1);
+ trace_vprintk_lskel__detach(skel);
+
+ if (!ASSERT_GT(bss->trace_vprintk_ran, 0, "bss->trace_vprintk_ran"))
+ goto cleanup;
+
+ if (!ASSERT_GT(bss->trace_vprintk_ret, 0, "bss->trace_vprintk_ret"))
+ goto cleanup;
+
+ /* verify our search string is in the trace buffer */
+ ASSERT_OK(read_trace_pipe_iter(trace_pipe_cb, &found, 1000),
+ "read_trace_pipe_iter");
+
+ if (!ASSERT_EQ(found, bss->trace_vprintk_ran, "found"))
+ goto cleanup;
+
+ if (!ASSERT_LT(bss->null_data_vprintk_ret, 0, "bss->null_data_vprintk_ret"))
+ goto cleanup;
+
+cleanup:
+ trace_vprintk_lskel__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
new file mode 100644
index 000000000000..10e231965589
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include "tracing_failure.skel.h"
+
+static void test_bpf_spin_lock(bool is_spin_lock)
+{
+ struct tracing_failure *skel;
+ int err;
+
+ skel = tracing_failure__open();
+ if (!ASSERT_OK_PTR(skel, "tracing_failure__open"))
+ return;
+
+ if (is_spin_lock)
+ bpf_program__set_autoload(skel->progs.test_spin_lock, true);
+ else
+ bpf_program__set_autoload(skel->progs.test_spin_unlock, true);
+
+ err = tracing_failure__load(skel);
+ if (!ASSERT_OK(err, "tracing_failure__load"))
+ goto out;
+
+ err = tracing_failure__attach(skel);
+ ASSERT_ERR(err, "tracing_failure__attach");
+
+out:
+ tracing_failure__destroy(skel);
+}
+
+static void test_tracing_fail_prog(const char *prog_name, const char *exp_msg)
+{
+ struct tracing_failure *skel;
+ struct bpf_program *prog;
+ char log_buf[256];
+ int err;
+
+ skel = tracing_failure__open();
+ if (!ASSERT_OK_PTR(skel, "tracing_failure__open"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto out;
+
+ bpf_program__set_autoload(prog, true);
+ bpf_program__set_log_buf(prog, log_buf, sizeof(log_buf));
+
+ err = tracing_failure__load(skel);
+ if (!ASSERT_ERR(err, "tracing_failure__load"))
+ goto out;
+
+ ASSERT_HAS_SUBSTR(log_buf, exp_msg, "log_buf");
+out:
+ tracing_failure__destroy(skel);
+}
+
+static void test_tracing_deny(void)
+{
+ int btf_id;
+
+ /* __rcu_read_lock depends on CONFIG_PREEMPT_RCU */
+ btf_id = libbpf_find_vmlinux_btf_id("__rcu_read_lock", BPF_TRACE_FENTRY);
+ if (btf_id <= 0) {
+ test__skip();
+ return;
+ }
+
+ test_tracing_fail_prog("tracing_deny",
+ "Attaching tracing programs to function '__rcu_read_lock' is rejected.");
+}
+
+static void test_fexit_noreturns(void)
+{
+ test_tracing_fail_prog("fexit_noreturns",
+ "Attaching fexit/fmod_ret to __noreturn function 'do_exit' is rejected.");
+}
+
+void test_tracing_failure(void)
+{
+ if (test__start_subtest("bpf_spin_lock"))
+ test_bpf_spin_lock(true);
+ if (test__start_subtest("bpf_spin_unlock"))
+ test_bpf_spin_lock(false);
+ if (test__start_subtest("tracing_deny"))
+ test_tracing_deny();
+ if (test__start_subtest("fexit_noreturns"))
+ test_fexit_noreturns();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
new file mode 100644
index 000000000000..6f8c0bfb0415
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "tracing_struct.skel.h"
+#include "tracing_struct_many_args.skel.h"
+
+static void test_struct_args(void)
+{
+ struct tracing_struct *skel;
+ int err;
+
+ skel = tracing_struct__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "tracing_struct__open_and_load"))
+ return;
+
+ err = tracing_struct__attach(skel);
+ if (!ASSERT_OK(err, "tracing_struct__attach"))
+ goto destroy_skel;
+
+ ASSERT_OK(trigger_module_test_read(256), "trigger_read");
+
+ ASSERT_EQ(skel->bss->t1_a_a, 2, "t1:a.a");
+ ASSERT_EQ(skel->bss->t1_a_b, 3, "t1:a.b");
+ ASSERT_EQ(skel->bss->t1_b, 1, "t1:b");
+ ASSERT_EQ(skel->bss->t1_c, 4, "t1:c");
+
+ ASSERT_EQ(skel->bss->t1_nregs, 4, "t1 nregs");
+ ASSERT_EQ(skel->bss->t1_reg0, 2, "t1 reg0");
+ ASSERT_EQ(skel->bss->t1_reg1, 3, "t1 reg1");
+ ASSERT_EQ(skel->bss->t1_reg2, 1, "t1 reg2");
+ ASSERT_EQ(skel->bss->t1_reg3, 4, "t1 reg3");
+ ASSERT_EQ(skel->bss->t1_ret, 10, "t1 ret");
+
+ ASSERT_EQ(skel->bss->t2_a, 1, "t2:a");
+ ASSERT_EQ(skel->bss->t2_b_a, 2, "t2:b.a");
+ ASSERT_EQ(skel->bss->t2_b_b, 3, "t2:b.b");
+ ASSERT_EQ(skel->bss->t2_c, 4, "t2:c");
+ ASSERT_EQ(skel->bss->t2_ret, 10, "t2 ret");
+
+ ASSERT_EQ(skel->bss->t3_a, 1, "t3:a");
+ ASSERT_EQ(skel->bss->t3_b, 4, "t3:b");
+ ASSERT_EQ(skel->bss->t3_c_a, 2, "t3:c.a");
+ ASSERT_EQ(skel->bss->t3_c_b, 3, "t3:c.b");
+ ASSERT_EQ(skel->bss->t3_ret, 10, "t3 ret");
+
+ ASSERT_EQ(skel->bss->t4_a_a, 10, "t4:a.a");
+ ASSERT_EQ(skel->bss->t4_b, 1, "t4:b");
+ ASSERT_EQ(skel->bss->t4_c, 2, "t4:c");
+ ASSERT_EQ(skel->bss->t4_d, 3, "t4:d");
+ ASSERT_EQ(skel->bss->t4_e_a, 2, "t4:e.a");
+ ASSERT_EQ(skel->bss->t4_e_b, 3, "t4:e.b");
+ ASSERT_EQ(skel->bss->t4_ret, 21, "t4 ret");
+
+ ASSERT_EQ(skel->bss->t5_ret, 1, "t5 ret");
+
+ ASSERT_EQ(skel->bss->t6, 1, "t6 ret");
+
+destroy_skel:
+ tracing_struct__destroy(skel);
+}
+
+static void test_struct_many_args(void)
+{
+ struct tracing_struct_many_args *skel;
+ int err;
+
+ skel = tracing_struct_many_args__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "tracing_struct_many_args__open_and_load"))
+ return;
+
+ err = tracing_struct_many_args__attach(skel);
+ if (!ASSERT_OK(err, "tracing_struct_many_args__attach"))
+ goto destroy_skel;
+
+ ASSERT_OK(trigger_module_test_read(256), "trigger_read");
+
+ ASSERT_EQ(skel->bss->t7_a, 16, "t7:a");
+ ASSERT_EQ(skel->bss->t7_b, 17, "t7:b");
+ ASSERT_EQ(skel->bss->t7_c, 18, "t7:c");
+ ASSERT_EQ(skel->bss->t7_d, 19, "t7:d");
+ ASSERT_EQ(skel->bss->t7_e, 20, "t7:e");
+ ASSERT_EQ(skel->bss->t7_f_a, 21, "t7:f.a");
+ ASSERT_EQ(skel->bss->t7_f_b, 22, "t7:f.b");
+ ASSERT_EQ(skel->bss->t7_ret, 133, "t7 ret");
+
+ ASSERT_EQ(skel->bss->t8_a, 16, "t8:a");
+ ASSERT_EQ(skel->bss->t8_b, 17, "t8:b");
+ ASSERT_EQ(skel->bss->t8_c, 18, "t8:c");
+ ASSERT_EQ(skel->bss->t8_d, 19, "t8:d");
+ ASSERT_EQ(skel->bss->t8_e, 20, "t8:e");
+ ASSERT_EQ(skel->bss->t8_f_a, 21, "t8:f.a");
+ ASSERT_EQ(skel->bss->t8_f_b, 22, "t8:f.b");
+ ASSERT_EQ(skel->bss->t8_g, 23, "t8:g");
+ ASSERT_EQ(skel->bss->t8_ret, 156, "t8 ret");
+
+ ASSERT_EQ(skel->bss->t9_a, 16, "t9:a");
+ ASSERT_EQ(skel->bss->t9_b, 17, "t9:b");
+ ASSERT_EQ(skel->bss->t9_c, 18, "t9:c");
+ ASSERT_EQ(skel->bss->t9_d, 19, "t9:d");
+ ASSERT_EQ(skel->bss->t9_e, 20, "t9:e");
+ ASSERT_EQ(skel->bss->t9_f, 21, "t9:f");
+ ASSERT_EQ(skel->bss->t9_g, 22, "t9:g");
+ ASSERT_EQ(skel->bss->t9_h_a, 23, "t9:h.a");
+ ASSERT_EQ(skel->bss->t9_h_b, 24, "t9:h.b");
+ ASSERT_EQ(skel->bss->t9_h_c, 25, "t9:h.c");
+ ASSERT_EQ(skel->bss->t9_h_d, 26, "t9:h.d");
+ ASSERT_EQ(skel->bss->t9_i, 27, "t9:i");
+ ASSERT_EQ(skel->bss->t9_ret, 258, "t9 ret");
+
+destroy_skel:
+ tracing_struct_many_args__destroy(skel);
+}
+
+static void test_union_args(void)
+{
+ struct tracing_struct *skel;
+ int err;
+
+ skel = tracing_struct__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "tracing_struct__open_and_load"))
+ return;
+
+ err = tracing_struct__attach(skel);
+ if (!ASSERT_OK(err, "tracing_struct__attach"))
+ goto out;
+
+ ASSERT_OK(trigger_module_test_read(256), "trigger_read");
+
+ ASSERT_EQ(skel->bss->ut1_a_a, 1, "ut1:a.arg.a");
+ ASSERT_EQ(skel->bss->ut1_b, 4, "ut1:b");
+ ASSERT_EQ(skel->bss->ut1_c, 5, "ut1:c");
+
+ ASSERT_EQ(skel->bss->ut2_a, 6, "ut2:a");
+ ASSERT_EQ(skel->bss->ut2_b_a, 2, "ut2:b.arg.a");
+ ASSERT_EQ(skel->bss->ut2_b_b, 3, "ut2:b.arg.b");
+
+out:
+ tracing_struct__destroy(skel);
+}
+
+void test_tracing_struct(void)
+{
+ if (test__start_subtest("struct_args"))
+ test_struct_args();
+ if (test__start_subtest("struct_many_args"))
+ test_struct_many_args();
+ if (test__start_subtest("union_args"))
+ test_union_args();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
index 781c8d11604b..6cd7349d4a2b 100644
--- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
+++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
@@ -1,123 +1,100 @@
// SPDX-License-Identifier: GPL-2.0-only
#define _GNU_SOURCE
-#include <sched.h>
-#include <sys/prctl.h>
#include <test_progs.h>
-#define MAX_TRAMP_PROGS 40
-
struct inst {
struct bpf_object *obj;
- struct bpf_link *link_fentry;
- struct bpf_link *link_fexit;
+ struct bpf_link *link;
};
-static int test_task_rename(void)
-{
- int fd, duration = 0, err;
- char buf[] = "test_overhead";
-
- fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
- if (CHECK(fd < 0, "open /proc", "err %d", errno))
- return -1;
- err = write(fd, buf, sizeof(buf));
- if (err < 0) {
- CHECK(err < 0, "task rename", "err %d", errno);
- close(fd);
- return -1;
- }
- close(fd);
- return 0;
-}
-
-static struct bpf_link *load(struct bpf_object *obj, const char *name)
+static struct bpf_program *load_prog(char *file, char *name, struct inst *inst)
{
+ struct bpf_object *obj;
struct bpf_program *prog;
- int duration = 0;
+ int err;
+
+ obj = bpf_object__open_file(file, NULL);
+ if (!ASSERT_OK_PTR(obj, "obj_open_file"))
+ return NULL;
- prog = bpf_object__find_program_by_title(obj, name);
- if (CHECK(!prog, "find_probe", "prog '%s' not found\n", name))
- return ERR_PTR(-EINVAL);
- return bpf_program__attach_trace(prog);
+ inst->obj = obj;
+
+ err = bpf_object__load(obj);
+ if (!ASSERT_OK(err, "obj_load"))
+ return NULL;
+
+ prog = bpf_object__find_program_by_name(obj, name);
+ if (!ASSERT_OK_PTR(prog, "obj_find_prog"))
+ return NULL;
+
+ return prog;
}
-void test_trampoline_count(void)
+/* TODO: use different target function to run in concurrent mode */
+void serial_test_trampoline_count(void)
{
- const char *fentry_name = "fentry/__set_task_comm";
- const char *fexit_name = "fexit/__set_task_comm";
- const char *object = "test_trampoline_count.o";
- struct inst inst[MAX_TRAMP_PROGS] = {};
- int err, i = 0, duration = 0;
- struct bpf_object *obj;
+ char *file = "test_trampoline_count.bpf.o";
+ char *const progs[] = { "fentry_test", "fmod_ret_test", "fexit_test" };
+ int bpf_max_tramp_links, err, i, prog_fd;
+ struct bpf_program *prog;
struct bpf_link *link;
- char comm[16] = {};
-
- /* attach 'allowed' 40 trampoline programs */
- for (i = 0; i < MAX_TRAMP_PROGS; i++) {
- obj = bpf_object__open_file(object, NULL);
- if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) {
- obj = NULL;
+ struct inst *inst;
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+
+ bpf_max_tramp_links = get_bpf_max_tramp_links();
+ if (!ASSERT_GE(bpf_max_tramp_links, 1, "bpf_max_tramp_links"))
+ return;
+ inst = calloc(bpf_max_tramp_links + 1, sizeof(*inst));
+ if (!ASSERT_OK_PTR(inst, "inst"))
+ return;
+
+ /* attach 'allowed' trampoline programs */
+ for (i = 0; i < bpf_max_tramp_links; i++) {
+ prog = load_prog(file, progs[i % ARRAY_SIZE(progs)], &inst[i]);
+ if (!prog)
goto cleanup;
- }
- err = bpf_object__load(obj);
- if (CHECK(err, "obj_load", "err %d\n", err))
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "attach_prog"))
goto cleanup;
- inst[i].obj = obj;
- obj = NULL;
-
- if (rand() % 2) {
- link = load(inst[i].obj, fentry_name);
- if (CHECK(IS_ERR(link), "attach prog", "err %ld\n", PTR_ERR(link))) {
- link = NULL;
- goto cleanup;
- }
- inst[i].link_fentry = link;
- } else {
- link = load(inst[i].obj, fexit_name);
- if (CHECK(IS_ERR(link), "attach prog", "err %ld\n", PTR_ERR(link))) {
- link = NULL;
- goto cleanup;
- }
- inst[i].link_fexit = link;
- }
+
+ inst[i].link = link;
}
/* and try 1 extra.. */
- obj = bpf_object__open_file(object, NULL);
- if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) {
- obj = NULL;
+ prog = load_prog(file, "fmod_ret_test", &inst[i]);
+ if (!prog)
goto cleanup;
- }
-
- err = bpf_object__load(obj);
- if (CHECK(err, "obj_load", "err %d\n", err))
- goto cleanup_extra;
/* ..that needs to fail */
- link = load(obj, fentry_name);
- if (CHECK(!IS_ERR(link), "cannot attach over the limit", "err %ld\n", PTR_ERR(link))) {
- bpf_link__destroy(link);
- goto cleanup_extra;
+ link = bpf_program__attach(prog);
+ if (!ASSERT_ERR_PTR(link, "attach_prog")) {
+ inst[i].link = link;
+ goto cleanup;
}
/* with E2BIG error */
- CHECK(PTR_ERR(link) != -E2BIG, "proper error check", "err %ld\n", PTR_ERR(link));
+ if (!ASSERT_EQ(libbpf_get_error(link), -E2BIG, "E2BIG"))
+ goto cleanup;
+ if (!ASSERT_EQ(link, NULL, "ptr_is_null"))
+ goto cleanup;
+
+ /* and finally execute the probe */
+ prog_fd = bpf_program__fd(prog);
+ if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd"))
+ goto cleanup;
+
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ goto cleanup;
- /* and finaly execute the probe */
- if (CHECK_FAIL(prctl(PR_GET_NAME, comm, 0L, 0L, 0L)))
- goto cleanup_extra;
- CHECK_FAIL(test_task_rename());
- CHECK_FAIL(prctl(PR_SET_NAME, comm, 0L, 0L, 0L));
+ ASSERT_EQ(opts.retval & 0xffff, 33, "bpf_modify_return_test.result");
+ ASSERT_EQ(opts.retval >> 16, 2, "bpf_modify_return_test.side_effect");
-cleanup_extra:
- bpf_object__close(obj);
cleanup:
- if (i >= MAX_TRAMP_PROGS)
- i = MAX_TRAMP_PROGS - 1;
for (; i >= 0; i--) {
- bpf_link__destroy(inst[i].link_fentry);
- bpf_link__destroy(inst[i].link_fexit);
+ bpf_link__destroy(inst[i].link);
bpf_object__close(inst[i].obj);
}
+ free(inst);
}
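
The helper get_bpf_max_tramp_links() is used above but presumably defined earlier in this file, outside the hunk. A minimal sketch of the idea, assuming the limit is read from the BPF_MAX_TRAMP_LINKS enumerator in vmlinux BTF via libbpf's BTF API (the function name here is illustrative, not the patch's actual helper):

#include <string.h>
#include <bpf/btf.h>

static int read_max_tramp_links(void)
{
	const struct btf_type *t;
	const struct btf_enum *e;
	struct btf *btf;
	int i, j, n, ret = -1;

	btf = btf__load_vmlinux_btf();
	if (!btf)
		return -1;

	n = btf__type_cnt(btf);
	for (i = 1; i < n && ret < 0; i++) {
		t = btf__type_by_id(btf, i);
		if (!t || !btf_is_enum(t))
			continue;
		e = btf_enum(t);
		for (j = 0; j < btf_vlen(t); j++, e++) {
			const char *name = btf__name_by_offset(btf, e->name_off);

			/* BPF_MAX_TRAMP_LINKS lives in an anonymous kernel enum */
			if (name && !strcmp(name, "BPF_MAX_TRAMP_LINKS")) {
				ret = e->val;
				break;
			}
		}
	}
	btf__free(btf);
	return ret;
}
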
diff --git a/tools/testing/selftests/bpf/prog_tests/type_cast.c b/tools/testing/selftests/bpf/prog_tests/type_cast.c
new file mode 100644
index 000000000000..9317d5fa2635
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/type_cast.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "type_cast.skel.h"
+
+static void test_xdp(void)
+{
+ struct type_cast *skel;
+ int err, prog_fd;
+ char buf[128];
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = buf,
+ .data_size_out = sizeof(buf),
+ .repeat = 1,
+ );
+
+ skel = type_cast__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.md_xdp, true);
+ err = type_cast__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs.md_xdp);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, XDP_PASS, "xdp test_run retval");
+
+ ASSERT_EQ(skel->bss->ifindex, 1, "xdp_md ifindex");
+ ASSERT_EQ(skel->bss->ifindex, skel->bss->ingress_ifindex, "xdp_md ingress_ifindex");
+ ASSERT_STREQ(skel->bss->name, "lo", "xdp_md name");
+ ASSERT_NEQ(skel->bss->inum, 0, "xdp_md inum");
+
+out:
+ type_cast__destroy(skel);
+}
+
+static void test_tc(void)
+{
+ struct type_cast *skel;
+ int err, prog_fd;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ skel = type_cast__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.md_skb, true);
+ err = type_cast__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs.md_skb);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "tc test_run retval");
+
+ ASSERT_EQ(skel->bss->meta_len, 0, "skb meta_len");
+ ASSERT_EQ(skel->bss->frag0_len, 0, "skb frag0_len");
+ ASSERT_NEQ(skel->bss->kskb_len, 0, "skb len");
+ ASSERT_NEQ(skel->bss->kskb2_len, 0, "skb2 len");
+ ASSERT_EQ(skel->bss->kskb_len, skel->bss->kskb2_len, "skb len compare");
+
+out:
+ type_cast__destroy(skel);
+}
+
+static const char * const negative_tests[] = {
+ "untrusted_ptr",
+ "kctx_u64",
+};
+
+static void test_negative(void)
+{
+ struct bpf_program *prog;
+ struct type_cast *skel;
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(negative_tests); i++) {
+ skel = type_cast__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, negative_tests[i]);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto out;
+ bpf_program__set_autoload(prog, true);
+ err = type_cast__load(skel);
+ ASSERT_ERR(err, "skel_load");
+out:
+ type_cast__destroy(skel);
+ }
+}
+
+void test_type_cast(void)
+{
+ if (test__start_subtest("xdp"))
+ test_xdp();
+ if (test__start_subtest("tc"))
+ test_tc();
+ if (test__start_subtest("negative"))
+ test_negative();
+}
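
The BPF side (progs/type_cast.c) is not part of this hunk. The md_xdp program plausibly relies on the bpf_cast_to_kern_ctx() kfunc, which converts the UAPI xdp_md context into the kernel's struct xdp_buff so that device fields become directly readable; a sketch of that shape, not the actual program:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern void *bpf_cast_to_kern_ctx(void *obj) __ksym;

__u32 ifindex, ingress_ifindex;

SEC("?xdp")
int md_xdp(struct xdp_md *ctx)
{
	/* the cast yields a trusted PTR_TO_BTF_ID, so plain dereferences work */
	struct xdp_buff *kctx = bpf_cast_to_kern_ctx(ctx);

	ifindex = kctx->rxq->dev->ifindex;
	ingress_ifindex = ctx->ingress_ifindex;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
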
diff --git a/tools/testing/selftests/bpf/prog_tests/udp_limit.c b/tools/testing/selftests/bpf/prog_tests/udp_limit.c
index 2aba09d4d01b..2643d896ddae 100644
--- a/tools/testing/selftests/bpf/prog_tests/udp_limit.c
+++ b/tools/testing/selftests/bpf/prog_tests/udp_limit.c
@@ -5,8 +5,6 @@
#include <sys/types.h>
#include <sys/socket.h>
-static int duration;
-
void test_udp_limit(void)
{
struct udp_limit *skel;
@@ -14,30 +12,29 @@ void test_udp_limit(void)
int cgroup_fd;
cgroup_fd = test__join_cgroup("/udp_limit");
- if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno))
+ if (!ASSERT_GE(cgroup_fd, 0, "cg-join"))
return;
skel = udp_limit__open_and_load();
- if (CHECK(!skel, "skel-load", "errno %d", errno))
+ if (!ASSERT_OK_PTR(skel, "skel-load"))
goto close_cgroup_fd;
skel->links.sock = bpf_program__attach_cgroup(skel->progs.sock, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.sock, "cg_attach_sock"))
+ goto close_skeleton;
skel->links.sock_release = bpf_program__attach_cgroup(skel->progs.sock_release, cgroup_fd);
- if (CHECK(IS_ERR(skel->links.sock) || IS_ERR(skel->links.sock_release),
- "cg-attach", "sock %ld sock_release %ld",
- PTR_ERR(skel->links.sock),
- PTR_ERR(skel->links.sock_release)))
+ if (!ASSERT_OK_PTR(skel->links.sock_release, "cg_attach_sock_release"))
goto close_skeleton;
/* BPF program enforces a single UDP socket per cgroup,
* verify that.
*/
fd1 = socket(AF_INET, SOCK_DGRAM, 0);
- if (CHECK(fd1 < 0, "fd1", "errno %d", errno))
+ if (!ASSERT_GE(fd1, 0, "socket(fd1)"))
goto close_skeleton;
fd2 = socket(AF_INET, SOCK_DGRAM, 0);
- if (CHECK(fd2 >= 0, "fd2", "errno %d", errno))
+ if (!ASSERT_LT(fd2, 0, "socket(fd2)"))
goto close_skeleton;
/* We can reopen again after close. */
@@ -45,7 +42,7 @@ void test_udp_limit(void)
fd1 = -1;
fd1 = socket(AF_INET, SOCK_DGRAM, 0);
- if (CHECK(fd1 < 0, "fd1-again", "errno %d", errno))
+ if (!ASSERT_GE(fd1, 0, "socket(fd1-again)"))
goto close_skeleton;
/* Make sure the program was invoked the expected
@@ -55,13 +52,11 @@ void test_udp_limit(void)
* - close fd1 - BPF_CGROUP_INET_SOCK_RELEASE
* - open fd1 again - BPF_CGROUP_INET_SOCK_CREATE
*/
- if (CHECK(skel->bss->invocations != 4, "bss-invocations",
- "invocations=%d", skel->bss->invocations))
+ if (!ASSERT_EQ(skel->bss->invocations, 4, "bss-invocations"))
goto close_skeleton;
/* We should still have a single socket in use */
- if (CHECK(skel->bss->in_use != 1, "bss-in_use",
- "in_use=%d", skel->bss->in_use))
+ if (!ASSERT_EQ(skel->bss->in_use, 1, "bss-in_use"))
goto close_skeleton;
close_skeleton:
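
The BPF side (progs/udp_limit.c) is not part of this hunk. For context, the enforcement it performs plausibly amounts to a pair of cgroup sock programs along these lines, using the usual verdict convention for cgroup sock hooks (return 1 to allow, 0 to reject); a sketch, not the actual program:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

int invocations, in_use;

SEC("cgroup/sock_create")
int sock(struct bpf_sock *ctx)
{
	invocations++;		/* the test above asserts this reaches 4 */
	if (in_use > 0)
		return 0;	/* a second socket in the cgroup is rejected */
	in_use++;
	return 1;
}

SEC("cgroup/sock_release")
int sock_release(struct bpf_sock *ctx)
{
	invocations++;
	in_use--;
	return 1;
}

char _license[] SEC("license") = "GPL";
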
diff --git a/tools/testing/selftests/bpf/prog_tests/uninit_stack.c b/tools/testing/selftests/bpf/prog_tests/uninit_stack.c
new file mode 100644
index 000000000000..e64c71948491
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/uninit_stack.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "uninit_stack.skel.h"
+
+void test_uninit_stack(void)
+{
+ RUN_TESTS(uninit_stack);
+}
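
RUN_TESTS() drives the declarative test_loader machinery: each program in the skeleton is loaded and its verifier verdict (and, where requested, its verifier log) is checked against __success/__failure/__msg() annotations in the BPF source, which come from progs/bpf_misc.h. A hypothetical annotated program, for illustration only:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

char _license[] SEC("license") = "GPL";

SEC("socket")
__description("sketch: trivially accepted program")
__success
int accepted_example(void *ctx)
{
	return 0;
}
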
diff --git a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
new file mode 100644
index 000000000000..472f4f9fa95f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Oracle and/or its affiliates. */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "test_unpriv_bpf_disabled.skel.h"
+
+#include "cap_helpers.h"
+#include "bpf_util.h"
+
+/* Using CAP_LAST_CAP is risky here, since it can get pulled in from
+ * an old /usr/include/linux/capability.h and be < CAP_BPF; as a result
+ * CAP_BPF would not be included in ALL_CAPS. Instead use CAP_BPF as
+ * we know its value is correct since it is explicitly defined in
+ * cap_helpers.h.
+ */
+#define ALL_CAPS ((2ULL << CAP_BPF) - 1)
+
+#define PINPATH "/sys/fs/bpf/unpriv_bpf_disabled_"
+#define NUM_MAPS 7
+
+static __u32 got_perfbuf_val;
+static __u32 got_ringbuf_val;
+
+static int process_ringbuf(void *ctx, void *data, size_t len)
+{
+ if (ASSERT_EQ(len, sizeof(__u32), "ringbuf_size_valid"))
+ got_ringbuf_val = *(__u32 *)data;
+ return 0;
+}
+
+static void process_perfbuf(void *ctx, int cpu, void *data, __u32 len)
+{
+ if (ASSERT_EQ(len, sizeof(__u32), "perfbuf_size_valid"))
+ got_perfbuf_val = *(__u32 *)data;
+}
+
+static int sysctl_set(const char *sysctl_path, char *old_val, const char *new_val)
+{
+ int ret = 0;
+ FILE *fp;
+
+ fp = fopen(sysctl_path, "r+");
+ if (!fp)
+ return -errno;
+ if (old_val && fscanf(fp, "%s", old_val) <= 0) {
+ ret = -ENOENT;
+ } else if (!old_val || strcmp(old_val, new_val) != 0) {
+ fseek(fp, 0, SEEK_SET);
+ if (fprintf(fp, "%s", new_val) < 0)
+ ret = -errno;
+ }
+ fclose(fp);
+
+ return ret;
+}
+
+static void test_unpriv_bpf_disabled_positive(struct test_unpriv_bpf_disabled *skel,
+ __u32 prog_id, int prog_fd, int perf_fd,
+ char **map_paths, int *map_fds)
+{
+ struct perf_buffer *perfbuf = NULL;
+ struct ring_buffer *ringbuf = NULL;
+ int i, nr_cpus, link_fd = -1;
+
+ nr_cpus = bpf_num_possible_cpus();
+
+ skel->bss->perfbuf_val = 1;
+ skel->bss->ringbuf_val = 2;
+
+ /* Positive tests for unprivileged BPF disabled. Verify we can
+ * - retrieve and interact with pinned maps;
+ * - set up and interact with perf buffer;
+ * - set up and interact with ring buffer;
+ * - create a link
+ */
+ perfbuf = perf_buffer__new(bpf_map__fd(skel->maps.perfbuf), 8, process_perfbuf, NULL, NULL,
+ NULL);
+ if (!ASSERT_OK_PTR(perfbuf, "perf_buffer__new"))
+ goto cleanup;
+
+ ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf), process_ringbuf, NULL, NULL);
+ if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
+ goto cleanup;
+
+ /* trigger & validate perf event, ringbuf output */
+ usleep(1);
+
+ ASSERT_GT(perf_buffer__poll(perfbuf, 100), -1, "perf_buffer__poll");
+ ASSERT_EQ(got_perfbuf_val, skel->bss->perfbuf_val, "check_perfbuf_val");
+ ASSERT_EQ(ring_buffer__consume(ringbuf), 1, "ring_buffer__consume");
+ ASSERT_EQ(got_ringbuf_val, skel->bss->ringbuf_val, "check_ringbuf_val");
+
+ for (i = 0; i < NUM_MAPS; i++) {
+ map_fds[i] = bpf_obj_get(map_paths[i]);
+ if (!ASSERT_GT(map_fds[i], -1, "obj_get"))
+ goto cleanup;
+ }
+
+ for (i = 0; i < NUM_MAPS; i++) {
+ bool prog_array = strstr(map_paths[i], "prog_array") != NULL;
+ bool array = strstr(map_paths[i], "array") != NULL;
+ bool buf = strstr(map_paths[i], "buf") != NULL;
+ __u32 key = 0, vals[nr_cpus], lookup_vals[nr_cpus];
+ __u32 expected_val = 1;
+ int j;
+
+ /* skip ringbuf, perfbuf */
+ if (buf)
+ continue;
+
+ for (j = 0; j < nr_cpus; j++)
+ vals[j] = expected_val;
+
+ if (prog_array) {
+ /* need valid prog array value */
+ vals[0] = prog_fd;
+ /* prog array lookup returns prog id, not fd */
+ expected_val = prog_id;
+ }
+ ASSERT_OK(bpf_map_update_elem(map_fds[i], &key, vals, 0), "map_update_elem");
+ ASSERT_OK(bpf_map_lookup_elem(map_fds[i], &key, &lookup_vals), "map_lookup_elem");
+ ASSERT_EQ(lookup_vals[0], expected_val, "map_lookup_elem_values");
+ if (!array)
+ ASSERT_OK(bpf_map_delete_elem(map_fds[i], &key), "map_delete_elem");
+ }
+
+ link_fd = bpf_link_create(bpf_program__fd(skel->progs.handle_perf_event), perf_fd,
+ BPF_PERF_EVENT, NULL);
+ ASSERT_GT(link_fd, 0, "link_create");
+
+cleanup:
+ if (link_fd >= 0)
+ close(link_fd);
+ if (perfbuf)
+ perf_buffer__free(perfbuf);
+ if (ringbuf)
+ ring_buffer__free(ringbuf);
+}
+
+static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *skel,
+ __u32 prog_id, int prog_fd, int perf_fd,
+ char **map_paths, int *map_fds)
+{
+ const struct bpf_insn prog_insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ const size_t prog_insn_cnt = ARRAY_SIZE(prog_insns);
+ LIBBPF_OPTS(bpf_prog_load_opts, load_opts);
+ struct bpf_map_info map_info = {};
+ __u32 map_info_len = sizeof(map_info);
+ struct bpf_link_info link_info = {};
+ __u32 link_info_len = sizeof(link_info);
+ struct btf *btf = NULL;
+ __u32 attach_flags = 0;
+ __u32 prog_ids[3] = {};
+ __u32 prog_cnt = 3;
+ __u32 next;
+ int i;
+
+ /* Negative tests for unprivileged BPF disabled. Verify we cannot
+ * - load BPF programs;
+ * - create BPF maps;
+ * - get a prog/map/link fd by id;
+ * - get the next prog/map/link id;
+ * - query progs;
+ * - load BTF.
+ */
+ ASSERT_EQ(bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "simple_prog", "GPL",
+ prog_insns, prog_insn_cnt, &load_opts),
+ -EPERM, "prog_load_fails");
+
+ /* some map types require particular correct parameters which could be
+ * sanity-checked before enforcing -EPERM, so only validate that
+ * the simple ARRAY and HASH maps are failing with -EPERM
+ */
+ for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_ARRAY; i++)
+ ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL),
+ -EPERM, "map_create_fails");
+
+ ASSERT_EQ(bpf_prog_get_fd_by_id(prog_id), -EPERM, "prog_get_fd_by_id_fails");
+ ASSERT_EQ(bpf_prog_get_next_id(prog_id, &next), -EPERM, "prog_get_next_id_fails");
+ ASSERT_EQ(bpf_prog_get_next_id(0, &next), -EPERM, "prog_get_next_id_fails");
+
+ if (ASSERT_OK(bpf_map_get_info_by_fd(map_fds[0], &map_info, &map_info_len),
+ "obj_get_info_by_fd")) {
+ ASSERT_EQ(bpf_map_get_fd_by_id(map_info.id), -EPERM, "map_get_fd_by_id_fails");
+ ASSERT_EQ(bpf_map_get_next_id(map_info.id, &next), -EPERM,
+ "map_get_next_id_fails");
+ }
+ ASSERT_EQ(bpf_map_get_next_id(0, &next), -EPERM, "map_get_next_id_fails");
+
+ if (ASSERT_OK(bpf_link_get_info_by_fd(bpf_link__fd(skel->links.sys_nanosleep_enter),
+ &link_info, &link_info_len),
+ "obj_get_info_by_fd")) {
+ ASSERT_EQ(bpf_link_get_fd_by_id(link_info.id), -EPERM, "link_get_fd_by_id_fails");
+ ASSERT_EQ(bpf_link_get_next_id(link_info.id, &next), -EPERM,
+ "link_get_next_id_fails");
+ }
+ ASSERT_EQ(bpf_link_get_next_id(0, &next), -EPERM, "link_get_next_id_fails");
+
+ ASSERT_EQ(bpf_prog_query(prog_fd, BPF_TRACE_FENTRY, 0, &attach_flags, prog_ids,
+ &prog_cnt), -EPERM, "prog_query_fails");
+
+ btf = btf__new_empty();
+ if (ASSERT_OK_PTR(btf, "empty_btf") &&
+ ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "unpriv_int_type")) {
+ const void *raw_btf_data;
+ __u32 raw_btf_size;
+
+ raw_btf_data = btf__raw_data(btf, &raw_btf_size);
+ if (ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good"))
+ ASSERT_EQ(bpf_btf_load(raw_btf_data, raw_btf_size, NULL), -EPERM,
+ "bpf_btf_load_fails");
+ }
+ btf__free(btf);
+}
+
+void test_unpriv_bpf_disabled(void)
+{
+ char *map_paths[NUM_MAPS] = { PINPATH "array",
+ PINPATH "percpu_array",
+ PINPATH "hash",
+ PINPATH "percpu_hash",
+ PINPATH "perfbuf",
+ PINPATH "ringbuf",
+ PINPATH "prog_array" };
+ int map_fds[NUM_MAPS];
+ struct test_unpriv_bpf_disabled *skel;
+ char unprivileged_bpf_disabled_orig[32] = {};
+ char perf_event_paranoid_orig[32] = {};
+ struct bpf_prog_info prog_info = {};
+ __u32 prog_info_len = sizeof(prog_info);
+ struct perf_event_attr attr = {};
+ int prog_fd, perf_fd = -1, i, ret;
+ __u64 save_caps = 0;
+ __u32 prog_id;
+
+ skel = test_unpriv_bpf_disabled__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->bss->test_pid = getpid();
+
+ map_fds[0] = bpf_map__fd(skel->maps.array);
+ map_fds[1] = bpf_map__fd(skel->maps.percpu_array);
+ map_fds[2] = bpf_map__fd(skel->maps.hash);
+ map_fds[3] = bpf_map__fd(skel->maps.percpu_hash);
+ map_fds[4] = bpf_map__fd(skel->maps.perfbuf);
+ map_fds[5] = bpf_map__fd(skel->maps.ringbuf);
+ map_fds[6] = bpf_map__fd(skel->maps.prog_array);
+
+ for (i = 0; i < NUM_MAPS; i++)
+ ASSERT_OK(bpf_obj_pin(map_fds[i], map_paths[i]), "pin map_fd");
+
+ /* allow user without caps to use perf events */
+ if (!ASSERT_OK(sysctl_set("/proc/sys/kernel/perf_event_paranoid", perf_event_paranoid_orig,
+ "-1"),
+ "set_perf_event_paranoid"))
+ goto cleanup;
+ /* ensure unprivileged bpf disabled is set */
+ ret = sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled",
+ unprivileged_bpf_disabled_orig, "2");
+ if (ret == -EPERM) {
+ /* if unprivileged_bpf_disabled=1, we get -EPERM back; that's okay. */
+ if (!ASSERT_OK(strcmp(unprivileged_bpf_disabled_orig, "1"),
+ "unprivileged_bpf_disabled_on"))
+ goto cleanup;
+ } else {
+ if (!ASSERT_OK(ret, "set unprivileged_bpf_disabled"))
+ goto cleanup;
+ }
+
+ prog_fd = bpf_program__fd(skel->progs.sys_nanosleep_enter);
+ ASSERT_OK(bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len),
+ "obj_get_info_by_fd");
+ prog_id = prog_info.id;
+ ASSERT_GT(prog_id, 0, "valid_prog_id");
+
+ attr.size = sizeof(attr);
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.config = PERF_COUNT_SW_CPU_CLOCK;
+ attr.freq = 1;
+ attr.sample_freq = 1000;
+ perf_fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
+ if (!ASSERT_GE(perf_fd, 0, "perf_fd"))
+ goto cleanup;
+
+ if (!ASSERT_OK(test_unpriv_bpf_disabled__attach(skel), "skel_attach"))
+ goto cleanup;
+
+ if (!ASSERT_OK(cap_disable_effective(ALL_CAPS, &save_caps), "disable caps"))
+ goto cleanup;
+
+ if (test__start_subtest("unpriv_bpf_disabled_positive"))
+ test_unpriv_bpf_disabled_positive(skel, prog_id, prog_fd, perf_fd, map_paths,
+ map_fds);
+
+ if (test__start_subtest("unpriv_bpf_disabled_negative"))
+ test_unpriv_bpf_disabled_negative(skel, prog_id, prog_fd, perf_fd, map_paths,
+ map_fds);
+
+cleanup:
+ if (perf_fd >= 0)
+ close(perf_fd);
+ if (save_caps)
+ cap_enable_effective(save_caps, NULL);
+ if (strlen(perf_event_paranoid_orig) > 0)
+ sysctl_set("/proc/sys/kernel/perf_event_paranoid", NULL, perf_event_paranoid_orig);
+ if (strlen(unprivileged_bpf_disabled_orig) > 0)
+ sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled", NULL,
+ unprivileged_bpf_disabled_orig);
+ for (i = 0; i < NUM_MAPS; i++)
+ unlink(map_paths[i]);
+ test_unpriv_bpf_disabled__destroy(skel);
+}
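
cap_disable_effective()/cap_enable_effective() come from cap_helpers.h and are not shown in this hunk; in essence they wrap capget()/capset() on the current thread. A rough standalone sketch of the dropping half, with illustrative names:

#include <linux/capability.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Drop the given effective capabilities of the current thread; 'caps' is a
 * bitmask over capability numbers, matching the ALL_CAPS convention above.
 */
static int drop_effective_caps(__u64 caps, __u64 *old_caps)
{
	struct __user_cap_header_struct hdr = {
		.version = _LINUX_CAPABILITY_VERSION_3,
		.pid = 0,	/* current thread */
	};
	struct __user_cap_data_struct data[2];
	__u64 eff;

	if (syscall(SYS_capget, &hdr, data))
		return -1;
	eff = ((__u64)data[1].effective << 32) | data[0].effective;
	if (old_caps)
		*old_caps = eff & caps;
	eff &= ~caps;
	data[0].effective = (__u32)eff;
	data[1].effective = (__u32)(eff >> 32);
	return syscall(SYS_capset, &hdr, data);
}
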
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe.c b/tools/testing/selftests/bpf/prog_tests/uprobe.c
new file mode 100644
index 000000000000..86404476c1da
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Hengqi Chen */
+
+#include <test_progs.h>
+#include <asm/ptrace.h>
+#include "test_uprobe.skel.h"
+
+static FILE *urand_spawn(int *pid)
+{
+ FILE *f;
+
+ /* urandom_read's stdout is wired into f */
+ f = popen("./urandom_read 1 report-pid", "r");
+ if (!f)
+ return NULL;
+
+ if (fscanf(f, "%d", pid) != 1) {
+ pclose(f);
+ errno = EINVAL;
+ return NULL;
+ }
+
+ return f;
+}
+
+static int urand_trigger(FILE **urand_pipe)
+{
+ int exit_code;
+
+ /* pclose() waits for the child process to exit and returns its exit code */
+ exit_code = pclose(*urand_pipe);
+ *urand_pipe = NULL;
+
+ return exit_code;
+}
+
+static void test_uprobe_attach(void)
+{
+ LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+ struct test_uprobe *skel;
+ FILE *urand_pipe = NULL;
+ int urand_pid = 0, err;
+
+ skel = test_uprobe__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ urand_pipe = urand_spawn(&urand_pid);
+ if (!ASSERT_OK_PTR(urand_pipe, "urand_spawn"))
+ goto cleanup;
+
+ skel->bss->my_pid = urand_pid;
+
+ /* Manually attach a uprobe to urandlib_api.
+ * There are two `urandlib_api` symbols in the .dynsym section:
+ * - urandlib_api@LIBURANDOM_READ_1.0.0
+ * - urandlib_api@@LIBURANDOM_READ_2.0.0
+ * Both have global binding and cause a conflict if the user
+ * specifies the symbol name without a version suffix
+ */
+ uprobe_opts.func_name = "urandlib_api";
+ skel->links.test4 = bpf_program__attach_uprobe_opts(skel->progs.test4,
+ urand_pid,
+ "./liburandom_read.so",
+ 0 /* offset */,
+ &uprobe_opts);
+ if (!ASSERT_ERR_PTR(skel->links.test4, "urandlib_api_attach_conflict"))
+ goto cleanup;
+
+ uprobe_opts.func_name = "urandlib_api@LIBURANDOM_READ_1.0.0";
+ skel->links.test4 = bpf_program__attach_uprobe_opts(skel->progs.test4,
+ urand_pid,
+ "./liburandom_read.so",
+ 0 /* offset */,
+ &uprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.test4, "urandlib_api_attach_ok"))
+ goto cleanup;
+
+ /* Auto attach 3 u[ret]probes to urandlib_api_sameoffset */
+ err = test_uprobe__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger urandom_read */
+ ASSERT_OK(urand_trigger(&urand_pipe), "urand_exit_code");
+
+ ASSERT_EQ(skel->bss->test1_result, 1, "urandlib_api_sameoffset");
+ ASSERT_EQ(skel->bss->test2_result, 1, "urandlib_api_sameoffset@v1");
+ ASSERT_EQ(skel->bss->test3_result, 3, "urandlib_api_sameoffset@@v2");
+ ASSERT_EQ(skel->bss->test4_result, 1, "urandlib_api");
+
+cleanup:
+ if (urand_pipe)
+ pclose(urand_pipe);
+ test_uprobe__destroy(skel);
+}
+
+#ifdef __x86_64__
+__naked __maybe_unused unsigned long uprobe_regs_change_trigger(void)
+{
+ asm volatile (
+ "ret\n"
+ );
+}
+
+static __naked void uprobe_regs_change(struct pt_regs *before, struct pt_regs *after)
+{
+ asm volatile (
+ "movq %r11, 48(%rdi)\n"
+ "movq %r10, 56(%rdi)\n"
+ "movq %r9, 64(%rdi)\n"
+ "movq %r8, 72(%rdi)\n"
+ "movq %rax, 80(%rdi)\n"
+ "movq %rcx, 88(%rdi)\n"
+ "movq %rdx, 96(%rdi)\n"
+ "movq %rsi, 104(%rdi)\n"
+ "movq %rdi, 112(%rdi)\n"
+
+ /* save 2nd argument */
+ "pushq %rsi\n"
+ "call uprobe_regs_change_trigger\n"
+
+ /* save return value and load 2nd argument pointer to rax */
+ "pushq %rax\n"
+ "movq 8(%rsp), %rax\n"
+
+ "movq %r11, 48(%rax)\n"
+ "movq %r10, 56(%rax)\n"
+ "movq %r9, 64(%rax)\n"
+ "movq %r8, 72(%rax)\n"
+ "movq %rcx, 88(%rax)\n"
+ "movq %rdx, 96(%rax)\n"
+ "movq %rsi, 104(%rax)\n"
+ "movq %rdi, 112(%rax)\n"
+
+ /* restore return value and 2nd argument */
+ "pop %rax\n"
+ "pop %rsi\n"
+
+ "movq %rax, 80(%rsi)\n"
+ "ret\n"
+ );
+}
+
+static void regs_common(void)
+{
+ struct pt_regs before = {}, after = {}, expected = {
+ .rax = 0xc0ffe,
+ .rcx = 0xbad,
+ .rdx = 0xdead,
+ .r8 = 0x8,
+ .r9 = 0x9,
+ .r10 = 0x10,
+ .r11 = 0x11,
+ .rdi = 0x12,
+ .rsi = 0x13,
+ };
+ LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+ struct test_uprobe *skel;
+
+ skel = test_uprobe__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->bss->my_pid = getpid();
+ skel->bss->regs = expected;
+
+ uprobe_opts.func_name = "uprobe_regs_change_trigger";
+ skel->links.test_regs_change = bpf_program__attach_uprobe_opts(skel->progs.test_regs_change,
+ -1,
+ "/proc/self/exe",
+ 0 /* offset */,
+ &uprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.test_regs_change, "bpf_program__attach_uprobe_opts"))
+ goto cleanup;
+
+ uprobe_regs_change(&before, &after);
+
+ ASSERT_EQ(after.rax, expected.rax, "ax");
+ ASSERT_EQ(after.rcx, expected.rcx, "cx");
+ ASSERT_EQ(after.rdx, expected.rdx, "dx");
+ ASSERT_EQ(after.r8, expected.r8, "r8");
+ ASSERT_EQ(after.r9, expected.r9, "r9");
+ ASSERT_EQ(after.r10, expected.r10, "r10");
+ ASSERT_EQ(after.r11, expected.r11, "r11");
+ ASSERT_EQ(after.rdi, expected.rdi, "rdi");
+ ASSERT_EQ(after.rsi, expected.rsi, "rsi");
+
+cleanup:
+ test_uprobe__destroy(skel);
+}
+
+static noinline unsigned long uprobe_regs_change_ip_1(void)
+{
+ return 0xc0ffee;
+}
+
+static noinline unsigned long uprobe_regs_change_ip_2(void)
+{
+ return 0xdeadbeef;
+}
+
+static void regs_ip(void)
+{
+ LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+ struct test_uprobe *skel;
+ unsigned long ret;
+
+ skel = test_uprobe__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->bss->my_pid = getpid();
+ skel->bss->ip = (unsigned long) uprobe_regs_change_ip_2;
+
+ uprobe_opts.func_name = "uprobe_regs_change_ip_1";
+ skel->links.test_regs_change_ip = bpf_program__attach_uprobe_opts(
+ skel->progs.test_regs_change_ip,
+ -1,
+ "/proc/self/exe",
+ 0 /* offset */,
+ &uprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.test_regs_change_ip, "bpf_program__attach_uprobe_opts"))
+ goto cleanup;
+
+ ret = uprobe_regs_change_ip_1();
+ ASSERT_EQ(ret, 0xdeadbeef, "ret");
+
+cleanup:
+ test_uprobe__destroy(skel);
+}
+
+static void test_uprobe_regs_change(void)
+{
+ if (test__start_subtest("regs_change_common"))
+ regs_common();
+ if (test__start_subtest("regs_change_ip"))
+ regs_ip();
+}
+#else
+static void test_uprobe_regs_change(void) { }
+#endif
+
+void test_uprobe(void)
+{
+ if (test__start_subtest("attach"))
+ test_uprobe_attach();
+ test_uprobe_regs_change();
+}
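
The hardcoded displacements in uprobe_regs_change() above are offsetof(struct pt_regs, <reg>) for the x86-64 layout (r15 at offset 0 up through rdi at 112). A quick userspace check of that assumption:

#include <stdio.h>
#include <stddef.h>
#include <asm/ptrace.h>

int main(void)
{
	/* expected output: 48 56 64 72 80 88 96 104 112 */
	printf("%zu %zu %zu %zu %zu %zu %zu %zu %zu\n",
	       offsetof(struct pt_regs, r11), offsetof(struct pt_regs, r10),
	       offsetof(struct pt_regs, r9),  offsetof(struct pt_regs, r8),
	       offsetof(struct pt_regs, rax), offsetof(struct pt_regs, rcx),
	       offsetof(struct pt_regs, rdx), offsetof(struct pt_regs, rsi),
	       offsetof(struct pt_regs, rdi));
	return 0;
}
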
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c
new file mode 100644
index 000000000000..d5b3377aa33c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Oracle and/or its affiliates. */
+
+#include <test_progs.h>
+#include "test_uprobe_autoattach.skel.h"
+
+/* uprobe attach point */
+static noinline int autoattach_trigger_func(int arg1, int arg2, int arg3,
+ int arg4, int arg5, int arg6,
+ int arg7, int arg8)
+{
+ asm volatile ("");
+ return arg1 + arg2 + arg3 + arg4 + arg5 + arg6 + arg7 + arg8 + 1;
+}
+
+void test_uprobe_autoattach(void)
+{
+ const char *devnull_str = "/dev/null";
+ struct test_uprobe_autoattach *skel;
+ int trigger_ret;
+ FILE *devnull;
+
+ skel = test_uprobe_autoattach__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ if (!ASSERT_OK(test_uprobe_autoattach__attach(skel), "skel_attach"))
+ goto cleanup;
+
+ skel->bss->test_pid = getpid();
+
+ /* trigger & validate uprobe & uretprobe */
+ trigger_ret = autoattach_trigger_func(1, 2, 3, 4, 5, 6, 7, 8);
+
+ skel->bss->test_pid = getpid();
+
+ /* trigger & validate shared library u[ret]probes attached by name */
+ devnull = fopen(devnull_str, "r");
+ if (!ASSERT_OK_PTR(devnull, "open_devnull"))
+ goto cleanup;
+
+ ASSERT_EQ(skel->bss->uprobe_byname_parm1, 1, "check_uprobe_byname_parm1");
+ ASSERT_EQ(skel->bss->uprobe_byname_ran, 1, "check_uprobe_byname_ran");
+ ASSERT_EQ(skel->bss->uretprobe_byname_rc, trigger_ret, "check_uretprobe_byname_rc");
+ ASSERT_EQ(skel->bss->uretprobe_byname_ret, trigger_ret, "check_uretprobe_byname_ret");
+ ASSERT_EQ(skel->bss->uretprobe_byname_ran, 2, "check_uretprobe_byname_ran");
+ ASSERT_EQ(skel->bss->uprobe_byname2_parm1, (__u64)(long)devnull_str,
+ "check_uprobe_byname2_parm1");
+ ASSERT_EQ(skel->bss->uprobe_byname2_ran, 3, "check_uprobe_byname2_ran");
+ ASSERT_EQ(skel->bss->uretprobe_byname2_rc, (__u64)(long)devnull,
+ "check_uretprobe_byname2_rc");
+ ASSERT_EQ(skel->bss->uretprobe_byname2_ran, 4, "check_uretprobe_byname2_ran");
+
+ ASSERT_EQ(skel->bss->a[0], 1, "arg1");
+ ASSERT_EQ(skel->bss->a[1], 2, "arg2");
+ ASSERT_EQ(skel->bss->a[2], 3, "arg3");
+#if FUNC_REG_ARG_CNT > 3
+ ASSERT_EQ(skel->bss->a[3], 4, "arg4");
+#endif
+#if FUNC_REG_ARG_CNT > 4
+ ASSERT_EQ(skel->bss->a[4], 5, "arg5");
+#endif
+#if FUNC_REG_ARG_CNT > 5
+ ASSERT_EQ(skel->bss->a[5], 6, "arg6");
+#endif
+#if FUNC_REG_ARG_CNT > 6
+ ASSERT_EQ(skel->bss->a[6], 7, "arg7");
+#endif
+#if FUNC_REG_ARG_CNT > 7
+ ASSERT_EQ(skel->bss->a[7], 8, "arg8");
+#endif
+
+ fclose(devnull);
+cleanup:
+ test_uprobe_autoattach__destroy(skel);
+}
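
The auto-attach performed by test_uprobe_autoattach__attach() works because libbpf parses the binary path and symbol out of the SEC() name. The BPF side is not in this hunk; its uprobe declarations plausibly look like this sketch (names and the pid check are illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

int test_pid;
int uprobe_byname_parm1, uprobe_byname_ran;

/* SEC() encodes both the binary and the symbol to attach to */
SEC("uprobe//proc/self/exe:autoattach_trigger_func")
int BPF_UPROBE(handle_uprobe_byname, int arg1)
{
	if ((int)(bpf_get_current_pid_tgid() >> 32) != test_pid)
		return 0;
	uprobe_byname_parm1 = arg1;
	uprobe_byname_ran = 1;
	return 0;
}

char _license[] SEC("license") = "GPL";
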
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
new file mode 100644
index 000000000000..2ee17ef1dae2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -0,0 +1,1376 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <unistd.h>
+#include <pthread.h>
+#include <test_progs.h>
+#include "uprobe_multi.skel.h"
+#include "uprobe_multi_bench.skel.h"
+#include "uprobe_multi_usdt.skel.h"
+#include "uprobe_multi_consumers.skel.h"
+#include "uprobe_multi_pid_filter.skel.h"
+#include "uprobe_multi_session.skel.h"
+#include "uprobe_multi_session_single.skel.h"
+#include "uprobe_multi_session_cookie.skel.h"
+#include "uprobe_multi_session_recursive.skel.h"
+#include "uprobe_multi_verifier.skel.h"
+#include "bpf/libbpf_internal.h"
+#include "testing_helpers.h"
+#include "../sdt.h"
+
+static char test_data[] = "test_data";
+
+noinline void uprobe_multi_func_1(void)
+{
+ asm volatile ("");
+}
+
+noinline void uprobe_multi_func_2(void)
+{
+ asm volatile ("");
+}
+
+noinline void uprobe_multi_func_3(void)
+{
+ asm volatile ("");
+}
+
+noinline void usdt_trigger(void)
+{
+ STAP_PROBE(test, pid_filter_usdt);
+}
+
+noinline void uprobe_session_recursive(int i)
+{
+ if (i)
+ uprobe_session_recursive(i - 1);
+}
+
+struct child {
+ int go[2];
+ int c2p[2]; /* child -> parent channel */
+ int pid;
+ int tid;
+ pthread_t thread;
+ char stack[65536];
+};
+
+static void release_child(struct child *child)
+{
+ int child_status;
+
+ if (!child)
+ return;
+ close(child->go[1]);
+ close(child->go[0]);
+ if (child->thread)
+ pthread_join(child->thread, NULL);
+ close(child->c2p[0]);
+ close(child->c2p[1]);
+ if (child->pid > 0)
+ waitpid(child->pid, &child_status, 0);
+}
+
+static void kick_child(struct child *child)
+{
+ char c = 1;
+
+ if (child) {
+ write(child->go[1], &c, 1);
+ release_child(child);
+ }
+ fflush(NULL);
+}
+
+static int child_func(void *arg)
+{
+ struct child *child = arg;
+ int err, c;
+
+ close(child->go[1]);
+
+ /* wait for parent's kick */
+ err = read(child->go[0], &c, 1);
+ if (err != 1)
+ exit(err);
+
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+ usdt_trigger();
+
+ exit(errno);
+}
+
+static int spawn_child_flag(struct child *child, bool clone_vm)
+{
+ /* pipe to notify child to execute the trigger functions */
+ if (pipe(child->go))
+ return -1;
+
+ if (clone_vm) {
+ child->pid = child->tid = clone(child_func, child->stack + sizeof(child->stack)/2,
+ CLONE_VM|SIGCHLD, child);
+ } else {
+ child->pid = child->tid = fork();
+ }
+ if (child->pid < 0) {
+ release_child(child);
+ errno = EINVAL;
+ return -1;
+ }
+
+ /* forked child */
+ if (!clone_vm && child->pid == 0)
+ child_func(child);
+
+ return 0;
+}
+
+static int spawn_child(struct child *child)
+{
+ return spawn_child_flag(child, false);
+}
+
+static void *child_thread(void *ctx)
+{
+ struct child *child = ctx;
+ int c = 0, err;
+
+ child->tid = sys_gettid();
+
+ /* let parent know we are ready */
+ err = write(child->c2p[1], &c, 1);
+ if (err != 1)
+ pthread_exit(&err);
+
+ /* wait for parent's kick */
+ err = read(child->go[0], &c, 1);
+ if (err != 1)
+ pthread_exit(&err);
+
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+ usdt_trigger();
+
+ err = 0;
+ pthread_exit(&err);
+}
+
+static int spawn_thread(struct child *child)
+{
+ int c, err;
+
+ /* pipe to notify child to execute the trigger functions */
+ if (pipe(child->go))
+ return -1;
+ /* pipe to notify parent that child thread is ready */
+ if (pipe(child->c2p)) {
+ close(child->go[0]);
+ close(child->go[1]);
+ return -1;
+ }
+
+ child->pid = getpid();
+
+ err = pthread_create(&child->thread, NULL, child_thread, child);
+ if (err) {
+ err = -errno;
+ close(child->go[0]);
+ close(child->go[1]);
+ close(child->c2p[0]);
+ close(child->c2p[1]);
+ errno = -err;
+ return -1;
+ }
+
+ err = read(child->c2p[0], &c, 1);
+ if (!ASSERT_EQ(err, 1, "child_thread_ready"))
+ return -1;
+
+ return 0;
+}
+
+static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child)
+{
+ skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
+ skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
+ skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;
+
+ skel->bss->user_ptr = test_data;
+
+ /*
+ * Disable the pid check in the bpf program when this is the pid filter
+ * test, because the probe should be executed only by the child->pid
+ * passed at probe attach time.
+ */
+ skel->bss->pid = child ? 0 : getpid();
+ skel->bss->expect_pid = child ? child->pid : 0;
+
+ /* When testing a child *process*, trigger all probes from the parent as
+ * well, to make sure that PID filtering doesn't let through activations
+ * from the wrong PID; when testing a child *thread*, skip this to avoid
+ * double counting the triggering events
+ */
+ if (!child || !child->thread) {
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+ usdt_trigger();
+ }
+
+ if (child)
+ kick_child(child);
+
+ /*
+ * 2 entry and 2 exit probes are called for each uprobe_multi_func_[123]
+ * function, and each of the 6 sleepable probes increments uprobe_multi_sleep_result.
+ */
+ ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result");
+ ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result");
+ ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 2, "uprobe_multi_func_3_result");
+
+ ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 2, "uretprobe_multi_func_1_result");
+ ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 2, "uretprobe_multi_func_2_result");
+ ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 2, "uretprobe_multi_func_3_result");
+
+ ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result");
+
+ ASSERT_FALSE(skel->bss->bad_pid_seen, "bad_pid_seen");
+
+ if (child) {
+ ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid");
+ ASSERT_EQ(skel->bss->child_tid, child->tid, "uprobe_multi_child_tid");
+ }
+}
+
+static void test_skel_api(void)
+{
+ struct uprobe_multi *skel = NULL;
+ int err;
+
+ skel = uprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
+ goto cleanup;
+
+ err = uprobe_multi__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi__attach"))
+ goto cleanup;
+
+ uprobe_multi_test_run(skel, NULL);
+
+cleanup:
+ uprobe_multi__destroy(skel);
+}
+
+static void
+__test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts,
+ struct child *child)
+{
+ pid_t pid = child ? child->pid : -1;
+ struct uprobe_multi *skel = NULL;
+
+ skel = uprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
+ goto cleanup;
+
+ opts->retprobe = false;
+ skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, pid,
+ binary, pattern, opts);
+ if (!ASSERT_OK_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ opts->retprobe = true;
+ skel->links.uretprobe = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, pid,
+ binary, pattern, opts);
+ if (!ASSERT_OK_PTR(skel->links.uretprobe, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ opts->retprobe = false;
+ skel->links.uprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uprobe_sleep, pid,
+ binary, pattern, opts);
+ if (!ASSERT_OK_PTR(skel->links.uprobe_sleep, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ opts->retprobe = true;
+ skel->links.uretprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uretprobe_sleep,
+ pid, binary, pattern, opts);
+ if (!ASSERT_OK_PTR(skel->links.uretprobe_sleep, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ opts->retprobe = false;
+ skel->links.uprobe_extra = bpf_program__attach_uprobe_multi(skel->progs.uprobe_extra, -1,
+ binary, pattern, opts);
+ if (!ASSERT_OK_PTR(skel->links.uprobe_extra, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ /* Attach (uprobe-backed) USDTs */
+ skel->links.usdt_pid = bpf_program__attach_usdt(skel->progs.usdt_pid, pid, binary,
+ "test", "pid_filter_usdt", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt_pid, "attach_usdt_pid"))
+ goto cleanup;
+
+ skel->links.usdt_extra = bpf_program__attach_usdt(skel->progs.usdt_extra, -1, binary,
+ "test", "pid_filter_usdt", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt_extra, "attach_usdt_extra"))
+ goto cleanup;
+
+ uprobe_multi_test_run(skel, child);
+
+ ASSERT_FALSE(skel->bss->bad_pid_seen_usdt, "bad_pid_seen_usdt");
+ if (child) {
+ ASSERT_EQ(skel->bss->child_pid_usdt, child->pid, "usdt_multi_child_pid");
+ ASSERT_EQ(skel->bss->child_tid_usdt, child->tid, "usdt_multi_child_tid");
+ }
+cleanup:
+ uprobe_multi__destroy(skel);
+}
+
+static void
+test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts)
+{
+ static struct child child;
+
+ /* no pid filter */
+ __test_attach_api(binary, pattern, opts, NULL);
+
+ /* pid filter */
+ if (!ASSERT_OK(spawn_child(&child), "spawn_child"))
+ return;
+
+ __test_attach_api(binary, pattern, opts, &child);
+
+ /* pid filter (thread) */
+ if (!ASSERT_OK(spawn_thread(&child), "spawn_thread"))
+ return;
+
+ __test_attach_api(binary, pattern, opts, &child);
+}
+
+static void test_attach_api_pattern(void)
+{
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
+
+ test_attach_api("/proc/self/exe", "uprobe_multi_func_*", &opts);
+ test_attach_api("/proc/self/exe", "uprobe_multi_func_?", &opts);
+}
+
+static void test_attach_api_syms(void)
+{
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
+ const char *syms[3] = {
+ "uprobe_multi_func_1",
+ "uprobe_multi_func_2",
+ "uprobe_multi_func_3",
+ };
+
+ opts.syms = syms;
+ opts.cnt = ARRAY_SIZE(syms);
+ test_attach_api("/proc/self/exe", NULL, &opts);
+}
+
+static void test_attach_api_fails(void)
+{
+ LIBBPF_OPTS(bpf_link_create_opts, opts);
+ const char *path = "/proc/self/exe";
+ struct uprobe_multi *skel = NULL;
+ int prog_fd, link_fd = -1;
+ unsigned long offset = 0;
+
+ skel = uprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
+
+ /* abnormal cnt */
+ opts.uprobe_multi.path = path;
+ opts.uprobe_multi.offsets = &offset;
+ opts.uprobe_multi.cnt = INT_MAX;
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_ERR(link_fd, "link_fd"))
+ goto cleanup;
+ if (!ASSERT_EQ(link_fd, -E2BIG, "big cnt"))
+ goto cleanup;
+
+ /* cnt is 0 */
+ LIBBPF_OPTS_RESET(opts,
+ .uprobe_multi.path = path,
+ .uprobe_multi.offsets = (unsigned long *) &offset,
+ );
+
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_ERR(link_fd, "link_fd"))
+ goto cleanup;
+ if (!ASSERT_EQ(link_fd, -EINVAL, "cnt_is_zero"))
+ goto cleanup;
+
+ /* negative offset */
+ offset = -1;
+ opts.uprobe_multi.path = path;
+ opts.uprobe_multi.offsets = (unsigned long *) &offset;
+ opts.uprobe_multi.cnt = 1;
+
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_ERR(link_fd, "link_fd"))
+ goto cleanup;
+ if (!ASSERT_EQ(link_fd, -EINVAL, "offset_is_negative"))
+ goto cleanup;
+
+ /* offsets is NULL */
+ LIBBPF_OPTS_RESET(opts,
+ .uprobe_multi.path = path,
+ .uprobe_multi.cnt = 1,
+ );
+
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_ERR(link_fd, "link_fd"))
+ goto cleanup;
+ if (!ASSERT_EQ(link_fd, -EINVAL, "offsets_is_null"))
+ goto cleanup;
+
+ /* wrong offsets pointer */
+ LIBBPF_OPTS_RESET(opts,
+ .uprobe_multi.path = path,
+ .uprobe_multi.offsets = (unsigned long *) 1,
+ .uprobe_multi.cnt = 1,
+ );
+
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_ERR(link_fd, "link_fd"))
+ goto cleanup;
+ if (!ASSERT_EQ(link_fd, -EFAULT, "offsets_is_wrong"))
+ goto cleanup;
+
+ /* path is NULL */
+ offset = 1;
+ LIBBPF_OPTS_RESET(opts,
+ .uprobe_multi.offsets = (unsigned long *) &offset,
+ .uprobe_multi.cnt = 1,
+ );
+
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_ERR(link_fd, "link_fd"))
+ goto cleanup;
+ if (!ASSERT_EQ(link_fd, -EINVAL, "path_is_null"))
+ goto cleanup;
+
+ /* wrong path pointer */
+ LIBBPF_OPTS_RESET(opts,
+ .uprobe_multi.path = (const char *) 1,
+ .uprobe_multi.offsets = (unsigned long *) &offset,
+ .uprobe_multi.cnt = 1,
+ );
+
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_ERR(link_fd, "link_fd"))
+ goto cleanup;
+ if (!ASSERT_EQ(link_fd, -EFAULT, "path_is_wrong"))
+ goto cleanup;
+
+ /* wrong path type */
+ LIBBPF_OPTS_RESET(opts,
+ .uprobe_multi.path = "/",
+ .uprobe_multi.offsets = (unsigned long *) &offset,
+ .uprobe_multi.cnt = 1,
+ );
+
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_ERR(link_fd, "link_fd"))
+ goto cleanup;
+ if (!ASSERT_EQ(link_fd, -EBADF, "path_is_wrong_type"))
+ goto cleanup;
+
+ /* wrong cookies pointer */
+ LIBBPF_OPTS_RESET(opts,
+ .uprobe_multi.path = path,
+ .uprobe_multi.offsets = (unsigned long *) &offset,
+ .uprobe_multi.cookies = (__u64 *) 1ULL,
+ .uprobe_multi.cnt = 1,
+ );
+
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_ERR(link_fd, "link_fd"))
+ goto cleanup;
+ if (!ASSERT_EQ(link_fd, -EFAULT, "cookies_is_wrong"))
+ goto cleanup;
+
+ /* wrong ref_ctr_offsets pointer */
+ LIBBPF_OPTS_RESET(opts,
+ .uprobe_multi.path = path,
+ .uprobe_multi.offsets = (unsigned long *) &offset,
+ .uprobe_multi.cookies = (__u64 *) &offset,
+ .uprobe_multi.ref_ctr_offsets = (unsigned long *) 1,
+ .uprobe_multi.cnt = 1,
+ );
+
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_ERR(link_fd, "link_fd"))
+ goto cleanup;
+ if (!ASSERT_EQ(link_fd, -EFAULT, "ref_ctr_offsets_is_wrong"))
+ goto cleanup;
+
+ /* wrong flags */
+ LIBBPF_OPTS_RESET(opts,
+ .uprobe_multi.flags = 1 << 31,
+ );
+
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_ERR(link_fd, "link_fd"))
+ goto cleanup;
+ if (!ASSERT_EQ(link_fd, -EINVAL, "wrong_flags"))
+ goto cleanup;
+
+ /* wrong pid */
+ LIBBPF_OPTS_RESET(opts,
+ .uprobe_multi.path = path,
+ .uprobe_multi.offsets = (unsigned long *) &offset,
+ .uprobe_multi.cnt = 1,
+ .uprobe_multi.pid = -2,
+ );
+
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_ERR(link_fd, "link_fd"))
+ goto cleanup;
+ ASSERT_EQ(link_fd, -EINVAL, "pid_is_wrong");
+
+cleanup:
+ if (link_fd >= 0)
+ close(link_fd);
+ uprobe_multi__destroy(skel);
+}
+
+#ifdef __x86_64__
+noinline void uprobe_multi_error_func(void)
+{
+ /*
+ * If --fcf-protection=branch is enabled, gcc generates endbr as the
+ * first instruction, so we mark the exact address of the int3 with a
+ * symbol to be used in the attach_uprobe_fail_trap test below.
+ */
+ asm volatile (
+ ".globl uprobe_multi_error_func_int3; \n"
+ "uprobe_multi_error_func_int3: \n"
+ "int3 \n"
+ );
+}
+
+/*
+ * Attaching a uprobe to uprobe_multi_error_func results in an error
+ * because it already starts with an int3 instruction.
+ */
+static void attach_uprobe_fail_trap(struct uprobe_multi *skel)
+{
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
+ const char *syms[4] = {
+ "uprobe_multi_func_1",
+ "uprobe_multi_func_2",
+ "uprobe_multi_func_3",
+ "uprobe_multi_error_func_int3",
+ };
+
+ opts.syms = syms;
+ opts.cnt = ARRAY_SIZE(syms);
+
+ skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1,
+ "/proc/self/exe", NULL, &opts);
+ if (!ASSERT_ERR_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi")) {
+ bpf_link__destroy(skel->links.uprobe);
+ skel->links.uprobe = NULL;
+ }
+}
+#else
+static void attach_uprobe_fail_trap(struct uprobe_multi *skel) { }
+#endif
+
+short sema_1 __used, sema_2 __used;
+
+static void attach_uprobe_fail_refctr(struct uprobe_multi *skel)
+{
+ unsigned long *tmp_offsets = NULL, *tmp_ref_ctr_offsets = NULL;
+ unsigned long offsets[3], ref_ctr_offsets[3];
+ LIBBPF_OPTS(bpf_link_create_opts, opts);
+ const char *path = "/proc/self/exe";
+ const char *syms[3] = {
+ "uprobe_multi_func_1",
+ "uprobe_multi_func_2",
+ };
+ const char *sema[3] = {
+ "sema_1",
+ "sema_2",
+ };
+ int prog_fd, link_fd, err;
+
+ prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
+
+ err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &syms,
+ &tmp_offsets, STT_FUNC);
+ if (!ASSERT_OK(err, "elf_resolve_syms_offsets_func"))
+ return;
+
+ err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &sema,
+ &tmp_ref_ctr_offsets, STT_OBJECT);
+ if (!ASSERT_OK(err, "elf_resolve_syms_offsets_sema"))
+ goto cleanup;
+
+ /*
+ * We attach 3 uprobes across 2 functions, so 2 uprobes share a single function,
+ * but with different ref_ctr_offsets, which is not allowed and results in a failure.
+ */
+ offsets[0] = tmp_offsets[0]; /* uprobe_multi_func_1 */
+ offsets[1] = tmp_offsets[1]; /* uprobe_multi_func_2 */
+ offsets[2] = tmp_offsets[1]; /* uprobe_multi_func_2 */
+
+ ref_ctr_offsets[0] = tmp_ref_ctr_offsets[0]; /* sema_1 */
+ ref_ctr_offsets[1] = tmp_ref_ctr_offsets[1]; /* sema_2 */
+ ref_ctr_offsets[2] = tmp_ref_ctr_offsets[0]; /* sema_1, error */
+
+ opts.uprobe_multi.path = path;
+ opts.uprobe_multi.offsets = (const unsigned long *) &offsets;
+ opts.uprobe_multi.ref_ctr_offsets = (const unsigned long *) &ref_ctr_offsets;
+ opts.uprobe_multi.cnt = 3;
+
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_ERR(link_fd, "link_fd"))
+ close(link_fd);
+
+cleanup:
+ free(tmp_ref_ctr_offsets);
+ free(tmp_offsets);
+}
+
+static void test_attach_uprobe_fails(void)
+{
+ struct uprobe_multi *skel = NULL;
+
+ skel = uprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
+ return;
+
+ /* attach fails due to adding uprobe on trap instruction, x86_64 only */
+ attach_uprobe_fail_trap(skel);
+
+ /* attach fails due to a wrong ref_ctr_offset on one of the uprobes */
+ attach_uprobe_fail_refctr(skel);
+
+ uprobe_multi__destroy(skel);
+}
+
+static void __test_link_api(struct child *child)
+{
+ int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1;
+ LIBBPF_OPTS(bpf_link_create_opts, opts);
+ const char *path = "/proc/self/exe";
+ struct uprobe_multi *skel = NULL;
+ unsigned long *offsets = NULL;
+ const char *syms[3] = {
+ "uprobe_multi_func_1",
+ "uprobe_multi_func_2",
+ "uprobe_multi_func_3",
+ };
+ int link_extra_fd = -1;
+ int err;
+
+ err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets, STT_FUNC);
+ if (!ASSERT_OK(err, "elf_resolve_syms_offsets"))
+ return;
+
+ opts.uprobe_multi.path = path;
+ opts.uprobe_multi.offsets = offsets;
+ opts.uprobe_multi.cnt = ARRAY_SIZE(syms);
+ opts.uprobe_multi.pid = child ? child->pid : 0;
+
+ skel = uprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
+ goto cleanup;
+
+ opts.uprobe_multi.flags = 0;
+ prog_fd = bpf_program__fd(skel->progs.uprobe);
+ link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
+ goto cleanup;
+
+ opts.uprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
+ prog_fd = bpf_program__fd(skel->progs.uretprobe);
+ link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
+ goto cleanup;
+
+ opts.uprobe_multi.flags = 0;
+ prog_fd = bpf_program__fd(skel->progs.uprobe_sleep);
+ link3_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_GE(link3_fd, 0, "link3_fd"))
+ goto cleanup;
+
+ opts.uprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
+ prog_fd = bpf_program__fd(skel->progs.uretprobe_sleep);
+ link4_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_GE(link4_fd, 0, "link4_fd"))
+ goto cleanup;
+
+ opts.uprobe_multi.flags = 0;
+ opts.uprobe_multi.pid = 0;
+ prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
+ link_extra_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_GE(link_extra_fd, 0, "link_extra_fd"))
+ goto cleanup;
+
+ uprobe_multi_test_run(skel, child);
+
+cleanup:
+ if (link1_fd >= 0)
+ close(link1_fd);
+ if (link2_fd >= 0)
+ close(link2_fd);
+ if (link3_fd >= 0)
+ close(link3_fd);
+ if (link4_fd >= 0)
+ close(link4_fd);
+ if (link_extra_fd >= 0)
+ close(link_extra_fd);
+
+ uprobe_multi__destroy(skel);
+ free(offsets);
+}
+
+static void test_link_api(void)
+{
+ static struct child child;
+
+ /* no pid filter */
+ __test_link_api(NULL);
+
+ /* pid filter */
+ if (!ASSERT_OK(spawn_child(&child), "spawn_child"))
+ return;
+
+ __test_link_api(&child);
+
+ /* pid filter (thread) */
+ if (!ASSERT_OK(spawn_thread(&child), "spawn_thread"))
+ return;
+
+ __test_link_api(&child);
+}
+
+static struct bpf_program *
+get_program(struct uprobe_multi_consumers *skel, int prog)
+{
+ switch (prog) {
+ case 0:
+ return skel->progs.uprobe_0;
+ case 1:
+ return skel->progs.uprobe_1;
+ case 2:
+ return skel->progs.uprobe_2;
+ case 3:
+ return skel->progs.uprobe_3;
+ default:
+ ASSERT_FAIL("get_program");
+ return NULL;
+ }
+}
+
+static struct bpf_link **
+get_link(struct uprobe_multi_consumers *skel, int link)
+{
+ switch (link) {
+ case 0:
+ return &skel->links.uprobe_0;
+ case 1:
+ return &skel->links.uprobe_1;
+ case 2:
+ return &skel->links.uprobe_2;
+ case 3:
+ return &skel->links.uprobe_3;
+ default:
+ ASSERT_FAIL("get_link");
+ return NULL;
+ }
+}
+
+static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx, unsigned long offset)
+{
+ struct bpf_program *prog = get_program(skel, idx);
+ struct bpf_link **link = get_link(skel, idx);
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
+
+ if (!prog || !link)
+ return -1;
+
+ opts.offsets = &offset;
+ opts.cnt = 1;
+
+ /*
+ * bit/prog: 0 uprobe entry
+ * bit/prog: 1 uprobe return
+ * bit/prog: 2 uprobe session with return
+ * bit/prog: 3 uprobe session without return
+ */
+ opts.retprobe = idx == 1;
+ opts.session = idx == 2 || idx == 3;
+
+ *link = bpf_program__attach_uprobe_multi(prog, 0, "/proc/self/exe", NULL, &opts);
+ if (!ASSERT_OK_PTR(*link, "bpf_program__attach_uprobe_multi"))
+ return -1;
+ return 0;
+}
+
+static void uprobe_detach(struct uprobe_multi_consumers *skel, int idx)
+{
+ struct bpf_link **link = get_link(skel, idx);
+
+ bpf_link__destroy(*link);
+ *link = NULL;
+}
+
+static bool test_bit(int bit, unsigned long val)
+{
+ return val & (1 << bit);
+}
+
+noinline int
+uprobe_consumer_test(struct uprobe_multi_consumers *skel,
+ unsigned long before, unsigned long after,
+ unsigned long offset)
+{
+ int idx;
+
+ /* detach the uprobe for each program set in 'before' but not in 'after' ... */
+ for (idx = 0; idx < 4; idx++) {
+ if (test_bit(idx, before) && !test_bit(idx, after))
+ uprobe_detach(skel, idx);
+ }
+
+ /* ... and attach each program newly set in 'after' */
+ for (idx = 0; idx < 4; idx++) {
+ if (!test_bit(idx, before) && test_bit(idx, after)) {
+ if (!ASSERT_OK(uprobe_attach(skel, idx, offset), "uprobe_attach_after"))
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * We generate 16 consumer_testX functions that will have uprobes installed
+ * on them and will be called in separate threads. All function pointers are
+ * stored in the "consumers" section and each thread picks one function based
+ * on its index.
+ */
+
+extern const void *__start_consumers;
+
+#define __CONSUMER_TEST(func) \
+noinline int func(struct uprobe_multi_consumers *skel, unsigned long before, \
+ unsigned long after, unsigned long offset) \
+{ \
+ return uprobe_consumer_test(skel, before, after, offset); \
+} \
+void *__ ## func __used __attribute__((section("consumers"))) = (void *) func;
+
+#define CONSUMER_TEST(func) __CONSUMER_TEST(func)
+
+#define C1 CONSUMER_TEST(__PASTE(consumer_test, __COUNTER__))
+#define C4 C1 C1 C1 C1
+#define C16 C4 C4 C4 C4
+
+C16
+
+typedef int (*test_t)(struct uprobe_multi_consumers *, unsigned long,
+ unsigned long, unsigned long);
+
+static int consumer_test(struct uprobe_multi_consumers *skel,
+ unsigned long before, unsigned long after,
+ test_t test, unsigned long offset)
+{
+ int err, idx, ret = -1;
+
+ printf("consumer_test before %lu after %lu\n", before, after);
+
+ /* 'before' is fixed for this run; attach a uprobe for every idx set in it */
+ for (idx = 0; idx < 4; idx++) {
+ if (test_bit(idx, before)) {
+ if (!ASSERT_OK(uprobe_attach(skel, idx, offset), "uprobe_attach_before"))
+ goto cleanup;
+ }
+ }
+
+ err = test(skel, before, after, offset);
+ if (!ASSERT_EQ(err, 0, "uprobe_consumer_test"))
+ goto cleanup;
+
+ for (idx = 0; idx < 4; idx++) {
+ bool uret_stays, uret_survives;
+ const char *fmt = "BUG";
+ __u64 val = 0;
+
+ switch (idx) {
+ case 0:
+ /*
+ * uprobe entry
+ * +1 if defined in 'before'
+ */
+ if (test_bit(idx, before))
+ val++;
+ fmt = "prog 0: uprobe";
+ break;
+ case 1:
+ /*
+ * To trigger the uretprobe consumer, the uretprobe under test must either
+ * have stayed attached from 'before' to 'after' (uret_stays + test_bit),
+ * or its uprobe instance survived and a uretprobe is active in 'after'
+ * (uret_survives + test_bit).
+ */
+ uret_stays = before & after & 0b0110;
+ uret_survives = ((before & 0b0110) && (after & 0b0110) && (before & 0b1001));
+
+ if ((uret_stays || uret_survives) && test_bit(idx, after))
+ val++;
+ fmt = "prog 1: uretprobe";
+ break;
+ case 2:
+ /*
+ * session with return
+ * +1 if defined in 'before'
+ * +1 if defined in 'after'
+ */
+ if (test_bit(idx, before)) {
+ val++;
+ if (test_bit(idx, after))
+ val++;
+ }
+ fmt = "prog 2: session with return";
+ break;
+ case 3:
+ /*
+ * session without return
+ * +1 if defined in 'before'
+ */
+ if (test_bit(idx, before))
+ val++;
+ fmt = "prog 3: session with NO return";
+ break;
+ }
+
+ if (!ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt))
+ goto cleanup;
+ skel->bss->uprobe_result[idx] = 0;
+ }
+
+ ret = 0;
+
+cleanup:
+ for (idx = 0; idx < 4; idx++)
+ uprobe_detach(skel, idx);
+ return ret;
+}
+
+#define CONSUMER_MAX 16
+
+/*
+ * Each thread runs 1/16 of the load, testing a single 'before'
+ * value (its thread index) against the full range of 'after'
+ * values.
+ */
+static void *consumer_thread(void *arg)
+{
+ unsigned long idx = (unsigned long) arg;
+ struct uprobe_multi_consumers *skel;
+ unsigned long offset;
+ const void *func;
+ int after;
+
+ skel = uprobe_multi_consumers__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_consumers__open_and_load"))
+ return NULL;
+
+ func = *((&__start_consumers) + idx);
+
+ offset = get_uprobe_offset(func);
+ if (!ASSERT_GE(offset, 0, "uprobe_offset"))
+ goto out;
+
+ for (after = 0; after < CONSUMER_MAX; after++)
+ if (consumer_test(skel, idx, after, func, offset))
+ goto out;
+
+out:
+ uprobe_multi_consumers__destroy(skel);
+ return NULL;
+}
+
+
+static void test_consumers(void)
+{
+ pthread_t pt[CONSUMER_MAX];
+ unsigned long idx;
+ int err;
+
+ /*
+ * The idea of this test is to try all possible combinations of
+ * uprobe consumers attached to a single function.
+ *
+ * - 1 uprobe entry consumer
+ * - 1 uprobe exit consumer
+ * - 1 uprobe session with return
+ * - 1 uprobe session without return
+ *
+ * The test uses 4 uprobes attached to a single function, but that
+ * translates into a single uprobe with 4 consumers in the kernel.
+ *
+ * The before/after values represent the state of attached consumers
+ * before and after the probed function:
+ *
+ * bit/prog 0 : uprobe entry
+ * bit/prog 1 : uprobe return
+ *
+ * For example:
+ *
+ * before = 0b01
+ * after = 0b10
+ *
+ * means that before we call 'uprobe_consumer_test' we attach the
+ * uprobes defined in the 'before' value:
+ *
+ * - bit/prog 0: uprobe entry
+ *
+ * uprobe_consumer_test is called and inside it we attach and detach
+ * uprobes based on the 'after' value:
+ *
+ * - bit/prog 0: is detached
+ * - bit/prog 1: is attached
+ *
+ * uprobe_consumer_test returns and we check that the counter values
+ * increased by the bpf programs on each uprobe match the expected
+ * counts based on the before/after bits.
+ */
+
+ for (idx = 0; idx < CONSUMER_MAX; idx++) {
+ err = pthread_create(&pt[idx], NULL, consumer_thread, (void *) idx);
+ if (!ASSERT_OK(err, "pthread_create"))
+ break;
+ }
+
+ while (idx)
+ pthread_join(pt[--idx], NULL);
+}
+
+static struct bpf_program *uprobe_multi_program(struct uprobe_multi_pid_filter *skel, int idx)
+{
+ switch (idx) {
+ case 0: return skel->progs.uprobe_multi_0;
+ case 1: return skel->progs.uprobe_multi_1;
+ case 2: return skel->progs.uprobe_multi_2;
+ }
+ return NULL;
+}
+
+#define TASKS 3
+
+static void run_pid_filter(struct uprobe_multi_pid_filter *skel, bool clone_vm, bool retprobe)
+{
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts, .retprobe = retprobe);
+ struct bpf_link *link[TASKS] = {};
+ struct child child[TASKS] = {};
+ int i;
+
+ memset(skel->bss->test, 0, sizeof(skel->bss->test));
+
+ for (i = 0; i < TASKS; i++) {
+ if (!ASSERT_OK(spawn_child_flag(&child[i], clone_vm), "spawn_child"))
+ goto cleanup;
+ skel->bss->pids[i] = child[i].pid;
+ }
+
+ for (i = 0; i < TASKS; i++) {
+ link[i] = bpf_program__attach_uprobe_multi(uprobe_multi_program(skel, i),
+ child[i].pid, "/proc/self/exe",
+ "uprobe_multi_func_1", &opts);
+ if (!ASSERT_OK_PTR(link[i], "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+ }
+
+ for (i = 0; i < TASKS; i++)
+ kick_child(&child[i]);
+
+ for (i = 0; i < TASKS; i++) {
+ ASSERT_EQ(skel->bss->test[i][0], 1, "pid");
+ ASSERT_EQ(skel->bss->test[i][1], 0, "unknown");
+ }
+
+cleanup:
+ for (i = 0; i < TASKS; i++)
+ bpf_link__destroy(link[i]);
+ for (i = 0; i < TASKS; i++)
+ release_child(&child[i]);
+}
+
+static void test_pid_filter_process(bool clone_vm)
+{
+ struct uprobe_multi_pid_filter *skel;
+
+ skel = uprobe_multi_pid_filter__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_pid_filter__open_and_load"))
+ return;
+
+ run_pid_filter(skel, clone_vm, false);
+ run_pid_filter(skel, clone_vm, true);
+
+ uprobe_multi_pid_filter__destroy(skel);
+}
+
+static void test_session_skel_api(void)
+{
+ struct uprobe_multi_session *skel = NULL;
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ struct bpf_link *link = NULL;
+ int err;
+
+ skel = uprobe_multi_session__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_session__open_and_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+ skel->bss->user_ptr = test_data;
+
+ err = uprobe_multi_session__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi_session__attach"))
+ goto cleanup;
+
+ /* trigger all probes */
+ skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
+ skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
+ skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;
+
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+
+ /*
+ * We expect 2 for uprobe_multi_func_2 because it runs both the entry and
+ * return probes, while uprobe_multi_func_[13] run just the entry probe.
+ * All expected numbers are doubled because we run an extra test for the
+ * sleepable session.
+ */
+ ASSERT_EQ(skel->bss->uprobe_session_result[0], 2, "uprobe_multi_func_1_result");
+ ASSERT_EQ(skel->bss->uprobe_session_result[1], 4, "uprobe_multi_func_2_result");
+ ASSERT_EQ(skel->bss->uprobe_session_result[2], 2, "uprobe_multi_func_3_result");
+
+	/* We expect an increase of 3 entry and 1 return session calls -> 4 */
+ ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 4, "uprobe_multi_sleep_result");
+
+cleanup:
+ bpf_link__destroy(link);
+ uprobe_multi_session__destroy(skel);
+}
+
+static void test_session_single_skel_api(void)
+{
+ struct uprobe_multi_session_single *skel = NULL;
+ int err;
+
+ skel = uprobe_multi_session_single__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_single__open_and_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+
+ err = uprobe_multi_session_single__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi_session_single__attach"))
+ goto cleanup;
+
+ uprobe_multi_func_1();
+
+ /*
+	 * We expect consumers 0 and 2 to trigger just the entry handler
+	 * (value 1) and consumer 1 to hit both handlers (value 2).
+ */
+ ASSERT_EQ(skel->bss->uprobe_session_result[0], 1, "uprobe_session_result_0");
+ ASSERT_EQ(skel->bss->uprobe_session_result[1], 2, "uprobe_session_result_1");
+ ASSERT_EQ(skel->bss->uprobe_session_result[2], 1, "uprobe_session_result_2");
+
+cleanup:
+ uprobe_multi_session_single__destroy(skel);
+}
+
+static void test_session_cookie_skel_api(void)
+{
+ struct uprobe_multi_session_cookie *skel = NULL;
+ int err;
+
+ skel = uprobe_multi_session_cookie__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_cookie__open_and_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+
+ err = uprobe_multi_session_cookie__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi_session_cookie__attach"))
+ goto cleanup;
+
+ /* trigger all probes */
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+
+ ASSERT_EQ(skel->bss->test_uprobe_1_result, 1, "test_uprobe_1_result");
+ ASSERT_EQ(skel->bss->test_uprobe_2_result, 2, "test_uprobe_2_result");
+ ASSERT_EQ(skel->bss->test_uprobe_3_result, 3, "test_uprobe_3_result");
+
+cleanup:
+ uprobe_multi_session_cookie__destroy(skel);
+}
+
+static void test_session_recursive_skel_api(void)
+{
+ struct uprobe_multi_session_recursive *skel = NULL;
+ int i, err;
+
+ skel = uprobe_multi_session_recursive__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_recursive__open_and_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+
+ err = uprobe_multi_session_recursive__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi_session_recursive__attach"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(skel->bss->test_uprobe_cookie_entry); i++)
+ skel->bss->test_uprobe_cookie_entry[i] = i + 1;
+
+ uprobe_session_recursive(5);
+
+ /*
+ * entry uprobe:
+ * uprobe_session_recursive(5) { *cookie = 1, return 0
+ * uprobe_session_recursive(4) { *cookie = 2, return 1
+ * uprobe_session_recursive(3) { *cookie = 3, return 0
+ * uprobe_session_recursive(2) { *cookie = 4, return 1
+ * uprobe_session_recursive(1) { *cookie = 5, return 0
+ * uprobe_session_recursive(0) { *cookie = 6, return 1
+ * return uprobe:
+ * } i = 0 not executed
+ * } i = 1 test_uprobe_cookie_return[0] = 5
+ * } i = 2 not executed
+ * } i = 3 test_uprobe_cookie_return[1] = 3
+ * } i = 4 not executed
+ * } i = 5 test_uprobe_cookie_return[2] = 1
+ */
+
+ ASSERT_EQ(skel->bss->idx_entry, 6, "idx_entry");
+ ASSERT_EQ(skel->bss->idx_return, 3, "idx_return");
+
+ ASSERT_EQ(skel->bss->test_uprobe_cookie_return[0], 5, "test_uprobe_cookie_return[0]");
+ ASSERT_EQ(skel->bss->test_uprobe_cookie_return[1], 3, "test_uprobe_cookie_return[1]");
+ ASSERT_EQ(skel->bss->test_uprobe_cookie_return[2], 1, "test_uprobe_cookie_return[2]");
+
+cleanup:
+ uprobe_multi_session_recursive__destroy(skel);
+}
+
+static void test_bench_attach_uprobe(void)
+{
+ long attach_start_ns = 0, attach_end_ns = 0;
+ struct uprobe_multi_bench *skel = NULL;
+ long detach_start_ns, detach_end_ns;
+ double attach_delta, detach_delta;
+ int err;
+
+ skel = uprobe_multi_bench__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_bench__open_and_load"))
+ goto cleanup;
+
+ attach_start_ns = get_time_ns();
+
+ err = uprobe_multi_bench__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi_bench__attach"))
+ goto cleanup;
+
+ attach_end_ns = get_time_ns();
+
+ system("./uprobe_multi bench");
+
+ ASSERT_EQ(skel->bss->count, 50000, "uprobes_count");
+
+cleanup:
+ detach_start_ns = get_time_ns();
+ uprobe_multi_bench__destroy(skel);
+ detach_end_ns = get_time_ns();
+
+ attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
+ detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;
+
+ printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
+ printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
+}
+
+static void test_bench_attach_usdt(void)
+{
+ long attach_start_ns = 0, attach_end_ns = 0;
+ struct uprobe_multi_usdt *skel = NULL;
+ long detach_start_ns, detach_end_ns;
+ double attach_delta, detach_delta;
+
+ skel = uprobe_multi_usdt__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "uprobe_multi_usdt__open_and_load"))
+ goto cleanup;
+
+ attach_start_ns = get_time_ns();
+
+ skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0, -1, "./uprobe_multi",
+ "test", "usdt", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt0, "bpf_program__attach_usdt"))
+ goto cleanup;
+
+ attach_end_ns = get_time_ns();
+
+ system("./uprobe_multi usdt");
+
+ ASSERT_EQ(skel->bss->count, 50000, "usdt_count");
+
+cleanup:
+ detach_start_ns = get_time_ns();
+ uprobe_multi_usdt__destroy(skel);
+ detach_end_ns = get_time_ns();
+
+ attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
+ detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;
+
+ printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
+ printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
+}
+
+void test_uprobe_multi_test(void)
+{
+ if (test__start_subtest("skel_api"))
+ test_skel_api();
+ if (test__start_subtest("attach_api_pattern"))
+ test_attach_api_pattern();
+ if (test__start_subtest("attach_api_syms"))
+ test_attach_api_syms();
+ if (test__start_subtest("link_api"))
+ test_link_api();
+ if (test__start_subtest("bench_uprobe"))
+ test_bench_attach_uprobe();
+ if (test__start_subtest("bench_usdt"))
+ test_bench_attach_usdt();
+ if (test__start_subtest("attach_api_fails"))
+ test_attach_api_fails();
+ if (test__start_subtest("attach_uprobe_fails"))
+ test_attach_uprobe_fails();
+ if (test__start_subtest("consumers"))
+ test_consumers();
+ if (test__start_subtest("filter_fork"))
+ test_pid_filter_process(false);
+ if (test__start_subtest("filter_clone_vm"))
+ test_pid_filter_process(true);
+ if (test__start_subtest("session"))
+ test_session_skel_api();
+ if (test__start_subtest("session_single"))
+ test_session_single_skel_api();
+ if (test__start_subtest("session_cookie"))
+ test_session_cookie_skel_api();
+ if (test__start_subtest("session_cookie_recursive"))
+ test_session_recursive_skel_api();
+ RUN_TESTS(uprobe_multi_verifier);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
new file mode 100644
index 000000000000..955a37751b52
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
@@ -0,0 +1,803 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#ifdef __x86_64__
+
+#include <unistd.h>
+#include <asm/ptrace.h>
+#include <linux/compiler.h>
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include <sys/wait.h>
+#include <sys/syscall.h>
+#include <sys/prctl.h>
+#include <asm/prctl.h>
+#include "uprobe_syscall.skel.h"
+#include "uprobe_syscall_executed.skel.h"
+#include "bpf/libbpf_internal.h"
+
+#define USDT_NOP .byte 0x0f, 0x1f, 0x44, 0x00, 0x00
+#include "usdt.h"
+
+#pragma GCC diagnostic ignored "-Wattributes"
+
+__attribute__((aligned(16)))
+__nocf_check __weak __naked unsigned long uprobe_regs_trigger(void)
+{
+ asm volatile (
+ ".byte 0x0f, 0x1f, 0x44, 0x00, 0x00\n" /* nop5 */
+ "movq $0xdeadbeef, %rax\n"
+ "ret\n"
+ );
+}
+
+__naked void uprobe_regs(struct pt_regs *before, struct pt_regs *after)
+{
+ asm volatile (
+ "movq %r15, 0(%rdi)\n"
+ "movq %r14, 8(%rdi)\n"
+ "movq %r13, 16(%rdi)\n"
+ "movq %r12, 24(%rdi)\n"
+ "movq %rbp, 32(%rdi)\n"
+ "movq %rbx, 40(%rdi)\n"
+ "movq %r11, 48(%rdi)\n"
+ "movq %r10, 56(%rdi)\n"
+ "movq %r9, 64(%rdi)\n"
+ "movq %r8, 72(%rdi)\n"
+ "movq %rax, 80(%rdi)\n"
+ "movq %rcx, 88(%rdi)\n"
+ "movq %rdx, 96(%rdi)\n"
+ "movq %rsi, 104(%rdi)\n"
+ "movq %rdi, 112(%rdi)\n"
+ "movq $0, 120(%rdi)\n" /* orig_rax */
+ "movq $0, 128(%rdi)\n" /* rip */
+ "movq $0, 136(%rdi)\n" /* cs */
+ "pushq %rax\n"
+ "pushf\n"
+ "pop %rax\n"
+ "movq %rax, 144(%rdi)\n" /* eflags */
+ "pop %rax\n"
+ "movq %rsp, 152(%rdi)\n" /* rsp */
+ "movq $0, 160(%rdi)\n" /* ss */
+
+ /* save 2nd argument */
+ "pushq %rsi\n"
+ "call uprobe_regs_trigger\n"
+
+ /* save return value and load 2nd argument pointer to rax */
+ "pushq %rax\n"
+ "movq 8(%rsp), %rax\n"
+
+ "movq %r15, 0(%rax)\n"
+ "movq %r14, 8(%rax)\n"
+ "movq %r13, 16(%rax)\n"
+ "movq %r12, 24(%rax)\n"
+ "movq %rbp, 32(%rax)\n"
+ "movq %rbx, 40(%rax)\n"
+ "movq %r11, 48(%rax)\n"
+ "movq %r10, 56(%rax)\n"
+ "movq %r9, 64(%rax)\n"
+ "movq %r8, 72(%rax)\n"
+ "movq %rcx, 88(%rax)\n"
+ "movq %rdx, 96(%rax)\n"
+ "movq %rsi, 104(%rax)\n"
+ "movq %rdi, 112(%rax)\n"
+ "movq $0, 120(%rax)\n" /* orig_rax */
+ "movq $0, 128(%rax)\n" /* rip */
+ "movq $0, 136(%rax)\n" /* cs */
+
+ /* restore return value and 2nd argument */
+ "pop %rax\n"
+ "pop %rsi\n"
+
+ "movq %rax, 80(%rsi)\n"
+
+ "pushf\n"
+ "pop %rax\n"
+
+ "movq %rax, 144(%rsi)\n" /* eflags */
+ "movq %rsp, 152(%rsi)\n" /* rsp */
+ "movq $0, 160(%rsi)\n" /* ss */
+ "ret\n"
+);
+}
+
+static void test_uprobe_regs_equal(bool retprobe)
+{
+ LIBBPF_OPTS(bpf_uprobe_opts, opts,
+ .retprobe = retprobe,
+ );
+ struct uprobe_syscall *skel = NULL;
+ struct pt_regs before = {}, after = {};
+ unsigned long *pb = (unsigned long *) &before;
+ unsigned long *pa = (unsigned long *) &after;
+ unsigned long *pp;
+ unsigned long offset;
+ unsigned int i, cnt;
+
+ offset = get_uprobe_offset(&uprobe_regs_trigger);
+ if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
+ return;
+
+ skel = uprobe_syscall__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall__open_and_load"))
+ goto cleanup;
+
+ skel->links.probe = bpf_program__attach_uprobe_opts(skel->progs.probe,
+ 0, "/proc/self/exe", offset, &opts);
+ if (!ASSERT_OK_PTR(skel->links.probe, "bpf_program__attach_uprobe_opts"))
+ goto cleanup;
+
+ /* make sure uprobe gets optimized */
+ if (!retprobe)
+ uprobe_regs_trigger();
+
+ uprobe_regs(&before, &after);
+
+ pp = (unsigned long *) &skel->bss->regs;
+ cnt = sizeof(before)/sizeof(*pb);
+
+ for (i = 0; i < cnt; i++) {
+ unsigned int offset = i * sizeof(unsigned long);
+
+ /*
+		 * Check each register before and after the uprobe_regs_trigger
+		 * call that triggers the u[ret]probe.
+ */
+ switch (offset) {
+ case offsetof(struct pt_regs, rax):
+ ASSERT_EQ(pa[i], 0xdeadbeef, "return value");
+ break;
+ default:
+ if (!ASSERT_EQ(pb[i], pa[i], "register before-after value check"))
+ fprintf(stdout, "failed register offset %u\n", offset);
+ }
+
+ /*
+		 * Compare the register seen by the bpf program with the register
+		 * after the uprobe_regs_trigger call (rax excepted, see below).
+ */
+ switch (offset) {
+ /*
+		 * These values will be different (they are not set in
+		 * uprobe_regs); we don't care.
+ */
+ case offsetof(struct pt_regs, orig_rax):
+ case offsetof(struct pt_regs, rip):
+ case offsetof(struct pt_regs, cs):
+ case offsetof(struct pt_regs, rsp):
+ case offsetof(struct pt_regs, ss):
+ break;
+ /*
+		 * the uprobe does not see the return value in rax; it needs to
+		 * see the original (before) rax value
+ */
+ case offsetof(struct pt_regs, rax):
+ if (!retprobe) {
+ ASSERT_EQ(pp[i], pb[i], "uprobe rax prog-before value check");
+ break;
+ }
+ default:
+ if (!ASSERT_EQ(pp[i], pa[i], "register prog-after value check"))
+ fprintf(stdout, "failed register offset %u\n", offset);
+ }
+ }
+
+cleanup:
+ uprobe_syscall__destroy(skel);
+}
+
+#define BPF_TESTMOD_UPROBE_TEST_FILE "/sys/kernel/bpf_testmod_uprobe"
+
+static int write_bpf_testmod_uprobe(unsigned long offset)
+{
+ size_t n, ret;
+ char buf[30];
+ int fd;
+
+ n = sprintf(buf, "%lu", offset);
+
+ fd = open(BPF_TESTMOD_UPROBE_TEST_FILE, O_WRONLY);
+ if (fd < 0)
+ return -errno;
+
+ ret = write(fd, buf, n);
+ close(fd);
+ return ret != n ? (int) ret : 0;
+}
+
+static void test_regs_change(void)
+{
+ struct pt_regs before = {}, after = {};
+ unsigned long *pb = (unsigned long *) &before;
+ unsigned long *pa = (unsigned long *) &after;
+ unsigned long cnt = sizeof(before)/sizeof(*pb);
+ unsigned int i, err, offset;
+
+ offset = get_uprobe_offset(uprobe_regs_trigger);
+
+ err = write_bpf_testmod_uprobe(offset);
+ if (!ASSERT_OK(err, "register_uprobe"))
+ return;
+
+ /* make sure uprobe gets optimized */
+ uprobe_regs_trigger();
+
+ uprobe_regs(&before, &after);
+
+ err = write_bpf_testmod_uprobe(0);
+ if (!ASSERT_OK(err, "unregister_uprobe"))
+ return;
+
+ for (i = 0; i < cnt; i++) {
+ unsigned int offset = i * sizeof(unsigned long);
+
+ switch (offset) {
+ case offsetof(struct pt_regs, rax):
+ ASSERT_EQ(pa[i], 0x12345678deadbeef, "rax");
+ break;
+ case offsetof(struct pt_regs, rcx):
+ ASSERT_EQ(pa[i], 0x87654321feebdaed, "rcx");
+ break;
+ case offsetof(struct pt_regs, r11):
+ ASSERT_EQ(pa[i], (__u64) -1, "r11");
+ break;
+ default:
+ if (!ASSERT_EQ(pa[i], pb[i], "register before-after value check"))
+ fprintf(stdout, "failed register offset %u\n", offset);
+ }
+ }
+}
+
+#ifndef __NR_uretprobe
+#define __NR_uretprobe 335
+#endif
+
+__naked unsigned long uretprobe_syscall_call_1(void)
+{
+ /*
+	 * Pretend we are the uretprobe trampoline to trigger the return
+ * probe invocation in order to verify we get SIGILL.
+ */
+ asm volatile (
+ "pushq %rax\n"
+ "pushq %rcx\n"
+ "pushq %r11\n"
+ "movq $" __stringify(__NR_uretprobe) ", %rax\n"
+ "syscall\n"
+ "popq %r11\n"
+ "popq %rcx\n"
+ "retq\n"
+ );
+}
+
+__naked unsigned long uretprobe_syscall_call(void)
+{
+ asm volatile (
+ "call uretprobe_syscall_call_1\n"
+ "retq\n"
+ );
+}
+
+static void test_uretprobe_syscall_call(void)
+{
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
+ .retprobe = true,
+ );
+ struct uprobe_syscall_executed *skel;
+ int pid, status, err, go[2], c = 0;
+ struct bpf_link *link;
+
+ if (!ASSERT_OK(pipe(go), "pipe"))
+ return;
+
+ skel = uprobe_syscall_executed__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
+ goto cleanup;
+
+ pid = fork();
+ if (!ASSERT_GE(pid, 0, "fork"))
+ goto cleanup;
+
+ /* child */
+ if (pid == 0) {
+ close(go[1]);
+
+ /* wait for parent's kick */
+ err = read(go[0], &c, 1);
+ if (err != 1)
+ exit(-1);
+
+ uretprobe_syscall_call();
+ _exit(0);
+ }
+
+ skel->bss->pid = pid;
+
+ link = bpf_program__attach_uprobe_multi(skel->progs.test_uretprobe_multi,
+ pid, "/proc/self/exe",
+ "uretprobe_syscall_call", &opts);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+ skel->links.test_uretprobe_multi = link;
+
+ /* kick the child */
+ write(go[1], &c, 1);
+ err = waitpid(pid, &status, 0);
+ ASSERT_EQ(err, pid, "waitpid");
+
+ /* verify the child got killed with SIGILL */
+ ASSERT_EQ(WIFSIGNALED(status), 1, "WIFSIGNALED");
+ ASSERT_EQ(WTERMSIG(status), SIGILL, "WTERMSIG");
+
+ /* verify the uretprobe program wasn't called */
+ ASSERT_EQ(skel->bss->executed, 0, "executed");
+
+cleanup:
+ uprobe_syscall_executed__destroy(skel);
+ close(go[1]);
+ close(go[0]);
+}
+
+#define TRAMP "[uprobes-trampoline]"
+
+__attribute__((aligned(16)))
+__nocf_check __weak __naked void uprobe_test(void)
+{
+ asm volatile (" \n"
+ ".byte 0x0f, 0x1f, 0x44, 0x00, 0x00 \n"
+ "ret \n"
+ );
+}
+
+__attribute__((aligned(16)))
+__nocf_check __weak void usdt_test(void)
+{
+ USDT(optimized_uprobe, usdt);
+}
+
+static int find_uprobes_trampoline(void *tramp_addr)
+{
+ void *start, *end;
+ char line[128];
+ int ret = -1;
+ FILE *maps;
+
+ maps = fopen("/proc/self/maps", "r");
+ if (!maps) {
+ fprintf(stderr, "cannot open maps\n");
+ return -1;
+ }
+
+ while (fgets(line, sizeof(line), maps)) {
+ int m = -1;
+
+ /* We care only about private r-x mappings. */
+ if (sscanf(line, "%p-%p r-xp %*x %*x:%*x %*u %n", &start, &end, &m) != 2)
+ continue;
+ if (m < 0)
+ continue;
+ if (!strncmp(&line[m], TRAMP, sizeof(TRAMP)-1) && (start == tramp_addr)) {
+ ret = 0;
+ break;
+ }
+ }
+
+ fclose(maps);
+ return ret;
+}
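+
+/* For reference, the /proc/self/maps line matched above looks roughly like
+ * this (addresses illustrative):
+ *
+ *   7f3a1c400000-7f3a1c401000 r-xp 00000000 00:00 0    [uprobes-trampoline]
+ */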
+
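+/* 0f 1f 44 00 00 encodes "nopl 0x0(%rax,%rax,1)", the canonical 5-byte nop
+ * that an optimized uprobe replaces with a 5-byte relative call.
+ */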
+static unsigned char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
+
+static void *find_nop5(void *fn)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if (!memcmp(nop5, fn + i, 5))
+ return fn + i;
+ }
+ return NULL;
+}
+
+typedef void (__attribute__((nocf_check)) *trigger_t)(void);
+
+static void *check_attach(struct uprobe_syscall_executed *skel, trigger_t trigger,
+ void *addr, int executed)
+{
+ struct __arch_relative_insn {
+ __u8 op;
+ __s32 raddr;
+ } __packed *call;
+ void *tramp = NULL;
+
+	/* The uprobe gets optimized after the first trigger, so trigger it twice. */
+ trigger();
+ trigger();
+
+	/* Make sure the bpf program got executed... */
+ ASSERT_EQ(skel->bss->executed, executed, "executed");
+
+	/* ... and check the trampoline is as expected. */
+ call = (struct __arch_relative_insn *) addr;
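+	/* A rel32 call targets the next instruction plus the displacement:
+	 * target = addr + 5 + raddr, i.e. (call + 1) + call->raddr below.
+	 */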
+ tramp = (void *) (call + 1) + call->raddr;
+ ASSERT_EQ(call->op, 0xe8, "call");
+ ASSERT_OK(find_uprobes_trampoline(tramp), "uprobes_trampoline");
+
+ return tramp;
+}
+
+static void check_detach(void *addr, void *tramp)
+{
+	/* The [uprobes-trampoline] mapping stays in place after detach. */
+ ASSERT_OK(find_uprobes_trampoline(tramp), "uprobes_trampoline");
+ ASSERT_OK(memcmp(addr, nop5, 5), "nop5");
+}
+
+static void check(struct uprobe_syscall_executed *skel, struct bpf_link *link,
+ trigger_t trigger, void *addr, int executed)
+{
+ void *tramp;
+
+ tramp = check_attach(skel, trigger, addr, executed);
+ bpf_link__destroy(link);
+ check_detach(addr, tramp);
+}
+
+static void test_uprobe_legacy(void)
+{
+ struct uprobe_syscall_executed *skel = NULL;
+ LIBBPF_OPTS(bpf_uprobe_opts, opts,
+ .retprobe = true,
+ );
+ struct bpf_link *link;
+ unsigned long offset;
+
+ offset = get_uprobe_offset(&uprobe_test);
+ if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
+ goto cleanup;
+
+ /* uprobe */
+ skel = uprobe_syscall_executed__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ link = bpf_program__attach_uprobe_opts(skel->progs.test_uprobe,
+ 0, "/proc/self/exe", offset, NULL);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_opts"))
+ goto cleanup;
+
+ check(skel, link, uprobe_test, uprobe_test, 2);
+
+ /* uretprobe */
+ skel->bss->executed = 0;
+
+ link = bpf_program__attach_uprobe_opts(skel->progs.test_uretprobe,
+ 0, "/proc/self/exe", offset, &opts);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_opts"))
+ goto cleanup;
+
+ check(skel, link, uprobe_test, uprobe_test, 2);
+
+cleanup:
+ uprobe_syscall_executed__destroy(skel);
+}
+
+static void test_uprobe_multi(void)
+{
+ struct uprobe_syscall_executed *skel = NULL;
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
+ struct bpf_link *link;
+ unsigned long offset;
+
+ offset = get_uprobe_offset(&uprobe_test);
+ if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
+ goto cleanup;
+
+ opts.offsets = &offset;
+ opts.cnt = 1;
+
+ skel = uprobe_syscall_executed__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ /* uprobe.multi */
+ link = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe_multi,
+ 0, "/proc/self/exe", NULL, &opts);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ check(skel, link, uprobe_test, uprobe_test, 2);
+
+ /* uretprobe.multi */
+ skel->bss->executed = 0;
+ opts.retprobe = true;
+ link = bpf_program__attach_uprobe_multi(skel->progs.test_uretprobe_multi,
+ 0, "/proc/self/exe", NULL, &opts);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ check(skel, link, uprobe_test, uprobe_test, 2);
+
+cleanup:
+ uprobe_syscall_executed__destroy(skel);
+}
+
+static void test_uprobe_session(void)
+{
+ struct uprobe_syscall_executed *skel = NULL;
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
+ .session = true,
+ );
+ struct bpf_link *link;
+ unsigned long offset;
+
+ offset = get_uprobe_offset(&uprobe_test);
+ if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
+ goto cleanup;
+
+ opts.offsets = &offset;
+ opts.cnt = 1;
+
+ skel = uprobe_syscall_executed__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ link = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe_session,
+ 0, "/proc/self/exe", NULL, &opts);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ check(skel, link, uprobe_test, uprobe_test, 4);
+
+cleanup:
+ uprobe_syscall_executed__destroy(skel);
+}
+
+static void test_uprobe_usdt(void)
+{
+ struct uprobe_syscall_executed *skel;
+ struct bpf_link *link;
+ void *addr;
+
+ errno = 0;
+ addr = find_nop5(usdt_test);
+ if (!ASSERT_OK_PTR(addr, "find_nop5"))
+ return;
+
+ skel = uprobe_syscall_executed__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ link = bpf_program__attach_usdt(skel->progs.test_usdt,
+ -1 /* all PIDs */, "/proc/self/exe",
+ "optimized_uprobe", "usdt", NULL);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_usdt"))
+ goto cleanup;
+
+ check(skel, link, usdt_test, addr, 2);
+
+cleanup:
+ uprobe_syscall_executed__destroy(skel);
+}
+
+/*
+ * Borrowed from tools/testing/selftests/x86/test_shadow_stack.c.
+ *
+ * For use in inline enablement of shadow stack.
+ *
+ * The program can't return from the point where shadow stack gets enabled
+ * because there will be no address on the shadow stack. So it can't use
+ * syscall() for enablement, since it is a function.
+ *
+ * Based on code from nolibc.h. Keep a copy here because this can't pull
+ * in all of nolibc.h.
+ */
+#define ARCH_PRCTL(arg1, arg2) \
+({ \
+ long _ret; \
+ register long _num asm("eax") = __NR_arch_prctl; \
+ register long _arg1 asm("rdi") = (long)(arg1); \
+ register long _arg2 asm("rsi") = (long)(arg2); \
+ \
+ asm volatile ( \
+ "syscall\n" \
+ : "=a"(_ret) \
+ : "r"(_arg1), "r"(_arg2), \
+ "0"(_num) \
+ : "rcx", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#ifndef ARCH_SHSTK_ENABLE
+#define ARCH_SHSTK_ENABLE 0x5001
+#define ARCH_SHSTK_DISABLE 0x5002
+#define ARCH_SHSTK_SHSTK (1ULL << 0)
+#endif
+
+static void test_uretprobe_shadow_stack(void)
+{
+ if (ARCH_PRCTL(ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK)) {
+ test__skip();
+ return;
+ }
+
+ /* Run all the tests with shadow stack in place. */
+
+ test_uprobe_regs_equal(false);
+ test_uprobe_regs_equal(true);
+ test_uretprobe_syscall_call();
+
+ test_uprobe_legacy();
+ test_uprobe_multi();
+ test_uprobe_session();
+ test_uprobe_usdt();
+
+ test_regs_change();
+
+ ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK);
+}
+
+static volatile bool race_stop;
+
+static USDT_DEFINE_SEMA(race);
+
+static void *worker_trigger(void *arg)
+{
+ unsigned long rounds = 0;
+
+ while (!race_stop) {
+ uprobe_test();
+ rounds++;
+ }
+
+ printf("tid %ld trigger rounds: %lu\n", sys_gettid(), rounds);
+ return NULL;
+}
+
+static void *worker_attach(void *arg)
+{
+ LIBBPF_OPTS(bpf_uprobe_opts, opts);
+ struct uprobe_syscall_executed *skel;
+ unsigned long rounds = 0, offset;
+ const char *sema[2] = {
+ __stringify(USDT_SEMA(race)),
+ NULL,
+ };
+ unsigned long *ref;
+ int err;
+
+ offset = get_uprobe_offset(&uprobe_test);
+ if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
+ return NULL;
+
+ err = elf_resolve_syms_offsets("/proc/self/exe", 1, (const char **) &sema, &ref, STT_OBJECT);
+ if (!ASSERT_OK(err, "elf_resolve_syms_offsets_sema"))
+ return NULL;
+
+ opts.ref_ctr_offset = *ref;
+
+ skel = uprobe_syscall_executed__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
+ return NULL;
+
+ skel->bss->pid = getpid();
+
+ while (!race_stop) {
+ skel->links.test_uprobe = bpf_program__attach_uprobe_opts(skel->progs.test_uprobe,
+ 0, "/proc/self/exe", offset, &opts);
+ if (!ASSERT_OK_PTR(skel->links.test_uprobe, "bpf_program__attach_uprobe_opts"))
+ break;
+
+ bpf_link__destroy(skel->links.test_uprobe);
+ skel->links.test_uprobe = NULL;
+ rounds++;
+ }
+
+ printf("tid %ld attach rounds: %lu hits: %d\n", sys_gettid(), rounds, skel->bss->executed);
+ uprobe_syscall_executed__destroy(skel);
+ free(ref);
+ return NULL;
+}
+
+static useconds_t race_msec(void)
+{
+ char *env;
+
+ env = getenv("BPF_SELFTESTS_UPROBE_SYSCALL_RACE_MSEC");
+ if (env)
+ return atoi(env);
+
+ /* default duration is 500ms */
+ return 500;
+}
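+
+/* Example (hypothetical invocation) of stretching the race window to 2s:
+ *
+ *   BPF_SELFTESTS_UPROBE_SYSCALL_RACE_MSEC=2000 \
+ *	./test_progs -t uprobe_syscall/uprobe_race
+ */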
+
+static void test_uprobe_race(void)
+{
+ int err, i, nr_threads;
+ pthread_t *threads;
+
+ nr_threads = libbpf_num_possible_cpus();
+ if (!ASSERT_GT(nr_threads, 0, "libbpf_num_possible_cpus"))
+ return;
+ nr_threads = max(2, nr_threads);
+
+ threads = alloca(sizeof(*threads) * nr_threads);
+	if (!ASSERT_OK_PTR(threads, "alloca"))
+ return;
+
+ for (i = 0; i < nr_threads; i++) {
+ err = pthread_create(&threads[i], NULL, i % 2 ? worker_trigger : worker_attach,
+ NULL);
+ if (!ASSERT_OK(err, "pthread_create"))
+ goto cleanup;
+ }
+
+ usleep(race_msec() * 1000);
+
+cleanup:
+ race_stop = true;
+ for (nr_threads = i, i = 0; i < nr_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ ASSERT_FALSE(USDT_SEMA_IS_ACTIVE(race), "race_semaphore");
+}
+
+#ifndef __NR_uprobe
+#define __NR_uprobe 336
+#endif
+
+static void test_uprobe_error(void)
+{
+ long err = syscall(__NR_uprobe);
+
+ ASSERT_EQ(err, -1, "error");
+ ASSERT_EQ(errno, ENXIO, "errno");
+}
+
+static void __test_uprobe_syscall(void)
+{
+ if (test__start_subtest("uretprobe_regs_equal"))
+ test_uprobe_regs_equal(true);
+ if (test__start_subtest("uretprobe_syscall_call"))
+ test_uretprobe_syscall_call();
+ if (test__start_subtest("uretprobe_shadow_stack"))
+ test_uretprobe_shadow_stack();
+ if (test__start_subtest("uprobe_legacy"))
+ test_uprobe_legacy();
+ if (test__start_subtest("uprobe_multi"))
+ test_uprobe_multi();
+ if (test__start_subtest("uprobe_session"))
+ test_uprobe_session();
+ if (test__start_subtest("uprobe_usdt"))
+ test_uprobe_usdt();
+ if (test__start_subtest("uprobe_race"))
+ test_uprobe_race();
+ if (test__start_subtest("uprobe_error"))
+ test_uprobe_error();
+ if (test__start_subtest("uprobe_regs_equal"))
+ test_uprobe_regs_equal(false);
+ if (test__start_subtest("regs_change"))
+ test_regs_change();
+}
+#else
+static void __test_uprobe_syscall(void)
+{
+ test__skip();
+}
+#endif
+
+void test_uprobe_syscall(void)
+{
+ __test_uprobe_syscall();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c b/tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c
new file mode 100644
index 000000000000..6deb8d560ddd
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "uretprobe_stack.skel.h"
+#include "../sdt.h"
+
+/* We set up a target_1() -> target_2() -> target_3() -> target_4() -> USDT()
+ * call chain, each function being traced by our BPF program. On entry to or
+ * return from each target_*() we capture a user stack trace and record it in
+ * a global variable, so that the user-space part of the test can validate it.
+ *
+ * Note, we put each target function into a custom section to get the
+ * __start_XXX/__stop_XXX symbols generated by the linker for us, which let
+ * us know the address range of each function.
+ */
+__attribute__((section("uprobe__target_4")))
+__weak int target_4(void)
+{
+ STAP_PROBE1(uretprobe_stack, target, 42);
+ return 42;
+}
+
+extern const void *__start_uprobe__target_4;
+extern const void *__stop_uprobe__target_4;
+
+__attribute__((section("uprobe__target_3")))
+__weak int target_3(void)
+{
+ return target_4();
+}
+
+extern const void *__start_uprobe__target_3;
+extern const void *__stop_uprobe__target_3;
+
+__attribute__((section("uprobe__target_2")))
+__weak int target_2(void)
+{
+ return target_3();
+}
+
+extern const void *__start_uprobe__target_2;
+extern const void *__stop_uprobe__target_2;
+
+__attribute__((section("uprobe__target_1")))
+__weak int target_1(int depth)
+{
+ if (depth < 1)
+ return 1 + target_1(depth + 1);
+ else
+ return target_2();
+}
+
+extern const void *__start_uprobe__target_1;
+extern const void *__stop_uprobe__target_1;
+
+extern const void *__start_uretprobe_stack_sec;
+extern const void *__stop_uretprobe_stack_sec;
+
+struct range {
+ long start;
+ long stop;
+};
+
+static struct range targets[] = {
+	{}, /* we want target_1 to map to targets[1], so we need 1-based indexing */
+ { (long)&__start_uprobe__target_1, (long)&__stop_uprobe__target_1 },
+ { (long)&__start_uprobe__target_2, (long)&__stop_uprobe__target_2 },
+ { (long)&__start_uprobe__target_3, (long)&__stop_uprobe__target_3 },
+ { (long)&__start_uprobe__target_4, (long)&__stop_uprobe__target_4 },
+};
+
+static struct range caller = {
+ (long)&__start_uretprobe_stack_sec,
+ (long)&__stop_uretprobe_stack_sec,
+};
+
+static void validate_stack(__u64 *ips, int stack_len, int cnt, ...)
+{
+ int i, j;
+ va_list args;
+
+ if (!ASSERT_GT(stack_len, 0, "stack_len"))
+ return;
+
+ stack_len /= 8;
+
+ /* check if we have enough entries to satisfy test expectations */
+ if (!ASSERT_GE(stack_len, cnt, "stack_len2"))
+ return;
+
+ if (env.verbosity >= VERBOSE_NORMAL) {
+ printf("caller: %#lx - %#lx\n", caller.start, caller.stop);
+ for (i = 1; i < ARRAY_SIZE(targets); i++)
+ printf("target_%d: %#lx - %#lx\n", i, targets[i].start, targets[i].stop);
+ for (i = 0; i < stack_len; i++) {
+ for (j = 1; j < ARRAY_SIZE(targets); j++) {
+ if (ips[i] >= targets[j].start && ips[i] < targets[j].stop)
+ break;
+ }
+ if (j < ARRAY_SIZE(targets)) { /* found target match */
+ printf("ENTRY #%d: %#lx (in target_%d)\n", i, (long)ips[i], j);
+ } else if (ips[i] >= caller.start && ips[i] < caller.stop) {
+ printf("ENTRY #%d: %#lx (in caller)\n", i, (long)ips[i]);
+ } else {
+ printf("ENTRY #%d: %#lx\n", i, (long)ips[i]);
+ }
+ }
+ }
+
+ va_start(args, cnt);
+
+ for (i = cnt - 1; i >= 0; i--) {
+ /* most recent entry is the deepest target function */
+ const struct range *t = va_arg(args, const struct range *);
+
+ ASSERT_GE(ips[i], t->start, "addr_start");
+ ASSERT_LT(ips[i], t->stop, "addr_stop");
+ }
+
+ va_end(args);
+}
+
+/* __weak prevents inlining */
+__attribute__((section("uretprobe_stack_sec")))
+__weak void test_uretprobe_stack(void)
+{
+ LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+ struct uretprobe_stack *skel;
+ int err;
+
+ skel = uretprobe_stack__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ err = uretprobe_stack__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* trigger */
+ ASSERT_EQ(target_1(0), 42 + 1, "trigger_return");
+
+ /*
+ * Stacks captured on ENTRY uprobes
+ */
+
+	/* (uprobe 1) target_1 in stack trace */
+ validate_stack(skel->bss->entry_stack1, skel->bss->entry1_len,
+ 2, &caller, &targets[1]);
+ /* (uprobe 1, recursed) */
+ validate_stack(skel->bss->entry_stack1_recur, skel->bss->entry1_recur_len,
+ 3, &caller, &targets[1], &targets[1]);
+ /* (uprobe 2) caller -> target_1 -> target_1 -> target_2 */
+ validate_stack(skel->bss->entry_stack2, skel->bss->entry2_len,
+ 4, &caller, &targets[1], &targets[1], &targets[2]);
+ /* (uprobe 3) */
+ validate_stack(skel->bss->entry_stack3, skel->bss->entry3_len,
+ 5, &caller, &targets[1], &targets[1], &targets[2], &targets[3]);
+ /* (uprobe 4) caller -> target_1 -> target_1 -> target_2 -> target_3 -> target_4 */
+ validate_stack(skel->bss->entry_stack4, skel->bss->entry4_len,
+ 6, &caller, &targets[1], &targets[1], &targets[2], &targets[3], &targets[4]);
+
+ /* (USDT): full caller -> target_1 -> target_1 -> target_2 (uretprobed)
+	 * -> target_3 -> target_4 (uretprobed) chain
+ */
+ validate_stack(skel->bss->usdt_stack, skel->bss->usdt_len,
+ 6, &caller, &targets[1], &targets[1], &targets[2], &targets[3], &targets[4]);
+
+ /*
+ * Now stacks captured on the way out in EXIT uprobes
+ */
+
+ /* (uretprobe 4) everything up to target_4, but excluding it */
+ validate_stack(skel->bss->exit_stack4, skel->bss->exit4_len,
+ 5, &caller, &targets[1], &targets[1], &targets[2], &targets[3]);
+ /* we didn't install uretprobes on target_2 and target_3 */
+ /* (uretprobe 1, recur) first target_1 call only */
+ validate_stack(skel->bss->exit_stack1_recur, skel->bss->exit1_recur_len,
+ 2, &caller, &targets[1]);
+ /* (uretprobe 1) just a caller in the stack trace */
+ validate_stack(skel->bss->exit_stack1, skel->bss->exit1_len,
+ 1, &caller);
+
+cleanup:
+ uretprobe_stack__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/usdt.c b/tools/testing/selftests/bpf/prog_tests/usdt.c
new file mode 100644
index 000000000000..f4be5269fa90
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/usdt.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+
+#define _SDT_HAS_SEMAPHORES 1
+#include "../sdt.h"
+
+#include "test_usdt.skel.h"
+#include "test_urandom_usdt.skel.h"
+
+int lets_test_this(int);
+
+static volatile int idx = 2;
+static volatile __u64 bla = 0xFEDCBA9876543210ULL;
+static volatile short nums[] = {-1, -2, -3, -4};
+
+static volatile struct {
+ int x;
+ signed char y;
+} t1 = { 1, -127 };
+
+#define SEC(name) __attribute__((section(name), used))
+
+unsigned short test_usdt0_semaphore SEC(".probes");
+unsigned short test_usdt3_semaphore SEC(".probes");
+unsigned short test_usdt12_semaphore SEC(".probes");
+
+static void __always_inline trigger_func(int x)
+{
+ long y = 42;
+
+ if (test_usdt0_semaphore)
+ STAP_PROBE(test, usdt0);
+ if (test_usdt3_semaphore)
+ STAP_PROBE3(test, usdt3, x, y, &bla);
+ if (test_usdt12_semaphore) {
+ STAP_PROBE12(test, usdt12,
+ x, x + 1, y, x + y, 5,
+ y / 7, bla, &bla, -9, nums[x],
+ nums[idx], t1.y);
+ }
+}
+
+#if defined(__x86_64__) || defined(__i386__)
+/*
+ * SIB (Scale-Index-Base) addressing format: "size@(base_reg, index_reg, scale)"
+ * - 'size' is the size in bytes of the array element, and its sign indicates
+ * whether the type is signed (negative) or unsigned (positive).
+ * - 'base_reg' is the register holding the base address, normally rdx or edx
+ * - 'index_reg' is the register holding the index, normally rax or eax
+ * - 'scale' is the scaling factor (typically 1, 2, 4, or 8), which matches the
+ * size of the element type.
+ *
+ * For example, for an array of 'short' (signed 2-byte elements), the SIB spec would be:
+ * - size: -2 (negative because 'short' is signed)
+ * - scale: 2 (since sizeof(short) == 2)
+ *
+ * The resulting SIB format: "-2@(%%rdx,%%rax,2)" for x86_64, "-2@(%%edx,%%eax,2)" for i386
+ */
+static volatile short array[] = {-1, -2, -3, -4};
+
+#if defined(__x86_64__)
+#define USDT_SIB_ARG_SPEC -2@(%%rdx,%%rax,2)
+#else
+#define USDT_SIB_ARG_SPEC -2@(%%edx,%%eax,2)
+#endif
+
+unsigned short test_usdt_sib_semaphore SEC(".probes");
+
+static void trigger_sib_spec(void)
+{
+ /*
+ * Force SIB addressing with inline assembly.
+ *
+ * You must compile with -std=gnu99 or -std=c99 to use the
+ * STAP_PROBE_ASM macro.
+ *
+ * The STAP_PROBE_ASM macro generates a quoted string that gets
+ * inserted between the surrounding assembly instructions. In this
+ * case, USDT_SIB_ARG_SPEC is embedded directly into the instruction
+ * stream, creating a probe point between the asm statement boundaries.
+ * It works fine with gcc/clang.
+ *
+ * Register constraints:
+ * - "d"(array): Binds the 'array' variable to %rdx or %edx register
+ * - "a"(0): Binds the constant 0 to %rax or %eax register
+ * These ensure that when USDT_SIB_ARG_SPEC references %%rdx(%edx) and
+ * %%rax(%eax), they contain the expected values for SIB addressing.
+ *
+ * The "memory" clobber prevents the compiler from reordering memory
+ * accesses around the probe point, ensuring that the probe behavior
+ * is predictable and consistent.
+ */
+ asm volatile(
+ STAP_PROBE_ASM(test, usdt_sib, USDT_SIB_ARG_SPEC)
+ :
+ : "d"(array), "a"(0)
+ : "memory"
+ );
+}
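+
+/* With the index register pinned to 0 above, the probed argument resolves to
+ * array[0], a signed 2-byte -1; the usdt_sib_* asserts in subtest_basic_usdt()
+ * check exactly that value.
+ */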
+#endif
+
+static void subtest_basic_usdt(bool optimized)
+{
+ LIBBPF_OPTS(bpf_usdt_opts, opts);
+ struct test_usdt *skel;
+ struct test_usdt__bss *bss;
+ int err, i, called;
+ const __u64 expected_cookie = 0xcafedeadbeeffeed;
+
+#define TRIGGER(x) ({ \
+ trigger_func(x); \
+ if (optimized) \
+ trigger_func(x); \
+ optimized ? 2 : 1; \
+ })
+
+ skel = test_usdt__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bss = skel->bss;
+ bss->my_pid = getpid();
+
+ err = test_usdt__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* usdt0 won't be auto-attached */
+ opts.usdt_cookie = expected_cookie;
+ skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0,
+ 0 /*self*/, "/proc/self/exe",
+ "test", "usdt0", &opts);
+ if (!ASSERT_OK_PTR(skel->links.usdt0, "usdt0_link"))
+ goto cleanup;
+
+#if defined(__x86_64__) || defined(__i386__)
+ opts.usdt_cookie = expected_cookie;
+ skel->links.usdt_sib = bpf_program__attach_usdt(skel->progs.usdt_sib,
+ 0 /*self*/, "/proc/self/exe",
+ "test", "usdt_sib", &opts);
+ if (!ASSERT_OK_PTR(skel->links.usdt_sib, "usdt_sib_link"))
+ goto cleanup;
+#endif
+
+ called = TRIGGER(1);
+
+ ASSERT_EQ(bss->usdt0_called, called, "usdt0_called");
+ ASSERT_EQ(bss->usdt3_called, called, "usdt3_called");
+ ASSERT_EQ(bss->usdt12_called, called, "usdt12_called");
+
+ ASSERT_EQ(bss->usdt0_cookie, expected_cookie, "usdt0_cookie");
+ ASSERT_EQ(bss->usdt0_arg_cnt, 0, "usdt0_arg_cnt");
+ ASSERT_EQ(bss->usdt0_arg_ret, -ENOENT, "usdt0_arg_ret");
+ ASSERT_EQ(bss->usdt0_arg_size, -ENOENT, "usdt0_arg_size");
+
+ /* auto-attached usdt3 gets default zero cookie value */
+ ASSERT_EQ(bss->usdt3_cookie, 0, "usdt3_cookie");
+ ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt");
+
+ ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret");
+ ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret");
+ ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret");
+ ASSERT_EQ(bss->usdt3_args[0], 1, "usdt3_arg1");
+ ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2");
+ ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3");
+ ASSERT_EQ(bss->usdt3_arg_sizes[0], 4, "usdt3_arg1_size");
+ ASSERT_EQ(bss->usdt3_arg_sizes[1], 8, "usdt3_arg2_size");
+ ASSERT_EQ(bss->usdt3_arg_sizes[2], 8, "usdt3_arg3_size");
+
+ /* auto-attached usdt12 gets default zero cookie value */
+ ASSERT_EQ(bss->usdt12_cookie, 0, "usdt12_cookie");
+ ASSERT_EQ(bss->usdt12_arg_cnt, 12, "usdt12_arg_cnt");
+
+ ASSERT_EQ(bss->usdt12_args[0], 1, "usdt12_arg1");
+ ASSERT_EQ(bss->usdt12_args[1], 1 + 1, "usdt12_arg2");
+ ASSERT_EQ(bss->usdt12_args[2], 42, "usdt12_arg3");
+ ASSERT_EQ(bss->usdt12_args[3], 42 + 1, "usdt12_arg4");
+ ASSERT_EQ(bss->usdt12_args[4], 5, "usdt12_arg5");
+ ASSERT_EQ(bss->usdt12_args[5], 42 / 7, "usdt12_arg6");
+ ASSERT_EQ(bss->usdt12_args[6], bla, "usdt12_arg7");
+ ASSERT_EQ(bss->usdt12_args[7], (uintptr_t)&bla, "usdt12_arg8");
+ ASSERT_EQ(bss->usdt12_args[8], -9, "usdt12_arg9");
+ ASSERT_EQ(bss->usdt12_args[9], nums[1], "usdt12_arg10");
+ ASSERT_EQ(bss->usdt12_args[10], nums[idx], "usdt12_arg11");
+ ASSERT_EQ(bss->usdt12_args[11], t1.y, "usdt12_arg12");
+
+ int usdt12_expected_arg_sizes[12] = { 4, 4, 8, 8, 4, 8, 8, 8, 4, 2, 2, 1 };
+
+ for (i = 0; i < 12; i++)
+ ASSERT_EQ(bss->usdt12_arg_sizes[i], usdt12_expected_arg_sizes[i], "usdt12_arg_size");
+
+ /* trigger_func() is marked __always_inline, so USDT invocations will be
+ * inlined in two different places, meaning that each USDT will have
+ * at least 2 different places to be attached to. This verifies that
+ * bpf_program__attach_usdt() handles this properly and attaches to
+ * all possible places of USDT invocation.
+ */
+ called += TRIGGER(2);
+
+ ASSERT_EQ(bss->usdt0_called, called, "usdt0_called");
+ ASSERT_EQ(bss->usdt3_called, called, "usdt3_called");
+ ASSERT_EQ(bss->usdt12_called, called, "usdt12_called");
+
+ /* only check values that depend on trigger_func()'s input value */
+ ASSERT_EQ(bss->usdt3_args[0], 2, "usdt3_arg1");
+
+ ASSERT_EQ(bss->usdt12_args[0], 2, "usdt12_arg1");
+ ASSERT_EQ(bss->usdt12_args[1], 2 + 1, "usdt12_arg2");
+ ASSERT_EQ(bss->usdt12_args[3], 42 + 2, "usdt12_arg4");
+ ASSERT_EQ(bss->usdt12_args[9], nums[2], "usdt12_arg10");
+
+ /* detach and re-attach usdt3 */
+ bpf_link__destroy(skel->links.usdt3);
+
+ opts.usdt_cookie = 0xBADC00C51E;
+ skel->links.usdt3 = bpf_program__attach_usdt(skel->progs.usdt3, -1 /* any pid */,
+ "/proc/self/exe", "test", "usdt3", &opts);
+ if (!ASSERT_OK_PTR(skel->links.usdt3, "usdt3_reattach"))
+ goto cleanup;
+
+ called += TRIGGER(3);
+
+ ASSERT_EQ(bss->usdt3_called, called, "usdt3_called");
+ /* this time usdt3 has custom cookie */
+ ASSERT_EQ(bss->usdt3_cookie, 0xBADC00C51E, "usdt3_cookie");
+ ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt");
+
+ ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret");
+ ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret");
+ ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret");
+ ASSERT_EQ(bss->usdt3_args[0], 3, "usdt3_arg1");
+ ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2");
+ ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3");
+
+#if defined(__x86_64__) || defined(__i386__)
+ trigger_sib_spec();
+ ASSERT_EQ(bss->usdt_sib_called, 1, "usdt_sib_called");
+ ASSERT_EQ(bss->usdt_sib_cookie, expected_cookie, "usdt_sib_cookie");
+ ASSERT_EQ(bss->usdt_sib_arg_cnt, 1, "usdt_sib_arg_cnt");
+ ASSERT_EQ(bss->usdt_sib_arg, nums[0], "usdt_sib_arg");
+ ASSERT_EQ(bss->usdt_sib_arg_ret, 0, "usdt_sib_arg_ret");
+ ASSERT_EQ(bss->usdt_sib_arg_size, sizeof(nums[0]), "usdt_sib_arg_size");
+#endif
+
+cleanup:
+ test_usdt__destroy(skel);
+#undef TRIGGER
+}
+
+unsigned short test_usdt_100_semaphore SEC(".probes");
+unsigned short test_usdt_300_semaphore SEC(".probes");
+unsigned short test_usdt_400_semaphore SEC(".probes");
+
+#define R10(F, X) F(X+0); F(X+1);F(X+2); F(X+3); F(X+4); \
+ F(X+5); F(X+6); F(X+7); F(X+8); F(X+9);
+#define R100(F, X) R10(F,X+ 0);R10(F,X+10);R10(F,X+20);R10(F,X+30);R10(F,X+40); \
+ R10(F,X+50);R10(F,X+60);R10(F,X+70);R10(F,X+80);R10(F,X+90);
+
+/* carefully control that we get exactly 100 inlined call sites: f100 is
+ * always inlined, while the __weak trigger below is not */
+static void __always_inline f100(int x)
+{
+ STAP_PROBE1(test, usdt_100, x);
+}
+
+__weak void trigger_100_usdts(void)
+{
+ R100(f100, 0);
+}
+
+/* we shouldn't be able to attach to test:usdt_300 USDT as we don't have
+ * enough slots for specs. It's important that each STAP_PROBE1() invocation
+ * (after unrolling) gets a different arg spec due to the compiler inlining
+ * x as a constant
+ */
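+
+/* For illustration (assumed USDT note syntax): after inlining, the 300 call
+ * sites carry distinct constant arg specs along the lines of "-4@$0",
+ * "-4@$1", ..., "-4@$299", each of which needs its own spec ID.
+ */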
+static void __always_inline f300(int x)
+{
+ STAP_PROBE1(test, usdt_300, x);
+}
+
+__weak void trigger_300_usdts(void)
+{
+ R100(f300, 0);
+ R100(f300, 100);
+ R100(f300, 200);
+}
+
+static void __always_inline f400(int x __attribute__((unused)))
+{
+ STAP_PROBE1(test, usdt_400, 400);
+}
+
+/* this time we have 400 different USDT call sites, but they have a uniform
+ * argument location, so libbpf's spec string deduplication logic should keep
+ * the spec count very small and we should be able to attach to all 400
+ * call sites
+ */
+__weak void trigger_400_usdts(void)
+{
+ R100(f400, 0);
+ R100(f400, 100);
+ R100(f400, 200);
+ R100(f400, 300);
+}
+
+static void subtest_multispec_usdt(void)
+{
+ LIBBPF_OPTS(bpf_usdt_opts, opts);
+ struct test_usdt *skel;
+ struct test_usdt__bss *bss;
+ int err, i;
+
+ skel = test_usdt__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bss = skel->bss;
+ bss->my_pid = getpid();
+
+ err = test_usdt__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* usdt_100 is auto-attached and there are 100 inlined call sites,
+ * let's validate that all of them are properly attached to and
+	 * handled on the BPF side
+ */
+ trigger_100_usdts();
+
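+	/* f100 was invoked with x = 0..99, so the expected sum is
+	 * 0 + 1 + ... + 99 = 99 * 100 / 2 = 4950.
+	 */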
+ ASSERT_EQ(bss->usdt_100_called, 100, "usdt_100_called");
+ ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum");
+
+ /* Stress test free spec ID tracking. By default libbpf allows up to
+ * 256 specs to be used, so if we don't return free spec IDs back
+	 * after a few detachments and re-attachments we should run out of
+ * available spec IDs.
+ */
+ for (i = 0; i < 2; i++) {
+ bpf_link__destroy(skel->links.usdt_100);
+
+ skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1,
+ "/proc/self/exe",
+ "test", "usdt_100", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_100_reattach"))
+ goto cleanup;
+
+ bss->usdt_100_sum = 0;
+ trigger_100_usdts();
+
+ ASSERT_EQ(bss->usdt_100_called, (i + 1) * 100 + 100, "usdt_100_called");
+ ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum");
+ }
+
+ /* Now let's step it up and try to attach USDT that requires more than
+ * 256 attach points with different specs for each.
+ * Note that we need trigger_300_usdts() only to actually have 300
+ * USDT call sites, we are not going to actually trace them.
+ */
+ trigger_300_usdts();
+
+ bpf_link__destroy(skel->links.usdt_100);
+
+ bss->usdt_100_called = 0;
+ bss->usdt_100_sum = 0;
+
+	/* If built with arm64/clang, there will be far fewer specs
+ * for usdt_300 call sites.
+ */
+#if !defined(__aarch64__) || !defined(__clang__)
+ /* we'll reuse usdt_100 BPF program for usdt_300 test */
+ skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, "/proc/self/exe",
+ "test", "usdt_300", NULL);
+ err = -errno;
+ if (!ASSERT_ERR_PTR(skel->links.usdt_100, "usdt_300_bad_attach"))
+ goto cleanup;
+ ASSERT_EQ(err, -E2BIG, "usdt_300_attach_err");
+
+ /* let's check that there are no "dangling" BPF programs attached due
+ * to partial success of the above test:usdt_300 attachment
+ */
+ f300(777); /* this is 301st instance of usdt_300 */
+
+ ASSERT_EQ(bss->usdt_100_called, 0, "usdt_301_called");
+ ASSERT_EQ(bss->usdt_100_sum, 0, "usdt_301_sum");
+#endif
+
+ /* This time we have USDT with 400 inlined invocations, but arg specs
+ * should be the same across all sites, so libbpf will only need to
+ * use one spec and thus we'll be able to attach 400 uprobes
+ * successfully.
+ *
+ * Again, we are reusing usdt_100 BPF program.
+ */
+ skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1,
+ "/proc/self/exe",
+ "test", "usdt_400", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_400_attach"))
+ goto cleanup;
+
+ trigger_400_usdts();
+
+ ASSERT_EQ(bss->usdt_100_called, 400, "usdt_400_called");
+ ASSERT_EQ(bss->usdt_100_sum, 400 * 400, "usdt_400_sum");
+
+cleanup:
+ test_usdt__destroy(skel);
+}
+
+static FILE *urand_spawn(int *pid)
+{
+ FILE *f;
+
+ /* urandom_read's stdout is wired into f */
+ f = popen("./urandom_read 1 report-pid", "r");
+ if (!f)
+ return NULL;
+
+ if (fscanf(f, "%d", pid) != 1) {
+ pclose(f);
+ errno = EINVAL;
+ return NULL;
+ }
+
+ return f;
+}
+
+static int urand_trigger(FILE **urand_pipe)
+{
+ int exit_code;
+
+	/* pclose() waits for the child process to exit and returns its exit code */
+ exit_code = pclose(*urand_pipe);
+ *urand_pipe = NULL;
+
+ return exit_code;
+}
+
+static void subtest_urandom_usdt(bool auto_attach)
+{
+ struct test_urandom_usdt *skel;
+ struct test_urandom_usdt__bss *bss;
+ struct bpf_link *l;
+ FILE *urand_pipe = NULL;
+ int err, urand_pid = 0;
+
+ skel = test_urandom_usdt__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ urand_pipe = urand_spawn(&urand_pid);
+ if (!ASSERT_OK_PTR(urand_pipe, "urand_spawn"))
+ goto cleanup;
+
+ bss = skel->bss;
+ bss->urand_pid = urand_pid;
+
+ if (auto_attach) {
+ err = test_urandom_usdt__attach(skel);
+ if (!ASSERT_OK(err, "skel_auto_attach"))
+ goto cleanup;
+ } else {
+ l = bpf_program__attach_usdt(skel->progs.urand_read_without_sema,
+ urand_pid, "./urandom_read",
+ "urand", "read_without_sema", NULL);
+ if (!ASSERT_OK_PTR(l, "urand_without_sema_attach"))
+ goto cleanup;
+ skel->links.urand_read_without_sema = l;
+
+ l = bpf_program__attach_usdt(skel->progs.urand_read_with_sema,
+ urand_pid, "./urandom_read",
+ "urand", "read_with_sema", NULL);
+ if (!ASSERT_OK_PTR(l, "urand_with_sema_attach"))
+ goto cleanup;
+ skel->links.urand_read_with_sema = l;
+
+ l = bpf_program__attach_usdt(skel->progs.urandlib_read_without_sema,
+ urand_pid, "./liburandom_read.so",
+ "urandlib", "read_without_sema", NULL);
+ if (!ASSERT_OK_PTR(l, "urandlib_without_sema_attach"))
+ goto cleanup;
+ skel->links.urandlib_read_without_sema = l;
+
+ l = bpf_program__attach_usdt(skel->progs.urandlib_read_with_sema,
+ urand_pid, "./liburandom_read.so",
+ "urandlib", "read_with_sema", NULL);
+ if (!ASSERT_OK_PTR(l, "urandlib_with_sema_attach"))
+ goto cleanup;
+ skel->links.urandlib_read_with_sema = l;
+
+ }
+
+ /* trigger urandom_read USDTs */
+ ASSERT_OK(urand_trigger(&urand_pipe), "urand_exit_code");
+
+ ASSERT_EQ(bss->urand_read_without_sema_call_cnt, 1, "urand_wo_sema_cnt");
+ ASSERT_EQ(bss->urand_read_without_sema_buf_sz_sum, 256, "urand_wo_sema_sum");
+
+ ASSERT_EQ(bss->urand_read_with_sema_call_cnt, 1, "urand_w_sema_cnt");
+ ASSERT_EQ(bss->urand_read_with_sema_buf_sz_sum, 256, "urand_w_sema_sum");
+
+ ASSERT_EQ(bss->urandlib_read_without_sema_call_cnt, 1, "urandlib_wo_sema_cnt");
+ ASSERT_EQ(bss->urandlib_read_without_sema_buf_sz_sum, 256, "urandlib_wo_sema_sum");
+
+ ASSERT_EQ(bss->urandlib_read_with_sema_call_cnt, 1, "urandlib_w_sema_cnt");
+ ASSERT_EQ(bss->urandlib_read_with_sema_buf_sz_sum, 256, "urandlib_w_sema_sum");
+
+cleanup:
+ if (urand_pipe)
+ pclose(urand_pipe);
+ test_urandom_usdt__destroy(skel);
+}
+
+void test_usdt(void)
+{
+ if (test__start_subtest("basic"))
+ subtest_basic_usdt(false);
+#ifdef __x86_64__
+ if (test__start_subtest("basic_optimized"))
+ subtest_basic_usdt(true);
+#endif
+ if (test__start_subtest("multispec"))
+ subtest_multispec_usdt();
+ if (test__start_subtest("urand_auto_attach"))
+ subtest_urandom_usdt(true /* auto_attach */);
+ if (test__start_subtest("urand_pid_attach"))
+ subtest_urandom_usdt(false /* auto_attach */);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
new file mode 100644
index 000000000000..9fd3ae987321
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
@@ -0,0 +1,701 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#define _GNU_SOURCE
+#include <linux/compiler.h>
+#include <linux/ring_buffer.h>
+#include <linux/build_bug.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/sysinfo.h>
+#include <test_progs.h>
+#include <uapi/linux/bpf.h>
+#include <unistd.h>
+
+#include "user_ringbuf_fail.skel.h"
+#include "user_ringbuf_success.skel.h"
+
+#include "../progs/test_user_ringbuf.h"
+
+static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ;
+static long c_ringbuf_size, c_max_entries;
+
+static void drain_current_samples(void)
+{
+ syscall(__NR_getpgid);
+}
+
+static int write_samples(struct user_ring_buffer *ringbuf, uint32_t num_samples)
+{
+ int i, err = 0;
+
+ /* Write some number of samples to the ring buffer. */
+ for (i = 0; i < num_samples; i++) {
+ struct sample *entry;
+ int read;
+
+ entry = user_ring_buffer__reserve(ringbuf, sizeof(*entry));
+ if (!entry) {
+ err = -errno;
+ goto done;
+ }
+
+ entry->pid = getpid();
+ entry->seq = i;
+ entry->value = i * i;
+
+ read = snprintf(entry->comm, sizeof(entry->comm), "%u", i);
+ if (read <= 0) {
+ /* Assert on the error path to avoid spamming logs with
+ * mostly success messages.
+ */
+ ASSERT_GT(read, 0, "snprintf_comm");
+ err = read;
+ user_ring_buffer__discard(ringbuf, entry);
+ goto done;
+ }
+
+ user_ring_buffer__submit(ringbuf, entry);
+ }
+
+done:
+ drain_current_samples();
+
+ return err;
+}
+
+static struct user_ringbuf_success *open_load_ringbuf_skel(void)
+{
+ struct user_ringbuf_success *skel;
+ int err;
+
+ skel = user_ringbuf_success__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return NULL;
+
+ err = bpf_map__set_max_entries(skel->maps.user_ringbuf, c_ringbuf_size);
+ if (!ASSERT_OK(err, "set_max_entries"))
+ goto cleanup;
+
+ err = bpf_map__set_max_entries(skel->maps.kernel_ringbuf, c_ringbuf_size);
+ if (!ASSERT_OK(err, "set_max_entries"))
+ goto cleanup;
+
+ err = user_ringbuf_success__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ return skel;
+
+cleanup:
+ user_ringbuf_success__destroy(skel);
+ return NULL;
+}
+
+static void test_user_ringbuf_mappings(void)
+{
+ int err, rb_fd;
+ int page_size = getpagesize();
+ void *mmap_ptr;
+ struct user_ringbuf_success *skel;
+
+ skel = open_load_ringbuf_skel();
+ if (!skel)
+ return;
+
+ rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
+ /* cons_pos can be mapped R/O, can't add +X with mprotect. */
+ mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
+ ASSERT_OK_PTR(mmap_ptr, "ro_cons_pos");
+ ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_cons_pos_protect");
+ ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
+ ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "wr_prod_pos");
+ err = -errno;
+ ASSERT_ERR(err, "wr_prod_pos_err");
+ ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro_cons");
+
+ /* prod_pos can be mapped RW, can't add +X with mprotect. */
+ mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ rb_fd, page_size);
+ ASSERT_OK_PTR(mmap_ptr, "rw_prod_pos");
+ ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_prod_pos_protect");
+ err = -errno;
+ ASSERT_ERR(err, "wr_prod_pos_err");
+ ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_prod");
+
+ /* data pages can be mapped RW, can't add +X with mprotect. */
+ mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd,
+ 2 * page_size);
+ ASSERT_OK_PTR(mmap_ptr, "rw_data");
+ ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_data_protect");
+ err = -errno;
+ ASSERT_ERR(err, "exec_data_err");
+ ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_data");
+
+ user_ringbuf_success__destroy(skel);
+}
+
+static int load_skel_create_ringbufs(struct user_ringbuf_success **skel_out,
+ struct ring_buffer **kern_ringbuf_out,
+ ring_buffer_sample_fn callback,
+ struct user_ring_buffer **user_ringbuf_out)
+{
+ struct user_ringbuf_success *skel;
+ struct ring_buffer *kern_ringbuf = NULL;
+ struct user_ring_buffer *user_ringbuf = NULL;
+ int err = -ENOMEM, rb_fd;
+
+ skel = open_load_ringbuf_skel();
+ if (!skel)
+ return err;
+
+ /* only trigger BPF program for current process */
+ skel->bss->pid = getpid();
+
+ if (kern_ringbuf_out) {
+ rb_fd = bpf_map__fd(skel->maps.kernel_ringbuf);
+ kern_ringbuf = ring_buffer__new(rb_fd, callback, skel, NULL);
+ if (!ASSERT_OK_PTR(kern_ringbuf, "kern_ringbuf_create"))
+ goto cleanup;
+
+ *kern_ringbuf_out = kern_ringbuf;
+ }
+
+ if (user_ringbuf_out) {
+ rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
+ user_ringbuf = user_ring_buffer__new(rb_fd, NULL);
+ if (!ASSERT_OK_PTR(user_ringbuf, "user_ringbuf_create"))
+ goto cleanup;
+
+ *user_ringbuf_out = user_ringbuf;
+ ASSERT_EQ(skel->bss->read, 0, "no_reads_after_load");
+ }
+
+ err = user_ringbuf_success__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ *skel_out = skel;
+ return 0;
+
+cleanup:
+ if (kern_ringbuf_out)
+ *kern_ringbuf_out = NULL;
+ if (user_ringbuf_out)
+ *user_ringbuf_out = NULL;
+ ring_buffer__free(kern_ringbuf);
+ user_ring_buffer__free(user_ringbuf);
+ user_ringbuf_success__destroy(skel);
+ return err;
+}
+
+static int load_skel_create_user_ringbuf(struct user_ringbuf_success **skel_out,
+ struct user_ring_buffer **ringbuf_out)
+{
+ return load_skel_create_ringbufs(skel_out, NULL, NULL, ringbuf_out);
+}
+
+static void manually_write_test_invalid_sample(struct user_ringbuf_success *skel,
+ __u32 size, __u64 producer_pos, int err)
+{
+ void *data_ptr;
+ __u64 *producer_pos_ptr;
+ int rb_fd, page_size = getpagesize();
+
+ rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
+
+ ASSERT_EQ(skel->bss->read, 0, "num_samples_before_bad_sample");
+
+ /* Map the producer_pos as RW. */
+ producer_pos_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, rb_fd, page_size);
+ ASSERT_OK_PTR(producer_pos_ptr, "producer_pos_ptr");
+
+ /* Map the data pages as RW. */
+ data_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
+ ASSERT_OK_PTR(data_ptr, "rw_data");
+
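+	/* The 8-byte BPF ringbuf record header starts with a __u32 sample
+	 * length (the kernel keeps busy/discard flags in its high bits);
+	 * writing a bogus length below is what makes the sample invalid.
+	 */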
+ memset(data_ptr, 0, BPF_RINGBUF_HDR_SZ);
+ *(__u32 *)data_ptr = size;
+
+ /* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in the kernel. */
+ smp_store_release(producer_pos_ptr, producer_pos + BPF_RINGBUF_HDR_SZ);
+
+ drain_current_samples();
+ ASSERT_EQ(skel->bss->read, 0, "num_samples_after_bad_sample");
+ ASSERT_EQ(skel->bss->err, err, "err_after_bad_sample");
+
+ ASSERT_OK(munmap(producer_pos_ptr, page_size), "unmap_producer_pos");
+ ASSERT_OK(munmap(data_ptr, page_size), "unmap_data_ptr");
+}
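+
+/* For contrast (editor's sketch, not exercised by these tests): a
+ * well-formed manual commit writes the sample length with the
+ * BPF_RINGBUF_BUSY_BIT and BPF_RINGBUF_DISCARD_BIT flags clear, then
+ * publishes producer_pos past both the header and the 8-byte-rounded
+ * sample so that __bpf_user_ringbuf_peek() accepts it.
+ */
+static __maybe_unused void manually_write_valid_sample(void *data_ptr,
+							__u64 *producer_pos_ptr,
+							__u64 producer_pos,
+							__u32 size)
+{
+	__u32 rounded = (size + 7) & ~7;
+
+	memset(data_ptr, 0, BPF_RINGBUF_HDR_SZ);
+	*(__u32 *)data_ptr = size; /* busy/discard bits clear => committed */
+
+	/* Pairs with smp_load_acquire() in __bpf_user_ringbuf_peek(). */
+	smp_store_release(producer_pos_ptr,
+			  producer_pos + BPF_RINGBUF_HDR_SZ + rounded);
+}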
+
+static void test_user_ringbuf_post_misaligned(void)
+{
+ struct user_ringbuf_success *skel;
+ struct user_ring_buffer *ringbuf;
+ int err;
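+	/* 39 bytes: deliberately not 8-byte aligned, so the kernel should
+	 * reject the sample with -EINVAL.
+	 */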
+ __u32 size = (1 << 5) + 7;
+
+ err = load_skel_create_user_ringbuf(&skel, &ringbuf);
+ if (!ASSERT_OK(err, "misaligned_skel"))
+ return;
+
+ manually_write_test_invalid_sample(skel, size, size, -EINVAL);
+ user_ring_buffer__free(ringbuf);
+ user_ringbuf_success__destroy(skel);
+}
+
+static void test_user_ringbuf_post_producer_wrong_offset(void)
+{
+ struct user_ringbuf_success *skel;
+ struct user_ring_buffer *ringbuf;
+ int err;
+ __u32 size = (1 << 5);
+
+ err = load_skel_create_user_ringbuf(&skel, &ringbuf);
+ if (!ASSERT_OK(err, "wrong_offset_skel"))
+ return;
+
+ manually_write_test_invalid_sample(skel, size, size - 8, -EINVAL);
+ user_ring_buffer__free(ringbuf);
+ user_ringbuf_success__destroy(skel);
+}
+
+static void test_user_ringbuf_post_larger_than_ringbuf_sz(void)
+{
+ struct user_ringbuf_success *skel;
+ struct user_ring_buffer *ringbuf;
+ int err;
+ __u32 size = c_ringbuf_size;
+
+ err = load_skel_create_user_ringbuf(&skel, &ringbuf);
+ if (!ASSERT_OK(err, "huge_sample_skel"))
+ return;
+
+ manually_write_test_invalid_sample(skel, size, size, -E2BIG);
+ user_ring_buffer__free(ringbuf);
+ user_ringbuf_success__destroy(skel);
+}
+
+static void test_user_ringbuf_basic(void)
+{
+ struct user_ringbuf_success *skel;
+ struct user_ring_buffer *ringbuf;
+ int err;
+
+ err = load_skel_create_user_ringbuf(&skel, &ringbuf);
+ if (!ASSERT_OK(err, "ringbuf_basic_skel"))
+ return;
+
+ ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
+
+ err = write_samples(ringbuf, 2);
+ if (!ASSERT_OK(err, "write_samples"))
+ goto cleanup;
+
+ ASSERT_EQ(skel->bss->read, 2, "num_samples_read_after");
+
+cleanup:
+ user_ring_buffer__free(ringbuf);
+ user_ringbuf_success__destroy(skel);
+}
+
+static void test_user_ringbuf_sample_full_ring_buffer(void)
+{
+ struct user_ringbuf_success *skel;
+ struct user_ring_buffer *ringbuf;
+ int err;
+ void *sample;
+
+ err = load_skel_create_user_ringbuf(&skel, &ringbuf);
+ if (!ASSERT_OK(err, "ringbuf_full_sample_skel"))
+ return;
+
+ sample = user_ring_buffer__reserve(ringbuf, c_ringbuf_size - BPF_RINGBUF_HDR_SZ);
+ if (!ASSERT_OK_PTR(sample, "full_sample"))
+ goto cleanup;
+
+ user_ring_buffer__submit(ringbuf, sample);
+ ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
+ drain_current_samples();
+ ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");
+
+cleanup:
+ user_ring_buffer__free(ringbuf);
+ user_ringbuf_success__destroy(skel);
+}
+
+static void test_user_ringbuf_post_alignment_autoadjust(void)
+{
+ struct user_ringbuf_success *skel;
+ struct user_ring_buffer *ringbuf;
+ struct sample *sample;
+ int err;
+
+ err = load_skel_create_user_ringbuf(&skel, &ringbuf);
+ if (!ASSERT_OK(err, "ringbuf_align_autoadjust_skel"))
+ return;
+
+	/* libbpf should automatically round the sample size up to 8-byte alignment. */
+ sample = user_ring_buffer__reserve(ringbuf, sizeof(*sample) + 1);
+ ASSERT_OK_PTR(sample, "reserve_autoaligned");
+ user_ring_buffer__submit(ringbuf, sample);
+
+ ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
+ drain_current_samples();
+ ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");
+
+ user_ring_buffer__free(ringbuf);
+ user_ringbuf_success__destroy(skel);
+}
+
+static void test_user_ringbuf_overfill(void)
+{
+ struct user_ringbuf_success *skel;
+ struct user_ring_buffer *ringbuf;
+ int err;
+
+ err = load_skel_create_user_ringbuf(&skel, &ringbuf);
+ if (err)
+ return;
+
+ err = write_samples(ringbuf, c_max_entries * 5);
+ ASSERT_ERR(err, "write_samples");
+ ASSERT_EQ(skel->bss->read, c_max_entries, "max_entries");
+
+ user_ring_buffer__free(ringbuf);
+ user_ringbuf_success__destroy(skel);
+}
+
+static void test_user_ringbuf_discards_properly_ignored(void)
+{
+ struct user_ringbuf_success *skel;
+ struct user_ring_buffer *ringbuf;
+ int err, num_discarded = 0;
+ __u64 *token;
+
+ err = load_skel_create_user_ringbuf(&skel, &ringbuf);
+ if (err)
+ return;
+
+ ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
+
+ while (1) {
+ /* Write samples until the buffer is full. */
+ token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
+ if (!token)
+ break;
+
+ user_ring_buffer__discard(ringbuf, token);
+ num_discarded++;
+ }
+
+ if (!ASSERT_GE(num_discarded, 0, "num_discarded"))
+ goto cleanup;
+
+ /* Should not read any samples, as they are all discarded. */
+ ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");
+ drain_current_samples();
+ ASSERT_EQ(skel->bss->read, 0, "num_post_kick");
+
+ /* Now that the ring buffer has been drained, we should be able to
+ * reserve another token.
+ */
+ token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
+
+ if (!ASSERT_OK_PTR(token, "new_token"))
+ goto cleanup;
+
+ user_ring_buffer__discard(ringbuf, token);
+cleanup:
+ user_ring_buffer__free(ringbuf);
+ user_ringbuf_success__destroy(skel);
+}
+
+static void test_user_ringbuf_loop(void)
+{
+ struct user_ringbuf_success *skel;
+ struct user_ring_buffer *ringbuf;
+ uint32_t total_samples = 8192;
+ uint32_t remaining_samples = total_samples;
+ int err;
+
+ if (!ASSERT_LT(c_max_entries, total_samples, "compare_c_max_entries"))
+ return;
+
+ err = load_skel_create_user_ringbuf(&skel, &ringbuf);
+ if (err)
+ return;
+
+ do {
+ uint32_t curr_samples;
+
+ curr_samples = remaining_samples > c_max_entries
+ ? c_max_entries : remaining_samples;
+ err = write_samples(ringbuf, curr_samples);
+ if (err != 0) {
+ /* Assert inside of if statement to avoid flooding logs
+ * on the success path.
+ */
+ ASSERT_OK(err, "write_samples");
+ goto cleanup;
+ }
+
+ remaining_samples -= curr_samples;
+ ASSERT_EQ(skel->bss->read, total_samples - remaining_samples,
+ "current_batched_entries");
+ } while (remaining_samples > 0);
+ ASSERT_EQ(skel->bss->read, total_samples, "total_batched_entries");
+
+cleanup:
+ user_ring_buffer__free(ringbuf);
+ user_ringbuf_success__destroy(skel);
+}
+
+static int send_test_message(struct user_ring_buffer *ringbuf,
+ enum test_msg_op op, s64 operand_64,
+ s32 operand_32)
+{
+ struct test_msg *msg;
+
+ msg = user_ring_buffer__reserve(ringbuf, sizeof(*msg));
+ if (!msg) {
+ /* Assert on the error path to avoid spamming logs with mostly
+ * success messages.
+ */
+ ASSERT_OK_PTR(msg, "reserve_msg");
+ return -ENOMEM;
+ }
+
+ msg->msg_op = op;
+
+ switch (op) {
+ case TEST_MSG_OP_INC64:
+ case TEST_MSG_OP_MUL64:
+ msg->operand_64 = operand_64;
+ break;
+ case TEST_MSG_OP_INC32:
+ case TEST_MSG_OP_MUL32:
+ msg->operand_32 = operand_32;
+ break;
+ default:
+		PRINT_FAIL("Invalid op %d\n", op);
+ user_ring_buffer__discard(ringbuf, msg);
+ return -EINVAL;
+ }
+
+ user_ring_buffer__submit(ringbuf, msg);
+
+ return 0;
+}
+
+static void kick_kernel_read_messages(void)
+{
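+	/* A bare prctl() is all that is needed: the BPF side of this test is
+	 * expected to hook the prctl syscall entry, so issuing it from the
+	 * monitored pid kicks the kernel into reading queued messages.
+	 */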
+ syscall(__NR_prctl);
+}
+
+static int handle_kernel_msg(void *ctx, void *data, size_t len)
+{
+ struct user_ringbuf_success *skel = ctx;
+ struct test_msg *msg = data;
+
+ switch (msg->msg_op) {
+ case TEST_MSG_OP_INC64:
+ skel->bss->user_mutated += msg->operand_64;
+ return 0;
+ case TEST_MSG_OP_INC32:
+ skel->bss->user_mutated += msg->operand_32;
+ return 0;
+ case TEST_MSG_OP_MUL64:
+ skel->bss->user_mutated *= msg->operand_64;
+ return 0;
+ case TEST_MSG_OP_MUL32:
+ skel->bss->user_mutated *= msg->operand_32;
+ return 0;
+ default:
+		fprintf(stderr, "Invalid op %d\n", msg->msg_op);
+ return -EINVAL;
+ }
+}
+
+static void drain_kernel_messages_buffer(struct ring_buffer *kern_ringbuf,
+ struct user_ringbuf_success *skel)
+{
+ int cnt;
+
+ cnt = ring_buffer__consume(kern_ringbuf);
+ ASSERT_EQ(cnt, 8, "consume_kern_ringbuf");
+ ASSERT_OK(skel->bss->err, "consume_kern_ringbuf_err");
+}
+
+static void test_user_ringbuf_msg_protocol(void)
+{
+ struct user_ringbuf_success *skel;
+ struct user_ring_buffer *user_ringbuf;
+ struct ring_buffer *kern_ringbuf;
+ int err, i;
+ __u64 expected_kern = 0;
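+	/* Worked example (assuming the op enum cycles INC64, INC32, MUL64,
+	 * MUL32): one full cycle starting from value v leaves
+	 * ((v + TEST_OP_64 + TEST_OP_32) * TEST_OP_64) * TEST_OP_32, tracked
+	 * identically in expected_kern here and in kern_mutated on the BPF side.
+	 */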
+
+ err = load_skel_create_ringbufs(&skel, &kern_ringbuf, handle_kernel_msg, &user_ringbuf);
+ if (!ASSERT_OK(err, "create_ringbufs"))
+ return;
+
+ for (i = 0; i < 64; i++) {
+ enum test_msg_op op = i % TEST_MSG_OP_NUM_OPS;
+ __u64 operand_64 = TEST_OP_64;
+ __u32 operand_32 = TEST_OP_32;
+
+ err = send_test_message(user_ringbuf, op, operand_64, operand_32);
+ if (err) {
+ /* Only assert on a failure to avoid spamming success logs. */
+ ASSERT_OK(err, "send_test_message");
+ goto cleanup;
+ }
+
+ switch (op) {
+ case TEST_MSG_OP_INC64:
+ expected_kern += operand_64;
+ break;
+ case TEST_MSG_OP_INC32:
+ expected_kern += operand_32;
+ break;
+ case TEST_MSG_OP_MUL64:
+ expected_kern *= operand_64;
+ break;
+ case TEST_MSG_OP_MUL32:
+ expected_kern *= operand_32;
+ break;
+ default:
+ PRINT_FAIL("Unexpected op %d\n", op);
+ goto cleanup;
+ }
+
+ if (i % 8 == 0) {
+ kick_kernel_read_messages();
+ ASSERT_EQ(skel->bss->kern_mutated, expected_kern, "expected_kern");
+ ASSERT_EQ(skel->bss->err, 0, "bpf_prog_err");
+ drain_kernel_messages_buffer(kern_ringbuf, skel);
+ }
+ }
+
+cleanup:
+ ring_buffer__free(kern_ringbuf);
+ user_ring_buffer__free(user_ringbuf);
+ user_ringbuf_success__destroy(skel);
+}
+
+static void *kick_kernel_cb(void *arg)
+{
+ /* Kick the kernel, causing it to drain the ring buffer and then wake
+ * up the test thread waiting on epoll.
+ */
+ syscall(__NR_prlimit64);
+
+ return NULL;
+}
+
+static int spawn_kick_thread_for_poll(void)
+{
+ pthread_t thread;
+
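+	/* Fire-and-forget: the kick thread exits on its own, so it is never
+	 * joined.
+	 */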
+ return pthread_create(&thread, NULL, kick_kernel_cb, NULL);
+}
+
+static void test_user_ringbuf_blocking_reserve(void)
+{
+ struct user_ringbuf_success *skel;
+ struct user_ring_buffer *ringbuf;
+ int err, num_written = 0;
+ __u64 *token;
+
+ err = load_skel_create_user_ringbuf(&skel, &ringbuf);
+ if (err)
+ return;
+
+ ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
+
+ while (1) {
+ /* Write samples until the buffer is full. */
+ token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
+ if (!token)
+ break;
+
+ *token = 0xdeadbeef;
+
+ user_ring_buffer__submit(ringbuf, token);
+ num_written++;
+ }
+
+ if (!ASSERT_GE(num_written, 0, "num_written"))
+ goto cleanup;
+
+ /* Should not have read any samples until the kernel is kicked. */
+ ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");
+
+	/* We should time out after 1 second without receiving a sample. */
+ token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 1000);
+ if (!ASSERT_EQ(token, NULL, "pre_kick_timeout_token"))
+ goto cleanup;
+
+ err = spawn_kick_thread_for_poll();
+	if (!ASSERT_EQ(err, 0, "deferred_kick_thread"))
+ goto cleanup;
+
+ /* After spawning another thread that asynchronously kicks the kernel to
+ * drain the messages, we're able to block and successfully get a
+ * sample once we receive an event notification.
+ */
+ token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 10000);
+
+ if (!ASSERT_OK_PTR(token, "block_token"))
+ goto cleanup;
+
+	ASSERT_GT(skel->bss->read, 0, "num_post_kick");
+	ASSERT_LE(skel->bss->read, num_written, "num_post_kick");
+ ASSERT_EQ(skel->bss->err, 0, "err_post_poll");
+ user_ring_buffer__discard(ringbuf, token);
+
+cleanup:
+ user_ring_buffer__free(ringbuf);
+ user_ringbuf_success__destroy(skel);
+}
+
+#define SUCCESS_TEST(_func) { _func, #_func }
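+/* SUCCESS_TEST(f) expands to { f, "f" }, pairing each callback with its name. */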
+
+static struct {
+ void (*test_callback)(void);
+ const char *test_name;
+} success_tests[] = {
+ SUCCESS_TEST(test_user_ringbuf_mappings),
+ SUCCESS_TEST(test_user_ringbuf_post_misaligned),
+ SUCCESS_TEST(test_user_ringbuf_post_producer_wrong_offset),
+ SUCCESS_TEST(test_user_ringbuf_post_larger_than_ringbuf_sz),
+ SUCCESS_TEST(test_user_ringbuf_basic),
+ SUCCESS_TEST(test_user_ringbuf_sample_full_ring_buffer),
+ SUCCESS_TEST(test_user_ringbuf_post_alignment_autoadjust),
+ SUCCESS_TEST(test_user_ringbuf_overfill),
+ SUCCESS_TEST(test_user_ringbuf_discards_properly_ignored),
+ SUCCESS_TEST(test_user_ringbuf_loop),
+ SUCCESS_TEST(test_user_ringbuf_msg_protocol),
+ SUCCESS_TEST(test_user_ringbuf_blocking_reserve),
+};
+
+void test_user_ringbuf(void)
+{
+ int i;
+
+ c_ringbuf_size = getpagesize(); /* 1 page */
+ c_max_entries = c_ringbuf_size / c_sample_size;
+
+ for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
+ if (!test__start_subtest(success_tests[i].test_name))
+ continue;
+
+ success_tests[i].test_callback();
+ }
+
+ RUN_TESTS(user_ringbuf_fail);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/varlen.c b/tools/testing/selftests/bpf/prog_tests/varlen.c
index dd324b4933db..4d7056f8f177 100644
--- a/tools/testing/selftests/bpf/prog_tests/varlen.c
+++ b/tools/testing/selftests/bpf/prog_tests/varlen.c
@@ -63,6 +63,13 @@ void test_varlen(void)
CHECK_VAL(data->total4, size1 + size2);
CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check",
"doesn't match!\n");
+
+ CHECK_VAL(bss->ret_bad_read, -EFAULT);
+ CHECK_VAL(data->payload_bad[0], 0x42);
+ CHECK_VAL(data->payload_bad[1], 0x42);
+ CHECK_VAL(data->payload_bad[2], 0);
+ CHECK_VAL(data->payload_bad[3], 0x42);
+ CHECK_VAL(data->payload_bad[4], 0x42);
cleanup:
test_varlen__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/verif_stats.c b/tools/testing/selftests/bpf/prog_tests/verif_stats.c
new file mode 100644
index 000000000000..af4b95f57ac1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/verif_stats.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+
+#include "trace_vprintk.lskel.h"
+
+void test_verif_stats(void)
+{
+ __u32 len = sizeof(struct bpf_prog_info);
+ struct trace_vprintk_lskel *skel;
+ struct bpf_prog_info info = {};
+ int err;
+
+ skel = trace_vprintk_lskel__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load"))
+ goto cleanup;
+
+ err = bpf_prog_get_info_by_fd(skel->progs.sys_enter.prog_fd,
+ &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
+ goto cleanup;
+
+ if (!ASSERT_GT(info.verified_insns, 0, "verified_insns"))
+ goto cleanup;
+
+cleanup:
+ trace_vprintk_lskel__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
new file mode 100644
index 000000000000..4b4b081b46cc
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <test_progs.h>
+
+#include "cap_helpers.h"
+#include "verifier_and.skel.h"
+#include "verifier_arena.skel.h"
+#include "verifier_arena_large.skel.h"
+#include "verifier_array_access.skel.h"
+#include "verifier_async_cb_context.skel.h"
+#include "verifier_basic_stack.skel.h"
+#include "verifier_bitfield_write.skel.h"
+#include "verifier_bounds.skel.h"
+#include "verifier_bounds_deduction.skel.h"
+#include "verifier_bounds_deduction_non_const.skel.h"
+#include "verifier_bounds_mix_sign_unsign.skel.h"
+#include "verifier_bpf_get_stack.skel.h"
+#include "verifier_bpf_trap.skel.h"
+#include "verifier_bswap.skel.h"
+#include "verifier_btf_ctx_access.skel.h"
+#include "verifier_btf_unreliable_prog.skel.h"
+#include "verifier_cfg.skel.h"
+#include "verifier_cgroup_inv_retcode.skel.h"
+#include "verifier_cgroup_skb.skel.h"
+#include "verifier_cgroup_storage.skel.h"
+#include "verifier_const.skel.h"
+#include "verifier_const_or.skel.h"
+#include "verifier_ctx.skel.h"
+#include "verifier_ctx_sk_msg.skel.h"
+#include "verifier_d_path.skel.h"
+#include "verifier_direct_packet_access.skel.h"
+#include "verifier_direct_stack_access_wraparound.skel.h"
+#include "verifier_div0.skel.h"
+#include "verifier_div_overflow.skel.h"
+#include "verifier_global_subprogs.skel.h"
+#include "verifier_global_ptr_args.skel.h"
+#include "verifier_gotol.skel.h"
+#include "verifier_gotox.skel.h"
+#include "verifier_helper_access_var_len.skel.h"
+#include "verifier_helper_packet_access.skel.h"
+#include "verifier_helper_restricted.skel.h"
+#include "verifier_helper_value_access.skel.h"
+#include "verifier_int_ptr.skel.h"
+#include "verifier_iterating_callbacks.skel.h"
+#include "verifier_jeq_infer_not_null.skel.h"
+#include "verifier_jit_convergence.skel.h"
+#include "verifier_ld_ind.skel.h"
+#include "verifier_ldsx.skel.h"
+#include "verifier_leak_ptr.skel.h"
+#include "verifier_linked_scalars.skel.h"
+#include "verifier_live_stack.skel.h"
+#include "verifier_load_acquire.skel.h"
+#include "verifier_loops1.skel.h"
+#include "verifier_lwt.skel.h"
+#include "verifier_map_in_map.skel.h"
+#include "verifier_map_ptr.skel.h"
+#include "verifier_map_ptr_mixing.skel.h"
+#include "verifier_map_ret_val.skel.h"
+#include "verifier_masking.skel.h"
+#include "verifier_may_goto_1.skel.h"
+#include "verifier_may_goto_2.skel.h"
+#include "verifier_meta_access.skel.h"
+#include "verifier_movsx.skel.h"
+#include "verifier_mtu.skel.h"
+#include "verifier_mul.skel.h"
+#include "verifier_netfilter_ctx.skel.h"
+#include "verifier_netfilter_retcode.skel.h"
+#include "verifier_bpf_fastcall.skel.h"
+#include "verifier_or_jmp32_k.skel.h"
+#include "verifier_precision.skel.h"
+#include "verifier_prevent_map_lookup.skel.h"
+#include "verifier_private_stack.skel.h"
+#include "verifier_raw_stack.skel.h"
+#include "verifier_raw_tp_writable.skel.h"
+#include "verifier_reg_equal.skel.h"
+#include "verifier_ref_tracking.skel.h"
+#include "verifier_regalloc.skel.h"
+#include "verifier_ringbuf.skel.h"
+#include "verifier_runtime_jit.skel.h"
+#include "verifier_scalar_ids.skel.h"
+#include "verifier_sdiv.skel.h"
+#include "verifier_search_pruning.skel.h"
+#include "verifier_sock.skel.h"
+#include "verifier_sock_addr.skel.h"
+#include "verifier_sockmap_mutate.skel.h"
+#include "verifier_spill_fill.skel.h"
+#include "verifier_spin_lock.skel.h"
+#include "verifier_stack_ptr.skel.h"
+#include "verifier_store_release.skel.h"
+#include "verifier_subprog_precision.skel.h"
+#include "verifier_subreg.skel.h"
+#include "verifier_tailcall.skel.h"
+#include "verifier_tailcall_jit.skel.h"
+#include "verifier_typedef.skel.h"
+#include "verifier_uninit.skel.h"
+#include "verifier_unpriv.skel.h"
+#include "verifier_unpriv_perf.skel.h"
+#include "verifier_value_adj_spill.skel.h"
+#include "verifier_value.skel.h"
+#include "verifier_value_illegal_alu.skel.h"
+#include "verifier_value_or_null.skel.h"
+#include "verifier_value_ptr_arith.skel.h"
+#include "verifier_var_off.skel.h"
+#include "verifier_vfs_accept.skel.h"
+#include "verifier_vfs_reject.skel.h"
+#include "verifier_xadd.skel.h"
+#include "verifier_xdp.skel.h"
+#include "verifier_xdp_direct_packet_access.skel.h"
+#include "verifier_bits_iter.skel.h"
+#include "verifier_lsm.skel.h"
+#include "irq.skel.h"
+
+#define MAX_ENTRIES 11
+
+struct test_val {
+ unsigned int index;
+ int foo[MAX_ENTRIES];
+};
+
+__maybe_unused
+static void run_tests_aux(const char *skel_name,
+ skel_elf_bytes_fn elf_bytes_factory,
+ pre_execution_cb pre_execution_cb)
+{
+ struct test_loader tester = {};
+ __u64 old_caps;
+ int err;
+
+ /* test_verifier tests are executed w/o CAP_SYS_ADMIN, do the same here */
+ err = cap_disable_effective(1ULL << CAP_SYS_ADMIN, &old_caps);
+ if (err) {
+ PRINT_FAIL("failed to drop CAP_SYS_ADMIN: %i, %s\n", err, strerror(-err));
+ return;
+ }
+
+ test_loader__set_pre_execution_cb(&tester, pre_execution_cb);
+ test_loader__run_subtests(&tester, skel_name, elf_bytes_factory);
+ test_loader_fini(&tester);
+
+ err = cap_enable_effective(old_caps, NULL);
+ if (err)
+ PRINT_FAIL("failed to restore CAP_SYS_ADMIN: %i, %s\n", err, strerror(-err));
+}
+
+#define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes, NULL)
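+/* e.g. RUN(verifier_and) expands to
+ * run_tests_aux("verifier_and", verifier_and__elf_bytes, NULL).
+ */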
+
+void test_verifier_and(void) { RUN(verifier_and); }
+void test_verifier_arena(void) { RUN(verifier_arena); }
+void test_verifier_arena_large(void) { RUN(verifier_arena_large); }
+void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); }
+void test_verifier_bitfield_write(void) { RUN(verifier_bitfield_write); }
+void test_verifier_bounds(void) { RUN(verifier_bounds); }
+void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); }
+void test_verifier_bounds_deduction_non_const(void) { RUN(verifier_bounds_deduction_non_const); }
+void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); }
+void test_verifier_bpf_get_stack(void) { RUN(verifier_bpf_get_stack); }
+void test_verifier_bpf_trap(void) { RUN(verifier_bpf_trap); }
+void test_verifier_bswap(void) { RUN(verifier_bswap); }
+void test_verifier_btf_ctx_access(void) { RUN(verifier_btf_ctx_access); }
+void test_verifier_btf_unreliable_prog(void) { RUN(verifier_btf_unreliable_prog); }
+void test_verifier_cfg(void) { RUN(verifier_cfg); }
+void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); }
+void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); }
+void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); }
+void test_verifier_const(void) { RUN(verifier_const); }
+void test_verifier_const_or(void) { RUN(verifier_const_or); }
+void test_verifier_ctx(void) { RUN(verifier_ctx); }
+void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); }
+void test_verifier_d_path(void) { RUN(verifier_d_path); }
+void test_verifier_direct_packet_access(void) { RUN(verifier_direct_packet_access); }
+void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); }
+void test_verifier_div0(void) { RUN(verifier_div0); }
+void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); }
+void test_verifier_global_subprogs(void) { RUN(verifier_global_subprogs); }
+void test_verifier_global_ptr_args(void) { RUN(verifier_global_ptr_args); }
+void test_verifier_gotol(void) { RUN(verifier_gotol); }
+void test_verifier_gotox(void) { RUN(verifier_gotox); }
+void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); }
+void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); }
+void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); }
+void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access); }
+void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); }
+void test_verifier_iterating_callbacks(void) { RUN(verifier_iterating_callbacks); }
+void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); }
+void test_verifier_jit_convergence(void) { RUN(verifier_jit_convergence); }
+void test_verifier_load_acquire(void) { RUN(verifier_load_acquire); }
+void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); }
+void test_verifier_ldsx(void) { RUN(verifier_ldsx); }
+void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); }
+void test_verifier_linked_scalars(void) { RUN(verifier_linked_scalars); }
+void test_verifier_live_stack(void) { RUN(verifier_live_stack); }
+void test_verifier_loops1(void) { RUN(verifier_loops1); }
+void test_verifier_lwt(void) { RUN(verifier_lwt); }
+void test_verifier_map_in_map(void) { RUN(verifier_map_in_map); }
+void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); }
+void test_verifier_map_ptr_mixing(void) { RUN(verifier_map_ptr_mixing); }
+void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); }
+void test_verifier_masking(void) { RUN(verifier_masking); }
+void test_verifier_may_goto_1(void) { RUN(verifier_may_goto_1); }
+void test_verifier_may_goto_2(void) { RUN(verifier_may_goto_2); }
+void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
+void test_verifier_movsx(void) { RUN(verifier_movsx); }
+void test_verifier_mul(void) { RUN(verifier_mul); }
+void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); }
+void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); }
+void test_verifier_bpf_fastcall(void) { RUN(verifier_bpf_fastcall); }
+void test_verifier_or_jmp32_k(void) { RUN(verifier_or_jmp32_k); }
+void test_verifier_precision(void) { RUN(verifier_precision); }
+void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); }
+void test_verifier_private_stack(void) { RUN(verifier_private_stack); }
+void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); }
+void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); }
+void test_verifier_reg_equal(void) { RUN(verifier_reg_equal); }
+void test_verifier_ref_tracking(void) { RUN(verifier_ref_tracking); }
+void test_verifier_regalloc(void) { RUN(verifier_regalloc); }
+void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); }
+void test_verifier_runtime_jit(void) { RUN(verifier_runtime_jit); }
+void test_verifier_scalar_ids(void) { RUN(verifier_scalar_ids); }
+void test_verifier_sdiv(void) { RUN(verifier_sdiv); }
+void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); }
+void test_verifier_sock(void) { RUN(verifier_sock); }
+void test_verifier_sock_addr(void) { RUN(verifier_sock_addr); }
+void test_verifier_sockmap_mutate(void) { RUN(verifier_sockmap_mutate); }
+void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
+void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
+void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
+void test_verifier_store_release(void) { RUN(verifier_store_release); }
+void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); }
+void test_verifier_subreg(void) { RUN(verifier_subreg); }
+void test_verifier_tailcall(void) { RUN(verifier_tailcall); }
+void test_verifier_tailcall_jit(void) { RUN(verifier_tailcall_jit); }
+void test_verifier_typedef(void) { RUN(verifier_typedef); }
+void test_verifier_uninit(void) { RUN(verifier_uninit); }
+void test_verifier_unpriv(void) { RUN(verifier_unpriv); }
+void test_verifier_unpriv_perf(void) { RUN(verifier_unpriv_perf); }
+void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); }
+void test_verifier_value(void) { RUN(verifier_value); }
+void test_verifier_value_illegal_alu(void) { RUN(verifier_value_illegal_alu); }
+void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); }
+void test_verifier_var_off(void) { RUN(verifier_var_off); }
+void test_verifier_vfs_accept(void) { RUN(verifier_vfs_accept); }
+void test_verifier_vfs_reject(void) { RUN(verifier_vfs_reject); }
+void test_verifier_xadd(void) { RUN(verifier_xadd); }
+void test_verifier_xdp(void) { RUN(verifier_xdp); }
+void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); }
+void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); }
+void test_verifier_lsm(void) { RUN(verifier_lsm); }
+void test_irq(void) { RUN(irq); }
+void test_verifier_mtu(void) { RUN(verifier_mtu); }
+
+static int init_test_val_map(struct bpf_object *obj, char *map_name)
+{
+ struct test_val value = {
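+		/* (6 + 1) * sizeof(int) == offsetof(struct test_val, foo[6]) */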
+ .index = (6 + 1) * sizeof(int),
+ .foo[6] = 0xabcdef12,
+ };
+ struct bpf_map *map;
+ int err, key = 0;
+
+ map = bpf_object__find_map_by_name(obj, map_name);
+ if (!map) {
+ PRINT_FAIL("Can't find map '%s'\n", map_name);
+ return -EINVAL;
+ }
+
+ err = bpf_map_update_elem(bpf_map__fd(map), &key, &value, 0);
+ if (err) {
+ PRINT_FAIL("Error while updating map '%s': %d\n", map_name, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int init_array_access_maps(struct bpf_object *obj)
+{
+ return init_test_val_map(obj, "map_array_ro");
+}
+
+void test_verifier_array_access(void)
+{
+ run_tests_aux("verifier_array_access",
+ verifier_array_access__elf_bytes,
+ init_array_access_maps);
+}
+void test_verifier_async_cb_context(void) { RUN(verifier_async_cb_context); }
+
+static int init_value_ptr_arith_maps(struct bpf_object *obj)
+{
+ return init_test_val_map(obj, "map_array_48b");
+}
+
+void test_verifier_value_ptr_arith(void)
+{
+ run_tests_aux("verifier_value_ptr_arith",
+ verifier_value_ptr_arith__elf_bytes,
+ init_value_ptr_arith_maps);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier_kfunc_prog_types.c b/tools/testing/selftests/bpf/prog_tests/verifier_kfunc_prog_types.c
new file mode 100644
index 000000000000..3918ecc2ee91
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/verifier_kfunc_prog_types.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+
+#include "verifier_kfunc_prog_types.skel.h"
+
+void test_verifier_kfunc_prog_types(void)
+{
+ RUN_TESTS(verifier_kfunc_prog_types);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier_log.c b/tools/testing/selftests/bpf/prog_tests/verifier_log.c
new file mode 100644
index 000000000000..8337c6bc5b95
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/verifier_log.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "test_log_buf.skel.h"
+
+static bool check_prog_load(int prog_fd, bool expect_err, const char *tag)
+{
+ if (expect_err) {
+ if (!ASSERT_LT(prog_fd, 0, tag)) {
+ close(prog_fd);
+ return false;
+ }
+ } else /* !expect_err */ {
+ if (!ASSERT_GT(prog_fd, 0, tag))
+ return false;
+ }
+ if (prog_fd >= 0)
+ close(prog_fd);
+ return true;
+}
+
+static struct {
+	/* strategically placed before others to avoid accidental modification by the kernel */
+ char filler[1024];
+ char buf[1024];
+ /* strategically placed after buf[] to catch more accidental corruptions */
+ char reference[1024];
+} logs;
+static const struct bpf_insn *insns;
+static size_t insn_cnt;
+
+static int load_prog(struct bpf_prog_load_opts *opts, bool expect_load_error)
+{
+ int prog_fd;
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_prog",
+ "GPL", insns, insn_cnt, opts);
+ check_prog_load(prog_fd, expect_load_error, "prog_load");
+
+ return prog_fd;
+}
+
+static void verif_log_subtest(const char *name, bool expect_load_error, int log_level)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+ char *exp_log, prog_name[16], op_name[32];
+ struct test_log_buf *skel;
+ struct bpf_program *prog;
+ size_t fixed_log_sz;
+ __u32 log_true_sz_fixed, log_true_sz_rolling;
+ int i, mode, err, prog_fd, res;
+
+ skel = test_log_buf__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_object__for_each_program(prog, skel->obj) {
+ if (strcmp(bpf_program__name(prog), name) == 0)
+ bpf_program__set_autoload(prog, true);
+ else
+ bpf_program__set_autoload(prog, false);
+ }
+
+ err = test_log_buf__load(skel);
+ if (!expect_load_error && !ASSERT_OK(err, "unexpected_load_failure"))
+ goto cleanup;
+ if (expect_load_error && !ASSERT_ERR(err, "unexpected_load_success"))
+ goto cleanup;
+
+ insns = bpf_program__insns(skel->progs.good_prog);
+ insn_cnt = bpf_program__insn_cnt(skel->progs.good_prog);
+
+ opts.log_buf = logs.reference;
+ opts.log_size = sizeof(logs.reference);
+ opts.log_level = log_level | 8 /* BPF_LOG_FIXED */;
+ load_prog(&opts, expect_load_error);
+
+ fixed_log_sz = strlen(logs.reference) + 1;
+ if (!ASSERT_GT(fixed_log_sz, 50, "fixed_log_sz"))
+ goto cleanup;
+ memset(logs.reference + fixed_log_sz, 0, sizeof(logs.reference) - fixed_log_sz);
+
+	/* Validate that BPF_LOG_FIXED behaves as the verifier log used to,
+	 * that is: we get -ENOSPC plus the beginning of the full verifier
+	 * log. This only works for log_level 2, or log_level 1 with a failed
+	 * program. At log level 2 the log is never reset. At log_level 1
+	 * with a failed program we never reach the verification-stats
+	 * output. With log level 1 and a successful program, the final
+	 * output is just the verifier stats; but if the log buffer provided
+	 * is too short, the kernel NULLs out log->ubuf and stops emitting
+	 * further log, so we would never see predictable verifier stats.
+	 * Long story short, do the following -ENOSPC test only for the
+	 * predictable combinations.
+	 */
+ if (log_level >= 2 || expect_load_error) {
+ opts.log_buf = logs.buf;
+ opts.log_level = log_level | 8; /* fixed-length log */
+ opts.log_size = 25;
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_fixed25",
+ "GPL", insns, insn_cnt, &opts);
+ if (!ASSERT_EQ(prog_fd, -ENOSPC, "unexpected_log_fixed_prog_load_result")) {
+ if (prog_fd >= 0)
+ close(prog_fd);
+ goto cleanup;
+ }
+ if (!ASSERT_EQ(strlen(logs.buf), 24, "log_fixed_25"))
+ goto cleanup;
+ if (!ASSERT_STRNEQ(logs.buf, logs.reference, 24, "log_fixed_contents_25"))
+ goto cleanup;
+ }
+
+ /* validate rolling verifier log logic: try all variations of log buf
+ * length to force various truncation scenarios
+ */
+ opts.log_buf = logs.buf;
+
+ /* rotating mode, then fixed mode */
+ for (mode = 1; mode >= 0; mode--) {
+ /* prefill logs.buf with 'A's to detect any write beyond allowed length */
+ memset(logs.filler, 'A', sizeof(logs.filler));
+ logs.filler[sizeof(logs.filler) - 1] = '\0';
+ memset(logs.buf, 'A', sizeof(logs.buf));
+ logs.buf[sizeof(logs.buf) - 1] = '\0';
+
+ for (i = 1; i < fixed_log_sz; i++) {
+ opts.log_size = i;
+ opts.log_level = log_level | (mode ? 0 : 8 /* BPF_LOG_FIXED */);
+
+ snprintf(prog_name, sizeof(prog_name),
+ "log_%s_%d", mode ? "roll" : "fixed", i);
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, prog_name,
+ "GPL", insns, insn_cnt, &opts);
+
+ snprintf(op_name, sizeof(op_name),
+ "log_%s_prog_load_%d", mode ? "roll" : "fixed", i);
+ if (!ASSERT_EQ(prog_fd, -ENOSPC, op_name)) {
+ if (prog_fd >= 0)
+ close(prog_fd);
+ goto cleanup;
+ }
+
+ snprintf(op_name, sizeof(op_name),
+ "log_%s_strlen_%d", mode ? "roll" : "fixed", i);
+ ASSERT_EQ(strlen(logs.buf), i - 1, op_name);
+
+ if (mode)
+ exp_log = logs.reference + fixed_log_sz - i;
+ else
+ exp_log = logs.reference;
+
+ snprintf(op_name, sizeof(op_name),
+ "log_%s_contents_%d", mode ? "roll" : "fixed", i);
+ if (!ASSERT_STRNEQ(logs.buf, exp_log, i - 1, op_name)) {
+ printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
+ strncmp(logs.buf, exp_log, i - 1),
+ logs.buf, exp_log);
+ goto cleanup;
+ }
+
+			/* check that unused portions of logs.buf are not overwritten */
+ snprintf(op_name, sizeof(op_name),
+ "log_%s_unused_%d", mode ? "roll" : "fixed", i);
+ if (!ASSERT_STREQ(logs.buf + i, logs.filler + i, op_name)) {
+ printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
+ strcmp(logs.buf + i, logs.filler + i),
+ logs.buf + i, logs.filler + i);
+ goto cleanup;
+ }
+ }
+ }
+
+ /* (FIXED) get actual log size */
+ opts.log_buf = logs.buf;
+ opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
+ opts.log_size = sizeof(logs.buf);
+ opts.log_true_size = 0;
+ res = load_prog(&opts, expect_load_error);
+ ASSERT_NEQ(res, -ENOSPC, "prog_load_res_fixed");
+
+ log_true_sz_fixed = opts.log_true_size;
+ ASSERT_GT(log_true_sz_fixed, 0, "log_true_sz_fixed");
+
+ /* (FIXED, NULL) get actual log size */
+ opts.log_buf = NULL;
+ opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
+ opts.log_size = 0;
+ opts.log_true_size = 0;
+ res = load_prog(&opts, expect_load_error);
+ ASSERT_NEQ(res, -ENOSPC, "prog_load_res_fixed_null");
+ ASSERT_EQ(opts.log_true_size, log_true_sz_fixed, "log_sz_fixed_null_eq");
+
+ /* (ROLLING) get actual log size */
+ opts.log_buf = logs.buf;
+ opts.log_level = log_level;
+ opts.log_size = sizeof(logs.buf);
+ opts.log_true_size = 0;
+ res = load_prog(&opts, expect_load_error);
+ ASSERT_NEQ(res, -ENOSPC, "prog_load_res_rolling");
+
+ log_true_sz_rolling = opts.log_true_size;
+ ASSERT_EQ(log_true_sz_rolling, log_true_sz_fixed, "log_true_sz_eq");
+
+ /* (ROLLING, NULL) get actual log size */
+ opts.log_buf = NULL;
+ opts.log_level = log_level;
+ opts.log_size = 0;
+ opts.log_true_size = 0;
+ res = load_prog(&opts, expect_load_error);
+ ASSERT_NEQ(res, -ENOSPC, "prog_load_res_rolling_null");
+ ASSERT_EQ(opts.log_true_size, log_true_sz_rolling, "log_true_sz_null_eq");
+
+ /* (FIXED) expect -ENOSPC for one byte short log */
+ opts.log_buf = logs.buf;
+ opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
+ opts.log_size = log_true_sz_fixed - 1;
+ opts.log_true_size = 0;
+ res = load_prog(&opts, true /* should fail */);
+ ASSERT_EQ(res, -ENOSPC, "prog_load_res_too_short_fixed");
+
+ /* (FIXED) expect *not* -ENOSPC with exact log_true_size buffer */
+ opts.log_buf = logs.buf;
+ opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
+ opts.log_size = log_true_sz_fixed;
+ opts.log_true_size = 0;
+ res = load_prog(&opts, expect_load_error);
+ ASSERT_NEQ(res, -ENOSPC, "prog_load_res_just_right_fixed");
+
+ /* (ROLLING) expect -ENOSPC for one byte short log */
+ opts.log_buf = logs.buf;
+ opts.log_level = log_level;
+ opts.log_size = log_true_sz_rolling - 1;
+ res = load_prog(&opts, true /* should fail */);
+ ASSERT_EQ(res, -ENOSPC, "prog_load_res_too_short_rolling");
+
+ /* (ROLLING) expect *not* -ENOSPC with exact log_true_size buffer */
+ opts.log_buf = logs.buf;
+ opts.log_level = log_level;
+ opts.log_size = log_true_sz_rolling;
+ opts.log_true_size = 0;
+ res = load_prog(&opts, expect_load_error);
+ ASSERT_NEQ(res, -ENOSPC, "prog_load_res_just_right_rolling");
+
+cleanup:
+ test_log_buf__destroy(skel);
+}
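+
+/* The two-pass sizing pattern validated above, as a consumer would use it
+ * (editor's sketch; fetch_full_verifier_log() is hypothetical and assumes
+ * the usual selftest includes): probe with a NULL buffer to learn
+ * log_true_size, then retry with an exactly-sized buffer.
+ */
+static __maybe_unused char *fetch_full_verifier_log(enum bpf_prog_type type,
+						    const char *prog_name,
+						    const struct bpf_insn *prog_insns,
+						    size_t prog_insn_cnt)
+{
+	LIBBPF_OPTS(bpf_prog_load_opts, o, .log_level = 1);
+	char *buf;
+	int fd;
+
+	/* Pass 1: a NULL buffer with log_size == 0 is allowed purely for sizing. */
+	fd = bpf_prog_load(type, prog_name, "GPL", prog_insns, prog_insn_cnt, &o);
+	if (fd >= 0)
+		close(fd);
+
+	buf = malloc(o.log_true_size);
+	if (!buf)
+		return NULL;
+
+	/* Pass 2: an exactly log_true_size buffer never yields -ENOSPC. */
+	o.log_buf = buf;
+	o.log_size = o.log_true_size;
+	fd = bpf_prog_load(type, prog_name, "GPL", prog_insns, prog_insn_cnt, &o);
+	if (fd >= 0)
+		close(fd);
+
+	return buf; /* caller frees */
+}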
+
+static const void *btf_data;
+static u32 btf_data_sz;
+
+static int load_btf(struct bpf_btf_load_opts *opts, bool expect_err)
+{
+ int fd;
+
+ fd = bpf_btf_load(btf_data, btf_data_sz, opts);
+ if (fd >= 0)
+ close(fd);
+ if (expect_err)
+ ASSERT_LT(fd, 0, "btf_load_failure");
+ else /* !expect_err */
+ ASSERT_GT(fd, 0, "btf_load_success");
+ return fd;
+}
+
+static void verif_btf_log_subtest(bool bad_btf)
+{
+ LIBBPF_OPTS(bpf_btf_load_opts, opts);
+ struct btf *btf;
+ struct btf_type *t;
+ char *exp_log, op_name[32];
+ size_t fixed_log_sz;
+ __u32 log_true_sz_fixed, log_true_sz_rolling;
+ int i, res;
+
+ /* prepare simple BTF contents */
+ btf = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf, "btf_new_empty"))
+ return;
+ res = btf__add_int(btf, "whatever", 4, 0);
+ if (!ASSERT_GT(res, 0, "btf_add_int_id"))
+ goto cleanup;
+ if (bad_btf) {
+		/* btf__add_int() doesn't accept a bad size value, so force-cast
+		 * the btf_type pointer and manually override the size to an
+		 * invalid 3 to simulate failure.
+		 */
+ t = (void *)btf__type_by_id(btf, res);
+ if (!ASSERT_OK_PTR(t, "int_btf_type"))
+ goto cleanup;
+ t->size = 3;
+ }
+
+ btf_data = btf__raw_data(btf, &btf_data_sz);
+ if (!ASSERT_OK_PTR(btf_data, "btf_data"))
+ goto cleanup;
+
+ load_btf(&opts, bad_btf);
+
+ opts.log_buf = logs.reference;
+ opts.log_size = sizeof(logs.reference);
+ opts.log_level = 1 | 8 /* BPF_LOG_FIXED */;
+ load_btf(&opts, bad_btf);
+
+ fixed_log_sz = strlen(logs.reference) + 1;
+ if (!ASSERT_GT(fixed_log_sz, 50, "fixed_log_sz"))
+ goto cleanup;
+ memset(logs.reference + fixed_log_sz, 0, sizeof(logs.reference) - fixed_log_sz);
+
+ /* validate BPF_LOG_FIXED truncation works as verifier log used to work */
+ opts.log_buf = logs.buf;
+ opts.log_level = 1 | 8; /* fixed-length log */
+ opts.log_size = 25;
+ res = load_btf(&opts, true);
+ ASSERT_EQ(res, -ENOSPC, "half_log_fd");
+ ASSERT_EQ(strlen(logs.buf), 24, "log_fixed_25");
+	ASSERT_STRNEQ(logs.buf, logs.reference, 24, "log_fixed_contents_25");
+
+ /* validate rolling verifier log logic: try all variations of log buf
+ * length to force various truncation scenarios
+ */
+ opts.log_buf = logs.buf;
+ opts.log_level = 1; /* rolling log */
+
+ /* prefill logs.buf with 'A's to detect any write beyond allowed length */
+ memset(logs.filler, 'A', sizeof(logs.filler));
+ logs.filler[sizeof(logs.filler) - 1] = '\0';
+ memset(logs.buf, 'A', sizeof(logs.buf));
+ logs.buf[sizeof(logs.buf) - 1] = '\0';
+
+ for (i = 1; i < fixed_log_sz; i++) {
+ opts.log_size = i;
+
+ snprintf(op_name, sizeof(op_name), "log_roll_btf_load_%d", i);
+ res = load_btf(&opts, true);
+ if (!ASSERT_EQ(res, -ENOSPC, op_name))
+ goto cleanup;
+
+ exp_log = logs.reference + fixed_log_sz - i;
+ snprintf(op_name, sizeof(op_name), "log_roll_contents_%d", i);
+ if (!ASSERT_STREQ(logs.buf, exp_log, op_name)) {
+ printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
+ strcmp(logs.buf, exp_log),
+ logs.buf, exp_log);
+ goto cleanup;
+ }
+
+ /* check that unused portions of logs.buf are not overwritten */
+ snprintf(op_name, sizeof(op_name), "log_roll_unused_tail_%d", i);
+ if (!ASSERT_STREQ(logs.buf + i, logs.filler + i, op_name)) {
+ printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
+ strcmp(logs.buf + i, logs.filler + i),
+ logs.buf + i, logs.filler + i);
+ goto cleanup;
+ }
+ }
+
+ /* (FIXED) get actual log size */
+ opts.log_buf = logs.buf;
+ opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
+ opts.log_size = sizeof(logs.buf);
+ opts.log_true_size = 0;
+ res = load_btf(&opts, bad_btf);
+ ASSERT_NEQ(res, -ENOSPC, "btf_load_res_fixed");
+
+ log_true_sz_fixed = opts.log_true_size;
+ ASSERT_GT(log_true_sz_fixed, 0, "log_true_sz_fixed");
+
+ /* (FIXED, NULL) get actual log size */
+ opts.log_buf = NULL;
+ opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
+ opts.log_size = 0;
+ opts.log_true_size = 0;
+ res = load_btf(&opts, bad_btf);
+ ASSERT_NEQ(res, -ENOSPC, "btf_load_res_fixed_null");
+ ASSERT_EQ(opts.log_true_size, log_true_sz_fixed, "log_sz_fixed_null_eq");
+
+ /* (ROLLING) get actual log size */
+ opts.log_buf = logs.buf;
+ opts.log_level = 1;
+ opts.log_size = sizeof(logs.buf);
+ opts.log_true_size = 0;
+ res = load_btf(&opts, bad_btf);
+ ASSERT_NEQ(res, -ENOSPC, "btf_load_res_rolling");
+
+ log_true_sz_rolling = opts.log_true_size;
+ ASSERT_EQ(log_true_sz_rolling, log_true_sz_fixed, "log_true_sz_eq");
+
+ /* (ROLLING, NULL) get actual log size */
+ opts.log_buf = NULL;
+ opts.log_level = 1;
+ opts.log_size = 0;
+ opts.log_true_size = 0;
+ res = load_btf(&opts, bad_btf);
+ ASSERT_NEQ(res, -ENOSPC, "btf_load_res_rolling_null");
+ ASSERT_EQ(opts.log_true_size, log_true_sz_rolling, "log_true_sz_null_eq");
+
+ /* (FIXED) expect -ENOSPC for one byte short log */
+ opts.log_buf = logs.buf;
+ opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
+ opts.log_size = log_true_sz_fixed - 1;
+ opts.log_true_size = 0;
+ res = load_btf(&opts, true);
+ ASSERT_EQ(res, -ENOSPC, "btf_load_res_too_short_fixed");
+
+ /* (FIXED) expect *not* -ENOSPC with exact log_true_size buffer */
+ opts.log_buf = logs.buf;
+ opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
+ opts.log_size = log_true_sz_fixed;
+ opts.log_true_size = 0;
+ res = load_btf(&opts, bad_btf);
+ ASSERT_NEQ(res, -ENOSPC, "btf_load_res_just_right_fixed");
+
+ /* (ROLLING) expect -ENOSPC for one byte short log */
+ opts.log_buf = logs.buf;
+ opts.log_level = 1;
+ opts.log_size = log_true_sz_rolling - 1;
+ res = load_btf(&opts, true);
+ ASSERT_EQ(res, -ENOSPC, "btf_load_res_too_short_rolling");
+
+ /* (ROLLING) expect *not* -ENOSPC with exact log_true_size buffer */
+ opts.log_buf = logs.buf;
+ opts.log_level = 1;
+ opts.log_size = log_true_sz_rolling;
+ opts.log_true_size = 0;
+ res = load_btf(&opts, bad_btf);
+ ASSERT_NEQ(res, -ENOSPC, "btf_load_res_just_right_rolling");
+
+cleanup:
+ btf__free(btf);
+}
+
+void test_verifier_log(void)
+{
+ if (test__start_subtest("good_prog-level1"))
+ verif_log_subtest("good_prog", false, 1);
+ if (test__start_subtest("good_prog-level2"))
+ verif_log_subtest("good_prog", false, 2);
+ if (test__start_subtest("bad_prog-level1"))
+ verif_log_subtest("bad_prog", true, 1);
+ if (test__start_subtest("bad_prog-level2"))
+ verif_log_subtest("bad_prog", true, 2);
+ if (test__start_subtest("bad_btf"))
+ verif_btf_log_subtest(true /* bad btf */);
+ if (test__start_subtest("good_btf"))
+ verif_btf_log_subtest(false /* !bad btf */);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
new file mode 100644
index 000000000000..4d69d9d55e17
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
@@ -0,0 +1,565 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <endian.h>
+#include <limits.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <sys/mman.h>
+#include <linux/keyctl.h>
+#include <sys/xattr.h>
+#include <linux/fsverity.h>
+#include <test_progs.h>
+
+#include "test_verify_pkcs7_sig.skel.h"
+#include "test_sig_in_xattr.skel.h"
+
+#define MAX_DATA_SIZE (1024 * 1024)
+#define MAX_SIG_SIZE 1024
+
+#define VERIFY_USE_SECONDARY_KEYRING (1UL)
+#define VERIFY_USE_PLATFORM_KEYRING (2UL)
+
+#ifndef SHA256_DIGEST_SIZE
+#define SHA256_DIGEST_SIZE 32
+#endif
+
+/* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
+#define MODULE_SIG_STRING "~Module signature appended~\n"
+
+/*
+ * Module signature information block.
+ *
+ * The constituents of the signature section are, in order:
+ *
+ * - Signer's name
+ * - Key identifier
+ * - Signature data
+ * - Information block
+ */
+struct module_signature {
+ __u8 algo; /* Public-key crypto algorithm [0] */
+ __u8 hash; /* Digest algorithm [0] */
+ __u8 id_type; /* Key identifier type [PKEY_ID_PKCS7] */
+ __u8 signer_len; /* Length of signer's name [0] */
+ __u8 key_id_len; /* Length of key identifier [0] */
+ __u8 __pad[3];
+ __be32 sig_len; /* Length of signature data */
+};
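+
+/* On-disk layout of a signed module, as parsed by populate_data_item_mod()
+ * below:
+ *
+ *   [ module image | signature data | struct module_signature | MODULE_SIG_STRING ]
+ */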
+
+struct data {
+ __u8 data[MAX_DATA_SIZE];
+ __u32 data_len;
+ __u8 sig[MAX_SIG_SIZE];
+ __u32 sig_len;
+};
+
+static bool kfunc_not_supported;
+
+static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt,
+ va_list args)
+{
+ if (level == LIBBPF_WARN)
+ vprintf(fmt, args);
+
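+	/* strcmp() returns 0 on a match, so each check below falls through
+	 * only when this is libbpf's "ksym not found" warning and the
+	 * missing ksym is bpf_verify_pkcs7_signature.
+	 */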
+ if (strcmp(fmt, "libbpf: extern (func ksym) '%s': not found in kernel or module BTFs\n"))
+ return 0;
+
+ if (strcmp(va_arg(args, char *), "bpf_verify_pkcs7_signature"))
+ return 0;
+
+ kfunc_not_supported = true;
+ return 0;
+}
+
+static int _run_setup_process(const char *setup_dir, const char *cmd)
+{
+ int child_pid, child_status;
+
+ child_pid = fork();
+ if (child_pid == 0) {
+ execlp("./verify_sig_setup.sh", "./verify_sig_setup.sh", cmd,
+ setup_dir, NULL);
+ exit(errno);
+
+ } else if (child_pid > 0) {
+ waitpid(child_pid, &child_status, 0);
+ return WEXITSTATUS(child_status);
+ }
+
+ return -EINVAL;
+}
+
+static int populate_data_item_str(const char *tmp_dir, struct data *data_item)
+{
+ struct stat st;
+ char data_template[] = "/tmp/dataXXXXXX";
+ char path[PATH_MAX];
+ int ret, fd, child_status, child_pid;
+
+ data_item->data_len = 4;
+ memcpy(data_item->data, "test", data_item->data_len);
+
+ fd = mkstemp(data_template);
+ if (fd == -1)
+ return -errno;
+
+ ret = write(fd, data_item->data, data_item->data_len);
+
+ close(fd);
+
+ if (ret != data_item->data_len) {
+ ret = -EIO;
+ goto out;
+ }
+
+ child_pid = fork();
+
+ if (child_pid == -1) {
+ ret = -errno;
+ goto out;
+ }
+
+ if (child_pid == 0) {
+ snprintf(path, sizeof(path), "%s/signing_key.pem", tmp_dir);
+
+ return execlp("./sign-file", "./sign-file", "-d", "sha256",
+ path, path, data_template, NULL);
+ }
+
+ waitpid(child_pid, &child_status, 0);
+
+ ret = WEXITSTATUS(child_status);
+ if (ret)
+ goto out;
+
+ snprintf(path, sizeof(path), "%s.p7s", data_template);
+
+ ret = stat(path, &st);
+ if (ret == -1) {
+ ret = -errno;
+ goto out;
+ }
+
+ if (st.st_size > sizeof(data_item->sig)) {
+ ret = -EINVAL;
+ goto out_sig;
+ }
+
+ data_item->sig_len = st.st_size;
+
+ fd = open(path, O_RDONLY);
+ if (fd == -1) {
+ ret = -errno;
+ goto out_sig;
+ }
+
+ ret = read(fd, data_item->sig, data_item->sig_len);
+
+ close(fd);
+
+ if (ret != data_item->sig_len) {
+ ret = -EIO;
+ goto out_sig;
+ }
+
+ ret = 0;
+out_sig:
+ unlink(path);
+out:
+ unlink(data_template);
+ return ret;
+}
+
+static int populate_data_item_mod(struct data *data_item)
+{
+ char mod_path[PATH_MAX], *mod_path_ptr;
+ struct stat st;
+ void *mod;
+ FILE *fp;
+ struct module_signature ms;
+ int ret, fd, modlen, marker_len, sig_len;
+
+ data_item->data_len = 0;
+
+ if (stat("/lib/modules", &st) == -1)
+ return 0;
+
+ /* Requires CONFIG_TCP_CONG_BIC=m. */
+ fp = popen("find /lib/modules/$(uname -r) -name tcp_bic.ko", "r");
+ if (!fp)
+ return 0;
+
+ mod_path_ptr = fgets(mod_path, sizeof(mod_path), fp);
+ pclose(fp);
+
+ if (!mod_path_ptr)
+ return 0;
+
+ mod_path_ptr = strchr(mod_path, '\n');
+ if (!mod_path_ptr)
+ return 0;
+
+ *mod_path_ptr = '\0';
+
+ if (stat(mod_path, &st) == -1)
+ return 0;
+
+ modlen = st.st_size;
+ marker_len = sizeof(MODULE_SIG_STRING) - 1;
+
+ fd = open(mod_path, O_RDONLY);
+ if (fd == -1)
+ return -errno;
+
+ mod = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
+
+ close(fd);
+
+ if (mod == MAP_FAILED)
+ return -errno;
+
+ if (strncmp(mod + modlen - marker_len, MODULE_SIG_STRING, marker_len)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ modlen -= marker_len;
+
+ memcpy(&ms, mod + (modlen - sizeof(ms)), sizeof(ms));
+
+ sig_len = __be32_to_cpu(ms.sig_len);
+ modlen -= sig_len + sizeof(ms);
+
+ if (modlen > sizeof(data_item->data)) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ memcpy(data_item->data, mod, modlen);
+ data_item->data_len = modlen;
+
+ if (sig_len > sizeof(data_item->sig)) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ memcpy(data_item->sig, mod + modlen, sig_len);
+ data_item->sig_len = sig_len;
+ ret = 0;
+out:
+ munmap(mod, st.st_size);
+ return ret;
+}
+
+static void test_verify_pkcs7_sig_from_map(void)
+{
+ libbpf_print_fn_t old_print_cb;
+ char tmp_dir_template[] = "/tmp/verify_sigXXXXXX";
+ char *tmp_dir;
+ struct test_verify_pkcs7_sig *skel = NULL;
+ struct bpf_map *map;
+ struct data data = {};
+ int ret, zero = 0;
+
+ /* Trigger creation of session keyring. */
+ syscall(__NR_request_key, "keyring", "_uid.0", NULL,
+ KEY_SPEC_SESSION_KEYRING);
+
+ tmp_dir = mkdtemp(tmp_dir_template);
+ if (!ASSERT_OK_PTR(tmp_dir, "mkdtemp"))
+ return;
+
+ ret = _run_setup_process(tmp_dir, "setup");
+ if (!ASSERT_OK(ret, "_run_setup_process"))
+ goto close_prog;
+
+ skel = test_verify_pkcs7_sig__open();
+ if (!ASSERT_OK_PTR(skel, "test_verify_pkcs7_sig__open"))
+ goto close_prog;
+
+ old_print_cb = libbpf_set_print(libbpf_print_cb);
+ ret = test_verify_pkcs7_sig__load(skel);
+ libbpf_set_print(old_print_cb);
+
+ if (ret < 0 && kfunc_not_supported) {
+ printf(
+ "%s:SKIP:bpf_verify_pkcs7_signature() kfunc not supported\n",
+ __func__);
+ test__skip();
+ goto close_prog;
+ }
+
+ if (!ASSERT_OK(ret, "test_verify_pkcs7_sig__load"))
+ goto close_prog;
+
+ ret = test_verify_pkcs7_sig__attach(skel);
+ if (!ASSERT_OK(ret, "test_verify_pkcs7_sig__attach"))
+ goto close_prog;
+
+ map = bpf_object__find_map_by_name(skel->obj, "data_input");
+ if (!ASSERT_OK_PTR(map, "data_input not found"))
+ goto close_prog;
+
+ skel->bss->monitored_pid = getpid();
+
+ /* Test without data and signature. */
+ skel->bss->user_keyring_serial = KEY_SPEC_SESSION_KEYRING;
+
+ ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
+ if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input"))
+ goto close_prog;
+
+ /* Test successful signature verification with session keyring. */
+ ret = populate_data_item_str(tmp_dir, &data);
+ if (!ASSERT_OK(ret, "populate_data_item_str"))
+ goto close_prog;
+
+ ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
+ if (!ASSERT_OK(ret, "bpf_map_update_elem data_input"))
+ goto close_prog;
+
+ /* Test successful signature verification with testing keyring. */
+ skel->bss->user_keyring_serial = syscall(__NR_request_key, "keyring",
+ "ebpf_testing_keyring", NULL,
+ KEY_SPEC_SESSION_KEYRING);
+
+ ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
+ if (!ASSERT_OK(ret, "bpf_map_update_elem data_input"))
+ goto close_prog;
+
+ /*
+ * Ensure key_task_permission() is called and rejects the keyring
+ * (no Search permission).
+ */
+ syscall(__NR_keyctl, KEYCTL_SETPERM, skel->bss->user_keyring_serial,
+ 0x37373737);
+
+ ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
+ if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input"))
+ goto close_prog;
+
+ syscall(__NR_keyctl, KEYCTL_SETPERM, skel->bss->user_keyring_serial,
+ 0x3f3f3f3f);
+
+ /*
+	 * Ensure key_validate() is called and rejects the keyring (key expired).
+ */
+ syscall(__NR_keyctl, KEYCTL_SET_TIMEOUT,
+ skel->bss->user_keyring_serial, 1);
+ sleep(1);
+
+ ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
+ if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input"))
+ goto close_prog;
+
+ skel->bss->user_keyring_serial = KEY_SPEC_SESSION_KEYRING;
+
+ /* Test with corrupted data (signature verification should fail). */
+ data.data[0] = 'a';
+ ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY);
+ if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input"))
+ goto close_prog;
+
+ ret = populate_data_item_mod(&data);
+ if (!ASSERT_OK(ret, "populate_data_item_mod"))
+ goto close_prog;
+
+ /* Test signature verification with system keyrings. */
+ if (data.data_len) {
+ skel->bss->user_keyring_serial = 0;
+ skel->bss->system_keyring_id = 0;
+
+ ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data,
+ BPF_ANY);
+ if (!ASSERT_OK(ret, "bpf_map_update_elem data_input"))
+ goto close_prog;
+
+ skel->bss->system_keyring_id = VERIFY_USE_SECONDARY_KEYRING;
+
+ ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data,
+ BPF_ANY);
+ if (!ASSERT_OK(ret, "bpf_map_update_elem data_input"))
+ goto close_prog;
+
+ skel->bss->system_keyring_id = VERIFY_USE_PLATFORM_KEYRING;
+
+ ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data,
+ BPF_ANY);
+ ASSERT_LT(ret, 0, "bpf_map_update_elem data_input");
+ }
+
+close_prog:
+ _run_setup_process(tmp_dir, "cleanup");
+
+ if (!skel)
+ return;
+
+ skel->bss->monitored_pid = 0;
+ test_verify_pkcs7_sig__destroy(skel);
+}
+
+static int get_signature_size(const char *sig_path)
+{
+ struct stat st;
+
+ if (stat(sig_path, &st) == -1)
+ return -1;
+
+ return st.st_size;
+}
+
+static int add_signature_to_xattr(const char *data_path, const char *sig_path)
+{
+ char sig[MAX_SIG_SIZE] = {0};
+ int fd, size, ret;
+
+ if (sig_path) {
+ fd = open(sig_path, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ size = read(fd, sig, MAX_SIG_SIZE);
+ close(fd);
+ if (size <= 0)
+ return -1;
+ } else {
+ /* no sig_path, just write 32 bytes of zeros */
+ size = 32;
+ }
+ ret = setxattr(data_path, "user.sig", sig, size, 0);
+ if (!ASSERT_OK(ret, "setxattr"))
+ return -1;
+
+ return 0;
+}
+
+static int test_open_file(struct test_sig_in_xattr *skel, char *data_path,
+			  pid_t pid, bool should_succeed, char *name)
+{
+ int ret;
+
+ skel->bss->monitored_pid = pid;
+ ret = open(data_path, O_RDONLY);
+ close(ret);
+ skel->bss->monitored_pid = 0;
+
+	if (should_succeed) {
+ if (!ASSERT_GE(ret, 0, name))
+ return -1;
+ } else {
+ if (!ASSERT_LT(ret, 0, name))
+ return -1;
+ }
+ return 0;
+}
+
+static void test_pkcs7_sig_fsverity(void)
+{
+ char data_path[PATH_MAX];
+ char sig_path[PATH_MAX];
+ char tmp_dir_template[] = "/tmp/verify_sigXXXXXX";
+ char *tmp_dir;
+ struct test_sig_in_xattr *skel = NULL;
+ pid_t pid;
+ int ret;
+
+ tmp_dir = mkdtemp(tmp_dir_template);
+ if (!ASSERT_OK_PTR(tmp_dir, "mkdtemp"))
+ return;
+
+ snprintf(data_path, PATH_MAX, "%s/data-file", tmp_dir);
+ snprintf(sig_path, PATH_MAX, "%s/sig-file", tmp_dir);
+
+ ret = _run_setup_process(tmp_dir, "setup");
+ if (!ASSERT_OK(ret, "_run_setup_process"))
+ goto out;
+
+ ret = _run_setup_process(tmp_dir, "fsverity-create-sign");
+
+ if (ret) {
+ printf("%s: SKIP: fsverity [sign|enable] doesn't work.\n"
+		       "To run this test, enable CONFIG_FS_VERITY and fs-verity on the filesystem.\n",
+ __func__);
+ test__skip();
+ goto out;
+ }
+
+ skel = test_sig_in_xattr__open();
+ if (!ASSERT_OK_PTR(skel, "test_sig_in_xattr__open"))
+ goto out;
+ ret = get_signature_size(sig_path);
+ if (!ASSERT_GT(ret, 0, "get_signature_size"))
+ goto out;
+ skel->bss->sig_size = ret;
+ skel->bss->user_keyring_serial = syscall(__NR_request_key, "keyring",
+ "ebpf_testing_keyring", NULL,
+ KEY_SPEC_SESSION_KEYRING);
+ memcpy(skel->bss->digest, "FSVerity", 8);
+
+ ret = test_sig_in_xattr__load(skel);
+ if (!ASSERT_OK(ret, "test_sig_in_xattr__load"))
+ goto out;
+
+ ret = test_sig_in_xattr__attach(skel);
+ if (!ASSERT_OK(ret, "test_sig_in_xattr__attach"))
+ goto out;
+
+ pid = getpid();
+
+ /* Case 1: fsverity is not enabled, open should succeed */
+ if (test_open_file(skel, data_path, pid, true, "open_1"))
+ goto out;
+
+ /* Case 2: fsverity is enabled, xattr is missing, open should
+ * fail
+ */
+ ret = _run_setup_process(tmp_dir, "fsverity-enable");
+ if (!ASSERT_OK(ret, "fsverity-enable"))
+ goto out;
+ if (test_open_file(skel, data_path, pid, false, "open_2"))
+ goto out;
+
+ /* Case 3: fsverity is enabled, xattr has valid signature, open
+ * should succeed
+ */
+ ret = add_signature_to_xattr(data_path, sig_path);
+ if (!ASSERT_OK(ret, "add_signature_to_xattr_1"))
+ goto out;
+
+ if (test_open_file(skel, data_path, pid, true, "open_3"))
+ goto out;
+
+ /* Case 4: fsverity is enabled, xattr has invalid signature, open
+ * should fail
+ */
+ ret = add_signature_to_xattr(data_path, NULL);
+ if (!ASSERT_OK(ret, "add_signature_to_xattr_2"))
+ goto out;
+ test_open_file(skel, data_path, pid, false, "open_4");
+
+out:
+ _run_setup_process(tmp_dir, "cleanup");
+ if (!skel)
+ return;
+
+ skel->bss->monitored_pid = 0;
+ test_sig_in_xattr__destroy(skel);
+}
+
+void test_verify_pkcs7_sig(void)
+{
+ if (test__start_subtest("pkcs7_sig_from_map"))
+ test_verify_pkcs7_sig_from_map();
+ if (test__start_subtest("pkcs7_sig_fsverity"))
+ test_pkcs7_sig_fsverity();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/vmlinux.c b/tools/testing/selftests/bpf/prog_tests/vmlinux.c
index 72310cfc6474..6fb2217d940b 100644
--- a/tools/testing/selftests/bpf/prog_tests/vmlinux.c
+++ b/tools/testing/selftests/bpf/prog_tests/vmlinux.c
@@ -16,27 +16,27 @@ static void nsleep()
void test_vmlinux(void)
{
- int duration = 0, err;
+ int err;
struct test_vmlinux* skel;
struct test_vmlinux__bss *bss;
skel = test_vmlinux__open_and_load();
- if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ if (!ASSERT_OK_PTR(skel, "test_vmlinux__open_and_load"))
return;
bss = skel->bss;
err = test_vmlinux__attach(skel);
- if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ if (!ASSERT_OK(err, "test_vmlinux__attach"))
goto cleanup;
/* trigger everything */
nsleep();
- CHECK(!bss->tp_called, "tp", "not called\n");
- CHECK(!bss->raw_tp_called, "raw_tp", "not called\n");
- CHECK(!bss->tp_btf_called, "tp_btf", "not called\n");
- CHECK(!bss->kprobe_called, "kprobe", "not called\n");
- CHECK(!bss->fentry_called, "fentry", "not called\n");
+ ASSERT_TRUE(bss->tp_called, "tp");
+ ASSERT_TRUE(bss->raw_tp_called, "raw_tp");
+ ASSERT_TRUE(bss->tp_btf_called, "tp_btf");
+ ASSERT_TRUE(bss->kprobe_called, "kprobe");
+ ASSERT_TRUE(bss->fentry_called, "fentry");
cleanup:
test_vmlinux__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/vrf_socket_lookup.c b/tools/testing/selftests/bpf/prog_tests/vrf_socket_lookup.c
new file mode 100644
index 000000000000..2a5e207edad6
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/vrf_socket_lookup.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * Topology:
+ * ---------
+ * NS0 namespace | NS1 namespace
+ * |
+ * +--------------+ | +--------------+
+ * | veth01 |----------| veth10 |
+ * | 172.16.1.100 | | | 172.16.1.200 |
+ * | bpf | | +--------------+
+ * +--------------+ |
+ * server(UDP/TCP) |
+ * +-------------------+ |
+ * | vrf1 | |
+ * | +--------------+ | | +--------------+
+ * | | veth02 |----------| veth20 |
+ * | | 172.16.2.100 | | | | 172.16.2.200 |
+ * | | bpf | | | +--------------+
+ * | +--------------+ | |
+ * | server(UDP/TCP) | |
+ * +-------------------+ |
+ *
+ * Test flow
+ * -----------
+ * The test verifies that socket lookup via TC and XDP is VRF-aware:
+ * 1) Creates two veth pairs between NS0 and NS1:
+ *    a) veth01 <-> veth10 outside the VRF
+ *    b) veth02 <-> veth20 in the VRF
+ * 2) Attaches TC and XDP programs to veth01 and veth02; each calls:
+ *    a) bpf_skc_lookup_tcp() with TCP, when tcp_skc is true
+ *    b) bpf_sk_lookup_tcp() with TCP, when tcp_skc is false
+ *    c) bpf_sk_lookup_udp() with UDP
+ *    The program stores the lookup result in bss->lookup_status.
+ * 3) Creates a TCP/UDP server socket both inside and outside the VRF.
+ * 4) The test expects lookup_status to be:
+ * a) 0 from device in VRF to server outside VRF
+ * b) 0 from device outside VRF to server in VRF
+ * c) 1 from device in VRF to server in VRF
+ * d) 1 from device outside VRF to server outside VRF
+ */
+
+#include <net/if.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "vrf_socket_lookup.skel.h"
+
+#define NS0 "vrf_socket_lookup_0"
+#define NS1 "vrf_socket_lookup_1"
+
+#define IP4_ADDR_VETH01 "172.16.1.100"
+#define IP4_ADDR_VETH10 "172.16.1.200"
+#define IP4_ADDR_VETH02 "172.16.2.100"
+#define IP4_ADDR_VETH20 "172.16.2.200"
+
+#define NON_VRF_PORT 5000
+#define IN_VRF_PORT 5001
+
+#define TIMEOUT_MS 3000
+
+static int make_socket(int sotype, const char *ip, int port,
+ struct sockaddr_storage *addr)
+{
+ int err, fd;
+
+ err = make_sockaddr(AF_INET, ip, port, addr, NULL);
+ if (!ASSERT_OK(err, "make_address"))
+ return -1;
+
+ fd = socket(AF_INET, sotype, 0);
+ if (!ASSERT_GE(fd, 0, "socket"))
+ return -1;
+
+ if (!ASSERT_OK(settimeo(fd, TIMEOUT_MS), "settimeo"))
+ goto fail;
+
+ return fd;
+fail:
+ close(fd);
+ return -1;
+}
+
+static int make_server(int sotype, const char *ip, int port, const char *ifname)
+{
+ int err, fd = -1;
+
+ fd = start_server(AF_INET, sotype, ip, port, TIMEOUT_MS);
+ if (!ASSERT_GE(fd, 0, "start_server"))
+ return -1;
+
+ if (ifname) {
+ err = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
+ ifname, strlen(ifname) + 1);
+ if (!ASSERT_OK(err, "setsockopt(SO_BINDTODEVICE)"))
+ goto fail;
+ }
+
+ return fd;
+fail:
+ close(fd);
+ return -1;
+}
+
+static int attach_progs(char *ifname, int tc_prog_fd, int xdp_prog_fd)
+{
+ LIBBPF_OPTS(bpf_tc_hook, hook, .attach_point = BPF_TC_INGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1,
+ .prog_fd = tc_prog_fd);
+ int ret, ifindex;
+
+ ifindex = if_nametoindex(ifname);
+ if (!ASSERT_NEQ(ifindex, 0, "if_nametoindex"))
+ return -1;
+ hook.ifindex = ifindex;
+
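+	/* attach both the TC ingress and the XDP program so either lookup
+	 * path can be exercised; the BPF side selects via bss->test_xdp
+	 */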
+ ret = bpf_tc_hook_create(&hook);
+ if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
+ return ret;
+
+ ret = bpf_tc_attach(&hook, &opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+ bpf_tc_hook_destroy(&hook);
+ return ret;
+ }
+ ret = bpf_xdp_attach(ifindex, xdp_prog_fd, 0, NULL);
+ if (!ASSERT_OK(ret, "bpf_xdp_attach")) {
+ bpf_tc_hook_destroy(&hook);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void cleanup(void)
+{
+ SYS_NOFAIL("test -f /var/run/netns/" NS0 " && ip netns delete "
+ NS0);
+ SYS_NOFAIL("test -f /var/run/netns/" NS1 " && ip netns delete "
+ NS1);
+}
+
+static int setup(struct vrf_socket_lookup *skel)
+{
+ int tc_prog_fd, xdp_prog_fd, ret = 0;
+ struct nstoken *nstoken = NULL;
+
+ SYS(fail, "ip netns add " NS0);
+ SYS(fail, "ip netns add " NS1);
+
+ /* NS0 <-> NS1 [veth01 <-> veth10] */
+ SYS(fail, "ip link add veth01 netns " NS0 " type veth peer name veth10"
+ " netns " NS1);
+ SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH01 "/24 dev veth01");
+ SYS(fail, "ip -net " NS0 " link set dev veth01 up");
+ SYS(fail, "ip -net " NS1 " addr add " IP4_ADDR_VETH10 "/24 dev veth10");
+ SYS(fail, "ip -net " NS1 " link set dev veth10 up");
+
+ /* NS0 <-> NS1 [veth02 <-> veth20] */
+ SYS(fail, "ip link add veth02 netns " NS0 " type veth peer name veth20"
+ " netns " NS1);
+ SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH02 "/24 dev veth02");
+ SYS(fail, "ip -net " NS0 " link set dev veth02 up");
+ SYS(fail, "ip -net " NS1 " addr add " IP4_ADDR_VETH20 "/24 dev veth20");
+ SYS(fail, "ip -net " NS1 " link set dev veth20 up");
+
+ /* veth02 -> vrf1 */
+ SYS(fail, "ip -net " NS0 " link add vrf1 type vrf table 11");
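+	/* catch-all unreachable default with a very high metric, so it never
+	 * shadows real routes added to the VRF table
+	 */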
+ SYS(fail, "ip -net " NS0 " route add vrf vrf1 unreachable default"
+ " metric 4278198272");
+ SYS(fail, "ip -net " NS0 " link set vrf1 alias vrf");
+ SYS(fail, "ip -net " NS0 " link set vrf1 up");
+ SYS(fail, "ip -net " NS0 " link set veth02 master vrf1");
+
+ /* Attach TC and XDP progs to veth devices in NS0 */
+ nstoken = open_netns(NS0);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS0))
+ goto fail;
+ tc_prog_fd = bpf_program__fd(skel->progs.tc_socket_lookup);
+ if (!ASSERT_GE(tc_prog_fd, 0, "bpf_program__tc_fd"))
+ goto fail;
+ xdp_prog_fd = bpf_program__fd(skel->progs.xdp_socket_lookup);
+ if (!ASSERT_GE(xdp_prog_fd, 0, "bpf_program__xdp_fd"))
+ goto fail;
+
+ if (attach_progs("veth01", tc_prog_fd, xdp_prog_fd))
+ goto fail;
+
+ if (attach_progs("veth02", tc_prog_fd, xdp_prog_fd))
+ goto fail;
+
+ goto close;
+fail:
+ ret = -1;
+close:
+ if (nstoken)
+ close_netns(nstoken);
+ return ret;
+}
+
+static int test_lookup(struct vrf_socket_lookup *skel, int sotype,
+ const char *ip, int port, bool test_xdp, bool tcp_skc,
+ int lookup_status_exp)
+{
+ static const char msg[] = "Hello Server";
+ struct sockaddr_storage addr = {};
+ int fd, ret = 0;
+
+ fd = make_socket(sotype, ip, port, &addr);
+ if (fd < 0)
+ return -1;
+
+ skel->bss->test_xdp = test_xdp;
+ skel->bss->tcp_skc = tcp_skc;
+ skel->bss->lookup_status = -1;
+
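+	/* the connect()/sendto() outcome is irrelevant; we only need the
+	 * packet to traverse the TC/XDP hook so the program records
+	 * lookup_status
+	 */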
+ if (sotype == SOCK_STREAM)
+ connect(fd, (void *)&addr, sizeof(struct sockaddr_in));
+ else
+ sendto(fd, msg, sizeof(msg), 0, (void *)&addr,
+ sizeof(struct sockaddr_in));
+
+ if (!ASSERT_EQ(skel->bss->lookup_status, lookup_status_exp,
+ "lookup_status"))
+ goto fail;
+
+ goto close;
+
+fail:
+ ret = -1;
+close:
+ close(fd);
+ return ret;
+}
+
+static void _test_vrf_socket_lookup(struct vrf_socket_lookup *skel, int sotype,
+ bool test_xdp, bool tcp_skc)
+{
+ int in_vrf_server = -1, non_vrf_server = -1;
+ struct nstoken *nstoken = NULL;
+
+ nstoken = open_netns(NS0);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS0))
+ goto done;
+
+ /* Open sockets in and outside VRF */
+ non_vrf_server = make_server(sotype, "0.0.0.0", NON_VRF_PORT, NULL);
+ if (!ASSERT_GE(non_vrf_server, 0, "make_server__outside_vrf_fd"))
+ goto done;
+
+ in_vrf_server = make_server(sotype, "0.0.0.0", IN_VRF_PORT, "veth02");
+ if (!ASSERT_GE(in_vrf_server, 0, "make_server__in_vrf_fd"))
+ goto done;
+
+ /* Perform test from NS1 */
+ close_netns(nstoken);
+ nstoken = open_netns(NS1);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS1))
+ goto done;
+
+ if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH02, NON_VRF_PORT,
+ test_xdp, tcp_skc, 0), "in_to_out"))
+ goto done;
+ if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH02, IN_VRF_PORT,
+ test_xdp, tcp_skc, 1), "in_to_in"))
+ goto done;
+ if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH01, NON_VRF_PORT,
+ test_xdp, tcp_skc, 1), "out_to_out"))
+ goto done;
+ if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH01, IN_VRF_PORT,
+ test_xdp, tcp_skc, 0), "out_to_in"))
+ goto done;
+
+done:
+ if (non_vrf_server >= 0)
+ close(non_vrf_server);
+ if (in_vrf_server >= 0)
+ close(in_vrf_server);
+ if (nstoken)
+ close_netns(nstoken);
+}
+
+void test_vrf_socket_lookup(void)
+{
+ struct vrf_socket_lookup *skel;
+
+ cleanup();
+
+ skel = vrf_socket_lookup__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "vrf_socket_lookup__open_and_load"))
+ return;
+
+ if (!ASSERT_OK(setup(skel), "setup"))
+ goto done;
+
+	if (test__start_subtest("tc_socket_lookup_tcp"))
+		_test_vrf_socket_lookup(skel, SOCK_STREAM, false, false);
+	if (test__start_subtest("tc_socket_lookup_tcp_skc"))
+		_test_vrf_socket_lookup(skel, SOCK_STREAM, false, true);
+	if (test__start_subtest("tc_socket_lookup_udp"))
+		_test_vrf_socket_lookup(skel, SOCK_DGRAM, false, false);
+	if (test__start_subtest("xdp_socket_lookup_tcp"))
+		_test_vrf_socket_lookup(skel, SOCK_STREAM, true, false);
+	if (test__start_subtest("xdp_socket_lookup_tcp_skc"))
+		_test_vrf_socket_lookup(skel, SOCK_STREAM, true, true);
+	if (test__start_subtest("xdp_socket_lookup_udp"))
+		_test_vrf_socket_lookup(skel, SOCK_DGRAM, true, false);
+
+done:
+ vrf_socket_lookup__destroy(skel);
+ cleanup();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/wq.c b/tools/testing/selftests/bpf/prog_tests/wq.c
new file mode 100644
index 000000000000..15c67d23128b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/wq.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Benjamin Tissoires */
+#include <test_progs.h>
+#include "wq.skel.h"
+#include "wq_failures.skel.h"
+
+void serial_test_wq(void)
+{
+ struct wq *wq_skel = NULL;
+ int err, prog_fd;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ RUN_TESTS(wq);
+
+ /* re-run the success test to check if the timer was actually executed */
+
+ wq_skel = wq__open_and_load();
+ if (!ASSERT_OK_PTR(wq_skel, "wq_skel_load"))
+ return;
+
+ err = wq__attach(wq_skel);
+ if (!ASSERT_OK(err, "wq_attach"))
+ return;
+
+ prog_fd = bpf_program__fd(wq_skel->progs.test_syscall_array_sleepable);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ usleep(50); /* 10 usecs should be enough, but give it extra */
+
+ ASSERT_EQ(wq_skel->bss->ok_sleepable, (1 << 1), "ok_sleepable");
+ wq__destroy(wq_skel);
+}
+
+void serial_test_failures_wq(void)
+{
+ RUN_TESTS(wq_failures);
+}
+
+static void test_failure_map_no_btf(void)
+{
+ struct wq *skel = NULL;
+ char log[8192];
+ const struct bpf_insn *insns;
+ size_t insn_cnt;
+ int ret, err, map_fd;
+ LIBBPF_OPTS(bpf_prog_load_opts, opts, .log_size = sizeof(log), .log_buf = log,
+ .log_level = 2);
+
+ skel = wq__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ err = bpf_object__prepare(skel->obj);
+ if (!ASSERT_OK(err, "skel__prepare"))
+ goto out;
+
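+	/* create a plain array map without BTF type info and splice it in
+	 * via bpf_map__reuse_fd(), so the verifier has to reject the
+	 * bpf_wq usage in the program loaded below
+	 */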
+ map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "map_no_btf", sizeof(__u32), sizeof(__u64), 100,
+ NULL);
+ if (!ASSERT_GT(map_fd, -1, "map create"))
+ goto out;
+
+ err = bpf_map__reuse_fd(skel->maps.array, map_fd);
+ if (!ASSERT_OK(err, "map reuse fd")) {
+ close(map_fd);
+ goto out;
+ }
+
+ insns = bpf_program__insns(skel->progs.test_map_no_btf);
+ if (!ASSERT_OK_PTR(insns, "insns ptr"))
+ goto out;
+
+ insn_cnt = bpf_program__insn_cnt(skel->progs.test_map_no_btf);
+ if (!ASSERT_GT(insn_cnt, 0u, "insn cnt"))
+ goto out;
+
+ ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
+ if (!ASSERT_LT(ret, 0, "prog load failed")) {
+ if (ret > 0)
+ close(ret);
+ goto out;
+ }
+
+ ASSERT_HAS_SUBSTR(log, "map 'map_no_btf' has to have BTF in order to use bpf_wq",
+ "log complains no map BTF");
+out:
+ wq__destroy(skel);
+}
+
+void test_wq_custom(void)
+{
+ if (test__start_subtest("test_failure_map_no_btf"))
+ test_failure_map_no_btf();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp.c b/tools/testing/selftests/bpf/prog_tests/xdp.c
index 48921ff74850..947863a1d536 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp.c
@@ -8,15 +8,21 @@ void test_xdp(void)
struct vip key6 = {.protocol = 6, .family = AF_INET6};
struct iptnl_info value4 = {.family = AF_INET};
struct iptnl_info value6 = {.family = AF_INET6};
- const char *file = "./test_xdp.o";
+ const char *file = "./test_xdp.bpf.o";
struct bpf_object *obj;
char buf[128];
- struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
- struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
- __u32 duration, retval, size;
+ struct ipv6hdr iph6;
+ struct iphdr iph;
int err, prog_fd, map_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = buf,
+ .data_size_out = sizeof(buf),
+ .repeat = 1,
+ );
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
@@ -26,20 +32,23 @@ void test_xdp(void)
bpf_map_update_elem(map_fd, &key4, &value4, 0);
bpf_map_update_elem(map_fd, &key6, &value6, 0);
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
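+	/* copy the headers out of buf into local structs: reading through
+	 * pointers into the packet buffer could fault on architectures with
+	 * strict alignment (assumed rationale for the struct copies below)
+	 */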
+ memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph));
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, XDP_TX, "ipv4 test_run retval");
+ ASSERT_EQ(topts.data_size_out, 74, "ipv4 test_run data_size_out");
+ ASSERT_EQ(iph.protocol, IPPROTO_IPIP, "ipv4 test_run iph.protocol");
- CHECK(err || retval != XDP_TX || size != 74 ||
- iph->protocol != IPPROTO_IPIP, "ipv4",
- "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
+ topts.data_in = &pkt_v6;
+ topts.data_size_in = sizeof(pkt_v6);
+ topts.data_size_out = sizeof(buf);
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != XDP_TX || size != 114 ||
- iph6->nexthdr != IPPROTO_IPV6, "ipv6",
- "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ memcpy(&iph6, buf + sizeof(struct ethhdr), sizeof(iph6));
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, XDP_TX, "ipv6 test_run retval");
+ ASSERT_EQ(topts.data_size_out, 114, "ipv6 test_run data_size_out");
+ ASSERT_EQ(iph6.nexthdr, IPPROTO_IPV6, "ipv6 test_run iph6.nexthdr");
out:
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c
new file mode 100644
index 000000000000..fce203640f8c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+
+static void test_xdp_update_frags(void)
+{
+ const char *file = "./test_xdp_update_frags.bpf.o";
+ int err, prog_fd, max_skb_frags, buf_size, num;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ __u32 *offset;
+ __u8 *buf;
+ FILE *f;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ obj = bpf_object__open(file);
+ if (libbpf_get_error(obj))
+ return;
+
+ prog = bpf_object__next_program(obj, NULL);
+ if (bpf_object__load(obj))
+		goto out;
+
+ prog_fd = bpf_program__fd(prog);
+
+ buf = malloc(128);
+ if (!ASSERT_OK_PTR(buf, "alloc buf 128b"))
+ goto out;
+
+ memset(buf, 0, 128);
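+	/* the first 4 bytes of the buffer carry the offset that the BPF
+	 * program is expected to patch (convention inferred from the
+	 * markers below)
+	 */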
+ offset = (__u32 *)buf;
+ *offset = 16;
+ buf[*offset] = 0xaa; /* marker at offset 16 (head) */
+ buf[*offset + 15] = 0xaa; /* marker at offset 31 (head) */
+
+ topts.data_in = buf;
+ topts.data_out = buf;
+ topts.data_size_in = 128;
+ topts.data_size_out = 128;
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+ /* test_xdp_update_frags: buf[16,31]: 0xaa -> 0xbb */
+ ASSERT_OK(err, "xdp_update_frag");
+ ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval");
+ ASSERT_EQ(buf[16], 0xbb, "xdp_update_frag buf[16]");
+ ASSERT_EQ(buf[31], 0xbb, "xdp_update_frag buf[31]");
+
+ free(buf);
+
+ buf = malloc(9000);
+ if (!ASSERT_OK_PTR(buf, "alloc buf 9Kb"))
+ goto out;
+
+ memset(buf, 0, 9000);
+ offset = (__u32 *)buf;
+ *offset = 5000;
+ buf[*offset] = 0xaa; /* marker at offset 5000 (frag0) */
+ buf[*offset + 15] = 0xaa; /* marker at offset 5015 (frag0) */
+
+ topts.data_in = buf;
+ topts.data_out = buf;
+ topts.data_size_in = 9000;
+ topts.data_size_out = 9000;
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+ /* test_xdp_update_frags: buf[5000,5015]: 0xaa -> 0xbb */
+ ASSERT_OK(err, "xdp_update_frag");
+ ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval");
+ ASSERT_EQ(buf[5000], 0xbb, "xdp_update_frag buf[5000]");
+ ASSERT_EQ(buf[5015], 0xbb, "xdp_update_frag buf[5015]");
+
+ memset(buf, 0, 9000);
+ offset = (__u32 *)buf;
+ *offset = 3510;
+ buf[*offset] = 0xaa; /* marker at offset 3510 (head) */
+ buf[*offset + 15] = 0xaa; /* marker at offset 3525 (frag0) */
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+ /* test_xdp_update_frags: buf[3510,3525]: 0xaa -> 0xbb */
+ ASSERT_OK(err, "xdp_update_frag");
+ ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval");
+ ASSERT_EQ(buf[3510], 0xbb, "xdp_update_frag buf[3510]");
+ ASSERT_EQ(buf[3525], 0xbb, "xdp_update_frag buf[3525]");
+
+ memset(buf, 0, 9000);
+ offset = (__u32 *)buf;
+ *offset = 7606;
+ buf[*offset] = 0xaa; /* marker at offset 7606 (frag0) */
+ buf[*offset + 15] = 0xaa; /* marker at offset 7621 (frag1) */
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+ /* test_xdp_update_frags: buf[7606,7621]: 0xaa -> 0xbb */
+ ASSERT_OK(err, "xdp_update_frag");
+ ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval");
+ ASSERT_EQ(buf[7606], 0xbb, "xdp_update_frag buf[7606]");
+ ASSERT_EQ(buf[7621], 0xbb, "xdp_update_frag buf[7621]");
+
+ free(buf);
+
+ /* test_xdp_update_frags: unsupported buffer size */
+ f = fopen("/proc/sys/net/core/max_skb_frags", "r");
+ if (!ASSERT_OK_PTR(f, "max_skb_frag file pointer"))
+ goto out;
+
+ num = fscanf(f, "%d", &max_skb_frags);
+ fclose(f);
+
+ if (!ASSERT_EQ(num, 1, "max_skb_frags read failed"))
+ goto out;
+
+ /* xdp_buff linear area size is always set to 4096 in the
+ * bpf_prog_test_run_xdp routine.
+ */
+ buf_size = 4096 + (max_skb_frags + 1) * sysconf(_SC_PAGE_SIZE);
+ buf = malloc(buf_size);
+ if (!ASSERT_OK_PTR(buf, "alloc buf"))
+ goto out;
+
+ memset(buf, 0, buf_size);
+ offset = (__u32 *)buf;
+ *offset = 16;
+ buf[*offset] = 0xaa;
+ buf[*offset + 15] = 0xaa;
+
+ topts.data_in = buf;
+ topts.data_out = buf;
+ topts.data_size_in = buf_size;
+ topts.data_size_out = buf_size;
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_EQ(err, -ENOMEM,
+		  "unsupported buf size, possible non-default /proc/sys/net/core/max_skb_frags?");
+ free(buf);
+out:
+ bpf_object__close(obj);
+}
+
+void test_xdp_adjust_frags(void)
+{
+ if (test__start_subtest("xdp_adjust_frags"))
+ test_xdp_update_frags();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
index d5c98f2cb12f..43264347e7d7 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
@@ -2,81 +2,107 @@
#include <test_progs.h>
#include <network_helpers.h>
-void test_xdp_adjust_tail_shrink(void)
+static void test_xdp_adjust_tail_shrink(void)
{
- const char *file = "./test_xdp_adjust_tail_shrink.o";
- __u32 duration, retval, size, expect_sz;
+ const char *file = "./test_xdp_adjust_tail_shrink.bpf.o";
+ __u32 expect_sz;
struct bpf_object *obj;
int err, prog_fd;
char buf[128];
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = buf,
+ .data_size_out = sizeof(buf),
+ .repeat = 1,
+ );
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (CHECK_FAIL(err))
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (!ASSERT_OK(err, "test_xdp_adjust_tail_shrink"))
return;
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
-
- CHECK(err || retval != XDP_DROP,
- "ipv4", "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "ipv4");
+ ASSERT_EQ(topts.retval, XDP_DROP, "ipv4 retval");
expect_sz = sizeof(pkt_v6) - 20; /* Test shrink with 20 bytes */
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != XDP_TX || size != expect_sz,
- "ipv6", "err %d errno %d retval %d size %d expect-size %d\n",
- err, errno, retval, size, expect_sz);
+ topts.data_in = &pkt_v6;
+ topts.data_size_in = sizeof(pkt_v6);
+ topts.data_size_out = sizeof(buf);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "ipv6");
+ ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval");
+ ASSERT_EQ(topts.data_size_out, expect_sz, "ipv6 size");
+
bpf_object__close(obj);
}
-void test_xdp_adjust_tail_grow(void)
+static void test_xdp_adjust_tail_grow(bool is_64k_pagesize)
{
- const char *file = "./test_xdp_adjust_tail_grow.o";
+ const char *file = "./test_xdp_adjust_tail_grow.bpf.o";
struct bpf_object *obj;
- char buf[4096]; /* avoid segfault: large buf to hold grow results */
- __u32 duration, retval, size, expect_sz;
+ char buf[8192]; /* avoid segfault: large buf to hold grow results */
+ __u32 expect_sz;
int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_out = buf,
+ .data_size_out = sizeof(buf),
+ .repeat = 1,
+ );
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (CHECK_FAIL(err))
+ /* topts.data_size_in as a special signal to bpf prog */
+ if (is_64k_pagesize)
+ topts.data_size_in = sizeof(pkt_v4) - 1;
+ else
+ topts.data_size_in = sizeof(pkt_v4);
+
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
return;
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- CHECK(err || retval != XDP_DROP,
- "ipv4", "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "ipv4");
+ ASSERT_EQ(topts.retval, XDP_DROP, "ipv4 retval");
expect_sz = sizeof(pkt_v6) + 40; /* Test grow with 40 bytes */
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6) /* 74 */,
- buf, &size, &retval, &duration);
- CHECK(err || retval != XDP_TX || size != expect_sz,
- "ipv6", "err %d errno %d retval %d size %d expect-size %d\n",
- err, errno, retval, size, expect_sz);
+ topts.data_in = &pkt_v6;
+ topts.data_size_in = sizeof(pkt_v6);
+ topts.data_size_out = sizeof(buf);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "ipv6");
+ ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval");
+ ASSERT_EQ(topts.data_size_out, expect_sz, "ipv6 size");
bpf_object__close(obj);
}
-void test_xdp_adjust_tail_grow2(void)
+static void test_xdp_adjust_tail_grow2(void)
{
- const char *file = "./test_xdp_adjust_tail_grow.o";
+ const char *file = "./test_xdp_adjust_tail_grow.bpf.o";
char buf[4096]; /* avoid segfault: large buf to hold grow results */
- int tailroom = 320; /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info))*/;
struct bpf_object *obj;
int err, cnt, i;
- int max_grow;
+ int max_grow, prog_fd;
+ /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
+#if defined(__s390x__)
+ int tailroom = 512;
+#elif defined(__powerpc__)
+ int tailroom = 384;
+#else
+ int tailroom = 320;
+#endif
- struct bpf_prog_test_run_attr tattr = {
+ LIBBPF_OPTS(bpf_test_run_opts, tattr,
.repeat = 1,
.data_in = &buf,
.data_out = &buf,
.data_size_in = 0, /* Per test */
.data_size_out = 0, /* Per test */
- };
+ );
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &tattr.prog_fd);
- if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+	if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow2"))
return;
/* Test case-64 */
@@ -84,58 +110,267 @@ void test_xdp_adjust_tail_grow2(void)
tattr.data_size_in = 64; /* Determine test case via pkt size */
tattr.data_size_out = 128; /* Limit copy_size */
/* Kernel side alloc packet memory area that is zero init */
- err = bpf_prog_test_run_xattr(&tattr);
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
- CHECK_ATTR(errno != ENOSPC /* Due limit copy_size in bpf_test_finish */
- || tattr.retval != XDP_TX
- || tattr.data_size_out != 192, /* Expected grow size */
- "case-64",
- "err %d errno %d retval %d size %d\n",
- err, errno, tattr.retval, tattr.data_size_out);
+ ASSERT_EQ(errno, ENOSPC, "case-64 errno"); /* Due limit copy_size in bpf_test_finish */
+ ASSERT_EQ(tattr.retval, XDP_TX, "case-64 retval");
+ ASSERT_EQ(tattr.data_size_out, 192, "case-64 data_size_out"); /* Expected grow size */
/* Extra checks for data contents */
- CHECK_ATTR(tattr.data_size_out != 192
- || buf[0] != 1 || buf[63] != 1 /* 0-63 memset to 1 */
- || buf[64] != 0 || buf[127] != 0 /* 64-127 memset to 0 */
- || buf[128] != 1 || buf[191] != 1, /*128-191 memset to 1 */
- "case-64-data",
- "err %d errno %d retval %d size %d\n",
- err, errno, tattr.retval, tattr.data_size_out);
+ ASSERT_EQ(buf[0], 1, "case-64-data buf[0]"); /* 0-63 memset to 1 */
+ ASSERT_EQ(buf[63], 1, "case-64-data buf[63]");
+ ASSERT_EQ(buf[64], 0, "case-64-data buf[64]"); /* 64-127 memset to 0 */
+ ASSERT_EQ(buf[127], 0, "case-64-data buf[127]");
+ ASSERT_EQ(buf[128], 1, "case-64-data buf[128]"); /* 128-191 memset to 1 */
+ ASSERT_EQ(buf[191], 1, "case-64-data buf[191]");
/* Test case-128 */
memset(buf, 2, sizeof(buf));
tattr.data_size_in = 128; /* Determine test case via pkt size */
tattr.data_size_out = sizeof(buf); /* Copy everything */
- err = bpf_prog_test_run_xattr(&tattr);
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
max_grow = 4096 - XDP_PACKET_HEADROOM - tailroom; /* 3520 */
- CHECK_ATTR(err
- || tattr.retval != XDP_TX
- || tattr.data_size_out != max_grow,/* Expect max grow size */
- "case-128",
- "err %d errno %d retval %d size %d expect-size %d\n",
- err, errno, tattr.retval, tattr.data_size_out, max_grow);
+ ASSERT_OK(err, "case-128");
+ ASSERT_EQ(tattr.retval, XDP_TX, "case-128 retval");
+ ASSERT_EQ(tattr.data_size_out, max_grow, "case-128 data_size_out"); /* Expect max grow */
/* Extra checks for data content: Count grow size, will contain zeros */
for (i = 0, cnt = 0; i < sizeof(buf); i++) {
if (buf[i] == 0)
cnt++;
}
- CHECK_ATTR((cnt != (max_grow - tattr.data_size_in)) /* Grow increase */
- || tattr.data_size_out != max_grow, /* Total grow size */
- "case-128-data",
- "err %d errno %d retval %d size %d grow-size %d\n",
- err, errno, tattr.retval, tattr.data_size_out, cnt);
+ ASSERT_EQ(cnt, max_grow - tattr.data_size_in, "case-128-data cnt"); /* Grow increase */
+ ASSERT_EQ(tattr.data_size_out, max_grow, "case-128-data data_size_out"); /* Total grow */
bpf_object__close(obj);
}
+static void test_xdp_adjust_frags_tail_shrink(void)
+{
+ const char *file = "./test_xdp_adjust_tail_shrink.bpf.o";
+ __u32 exp_size;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ int err, prog_fd;
+ __u8 *buf;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ /* For the individual test cases, the first byte in the packet
+ * indicates which test will be run.
+ */
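+	/* dispatch used below: buf[0] == 0 shrinks the last frag by 10
+	 * bytes, == 1 drops one 4k page, == 2 drops two pages leaving a
+	 * linear xdp_buff
+	 */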
+ obj = bpf_object__open(file);
+ if (libbpf_get_error(obj))
+ return;
+
+ prog = bpf_object__next_program(obj, NULL);
+ if (bpf_object__load(obj))
+		goto out;
+
+ prog_fd = bpf_program__fd(prog);
+
+ buf = malloc(9000);
+ if (!ASSERT_OK_PTR(buf, "alloc buf 9Kb"))
+ goto out;
+
+ memset(buf, 0, 9000);
+
+ /* Test case removing 10 bytes from last frag, NOT freeing it */
+ exp_size = 8990; /* 9000 - 10 */
+ topts.data_in = buf;
+ topts.data_out = buf;
+ topts.data_size_in = 9000;
+ topts.data_size_out = 9000;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+ ASSERT_OK(err, "9Kb-10b");
+ ASSERT_EQ(topts.retval, XDP_TX, "9Kb-10b retval");
+ ASSERT_EQ(topts.data_size_out, exp_size, "9Kb-10b size");
+
+ /* Test case removing one of two pages, assuming 4K pages */
+ buf[0] = 1;
+ exp_size = 4900; /* 9000 - 4100 */
+
+ topts.data_size_out = 9000; /* reset from previous invocation */
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+ ASSERT_OK(err, "9Kb-4Kb");
+ ASSERT_EQ(topts.retval, XDP_TX, "9Kb-4Kb retval");
+ ASSERT_EQ(topts.data_size_out, exp_size, "9Kb-4Kb size");
+
+ /* Test case removing two pages resulting in a linear xdp_buff */
+ buf[0] = 2;
+ exp_size = 800; /* 9000 - 8200 */
+ topts.data_size_out = 9000; /* reset from previous invocation */
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+ ASSERT_OK(err, "9Kb-9Kb");
+ ASSERT_EQ(topts.retval, XDP_TX, "9Kb-9Kb retval");
+ ASSERT_EQ(topts.data_size_out, exp_size, "9Kb-9Kb size");
+
+ free(buf);
+out:
+ bpf_object__close(obj);
+}
+
+static void test_xdp_adjust_frags_tail_grow_4k(void)
+{
+ const char *file = "./test_xdp_adjust_tail_grow.bpf.o";
+ __u32 exp_size;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ int err, i, prog_fd;
+ __u8 *buf;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ obj = bpf_object__open(file);
+ if (libbpf_get_error(obj))
+ return;
+
+ prog = bpf_object__next_program(obj, NULL);
+ if (bpf_object__load(obj))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+
+ buf = malloc(16384);
+ if (!ASSERT_OK_PTR(buf, "alloc buf 16Kb"))
+ goto out;
+
+ /* Test case add 10 bytes to last frag */
+ memset(buf, 1, 16384);
+ exp_size = 9000 + 10;
+
+ topts.data_in = buf;
+ topts.data_out = buf;
+ topts.data_size_in = 9000;
+ topts.data_size_out = 16384;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+ ASSERT_OK(err, "9Kb+10b");
+ ASSERT_EQ(topts.retval, XDP_TX, "9Kb+10b retval");
+ ASSERT_EQ(topts.data_size_out, exp_size, "9Kb+10b size");
+
+ for (i = 0; i < 9000; i++) {
+ if (buf[i] != 1)
+ ASSERT_EQ(buf[i], 1, "9Kb+10b-old");
+ }
+
+ for (i = 9000; i < 9010; i++) {
+ if (buf[i] != 0)
+ ASSERT_EQ(buf[i], 0, "9Kb+10b-new");
+ }
+
+ for (i = 9010; i < 16384; i++) {
+ if (buf[i] != 1)
+ ASSERT_EQ(buf[i], 1, "9Kb+10b-untouched");
+ }
+
+ /* Test a too large grow */
+ memset(buf, 1, 16384);
+ exp_size = 9001;
+
+ topts.data_in = topts.data_out = buf;
+ topts.data_size_in = 9001;
+ topts.data_size_out = 16384;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+	ASSERT_OK(err, "too-large-grow");
+	ASSERT_EQ(topts.retval, XDP_DROP, "too-large-grow retval");
+	ASSERT_EQ(topts.data_size_out, exp_size, "too-large-grow size");
+
+ free(buf);
+out:
+ bpf_object__close(obj);
+}
+
+static void test_xdp_adjust_frags_tail_grow_64k(void)
+{
+ const char *file = "./test_xdp_adjust_tail_grow.bpf.o";
+ __u32 exp_size;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ int err, i, prog_fd;
+ __u8 *buf;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ obj = bpf_object__open(file);
+ if (libbpf_get_error(obj))
+ return;
+
+ prog = bpf_object__next_program(obj, NULL);
+ if (bpf_object__load(obj))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+
+ buf = malloc(262144);
+ if (!ASSERT_OK_PTR(buf, "alloc buf 256Kb"))
+ goto out;
+
+ /* Test case add 10 bytes to last frag */
+ memset(buf, 1, 262144);
+ exp_size = 90000 + 10;
+
+ topts.data_in = buf;
+ topts.data_out = buf;
+ topts.data_size_in = 90000;
+ topts.data_size_out = 262144;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+ ASSERT_OK(err, "90Kb+10b");
+ ASSERT_EQ(topts.retval, XDP_TX, "90Kb+10b retval");
+ ASSERT_EQ(topts.data_size_out, exp_size, "90Kb+10b size");
+
+ for (i = 0; i < 90000; i++) {
+ if (buf[i] != 1)
+ ASSERT_EQ(buf[i], 1, "90Kb+10b-old");
+ }
+
+ for (i = 90000; i < 90010; i++) {
+ if (buf[i] != 0)
+ ASSERT_EQ(buf[i], 0, "90Kb+10b-new");
+ }
+
+ for (i = 90010; i < 262144; i++) {
+ if (buf[i] != 1)
+ ASSERT_EQ(buf[i], 1, "90Kb+10b-untouched");
+ }
+
+ /* Test a too large grow */
+ memset(buf, 1, 262144);
+ exp_size = 90001;
+
+ topts.data_in = topts.data_out = buf;
+ topts.data_size_in = 90001;
+ topts.data_size_out = 262144;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+	ASSERT_OK(err, "too-large-grow");
+	ASSERT_EQ(topts.retval, XDP_DROP, "too-large-grow retval");
+	ASSERT_EQ(topts.data_size_out, exp_size, "too-large-grow size");
+
+ free(buf);
+out:
+ bpf_object__close(obj);
+}
+
void test_xdp_adjust_tail(void)
{
+ int page_size = getpagesize();
+
if (test__start_subtest("xdp_adjust_tail_shrink"))
test_xdp_adjust_tail_shrink();
if (test__start_subtest("xdp_adjust_tail_grow"))
- test_xdp_adjust_tail_grow();
+ test_xdp_adjust_tail_grow(page_size == 65536);
if (test__start_subtest("xdp_adjust_tail_grow2"))
test_xdp_adjust_tail_grow2();
+ if (test__start_subtest("xdp_adjust_frags_tail_shrink"))
+ test_xdp_adjust_frags_tail_shrink();
+ if (test__start_subtest("xdp_adjust_frags_tail_grow")) {
+ if (page_size == 65536)
+ test_xdp_adjust_frags_tail_grow_64k();
+ else
+ test_xdp_adjust_frags_tail_grow_4k();
+ }
}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c
index 15ef3531483e..e6bcb6051402 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c
@@ -1,86 +1,83 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include "test_xdp_attach_fail.skel.h"
#define IFINDEX_LO 1
#define XDP_FLAGS_REPLACE (1U << 4)
-void test_xdp_attach(void)
+static void test_xdp_attach(const char *file)
{
__u32 duration = 0, id1, id2, id0 = 0, len;
struct bpf_object *obj1, *obj2, *obj3;
- const char *file = "./test_xdp.o";
struct bpf_prog_info info = {};
int err, fd1, fd2, fd3;
- DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts,
- .old_fd = -1);
+ LIBBPF_OPTS(bpf_xdp_attach_opts, opts);
len = sizeof(info);
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj1, &fd1);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj1, &fd1);
if (CHECK_FAIL(err))
return;
- err = bpf_obj_get_info_by_fd(fd1, &info, &len);
+ err = bpf_prog_get_info_by_fd(fd1, &info, &len);
if (CHECK_FAIL(err))
goto out_1;
id1 = info.id;
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj2, &fd2);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj2, &fd2);
if (CHECK_FAIL(err))
goto out_1;
memset(&info, 0, sizeof(info));
- err = bpf_obj_get_info_by_fd(fd2, &info, &len);
+ err = bpf_prog_get_info_by_fd(fd2, &info, &len);
if (CHECK_FAIL(err))
goto out_2;
id2 = info.id;
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj3, &fd3);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj3, &fd3);
if (CHECK_FAIL(err))
goto out_2;
- err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd1, XDP_FLAGS_REPLACE,
- &opts);
+ err = bpf_xdp_attach(IFINDEX_LO, fd1, XDP_FLAGS_REPLACE, &opts);
if (CHECK(err, "load_ok", "initial load failed"))
goto out_close;
- err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
+ err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
if (CHECK(err || id0 != id1, "id1_check",
"loaded prog id %u != id1 %u, err %d", id0, id1, err))
goto out_close;
- err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd2, XDP_FLAGS_REPLACE,
- &opts);
+ err = bpf_xdp_attach(IFINDEX_LO, fd2, XDP_FLAGS_REPLACE, &opts);
if (CHECK(!err, "load_fail", "load with expected id didn't fail"))
goto out;
- opts.old_fd = fd1;
- err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd2, 0, &opts);
+ opts.old_prog_fd = fd1;
+ err = bpf_xdp_attach(IFINDEX_LO, fd2, 0, &opts);
if (CHECK(err, "replace_ok", "replace valid old_fd failed"))
goto out;
- err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
+ err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
if (CHECK(err || id0 != id2, "id2_check",
"loaded prog id %u != id2 %u, err %d", id0, id2, err))
goto out_close;
- err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd3, 0, &opts);
+ err = bpf_xdp_attach(IFINDEX_LO, fd3, 0, &opts);
if (CHECK(!err, "replace_fail", "replace invalid old_fd didn't fail"))
goto out;
- err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, 0, &opts);
+ err = bpf_xdp_detach(IFINDEX_LO, 0, &opts);
if (CHECK(!err, "remove_fail", "remove invalid old_fd didn't fail"))
goto out;
- opts.old_fd = fd2;
- err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, 0, &opts);
+ opts.old_prog_fd = fd2;
+ err = bpf_xdp_detach(IFINDEX_LO, 0, &opts);
if (CHECK(err, "remove_ok", "remove valid old_fd failed"))
goto out;
- err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
+ err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
if (CHECK(err || id0 != 0, "unload_check",
"loaded prog id %u != 0, err %d", id0, err))
goto out_close;
out:
- bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0);
+ bpf_xdp_detach(IFINDEX_LO, 0, NULL);
out_close:
bpf_object__close(obj3);
out_2:
@@ -88,3 +85,75 @@ out_2:
out_1:
bpf_object__close(obj1);
}
+
+#define ERRMSG_LEN 64
+
+struct xdp_errmsg {
+ char msg[ERRMSG_LEN];
+};
+
+static void on_xdp_errmsg(void *ctx, int cpu, void *data, __u32 size)
+{
+	struct xdp_errmsg *ctx_errmsg = ctx, *tp_errmsg = data;
+
+	memcpy(&ctx_errmsg->msg, &tp_errmsg->msg, ERRMSG_LEN);
+}
+
+static const char tgt_errmsg[] = "Invalid XDP flags for BPF link attachment";
+
+static void test_xdp_attach_fail(const char *file)
+{
+ struct test_xdp_attach_fail *skel = NULL;
+ struct xdp_errmsg errmsg = {};
+ struct perf_buffer *pb = NULL;
+ struct bpf_object *obj = NULL;
+ int err, fd_xdp;
+
+ LIBBPF_OPTS(bpf_link_create_opts, opts);
+
+ skel = test_xdp_attach_fail__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_xdp_attach_fail__open_and_load"))
+ goto out_close;
+
+ err = test_xdp_attach_fail__attach(skel);
+ if (!ASSERT_EQ(err, 0, "test_xdp_attach_fail__attach"))
+ goto out_close;
+
+ /* set up perf buffer */
+ pb = perf_buffer__new(bpf_map__fd(skel->maps.xdp_errmsg_pb), 1,
+ on_xdp_errmsg, NULL, &errmsg, NULL);
+ if (!ASSERT_OK_PTR(pb, "perf_buffer__new"))
+ goto out_close;
+
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &fd_xdp);
+ if (!ASSERT_EQ(err, 0, "bpf_prog_test_load"))
+ goto out_close;
+
+	opts.flags = 0xFF; /* invalid flags to fail to attach XDP prog */
+ err = bpf_link_create(fd_xdp, IFINDEX_LO, BPF_XDP, &opts);
+ if (!ASSERT_EQ(err, -EINVAL, "bpf_link_create"))
+ goto out_close;
+
+ /* read perf buffer */
+ err = perf_buffer__poll(pb, 100);
+ if (!ASSERT_GT(err, -1, "perf_buffer__poll"))
+ goto out_close;
+
+	ASSERT_STRNEQ((const char *) errmsg.msg, tgt_errmsg,
+		      strlen(tgt_errmsg), "check error message");
+
+out_close:
+ perf_buffer__free(pb);
+ bpf_object__close(obj);
+ test_xdp_attach_fail__destroy(skel);
+}
+
+void serial_test_xdp_attach(void)
+{
+ if (test__start_subtest("xdp_attach"))
+ test_xdp_attach("./test_xdp.bpf.o");
+ if (test__start_subtest("xdp_attach_dynptr"))
+ test_xdp_attach("./test_xdp_dynptr.bpf.o");
+ if (test__start_subtest("xdp_attach_failed"))
+ test_xdp_attach_fail("./xdp_dummy.bpf.o");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
new file mode 100644
index 000000000000..fb952703653e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
@@ -0,0 +1,691 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Test XDP bonding support
+ *
+ * Sets up two bonded veth pairs between two fresh namespaces
+ * and verifies that an XDP_TX program loaded on the bond device
+ * is correctly propagated to the slave devices and that XDP_TX'd
+ * packets are balanced using bonding.
+ */
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <net/if.h>
+#include <linux/if_link.h>
+#include "test_progs.h"
+#include "network_helpers.h"
+#include <linux/if_bonding.h>
+#include <linux/limits.h>
+#include <netinet/udp.h>
+#include <uapi/linux/netdev.h>
+
+#include "xdp_dummy.skel.h"
+#include "xdp_redirect_multi_kern.skel.h"
+#include "xdp_tx.skel.h"
+
+#define BOND1_MAC {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
+#define BOND1_MAC_STR "00:11:22:33:44:55"
+#define BOND2_MAC {0x00, 0x22, 0x33, 0x44, 0x55, 0x66}
+#define BOND2_MAC_STR "00:22:33:44:55:66"
+#define NPACKETS 100
+
+static int root_netns_fd = -1;
+
+static void restore_root_netns(void)
+{
+ ASSERT_OK(setns(root_netns_fd, CLONE_NEWNET), "restore_root_netns");
+}
+
+static int setns_by_name(char *name)
+{
+ int nsfd, err;
+ char nspath[PATH_MAX];
+
+ snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name);
+ nsfd = open(nspath, O_RDONLY | O_CLOEXEC);
+ if (nsfd < 0)
+ return -1;
+
+ err = setns(nsfd, CLONE_NEWNET);
+ close(nsfd);
+ return err;
+}
+
+static int get_rx_packets(const char *iface)
+{
+ FILE *f;
+ char line[512];
+ int iface_len = strlen(iface);
+
+ f = fopen("/proc/net/dev", "r");
+ if (!f)
+ return -1;
+
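+	/* /proc/net/dev lines look like
+	 *   "  iface: <rx bytes> <rx packets> ...";
+	 * skip the rx-bytes column and return the rx-packets count
+	 */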
+ while (fgets(line, sizeof(line), f)) {
+ char *p = line;
+
+ while (*p == ' ')
+ p++; /* skip whitespace */
+ if (!strncmp(p, iface, iface_len)) {
+ p += iface_len;
+ if (*p++ != ':')
+ continue;
+ while (*p == ' ')
+ p++; /* skip whitespace */
+ while (*p && *p != ' ')
+ p++; /* skip rx bytes */
+ while (*p == ' ')
+ p++; /* skip whitespace */
+ fclose(f);
+ return atoi(p);
+ }
+ }
+ fclose(f);
+ return -1;
+}
+
+#define MAX_BPF_LINKS 8
+
+struct skeletons {
+ struct xdp_dummy *xdp_dummy;
+ struct xdp_tx *xdp_tx;
+ struct xdp_redirect_multi_kern *xdp_redirect_multi_kern;
+
+ int nlinks;
+ struct bpf_link *links[MAX_BPF_LINKS];
+};
+
+static int xdp_attach(struct skeletons *skeletons, struct bpf_program *prog, char *iface)
+{
+ struct bpf_link *link;
+ int ifindex;
+
+ ifindex = if_nametoindex(iface);
+ if (!ASSERT_GT(ifindex, 0, "get ifindex"))
+ return -1;
+
+ if (!ASSERT_LE(skeletons->nlinks+1, MAX_BPF_LINKS, "too many XDP programs attached"))
+ return -1;
+
+ link = bpf_program__attach_xdp(prog, ifindex);
+ if (!ASSERT_OK_PTR(link, "attach xdp program"))
+ return -1;
+
+ skeletons->links[skeletons->nlinks++] = link;
+ return 0;
+}
+
+enum {
+ BOND_ONE_NO_ATTACH = 0,
+ BOND_BOTH_AND_ATTACH,
+};
+
+static const char * const mode_names[] = {
+ [BOND_MODE_ROUNDROBIN] = "balance-rr",
+ [BOND_MODE_ACTIVEBACKUP] = "active-backup",
+ [BOND_MODE_XOR] = "balance-xor",
+ [BOND_MODE_BROADCAST] = "broadcast",
+ [BOND_MODE_8023AD] = "802.3ad",
+ [BOND_MODE_TLB] = "balance-tlb",
+ [BOND_MODE_ALB] = "balance-alb",
+};
+
+static const char * const xmit_policy_names[] = {
+ [BOND_XMIT_POLICY_LAYER2] = "layer2",
+ [BOND_XMIT_POLICY_LAYER34] = "layer3+4",
+ [BOND_XMIT_POLICY_LAYER23] = "layer2+3",
+ [BOND_XMIT_POLICY_ENCAP23] = "encap2+3",
+ [BOND_XMIT_POLICY_ENCAP34] = "encap3+4",
+};
+
+static int bonding_setup(struct skeletons *skeletons, int mode, int xmit_policy,
+ int bond_both_attach)
+{
+ SYS(fail, "ip netns add ns_dst");
+ SYS(fail, "ip link add veth1_1 type veth peer name veth2_1 netns ns_dst");
+ SYS(fail, "ip link add veth1_2 type veth peer name veth2_2 netns ns_dst");
+
+ SYS(fail, "ip link add bond1 type bond mode %s xmit_hash_policy %s",
+ mode_names[mode], xmit_policy_names[xmit_policy]);
+ SYS(fail, "ip link set bond1 up address " BOND1_MAC_STR " addrgenmode none");
+ SYS(fail, "ip -netns ns_dst link add bond2 type bond mode %s xmit_hash_policy %s",
+ mode_names[mode], xmit_policy_names[xmit_policy]);
+ SYS(fail, "ip -netns ns_dst link set bond2 up address " BOND2_MAC_STR " addrgenmode none");
+
+ SYS(fail, "ip link set veth1_1 master bond1");
+ if (bond_both_attach == BOND_BOTH_AND_ATTACH) {
+ SYS(fail, "ip link set veth1_2 master bond1");
+ } else {
+ SYS(fail, "ip link set veth1_2 up addrgenmode none");
+
+ if (xdp_attach(skeletons, skeletons->xdp_dummy->progs.xdp_dummy_prog, "veth1_2"))
+ return -1;
+ }
+
+ SYS(fail, "ip -netns ns_dst link set veth2_1 master bond2");
+
+ if (bond_both_attach == BOND_BOTH_AND_ATTACH)
+ SYS(fail, "ip -netns ns_dst link set veth2_2 master bond2");
+ else
+ SYS(fail, "ip -netns ns_dst link set veth2_2 up addrgenmode none");
+
+	/* Load a dummy program on the sending side too: for XDP_TX to work,
+	 * a veth peer needs an XDP program loaded as well.
+	 */
+ if (xdp_attach(skeletons, skeletons->xdp_dummy->progs.xdp_dummy_prog, "bond1"))
+ return -1;
+
+ if (bond_both_attach == BOND_BOTH_AND_ATTACH) {
+ if (!ASSERT_OK(setns_by_name("ns_dst"), "set netns to ns_dst"))
+ return -1;
+
+ if (xdp_attach(skeletons, skeletons->xdp_tx->progs.xdp_tx, "bond2"))
+ return -1;
+
+ restore_root_netns();
+ }
+
+ return 0;
+fail:
+ return -1;
+}
+
+static void bonding_cleanup(struct skeletons *skeletons)
+{
+ restore_root_netns();
+ while (skeletons->nlinks) {
+ skeletons->nlinks--;
+ bpf_link__destroy(skeletons->links[skeletons->nlinks]);
+ }
+ ASSERT_OK(system("ip link delete bond1"), "delete bond1");
+ ASSERT_OK(system("ip link delete veth1_1"), "delete veth1_1");
+ ASSERT_OK(system("ip link delete veth1_2"), "delete veth1_2");
+ ASSERT_OK(system("ip netns delete ns_dst"), "delete ns_dst");
+}
+
+static int send_udp_packets(int vary_dst_ip)
+{
+ struct ethhdr eh = {
+ .h_source = BOND1_MAC,
+ .h_dest = BOND2_MAC,
+ .h_proto = htons(ETH_P_IP),
+ };
+ struct iphdr iph = {};
+ struct udphdr uh = {};
+ uint8_t buf[128];
+ int i, s = -1;
+ int ifindex;
+
+ s = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW);
+ if (!ASSERT_GE(s, 0, "socket"))
+ goto err;
+
+ ifindex = if_nametoindex("bond1");
+ if (!ASSERT_GT(ifindex, 0, "get bond1 ifindex"))
+ goto err;
+
+ iph.ihl = 5;
+ iph.version = 4;
+ iph.tos = 16;
+ iph.id = 1;
+ iph.ttl = 64;
+ iph.protocol = IPPROTO_UDP;
+ iph.saddr = 1;
+ iph.daddr = 2;
+ iph.tot_len = htons(sizeof(buf) - ETH_HLEN);
+ iph.check = 0;
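+	/* these frames are consumed by the XDP programs before any stack
+	 * checksum validation, so leaving the IP checksum at zero is
+	 * assumed safe for this test
+	 */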
+
+ for (i = 1; i <= NPACKETS; i++) {
+ int n;
+ struct sockaddr_ll saddr_ll = {
+ .sll_ifindex = ifindex,
+ .sll_halen = ETH_ALEN,
+ .sll_addr = BOND2_MAC,
+ };
+
+ /* vary the UDP destination port for even distribution with roundrobin/xor modes */
+ uh.dest++;
+
+ if (vary_dst_ip)
+ iph.daddr++;
+
+ /* construct a packet */
+ memcpy(buf, &eh, sizeof(eh));
+ memcpy(buf + sizeof(eh), &iph, sizeof(iph));
+ memcpy(buf + sizeof(eh) + sizeof(iph), &uh, sizeof(uh));
+
+ n = sendto(s, buf, sizeof(buf), 0, (struct sockaddr *)&saddr_ll, sizeof(saddr_ll));
+ if (!ASSERT_EQ(n, sizeof(buf), "sendto"))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (s >= 0)
+ close(s);
+ return -1;
+}
+
+static void test_xdp_bonding_with_mode(struct skeletons *skeletons, int mode, int xmit_policy)
+{
+ int bond1_rx;
+
+ if (bonding_setup(skeletons, mode, xmit_policy, BOND_BOTH_AND_ATTACH))
+ goto out;
+
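+	/* vary the destination IP unless hashing on layer 3+4, where the
+	 * varying UDP destination port already spreads the flows
+	 */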
+ if (send_udp_packets(xmit_policy != BOND_XMIT_POLICY_LAYER34))
+ goto out;
+
+ bond1_rx = get_rx_packets("bond1");
+	ASSERT_EQ(bond1_rx, NPACKETS, "expected all XDP_TX'd packets back on bond1");
+
+ switch (mode) {
+ case BOND_MODE_ROUNDROBIN:
+ case BOND_MODE_XOR: {
+ int veth1_rx = get_rx_packets("veth1_1");
+ int veth2_rx = get_rx_packets("veth1_2");
+ int diff = abs(veth1_rx - veth2_rx);
+
+ ASSERT_GE(veth1_rx + veth2_rx, NPACKETS, "expected more packets");
+
+ switch (xmit_policy) {
+ case BOND_XMIT_POLICY_LAYER2:
+ ASSERT_GE(diff, NPACKETS,
+ "expected packets on only one of the interfaces");
+ break;
+ case BOND_XMIT_POLICY_LAYER23:
+ case BOND_XMIT_POLICY_LAYER34:
+ ASSERT_LT(diff, NPACKETS/2,
+ "expected even distribution of packets");
+ break;
+ default:
+ PRINT_FAIL("Unimplemented xmit_policy=%d\n", xmit_policy);
+ break;
+ }
+ break;
+ }
+ case BOND_MODE_ACTIVEBACKUP: {
+ int veth1_rx = get_rx_packets("veth1_1");
+ int veth2_rx = get_rx_packets("veth1_2");
+ int diff = abs(veth1_rx - veth2_rx);
+
+ ASSERT_GE(diff, NPACKETS,
+ "expected packets on only one of the interfaces");
+ break;
+ }
+ default:
+ PRINT_FAIL("Unimplemented xmit_policy=%d\n", xmit_policy);
+ break;
+ }
+
+out:
+ bonding_cleanup(skeletons);
+}
+
+/* Test broadcast redirection using xdp_redirect_map_multi_prog: add all the
+ * interfaces to its devmap and check that broadcasting doesn't send the packet
+ * back to either the ingress bond device (bond2) or its slave (veth2_1).
+ */
+static void test_xdp_bonding_redirect_multi(struct skeletons *skeletons)
+{
+ static const char * const ifaces[] = {"bond2", "veth2_1", "veth2_2"};
+ int veth1_1_rx, veth1_2_rx;
+ int err;
+
+ if (bonding_setup(skeletons, BOND_MODE_ROUNDROBIN, BOND_XMIT_POLICY_LAYER23,
+ BOND_ONE_NO_ATTACH))
+ goto out;
+
+ if (!ASSERT_OK(setns_by_name("ns_dst"), "could not set netns to ns_dst"))
+ goto out;
+
+ /* populate the devmap with the relevant interfaces */
+ for (int i = 0; i < ARRAY_SIZE(ifaces); i++) {
+ int ifindex = if_nametoindex(ifaces[i]);
+ int map_fd = bpf_map__fd(skeletons->xdp_redirect_multi_kern->maps.map_all);
+
+ if (!ASSERT_GT(ifindex, 0, "could not get interface index"))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &ifindex, &ifindex, 0);
+ if (!ASSERT_OK(err, "add interface to map_all"))
+ goto out;
+ }
+
+ if (xdp_attach(skeletons,
+ skeletons->xdp_redirect_multi_kern->progs.xdp_redirect_map_multi_prog,
+ "bond2"))
+ goto out;
+
+ restore_root_netns();
+
+	if (send_udp_packets(false /* vary_dst_ip */))
+ goto out;
+
+ veth1_1_rx = get_rx_packets("veth1_1");
+ veth1_2_rx = get_rx_packets("veth1_2");
+
+ ASSERT_EQ(veth1_1_rx, 0, "expected no packets on veth1_1");
+ ASSERT_GE(veth1_2_rx, NPACKETS, "expected packets on veth1_2");
+
+out:
+ restore_root_netns();
+ bonding_cleanup(skeletons);
+}
+
+/* Test that XDP programs cannot be attached to both the bond master and slaves simultaneously */
+static void test_xdp_bonding_attach(struct skeletons *skeletons)
+{
+ struct bpf_link *link = NULL;
+ struct bpf_link *link2 = NULL;
+ int veth, bond, err;
+
+ if (!ASSERT_OK(system("ip link add veth type veth"), "add veth"))
+ goto out;
+ if (!ASSERT_OK(system("ip link add bond type bond"), "add bond"))
+ goto out;
+
+ veth = if_nametoindex("veth");
+	if (!ASSERT_GT(veth, 0, "if_nametoindex veth"))
+		goto out;
+	bond = if_nametoindex("bond");
+	if (!ASSERT_GT(bond, 0, "if_nametoindex bond"))
+ goto out;
+
+	/* enslaving with an XDP program loaded is allowed */
+ link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, veth);
+ if (!ASSERT_OK_PTR(link, "attach program to veth"))
+ goto out;
+
+ err = system("ip link set veth master bond");
+ if (!ASSERT_OK(err, "set veth master"))
+ goto out;
+
+ bpf_link__destroy(link);
+ link = NULL;
+
+ /* attaching to slave when master has no program is allowed */
+ link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, veth);
+ if (!ASSERT_OK_PTR(link, "attach program to slave when enslaved"))
+ goto out;
+
+ /* attaching to master not allowed when slave has program loaded */
+ link2 = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond);
+ if (!ASSERT_ERR_PTR(link2, "attach program to master when slave has program"))
+ goto out;
+
+ bpf_link__destroy(link);
+ link = NULL;
+
+ /* attaching XDP program to master allowed when slave has no program */
+ link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond);
+ if (!ASSERT_OK_PTR(link, "attach program to master"))
+ goto out;
+
+ /* attaching to slave not allowed when master has program loaded */
+ link2 = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, veth);
+ if (!ASSERT_ERR_PTR(link2, "attach program to slave when master has program"))
+ goto out;
+
+ bpf_link__destroy(link);
+ link = NULL;
+
+ /* test program unwinding with a non-XDP slave */
+ if (!ASSERT_OK(system("ip link add vxlan type vxlan id 1 remote 1.2.3.4 dstport 0 dev lo"),
+ "add vxlan"))
+ goto out;
+
+ err = system("ip link set vxlan master bond");
+ if (!ASSERT_OK(err, "set vxlan master"))
+ goto out;
+
+ /* attaching not allowed when one slave does not support XDP */
+ link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond);
+ if (!ASSERT_ERR_PTR(link, "attach program to master when slave does not support XDP"))
+ goto out;
+
+out:
+ bpf_link__destroy(link);
+ bpf_link__destroy(link2);
+
+ system("ip link del veth");
+ system("ip link del bond");
+ system("ip link del vxlan");
+}
+
+/* Test with nested bonding devices to catch issue with negative jump label count */
+static void test_xdp_bonding_nested(struct skeletons *skeletons)
+{
+ struct bpf_link *link = NULL;
+ int bond, err;
+
+ if (!ASSERT_OK(system("ip link add bond type bond"), "add bond"))
+ goto out;
+
+ bond = if_nametoindex("bond");
+	if (!ASSERT_GT(bond, 0, "if_nametoindex bond"))
+ goto out;
+
+ if (!ASSERT_OK(system("ip link add bond_nest1 type bond"), "add bond_nest1"))
+ goto out;
+
+ err = system("ip link set bond_nest1 master bond");
+ if (!ASSERT_OK(err, "set bond_nest1 master"))
+ goto out;
+
+	if (!ASSERT_OK(system("ip link add bond_nest2 type bond"), "add bond_nest2"))
+ goto out;
+
+ err = system("ip link set bond_nest2 master bond_nest1");
+ if (!ASSERT_OK(err, "set bond_nest2 master"))
+ goto out;
+
+ link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond);
+ ASSERT_OK_PTR(link, "attach program to master");
+
+out:
+ bpf_link__destroy(link);
+ system("ip link del bond");
+ system("ip link del bond_nest1");
+ system("ip link del bond_nest2");
+}
+
+static void test_xdp_bonding_features(struct skeletons *skeletons)
+{
+ LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
+ int bond_idx, veth1_idx, err;
+ struct bpf_link *link = NULL;
+
+ if (!ASSERT_OK(system("ip link add bond type bond"), "add bond"))
+ goto out;
+
+ bond_idx = if_nametoindex("bond");
+	if (!ASSERT_GT(bond_idx, 0, "if_nametoindex bond"))
+ goto out;
+
+ /* query default xdp-feature for bond device */
+ err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "bond bpf_xdp_query"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags, 0,
+ "bond query_opts.feature_flags"))
+ goto out;
+
+ if (!ASSERT_OK(system("ip link add veth0 type veth peer name veth1"),
+ "add veth{0,1} pair"))
+ goto out;
+
+ if (!ASSERT_OK(system("ip link add veth2 type veth peer name veth3"),
+ "add veth{2,3} pair"))
+ goto out;
+
+ if (!ASSERT_OK(system("ip link set veth0 master bond"),
+ "add veth0 to master bond"))
+ goto out;
+
+ /* xdp-feature for bond device should be obtained from the single slave
+ * device (veth0)
+ */
+ err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "bond bpf_xdp_query"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG,
+ "bond query_opts.feature_flags"))
+ goto out;
+
+ veth1_idx = if_nametoindex("veth1");
+	if (!ASSERT_GT(veth1_idx, 0, "if_nametoindex veth1"))
+ goto out;
+
+ link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog,
+ veth1_idx);
+ if (!ASSERT_OK_PTR(link, "attach program to veth1"))
+ goto out;
+
+	/* veth0's xdp features change once a program is attached to its peer veth1 */
+ err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "bond bpf_xdp_query"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG | NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG,
+ "bond query_opts.feature_flags"))
+ goto out;
+
+ if (!ASSERT_OK(system("ip link set veth2 master bond"),
+ "add veth2 to master bond"))
+ goto out;
+
+ err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "bond bpf_xdp_query"))
+ goto out;
+
+	/* the xdp features reported for the bond device should now be the
+	 * most restrictive combination of those of the attached slave
+	 * devices (veth0 and veth2)
+	 */
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG,
+ "bond query_opts.feature_flags"))
+ goto out;
+
+	if (!ASSERT_OK(system("ip link set veth2 nomaster"),
+		       "del veth2 from master bond"))
+ goto out;
+
+ err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "bond bpf_xdp_query"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG | NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG,
+ "bond query_opts.feature_flags"))
+ goto out;
+
+ if (!ASSERT_OK(system("ip link set veth0 nomaster"),
+		       "del veth0 from master bond"))
+ goto out;
+
+ err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "bond bpf_xdp_query"))
+ goto out;
+
+ ASSERT_EQ(query_opts.feature_flags, 0,
+ "bond query_opts.feature_flags");
+out:
+ bpf_link__destroy(link);
+ system("ip link del veth0");
+ system("ip link del veth2");
+ system("ip link del bond");
+}
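
Taken together, the assertions above pin down a simple propagation rule worth stating explicitly: a bond advertises the bitwise AND of its slaves' XDP feature masks, and the NDO_XMIT bits are only advertised while every slave can actually transmit (here they appear once an XDP program sits on veth1, the peer of the sole slave veth0). A minimal sketch of checking that rule from userspace with the same bpf_xdp_query() API; the helper names are ours, not kernel or libbpf API:

#include <bpf/libbpf.h>
#include <linux/if_link.h>

/* Query the driver-mode XDP feature mask for one interface. */
static int query_xdp_features(int ifindex, __u64 *feats)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	int err = bpf_xdp_query(ifindex, XDP_FLAGS_DRV_MODE, &opts);

	if (err)
		return err;
	*feats = opts.feature_flags;
	return 0;
}

/* The mask a bond is expected to advertise for two enslaved devices:
 * the intersection of what both slaves support.
 */
static __u64 expected_bond_features(__u64 slave_a, __u64 slave_b)
{
	return slave_a & slave_b;
}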
+
+static int libbpf_debug_print(enum libbpf_print_level level,
+ const char *format, va_list args)
+{
+ if (level != LIBBPF_WARN)
+ vprintf(format, args);
+ return 0;
+}
+
+struct bond_test_case {
+ char *name;
+ int mode;
+ int xmit_policy;
+};
+
+static struct bond_test_case bond_test_cases[] = {
+ { "xdp_bonding_roundrobin", BOND_MODE_ROUNDROBIN, BOND_XMIT_POLICY_LAYER23, },
+	{ "xdp_bonding_activebackup", BOND_MODE_ACTIVEBACKUP, BOND_XMIT_POLICY_LAYER23, },
+
+ { "xdp_bonding_xor_layer2", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER2, },
+ { "xdp_bonding_xor_layer23", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER23, },
+ { "xdp_bonding_xor_layer34", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER34, },
+};
+
+void serial_test_xdp_bonding(void)
+{
+ libbpf_print_fn_t old_print_fn;
+ struct skeletons skeletons = {};
+ int i;
+
+ old_print_fn = libbpf_set_print(libbpf_debug_print);
+
+ root_netns_fd = open("/proc/self/ns/net", O_RDONLY);
+ if (!ASSERT_GE(root_netns_fd, 0, "open /proc/self/ns/net"))
+ goto out;
+
+ skeletons.xdp_dummy = xdp_dummy__open_and_load();
+ if (!ASSERT_OK_PTR(skeletons.xdp_dummy, "xdp_dummy__open_and_load"))
+ goto out;
+
+ skeletons.xdp_tx = xdp_tx__open_and_load();
+ if (!ASSERT_OK_PTR(skeletons.xdp_tx, "xdp_tx__open_and_load"))
+ goto out;
+
+ skeletons.xdp_redirect_multi_kern = xdp_redirect_multi_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skeletons.xdp_redirect_multi_kern,
+ "xdp_redirect_multi_kern__open_and_load"))
+ goto out;
+
+ if (test__start_subtest("xdp_bonding_attach"))
+ test_xdp_bonding_attach(&skeletons);
+
+ if (test__start_subtest("xdp_bonding_nested"))
+ test_xdp_bonding_nested(&skeletons);
+
+ if (test__start_subtest("xdp_bonding_features"))
+ test_xdp_bonding_features(&skeletons);
+
+ for (i = 0; i < ARRAY_SIZE(bond_test_cases); i++) {
+ struct bond_test_case *test_case = &bond_test_cases[i];
+
+ if (test__start_subtest(test_case->name))
+ test_xdp_bonding_with_mode(
+ &skeletons,
+ test_case->mode,
+ test_case->xmit_policy);
+ }
+
+ if (test__start_subtest("xdp_bonding_redirect_multi"))
+ test_xdp_bonding_redirect_multi(&skeletons);
+
+out:
+ xdp_dummy__destroy(skeletons.xdp_dummy);
+ xdp_tx__destroy(skeletons.xdp_tx);
+ xdp_redirect_multi_kern__destroy(skeletons.xdp_redirect_multi_kern);
+
+ libbpf_set_print(old_print_fn);
+ if (root_netns_fd >= 0)
+ close(root_netns_fd);
+}
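
A small aside on the libbpf_debug_print()/libbpf_set_print() pair used above: negative subtests deliberately provoke libbpf warnings, so the test swaps in a printer that drops LIBBPF_WARN messages and restores the previous one on exit. The save/restore idiom in isolation, as a sketch (the callback body here is ours; only the libbpf entry points are real):

#include <stdarg.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static int quiet_warns(enum libbpf_print_level level,
		       const char *format, va_list args)
{
	if (level == LIBBPF_WARN)
		return 0;	/* drop warnings the test expects to trigger */
	return vfprintf(stderr, format, args);
}

static void with_quiet_libbpf(void (*fn)(void))
{
	libbpf_print_fn_t old = libbpf_set_print(quiet_warns);

	fn();			/* run code that may legitimately warn */
	libbpf_set_print(old);	/* always restore the previous printer */
}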
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
index 2c6c570b21f8..76967d8ace9c 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
@@ -10,50 +10,110 @@ struct meta {
int pkt_len;
};
+struct test_ctx_s {
+ bool passed;
+ int pkt_size;
+};
+
+struct test_ctx_s test_ctx;
+
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
- int duration = 0;
struct meta *meta = (struct meta *)data;
struct ipv4_packet *trace_pkt_v4 = data + sizeof(*meta);
+ unsigned char *raw_pkt = data + sizeof(*meta);
+ struct test_ctx_s *tst_ctx = ctx;
+
+ ASSERT_GE(size, sizeof(pkt_v4) + sizeof(*meta), "check_size");
+ ASSERT_EQ(meta->ifindex, if_nametoindex("lo"), "check_meta_ifindex");
+ ASSERT_EQ(meta->pkt_len, tst_ctx->pkt_size, "check_meta_pkt_len");
+ ASSERT_EQ(memcmp(trace_pkt_v4, &pkt_v4, sizeof(pkt_v4)), 0,
+ "check_packet_content");
+
+ if (meta->pkt_len > sizeof(pkt_v4)) {
+ for (int i = 0; i < meta->pkt_len - sizeof(pkt_v4); i++)
+ ASSERT_EQ(raw_pkt[i + sizeof(pkt_v4)], (unsigned char)i,
+ "check_packet_content");
+ }
+
+ tst_ctx->passed = true;
+}
- if (CHECK(size < sizeof(pkt_v4) + sizeof(*meta),
- "check_size", "size %u < %zu\n",
- size, sizeof(pkt_v4) + sizeof(*meta)))
- return;
+#define BUF_SZ 9000
- if (CHECK(meta->ifindex != if_nametoindex("lo"), "check_meta_ifindex",
- "meta->ifindex = %d\n", meta->ifindex))
+static void run_xdp_bpf2bpf_pkt_size(int pkt_fd, struct perf_buffer *pb,
+ struct test_xdp_bpf2bpf *ftrace_skel,
+ int pkt_size)
+{
+ __u8 *buf, *buf_in;
+ int err;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ if (!ASSERT_LE(pkt_size, BUF_SZ, "pkt_size") ||
+ !ASSERT_GE(pkt_size, sizeof(pkt_v4), "pkt_size"))
return;
- if (CHECK(meta->pkt_len != sizeof(pkt_v4), "check_meta_pkt_len",
- "meta->pkt_len = %zd\n", sizeof(pkt_v4)))
+ buf_in = malloc(BUF_SZ);
+ if (!ASSERT_OK_PTR(buf_in, "buf_in malloc()"))
return;
- if (CHECK(memcmp(trace_pkt_v4, &pkt_v4, sizeof(pkt_v4)),
- "check_packet_content", "content not the same\n"))
+ buf = malloc(BUF_SZ);
+ if (!ASSERT_OK_PTR(buf, "buf malloc()")) {
+ free(buf_in);
return;
+ }
+
+ test_ctx.passed = false;
+ test_ctx.pkt_size = pkt_size;
+
+ memcpy(buf_in, &pkt_v4, sizeof(pkt_v4));
+ if (pkt_size > sizeof(pkt_v4)) {
+ for (int i = 0; i < (pkt_size - sizeof(pkt_v4)); i++)
+ buf_in[i + sizeof(pkt_v4)] = i;
+ }
+
+ /* Run test program */
+ topts.data_in = buf_in;
+ topts.data_size_in = pkt_size;
+ topts.data_out = buf;
+ topts.data_size_out = BUF_SZ;
+
+ err = bpf_prog_test_run_opts(pkt_fd, &topts);
+
+ ASSERT_OK(err, "ipv4");
+ ASSERT_EQ(topts.retval, XDP_PASS, "ipv4 retval");
+ ASSERT_EQ(topts.data_size_out, pkt_size, "ipv4 size");
+
+ /* Make sure bpf_xdp_output() was triggered and it sent the expected
+ * data to the perf ring buffer.
+ */
+ err = perf_buffer__poll(pb, 100);
- *(bool *)ctx = true;
+ ASSERT_GE(err, 0, "perf_buffer__poll");
+ ASSERT_TRUE(test_ctx.passed, "test passed");
+ /* Verify test results */
+ ASSERT_EQ(ftrace_skel->bss->test_result_fentry, if_nametoindex("lo"),
+ "fentry result");
+ ASSERT_EQ(ftrace_skel->bss->test_result_fexit, XDP_PASS, "fexit result");
+
+ free(buf);
+ free(buf_in);
}
void test_xdp_bpf2bpf(void)
{
- __u32 duration = 0, retval, size;
- char buf[128];
int err, pkt_fd, map_fd;
- bool passed = false;
- struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
- struct iptnl_info value4 = {.family = AF_INET};
+ int pkt_sizes[] = {sizeof(pkt_v4), 1024, 4100, 8200};
+ struct iptnl_info value4 = {.family = AF_INET6};
struct test_xdp *pkt_skel = NULL;
struct test_xdp_bpf2bpf *ftrace_skel = NULL;
struct vip key4 = {.protocol = 6, .family = AF_INET};
struct bpf_program *prog;
struct perf_buffer *pb = NULL;
- struct perf_buffer_opts pb_opts = {};
/* Load XDP program to introspect */
pkt_skel = test_xdp__open_and_load();
- if (CHECK(!pkt_skel, "pkt_skel_load", "test_xdp skeleton failed\n"))
+ if (!ASSERT_OK_PTR(pkt_skel, "test_xdp__open_and_load"))
return;
pkt_fd = bpf_program__fd(pkt_skel->progs._xdp_tx_iptunnel);
@@ -63,7 +123,7 @@ void test_xdp_bpf2bpf(void)
/* Load trace program */
ftrace_skel = test_xdp_bpf2bpf__open();
- if (CHECK(!ftrace_skel, "__open", "ftrace skeleton failed\n"))
+ if (!ASSERT_OK_PTR(ftrace_skel, "test_xdp_bpf2bpf__open"))
goto out;
/* Demonstrate the bpf_program__set_attach_target() API rather than
@@ -78,52 +138,24 @@ void test_xdp_bpf2bpf(void)
bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel");
err = test_xdp_bpf2bpf__load(ftrace_skel);
- if (CHECK(err, "__load", "ftrace skeleton failed\n"))
+ if (!ASSERT_OK(err, "test_xdp_bpf2bpf__load"))
goto out;
err = test_xdp_bpf2bpf__attach(ftrace_skel);
- if (CHECK(err, "ftrace_attach", "ftrace attach failed: %d\n", err))
+ if (!ASSERT_OK(err, "test_xdp_bpf2bpf__attach"))
goto out;
/* Set up perf buffer */
- pb_opts.sample_cb = on_sample;
- pb_opts.ctx = &passed;
- pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map),
- 1, &pb_opts);
- if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
+ pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 8,
+ on_sample, NULL, &test_ctx, NULL);
+ if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto out;
- /* Run test program */
- err = bpf_prog_test_run(pkt_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
-
- if (CHECK(err || retval != XDP_TX || size != 74 ||
- iph->protocol != IPPROTO_IPIP, "ipv4",
- "err %d errno %d retval %d size %d\n",
- err, errno, retval, size))
- goto out;
-
- /* Make sure bpf_xdp_output() was triggered and it sent the expected
- * data to the perf ring buffer.
- */
- err = perf_buffer__poll(pb, 100);
- if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
- goto out;
-
- CHECK_FAIL(!passed);
-
- /* Verify test results */
- if (CHECK(ftrace_skel->bss->test_result_fentry != if_nametoindex("lo"),
- "result", "fentry failed err %llu\n",
- ftrace_skel->bss->test_result_fentry))
- goto out;
-
- CHECK(ftrace_skel->bss->test_result_fexit != XDP_TX, "result",
- "fexit failed err %llu\n", ftrace_skel->bss->test_result_fexit);
-
+ for (int i = 0; i < ARRAY_SIZE(pkt_sizes); i++)
+ run_xdp_bpf2bpf_pkt_size(pkt_fd, pb, ftrace_skel,
+ pkt_sizes[i]);
out:
- if (pb)
- perf_buffer__free(pb);
+ perf_buffer__free(pb);
test_xdp__destroy(pkt_skel);
test_xdp_bpf2bpf__destroy(ftrace_skel);
}
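
For readers following the API migration in this hunk: the removed bpf_prog_test_run() packed eight positional arguments, while bpf_prog_test_run_opts() takes a bpf_test_run_opts struct, which is what lets the new code size data_out per run and read retval back from the same struct. A minimal sketch of the new-style call, assuming nothing beyond what the hunk already uses (the wrapper and its buffer handling are ours):

#include <stdio.h>
#include <bpf/bpf.h>

static int run_once(int prog_fd, void *pkt, __u32 pkt_len,
		    void *out, __u32 out_len)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.data_out = out,
		.data_size_out = out_len,
		.repeat = 1,
	);
	int err = bpf_prog_test_run_opts(prog_fd, &topts);

	/* results come back through the struct, not out-parameters */
	if (!err)
		printf("retval=%u out_size=%u\n",
		       topts.retval, topts.data_size_out);
	return err;
}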
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
new file mode 100644
index 000000000000..ee94c281888a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
@@ -0,0 +1,512 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "test_xdp_context_test_run.skel.h"
+#include "test_xdp_meta.skel.h"
+
+#define RX_NAME "veth0"
+#define TX_NAME "veth1"
+#define TX_NETNS "xdp_context_tx"
+#define RX_NETNS "xdp_context_rx"
+#define TAP_NAME "tap0"
+#define DUMMY_NAME "dum0"
+#define TAP_NETNS "xdp_context_tuntap"
+
+#define TEST_PAYLOAD_LEN 32
+static const __u8 test_payload[TEST_PAYLOAD_LEN] = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+ 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
+ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
+};
+
+void test_xdp_context_error(int prog_fd, struct bpf_test_run_opts opts,
+ __u32 data_meta, __u32 data, __u32 data_end,
+ __u32 ingress_ifindex, __u32 rx_queue_index,
+ __u32 egress_ifindex)
+{
+ struct xdp_md ctx = {
+ .data = data,
+ .data_end = data_end,
+ .data_meta = data_meta,
+ .ingress_ifindex = ingress_ifindex,
+ .rx_queue_index = rx_queue_index,
+ .egress_ifindex = egress_ifindex,
+ };
+ int err;
+
+ opts.ctx_in = &ctx;
+ opts.ctx_size_in = sizeof(ctx);
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ ASSERT_EQ(errno, EINVAL, "errno-EINVAL");
+ ASSERT_ERR(err, "bpf_prog_test_run");
+}
+
+void test_xdp_context_test_run(void)
+{
+ struct test_xdp_context_test_run *skel = NULL;
+ char data[sizeof(pkt_v4) + sizeof(__u32)];
+ char bad_ctx[sizeof(struct xdp_md) + 1];
+ struct xdp_md ctx_in, ctx_out;
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &data,
+ .data_size_in = sizeof(data),
+ .ctx_out = &ctx_out,
+ .ctx_size_out = sizeof(ctx_out),
+ .repeat = 1,
+ );
+ int err, prog_fd;
+
+ skel = test_xdp_context_test_run__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ return;
+ prog_fd = bpf_program__fd(skel->progs.xdp_context);
+
+ /* Data past the end of the kernel's struct xdp_md must be 0 */
+ bad_ctx[sizeof(bad_ctx) - 1] = 1;
+ opts.ctx_in = bad_ctx;
+ opts.ctx_size_in = sizeof(bad_ctx);
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ ASSERT_EQ(errno, E2BIG, "extradata-errno");
+ ASSERT_ERR(err, "bpf_prog_test_run(extradata)");
+
+ *(__u32 *)data = XDP_PASS;
+ *(struct ipv4_packet *)(data + sizeof(__u32)) = pkt_v4;
+ opts.ctx_in = &ctx_in;
+ opts.ctx_size_in = sizeof(ctx_in);
+ memset(&ctx_in, 0, sizeof(ctx_in));
+ ctx_in.data_meta = 0;
+ ctx_in.data = sizeof(__u32);
+ ctx_in.data_end = ctx_in.data + sizeof(pkt_v4);
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ ASSERT_OK(err, "bpf_prog_test_run(valid)");
+ ASSERT_EQ(opts.retval, XDP_PASS, "valid-retval");
+ ASSERT_EQ(opts.data_size_out, sizeof(pkt_v4), "valid-datasize");
+ ASSERT_EQ(opts.ctx_size_out, opts.ctx_size_in, "valid-ctxsize");
+ ASSERT_EQ(ctx_out.data_meta, 0, "valid-datameta");
+ ASSERT_EQ(ctx_out.data, 0, "valid-data");
+ ASSERT_EQ(ctx_out.data_end, sizeof(pkt_v4), "valid-dataend");
+
+	/* Metadata size must be a multiple of 4 */
+ test_xdp_context_error(prog_fd, opts, 0, 1, sizeof(data), 0, 0, 0);
+
+ /* data_meta must reference the start of data */
+ test_xdp_context_error(prog_fd, opts, 4, sizeof(__u32), sizeof(data),
+ 0, 0, 0);
+
+	/* Metadata must be 255 bytes or smaller */
+ test_xdp_context_error(prog_fd, opts, 0, 256, sizeof(data), 0, 0, 0);
+
+ /* Total size of data must be data_end - data_meta or larger */
+ test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32),
+ sizeof(data) + 1, 0, 0, 0);
+
+ /* RX queue cannot be specified without specifying an ingress */
+ test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
+ 0, 1, 0);
+
+ /* Interface 1 is always the loopback interface which always has only
+ * one RX queue (index 0). This makes index 1 an invalid rx queue index
+ * for interface 1.
+ */
+ test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
+ 1, 1, 0);
+
+ /* The egress cannot be specified */
+ test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
+ 0, 0, 1);
+
+ test_xdp_context_test_run__destroy(skel);
+}
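
The error cases above reduce to one layout contract for metadata in test runs: the input buffer is laid out as [metadata | packet], ctx.data_meta..ctx.data spans the metadata (a multiple of 4, at most 255 bytes, ending exactly where the packet begins), and data_end - data_meta must fit inside data_size_in. A sketch of a valid setup under those rules; the wrapper and its names are ours:

#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

#define META_LEN 8	/* multiple of 4, at most 255 bytes */

static int run_with_meta(int prog_fd, const void *pkt, __u32 pkt_len)
{
	char buf[META_LEN + 1500];
	struct xdp_md ctx = {
		.data_meta = 0,			/* metadata starts the buffer */
		.data = META_LEN,		/* packet begins right after it */
		.data_end = META_LEN + pkt_len,
	};
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = buf,
		.data_size_in = META_LEN + pkt_len,
		.ctx_in = &ctx,
		.ctx_size_in = sizeof(ctx),
		.repeat = 1,
	);

	if (pkt_len > sizeof(buf) - META_LEN)
		return -1;

	memset(buf, 0, META_LEN);
	memcpy(buf + META_LEN, pkt, pkt_len);
	return bpf_prog_test_run_opts(prog_fd, &opts);
}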
+
+static int send_test_packet(int ifindex)
+{
+ int n, sock = -1;
+ __u8 packet[sizeof(struct ethhdr) + TEST_PAYLOAD_LEN];
+
+ /* We use the Ethernet header only to identify the test packet */
+ struct ethhdr eth = {
+ .h_source = { 0x12, 0x34, 0xDE, 0xAD, 0xBE, 0xEF },
+ };
+
+ memcpy(packet, &eth, sizeof(eth));
+ memcpy(packet + sizeof(eth), test_payload, TEST_PAYLOAD_LEN);
+
+ sock = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW);
+ if (!ASSERT_GE(sock, 0, "socket"))
+ goto err;
+
+ struct sockaddr_ll saddr = {
+ .sll_family = PF_PACKET,
+ .sll_ifindex = ifindex,
+ .sll_halen = ETH_ALEN
+ };
+ n = sendto(sock, packet, sizeof(packet), 0, (struct sockaddr *)&saddr,
+ sizeof(saddr));
+ if (!ASSERT_EQ(n, sizeof(packet), "sendto"))
+ goto err;
+
+ close(sock);
+ return 0;
+
+err:
+ if (sock >= 0)
+ close(sock);
+ return -1;
+}
+
+static int write_test_packet(int tap_fd)
+{
+ __u8 packet[sizeof(struct ethhdr) + TEST_PAYLOAD_LEN];
+ int n;
+
+	/* The Ethernet header is mostly irrelevant: we use it to identify the
+	 * test packet, and some of the BPF helpers we exercise expect to
+	 * operate on Ethernet frames carrying IP packets, so pretend that is
+	 * the case.
+ */
+ struct ethhdr eth = {
+ .h_source = { 0x12, 0x34, 0xDE, 0xAD, 0xBE, 0xEF },
+ .h_proto = htons(ETH_P_IP),
+ };
+
+ memcpy(packet, &eth, sizeof(eth));
+ memcpy(packet + sizeof(struct ethhdr), test_payload, TEST_PAYLOAD_LEN);
+
+ n = write(tap_fd, packet, sizeof(packet));
+ if (!ASSERT_EQ(n, sizeof(packet), "write packet"))
+ return -1;
+
+ return 0;
+}
+
+static void dump_err_stream(const struct bpf_program *prog)
+{
+ char buf[512];
+ int ret;
+
+ ret = 0;
+ do {
+ ret = bpf_prog_stream_read(bpf_program__fd(prog),
+ BPF_STREAM_STDERR, buf, sizeof(buf),
+ NULL);
+ if (ret > 0)
+ fwrite(buf, sizeof(buf[0]), ret, stderr);
+ } while (ret > 0);
+}
+
+void test_xdp_context_veth(void)
+{
+ LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
+ struct netns_obj *rx_ns = NULL, *tx_ns = NULL;
+ struct bpf_program *tc_prog, *xdp_prog;
+ struct test_xdp_meta *skel = NULL;
+ struct nstoken *nstoken = NULL;
+ int rx_ifindex, tx_ifindex;
+ int ret;
+
+ tx_ns = netns_new(TX_NETNS, false);
+ if (!ASSERT_OK_PTR(tx_ns, "create tx_ns"))
+ return;
+
+ rx_ns = netns_new(RX_NETNS, false);
+ if (!ASSERT_OK_PTR(rx_ns, "create rx_ns"))
+ goto close;
+
+ SYS(close, "ip link add " RX_NAME " netns " RX_NETNS
+ " type veth peer name " TX_NAME " netns " TX_NETNS);
+
+ nstoken = open_netns(RX_NETNS);
+ if (!ASSERT_OK_PTR(nstoken, "setns rx_ns"))
+ goto close;
+
+ SYS(close, "ip link set dev " RX_NAME " up");
+
+ skel = test_xdp_meta__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open and load skeleton"))
+ goto close;
+
+ rx_ifindex = if_nametoindex(RX_NAME);
+	if (!ASSERT_NEQ(rx_ifindex, 0, "if_nametoindex rx"))
+ goto close;
+
+ tc_hook.ifindex = rx_ifindex;
+ ret = bpf_tc_hook_create(&tc_hook);
+ if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
+ goto close;
+
+ tc_prog = bpf_object__find_program_by_name(skel->obj, "ing_cls");
+ if (!ASSERT_OK_PTR(tc_prog, "open ing_cls prog"))
+ goto close;
+
+ tc_opts.prog_fd = bpf_program__fd(tc_prog);
+ ret = bpf_tc_attach(&tc_hook, &tc_opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach"))
+ goto close;
+
+ xdp_prog = bpf_object__find_program_by_name(skel->obj, "ing_xdp");
+ if (!ASSERT_OK_PTR(xdp_prog, "open ing_xdp prog"))
+ goto close;
+
+ ret = bpf_xdp_attach(rx_ifindex,
+ bpf_program__fd(xdp_prog),
+ 0, NULL);
+ if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
+ goto close;
+
+ close_netns(nstoken);
+
+ nstoken = open_netns(TX_NETNS);
+ if (!ASSERT_OK_PTR(nstoken, "setns tx_ns"))
+ goto close;
+
+ SYS(close, "ip link set dev " TX_NAME " up");
+
+ tx_ifindex = if_nametoindex(TX_NAME);
+	if (!ASSERT_NEQ(tx_ifindex, 0, "if_nametoindex tx"))
+ goto close;
+
+ skel->bss->test_pass = false;
+
+ ret = send_test_packet(tx_ifindex);
+ if (!ASSERT_OK(ret, "send_test_packet"))
+ goto close;
+
+ if (!ASSERT_TRUE(skel->bss->test_pass, "test_pass"))
+ dump_err_stream(tc_prog);
+
+close:
+ close_netns(nstoken);
+ test_xdp_meta__destroy(skel);
+ netns_free(rx_ns);
+ netns_free(tx_ns);
+}
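
The TC half of this setup follows libbpf's standard three-step sequence, which is easier to see stripped of test scaffolding: create the clsact hook on an ifindex, attach the classifier with a handle/priority pair, and destroy the hook on teardown (which also removes the filter). A minimal sketch using the same libbpf calls; only the wrapper name is ours:

#include <bpf/libbpf.h>

static int attach_tc_ingress(int ifindex, int prog_fd)
{
	LIBBPF_OPTS(bpf_tc_hook, hook,
		.ifindex = ifindex,
		.attach_point = BPF_TC_INGRESS);
	LIBBPF_OPTS(bpf_tc_opts, opts,
		.handle = 1,
		.priority = 1,
		.prog_fd = prog_fd);
	int err;

	err = bpf_tc_hook_create(&hook);	/* adds the clsact qdisc */
	if (err)
		return err;

	err = bpf_tc_attach(&hook, &opts);	/* installs the filter */
	if (err)
		bpf_tc_hook_destroy(&hook);	/* undo on failure */
	return err;
}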
+
+static void test_tuntap(struct bpf_program *xdp_prog,
+ struct bpf_program *tc_prio_1_prog,
+ struct bpf_program *tc_prio_2_prog,
+ bool *test_pass)
+{
+ LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
+ struct netns_obj *ns = NULL;
+ int tap_fd = -1;
+ int tap_ifindex;
+ int ret;
+
+ *test_pass = false;
+
+ ns = netns_new(TAP_NETNS, true);
+ if (!ASSERT_OK_PTR(ns, "create and open ns"))
+ return;
+
+ tap_fd = open_tuntap(TAP_NAME, true);
+ if (!ASSERT_GE(tap_fd, 0, "open_tuntap"))
+ goto close;
+
+ SYS(close, "ip link set dev " TAP_NAME " up");
+
+ tap_ifindex = if_nametoindex(TAP_NAME);
+	if (!ASSERT_NEQ(tap_ifindex, 0, "if_nametoindex"))
+ goto close;
+
+ tc_hook.ifindex = tap_ifindex;
+ ret = bpf_tc_hook_create(&tc_hook);
+ if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
+ goto close;
+
+ tc_opts.prog_fd = bpf_program__fd(tc_prio_1_prog);
+ ret = bpf_tc_attach(&tc_hook, &tc_opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach"))
+ goto close;
+
+ if (tc_prio_2_prog) {
+ LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 2,
+ .prog_fd = bpf_program__fd(tc_prio_2_prog));
+
+ ret = bpf_tc_attach(&tc_hook, &tc_opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach"))
+ goto close;
+ }
+
+ ret = bpf_xdp_attach(tap_ifindex, bpf_program__fd(xdp_prog),
+ 0, NULL);
+ if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
+ goto close;
+
+ ret = write_test_packet(tap_fd);
+ if (!ASSERT_OK(ret, "write_test_packet"))
+ goto close;
+
+ if (!ASSERT_TRUE(*test_pass, "test_pass"))
+ dump_err_stream(tc_prio_2_prog ? : tc_prio_1_prog);
+
+close:
+ if (tap_fd >= 0)
+ close(tap_fd);
+ netns_free(ns);
+}
+
+/* Write a packet to a tap dev and copy it to ingress of a dummy dev */
+static void test_tuntap_mirred(struct bpf_program *xdp_prog,
+ struct bpf_program *tc_prog,
+ bool *test_pass)
+{
+ LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
+ struct netns_obj *ns = NULL;
+ int dummy_ifindex;
+ int tap_fd = -1;
+ int tap_ifindex;
+ int ret;
+
+ *test_pass = false;
+
+ ns = netns_new(TAP_NETNS, true);
+ if (!ASSERT_OK_PTR(ns, "netns_new"))
+ return;
+
+ /* Setup dummy interface */
+ SYS(close, "ip link add name " DUMMY_NAME " type dummy");
+ SYS(close, "ip link set dev " DUMMY_NAME " up");
+
+ dummy_ifindex = if_nametoindex(DUMMY_NAME);
+	if (!ASSERT_NEQ(dummy_ifindex, 0, "if_nametoindex"))
+ goto close;
+
+ tc_hook.ifindex = dummy_ifindex;
+ ret = bpf_tc_hook_create(&tc_hook);
+ if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
+ goto close;
+
+ tc_opts.prog_fd = bpf_program__fd(tc_prog);
+ ret = bpf_tc_attach(&tc_hook, &tc_opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach"))
+ goto close;
+
+ /* Setup TAP interface */
+ tap_fd = open_tuntap(TAP_NAME, true);
+ if (!ASSERT_GE(tap_fd, 0, "open_tuntap"))
+ goto close;
+
+ SYS(close, "ip link set dev " TAP_NAME " up");
+
+ tap_ifindex = if_nametoindex(TAP_NAME);
+	if (!ASSERT_NEQ(tap_ifindex, 0, "if_nametoindex"))
+ goto close;
+
+ ret = bpf_xdp_attach(tap_ifindex, bpf_program__fd(xdp_prog), 0, NULL);
+ if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
+ goto close;
+
+ /* Copy all packets received from TAP to dummy ingress */
+ SYS(close, "tc qdisc add dev " TAP_NAME " clsact");
+ SYS(close, "tc filter add dev " TAP_NAME " ingress "
+ "protocol all matchall "
+ "action mirred ingress mirror dev " DUMMY_NAME);
+
+ /* Receive a packet on TAP */
+ ret = write_test_packet(tap_fd);
+ if (!ASSERT_OK(ret, "write_test_packet"))
+ goto close;
+
+ if (!ASSERT_TRUE(*test_pass, "test_pass"))
+ dump_err_stream(tc_prog);
+
+close:
+ if (tap_fd >= 0)
+ close(tap_fd);
+ netns_free(ns);
+}
+
+void test_xdp_context_tuntap(void)
+{
+ struct test_xdp_meta *skel = NULL;
+
+ skel = test_xdp_meta__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open and load skeleton"))
+ return;
+
+ if (test__start_subtest("data_meta"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.ing_cls,
+ NULL, /* tc prio 2 */
+ &skel->bss->test_pass);
+ if (test__start_subtest("dynptr_read"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.ing_cls_dynptr_read,
+ NULL, /* tc prio 2 */
+ &skel->bss->test_pass);
+ if (test__start_subtest("dynptr_slice"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.ing_cls_dynptr_slice,
+ NULL, /* tc prio 2 */
+ &skel->bss->test_pass);
+ if (test__start_subtest("dynptr_write"))
+ test_tuntap(skel->progs.ing_xdp_zalloc_meta,
+ skel->progs.ing_cls_dynptr_write,
+ skel->progs.ing_cls_dynptr_read,
+ &skel->bss->test_pass);
+ if (test__start_subtest("dynptr_slice_rdwr"))
+ test_tuntap(skel->progs.ing_xdp_zalloc_meta,
+ skel->progs.ing_cls_dynptr_slice_rdwr,
+ skel->progs.ing_cls_dynptr_slice,
+ &skel->bss->test_pass);
+ if (test__start_subtest("dynptr_offset"))
+ test_tuntap(skel->progs.ing_xdp_zalloc_meta,
+ skel->progs.ing_cls_dynptr_offset_wr,
+ skel->progs.ing_cls_dynptr_offset_rd,
+ &skel->bss->test_pass);
+ if (test__start_subtest("dynptr_offset_oob"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.ing_cls_dynptr_offset_oob,
+ skel->progs.ing_cls,
+ &skel->bss->test_pass);
+ if (test__start_subtest("clone_data_meta_survives_data_write"))
+ test_tuntap_mirred(skel->progs.ing_xdp,
+ skel->progs.clone_data_meta_survives_data_write,
+ &skel->bss->test_pass);
+ if (test__start_subtest("clone_data_meta_survives_meta_write"))
+ test_tuntap_mirred(skel->progs.ing_xdp,
+ skel->progs.clone_data_meta_survives_meta_write,
+ &skel->bss->test_pass);
+ if (test__start_subtest("clone_meta_dynptr_survives_data_slice_write"))
+ test_tuntap_mirred(skel->progs.ing_xdp,
+ skel->progs.clone_meta_dynptr_survives_data_slice_write,
+ &skel->bss->test_pass);
+ if (test__start_subtest("clone_meta_dynptr_survives_meta_slice_write"))
+ test_tuntap_mirred(skel->progs.ing_xdp,
+ skel->progs.clone_meta_dynptr_survives_meta_slice_write,
+ &skel->bss->test_pass);
+ if (test__start_subtest("clone_meta_dynptr_rw_before_data_dynptr_write"))
+ test_tuntap_mirred(skel->progs.ing_xdp,
+ skel->progs.clone_meta_dynptr_rw_before_data_dynptr_write,
+ &skel->bss->test_pass);
+ if (test__start_subtest("clone_meta_dynptr_rw_before_meta_dynptr_write"))
+ test_tuntap_mirred(skel->progs.ing_xdp,
+ skel->progs.clone_meta_dynptr_rw_before_meta_dynptr_write,
+ &skel->bss->test_pass);
+ /* Tests for BPF helpers which touch headroom */
+ if (test__start_subtest("helper_skb_vlan_push_pop"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.helper_skb_vlan_push_pop,
+ NULL, /* tc prio 2 */
+ &skel->bss->test_pass);
+ if (test__start_subtest("helper_skb_adjust_room"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.helper_skb_adjust_room,
+ NULL, /* tc prio 2 */
+ &skel->bss->test_pass);
+ if (test__start_subtest("helper_skb_change_head_tail"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.helper_skb_change_head_tail,
+ NULL, /* tc prio 2 */
+ &skel->bss->test_pass);
+ if (test__start_subtest("helper_skb_change_proto"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.helper_skb_change_proto,
+ NULL, /* tc prio 2 */
+ &skel->bss->test_pass);
+
+ test_xdp_meta__destroy(skel);
+}
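
All of these subtests hinge on XDP metadata surviving into the skb: the XDP side reserves headroom with bpf_xdp_adjust_meta() and writes a marker there, and the tc classifier then checks the ctx->data_meta..ctx->data span. The BPF programs live in test_xdp_meta.c and are not part of this hunk; as orientation, a tc-side checker plausibly looks like the sketch below (an assumption about its shape, not the actual program):

#include <stdbool.h>
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

bool test_pass;

SEC("tc")
int ing_cls_sketch(struct __sk_buff *ctx)
{
	__u8 *meta = (void *)(long)ctx->data_meta;
	__u8 *data = (void *)(long)ctx->data;

	/* the bounds check satisfies the verifier and doubles as a
	 * "metadata actually present" test
	 */
	if (meta + 32 > data)
		return TC_ACT_SHOT;

	for (int i = 0; i < 32; i++)
		if (meta[i] != i)	/* compare against a known pattern */
			return TC_ACT_SHOT;

	test_pass = true;
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";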
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
index 0176573fe4e7..df27535995af 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
@@ -2,69 +2,148 @@
#include <uapi/linux/bpf.h>
#include <linux/if_link.h>
#include <test_progs.h>
+#include <network_helpers.h>
+#include "test_xdp_with_cpumap_frags_helpers.skel.h"
#include "test_xdp_with_cpumap_helpers.skel.h"
#define IFINDEX_LO 1
+#define TEST_NS "cpu_attach_ns"
-void test_xdp_with_cpumap_helpers(void)
+static void test_xdp_with_cpumap_helpers(void)
{
- struct test_xdp_with_cpumap_helpers *skel;
+ struct test_xdp_with_cpumap_helpers *skel = NULL;
struct bpf_prog_info info = {};
+ __u32 len = sizeof(info);
struct bpf_cpumap_val val = {
.qsize = 192,
};
- __u32 duration = 0, idx = 0;
- __u32 len = sizeof(info);
- int err, prog_fd, map_fd;
+ int err, prog_fd, prog_redir_fd, map_fd;
+ struct nstoken *nstoken = NULL;
+ __u32 idx = 0;
+
+ SYS(out_close, "ip netns add %s", TEST_NS);
+ nstoken = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto out_close;
+ SYS(out_close, "ip link set dev lo up");
skel = test_xdp_with_cpumap_helpers__open_and_load();
- if (CHECK_FAIL(!skel)) {
- perror("test_xdp_with_cpumap_helpers__open_and_load");
+ if (!ASSERT_OK_PTR(skel, "test_xdp_with_cpumap_helpers__open_and_load"))
-		return;
+		goto out_close;
- }
- /* can not attach program with cpumaps that allow programs
- * as xdp generic
- */
- prog_fd = bpf_program__fd(skel->progs.xdp_redir_prog);
- err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE);
- CHECK(err == 0, "Generic attach of program with 8-byte CPUMAP",
- "should have failed\n");
+ prog_redir_fd = bpf_program__fd(skel->progs.xdp_redir_prog);
+ err = bpf_xdp_attach(IFINDEX_LO, prog_redir_fd, XDP_FLAGS_SKB_MODE, NULL);
+ if (!ASSERT_OK(err, "Generic attach of program with 8-byte CPUMAP"))
+ goto out_close;
prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm);
map_fd = bpf_map__fd(skel->maps.cpu_map);
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &len);
- if (CHECK_FAIL(err))
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
goto out_close;
val.bpf_prog.fd = prog_fd;
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
- CHECK(err, "Add program to cpumap entry", "err %d errno %d\n",
- err, errno);
+ ASSERT_OK(err, "Add program to cpumap entry");
err = bpf_map_lookup_elem(map_fd, &idx, &val);
- CHECK(err, "Read cpumap entry", "err %d errno %d\n", err, errno);
- CHECK(info.id != val.bpf_prog.id, "Expected program id in cpumap entry",
- "expected %u read %u\n", info.id, val.bpf_prog.id);
+ ASSERT_OK(err, "Read cpumap entry");
+ ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to cpumap entry prog_id");
+
+ /* send a packet to trigger any potential bugs in there */
+ char data[ETH_HLEN] = {};
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &data,
+ .data_size_in = sizeof(data),
+ .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+ .repeat = 1,
+ );
+ err = bpf_prog_test_run_opts(prog_redir_fd, &opts);
+ ASSERT_OK(err, "XDP test run");
+
+ /* wait for the packets to be flushed, then check that redirect has been
+ * performed
+ */
+ kern_sync_rcu();
+ ASSERT_NEQ(skel->bss->redirect_count, 0, "redirected packets");
+
+ err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
+ ASSERT_OK(err, "XDP program detach");
/* can not attach BPF_XDP_CPUMAP program to a device */
- err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE);
- CHECK(err == 0, "Attach of BPF_XDP_CPUMAP program",
- "should have failed\n");
+ err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL);
+ if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_CPUMAP program"))
+ bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
val.qsize = 192;
val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog);
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
- CHECK(err == 0, "Add non-BPF_XDP_CPUMAP program to cpumap entry",
- "should have failed\n");
+ ASSERT_NEQ(err, 0, "Add non-BPF_XDP_CPUMAP program to cpumap entry");
+
+ /* Try to attach BPF_XDP program with frags to cpumap when we have
+ * already loaded a BPF_XDP program on the map
+ */
+ idx = 1;
+ val.qsize = 192;
+ val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_cm_frags);
+ err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+ ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to cpumap entry");
out_close:
+ close_netns(nstoken);
+ SYS_NOFAIL("ip netns del %s", TEST_NS);
test_xdp_with_cpumap_helpers__destroy(skel);
}
+static void test_xdp_with_cpumap_frags_helpers(void)
+{
+ struct test_xdp_with_cpumap_frags_helpers *skel;
+ struct bpf_prog_info info = {};
+ __u32 len = sizeof(info);
+ struct bpf_cpumap_val val = {
+ .qsize = 192,
+ };
+ int err, frags_prog_fd, map_fd;
+ __u32 idx = 0;
+
+ skel = test_xdp_with_cpumap_frags_helpers__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "test_xdp_with_cpumap_frags_helpers__open_and_load"))
+ return;
+
+ frags_prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm_frags);
+ map_fd = bpf_map__fd(skel->maps.cpu_map);
+ err = bpf_prog_get_info_by_fd(frags_prog_fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
+ goto out_close;
+
+ val.bpf_prog.fd = frags_prog_fd;
+ err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+ ASSERT_OK(err, "Add program to cpumap entry");
+
+ err = bpf_map_lookup_elem(map_fd, &idx, &val);
+ ASSERT_OK(err, "Read cpumap entry");
+ ASSERT_EQ(info.id, val.bpf_prog.id,
+ "Match program id to cpumap entry prog_id");
+
+ /* Try to attach BPF_XDP program to cpumap when we have
+ * already loaded a BPF_XDP program with frags on the map
+ */
+ idx = 1;
+ val.qsize = 192;
+ val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_cm);
+ err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+ ASSERT_NEQ(err, 0, "Add BPF_XDP program to cpumap entry");
+
+out_close:
+ test_xdp_with_cpumap_frags_helpers__destroy(skel);
+}
+
void test_xdp_cpumap_attach(void)
{
- if (test__start_subtest("cpumap_with_progs"))
+ if (test__start_subtest("CPUMAP with programs in entries"))
test_xdp_with_cpumap_helpers();
+
+ if (test__start_subtest("CPUMAP with frags programs in entries"))
+ test_xdp_with_cpumap_frags_helpers();
}
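
The map plumbing behind both subtests: a BPF_MAP_TYPE_CPUMAP value is a struct bpf_cpumap_val carrying the per-CPU queue size and, optionally, the fd of a second XDP program (expected attach type BPF_XDP_CPUMAP, and frags-compatible with whatever is already in the map) to run on the remote CPU. The kernel converts the fd into a program id, which is why the tests compare info.id against val.bpf_prog.id after a lookup. A minimal sketch of installing such an entry (wrapper name ours):

#include <linux/bpf.h>
#include <bpf/bpf.h>

static int install_cpumap_prog(int map_fd, __u32 cpu, int prog_fd)
{
	struct bpf_cpumap_val val = {
		.qsize = 192,		/* per-CPU ring size */
		.bpf_prog.fd = prog_fd,	/* must be BPF_XDP_CPUMAP */
	};

	/* on success, lookups report val.bpf_prog.id, not the fd */
	return bpf_map_update_elem(map_fd, &cpu, &val, 0);
}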
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_dev_bound_only.c b/tools/testing/selftests/bpf/prog_tests/xdp_dev_bound_only.c
new file mode 100644
index 000000000000..7dd18c6d06c6
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_dev_bound_only.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <net/if.h>
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#define LOCAL_NETNS "xdp_dev_bound_only_netns"
+
+static int load_dummy_prog(char *name, __u32 ifindex, __u32 flags)
+{
+ struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN() };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+
+ opts.prog_flags = flags;
+ opts.prog_ifindex = ifindex;
+ return bpf_prog_load(BPF_PROG_TYPE_XDP, name, "GPL", insns, ARRAY_SIZE(insns), &opts);
+}
+
+/* A test case for bpf_offload_netdev->offload handling bug:
+ * - create a veth device (does not support offload);
+ * - create a device bound XDP program with BPF_F_XDP_DEV_BOUND_ONLY flag
+ * (such programs are not offloaded);
+ * - create a device bound XDP program without flags (such programs are offloaded).
+ * This might lead to 'BUG: kernel NULL pointer dereference'.
+ */
+void test_xdp_dev_bound_only_offdev(void)
+{
+ struct nstoken *tok = NULL;
+ __u32 ifindex;
+ int fd1 = -1;
+ int fd2 = -1;
+
+ SYS(out, "ip netns add " LOCAL_NETNS);
+ tok = open_netns(LOCAL_NETNS);
+ if (!ASSERT_OK_PTR(tok, "open_netns"))
+ goto out;
+ SYS(out, "ip link add eth42 type veth");
+ ifindex = if_nametoindex("eth42");
+ if (!ASSERT_NEQ(ifindex, 0, "if_nametoindex")) {
+ perror("if_nametoindex");
+ goto out;
+ }
+ fd1 = load_dummy_prog("dummy1", ifindex, BPF_F_XDP_DEV_BOUND_ONLY);
+ if (!ASSERT_GE(fd1, 0, "load_dummy_prog #1")) {
+ perror("load_dummy_prog #1");
+ goto out;
+ }
+ /* Program with ifindex is considered offloaded, however veth
+ * does not support offload => error should be reported.
+ */
+ fd2 = load_dummy_prog("dummy2", ifindex, 0);
+ ASSERT_EQ(fd2, -EINVAL, "load_dummy_prog #2 (offloaded)");
+
+out:
+ close(fd1);
+ close(fd2);
+ close_netns(tok);
+ /* eth42 was added inside netns, removing the netns will
+ * also remove eth42 veth pair.
+ */
+ SYS_NOFAIL("ip netns del " LOCAL_NETNS);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
index 88ef3ec8ac4c..a8ab05216c38 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
@@ -1,89 +1,275 @@
// SPDX-License-Identifier: GPL-2.0
+#include <arpa/inet.h>
#include <uapi/linux/bpf.h>
#include <linux/if_link.h>
+#include <network_helpers.h>
+#include <net/if.h>
#include <test_progs.h>
#include "test_xdp_devmap_helpers.skel.h"
+#include "test_xdp_devmap_tailcall.skel.h"
+#include "test_xdp_with_devmap_frags_helpers.skel.h"
#include "test_xdp_with_devmap_helpers.skel.h"
#define IFINDEX_LO 1
+#define TEST_NS "devmap_attach_ns"
-void test_xdp_with_devmap_helpers(void)
+static void test_xdp_with_devmap_helpers(void)
{
- struct test_xdp_with_devmap_helpers *skel;
+ struct test_xdp_with_devmap_helpers *skel = NULL;
struct bpf_prog_info info = {};
struct bpf_devmap_val val = {
.ifindex = IFINDEX_LO,
};
__u32 len = sizeof(info);
- __u32 duration = 0, idx = 0;
- int err, dm_fd, map_fd;
+ int err, dm_fd, dm_fd_redir, map_fd;
+ struct nstoken *nstoken = NULL;
+ char data[ETH_HLEN] = {};
+ __u32 idx = 0;
+ SYS(out_close, "ip netns add %s", TEST_NS);
+ nstoken = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto out_close;
+ SYS(out_close, "ip link set dev lo up");
skel = test_xdp_with_devmap_helpers__open_and_load();
- if (CHECK_FAIL(!skel)) {
- perror("test_xdp_with_devmap_helpers__open_and_load");
- return;
- }
+ if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load"))
+ goto out_close;
- /* can not attach program with DEVMAPs that allow programs
- * as xdp generic
- */
- dm_fd = bpf_program__fd(skel->progs.xdp_redir_prog);
- err = bpf_set_link_xdp_fd(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE);
- CHECK(err == 0, "Generic attach of program with 8-byte devmap",
- "should have failed\n");
+ dm_fd_redir = bpf_program__fd(skel->progs.xdp_redir_prog);
+ err = bpf_xdp_attach(IFINDEX_LO, dm_fd_redir, XDP_FLAGS_SKB_MODE, NULL);
+ if (!ASSERT_OK(err, "Generic attach of program with 8-byte devmap"))
+ goto out_close;
dm_fd = bpf_program__fd(skel->progs.xdp_dummy_dm);
map_fd = bpf_map__fd(skel->maps.dm_ports);
- err = bpf_obj_get_info_by_fd(dm_fd, &info, &len);
- if (CHECK_FAIL(err))
+ err = bpf_prog_get_info_by_fd(dm_fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
goto out_close;
val.bpf_prog.fd = dm_fd;
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
- CHECK(err, "Add program to devmap entry",
- "err %d errno %d\n", err, errno);
+ ASSERT_OK(err, "Add program to devmap entry");
err = bpf_map_lookup_elem(map_fd, &idx, &val);
- CHECK(err, "Read devmap entry", "err %d errno %d\n", err, errno);
- CHECK(info.id != val.bpf_prog.id, "Expected program id in devmap entry",
- "expected %u read %u\n", info.id, val.bpf_prog.id);
+ ASSERT_OK(err, "Read devmap entry");
+ ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to devmap entry prog_id");
+
+ /* send a packet to trigger any potential bugs in there */
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &data,
+ .data_size_in = sizeof(data),
+ .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+ .repeat = 1,
+ );
+ err = bpf_prog_test_run_opts(dm_fd_redir, &opts);
+ ASSERT_OK(err, "XDP test run");
+
+ /* wait for the packets to be flushed */
+ kern_sync_rcu();
+
+ err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
+ ASSERT_OK(err, "XDP program detach");
/* can not attach BPF_XDP_DEVMAP program to a device */
- err = bpf_set_link_xdp_fd(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE);
- CHECK(err == 0, "Attach of BPF_XDP_DEVMAP program",
- "should have failed\n");
+ err = bpf_xdp_attach(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE, NULL);
+ if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_DEVMAP program"))
+ bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
val.ifindex = 1;
val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog);
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
- CHECK(err == 0, "Add non-BPF_XDP_DEVMAP program to devmap entry",
- "should have failed\n");
+ ASSERT_NEQ(err, 0, "Add non-BPF_XDP_DEVMAP program to devmap entry");
+
+ /* Try to attach BPF_XDP program with frags to devmap when we have
+ * already loaded a BPF_XDP program on the map
+ */
+ idx = 1;
+ val.ifindex = 1;
+ val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_dm_frags);
+ err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+ ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to devmap entry");
out_close:
+ close_netns(nstoken);
+ SYS_NOFAIL("ip netns del %s", TEST_NS);
test_xdp_with_devmap_helpers__destroy(skel);
}
-void test_neg_xdp_devmap_helpers(void)
+static void test_neg_xdp_devmap_helpers(void)
{
struct test_xdp_devmap_helpers *skel;
- __u32 duration = 0;
skel = test_xdp_devmap_helpers__open_and_load();
- if (CHECK(skel,
- "Load of XDP program accessing egress ifindex without attach type",
- "should have failed\n")) {
+ if (!ASSERT_EQ(skel, NULL,
+ "Load of XDP program accessing egress ifindex without attach type")) {
test_xdp_devmap_helpers__destroy(skel);
}
}
+static void test_xdp_devmap_tailcall(enum bpf_attach_type prog_dev,
+ enum bpf_attach_type prog_tail,
+ bool expect_reject)
+{
+ struct test_xdp_devmap_tailcall *skel;
+ int err;
+
+ skel = test_xdp_devmap_tailcall__open();
+ if (!ASSERT_OK_PTR(skel, "test_xdp_devmap_tailcall__open"))
+ return;
+
+ bpf_program__set_expected_attach_type(skel->progs.xdp_devmap, prog_dev);
+ bpf_program__set_expected_attach_type(skel->progs.xdp_entry, prog_tail);
+
+ err = test_xdp_devmap_tailcall__load(skel);
+ if (expect_reject)
+ ASSERT_ERR(err, "test_xdp_devmap_tailcall__load");
+ else
+ ASSERT_OK(err, "test_xdp_devmap_tailcall__load");
+
+ test_xdp_devmap_tailcall__destroy(skel);
+}
-void test_xdp_devmap_attach(void)
+static void test_xdp_with_devmap_frags_helpers(void)
+{
+ struct test_xdp_with_devmap_frags_helpers *skel;
+ struct bpf_prog_info info = {};
+ struct bpf_devmap_val val = {
+ .ifindex = IFINDEX_LO,
+ };
+ __u32 len = sizeof(info);
+ int err, dm_fd_frags, map_fd;
+ __u32 idx = 0;
+
+ skel = test_xdp_with_devmap_frags_helpers__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_frags_helpers__open_and_load"))
+ return;
+
+ dm_fd_frags = bpf_program__fd(skel->progs.xdp_dummy_dm_frags);
+ map_fd = bpf_map__fd(skel->maps.dm_ports);
+ err = bpf_prog_get_info_by_fd(dm_fd_frags, &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
+ goto out_close;
+
+ val.bpf_prog.fd = dm_fd_frags;
+ err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+ ASSERT_OK(err, "Add frags program to devmap entry");
+
+ err = bpf_map_lookup_elem(map_fd, &idx, &val);
+ ASSERT_OK(err, "Read devmap entry");
+ ASSERT_EQ(info.id, val.bpf_prog.id,
+ "Match program id to devmap entry prog_id");
+
+ /* Try to attach BPF_XDP program to devmap when we have
+ * already loaded a BPF_XDP program with frags on the map
+ */
+ idx = 1;
+ val.ifindex = 1;
+ val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_dm);
+ err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+ ASSERT_NEQ(err, 0, "Add BPF_XDP program to devmap entry");
+
+out_close:
+ test_xdp_with_devmap_frags_helpers__destroy(skel);
+}
+
+static void test_xdp_with_devmap_helpers_veth(void)
+{
+ struct test_xdp_with_devmap_helpers *skel = NULL;
+ struct bpf_prog_info info = {};
+ struct bpf_devmap_val val = {};
+ struct nstoken *nstoken = NULL;
+ __u32 len = sizeof(info);
+ int err, dm_fd, dm_fd_redir, map_fd, ifindex_dst;
+ char data[ETH_HLEN] = {};
+ __u32 idx = 0;
+
+ SYS(out_close, "ip netns add %s", TEST_NS);
+ nstoken = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto out_close;
+
+ SYS(out_close, "ip link add veth_src type veth peer name veth_dst");
+ SYS(out_close, "ip link set dev veth_src up");
+ SYS(out_close, "ip link set dev veth_dst up");
+
+ val.ifindex = if_nametoindex("veth_src");
+ ifindex_dst = if_nametoindex("veth_dst");
+ if (!ASSERT_NEQ(val.ifindex, 0, "val.ifindex") ||
+ !ASSERT_NEQ(ifindex_dst, 0, "ifindex_dst"))
+ goto out_close;
+
+ skel = test_xdp_with_devmap_helpers__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load"))
+ goto out_close;
+
+ dm_fd_redir = bpf_program__fd(skel->progs.xdp_redir_prog);
+ err = bpf_xdp_attach(val.ifindex, dm_fd_redir, XDP_FLAGS_DRV_MODE, NULL);
+ if (!ASSERT_OK(err, "Attach of program with 8-byte devmap"))
+ goto out_close;
+
+ dm_fd = bpf_program__fd(skel->progs.xdp_dummy_dm);
+ map_fd = bpf_map__fd(skel->maps.dm_ports);
+ err = bpf_prog_get_info_by_fd(dm_fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
+ goto out_close;
+
+ val.bpf_prog.fd = dm_fd;
+ err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+ ASSERT_OK(err, "Add program to devmap entry");
+
+ err = bpf_map_lookup_elem(map_fd, &idx, &val);
+ ASSERT_OK(err, "Read devmap entry");
+ ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to devmap entry prog_id");
+
+ /* attach dummy to other side to enable reception */
+ dm_fd = bpf_program__fd(skel->progs.xdp_dummy_prog);
+ err = bpf_xdp_attach(ifindex_dst, dm_fd, XDP_FLAGS_DRV_MODE, NULL);
+ if (!ASSERT_OK(err, "Attach of dummy XDP"))
+ goto out_close;
+
+ /* send a packet to trigger any potential bugs in there */
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &data,
+ .data_size_in = sizeof(data),
+ .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+ .repeat = 1,
+ );
+ err = bpf_prog_test_run_opts(dm_fd_redir, &opts);
+ ASSERT_OK(err, "XDP test run");
+
+ /* wait for the packets to be flushed */
+ kern_sync_rcu();
+
+ err = bpf_xdp_detach(val.ifindex, XDP_FLAGS_DRV_MODE, NULL);
+ ASSERT_OK(err, "XDP program detach");
+
+ err = bpf_xdp_detach(ifindex_dst, XDP_FLAGS_DRV_MODE, NULL);
+ ASSERT_OK(err, "XDP program detach");
+
+out_close:
+ close_netns(nstoken);
+ SYS_NOFAIL("ip netns del %s", TEST_NS);
+ test_xdp_with_devmap_helpers__destroy(skel);
+}
+
+void serial_test_xdp_devmap_attach(void)
{
if (test__start_subtest("DEVMAP with programs in entries"))
test_xdp_with_devmap_helpers();
- if (test__start_subtest("Verifier check of DEVMAP programs"))
+ if (test__start_subtest("DEVMAP with frags programs in entries"))
+ test_xdp_with_devmap_frags_helpers();
+
+ if (test__start_subtest("Verifier check of DEVMAP programs")) {
test_neg_xdp_devmap_helpers();
+ test_xdp_devmap_tailcall(BPF_XDP_DEVMAP, BPF_XDP_DEVMAP, false);
+ test_xdp_devmap_tailcall(0, 0, true);
+ test_xdp_devmap_tailcall(BPF_XDP_DEVMAP, 0, true);
+ test_xdp_devmap_tailcall(0, BPF_XDP_DEVMAP, true);
+ }
+
+ if (test__start_subtest("DEVMAP with programs in entries on veth"))
+ test_xdp_with_devmap_helpers_veth();
}
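
A note on the test-run flag these helpers lean on: BPF_F_TEST_XDP_LIVE_FRAMES makes bpf_prog_test_run_opts() inject the frame into the live kernel path, so XDP_REDIRECT and XDP_TX genuinely transmit instead of the verdict being returned to userspace, which is why callers follow up with kern_sync_rcu() to let the flush finish. A minimal sketch of such an invocation (wrapper name ours):

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf.h>

static int run_live_frame(int prog_fd)
{
	char frame[ETH_HLEN] = {};	/* smallest well-formed frame */
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = frame,
		.data_size_in = sizeof(frame),
		.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
		.repeat = 1,		/* inject exactly one frame */
	);

	/* data_out/ctx_out are not supported in live-frames mode */
	return bpf_prog_test_run_opts(prog_fd, &opts);
}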
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
new file mode 100644
index 000000000000..dd34b0cc4b4e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <net/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/if_link.h>
+#include <linux/ipv6.h>
+#include <linux/in6.h>
+#include <netinet/udp.h>
+#include <bpf/bpf_endian.h>
+#include <uapi/linux/netdev.h>
+#include "test_xdp_do_redirect.skel.h"
+#include "xdp_dummy.skel.h"
+
+struct udp_packet {
+ struct ethhdr eth;
+ struct ipv6hdr iph;
+ struct udphdr udp;
+ __u8 payload[64 - sizeof(struct udphdr)
+ - sizeof(struct ethhdr) - sizeof(struct ipv6hdr)];
+} __packed;
+
+static struct udp_packet pkt_udp = {
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
+ .eth.h_dest = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
+ .eth.h_source = {0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb},
+ .iph.version = 6,
+ .iph.nexthdr = IPPROTO_UDP,
+ .iph.payload_len = bpf_htons(sizeof(struct udp_packet)
+ - offsetof(struct udp_packet, udp)),
+ .iph.hop_limit = 2,
+ .iph.saddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(1)},
+ .iph.daddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(2)},
+ .udp.source = bpf_htons(1),
+ .udp.dest = bpf_htons(1),
+ .udp.len = bpf_htons(sizeof(struct udp_packet)
+ - offsetof(struct udp_packet, udp)),
+ .payload = {0x42}, /* receiver XDP program matches on this */
+};
+
+static int attach_tc_prog(struct bpf_tc_hook *hook, int fd)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, .prog_fd = fd);
+ int ret;
+
+ ret = bpf_tc_hook_create(hook);
+ if (!ASSERT_OK(ret, "create tc hook"))
+ return ret;
+
+ ret = bpf_tc_attach(hook, &opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+ bpf_tc_hook_destroy(hook);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* The maximum permissible size is: PAGE_SIZE - sizeof(struct xdp_page_head) -
+ * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - XDP_PACKET_HEADROOM =
+ * 3408 bytes for 64-byte cacheline and 3216 for 256-byte one.
+ */
+#if defined(__s390x__)
+#define MAX_PKT_SIZE 3216
+#else
+#define MAX_PKT_SIZE 3408
+#endif
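
Making the arithmetic in the comment concrete for the 64-byte-cacheline case: 4096 - 3408 leaves 688 bytes of overhead, which decomposes as XDP_PACKET_HEADROOM (256) plus SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) (320 on x86_64) plus sizeof(struct xdp_page_head) (112, back-computed from the other numbers; exact struct sizes vary with kernel config). A compile-time sanity sketch under those assumed sizes:

/* Assumed sizes; verify against your kernel and config before relying
 * on them. Only the 3408 total comes from the comment above.
 */
#define PAGE_SZ			4096
#define ASSUMED_HEADROOM	 256	/* XDP_PACKET_HEADROOM */
#define ASSUMED_SHINFO		 320	/* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
#define ASSUMED_PAGE_HEAD	 112	/* sizeof(struct xdp_page_head), back-computed */

_Static_assert(PAGE_SZ - ASSUMED_PAGE_HEAD - ASSUMED_SHINFO -
	       ASSUMED_HEADROOM == 3408,
	       "max live-frame payload, 64-byte cachelines");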
+
+#define PAGE_SIZE_4K 4096
+#define PAGE_SIZE_64K 65536
+
+static void test_max_pkt_size(int fd)
+{
+ char data[PAGE_SIZE_64K + 1] = {};
+ int err;
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &data,
+ .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+ .repeat = 1,
+ );
+
+ if (getpagesize() == PAGE_SIZE_64K)
+ opts.data_size_in = MAX_PKT_SIZE + PAGE_SIZE_64K - PAGE_SIZE_4K;
+ else
+ opts.data_size_in = MAX_PKT_SIZE;
+
+ err = bpf_prog_test_run_opts(fd, &opts);
+ ASSERT_OK(err, "prog_run_max_size");
+
+ opts.data_size_in += 1;
+ err = bpf_prog_test_run_opts(fd, &opts);
+ ASSERT_EQ(err, -EINVAL, "prog_run_too_big");
+}
+
+#define NUM_PKTS 10000
+void test_xdp_do_redirect(void)
+{
+ int err, xdp_prog_fd, tc_prog_fd, ifindex_src, ifindex_dst;
+ char data[sizeof(pkt_udp) + sizeof(__u64)];
+ struct test_xdp_do_redirect *skel = NULL;
+ struct nstoken *nstoken = NULL;
+ struct bpf_link *link;
+ LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
+ struct xdp_md ctx_in = { .data = sizeof(__u64),
+ .data_end = sizeof(data) };
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &data,
+ .data_size_in = sizeof(data),
+ .ctx_in = &ctx_in,
+ .ctx_size_in = sizeof(ctx_in),
+ .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+ .repeat = NUM_PKTS,
+ .batch_size = 64,
+ );
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook,
+ .attach_point = BPF_TC_INGRESS);
+
+ memcpy(&data[sizeof(__u64)], &pkt_udp, sizeof(pkt_udp));
+ ((__u32 *)data)[0] = 0x42; /* metadata test value */
+ ((__u32 *)data)[1] = 0;
+
+ skel = test_xdp_do_redirect__open();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ return;
+
+ /* The XDP program we run with bpf_prog_run() will cycle through all
+ * three xmit (PASS/TX/REDIRECT) return codes starting from above, and
+ * ending up with PASS, so we should end up with two packets on the dst
+ * iface and NUM_PKTS-2 in the TC hook. We match the packets on the UDP
+ * payload.
+ */
+ SYS(out, "ip netns add testns");
+ nstoken = open_netns("testns");
+ if (!ASSERT_OK_PTR(nstoken, "setns"))
+ goto out;
+
+ SYS(out, "ip link add veth_src type veth peer name veth_dst");
+ SYS(out, "ip link set dev veth_src address 00:11:22:33:44:55");
+ SYS(out, "ip link set dev veth_dst address 66:77:88:99:aa:bb");
+ SYS(out, "ip link set dev veth_src up");
+ SYS(out, "ip link set dev veth_dst up");
+ SYS(out, "ip addr add dev veth_src fc00::1/64");
+ SYS(out, "ip addr add dev veth_dst fc00::2/64");
+ SYS(out, "ip neigh add fc00::2 dev veth_src lladdr 66:77:88:99:aa:bb");
+
+ /* We enable forwarding in the test namespace because that will cause
+ * the packets that go through the kernel stack (with XDP_PASS) to be
+ * forwarded back out the same interface (because of the packet dst
+ * combined with the interface addresses). When this happens, the
+ * regular forwarding path will end up going through the same
+ * veth_xdp_xmit() call as the XDP_REDIRECT code, which can cause a
+ * deadlock if it happens on the same CPU. There's a local_bh_disable()
+ * in the test_run code to prevent this, but an earlier version of the
+ * code didn't have this, so we keep the test behaviour to make sure the
+ * bug doesn't resurface.
+ */
+ SYS(out, "sysctl -qw net.ipv6.conf.all.forwarding=1");
+
+ ifindex_src = if_nametoindex("veth_src");
+ ifindex_dst = if_nametoindex("veth_dst");
+ if (!ASSERT_NEQ(ifindex_src, 0, "ifindex_src") ||
+ !ASSERT_NEQ(ifindex_dst, 0, "ifindex_dst"))
+ goto out;
+
+ /* Check xdp features supported by veth driver */
+ err = bpf_xdp_query(ifindex_src, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "veth_src bpf_xdp_query"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG,
+ "veth_src query_opts.feature_flags"))
+ goto out;
+
+ err = bpf_xdp_query(ifindex_dst, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "veth_dst bpf_xdp_query"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG,
+ "veth_dst query_opts.feature_flags"))
+ goto out;
+
+ /* Enable GRO */
+ SYS(out, "ethtool -K veth_src gro on");
+ SYS(out, "ethtool -K veth_dst gro on");
+
+ err = bpf_xdp_query(ifindex_src, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "veth_src bpf_xdp_query gro on"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG,
+ "veth_src query_opts.feature_flags gro on"))
+ goto out;
+
+ err = bpf_xdp_query(ifindex_dst, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (!ASSERT_OK(err, "veth_dst bpf_xdp_query gro on"))
+ goto out;
+
+ if (!ASSERT_EQ(query_opts.feature_flags,
+ NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG,
+ "veth_dst query_opts.feature_flags gro on"))
+ goto out;
+
+ memcpy(skel->rodata->expect_dst, &pkt_udp.eth.h_dest, ETH_ALEN);
+ skel->rodata->ifindex_out = ifindex_src; /* redirect back to the same iface */
+ skel->rodata->ifindex_in = ifindex_src;
+ ctx_in.ingress_ifindex = ifindex_src;
+ tc_hook.ifindex = ifindex_src;
+
+ if (!ASSERT_OK(test_xdp_do_redirect__load(skel), "load"))
+ goto out;
+
+ link = bpf_program__attach_xdp(skel->progs.xdp_count_pkts, ifindex_dst);
+ if (!ASSERT_OK_PTR(link, "prog_attach"))
+ goto out;
+ skel->links.xdp_count_pkts = link;
+
+ tc_prog_fd = bpf_program__fd(skel->progs.tc_count_pkts);
+ if (attach_tc_prog(&tc_hook, tc_prog_fd))
+ goto out;
+
+ xdp_prog_fd = bpf_program__fd(skel->progs.xdp_redirect);
+ err = bpf_prog_test_run_opts(xdp_prog_fd, &opts);
+ if (!ASSERT_OK(err, "prog_run"))
+ goto out_tc;
+
+ /* wait for the packets to be flushed */
+ kern_sync_rcu();
+
+ /* There will be one packet sent through XDP_REDIRECT and one through
+ * XDP_TX; these will show up on the XDP counting program, while the
+ * rest will be counted at the TC ingress hook (and the counting program
+ * resets the packet payload so they don't get counted twice even though
+	 * they are re-transmitted out the veth device).
+ */
+ ASSERT_EQ(skel->bss->pkts_seen_xdp, 2, "pkt_count_xdp");
+ ASSERT_EQ(skel->bss->pkts_seen_zero, 2, "pkt_count_zero");
+ ASSERT_EQ(skel->bss->pkts_seen_tc, NUM_PKTS - 2, "pkt_count_tc");
+
+ test_max_pkt_size(bpf_program__fd(skel->progs.xdp_count_pkts));
+
+out_tc:
+ bpf_tc_hook_destroy(&tc_hook);
+out:
+ if (nstoken)
+ close_netns(nstoken);
+ SYS_NOFAIL("ip netns del testns");
+ test_xdp_do_redirect__destroy(skel);
+}
+
+#define NS_NB 3
+#define NS0 "NS0"
+#define NS1 "NS1"
+#define NS2 "NS2"
+#define IPV4_NETWORK "10.1.1"
+#define VETH1_INDEX 111
+#define VETH2_INDEX 222
+
+struct test_data {
+ struct netns_obj *ns[NS_NB];
+ u32 xdp_flags;
+};
+
+static void cleanup(struct test_data *data)
+{
+ int i;
+
+ for (i = 0; i < NS_NB; i++)
+ netns_free(data->ns[i]);
+}
+
+/**
+ * ping_setup -
+ * Create two veth pairs and forward packets between them using XDP
+ *
+ * ------------ ------------
+ * | NS1 | | NS2 |
+ * | veth0 | | veth0 |
+ * | 10.1.1.1 | | 10.1.1.2 |
+ * -----|------ ------|-----
+ * | |
+ * | |
+ * -----|-----------------------|-------
+ * | veth1 veth2 |
+ * | (id:111) (id:222) |
+ * | | | |
+ * | ----- xdp forwarding ----- |
+ * | |
+ * | NS0 |
+ * -------------------------------------
+ */
+static int ping_setup(struct test_data *data)
+{
+ int i;
+
+ data->ns[0] = netns_new(NS0, false);
+ if (!ASSERT_OK_PTR(data->ns[0], "create ns"))
+ return -1;
+
+ for (i = 1; i < NS_NB; i++) {
+ char ns_name[4] = {};
+
+ snprintf(ns_name, 4, "NS%d", i);
+ data->ns[i] = netns_new(ns_name, false);
+ if (!ASSERT_OK_PTR(data->ns[i], "create ns"))
+ goto fail;
+
+ SYS(fail,
+ "ip -n %s link add veth%d index %d%d%d type veth peer name veth0 netns %s",
+ NS0, i, i, i, i, ns_name);
+ SYS(fail, "ip -n %s link set veth%d up", NS0, i);
+
+ SYS(fail, "ip -n %s addr add %s.%d/24 dev veth0", ns_name, IPV4_NETWORK, i);
+ SYS(fail, "ip -n %s link set veth0 up", ns_name);
+ }
+
+ return 0;
+
+fail:
+ cleanup(data);
+ return -1;
+}
+
+static void ping_test(struct test_data *data)
+{
+ struct test_xdp_do_redirect *skel = NULL;
+ struct xdp_dummy *skel_dummy = NULL;
+ struct nstoken *nstoken = NULL;
+ int i, ret;
+
+ skel_dummy = xdp_dummy__open_and_load();
+ if (!ASSERT_OK_PTR(skel_dummy, "open and load xdp_dummy skeleton"))
+ goto close;
+
+ for (i = 1; i < NS_NB; i++) {
+ char ns_name[4] = {};
+
+ snprintf(ns_name, 4, "NS%d", i);
+ nstoken = open_netns(ns_name);
+ if (!ASSERT_OK_PTR(nstoken, "open ns"))
+ goto close;
+
+ ret = bpf_xdp_attach(if_nametoindex("veth0"),
+ bpf_program__fd(skel_dummy->progs.xdp_dummy_prog),
+ data->xdp_flags, NULL);
+ if (!ASSERT_GE(ret, 0, "bpf_xdp_attach dummy_prog"))
+ goto close;
+
+ close_netns(nstoken);
+ nstoken = NULL;
+ }
+
+ skel = test_xdp_do_redirect__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open and load skeleton"))
+ goto close;
+
+ nstoken = open_netns(NS0);
+ if (!ASSERT_OK_PTR(nstoken, "open NS0"))
+ goto close;
+
+ ret = bpf_xdp_attach(VETH2_INDEX,
+ bpf_program__fd(skel->progs.xdp_redirect_to_111),
+ data->xdp_flags, NULL);
+ if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
+ goto close;
+
+ ret = bpf_xdp_attach(VETH1_INDEX,
+ bpf_program__fd(skel->progs.xdp_redirect_to_222),
+ data->xdp_flags, NULL);
+ if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
+ goto close;
+
+ close_netns(nstoken);
+ nstoken = NULL;
+
+ nstoken = open_netns(NS1);
+ if (!ASSERT_OK_PTR(nstoken, "open NS1"))
+ goto close;
+
+ SYS(close, "ping -c 1 %s.2 > /dev/null", IPV4_NETWORK);
+
+close:
+ close_netns(nstoken);
+ xdp_dummy__destroy(skel_dummy);
+ test_xdp_do_redirect__destroy(skel);
+}
+
+static void xdp_redirect_ping(u32 xdp_flags)
+{
+ struct test_data data = {};
+
+ if (ping_setup(&data) < 0)
+ return;
+
+ data.xdp_flags = xdp_flags;
+ ping_test(&data);
+ cleanup(&data);
+}
+
+void test_xdp_index_redirect(void)
+{
+ if (test__start_subtest("noflag"))
+ xdp_redirect_ping(0);
+
+ if (test__start_subtest("drvflag"))
+ xdp_redirect_ping(XDP_FLAGS_DRV_MODE);
+
+ if (test__start_subtest("skbflag"))
+ xdp_redirect_ping(XDP_FLAGS_SKB_MODE);
+}
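
The xdp_redirect_to_111/xdp_redirect_to_222 programs referenced above belong to test_xdp_do_redirect.c and are not shown in this hunk. Given the fixed ifindexes baked into the veth setup (111 and 222), their BPF side is presumably little more than a bpf_redirect() to the hardcoded peer; a guess at their shape, for orientation only:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_redirect_to_111(struct xdp_md *ctx)
{
	return bpf_redirect(111, 0);	/* VETH1_INDEX */
}

SEC("xdp")
int xdp_redirect_to_222(struct xdp_md *ctx)
{
	return bpf_redirect(222, 0);	/* VETH2_INDEX */
}

char _license[] SEC("license") = "GPL";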
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c
new file mode 100644
index 000000000000..3f9146d83d79
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <bpf/btf.h>
+#include <linux/if_link.h>
+#include <netinet/udp.h>
+#include <net/if.h>
+#include <unistd.h>
+
+#include "xdp_flowtable.skel.h"
+
+#define TX_NETNS_NAME "ns0"
+#define RX_NETNS_NAME "ns1"
+
+#define TX_NAME "v0"
+#define FORWARD_NAME "v1"
+#define RX_NAME "d0"
+
+#define TX_MAC "00:00:00:00:00:01"
+#define FORWARD_MAC "00:00:00:00:00:02"
+#define RX_MAC "00:00:00:00:00:03"
+#define DST_MAC "00:00:00:00:00:04"
+
+#define TX_ADDR "10.0.0.1"
+#define FORWARD_ADDR "10.0.0.2"
+#define RX_ADDR "20.0.0.1"
+#define DST_ADDR "20.0.0.2"
+
+#define PREFIX_LEN "8"
+#define N_PACKETS 10
+#define UDP_PORT 12345
+#define UDP_PORT_STR "12345"
+
+static int send_udp_traffic(void)
+{
+ struct sockaddr_storage addr;
+ int i, sock;
+
+ if (make_sockaddr(AF_INET, DST_ADDR, UDP_PORT, &addr, NULL))
+ return -EINVAL;
+
+ sock = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock < 0)
+ return sock;
+
+ for (i = 0; i < N_PACKETS; i++) {
+ unsigned char buf[] = { 0xaa, 0xbb, 0xcc };
+ int n;
+
+ n = sendto(sock, buf, sizeof(buf), MSG_NOSIGNAL | MSG_CONFIRM,
+ (struct sockaddr *)&addr, sizeof(addr));
+ if (n != sizeof(buf)) {
+ close(sock);
+ return -EINVAL;
+ }
+
+ usleep(50000); /* 50ms */
+ }
+ close(sock);
+
+ return 0;
+}
+
+void test_xdp_flowtable(void)
+{
+ struct xdp_flowtable *skel = NULL;
+ struct nstoken *tok = NULL;
+ int iifindex, stats_fd;
+ __u32 value, key = 0;
+ struct bpf_link *link;
+
+ if (SYS_NOFAIL("nft -v")) {
+ fprintf(stdout, "Missing required nft tool\n");
+ test__skip();
+ return;
+ }
+
+ SYS(out, "ip netns add " TX_NETNS_NAME);
+ SYS(out, "ip netns add " RX_NETNS_NAME);
+
+ tok = open_netns(RX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+
+ SYS(out, "sysctl -qw net.ipv4.conf.all.forwarding=1");
+
+ SYS(out, "ip link add " TX_NAME " type veth peer " FORWARD_NAME);
+ SYS(out, "ip link set " TX_NAME " netns " TX_NETNS_NAME);
+ SYS(out, "ip link set dev " FORWARD_NAME " address " FORWARD_MAC);
+ SYS(out,
+ "ip addr add " FORWARD_ADDR "/" PREFIX_LEN " dev " FORWARD_NAME);
+ SYS(out, "ip link set dev " FORWARD_NAME " up");
+
+ SYS(out, "ip link add " RX_NAME " type dummy");
+ SYS(out, "ip link set dev " RX_NAME " address " RX_MAC);
+ SYS(out, "ip addr add " RX_ADDR "/" PREFIX_LEN " dev " RX_NAME);
+ SYS(out, "ip link set dev " RX_NAME " up");
+
+ /* configure the flowtable */
+ SYS(out, "nft add table ip filter");
+ SYS(out,
+ "nft add flowtable ip filter f { hook ingress priority 0\\; "
+ "devices = { " FORWARD_NAME ", " RX_NAME " }\\; }");
+ SYS(out,
+ "nft add chain ip filter forward "
+ "{ type filter hook forward priority 0\\; }");
+ SYS(out,
+ "nft add rule ip filter forward ip protocol udp th dport "
+ UDP_PORT_STR " flow add @f");
+
+ /* Avoid ARP calls */
+ SYS(out,
+ "ip -4 neigh add " DST_ADDR " lladdr " DST_MAC " dev " RX_NAME);
+
+ close_netns(tok);
+ tok = open_netns(TX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+
+ SYS(out, "ip addr add " TX_ADDR "/" PREFIX_LEN " dev " TX_NAME);
+ SYS(out, "ip link set dev " TX_NAME " address " TX_MAC);
+ SYS(out, "ip link set dev " TX_NAME " up");
+ SYS(out, "ip route add default via " FORWARD_ADDR);
+
+ close_netns(tok);
+ tok = open_netns(RX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+
+ iifindex = if_nametoindex(FORWARD_NAME);
+ if (!ASSERT_NEQ(iifindex, 0, "iifindex"))
+ goto out;
+
+ skel = xdp_flowtable__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ goto out;
+
+ link = bpf_program__attach_xdp(skel->progs.xdp_flowtable_do_lookup,
+ iifindex);
+ if (!ASSERT_OK_PTR(link, "prog_attach"))
+ goto out;
+
+ close_netns(tok);
+ tok = open_netns(TX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+
+ if (!ASSERT_OK(send_udp_traffic(), "send udp"))
+ goto out;
+
+ close_netns(tok);
+ tok = open_netns(RX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+
+ stats_fd = bpf_map__fd(skel->maps.stats);
+ if (!ASSERT_OK(bpf_map_lookup_elem(stats_fd, &key, &value),
+ "bpf_map_update_elem stats"))
+ goto out;
+
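+	/* The first packet(s) take the nftables slow path while the
+	 * "flow add @f" rule creates the flowtable entry, so the check below
+	 * tolerates a couple of misses before the XDP lookup starts hitting.
+	 */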
+ ASSERT_GE(value, N_PACKETS - 2, "bpf_xdp_flow_lookup failed");
+out:
+ xdp_flowtable__destroy(skel);
+ if (tok)
+ close_netns(tok);
+ SYS_NOFAIL("ip netns del " TX_NETNS_NAME);
+ SYS_NOFAIL("ip netns del " RX_NETNS_NAME);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_info.c b/tools/testing/selftests/bpf/prog_tests/xdp_info.c
index d2d7a283d72f..1dbddcab87a8 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_info.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_info.c
@@ -4,23 +4,24 @@
#define IFINDEX_LO 1
-void test_xdp_info(void)
+void serial_test_xdp_info(void)
{
__u32 len = sizeof(struct bpf_prog_info), duration = 0, prog_id;
- const char *file = "./xdp_dummy.o";
+ const char *file = "./xdp_dummy.bpf.o";
+ LIBBPF_OPTS(bpf_xdp_query_opts, opts);
struct bpf_prog_info info = {};
struct bpf_object *obj;
int err, prog_fd;
/* Get prog_id for XDP_ATTACHED_NONE mode */
- err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, 0);
+ err = bpf_xdp_query_id(IFINDEX_LO, 0, &prog_id);
if (CHECK(err, "get_xdp_none", "errno=%d\n", errno))
return;
if (CHECK(prog_id, "prog_id_none", "unexpected prog_id=%u\n", prog_id))
return;
- err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_SKB_MODE);
+ err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_SKB_MODE, &prog_id);
if (CHECK(err, "get_xdp_none_skb", "errno=%d\n", errno))
return;
if (CHECK(prog_id, "prog_id_none_skb", "unexpected prog_id=%u\n",
@@ -29,40 +30,47 @@ void test_xdp_info(void)
/* Setup prog */
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
- err = bpf_obj_get_info_by_fd(prog_fd, &info, &len);
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
if (CHECK(err, "get_prog_info", "errno=%d\n", errno))
goto out_close;
- err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE);
+ err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL);
if (CHECK(err, "set_xdp_skb", "errno=%d\n", errno))
goto out_close;
/* Get prog_id for single prog mode */
- err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, 0);
+ err = bpf_xdp_query_id(IFINDEX_LO, 0, &prog_id);
if (CHECK(err, "get_xdp", "errno=%d\n", errno))
goto out;
if (CHECK(prog_id != info.id, "prog_id", "prog_id not available\n"))
goto out;
- err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_SKB_MODE);
+ err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_SKB_MODE, &prog_id);
if (CHECK(err, "get_xdp_skb", "errno=%d\n", errno))
goto out;
if (CHECK(prog_id != info.id, "prog_id_skb", "prog_id not available\n"))
goto out;
- err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_DRV_MODE);
+ err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_DRV_MODE, &prog_id);
if (CHECK(err, "get_xdp_drv", "errno=%d\n", errno))
goto out;
if (CHECK(prog_id, "prog_id_drv", "unexpected prog_id=%u\n", prog_id))
goto out;
+ /* Check xdp features supported by lo device */
+ opts.feature_flags = ~0;
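+	/* Pre-fill with all ones so the ASSERT_EQ(0) below can only pass if
+	 * the kernel actually wrote the (empty) feature mask back.
+	 */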
+ err = bpf_xdp_query(IFINDEX_LO, XDP_FLAGS_DRV_MODE, &opts);
+ if (!ASSERT_OK(err, "bpf_xdp_query"))
+ goto out;
+
+ ASSERT_EQ(opts.feature_flags, 0, "opts.feature_flags");
out:
- bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0);
+ bpf_xdp_detach(IFINDEX_LO, 0, NULL);
out_close:
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_link.c b/tools/testing/selftests/bpf/prog_tests/xdp_link.c
index 6f814999b395..e7e9f3c22edf 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_link.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_link.c
@@ -6,96 +6,96 @@
#define IFINDEX_LO 1
-void test_xdp_link(void)
+void serial_test_xdp_link(void)
{
- __u32 duration = 0, id1, id2, id0 = 0, prog_fd1, prog_fd2, err;
- DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts, .old_fd = -1);
struct test_xdp_link *skel1 = NULL, *skel2 = NULL;
+ __u32 id1, id2, id0 = 0, prog_fd1, prog_fd2;
+ LIBBPF_OPTS(bpf_xdp_attach_opts, opts);
struct bpf_link_info link_info;
struct bpf_prog_info prog_info;
struct bpf_link *link;
+ int err;
__u32 link_info_len = sizeof(link_info);
__u32 prog_info_len = sizeof(prog_info);
skel1 = test_xdp_link__open_and_load();
- if (CHECK(!skel1, "skel_load", "skeleton open and load failed\n"))
+ if (!ASSERT_OK_PTR(skel1, "skel_load"))
goto cleanup;
prog_fd1 = bpf_program__fd(skel1->progs.xdp_handler);
skel2 = test_xdp_link__open_and_load();
- if (CHECK(!skel2, "skel_load", "skeleton open and load failed\n"))
+ if (!ASSERT_OK_PTR(skel2, "skel_load"))
goto cleanup;
prog_fd2 = bpf_program__fd(skel2->progs.xdp_handler);
memset(&prog_info, 0, sizeof(prog_info));
- err = bpf_obj_get_info_by_fd(prog_fd1, &prog_info, &prog_info_len);
- if (CHECK(err, "fd_info1", "failed %d\n", -errno))
+ err = bpf_prog_get_info_by_fd(prog_fd1, &prog_info, &prog_info_len);
+ if (!ASSERT_OK(err, "fd_info1"))
goto cleanup;
id1 = prog_info.id;
memset(&prog_info, 0, sizeof(prog_info));
- err = bpf_obj_get_info_by_fd(prog_fd2, &prog_info, &prog_info_len);
- if (CHECK(err, "fd_info2", "failed %d\n", -errno))
+ err = bpf_prog_get_info_by_fd(prog_fd2, &prog_info, &prog_info_len);
+ if (!ASSERT_OK(err, "fd_info2"))
goto cleanup;
id2 = prog_info.id;
/* set initial prog attachment */
- err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, prog_fd1, XDP_FLAGS_REPLACE, &opts);
- if (CHECK(err, "fd_attach", "initial prog attach failed: %d\n", err))
+ err = bpf_xdp_attach(IFINDEX_LO, prog_fd1, XDP_FLAGS_REPLACE, &opts);
+ if (!ASSERT_OK(err, "fd_attach"))
goto cleanup;
/* validate prog ID */
- err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
- CHECK(err || id0 != id1, "id1_check",
- "loaded prog id %u != id1 %u, err %d", id0, id1, err);
+ err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
+ if (!ASSERT_OK(err, "id1_check_err") || !ASSERT_EQ(id0, id1, "id1_check_val"))
+ goto cleanup;
/* BPF link is not allowed to replace prog attachment */
link = bpf_program__attach_xdp(skel1->progs.xdp_handler, IFINDEX_LO);
- if (CHECK(!IS_ERR(link), "link_attach_fail", "unexpected success\n")) {
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
/* best-effort detach prog */
- opts.old_fd = prog_fd1;
- bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, XDP_FLAGS_REPLACE, &opts);
+ opts.old_prog_fd = prog_fd1;
+ bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_REPLACE, &opts);
goto cleanup;
}
/* detach BPF program */
- opts.old_fd = prog_fd1;
- err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, XDP_FLAGS_REPLACE, &opts);
- if (CHECK(err, "prog_detach", "failed %d\n", err))
+ opts.old_prog_fd = prog_fd1;
+ err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_REPLACE, &opts);
+ if (!ASSERT_OK(err, "prog_detach"))
goto cleanup;
/* now BPF link should attach successfully */
link = bpf_program__attach_xdp(skel1->progs.xdp_handler, IFINDEX_LO);
- if (CHECK(IS_ERR(link), "link_attach", "failed: %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel1->links.xdp_handler = link;
/* validate prog ID */
- err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
- if (CHECK(err || id0 != id1, "id1_check",
- "loaded prog id %u != id1 %u, err %d", id0, id1, err))
+ err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
+ if (!ASSERT_OK(err, "id1_check_err") || !ASSERT_EQ(id0, id1, "id1_check_val"))
goto cleanup;
/* BPF prog attach is not allowed to replace BPF link */
- opts.old_fd = prog_fd1;
- err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, prog_fd2, XDP_FLAGS_REPLACE, &opts);
- if (CHECK(!err, "prog_attach_fail", "unexpected success\n"))
+ opts.old_prog_fd = prog_fd1;
+ err = bpf_xdp_attach(IFINDEX_LO, prog_fd2, XDP_FLAGS_REPLACE, &opts);
+ if (!ASSERT_ERR(err, "prog_attach_fail"))
goto cleanup;
/* Can't force-update when BPF link is active */
- err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd2, 0);
- if (CHECK(!err, "prog_update_fail", "unexpected success\n"))
+ err = bpf_xdp_attach(IFINDEX_LO, prog_fd2, 0, NULL);
+ if (!ASSERT_ERR(err, "prog_update_fail"))
goto cleanup;
/* Can't force-detach when BPF link is active */
- err = bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0);
- if (CHECK(!err, "prog_detach_fail", "unexpected success\n"))
+ err = bpf_xdp_detach(IFINDEX_LO, 0, NULL);
+ if (!ASSERT_ERR(err, "prog_detach_fail"))
goto cleanup;
/* BPF link is not allowed to replace another BPF link */
link = bpf_program__attach_xdp(skel2->progs.xdp_handler, IFINDEX_LO);
- if (CHECK(!IS_ERR(link), "link_attach_fail", "unexpected success\n")) {
+ if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) {
bpf_link__destroy(link);
goto cleanup;
}
@@ -105,45 +105,46 @@ void test_xdp_link(void)
/* new link attach should succeed */
link = bpf_program__attach_xdp(skel2->progs.xdp_handler, IFINDEX_LO);
- if (CHECK(IS_ERR(link), "link_attach", "failed: %ld\n", PTR_ERR(link)))
+ if (!ASSERT_OK_PTR(link, "link_attach"))
goto cleanup;
skel2->links.xdp_handler = link;
- err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
- if (CHECK(err || id0 != id2, "id2_check",
- "loaded prog id %u != id2 %u, err %d", id0, id1, err))
+ err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
+ if (!ASSERT_OK(err, "id2_check_err") || !ASSERT_EQ(id0, id2, "id2_check_val"))
goto cleanup;
/* updating program under active BPF link works as expected */
err = bpf_link__update_program(link, skel1->progs.xdp_handler);
- if (CHECK(err, "link_upd", "failed: %d\n", err))
+ if (!ASSERT_OK(err, "link_upd"))
goto cleanup;
memset(&link_info, 0, sizeof(link_info));
- err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len);
- if (CHECK(err, "link_info", "failed: %d\n", err))
+ err = bpf_link_get_info_by_fd(bpf_link__fd(link),
+ &link_info, &link_info_len);
+ if (!ASSERT_OK(err, "link_info"))
goto cleanup;
- CHECK(link_info.type != BPF_LINK_TYPE_XDP, "link_type",
- "got %u != exp %u\n", link_info.type, BPF_LINK_TYPE_XDP);
- CHECK(link_info.prog_id != id1, "link_prog_id",
- "got %u != exp %u\n", link_info.prog_id, id1);
- CHECK(link_info.xdp.ifindex != IFINDEX_LO, "link_ifindex",
- "got %u != exp %u\n", link_info.xdp.ifindex, IFINDEX_LO);
+ ASSERT_EQ(link_info.type, BPF_LINK_TYPE_XDP, "link_type");
+ ASSERT_EQ(link_info.prog_id, id1, "link_prog_id");
+ ASSERT_EQ(link_info.xdp.ifindex, IFINDEX_LO, "link_ifindex");
+
+ /* updating program under active BPF link with different type fails */
+ err = bpf_link__update_program(link, skel1->progs.tc_handler);
+ if (!ASSERT_ERR(err, "link_upd_invalid"))
+ goto cleanup;
err = bpf_link__detach(link);
- if (CHECK(err, "link_detach", "failed %d\n", err))
+ if (!ASSERT_OK(err, "link_detach"))
goto cleanup;
memset(&link_info, 0, sizeof(link_info));
- err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len);
- if (CHECK(err, "link_info", "failed: %d\n", err))
- goto cleanup;
- CHECK(link_info.prog_id != id1, "link_prog_id",
- "got %u != exp %u\n", link_info.prog_id, id1);
+ err = bpf_link_get_info_by_fd(bpf_link__fd(link),
+ &link_info, &link_info_len);
+
+ ASSERT_OK(err, "link_info");
+ ASSERT_EQ(link_info.prog_id, id1, "link_prog_id");
/* ifindex should be zeroed out */
- CHECK(link_info.xdp.ifindex != 0, "link_ifindex",
- "got %u != exp %u\n", link_info.xdp.ifindex, 0);
+ ASSERT_EQ(link_info.xdp.ifindex, 0, "link_ifindex");
cleanup:
test_xdp_link__destroy(skel1);
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
new file mode 100644
index 000000000000..19f92affc2da
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "xdp_metadata.skel.h"
+#include "xdp_metadata2.skel.h"
+#include "xdp_metadata.h"
+#include "xsk.h"
+
+#include <bpf/btf.h>
+#include <linux/errqueue.h>
+#include <linux/if_link.h>
+#include <linux/net_tstamp.h>
+#include <netinet/udp.h>
+#include <sys/mman.h>
+#include <net/if.h>
+#include <poll.h>
+
+#define TX_NAME "veTX"
+#define RX_NAME "veRX"
+
+#define UDP_PAYLOAD_BYTES 4
+
+#define UDP_SOURCE_PORT 1234
+#define AF_XDP_CONSUMER_PORT 8080
+
+#define UMEM_NUM 16
+#define UMEM_FRAME_SIZE XSK_UMEM__DEFAULT_FRAME_SIZE
+#define UMEM_SIZE (UMEM_FRAME_SIZE * UMEM_NUM)
+#define XDP_FLAGS XDP_FLAGS_DRV_MODE
+#define QUEUE_ID 0
+
+#define TX_ADDR "10.0.0.1"
+#define RX_ADDR "10.0.0.2"
+#define PREFIX_LEN "8"
+#define FAMILY AF_INET
+#define TX_NETNS_NAME "xdp_metadata_tx"
+#define RX_NETNS_NAME "xdp_metadata_rx"
+#define TX_MAC "00:00:00:00:00:01"
+#define RX_MAC "00:00:00:00:00:02"
+
+#define VLAN_ID 59
+#define VLAN_PROTO "802.1Q"
+#define VLAN_PID htons(ETH_P_8021Q)
+#define TX_NAME_VLAN TX_NAME "." TO_STR(VLAN_ID)
+
+#define XDP_RSS_TYPE_L4 BIT(3)
+#define VLAN_VID_MASK 0xfff
+
+struct xsk {
+ void *umem_area;
+ struct xsk_umem *umem;
+ struct xsk_ring_prod fill;
+ struct xsk_ring_cons comp;
+ struct xsk_ring_prod tx;
+ struct xsk_ring_cons rx;
+ struct xsk_socket *socket;
+};
+
+static int open_xsk(int ifindex, struct xsk *xsk)
+{
+ int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
+ const struct xsk_socket_config socket_config = {
+ .rx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ .bind_flags = XDP_COPY,
+ };
+ const struct xsk_umem_config umem_config = {
+ .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
+ .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
+ .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG | XDP_UMEM_TX_SW_CSUM |
+ XDP_UMEM_TX_METADATA_LEN,
+ .tx_metadata_len = sizeof(struct xsk_tx_metadata),
+ };
+ __u32 idx;
+ u64 addr;
+ int ret;
+ int i;
+
+ xsk->umem_area = mmap(NULL, UMEM_SIZE, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
+ if (!ASSERT_NEQ(xsk->umem_area, MAP_FAILED, "mmap"))
+ return -1;
+
+ ret = xsk_umem__create(&xsk->umem,
+ xsk->umem_area, UMEM_SIZE,
+ &xsk->fill,
+ &xsk->comp,
+ &umem_config);
+ if (!ASSERT_OK(ret, "xsk_umem__create"))
+ return ret;
+
+ ret = xsk_socket__create(&xsk->socket, ifindex, QUEUE_ID,
+ xsk->umem,
+ &xsk->rx,
+ &xsk->tx,
+ &socket_config);
+ if (!ASSERT_OK(ret, "xsk_socket__create"))
+ return ret;
+
+	/* The first half of the umem is for TX. This way the address maps 1-to-1
+	 * to the completion queue index.
+ */
+
+ for (i = 0; i < UMEM_NUM / 2; i++) {
+ addr = i * UMEM_FRAME_SIZE;
+ printf("%p: tx_desc[%d] -> %lx\n", xsk, i, addr);
+ }
+
+ /* Second half of umem is for RX. */
+
+ ret = xsk_ring_prod__reserve(&xsk->fill, UMEM_NUM / 2, &idx);
+ if (!ASSERT_EQ(UMEM_NUM / 2, ret, "xsk_ring_prod__reserve"))
+ return ret;
+ if (!ASSERT_EQ(idx, 0, "fill idx != 0"))
+ return -1;
+
+ for (i = 0; i < UMEM_NUM / 2; i++) {
+ addr = (UMEM_NUM / 2 + i) * UMEM_FRAME_SIZE;
+ printf("%p: rx_desc[%d] -> %lx\n", xsk, i, addr);
+ *xsk_ring_prod__fill_addr(&xsk->fill, i) = addr;
+ }
+ xsk_ring_prod__submit(&xsk->fill, ret);
+
+ return 0;
+}
+
+static void close_xsk(struct xsk *xsk)
+{
+ if (xsk->umem)
+ xsk_umem__delete(xsk->umem);
+ if (xsk->socket)
+ xsk_socket__delete(xsk->socket);
+ munmap(xsk->umem_area, UMEM_SIZE);
+}
+
+static int generate_packet(struct xsk *xsk, __u16 dst_port)
+{
+ struct xsk_tx_metadata *meta;
+ struct xdp_desc *tx_desc;
+ struct udphdr *udph;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ void *data;
+ __u32 idx;
+ int ret;
+
+ ret = xsk_ring_prod__reserve(&xsk->tx, 1, &idx);
+ if (!ASSERT_EQ(ret, 1, "xsk_ring_prod__reserve"))
+ return -1;
+
+ tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx);
+ tx_desc->addr = idx % (UMEM_NUM / 2) * UMEM_FRAME_SIZE + sizeof(struct xsk_tx_metadata);
+ printf("%p: tx_desc[%u]->addr=%llx\n", xsk, idx, tx_desc->addr);
+ data = xsk_umem__get_data(xsk->umem_area, tx_desc->addr);
+
+ meta = data - sizeof(struct xsk_tx_metadata);
+ memset(meta, 0, sizeof(*meta));
+ meta->flags = XDP_TXMD_FLAGS_TIMESTAMP;
+
+ eth = data;
+ iph = (void *)(eth + 1);
+ udph = (void *)(iph + 1);
+
+ memcpy(eth->h_dest, "\x00\x00\x00\x00\x00\x02", ETH_ALEN);
+ memcpy(eth->h_source, "\x00\x00\x00\x00\x00\x01", ETH_ALEN);
+ eth->h_proto = htons(ETH_P_IP);
+
+ iph->version = 0x4;
+ iph->ihl = 0x5;
+ iph->tos = 0x9;
+ iph->tot_len = htons(sizeof(*iph) + sizeof(*udph) + UDP_PAYLOAD_BYTES);
+ iph->id = 0;
+ iph->frag_off = 0;
+ iph->ttl = 0;
+ iph->protocol = IPPROTO_UDP;
+ ASSERT_EQ(inet_pton(FAMILY, TX_ADDR, &iph->saddr), 1, "inet_pton(TX_ADDR)");
+ ASSERT_EQ(inet_pton(FAMILY, RX_ADDR, &iph->daddr), 1, "inet_pton(RX_ADDR)");
+ iph->check = build_ip_csum(iph);
+
+ udph->source = htons(UDP_SOURCE_PORT);
+ udph->dest = htons(dst_port);
+ udph->len = htons(sizeof(*udph) + UDP_PAYLOAD_BYTES);
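+	/* Seed the checksum field with the pseudo-header sum only; the
+	 * XDP_TXMD_FLAGS_CHECKSUM request below asks the kernel to fold in
+	 * everything from csum_start and store the result at csum_offset.
+	 */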
+ udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ ntohs(udph->len), IPPROTO_UDP, 0);
+
+ memset(udph + 1, 0xAA, UDP_PAYLOAD_BYTES);
+
+ meta->flags |= XDP_TXMD_FLAGS_CHECKSUM;
+ meta->request.csum_start = sizeof(*eth) + sizeof(*iph);
+ meta->request.csum_offset = offsetof(struct udphdr, check);
+
+ tx_desc->len = sizeof(*eth) + sizeof(*iph) + sizeof(*udph) + UDP_PAYLOAD_BYTES;
+ tx_desc->options |= XDP_TX_METADATA;
+ xsk_ring_prod__submit(&xsk->tx, 1);
+
+ ret = sendto(xsk_socket__fd(xsk->socket), NULL, 0, MSG_DONTWAIT, NULL, 0);
+ if (!ASSERT_GE(ret, 0, "sendto"))
+ return ret;
+
+ return 0;
+}
+
+static int generate_packet_inet(void)
+{
+ char udp_payload[UDP_PAYLOAD_BYTES];
+ struct sockaddr_in rx_addr;
+ int sock_fd, err = 0;
+
+ /* Build a packet */
+ memset(udp_payload, 0xAA, UDP_PAYLOAD_BYTES);
+ rx_addr.sin_addr.s_addr = inet_addr(RX_ADDR);
+ rx_addr.sin_family = AF_INET;
+ rx_addr.sin_port = htons(AF_XDP_CONSUMER_PORT);
+
+ sock_fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+ if (!ASSERT_GE(sock_fd, 0, "socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)"))
+ return sock_fd;
+
+ err = sendto(sock_fd, udp_payload, UDP_PAYLOAD_BYTES, MSG_DONTWAIT,
+ (void *)&rx_addr, sizeof(rx_addr));
+ ASSERT_GE(err, 0, "sendto");
+
+ close(sock_fd);
+ return err;
+}
+
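+/* A TX completion returns the address of the transmitted frame; its metadata
+ * area should by then carry the tx_timestamp the driver filled in for the
+ * XDP_TXMD_FLAGS_TIMESTAMP request made in generate_packet().
+ */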
+static void complete_tx(struct xsk *xsk)
+{
+ struct xsk_tx_metadata *meta;
+ __u64 addr;
+ void *data;
+ __u32 idx;
+
+ if (ASSERT_EQ(xsk_ring_cons__peek(&xsk->comp, 1, &idx), 1, "xsk_ring_cons__peek")) {
+ addr = *xsk_ring_cons__comp_addr(&xsk->comp, idx);
+
+ printf("%p: complete tx idx=%u addr=%llx\n", xsk, idx, addr);
+
+ data = xsk_umem__get_data(xsk->umem_area, addr);
+ meta = data - sizeof(struct xsk_tx_metadata);
+
+ ASSERT_NEQ(meta->completion.tx_timestamp, 0, "tx_timestamp");
+
+ xsk_ring_cons__release(&xsk->comp, 1);
+ }
+}
+
+static void refill_rx(struct xsk *xsk, __u64 addr)
+{
+ __u32 idx;
+
+ if (ASSERT_EQ(xsk_ring_prod__reserve(&xsk->fill, 1, &idx), 1, "xsk_ring_prod__reserve")) {
+ printf("%p: complete idx=%u addr=%llx\n", xsk, idx, addr);
+ *xsk_ring_prod__fill_addr(&xsk->fill, idx) = addr;
+ xsk_ring_prod__submit(&xsk->fill, 1);
+ }
+}
+
+static int verify_xsk_metadata(struct xsk *xsk, bool sent_from_af_xdp)
+{
+ const struct xdp_desc *rx_desc;
+ struct pollfd fds = {};
+ struct xdp_meta *meta;
+ struct udphdr *udph;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ __u64 comp_addr;
+ void *data;
+ __u64 addr;
+ __u32 idx = 0;
+ int ret;
+
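+	/* The zero-length, non-blocking recvfrom() is presumably here to kick
+	 * the kernel's RX processing (and to confirm it returns 0) before
+	 * polling the ring below.
+	 */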
+ ret = recvfrom(xsk_socket__fd(xsk->socket), NULL, 0, MSG_DONTWAIT, NULL, NULL);
+ if (!ASSERT_EQ(ret, 0, "recvfrom"))
+ return -1;
+
+ fds.fd = xsk_socket__fd(xsk->socket);
+ fds.events = POLLIN;
+
+ ret = poll(&fds, 1, 1000);
+ if (!ASSERT_GT(ret, 0, "poll"))
+ return -1;
+
+ ret = xsk_ring_cons__peek(&xsk->rx, 1, &idx);
+ if (!ASSERT_EQ(ret, 1, "xsk_ring_cons__peek"))
+ return -2;
+
+ rx_desc = xsk_ring_cons__rx_desc(&xsk->rx, idx);
+ comp_addr = xsk_umem__extract_addr(rx_desc->addr);
+ addr = xsk_umem__add_offset_to_addr(rx_desc->addr);
+ printf("%p: rx_desc[%u]->addr=%llx addr=%llx comp_addr=%llx\n",
+ xsk, idx, rx_desc->addr, addr, comp_addr);
+ data = xsk_umem__get_data(xsk->umem_area, addr);
+
+ /* Make sure we got the packet offset correctly. */
+
+ eth = data;
+ ASSERT_EQ(eth->h_proto, htons(ETH_P_IP), "eth->h_proto");
+ iph = (void *)(eth + 1);
+ ASSERT_EQ((int)iph->version, 4, "iph->version");
+ udph = (void *)(iph + 1);
+
+ /* custom metadata */
+
+ meta = data - sizeof(struct xdp_meta);
+
+ if (!ASSERT_NEQ(meta->rx_timestamp, 0, "rx_timestamp"))
+ return -1;
+
+ if (!ASSERT_NEQ(meta->rx_hash, 0, "rx_hash"))
+ return -1;
+
+ if (!sent_from_af_xdp) {
+ if (!ASSERT_NEQ(meta->rx_hash_type & XDP_RSS_TYPE_L4, 0, "rx_hash_type"))
+ return -1;
+
+ if (!ASSERT_EQ(meta->rx_vlan_tci & VLAN_VID_MASK, VLAN_ID, "rx_vlan_tci"))
+ return -1;
+
+ if (!ASSERT_EQ(meta->rx_vlan_proto, VLAN_PID, "rx_vlan_proto"))
+ return -1;
+ goto done;
+ }
+
+ ASSERT_EQ(meta->rx_hash_type, 0, "rx_hash_type");
+
+ /* checksum offload */
+ ASSERT_EQ(udph->check, htons(0x721c), "csum");
+
+done:
+ xsk_ring_cons__release(&xsk->rx, 1);
+ refill_rx(xsk, comp_addr);
+
+ return 0;
+}
+
+static void switch_ns_to_rx(struct nstoken **tok)
+{
+ close_netns(*tok);
+ *tok = open_netns(RX_NETNS_NAME);
+}
+
+static void switch_ns_to_tx(struct nstoken **tok)
+{
+ close_netns(*tok);
+ *tok = open_netns(TX_NETNS_NAME);
+}
+
+void test_xdp_metadata(void)
+{
+ struct xdp_metadata2 *bpf_obj2 = NULL;
+ struct xdp_metadata *bpf_obj = NULL;
+ struct bpf_program *new_prog, *prog;
+ struct bpf_devmap_val devmap_e = {};
+ struct bpf_map *prog_arr, *devmap;
+ struct nstoken *tok = NULL;
+ __u32 queue_id = QUEUE_ID;
+ struct xsk tx_xsk = {};
+ struct xsk rx_xsk = {};
+ __u32 val, key = 0;
+ int retries = 10;
+ int rx_ifindex;
+ int tx_ifindex;
+ int sock_fd;
+ int ret;
+
+	/* Set up new networking namespaces with a veth pair. */
+ SYS(out, "ip netns add " TX_NETNS_NAME);
+ SYS(out, "ip netns add " RX_NETNS_NAME);
+
+ tok = open_netns(TX_NETNS_NAME);
+ if (!ASSERT_OK_PTR(tok, "setns"))
+ goto out;
+ SYS(out, "ip link add numtxqueues 1 numrxqueues 1 " TX_NAME
+ " type veth peer " RX_NAME " numtxqueues 1 numrxqueues 1");
+ SYS(out, "ip link set " RX_NAME " netns " RX_NETNS_NAME);
+
+ SYS(out, "ip link set dev " TX_NAME " address " TX_MAC);
+ SYS(out, "ip link set dev " TX_NAME " up");
+
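+	/* Address the TX side through a VLAN subdevice so that received
+	 * packets carry a VLAN tag for the rx_vlan_tci/rx_vlan_proto
+	 * metadata checks in verify_xsk_metadata().
+	 */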
+ SYS(out, "ip link add link " TX_NAME " " TX_NAME_VLAN
+ " type vlan proto " VLAN_PROTO " id " TO_STR(VLAN_ID));
+ SYS(out, "ip link set dev " TX_NAME_VLAN " up");
+ SYS(out, "ip addr add " TX_ADDR "/" PREFIX_LEN " dev " TX_NAME_VLAN);
+
+ /* Avoid ARP calls */
+ SYS(out, "ip -4 neigh add " RX_ADDR " lladdr " RX_MAC " dev " TX_NAME_VLAN);
+
+ switch_ns_to_rx(&tok);
+ if (!ASSERT_OK_PTR(tok, "setns rx"))
+ goto out;
+
+ SYS(out, "ip link set dev " RX_NAME " address " RX_MAC);
+ SYS(out, "ip link set dev " RX_NAME " up");
+ SYS(out, "ip addr add " RX_ADDR "/" PREFIX_LEN " dev " RX_NAME);
+
+ rx_ifindex = if_nametoindex(RX_NAME);
+
+	/* Set up a separate AF_XDP socket for the RX interface. */
+
+ ret = open_xsk(rx_ifindex, &rx_xsk);
+ if (!ASSERT_OK(ret, "open_xsk(RX_NAME)"))
+ goto out;
+
+ bpf_obj = xdp_metadata__open();
+ if (!ASSERT_OK_PTR(bpf_obj, "open skeleton"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(bpf_obj->obj, "rx");
+ bpf_program__set_ifindex(prog, rx_ifindex);
+ bpf_program__set_flags(prog, BPF_F_XDP_DEV_BOUND_ONLY);
+
+ /* Make sure we can load a dev-bound program that performs
+ * XDP_REDIRECT into a devmap.
+ */
+ new_prog = bpf_object__find_program_by_name(bpf_obj->obj, "redirect");
+ bpf_program__set_ifindex(new_prog, rx_ifindex);
+ bpf_program__set_flags(new_prog, BPF_F_XDP_DEV_BOUND_ONLY);
+
+ if (!ASSERT_OK(xdp_metadata__load(bpf_obj), "load skeleton"))
+ goto out;
+
+ /* Make sure we can't add dev-bound programs to prog maps. */
+ prog_arr = bpf_object__find_map_by_name(bpf_obj->obj, "prog_arr");
+ if (!ASSERT_OK_PTR(prog_arr, "no prog_arr map"))
+ goto out;
+
+ val = bpf_program__fd(prog);
+ if (!ASSERT_ERR(bpf_map__update_elem(prog_arr, &key, sizeof(key),
+ &val, sizeof(val), BPF_ANY),
+ "update prog_arr"))
+ goto out;
+
+ /* Make sure we can't add dev-bound programs to devmaps. */
+ devmap = bpf_object__find_map_by_name(bpf_obj->obj, "dev_map");
+ if (!ASSERT_OK_PTR(devmap, "no dev_map found"))
+ goto out;
+
+ devmap_e.bpf_prog.fd = val;
+ if (!ASSERT_ERR(bpf_map__update_elem(devmap, &key, sizeof(key),
+ &devmap_e, sizeof(devmap_e),
+ BPF_ANY),
+ "update dev_map"))
+ goto out;
+
+ /* Attach BPF program to RX interface. */
+
+ ret = bpf_xdp_attach(rx_ifindex,
+ bpf_program__fd(bpf_obj->progs.rx),
+ XDP_FLAGS, NULL);
+ if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
+ goto out;
+
+ sock_fd = xsk_socket__fd(rx_xsk.socket);
+ ret = bpf_map_update_elem(bpf_map__fd(bpf_obj->maps.xsk), &queue_id, &sock_fd, 0);
+ if (!ASSERT_GE(ret, 0, "bpf_map_update_elem"))
+ goto out;
+
+ switch_ns_to_tx(&tok);
+ if (!ASSERT_OK_PTR(tok, "setns tx"))
+ goto out;
+
+	/* Set up a separate AF_XDP socket for the TX interface and send a packet to the RX socket. */
+ tx_ifindex = if_nametoindex(TX_NAME);
+ ret = open_xsk(tx_ifindex, &tx_xsk);
+ if (!ASSERT_OK(ret, "open_xsk(TX_NAME)"))
+ goto out;
+
+ if (!ASSERT_GE(generate_packet(&tx_xsk, AF_XDP_CONSUMER_PORT), 0,
+ "generate AF_XDP_CONSUMER_PORT"))
+ goto out;
+
+ switch_ns_to_rx(&tok);
+ if (!ASSERT_OK_PTR(tok, "setns rx"))
+ goto out;
+
+ /* Verify packet sent from AF_XDP has proper metadata. */
+ if (!ASSERT_GE(verify_xsk_metadata(&rx_xsk, true), 0,
+ "verify_xsk_metadata"))
+ goto out;
+
+ switch_ns_to_tx(&tok);
+ if (!ASSERT_OK_PTR(tok, "setns tx"))
+ goto out;
+ complete_tx(&tx_xsk);
+
+	/* Now check the metadata of a packet generated by the network stack */
+ if (!ASSERT_GE(generate_packet_inet(), 0, "generate UDP packet"))
+ goto out;
+
+ switch_ns_to_rx(&tok);
+ if (!ASSERT_OK_PTR(tok, "setns rx"))
+ goto out;
+
+ if (!ASSERT_GE(verify_xsk_metadata(&rx_xsk, false), 0,
+ "verify_xsk_metadata"))
+ goto out;
+
+	/* Make sure freplace correctly picks up the originally bound device
+ * and doesn't crash.
+ */
+
+ bpf_obj2 = xdp_metadata2__open();
+ if (!ASSERT_OK_PTR(bpf_obj2, "open skeleton"))
+ goto out;
+
+ new_prog = bpf_object__find_program_by_name(bpf_obj2->obj, "freplace_rx");
+ bpf_program__set_attach_target(new_prog, bpf_program__fd(prog), "rx");
+
+ if (!ASSERT_OK(xdp_metadata2__load(bpf_obj2), "load freplace skeleton"))
+ goto out;
+
+ if (!ASSERT_OK(xdp_metadata2__attach(bpf_obj2), "attach freplace"))
+ goto out;
+
+ switch_ns_to_tx(&tok);
+ if (!ASSERT_OK_PTR(tok, "setns tx"))
+ goto out;
+
+	/* Send a packet to trigger the freplace program. */
+ if (!ASSERT_GE(generate_packet(&tx_xsk, AF_XDP_CONSUMER_PORT), 0,
+ "generate freplace packet"))
+ goto out;
+
+ switch_ns_to_rx(&tok);
+ if (!ASSERT_OK_PTR(tok, "setns rx"))
+ goto out;
+
+	while (retries--) {
+ if (bpf_obj2->bss->called)
+ break;
+ usleep(10);
+ }
+ ASSERT_GT(bpf_obj2->bss->called, 0, "not called");
+
+out:
+ close_xsk(&rx_xsk);
+ close_xsk(&tx_xsk);
+ xdp_metadata2__destroy(bpf_obj2);
+ xdp_metadata__destroy(bpf_obj);
+ if (tok)
+ close_netns(tok);
+ SYS_NOFAIL("ip netns del " RX_NETNS_NAME);
+ SYS_NOFAIL("ip netns del " TX_NETNS_NAME);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
index 0281095de266..92ef0aa50866 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
@@ -25,43 +25,49 @@ void test_xdp_noinline(void)
__u8 flags;
} real_def = {.dst = MAGIC_VAL};
__u32 ch_key = 11, real_num = 3;
- __u32 duration = 0, retval, size;
int err, i;
__u64 bytes = 0, pkts = 0;
char buf[128];
u32 *magic = (u32 *)buf;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = buf,
+ .data_size_out = sizeof(buf),
+ .repeat = NUM_ITER,
+ );
skel = test_xdp_noinline__open_and_load();
- if (CHECK(!skel, "skel_open_and_load", "failed\n"))
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
bpf_map_update_elem(bpf_map__fd(skel->maps.vip_map), &key, &value, 0);
bpf_map_update_elem(bpf_map__fd(skel->maps.ch_rings), &ch_key, &real_num, 0);
bpf_map_update_elem(bpf_map__fd(skel->maps.reals), &real_num, &real_def, 0);
- err = bpf_prog_test_run(bpf_program__fd(skel->progs.balancer_ingress_v4),
- NUM_ITER, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 1 || size != 54 ||
- *magic != MAGIC_VAL, "ipv4",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.balancer_ingress_v4), &topts);
+ ASSERT_OK(err, "ipv4 test_run");
+ ASSERT_EQ(topts.retval, 1, "ipv4 test_run retval");
+ ASSERT_EQ(topts.data_size_out, 54, "ipv4 test_run data_size_out");
+ ASSERT_EQ(*magic, MAGIC_VAL, "ipv4 test_run magic");
- err = bpf_prog_test_run(bpf_program__fd(skel->progs.balancer_ingress_v6),
- NUM_ITER, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 1 || size != 74 ||
- *magic != MAGIC_VAL, "ipv6",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
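+	/* The IPv4 run overwrote data_size_out with the actual output size,
+	 * so point the buffers at pkt_v6 and reset the sizes before rerunning.
+	 */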
+ topts.data_in = &pkt_v6;
+ topts.data_size_in = sizeof(pkt_v6);
+ topts.data_out = buf;
+ topts.data_size_out = sizeof(buf);
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.balancer_ingress_v6), &topts);
+ ASSERT_OK(err, "ipv6 test_run");
+ ASSERT_EQ(topts.retval, 1, "ipv6 test_run retval");
+ ASSERT_EQ(topts.data_size_out, 74, "ipv6 test_run data_size_out");
+ ASSERT_EQ(*magic, MAGIC_VAL, "ipv6 test_run magic");
bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats), &stats_key, stats);
for (i = 0; i < nr_cpus; i++) {
bytes += stats[i].bytes;
pkts += stats[i].pkts;
}
- CHECK(bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2,
- "stats", "bytes %lld pkts %lld\n",
- (unsigned long long)bytes, (unsigned long long)pkts);
+ ASSERT_EQ(bytes, MAGIC_BYTES * NUM_ITER * 2, "stats bytes");
+ ASSERT_EQ(pkts, NUM_ITER * 2, "stats pkts");
test_xdp_noinline__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c
index 7185bee16fe4..ec5369f247cb 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c
@@ -3,23 +3,26 @@
void test_xdp_perf(void)
{
- const char *file = "./xdp_dummy.o";
- __u32 duration, retval, size;
+ const char *file = "./xdp_dummy.bpf.o";
struct bpf_object *obj;
char in[128], out[128];
int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = in,
+ .data_size_in = sizeof(in),
+ .data_out = out,
+ .data_size_out = sizeof(out),
+ .repeat = 1000000,
+ );
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
- err = bpf_prog_test_run(prog_fd, 1000000, &in[0], 128,
- out, &size, &retval, &duration);
-
- CHECK(err || retval != XDP_PASS || size != 128,
- "xdp-perf",
- "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, XDP_PASS, "test_run retval");
+ ASSERT_EQ(topts.data_size_out, 128, "test_run data_size_out");
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_pull_data.c b/tools/testing/selftests/bpf/prog_tests/xdp_pull_data.c
new file mode 100644
index 000000000000..efa350d04ec5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_pull_data.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "test_xdp_pull_data.skel.h"
+
+#define PULL_MAX (1 << 31)
+#define PULL_PLUS_ONE (1 << 30)
+
+#define XDP_PACKET_HEADROOM 256
+
+/* Find headroom and tailroom occupied by struct xdp_frame and struct
+ * skb_shared_info so that we can calculate the maximum pull lengths for
+ * test cases. They might not be the real size of the structures due to
+ * cache alignment.
+ */
+static int find_xdp_sizes(struct test_xdp_pull_data *skel, int frame_sz)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct xdp_md ctx = {};
+ int prog_fd, err;
+ __u8 *buf;
+
+ buf = calloc(frame_sz, sizeof(__u8));
+ if (!ASSERT_OK_PTR(buf, "calloc buf"))
+ return -ENOMEM;
+
+ topts.data_in = buf;
+ topts.data_out = buf;
+ topts.data_size_in = frame_sz;
+ topts.data_size_out = frame_sz;
+ /* Pass a data_end larger than the linear space available to make sure
+ * bpf_prog_test_run_xdp() will fill the linear data area so that
+ * xdp_find_sizes can infer the size of struct skb_shared_info
+ */
+ ctx.data_end = frame_sz;
+ topts.ctx_in = &ctx;
+ topts.ctx_out = &ctx;
+ topts.ctx_size_in = sizeof(ctx);
+ topts.ctx_size_out = sizeof(ctx);
+
+ prog_fd = bpf_program__fd(skel->progs.xdp_find_sizes);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+
+ free(buf);
+
+ return err;
+}
+
+/* xdp_pull_data_prog will directly read a marker 0xbb stored at buf[1024]
+ * so callers expecting XDP_PASS should always pass a pull_len of at least 1024.
+ */
+static void run_test(struct test_xdp_pull_data *skel, int retval,
+ int frame_sz, int buff_len, int meta_len, int data_len,
+ int pull_len)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct xdp_md ctx = {};
+ int prog_fd, err;
+ __u8 *buf;
+
+ buf = calloc(buff_len, sizeof(__u8));
+ if (!ASSERT_OK_PTR(buf, "calloc buf"))
+ return;
+
+ buf[meta_len + 1023] = 0xaa;
+ buf[meta_len + 1024] = 0xbb;
+ buf[meta_len + 1025] = 0xcc;
+
+ topts.data_in = buf;
+ topts.data_out = buf;
+ topts.data_size_in = buff_len;
+ topts.data_size_out = buff_len;
+ ctx.data = meta_len;
+ ctx.data_end = meta_len + data_len;
+ topts.ctx_in = &ctx;
+ topts.ctx_out = &ctx;
+ topts.ctx_size_in = sizeof(ctx);
+ topts.ctx_size_out = sizeof(ctx);
+
+ skel->bss->data_len = data_len;
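+	/* PULL_MAX requests the largest valid pull length (headroom +
+	 * tailroom + current linear data); PULL_PLUS_ONE bumps it one past
+	 * that maximum to exercise the failure path.
+	 */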
+ if (pull_len & PULL_MAX) {
+ int headroom = XDP_PACKET_HEADROOM - meta_len - skel->bss->xdpf_sz;
+ int tailroom = frame_sz - XDP_PACKET_HEADROOM -
+ data_len - skel->bss->sinfo_sz;
+
+ pull_len = pull_len & PULL_PLUS_ONE ? 1 : 0;
+ pull_len += headroom + tailroom + data_len;
+ }
+ skel->bss->pull_len = pull_len;
+
+ prog_fd = bpf_program__fd(skel->progs.xdp_pull_data_prog);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+ ASSERT_EQ(topts.retval, retval, "xdp_pull_data_prog retval");
+
+ if (retval == XDP_DROP)
+ goto out;
+
+ ASSERT_EQ(ctx.data_end, meta_len + pull_len, "linear data size");
+ ASSERT_EQ(topts.data_size_out, buff_len, "linear + non-linear data size");
+ /* Make sure data around xdp->data_end was not messed up by
+ * bpf_xdp_pull_data()
+ */
+ ASSERT_EQ(buf[meta_len + 1023], 0xaa, "data[1023]");
+ ASSERT_EQ(buf[meta_len + 1024], 0xbb, "data[1024]");
+ ASSERT_EQ(buf[meta_len + 1025], 0xcc, "data[1025]");
+out:
+ free(buf);
+}
+
+static void test_xdp_pull_data_basic(void)
+{
+ u32 pg_sz, max_meta_len, max_data_len;
+ struct test_xdp_pull_data *skel;
+
+ skel = test_xdp_pull_data__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_xdp_pull_data__open_and_load"))
+ return;
+
+ pg_sz = sysconf(_SC_PAGE_SIZE);
+
+ if (find_xdp_sizes(skel, pg_sz))
+ goto out;
+
+ max_meta_len = XDP_PACKET_HEADROOM - skel->bss->xdpf_sz;
+ max_data_len = pg_sz - XDP_PACKET_HEADROOM - skel->bss->sinfo_sz;
+
+ /* linear xdp pkt, pull 0 byte */
+ run_test(skel, XDP_PASS, pg_sz, 2048, 0, 2048, 2048);
+
+ /* multi-buf pkt, pull results in linear xdp pkt */
+ run_test(skel, XDP_PASS, pg_sz, 2048, 0, 1024, 2048);
+
+ /* multi-buf pkt, pull 1 byte to linear data area */
+ run_test(skel, XDP_PASS, pg_sz, 9000, 0, 1024, 1025);
+
+ /* multi-buf pkt, pull 0 byte to linear data area */
+ run_test(skel, XDP_PASS, pg_sz, 9000, 0, 1025, 1025);
+
+ /* multi-buf pkt, empty linear data area, pull requires memmove */
+ run_test(skel, XDP_PASS, pg_sz, 9000, 0, 0, PULL_MAX);
+
+ /* multi-buf pkt, no headroom */
+ run_test(skel, XDP_PASS, pg_sz, 9000, max_meta_len, 1024, PULL_MAX);
+
+ /* multi-buf pkt, no tailroom, pull requires memmove */
+ run_test(skel, XDP_PASS, pg_sz, 9000, 0, max_data_len, PULL_MAX);
+
+ /* Test cases with invalid pull length */
+
+ /* linear xdp pkt, pull more than total data len */
+ run_test(skel, XDP_DROP, pg_sz, 2048, 0, 2048, 2049);
+
+ /* multi-buf pkt with no space left in linear data area */
+ run_test(skel, XDP_DROP, pg_sz, 9000, max_meta_len, max_data_len,
+ PULL_MAX | PULL_PLUS_ONE);
+
+ /* multi-buf pkt, empty linear data area */
+ run_test(skel, XDP_DROP, pg_sz, 9000, 0, 0, PULL_MAX | PULL_PLUS_ONE);
+
+ /* multi-buf pkt, no headroom */
+ run_test(skel, XDP_DROP, pg_sz, 9000, max_meta_len, 1024,
+ PULL_MAX | PULL_PLUS_ONE);
+
+ /* multi-buf pkt, no tailroom */
+ run_test(skel, XDP_DROP, pg_sz, 9000, 0, max_data_len,
+ PULL_MAX | PULL_PLUS_ONE);
+
+out:
+ test_xdp_pull_data__destroy(skel);
+}
+
+void test_xdp_pull_data(void)
+{
+ if (test__start_subtest("xdp_pull_data"))
+ test_xdp_pull_data_basic();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
new file mode 100644
index 000000000000..8b50a992d233
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#define _GNU_SOURCE
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <ctype.h>
+
+#define CMD_OUT_BUF_SIZE 1023
+
+#define SYS_OUT(cmd, ...) ({ \
+ char buf[1024]; \
+ snprintf(buf, sizeof(buf), (cmd), ##__VA_ARGS__); \
+ FILE *f = popen(buf, "r"); \
+ if (!ASSERT_OK_PTR(f, buf)) \
+ goto out; \
+ f; \
+})
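+
+/* Note: SYS_OUT() jumps to the enclosing function's "out" label on failure,
+ * so it can only be used inside a function that defines one.
+ */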
+
+/* out must be at least `size * 4 + 1` bytes long */
+static void escape_str(char *out, const char *in, size_t size)
+{
+ static const char *hex = "0123456789ABCDEF";
+ size_t i;
+
+ for (i = 0; i < size; i++) {
+ if (isprint(in[i]) && in[i] != '\\' && in[i] != '\'') {
+ *out++ = in[i];
+ } else {
+ *out++ = '\\';
+ *out++ = 'x';
+ *out++ = hex[(in[i] >> 4) & 0xf];
+ *out++ = hex[in[i] & 0xf];
+ }
+ }
+ *out++ = '\0';
+}
+
+static bool expect_str(char *buf, size_t size, const char *str, const char *name)
+{
+	/* escape_str() may emit up to four characters per input byte plus a
+	 * terminating NUL, hence the "* 4 + 1" sizing.
+	 */
+	static char escbuf_expected[CMD_OUT_BUF_SIZE * 4 + 1];
+	static char escbuf_actual[CMD_OUT_BUF_SIZE * 4 + 1];
+ static int duration = 0;
+ bool ok;
+
+ ok = size == strlen(str) && !memcmp(buf, str, size);
+
+ if (!ok) {
+ escape_str(escbuf_expected, str, strlen(str));
+ escape_str(escbuf_actual, buf, size);
+ }
+ CHECK(!ok, name, "unexpected %s: actual '%s' != expected '%s'\n",
+ name, escbuf_actual, escbuf_expected);
+
+ return ok;
+}
+
+static void test_synproxy(bool xdp)
+{
+ int server_fd = -1, client_fd = -1, accept_fd = -1;
+ char *prog_id = NULL, *prog_id_end;
+ struct nstoken *ns = NULL;
+ FILE *ctrl_file = NULL;
+ char buf[CMD_OUT_BUF_SIZE];
+ size_t size;
+
+ SYS(out, "ip netns add synproxy");
+
+ SYS(out, "ip link add tmp0 type veth peer name tmp1");
+ SYS(out, "ip link set tmp1 netns synproxy");
+ SYS(out, "ip link set tmp0 up");
+ SYS(out, "ip addr replace 198.18.0.1/24 dev tmp0");
+
+ /* When checksum offload is enabled, the XDP program sees wrong
+ * checksums and drops packets.
+ */
+ SYS(out, "ethtool -K tmp0 tx off");
+ if (xdp)
+ /* Workaround required for veth. */
+ SYS(out, "ip link set tmp0 xdp object xdp_dummy.bpf.o section xdp 2> /dev/null");
+
+ ns = open_netns("synproxy");
+ if (!ASSERT_OK_PTR(ns, "setns"))
+ goto out;
+
+ SYS(out, "ip link set lo up");
+ SYS(out, "ip link set tmp1 up");
+ SYS(out, "ip addr replace 198.18.0.2/24 dev tmp1");
+ SYS(out, "sysctl -w net.ipv4.tcp_syncookies=2");
+ SYS(out, "sysctl -w net.ipv4.tcp_timestamps=1");
+ SYS(out, "sysctl -w net.netfilter.nf_conntrack_tcp_loose=0");
+ SYS(out, "iptables-legacy -t raw -I PREROUTING \
+ -i tmp1 -p tcp -m tcp --syn --dport 8080 -j CT --notrack");
+ SYS(out, "iptables-legacy -t filter -A INPUT \
+ -i tmp1 -p tcp -m tcp --dport 8080 -m state --state INVALID,UNTRACKED \
+ -j SYNPROXY --sack-perm --timestamp --wscale 7 --mss 1460");
+ SYS(out, "iptables-legacy -t filter -A INPUT \
+ -i tmp1 -m state --state INVALID -j DROP");
+
+ ctrl_file = SYS_OUT("./xdp_synproxy --iface tmp1 --ports 8080 \
+ --single --mss4 1460 --mss6 1440 \
+ --wscale 7 --ttl 64%s", xdp ? "" : " --tc");
+ size = fread(buf, 1, sizeof(buf), ctrl_file);
+ pclose(ctrl_file);
+ if (!expect_str(buf, size, "Total SYNACKs generated: 0\n",
+ "initial SYNACKs"))
+ goto out;
+
+ if (!xdp) {
+ ctrl_file = SYS_OUT("tc filter show dev tmp1 ingress");
+ size = fread(buf, 1, sizeof(buf), ctrl_file);
+ pclose(ctrl_file);
+ prog_id = memmem(buf, size, " id ", 4);
+ if (!ASSERT_OK_PTR(prog_id, "find prog id"))
+ goto out;
+ prog_id += 4;
+ if (!ASSERT_LT(prog_id, buf + size, "find prog id begin"))
+ goto out;
+ prog_id_end = prog_id;
+ while (prog_id_end < buf + size && *prog_id_end >= '0' &&
+ *prog_id_end <= '9')
+ prog_id_end++;
+ if (!ASSERT_LT(prog_id_end, buf + size, "find prog id end"))
+ goto out;
+ *prog_id_end = '\0';
+ }
+
+ server_fd = start_server(AF_INET, SOCK_STREAM, "198.18.0.2", 8080, 0);
+ if (!ASSERT_GE(server_fd, 0, "start_server"))
+ goto out;
+
+ close_netns(ns);
+ ns = NULL;
+
+ client_fd = connect_to_fd(server_fd, 10000);
+ if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+ goto out;
+
+ accept_fd = accept(server_fd, NULL, NULL);
+ if (!ASSERT_GE(accept_fd, 0, "accept"))
+ goto out;
+
+ ns = open_netns("synproxy");
+ if (!ASSERT_OK_PTR(ns, "setns"))
+ goto out;
+
+ if (xdp)
+ ctrl_file = SYS_OUT("./xdp_synproxy --iface tmp1 --single");
+ else
+ ctrl_file = SYS_OUT("./xdp_synproxy --prog %s --single",
+ prog_id);
+ size = fread(buf, 1, sizeof(buf), ctrl_file);
+ pclose(ctrl_file);
+ if (!expect_str(buf, size, "Total SYNACKs generated: 1\n",
+ "SYNACKs after connection"))
+ goto out;
+
+out:
+ if (accept_fd >= 0)
+ close(accept_fd);
+ if (client_fd >= 0)
+ close(client_fd);
+ if (server_fd >= 0)
+ close(server_fd);
+ if (ns)
+ close_netns(ns);
+
+ SYS_NOFAIL("ip link del tmp0");
+ SYS_NOFAIL("ip netns del synproxy");
+}
+
+void test_xdp_synproxy(void)
+{
+ if (test__start_subtest("xdp"))
+ test_synproxy(true);
+ if (test__start_subtest("tc"))
+ test_synproxy(false);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_vlan.c b/tools/testing/selftests/bpf/prog_tests/xdp_vlan.c
new file mode 100644
index 000000000000..18dd25344de7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_vlan.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Network topology:
+ * ----------- -----------
+ * | NS1 | | NS2 |
+ * | veth0 -|--------|- veth0 |
+ * ----------- -----------
+ *
+ */
+
+#define _GNU_SOURCE
+#include <net/if.h>
+#include <uapi/linux/if_link.h>
+
+#include "network_helpers.h"
+#include "test_progs.h"
+#include "test_xdp_vlan.skel.h"
+
+
+#define VETH_NAME "veth0"
+#define NS_MAX_SIZE 32
+#define NS1_NAME "ns-xdp-vlan-1-"
+#define NS2_NAME "ns-xdp-vlan-2-"
+#define NS1_IP_ADDR "100.64.10.1"
+#define NS2_IP_ADDR "100.64.10.2"
+#define VLAN_ID 4011
+
+static int setup_network(char *ns1, char *ns2)
+{
+ if (!ASSERT_OK(append_tid(ns1, NS_MAX_SIZE), "create ns1 name"))
+ goto fail;
+ if (!ASSERT_OK(append_tid(ns2, NS_MAX_SIZE), "create ns2 name"))
+ goto fail;
+
+ SYS(fail, "ip netns add %s", ns1);
+ SYS(fail, "ip netns add %s", ns2);
+ SYS(fail, "ip -n %s link add %s type veth peer name %s netns %s",
+ ns1, VETH_NAME, VETH_NAME, ns2);
+
+	/* NOTICE: XDP requires the VLAN header to be inside the packet payload
+	 * - thus, disable the VLAN offloading driver features
+ */
+ SYS(fail, "ip netns exec %s ethtool -K %s rxvlan off txvlan off", ns1, VETH_NAME);
+ SYS(fail, "ip netns exec %s ethtool -K %s rxvlan off txvlan off", ns2, VETH_NAME);
+
+ /* NS1 configuration */
+ SYS(fail, "ip -n %s addr add %s/24 dev %s", ns1, NS1_IP_ADDR, VETH_NAME);
+ SYS(fail, "ip -n %s link set %s up", ns1, VETH_NAME);
+
+ /* NS2 configuration */
+ SYS(fail, "ip -n %s link add link %s name %s.%d type vlan id %d",
+ ns2, VETH_NAME, VETH_NAME, VLAN_ID, VLAN_ID);
+ SYS(fail, "ip -n %s addr add %s/24 dev %s.%d", ns2, NS2_IP_ADDR, VETH_NAME, VLAN_ID);
+ SYS(fail, "ip -n %s link set %s up", ns2, VETH_NAME);
+ SYS(fail, "ip -n %s link set %s.%d up", ns2, VETH_NAME, VLAN_ID);
+
+ /* At this point ping should fail because VLAN tags are only used by NS2 */
+ return !SYS_NOFAIL("ip netns exec %s ping -W 1 -c1 %s", ns2, NS1_IP_ADDR);
+
+fail:
+ return -1;
+}
+
+static void cleanup_network(const char *ns1, const char *ns2)
+{
+ SYS_NOFAIL("ip netns del %s", ns1);
+ SYS_NOFAIL("ip netns del %s", ns2);
+}
+
+static void xdp_vlan(struct bpf_program *xdp, struct bpf_program *tc, u32 flags)
+{
+ LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_EGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
+ char ns1[NS_MAX_SIZE] = NS1_NAME;
+ char ns2[NS_MAX_SIZE] = NS2_NAME;
+ struct nstoken *nstoken = NULL;
+ int interface;
+ int ret;
+
+ if (!ASSERT_OK(setup_network(ns1, ns2), "setup network"))
+ goto cleanup;
+
+ nstoken = open_netns(ns1);
+ if (!ASSERT_OK_PTR(nstoken, "open NS1"))
+ goto cleanup;
+
+ interface = if_nametoindex(VETH_NAME);
+ if (!ASSERT_NEQ(interface, 0, "get interface index"))
+ goto cleanup;
+
+ ret = bpf_xdp_attach(interface, bpf_program__fd(xdp), flags, NULL);
+ if (!ASSERT_OK(ret, "attach xdp_vlan_change"))
+ goto cleanup;
+
+ tc_hook.ifindex = interface;
+ ret = bpf_tc_hook_create(&tc_hook);
+ if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
+ goto detach_xdp;
+
+ /* Now we'll use BPF programs to pop/push the VLAN tags */
+ tc_opts.prog_fd = bpf_program__fd(tc);
+ ret = bpf_tc_attach(&tc_hook, &tc_opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach"))
+ goto detach_xdp;
+
+ close_netns(nstoken);
+ nstoken = NULL;
+
+	/* Now the namespaces can reach each other; test with pings */
+ SYS(detach_tc, "ip netns exec %s ping -i 0.2 -W 2 -c 2 %s > /dev/null", ns1, NS2_IP_ADDR);
+ SYS(detach_tc, "ip netns exec %s ping -i 0.2 -W 2 -c 2 %s > /dev/null", ns2, NS1_IP_ADDR);
+
+detach_tc:
+ bpf_tc_detach(&tc_hook, &tc_opts);
+detach_xdp:
+ bpf_xdp_detach(interface, flags, NULL);
+cleanup:
+ close_netns(nstoken);
+ cleanup_network(ns1, ns2);
+}
+
+/* First test: remove the VLAN tag by setting VLAN ID 0 using "xdp_vlan_change";
+ * on egress, use TC to add back VLAN tag 4011
+ */
+void test_xdp_vlan_change(void)
+{
+ struct test_xdp_vlan *skel;
+
+ skel = test_xdp_vlan__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "xdp_vlan__open_and_load"))
+ return;
+
+ if (test__start_subtest("0"))
+ xdp_vlan(skel->progs.xdp_vlan_change, skel->progs.tc_vlan_push, 0);
+
+ if (test__start_subtest("DRV_MODE"))
+ xdp_vlan(skel->progs.xdp_vlan_change, skel->progs.tc_vlan_push,
+ XDP_FLAGS_DRV_MODE);
+
+ if (test__start_subtest("SKB_MODE"))
+ xdp_vlan(skel->progs.xdp_vlan_change, skel->progs.tc_vlan_push,
+ XDP_FLAGS_SKB_MODE);
+
+ test_xdp_vlan__destroy(skel);
+}
+
+/* Second test: XDP prog fully removes the VLAN header
+ *
+ * Catch a kernel bug in generic-XDP that doesn't allow us to
+ * remove the VLAN header: skb->protocol still contains the VLAN
+ * ETH_P_8021Q indication, which causes our changes to be overwritten.
+ */
+void test_xdp_vlan_remove(void)
+{
+ struct test_xdp_vlan *skel;
+
+ skel = test_xdp_vlan__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "xdp_vlan__open_and_load"))
+ return;
+
+ if (test__start_subtest("0"))
+ xdp_vlan(skel->progs.xdp_vlan_remove_outer2, skel->progs.tc_vlan_push, 0);
+
+ if (test__start_subtest("DRV_MODE"))
+ xdp_vlan(skel->progs.xdp_vlan_remove_outer2, skel->progs.tc_vlan_push,
+ XDP_FLAGS_DRV_MODE);
+
+ if (test__start_subtest("SKB_MODE"))
+ xdp_vlan(skel->progs.xdp_vlan_remove_outer2, skel->progs.tc_vlan_push,
+ XDP_FLAGS_SKB_MODE);
+
+ test_xdp_vlan__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdpwall.c b/tools/testing/selftests/bpf/prog_tests/xdpwall.c
new file mode 100644
index 000000000000..4599154c8e9b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdpwall.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "test_progs.h"
+#include "xdpwall.skel.h"
+
+void test_xdpwall(void)
+{
+ struct xdpwall *skel;
+
+ skel = xdpwall__open_and_load();
+ ASSERT_OK_PTR(skel, "Does LLVM have https://github.com/llvm/llvm-project/commit/ea72b0319d7b0f0c2fcf41d121afa5d031b319d5?");
+
+ xdpwall__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xfrm_info.c b/tools/testing/selftests/bpf/prog_tests/xfrm_info.c
new file mode 100644
index 000000000000..d37f5394e199
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xfrm_info.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * Topology:
+ * ---------
+ * NS0 namespace | NS1 namespace | NS2 namespace
+ * | |
+ * +---------------+ | +---------------+ |
+ * | ipsec0 |---------| ipsec0 | |
+ * | 192.168.1.100 | | | 192.168.1.200 | |
+ * | if_id: bpf | | +---------------+ |
+ * +---------------+ | |
+ * | | | +---------------+
+ * | | | | ipsec0 |
+ * \------------------------------------------| 192.168.1.200 |
+ * | | +---------------+
+ * | |
+ * | | (overlay network)
+ * ------------------------------------------------------
+ * | | (underlay network)
+ * +--------------+ | +--------------+ |
+ * | veth01 |----------| veth10 | |
+ * | 172.16.1.100 | | | 172.16.1.200 | |
+ * ---------------+ | +--------------+ |
+ * | |
+ * +--------------+ | | +--------------+
+ * | veth02 |-----------------------------------| veth20 |
+ * | 172.16.2.100 | | | | 172.16.2.200 |
+ * +--------------+ | | +--------------+
+ *
+ *
+ * Test Packet flow
+ * -----------
+ * The tests perform 'ping 192.168.1.200' from the NS0 namespace:
+ * 1) request is routed to NS0 ipsec0
+ * 2) NS0 ipsec0 tc egress BPF program is triggered and sets the if_id based
+ * on the requested value. This makes the ipsec0 device in external mode
+ * select the destination tunnel
+ * 3) ping reaches the other namespace (NS1 or NS2 based on which if_id was
+ * used) and response is sent
+ * 4) response is received on NS0 ipsec0, tc ingress program is triggered and
+ * records the response if_id
+ * 5) requested if_id is compared with received if_id
+ */
+
+#include <net/if.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_link.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "xfrm_info.skel.h"
+
+#define NS0 "xfrm_test_ns0"
+#define NS1 "xfrm_test_ns1"
+#define NS2 "xfrm_test_ns2"
+
+#define IF_ID_0_TO_1 1
+#define IF_ID_0_TO_2 2
+#define IF_ID_1 3
+#define IF_ID_2 4
+
+#define IP4_ADDR_VETH01 "172.16.1.100"
+#define IP4_ADDR_VETH10 "172.16.1.200"
+#define IP4_ADDR_VETH02 "172.16.2.100"
+#define IP4_ADDR_VETH20 "172.16.2.200"
+
+#define ESP_DUMMY_PARAMS \
+ "proto esp aead 'rfc4106(gcm(aes))' " \
+ "0xe4d8f4b4da1df18a3510b3781496daa82488b713 128 mode tunnel "
+
+static int attach_tc_prog(struct bpf_tc_hook *hook, int igr_fd, int egr_fd)
+{
+ LIBBPF_OPTS(bpf_tc_opts, opts1, .handle = 1, .priority = 1,
+ .prog_fd = igr_fd);
+ LIBBPF_OPTS(bpf_tc_opts, opts2, .handle = 1, .priority = 1,
+ .prog_fd = egr_fd);
+ int ret;
+
+ ret = bpf_tc_hook_create(hook);
+ if (!ASSERT_OK(ret, "create tc hook"))
+ return ret;
+
+ if (igr_fd >= 0) {
+ hook->attach_point = BPF_TC_INGRESS;
+ ret = bpf_tc_attach(hook, &opts1);
+ if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+ bpf_tc_hook_destroy(hook);
+ return ret;
+ }
+ }
+
+ if (egr_fd >= 0) {
+ hook->attach_point = BPF_TC_EGRESS;
+ ret = bpf_tc_attach(hook, &opts2);
+ if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+ bpf_tc_hook_destroy(hook);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void cleanup(void)
+{
+ SYS_NOFAIL("test -f /var/run/netns/" NS0 " && ip netns delete " NS0);
+ SYS_NOFAIL("test -f /var/run/netns/" NS1 " && ip netns delete " NS1);
+ SYS_NOFAIL("test -f /var/run/netns/" NS2 " && ip netns delete " NS2);
+}
+
+static int config_underlay(void)
+{
+ SYS(fail, "ip netns add " NS0);
+ SYS(fail, "ip netns add " NS1);
+ SYS(fail, "ip netns add " NS2);
+
+ /* NS0 <-> NS1 [veth01 <-> veth10] */
+ SYS(fail, "ip link add veth01 netns " NS0 " type veth peer name veth10 netns " NS1);
+ SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH01 "/24 dev veth01");
+ SYS(fail, "ip -net " NS0 " link set dev veth01 up");
+ SYS(fail, "ip -net " NS1 " addr add " IP4_ADDR_VETH10 "/24 dev veth10");
+ SYS(fail, "ip -net " NS1 " link set dev veth10 up");
+
+ /* NS0 <-> NS2 [veth02 <-> veth20] */
+ SYS(fail, "ip link add veth02 netns " NS0 " type veth peer name veth20 netns " NS2);
+ SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH02 "/24 dev veth02");
+ SYS(fail, "ip -net " NS0 " link set dev veth02 up");
+ SYS(fail, "ip -net " NS2 " addr add " IP4_ADDR_VETH20 "/24 dev veth20");
+ SYS(fail, "ip -net " NS2 " link set dev veth20 up");
+
+ return 0;
+fail:
+ return -1;
+}
+
+static int setup_xfrm_tunnel_ns(const char *ns, const char *ipv4_local,
+ const char *ipv4_remote, int if_id)
+{
+ /* State: local -> remote */
+ SYS(fail, "ip -net %s xfrm state add src %s dst %s spi 1 "
+ ESP_DUMMY_PARAMS "if_id %d", ns, ipv4_local, ipv4_remote, if_id);
+
+ /* State: local <- remote */
+ SYS(fail, "ip -net %s xfrm state add src %s dst %s spi 1 "
+ ESP_DUMMY_PARAMS "if_id %d", ns, ipv4_remote, ipv4_local, if_id);
+
+ /* Policy: local -> remote */
+ SYS(fail, "ip -net %s xfrm policy add dir out src 0.0.0.0/0 dst 0.0.0.0/0 "
+ "if_id %d tmpl src %s dst %s proto esp mode tunnel if_id %d", ns,
+ if_id, ipv4_local, ipv4_remote, if_id);
+
+ /* Policy: local <- remote */
+ SYS(fail, "ip -net %s xfrm policy add dir in src 0.0.0.0/0 dst 0.0.0.0/0 "
+ "if_id %d tmpl src %s dst %s proto esp mode tunnel if_id %d", ns,
+ if_id, ipv4_remote, ipv4_local, if_id);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static int setup_xfrm_tunnel(const char *ns_a, const char *ns_b,
+ const char *ipv4_a, const char *ipv4_b,
+ int if_id_a, int if_id_b)
+{
+ return setup_xfrm_tunnel_ns(ns_a, ipv4_a, ipv4_b, if_id_a) ||
+ setup_xfrm_tunnel_ns(ns_b, ipv4_b, ipv4_a, if_id_b);
+}
+
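+/* Minimal rtnetlink attribute helpers for hand-rolling the RTM_NEWLINK
+ * request below: append a raw or string attribute, and open/close a
+ * nested attribute.
+ */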
+static struct rtattr *rtattr_add(struct nlmsghdr *nh, unsigned short type,
+ unsigned short len)
+{
+ struct rtattr *rta =
+ (struct rtattr *)((uint8_t *)nh + RTA_ALIGN(nh->nlmsg_len));
+ rta->rta_type = type;
+ rta->rta_len = RTA_LENGTH(len);
+ nh->nlmsg_len = RTA_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
+ return rta;
+}
+
+static struct rtattr *rtattr_add_str(struct nlmsghdr *nh, unsigned short type,
+ const char *s)
+{
+ struct rtattr *rta = rtattr_add(nh, type, strlen(s));
+
+ memcpy(RTA_DATA(rta), s, strlen(s));
+ return rta;
+}
+
+static struct rtattr *rtattr_begin(struct nlmsghdr *nh, unsigned short type)
+{
+ return rtattr_add(nh, type, 0);
+}
+
+static void rtattr_end(struct nlmsghdr *nh, struct rtattr *attr)
+{
+ uint8_t *end = (uint8_t *)nh + nh->nlmsg_len;
+
+ attr->rta_len = end - (uint8_t *)attr;
+}
+
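+/* Create the ipsec0 xfrm device in external (collect_md) mode with a raw
+ * netlink RTM_NEWLINK request, since setting IFLA_XFRM_COLLECT_METADATA is
+ * not supported by older iproute2 (see the caller's comment).
+ */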
+static int setup_xfrmi_external_dev(const char *ns)
+{
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg info;
+ unsigned char data[128];
+ } req;
+ struct rtattr *link_info, *info_data;
+ struct nstoken *nstoken;
+ int ret = -1, sock = -1;
+ struct nlmsghdr *nh;
+
+ memset(&req, 0, sizeof(req));
+ nh = &req.nh;
+ nh->nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
+ nh->nlmsg_type = RTM_NEWLINK;
+ nh->nlmsg_flags |= NLM_F_CREATE | NLM_F_REQUEST;
+
+ rtattr_add_str(nh, IFLA_IFNAME, "ipsec0");
+ link_info = rtattr_begin(nh, IFLA_LINKINFO);
+ rtattr_add_str(nh, IFLA_INFO_KIND, "xfrm");
+ info_data = rtattr_begin(nh, IFLA_INFO_DATA);
+ rtattr_add(nh, IFLA_XFRM_COLLECT_METADATA, 0);
+ rtattr_end(nh, info_data);
+ rtattr_end(nh, link_info);
+
+ nstoken = open_netns(ns);
+ if (!ASSERT_OK_PTR(nstoken, "setns"))
+ goto done;
+
+ sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
+ if (!ASSERT_GE(sock, 0, "netlink socket"))
+ goto done;
+ ret = send(sock, nh, nh->nlmsg_len, 0);
+ if (!ASSERT_EQ(ret, nh->nlmsg_len, "netlink send length"))
+ goto done;
+
+ ret = 0;
+done:
+ if (sock != -1)
+ close(sock);
+ if (nstoken)
+ close_netns(nstoken);
+ return ret;
+}
+
+static int config_overlay(void)
+{
+ if (setup_xfrm_tunnel(NS0, NS1, IP4_ADDR_VETH01, IP4_ADDR_VETH10,
+ IF_ID_0_TO_1, IF_ID_1))
+ goto fail;
+ if (setup_xfrm_tunnel(NS0, NS2, IP4_ADDR_VETH02, IP4_ADDR_VETH20,
+ IF_ID_0_TO_2, IF_ID_2))
+ goto fail;
+
+ /* Older iproute2 doesn't support this option */
+ if (!ASSERT_OK(setup_xfrmi_external_dev(NS0), "xfrmi"))
+ goto fail;
+
+ SYS(fail, "ip -net " NS0 " addr add 192.168.1.100/24 dev ipsec0");
+ SYS(fail, "ip -net " NS0 " link set dev ipsec0 up");
+
+ SYS(fail, "ip -net " NS1 " link add ipsec0 type xfrm if_id %d", IF_ID_1);
+ SYS(fail, "ip -net " NS1 " addr add 192.168.1.200/24 dev ipsec0");
+ SYS(fail, "ip -net " NS1 " link set dev ipsec0 up");
+
+ SYS(fail, "ip -net " NS2 " link add ipsec0 type xfrm if_id %d", IF_ID_2);
+ SYS(fail, "ip -net " NS2 " addr add 192.168.1.200/24 dev ipsec0");
+ SYS(fail, "ip -net " NS2 " link set dev ipsec0 up");
+
+ return 0;
+fail:
+ return -1;
+}
+
+static int test_xfrm_ping(struct xfrm_info *skel, u32 if_id)
+{
+ skel->bss->req_if_id = if_id;
+
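+ /* NS1 and NS2 both answer on 192.168.1.200, so which peer replies is
+ * determined solely by the req_if_id consumed by the egress program.
+ */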
+ SYS(fail, "ping -i 0.01 -c 3 -w 10 -q 192.168.1.200 > /dev/null");
+
+ if (!ASSERT_EQ(skel->bss->resp_if_id, if_id, "if_id"))
+ goto fail;
+
+ return 0;
+fail:
+ return -1;
+}
+
+static void _test_xfrm_info(void)
+{
+ LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
+ int get_xfrm_info_prog_fd, set_xfrm_info_prog_fd;
+ struct nstoken *nstoken = NULL;
+ struct xfrm_info *skel;
+ int ifindex;
+
+ /* load and attach bpf progs to ipsec dev tc hook point */
+ skel = xfrm_info__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "xfrm_info__open_and_load"))
+ goto done;
+ nstoken = open_netns(NS0);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS0))
+ goto done;
+ ifindex = if_nametoindex("ipsec0");
+ if (!ASSERT_NEQ(ifindex, 0, "ipsec0 ifindex"))
+ goto done;
+ tc_hook.ifindex = ifindex;
+ set_xfrm_info_prog_fd = bpf_program__fd(skel->progs.set_xfrm_info);
+ get_xfrm_info_prog_fd = bpf_program__fd(skel->progs.get_xfrm_info);
+ if (!ASSERT_GE(set_xfrm_info_prog_fd, 0, "bpf_program__fd"))
+ goto done;
+ if (!ASSERT_GE(get_xfrm_info_prog_fd, 0, "bpf_program__fd"))
+ goto done;
+ if (attach_tc_prog(&tc_hook, get_xfrm_info_prog_fd,
+ set_xfrm_info_prog_fd))
+ goto done;
+
+ /* perform test */
+ if (!ASSERT_EQ(test_xfrm_ping(skel, IF_ID_0_TO_1), 0, "ping " NS1))
+ goto done;
+ if (!ASSERT_EQ(test_xfrm_ping(skel, IF_ID_0_TO_2), 0, "ping " NS2))
+ goto done;
+
+done:
+ if (nstoken)
+ close_netns(nstoken);
+ xfrm_info__destroy(skel);
+}
+
+void test_xfrm_info(void)
+{
+ cleanup();
+
+ if (!ASSERT_OK(config_underlay(), "config_underlay"))
+ goto done;
+ if (!ASSERT_OK(config_overlay(), "config_overlay"))
+ goto done;
+
+ if (test__start_subtest("xfrm_info"))
+ _test_xfrm_info();
+
+done:
+ cleanup();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xsk.c b/tools/testing/selftests/bpf/prog_tests/xsk.c
new file mode 100644
index 000000000000..dd4c35c0e428
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xsk.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <net/if.h>
+#include <stdarg.h>
+
+#include "network_helpers.h"
+#include "test_progs.h"
+#include "test_xsk.h"
+#include "xsk_xdp_progs.skel.h"
+
+#define VETH_RX "veth0"
+#define VETH_TX "veth1"
+#define MTU 1500
+
+int setup_veth(bool busy_poll)
+{
+ SYS(fail,
+ "ip link add %s numtxqueues 4 numrxqueues 4 type veth peer name %s numtxqueues 4 numrxqueues 4",
+ VETH_RX, VETH_TX);
+ SYS(fail, "sysctl -wq net.ipv6.conf.%s.disable_ipv6=1", VETH_RX);
+ SYS(fail, "sysctl -wq net.ipv6.conf.%s.disable_ipv6=1", VETH_TX);
+
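+ /* For busy-poll runs, defer hard IRQs and arm a GRO flush timeout so
+ * NAPI processing can be driven from the busy-polling socket.
+ */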
+ if (busy_poll) {
+ SYS(fail, "echo 2 > /sys/class/net/%s/napi_defer_hard_irqs", VETH_RX);
+ SYS(fail, "echo 200000 > /sys/class/net/%s/gro_flush_timeout", VETH_RX);
+ SYS(fail, "echo 2 > /sys/class/net/%s/napi_defer_hard_irqs", VETH_TX);
+ SYS(fail, "echo 200000 > /sys/class/net/%s/gro_flush_timeout", VETH_TX);
+ }
+
+ SYS(fail, "ip link set %s mtu %d", VETH_RX, MTU);
+ SYS(fail, "ip link set %s mtu %d", VETH_TX, MTU);
+ SYS(fail, "ip link set %s up", VETH_RX);
+ SYS(fail, "ip link set %s up", VETH_TX);
+
+ return 0;
+
+fail:
+ return -1;
+}
+
+void delete_veth(void)
+{
+ SYS_NOFAIL("ip link del %s", VETH_RX);
+ SYS_NOFAIL("ip link del %s", VETH_TX);
+}
+
+int configure_ifobj(struct ifobject *tx, struct ifobject *rx)
+{
+ rx->ifindex = if_nametoindex(VETH_RX);
+ if (!ASSERT_NEQ(rx->ifindex, 0, "get RX ifindex"))
+ return -1;
+
+ tx->ifindex = if_nametoindex(VETH_TX);
+ if (!ASSERT_NEQ(tx->ifindex, 0, "get TX ifindex"))
+ return -1;
+
+ tx->shared_umem = false;
+ rx->shared_umem = false;
+
+ return 0;
+}
+
+static void test_xsk(const struct test_spec *test_to_run, enum test_mode mode)
+{
+ struct ifobject *ifobj_tx, *ifobj_rx;
+ struct test_spec test;
+ int ret;
+
+ ifobj_tx = ifobject_create();
+ if (!ASSERT_OK_PTR(ifobj_tx, "create ifobj_tx"))
+ return;
+
+ ifobj_rx = ifobject_create();
+ if (!ASSERT_OK_PTR(ifobj_rx, "create ifobj_rx"))
+ goto delete_tx;
+
+ if (!ASSERT_OK(configure_ifobj(ifobj_tx, ifobj_rx), "configure ifobj"))
+ goto delete_rx;
+
+ ret = get_hw_ring_size(ifobj_tx->ifname, &ifobj_tx->ring);
+ if (!ret) {
+ ifobj_tx->hw_ring_size_supp = true;
+ ifobj_tx->set_ring.default_tx = ifobj_tx->ring.tx_pending;
+ ifobj_tx->set_ring.default_rx = ifobj_tx->ring.rx_pending;
+ }
+
+ if (!ASSERT_OK(init_iface(ifobj_rx, worker_testapp_validate_rx), "init RX"))
+ goto delete_rx;
+ if (!ASSERT_OK(init_iface(ifobj_tx, worker_testapp_validate_tx), "init TX"))
+ goto delete_rx;
+
+ test_init(&test, ifobj_tx, ifobj_rx, 0, &tests[0]);
+
+ test.tx_pkt_stream_default = pkt_stream_generate(DEFAULT_PKT_CNT, MIN_PKT_SIZE);
+ if (!ASSERT_OK_PTR(test.tx_pkt_stream_default, "TX pkt generation"))
+ goto delete_rx;
+ test.rx_pkt_stream_default = pkt_stream_generate(DEFAULT_PKT_CNT, MIN_PKT_SIZE);
+ if (!ASSERT_OK_PTR(test.rx_pkt_stream_default, "RX pkt generation"))
+ goto delete_rx;
+
+ test_init(&test, ifobj_tx, ifobj_rx, mode, test_to_run);
+ ret = test.test_func(&test);
+ if (ret != TEST_SKIP)
+ ASSERT_OK(ret, "Run test");
+ pkt_stream_restore_default(&test);
+
+ if (ifobj_tx->hw_ring_size_supp)
+ hw_ring_size_reset(ifobj_tx);
+
+ pkt_stream_delete(test.tx_pkt_stream_default);
+ pkt_stream_delete(test.rx_pkt_stream_default);
+ xsk_xdp_progs__destroy(ifobj_tx->xdp_progs);
+ xsk_xdp_progs__destroy(ifobj_rx->xdp_progs);
+
+delete_rx:
+ ifobject_delete(ifobj_rx);
+delete_tx:
+ ifobject_delete(ifobj_tx);
+}
+
+void test_ns_xsk_skb(void)
+{
+ int i;
+
+ if (!ASSERT_OK(setup_veth(false), "setup veth"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (test__start_subtest(tests[i].name))
+ test_xsk(&tests[i], TEST_MODE_SKB);
+ }
+
+ delete_veth();
+}
+
+void test_ns_xsk_drv(void)
+{
+ int i;
+
+ if (!ASSERT_OK(setup_veth(false), "setup veth"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (test__start_subtest(tests[i].name))
+ test_xsk(&tests[i], TEST_MODE_DRV);
+ }
+
+ delete_veth();
+}
diff --git a/tools/testing/selftests/bpf/progs/access_map_in_map.c b/tools/testing/selftests/bpf/progs/access_map_in_map.c
new file mode 100644
index 000000000000..1126871c2ebd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/access_map_in_map.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <linux/bpf.h>
+#include <time.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+struct inner_map_type {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, 4);
+ __uint(value_size, 4);
+ __uint(max_entries, 1);
+} inner_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+ __array(values, struct inner_map_type);
+} outer_array_map SEC(".maps") = {
+ .values = {
+ [0] = &inner_map,
+ },
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+ __array(values, struct inner_map_type);
+} outer_htab_map SEC(".maps") = {
+ .values = {
+ [0] = &inner_map,
+ },
+};
+
+char _license[] SEC("license") = "GPL";
+
+int tgid = 0;
+
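+/* Look up inner maps of the given outer map while user space concurrently
+ * replaces them; the update loop below keeps the old inner map in use long
+ * enough for the replacement to land.
+ */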
+static int acc_map_in_map(void *outer_map)
+{
+ int i, key, value = 0xdeadbeef;
+ void *inner_map;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != tgid)
+ return 0;
+
+ /* Find nonexistent inner map */
+ key = 1;
+ inner_map = bpf_map_lookup_elem(outer_map, &key);
+ if (inner_map)
+ return 0;
+
+ /* Find the old inner map */
+ key = 0;
+ inner_map = bpf_map_lookup_elem(outer_map, &key);
+ if (!inner_map)
+ return 0;
+
+ /* Wait for the old inner map to be replaced */
+ for (i = 0; i < 2048; i++)
+ bpf_map_update_elem(inner_map, &key, &value, 0);
+
+ return 0;
+}
+
+SEC("?kprobe/" SYS_PREFIX "sys_getpgid")
+int access_map_in_array(void *ctx)
+{
+ return acc_map_in_map(&outer_array_map);
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int sleepable_access_map_in_array(void *ctx)
+{
+ return acc_map_in_map(&outer_array_map);
+}
+
+SEC("?kprobe/" SYS_PREFIX "sys_getpgid")
+int access_map_in_htab(void *ctx)
+{
+ return acc_map_in_map(&outer_htab_map);
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int sleepable_access_map_in_htab(void *ctx)
+{
+ return acc_map_in_map(&outer_htab_map);
+}
diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c
new file mode 100644
index 000000000000..d1841aac94a2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_atomics.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include <stdatomic.h>
+#include "bpf_arena_common.h"
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 10); /* number of pages */
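+ /* Pick an mmap() base that fits the target's user VA size; arm64 user
+ * space may have fewer VA bits, so use a lower base there.
+ */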
+#ifdef __TARGET_ARCH_arm64
+ __ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
+#else
+ __ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
+#endif
+} arena SEC(".maps");
+
+#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+bool skip_all_tests __attribute((__section__(".data"))) = false;
+#else
+bool skip_all_tests = true;
+#endif
+
+#if defined(ENABLE_ATOMICS_TESTS) && \
+ defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
+ (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64))
+bool skip_lacq_srel_tests __attribute((__section__(".data"))) = false;
+#else
+bool skip_lacq_srel_tests = true;
+#endif
+
+__u32 pid = 0;
+
+__u64 __arena_global add64_value = 1;
+__u64 __arena_global add64_result = 0;
+__u32 __arena_global add32_value = 1;
+__u32 __arena_global add32_result = 0;
+__u64 __arena_global add_stack_value_copy = 0;
+__u64 __arena_global add_stack_result = 0;
+__u64 __arena_global add_noreturn_value = 1;
+
+SEC("raw_tp/sys_enter")
+int add(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ __u64 add_stack_value = 1;
+
+ add64_result = __sync_fetch_and_add(&add64_value, 2);
+ add32_result = __sync_fetch_and_add(&add32_value, 2);
+ add_stack_result = __sync_fetch_and_add(&add_stack_value, 2);
+ add_stack_value_copy = add_stack_value;
+ __sync_fetch_and_add(&add_noreturn_value, 2);
+#endif
+
+ return 0;
+}
+
+__s64 __arena_global sub64_value = 1;
+__s64 __arena_global sub64_result = 0;
+__s32 __arena_global sub32_value = 1;
+__s32 __arena_global sub32_result = 0;
+__s64 __arena_global sub_stack_value_copy = 0;
+__s64 __arena_global sub_stack_result = 0;
+__s64 __arena_global sub_noreturn_value = 1;
+
+SEC("raw_tp/sys_enter")
+int sub(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ __u64 sub_stack_value = 1;
+
+ sub64_result = __sync_fetch_and_sub(&sub64_value, 2);
+ sub32_result = __sync_fetch_and_sub(&sub32_value, 2);
+ sub_stack_result = __sync_fetch_and_sub(&sub_stack_value, 2);
+ sub_stack_value_copy = sub_stack_value;
+ __sync_fetch_and_sub(&sub_noreturn_value, 2);
+#endif
+
+ return 0;
+}
+
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+_Atomic __u64 __arena_global and64_value = (0x110ull << 32);
+_Atomic __u32 __arena_global and32_value = 0x110;
+#else
+__u64 __arena_global and64_value = (0x110ull << 32);
+__u32 __arena_global and32_value = 0x110;
+#endif
+
+SEC("raw_tp/sys_enter")
+int and(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+ __c11_atomic_fetch_and(&and64_value, 0x011ull << 32, memory_order_relaxed);
+ __c11_atomic_fetch_and(&and32_value, 0x011, memory_order_relaxed);
+#else
+ __sync_fetch_and_and(&and64_value, 0x011ull << 32);
+ __sync_fetch_and_and(&and32_value, 0x011);
+#endif
+#endif
+
+ return 0;
+}
+
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+_Atomic __u32 __arena_global or32_value = 0x110;
+_Atomic __u64 __arena_global or64_value = (0x110ull << 32);
+#else
+__u32 __arena_global or32_value = 0x110;
+__u64 __arena_global or64_value = (0x110ull << 32);
+#endif
+
+SEC("raw_tp/sys_enter")
+int or(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+ __c11_atomic_fetch_or(&or64_value, 0x011ull << 32, memory_order_relaxed);
+ __c11_atomic_fetch_or(&or32_value, 0x011, memory_order_relaxed);
+#else
+ __sync_fetch_and_or(&or64_value, 0x011ull << 32);
+ __sync_fetch_and_or(&or32_value, 0x011);
+#endif
+#endif
+
+ return 0;
+}
+
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+_Atomic __u64 __arena_global xor64_value = (0x110ull << 32);
+_Atomic __u32 __arena_global xor32_value = 0x110;
+#else
+__u64 __arena_global xor64_value = (0x110ull << 32);
+__u32 __arena_global xor32_value = 0x110;
+#endif
+
+SEC("raw_tp/sys_enter")
+int xor(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+ __c11_atomic_fetch_xor(&xor64_value, 0x011ull << 32, memory_order_relaxed);
+ __c11_atomic_fetch_xor(&xor32_value, 0x011, memory_order_relaxed);
+#else
+ __sync_fetch_and_xor(&xor64_value, 0x011ull << 32);
+ __sync_fetch_and_xor(&xor32_value, 0x011);
+#endif
+#endif
+
+ return 0;
+}
+
+__u32 __arena_global cmpxchg32_value = 1;
+__u32 __arena_global cmpxchg32_result_fail = 0;
+__u32 __arena_global cmpxchg32_result_succeed = 0;
+__u64 __arena_global cmpxchg64_value = 1;
+__u64 __arena_global cmpxchg64_result_fail = 0;
+__u64 __arena_global cmpxchg64_result_succeed = 0;
+
+SEC("raw_tp/sys_enter")
+int cmpxchg(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ cmpxchg64_result_fail = __sync_val_compare_and_swap(&cmpxchg64_value, 0, 3);
+ cmpxchg64_result_succeed = __sync_val_compare_and_swap(&cmpxchg64_value, 1, 2);
+
+ cmpxchg32_result_fail = __sync_val_compare_and_swap(&cmpxchg32_value, 0, 3);
+ cmpxchg32_result_succeed = __sync_val_compare_and_swap(&cmpxchg32_value, 1, 2);
+#endif
+
+ return 0;
+}
+
+__u64 __arena_global xchg64_value = 1;
+__u64 __arena_global xchg64_result = 0;
+__u32 __arena_global xchg32_value = 1;
+__u32 __arena_global xchg32_result = 0;
+
+SEC("raw_tp/sys_enter")
+int xchg(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ __u64 val64 = 2;
+ __u32 val32 = 2;
+
+ xchg64_result = __sync_lock_test_and_set(&xchg64_value, val64);
+ xchg32_result = __sync_lock_test_and_set(&xchg32_value, val32);
+#endif
+
+ return 0;
+}
+
+__u64 __arena_global uaf_sink;
+volatile __u64 __arena_global uaf_recovery_fails;
+
+SEC("syscall")
+int uaf(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#if defined(ENABLE_ATOMICS_TESTS) && !defined(__TARGET_ARCH_arm64) && \
+ !defined(__TARGET_ARCH_x86)
+ __u32 __arena *page32;
+ __u64 __arena *page64;
+ void __arena *page;
+
+ page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ bpf_arena_free_pages(&arena, page, 1);
+ uaf_recovery_fails = 24;
+
+ page32 = (__u32 __arena *)page;
+ uaf_sink += __sync_fetch_and_add(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_add_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_sub(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_sub_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_and(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_and_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_or(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_or_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_xor(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_xor_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_val_compare_and_swap(page32, 0, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_lock_test_and_set(page32, 1);
+ uaf_recovery_fails -= 1;
+
+ page64 = (__u64 __arena *)page;
+ uaf_sink += __sync_fetch_and_add(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_add_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_sub(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_sub_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_and(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_and_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_or(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_or_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_xor(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_xor_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_val_compare_and_swap(page64, 0, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_lock_test_and_set(page64, 1);
+ uaf_recovery_fails -= 1;
+#endif
+
+ return 0;
+}
+
+#if __clang_major__ >= 18
+__u8 __arena_global load_acquire8_value = 0x12;
+__u16 __arena_global load_acquire16_value = 0x1234;
+__u32 __arena_global load_acquire32_value = 0x12345678;
+__u64 __arena_global load_acquire64_value = 0x1234567890abcdef;
+
+__u8 __arena_global load_acquire8_result = 0;
+__u16 __arena_global load_acquire16_result = 0;
+__u32 __arena_global load_acquire32_result = 0;
+__u64 __arena_global load_acquire64_result = 0;
+#else
+/* clang-17 crashes if the .addr_space.1 ELF section has holes. Work around
+ * this issue by defining the below variables as 64-bit.
+ */
+__u64 __arena_global load_acquire8_value;
+__u64 __arena_global load_acquire16_value;
+__u64 __arena_global load_acquire32_value;
+__u64 __arena_global load_acquire64_value;
+
+__u64 __arena_global load_acquire8_result;
+__u64 __arena_global load_acquire16_result;
+__u64 __arena_global load_acquire32_result;
+__u64 __arena_global load_acquire64_result;
+#endif
+
+SEC("raw_tp/sys_enter")
+int load_acquire(const void *ctx)
+{
+#if defined(ENABLE_ATOMICS_TESTS) && \
+ defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
+ (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64))
+
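+ /* Cast the arena pointers into the kernel address space and emit a raw
+ * BPF load-acquire instruction (encoded via .8byte and BPF_ATOMIC_OP),
+ * then store the loaded value through DST for user space to verify.
+ */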
+#define LOAD_ACQUIRE_ARENA(SIZEOP, SIZE, SRC, DST) \
+ { asm volatile ( \
+ "r1 = %[" #SRC "] ll;" \
+ "r1 = addr_space_cast(r1, 0x0, 0x1);" \
+ ".8byte %[load_acquire_insn];" \
+ "r3 = %[" #DST "] ll;" \
+ "r3 = addr_space_cast(r3, 0x0, 0x1);" \
+ "*(" #SIZE " *)(r3 + 0) = r2;" \
+ : \
+ : __imm_addr(SRC), \
+ __imm_insn(load_acquire_insn, \
+ BPF_ATOMIC_OP(BPF_##SIZEOP, BPF_LOAD_ACQ, \
+ BPF_REG_2, BPF_REG_1, 0)), \
+ __imm_addr(DST) \
+ : __clobber_all); } \
+
+ LOAD_ACQUIRE_ARENA(B, u8, load_acquire8_value, load_acquire8_result)
+ LOAD_ACQUIRE_ARENA(H, u16, load_acquire16_value,
+ load_acquire16_result)
+ LOAD_ACQUIRE_ARENA(W, u32, load_acquire32_value,
+ load_acquire32_result)
+ LOAD_ACQUIRE_ARENA(DW, u64, load_acquire64_value,
+ load_acquire64_result)
+#undef LOAD_ACQUIRE_ARENA
+
+#endif
+ return 0;
+}
+
+#if __clang_major__ >= 18
+__u8 __arena_global store_release8_result = 0;
+__u16 __arena_global store_release16_result = 0;
+__u32 __arena_global store_release32_result = 0;
+__u64 __arena_global store_release64_result = 0;
+#else
+/* clang-17 crashes if the .addr_space.1 ELF section has holes. Work around
+ * this issue by defining the below variables as 64-bit.
+ */
+__u64 __arena_global store_release8_result;
+__u64 __arena_global store_release16_result;
+__u64 __arena_global store_release32_result;
+__u64 __arena_global store_release64_result;
+#endif
+
+SEC("raw_tp/sys_enter")
+int store_release(const void *ctx)
+{
+#if defined(ENABLE_ATOMICS_TESTS) && \
+ defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
+ (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64))
+
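+ /* Same approach as load_acquire above: encode a raw BPF store-release
+ * instruction via .8byte to write VAL to the arena address DST.
+ */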
+#define STORE_RELEASE_ARENA(SIZEOP, DST, VAL) \
+ { asm volatile ( \
+ "r1 = " VAL ";" \
+ "r2 = %[" #DST "] ll;" \
+ "r2 = addr_space_cast(r2, 0x0, 0x1);" \
+ ".8byte %[store_release_insn];" \
+ : \
+ : __imm_addr(DST), \
+ __imm_insn(store_release_insn, \
+ BPF_ATOMIC_OP(BPF_##SIZEOP, BPF_STORE_REL, \
+ BPF_REG_2, BPF_REG_1, 0)) \
+ : __clobber_all); } \
+
+ STORE_RELEASE_ARENA(B, store_release8_result, "0x12")
+ STORE_RELEASE_ARENA(H, store_release16_result, "0x1234")
+ STORE_RELEASE_ARENA(W, store_release32_result, "0x12345678")
+ STORE_RELEASE_ARENA(DW, store_release64_result,
+ "0x1234567890abcdef ll")
+#undef STORE_RELEASE_ARENA
+
+#endif
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/arena_htab.c b/tools/testing/selftests/bpf/progs/arena_htab.c
new file mode 100644
index 000000000000..81eaa94afeb0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_htab.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 100); /* number of pages */
+} arena SEC(".maps");
+
+#include "bpf_arena_htab.h"
+
+void __arena *htab_for_user;
+bool skip = false;
+
+int zero = 0;
+char __arena arr1[100000];
+char arr2[1000];
+
+SEC("syscall")
+int arena_htab_llvm(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) || defined(BPF_ARENA_FORCE_ASM)
+ struct htab __arena *htab;
+ char __arena *arr = arr1;
+ __u64 i;
+
+ htab = bpf_alloc(sizeof(*htab));
+ cast_kern(htab);
+ htab_init(htab);
+
+ cast_kern(arr);
+
+ /* first run. No old elems in the table */
+ for (i = zero; i < 100000 && can_loop; i++) {
+ htab_update_elem(htab, i, i);
+ arr[i] = i;
+ }
+
+ /* should replace some elems with new ones */
+ for (i = zero; i < 1000 && can_loop; i++) {
+ htab_update_elem(htab, i, i);
+ /* Access mem to make the verifier use bounded loop logic */
+ arr2[i] = i;
+ }
+ cast_user(htab);
+ htab_for_user = htab;
+#else
+ skip = true;
+#endif
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/arena_htab_asm.c b/tools/testing/selftests/bpf/progs/arena_htab_asm.c
new file mode 100644
index 000000000000..6cd70ea12f0d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_htab_asm.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#define BPF_ARENA_FORCE_ASM
+#define arena_htab_llvm arena_htab_asm
+#include "arena_htab.c"
diff --git a/tools/testing/selftests/bpf/progs/arena_list.c b/tools/testing/selftests/bpf/progs/arena_list.c
new file mode 100644
index 000000000000..3a2ddcacbea6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_list.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 100); /* number of pages */
+#ifdef __TARGET_ARCH_arm64
+ __ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
+#else
+ __ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
+#endif
+} arena SEC(".maps");
+
+#include "bpf_arena_alloc.h"
+#include "bpf_arena_list.h"
+
+struct elem {
+ struct arena_list_node node;
+ __u64 value;
+};
+
+struct arena_list_head __arena *list_head;
+int list_sum;
+int cnt;
+bool skip = false;
+
+#ifdef __BPF_FEATURE_ADDR_SPACE_CAST
+long __arena arena_sum;
+int __arena test_val = 1;
+struct arena_list_head __arena global_head;
+#else
+long arena_sum SEC(".addr_space.1");
+int test_val SEC(".addr_space.1");
+#endif
+
+int zero;
+
+SEC("syscall")
+int arena_list_add(void *ctx)
+{
+#ifdef __BPF_FEATURE_ADDR_SPACE_CAST
+ __u64 i;
+
+ list_head = &global_head;
+
+ for (i = zero; i < cnt && can_loop; i++) {
+ struct elem __arena *n = bpf_alloc(sizeof(*n));
+
+ test_val++;
+ n->value = i;
+ arena_sum += i;
+ list_add_head(&n->node, list_head);
+ }
+#else
+ skip = true;
+#endif
+ return 0;
+}
+
+SEC("syscall")
+int arena_list_del(void *ctx)
+{
+#ifdef __BPF_FEATURE_ADDR_SPACE_CAST
+ struct elem __arena *n;
+ int sum = 0;
+
+ arena_sum = 0;
+ list_for_each_entry(n, list_head, node) {
+ sum += n->value;
+ arena_sum += n->value;
+ list_del(&n->node);
+ bpf_free(n);
+ }
+ list_sum = sum;
+#else
+ skip = true;
+#endif
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/arena_spin_lock.c b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
new file mode 100644
index 000000000000..086b57a426cf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_arena_spin_lock.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 100); /* number of pages */
+#ifdef __TARGET_ARCH_arm64
+ __ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
+#else
+ __ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
+#endif
+} arena SEC(".maps");
+
+int cs_count;
+
+#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+arena_spinlock_t __arena lock;
+int test_skip = 1;
+#else
+int test_skip = 2;
+#endif
+
+int counter;
+int limit;
+
+SEC("tc")
+int prog(void *ctx)
+{
+ int ret = -2;
+
+#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ unsigned long flags;
+
+ if ((ret = arena_spin_lock_irqsave(&lock, flags))) {
+ if (ret == -EOPNOTSUPP)
+ test_skip = 3;
+ return ret;
+ }
+ if (counter != limit)
+ counter++;
+ bpf_repeat(cs_count);
+ ret = 0;
+ arena_spin_unlock_irqrestore(&lock, flags);
+#endif
+ return ret;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/arena_strsearch.c b/tools/testing/selftests/bpf/progs/arena_strsearch.c
new file mode 100644
index 000000000000..ef6b76658f7f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_strsearch.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include "bpf_experimental.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 100); /* number of pages */
+} arena SEC(".maps");
+
+#include "bpf_arena_strsearch.h"
+
+struct glob_test {
+ char const __arena *pat, *str;
+ bool expected;
+};
+
+static bool test(char const __arena *pat, char const __arena *str, bool expected)
+{
+ bool match = glob_match(pat, str);
+ bool success = match == expected;
+
+ /* bpf_printk("glob_match %s %s res %d ok %d", pat, str, match, success); */
+ return success;
+}
+
+/*
+ * The tests are all jammed together in one array to make it simpler
+ * to keep the whole array in the arena. The obvious
+ * "array of structures containing char *" has no way to force the
+ * pointed-to strings to be in a particular section.
+ *
+ * Anyway, a test consists of:
+ * 1. Expected glob_match result: '1' or '0'.
+ * 2. Pattern to match: null-terminated string
+ * 3. String to match against: null-terminated string
+ *
+ * The list of tests is terminated with a final '\0' instead of
+ * a glob_match result character.
+ */
+static const char __arena glob_tests[] =
+ /* Some basic tests */
+ "1" "a\0" "a\0"
+ "0" "a\0" "b\0"
+ "0" "a\0" "aa\0"
+ "0" "a\0" "\0"
+ "1" "\0" "\0"
+ "0" "\0" "a\0"
+ /* Simple character class tests */
+ "1" "[a]\0" "a\0"
+ "0" "[a]\0" "b\0"
+ "0" "[!a]\0" "a\0"
+ "1" "[!a]\0" "b\0"
+ "1" "[ab]\0" "a\0"
+ "1" "[ab]\0" "b\0"
+ "0" "[ab]\0" "c\0"
+ "1" "[!ab]\0" "c\0"
+ "1" "[a-c]\0" "b\0"
+ "0" "[a-c]\0" "d\0"
+ /* Corner cases in character class parsing */
+ "1" "[a-c-e-g]\0" "-\0"
+ "0" "[a-c-e-g]\0" "d\0"
+ "1" "[a-c-e-g]\0" "f\0"
+ "1" "[]a-ceg-ik[]\0" "a\0"
+ "1" "[]a-ceg-ik[]\0" "]\0"
+ "1" "[]a-ceg-ik[]\0" "[\0"
+ "1" "[]a-ceg-ik[]\0" "h\0"
+ "0" "[]a-ceg-ik[]\0" "f\0"
+ "0" "[!]a-ceg-ik[]\0" "h\0"
+ "0" "[!]a-ceg-ik[]\0" "]\0"
+ "1" "[!]a-ceg-ik[]\0" "f\0"
+ /* Simple wild cards */
+ "1" "?\0" "a\0"
+ "0" "?\0" "aa\0"
+ "0" "??\0" "a\0"
+ "1" "?x?\0" "axb\0"
+ "0" "?x?\0" "abx\0"
+ "0" "?x?\0" "xab\0"
+ /* Asterisk wild cards (backtracking) */
+ "0" "*??\0" "a\0"
+ "1" "*??\0" "ab\0"
+ "1" "*??\0" "abc\0"
+ "1" "*??\0" "abcd\0"
+ "0" "??*\0" "a\0"
+ "1" "??*\0" "ab\0"
+ "1" "??*\0" "abc\0"
+ "1" "??*\0" "abcd\0"
+ "0" "?*?\0" "a\0"
+ "1" "?*?\0" "ab\0"
+ "1" "?*?\0" "abc\0"
+ "1" "?*?\0" "abcd\0"
+ "1" "*b\0" "b\0"
+ "1" "*b\0" "ab\0"
+ "0" "*b\0" "ba\0"
+ "1" "*b\0" "bb\0"
+ "1" "*b\0" "abb\0"
+ "1" "*b\0" "bab\0"
+ "1" "*bc\0" "abbc\0"
+ "1" "*bc\0" "bc\0"
+ "1" "*bc\0" "bbc\0"
+ "1" "*bc\0" "bcbc\0"
+ /* Multiple asterisks (complex backtracking) */
+ "1" "*ac*\0" "abacadaeafag\0"
+ "1" "*ac*ae*ag*\0" "abacadaeafag\0"
+ "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0"
+ "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0"
+ "1" "*abcd*\0" "abcabcabcabcdefg\0"
+ "1" "*ab*cd*\0" "abcabcabcabcdefg\0"
+ "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0"
+ "0" "*abcd*\0" "abcabcabcabcefg\0"
+ "0" "*ab*cd*\0" "abcabcabcabcefg\0";
+
+bool skip = false;
+
+SEC("syscall")
+int arena_strsearch(void *ctx)
+{
+ unsigned successes = 0;
+ unsigned n = 0;
+ char const __arena *p = glob_tests;
+
+ /*
+ * Tests are jammed together in a string. The first byte is '1'
+ * or '0' to indicate the expected outcome, or '\0' to indicate the
+ * end of the tests. Then come two null-terminated strings: the
+ * pattern and the string to match it against.
+ */
+ while (*p) {
+ bool expected = *p++ & 1;
+ char const __arena *pat = p;
+
+ cond_break;
+ p += bpf_arena_strlen(p) + 1;
+ successes += test(pat, p, expected);
+ p += bpf_arena_strlen(p) + 1;
+ n++;
+ }
+
+ n -= successes;
+ /* bpf_printk("glob: %u self-tests passed, %u failed\n", successes, n); */
+
+ return n ? -1 : 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/async_stack_depth.c b/tools/testing/selftests/bpf/progs/async_stack_depth.c
new file mode 100644
index 000000000000..36734683acbd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/async_stack_depth.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+struct hmap_elem {
+ struct bpf_timer timer;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 64);
+ __type(key, int);
+ __type(value, struct hmap_elem);
+} hmap SEC(".maps");
+
+__attribute__((noinline))
+static int timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+ volatile char buf[256] = {};
+ return buf[69];
+}
+
+__attribute__((noinline))
+static int bad_timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+ volatile char buf[300] = {};
+ return buf[255] + timer_cb(NULL, NULL, NULL);
+}
+
+SEC("tc")
+__failure __msg("combined stack size of 2 calls is")
+int pseudo_call_check(struct __sk_buff *ctx)
+{
+ struct hmap_elem *elem;
+ volatile char buf[256] = {};
+
+ elem = bpf_map_lookup_elem(&hmap, &(int){0});
+ if (!elem)
+ return 0;
+
+ timer_cb(NULL, NULL, NULL);
+ return bpf_timer_set_callback(&elem->timer, timer_cb) + buf[0];
+}
+
+SEC("tc")
+__failure __msg("combined stack size of 2 calls is")
+int async_call_root_check(struct __sk_buff *ctx)
+{
+ struct hmap_elem *elem;
+ volatile char buf[256] = {};
+
+ elem = bpf_map_lookup_elem(&hmap, &(int){0});
+ if (!elem)
+ return 0;
+
+ return bpf_timer_set_callback(&elem->timer, bad_timer_cb) + buf[0];
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/atomic_bounds.c b/tools/testing/selftests/bpf/progs/atomic_bounds.c
new file mode 100644
index 000000000000..e5fff7fc7f8f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/atomic_bounds.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+
+#ifdef ENABLE_ATOMICS_TESTS
+bool skip_tests __attribute((__section__(".data"))) = false;
+#else
+bool skip_tests = true;
+#endif
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(sub, int x)
+{
+#ifdef ENABLE_ATOMICS_TESTS
+ int a = 0;
+ int b = __sync_fetch_and_add(&a, 1);
+ /* b is certainly 0 here. Can the verifier tell? */
+ while (b)
+ continue;
+#endif
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/atomics.c b/tools/testing/selftests/bpf/progs/atomics.c
new file mode 100644
index 000000000000..f89c7f0cc53b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/atomics.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+
+#ifdef ENABLE_ATOMICS_TESTS
+bool skip_tests __attribute((__section__(".data"))) = false;
+#else
+bool skip_tests = true;
+#endif
+
+__u32 pid = 0;
+
+__u64 add64_value = 1;
+__u64 add64_result = 0;
+__u32 add32_value = 1;
+__u32 add32_result = 0;
+__u64 add_stack_value_copy = 0;
+__u64 add_stack_result = 0;
+__u64 add_noreturn_value = 1;
+
+SEC("raw_tp/sys_enter")
+int add(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ __u64 add_stack_value = 1;
+
+ add64_result = __sync_fetch_and_add(&add64_value, 2);
+ add32_result = __sync_fetch_and_add(&add32_value, 2);
+ add_stack_result = __sync_fetch_and_add(&add_stack_value, 2);
+ add_stack_value_copy = add_stack_value;
+ __sync_fetch_and_add(&add_noreturn_value, 2);
+#endif
+
+ return 0;
+}
+
+__s64 sub64_value = 1;
+__s64 sub64_result = 0;
+__s32 sub32_value = 1;
+__s32 sub32_result = 0;
+__s64 sub_stack_value_copy = 0;
+__s64 sub_stack_result = 0;
+__s64 sub_noreturn_value = 1;
+
+SEC("raw_tp/sys_enter")
+int sub(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ __u64 sub_stack_value = 1;
+
+ sub64_result = __sync_fetch_and_sub(&sub64_value, 2);
+ sub32_result = __sync_fetch_and_sub(&sub32_value, 2);
+ sub_stack_result = __sync_fetch_and_sub(&sub_stack_value, 2);
+ sub_stack_value_copy = sub_stack_value;
+ __sync_fetch_and_sub(&sub_noreturn_value, 2);
+#endif
+
+ return 0;
+}
+
+__u64 and64_value = (0x110ull << 32);
+__u64 and64_result = 0;
+__u32 and32_value = 0x110;
+__u32 and32_result = 0;
+__u64 and_noreturn_value = (0x110ull << 32);
+
+SEC("raw_tp/sys_enter")
+int and(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+
+ and64_result = __sync_fetch_and_and(&and64_value, 0x011ull << 32);
+ and32_result = __sync_fetch_and_and(&and32_value, 0x011);
+ __sync_fetch_and_and(&and_noreturn_value, 0x011ull << 32);
+#endif
+
+ return 0;
+}
+
+__u64 or64_value = (0x110ull << 32);
+__u64 or64_result = 0;
+__u32 or32_value = 0x110;
+__u32 or32_result = 0;
+__u64 or_noreturn_value = (0x110ull << 32);
+
+SEC("raw_tp/sys_enter")
+int or(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ or64_result = __sync_fetch_and_or(&or64_value, 0x011ull << 32);
+ or32_result = __sync_fetch_and_or(&or32_value, 0x011);
+ __sync_fetch_and_or(&or_noreturn_value, 0x011ull << 32);
+#endif
+
+ return 0;
+}
+
+__u64 xor64_value = (0x110ull << 32);
+__u64 xor64_result = 0;
+__u32 xor32_value = 0x110;
+__u32 xor32_result = 0;
+__u64 xor_noreturn_value = (0x110ull << 32);
+
+SEC("raw_tp/sys_enter")
+int xor(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ xor64_result = __sync_fetch_and_xor(&xor64_value, 0x011ull << 32);
+ xor32_result = __sync_fetch_and_xor(&xor32_value, 0x011);
+ __sync_fetch_and_xor(&xor_noreturn_value, 0x011ull << 32);
+#endif
+
+ return 0;
+}
+
+__u64 cmpxchg64_value = 1;
+__u64 cmpxchg64_result_fail = 0;
+__u64 cmpxchg64_result_succeed = 0;
+__u32 cmpxchg32_value = 1;
+__u32 cmpxchg32_result_fail = 0;
+__u32 cmpxchg32_result_succeed = 0;
+
+SEC("raw_tp/sys_enter")
+int cmpxchg(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ cmpxchg64_result_fail = __sync_val_compare_and_swap(&cmpxchg64_value, 0, 3);
+ cmpxchg64_result_succeed = __sync_val_compare_and_swap(&cmpxchg64_value, 1, 2);
+
+ cmpxchg32_result_fail = __sync_val_compare_and_swap(&cmpxchg32_value, 0, 3);
+ cmpxchg32_result_succeed = __sync_val_compare_and_swap(&cmpxchg32_value, 1, 2);
+#endif
+
+ return 0;
+}
+
+__u64 xchg64_value = 1;
+__u64 xchg64_result = 0;
+__u32 xchg32_value = 1;
+__u32 xchg32_result = 0;
+
+SEC("raw_tp/sys_enter")
+int xchg(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+ __u64 val64 = 2;
+ __u32 val32 = 2;
+
+ xchg64_result = __sync_lock_test_and_set(&xchg64_value, val64);
+ xchg32_result = __sync_lock_test_and_set(&xchg32_value, val32);
+#endif
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bad_struct_ops.c b/tools/testing/selftests/bpf/progs/bad_struct_ops.c
new file mode 100644
index 000000000000..b3f77b4561c8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bad_struct_ops.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1) { return 0; }
+
+SEC("struct_ops/test_2")
+int BPF_PROG(test_2) { return 0; }
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops2 testmod_2 = {
+ .test_1 = (void *)test_1
+};
diff --git a/tools/testing/selftests/bpf/progs/bad_struct_ops2.c b/tools/testing/selftests/bpf/progs/bad_struct_ops2.c
new file mode 100644
index 000000000000..64a95f6be86d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bad_struct_ops2.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* This is an unused struct_ops program; it lacks a corresponding
+ * struct_ops map, which would provide attachment information.
+ * Without additional configuration, an attempt to load such a
+ * BPF object file would fail.
+ */
+SEC("struct_ops/foo")
+void foo(void) {}
diff --git a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
new file mode 100644
index 000000000000..c8ec0d0368e4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+long create_errs = 0;
+long create_cnts = 0;
+long kmalloc_cnts = 0;
+__u32 bench_pid = 0;
+
+struct storage {
+ __u8 data[64];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct storage);
+} sk_storage_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct storage);
+} task_storage_map SEC(".maps");
+
+SEC("raw_tp/kmalloc")
+int BPF_PROG(kmalloc, unsigned long call_site, const void *ptr,
+ size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags,
+ int node)
+{
+ __sync_fetch_and_add(&kmalloc_cnts, 1);
+
+ return 0;
+}
+
+SEC("tp_btf/sched_process_fork")
+int BPF_PROG(sched_process_fork, struct task_struct *parent, struct task_struct *child)
+{
+ struct storage *stg;
+
+ if (parent->tgid != bench_pid)
+ return 0;
+
+ stg = bpf_task_storage_get(&task_storage_map, child, NULL,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (stg)
+ __sync_fetch_and_add(&create_cnts, 1);
+ else
+ __sync_fetch_and_add(&create_errs, 1);
+
+ return 0;
+}
+
+SEC("lsm.s/socket_post_create")
+int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
+ int protocol, int kern)
+{
+ struct sock *sk = sock->sk;
+ struct storage *stg;
+ __u32 pid;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != bench_pid || !sk)
+ return 0;
+
+ stg = bpf_sk_storage_get(&sk_storage_map, sk, NULL,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+
+ if (stg)
+ __sync_fetch_and_add(&create_cnts, 1);
+ else
+ __sync_fetch_and_add(&create_errs, 1);
+
+ return 0;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bench_sockmap_prog.c b/tools/testing/selftests/bpf/progs/bench_sockmap_prog.c
new file mode 100644
index 000000000000..079bf3794b3a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bench_sockmap_prog.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+long process_byte = 0;
+int verdict_dir = 0;
+int dropped = 0;
+int pkt_size = 0;
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_rx SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_tx SEC(".maps");
+
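+/* Stream parser: report each record as pkt_size bytes (configured by the
+ * userspace benchmark) so the verdict program runs once per record.
+ */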
+SEC("sk_skb/stream_parser")
+int prog_skb_parser(struct __sk_buff *skb)
+{
+ return pkt_size;
+}
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ int one = 1;
+ int ret = bpf_sk_redirect_map(skb, &sock_map_rx, one, verdict_dir);
+
+ if (ret == SK_DROP)
+ dropped++;
+ __sync_fetch_and_add(&process_byte, skb->len);
+ return ret;
+}
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_pass(struct __sk_buff *skb)
+{
+ __sync_fetch_and_add(&process_byte, skb->len);
+ return SK_PASS;
+}
+
+SEC("sk_msg")
+int prog_skmsg_verdict(struct sk_msg_md *msg)
+{
+ int one = 1;
+
+ __sync_fetch_and_add(&process_byte, msg->size);
+ return bpf_msg_redirect_map(msg, &sock_map_tx, one, verdict_dir);
+}
+
+SEC("sk_msg")
+int prog_skmsg_pass(struct sk_msg_md *msg)
+{
+ __sync_fetch_and_add(&process_byte, msg->size);
+ return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bind4_prog.c b/tools/testing/selftests/bpf/progs/bind4_prog.c
index c6520f21f5f5..b7ddf8ec4ee8 100644
--- a/tools/testing/selftests/bpf/progs/bind4_prog.c
+++ b/tools/testing/selftests/bpf/progs/bind4_prog.c
@@ -6,14 +6,14 @@
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/in6.h>
-#include <sys/socket.h>
-#include <netinet/tcp.h>
#include <linux/if.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bind_prog.h"
+
#define SERV4_IP 0xc0a801feU /* 192.168.1.254 */
#define SERV4_PORT 4040
#define SERV4_REWRITE_IP 0x7f000001U /* 127.0.0.1 */
@@ -29,18 +29,69 @@ static __inline int bind_to_device(struct bpf_sock_addr *ctx)
char veth2[IFNAMSIZ] = "test_sock_addr2";
char missing[IFNAMSIZ] = "nonexistent_dev";
char del_bind[IFNAMSIZ] = "";
+ int veth1_idx, veth2_idx;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
- &veth1, sizeof(veth1)))
+ &veth1, sizeof(veth1)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &veth1_idx, sizeof(veth1_idx)) || !veth1_idx)
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
- &veth2, sizeof(veth2)))
+ &veth2, sizeof(veth2)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &veth2_idx, sizeof(veth2_idx)) || !veth2_idx ||
+ veth1_idx == veth2_idx)
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
- &missing, sizeof(missing)) != -ENODEV)
+ &missing, sizeof(missing)) != -ENODEV)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &veth1_idx, sizeof(veth1_idx)))
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
- &del_bind, sizeof(del_bind)))
+ &del_bind, sizeof(del_bind)))
+ return 1;
+
+ return 0;
+}
+
+static __inline int bind_reuseport(struct bpf_sock_addr *ctx)
+{
+ int val = 1;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)) || !val)
+ return 1;
+ val = 0;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)) || val)
+ return 1;
+
+ return 0;
+}
+
+static __inline int misc_opts(struct bpf_sock_addr *ctx, int opt)
+{
+ int old, tmp, new = 0xeb9f;
+
+ /* The socket in the test case guarantees that old never equals new. */
+ if (bpf_getsockopt(ctx, SOL_SOCKET, opt, &old, sizeof(old)) ||
+ old == new)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, opt, &new, sizeof(new)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, opt, &tmp, sizeof(tmp)) ||
+ tmp != new)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, opt, &old, sizeof(old)))
return 1;
return 0;
@@ -69,23 +120,23 @@ int bind_v4_prog(struct bpf_sock_addr *ctx)
// u8 narrow loads:
user_ip4 = 0;
- user_ip4 |= ((volatile __u8 *)&ctx->user_ip4)[0] << 0;
- user_ip4 |= ((volatile __u8 *)&ctx->user_ip4)[1] << 8;
- user_ip4 |= ((volatile __u8 *)&ctx->user_ip4)[2] << 16;
- user_ip4 |= ((volatile __u8 *)&ctx->user_ip4)[3] << 24;
+ user_ip4 |= load_byte(ctx->user_ip4, 0, sizeof(user_ip4));
+ user_ip4 |= load_byte(ctx->user_ip4, 1, sizeof(user_ip4));
+ user_ip4 |= load_byte(ctx->user_ip4, 2, sizeof(user_ip4));
+ user_ip4 |= load_byte(ctx->user_ip4, 3, sizeof(user_ip4));
if (ctx->user_ip4 != user_ip4)
return 0;
user_port = 0;
- user_port |= ((volatile __u8 *)&ctx->user_port)[0] << 0;
- user_port |= ((volatile __u8 *)&ctx->user_port)[1] << 8;
+ user_port |= load_byte(ctx->user_port, 0, sizeof(user_port));
+ user_port |= load_byte(ctx->user_port, 1, sizeof(user_port));
if (ctx->user_port != user_port)
return 0;
// u16 narrow loads:
user_ip4 = 0;
- user_ip4 |= ((volatile __u16 *)&ctx->user_ip4)[0] << 0;
- user_ip4 |= ((volatile __u16 *)&ctx->user_ip4)[1] << 16;
+ user_ip4 |= load_word(ctx->user_ip4, 0, sizeof(user_ip4));
+ user_ip4 |= load_word(ctx->user_ip4, 1, sizeof(user_ip4));
if (ctx->user_ip4 != user_ip4)
return 0;
@@ -93,10 +144,24 @@ int bind_v4_prog(struct bpf_sock_addr *ctx)
if (bind_to_device(ctx))
return 0;
+ /* Test for misc socket options. */
+ if (misc_opts(ctx, SO_MARK) || misc_opts(ctx, SO_PRIORITY))
+ return 0;
+
+ /* Set reuseport and unset */
+ if (bind_reuseport(ctx))
+ return 0;
+
ctx->user_ip4 = bpf_htonl(SERV4_REWRITE_IP);
ctx->user_port = bpf_htons(SERV4_REWRITE_PORT);
return 1;
}
+SEC("cgroup/bind4")
+int bind_v4_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bind6_prog.c b/tools/testing/selftests/bpf/progs/bind6_prog.c
index 4358e44dcf47..501c3fc11d35 100644
--- a/tools/testing/selftests/bpf/progs/bind6_prog.c
+++ b/tools/testing/selftests/bpf/progs/bind6_prog.c
@@ -6,14 +6,14 @@
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/in6.h>
-#include <sys/socket.h>
-#include <netinet/tcp.h>
#include <linux/if.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bind_prog.h"
+
#define SERV6_IP_0 0xfaceb00c /* face:b00c:1234:5678::abcd */
#define SERV6_IP_1 0x12345678
#define SERV6_IP_2 0x00000000
@@ -35,18 +35,69 @@ static __inline int bind_to_device(struct bpf_sock_addr *ctx)
char veth2[IFNAMSIZ] = "test_sock_addr2";
char missing[IFNAMSIZ] = "nonexistent_dev";
char del_bind[IFNAMSIZ] = "";
+ int veth1_idx, veth2_idx;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
- &veth1, sizeof(veth1)))
+ &veth1, sizeof(veth1)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &veth1_idx, sizeof(veth1_idx)) || !veth1_idx)
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
- &veth2, sizeof(veth2)))
+ &veth2, sizeof(veth2)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &veth2_idx, sizeof(veth2_idx)) || !veth2_idx ||
+ veth1_idx == veth2_idx)
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
- &missing, sizeof(missing)) != -ENODEV)
+ &missing, sizeof(missing)) != -ENODEV)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &veth1_idx, sizeof(veth1_idx)))
return 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
- &del_bind, sizeof(del_bind)))
+ &del_bind, sizeof(del_bind)))
+ return 1;
+
+ return 0;
+}
+
+static __inline int bind_reuseport(struct bpf_sock_addr *ctx)
+{
+ int val = 1;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)) || !val)
+ return 1;
+ val = 0;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_REUSEPORT,
+ &val, sizeof(val)) || val)
+ return 1;
+
+ return 0;
+}
+
+static __inline int misc_opts(struct bpf_sock_addr *ctx, int opt)
+{
+ int old, tmp, new = 0xeb9f;
+
+ /* The socket in the test case guarantees that old never equals new. */
+ if (bpf_getsockopt(ctx, SOL_SOCKET, opt, &old, sizeof(old)) ||
+ old == new)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, opt, &new, sizeof(new)))
+ return 1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, opt, &tmp, sizeof(tmp)) ||
+ tmp != new)
+ return 1;
+ if (bpf_setsockopt(ctx, SOL_SOCKET, opt, &old, sizeof(old)))
return 1;
return 0;
@@ -80,25 +131,25 @@ int bind_v6_prog(struct bpf_sock_addr *ctx)
// u8 narrow loads:
for (i = 0; i < 4; i++) {
user_ip6 = 0;
- user_ip6 |= ((volatile __u8 *)&ctx->user_ip6[i])[0] << 0;
- user_ip6 |= ((volatile __u8 *)&ctx->user_ip6[i])[1] << 8;
- user_ip6 |= ((volatile __u8 *)&ctx->user_ip6[i])[2] << 16;
- user_ip6 |= ((volatile __u8 *)&ctx->user_ip6[i])[3] << 24;
+ user_ip6 |= load_byte(ctx->user_ip6[i], 0, sizeof(user_ip6));
+ user_ip6 |= load_byte(ctx->user_ip6[i], 1, sizeof(user_ip6));
+ user_ip6 |= load_byte(ctx->user_ip6[i], 2, sizeof(user_ip6));
+ user_ip6 |= load_byte(ctx->user_ip6[i], 3, sizeof(user_ip6));
if (ctx->user_ip6[i] != user_ip6)
return 0;
}
user_port = 0;
- user_port |= ((volatile __u8 *)&ctx->user_port)[0] << 0;
- user_port |= ((volatile __u8 *)&ctx->user_port)[1] << 8;
+ user_port |= load_byte(ctx->user_port, 0, sizeof(user_port));
+ user_port |= load_byte(ctx->user_port, 1, sizeof(user_port));
if (ctx->user_port != user_port)
return 0;
// u16 narrow loads:
for (i = 0; i < 4; i++) {
user_ip6 = 0;
- user_ip6 |= ((volatile __u16 *)&ctx->user_ip6[i])[0] << 0;
- user_ip6 |= ((volatile __u16 *)&ctx->user_ip6[i])[1] << 16;
+ user_ip6 |= load_word(ctx->user_ip6[i], 0, sizeof(user_ip6));
+ user_ip6 |= load_word(ctx->user_ip6[i], 1, sizeof(user_ip6));
if (ctx->user_ip6[i] != user_ip6)
return 0;
}
@@ -107,6 +158,14 @@ int bind_v6_prog(struct bpf_sock_addr *ctx)
if (bind_to_device(ctx))
return 0;
+ /* Test for misc socket options. */
+ if (misc_opts(ctx, SO_MARK) || misc_opts(ctx, SO_PRIORITY))
+ return 0;
+
+ /* Set reuseport and unset */
+ if (bind_reuseport(ctx))
+ return 0;
+
ctx->user_ip6[0] = bpf_htonl(SERV6_REWRITE_IP_0);
ctx->user_ip6[1] = bpf_htonl(SERV6_REWRITE_IP_1);
ctx->user_ip6[2] = bpf_htonl(SERV6_REWRITE_IP_2);
@@ -116,4 +175,10 @@ int bind_v6_prog(struct bpf_sock_addr *ctx)
return 1;
}
+SEC("cgroup/bind6")
+int bind_v6_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bind_perm.c b/tools/testing/selftests/bpf/progs/bind_perm.c
new file mode 100644
index 000000000000..7bd2a027025d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bind_perm.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+static __always_inline int bind_prog(struct bpf_sock_addr *ctx, int family)
+{
+ struct bpf_sock *sk;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 0;
+
+ if (sk->family != family)
+ return 0;
+
+ if (ctx->type != SOCK_STREAM)
+ return 0;
+
+ /* Return 1 (allow) OR'ed with bit 1 (value 2) to indicate
+ * that the CAP_NET_BIND_SERVICE check should be bypassed.
+ */
+ if (ctx->user_port == bpf_htons(111))
+ return (1 | 2);
+
+ return 1;
+}
+
+SEC("cgroup/bind4")
+int bind_v4_prog(struct bpf_sock_addr *ctx)
+{
+ return bind_prog(ctx, AF_INET);
+}
+
+SEC("cgroup/bind6")
+int bind_v6_prog(struct bpf_sock_addr *ctx)
+{
+ return bind_prog(ctx, AF_INET6);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bind_prog.h b/tools/testing/selftests/bpf/progs/bind_prog.h
new file mode 100644
index 000000000000..e830caa940c3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bind_prog.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BIND_PROG_H__
+#define __BIND_PROG_H__
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define load_byte(src, b, s) \
+ (((volatile __u8 *)&(src))[b] << 8 * b)
+#define load_word(src, w, s) \
+ (((volatile __u16 *)&(src))[w] << 16 * w)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define load_byte(src, b, s) \
+ (((volatile __u8 *)&(src))[(b) + (sizeof(src) - (s))] << 8 * ((s) - (b) - 1))
+#define load_word(src, w, s) \
+ (((volatile __u16 *)&(src))[w] << 16 * (((s) / 2) - (w) - 1))
+#else
+# error "Fix your compiler's __BYTE_ORDER__?!"
+#endif
+
+#endif
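
A worked example of the little-endian variants above, assuming src is a __u32 holding 0x44332211: byte b is shifted back to its original position, so OR-ing the four loads reconstructs the value (the big-endian variants re-index the bytes so the same shifts still line up):

	__u32 src = 0x44332211, out = 0;

	out |= load_byte(src, 0, sizeof(out));	/* 0x11 << 0  -> 0x00000011 */
	out |= load_byte(src, 1, sizeof(out));	/* 0x22 << 8  -> 0x00002211 */
	out |= load_byte(src, 2, sizeof(out));	/* 0x33 << 16 -> 0x00332211 */
	out |= load_byte(src, 3, sizeof(out));	/* 0x44 << 24 -> 0x44332211 == src */
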
diff --git a/tools/testing/selftests/bpf/progs/bloom_filter_bench.c b/tools/testing/selftests/bpf/progs/bloom_filter_bench.c
new file mode 100644
index 000000000000..7efcbdbe772d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bloom_filter_bench.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <errno.h>
+#include <linux/bpf.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct bpf_map;
+
+__u8 rand_vals[2500000];
+const __u32 nr_rand_bytes = 2500000;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ /* max entries and value_size will be set programmatically.
+ * They are configurable from the userspace bench program.
+ */
+} array_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
+ /* max entries, value_size, and # of hash functions will be set
+ * programmatically. They are configurable from the userspace
+ * bench program.
+ */
+ __uint(map_extra, 3);
+} bloom_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+	/* max entries, key_size, and value_size will be set
+ * programmatically. They are configurable from the userspace
+ * bench program.
+ */
+} hashmap SEC(".maps");
+
+struct callback_ctx {
+ struct bpf_map *map;
+ bool update;
+};
+
+/* Tracks the number of hits, drops, and false hits */
+struct {
+ __u32 stats[3];
+} __attribute__((__aligned__(256))) percpu_stats[256];
+
+const __u32 hit_key = 0;
+const __u32 drop_key = 1;
+const __u32 false_hit_key = 2;
+
+__u8 value_size;
+
+const volatile bool hashmap_use_bloom;
+const volatile bool count_false_hits;
+
+int error = 0;
+
+static __always_inline void log_result(__u32 key)
+{
+ __u32 cpu = bpf_get_smp_processor_id();
+
+ percpu_stats[cpu & 255].stats[key]++;
+}
+
+static __u64
+bloom_callback(struct bpf_map *map, __u32 *key, void *val,
+ struct callback_ctx *data)
+{
+ int err;
+
+ if (data->update)
+ err = bpf_map_push_elem(data->map, val, 0);
+ else
+ err = bpf_map_peek_elem(data->map, val);
+
+ if (err) {
+ error |= 1;
+ return 1; /* stop the iteration */
+ }
+
+ log_result(hit_key);
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int bloom_lookup(void *ctx)
+{
+ struct callback_ctx data;
+
+ data.map = (struct bpf_map *)&bloom_map;
+ data.update = false;
+
+ bpf_for_each_map_elem(&array_map, bloom_callback, &data, 0);
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int bloom_update(void *ctx)
+{
+ struct callback_ctx data;
+
+ data.map = (struct bpf_map *)&bloom_map;
+ data.update = true;
+
+ bpf_for_each_map_elem(&array_map, bloom_callback, &data, 0);
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int bloom_hashmap_lookup(void *ctx)
+{
+ __u64 *result;
+ int i, err;
+
+ __u32 index = bpf_get_prandom_u32();
+ __u32 bitmask = (1ULL << 21) - 1;
+
+ for (i = 0; i < 1024; i++, index += value_size) {
+ index = index & bitmask;
+
+ if (hashmap_use_bloom) {
+ err = bpf_map_peek_elem(&bloom_map,
+ rand_vals + index);
+ if (err) {
+ if (err != -ENOENT) {
+ error |= 2;
+ return 0;
+ }
+ log_result(hit_key);
+ continue;
+ }
+ }
+
+ result = bpf_map_lookup_elem(&hashmap,
+ rand_vals + index);
+ if (result) {
+ log_result(hit_key);
+ } else {
+ if (hashmap_use_bloom && count_false_hits)
+ log_result(false_hit_key);
+ log_result(drop_key);
+ }
+ }
+
+ return 0;
+}
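
The sizes left unset above are filled in before load by the userspace bench. A hedged sketch of that setup with libbpf setters (the skeleton handle and the concrete sizes are assumptions):

	/* before the skeleton's __load() step: */
	bpf_map__set_value_size(skel->maps.array_map, 8);
	bpf_map__set_max_entries(skel->maps.array_map, 10000);
	bpf_map__set_value_size(skel->maps.bloom_map, 8);
	bpf_map__set_max_entries(skel->maps.bloom_map, 10000);
	bpf_map__set_map_extra(skel->maps.bloom_map, 3);	/* lower bits: nr of hash functions */
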
diff --git a/tools/testing/selftests/bpf/progs/bloom_filter_map.c b/tools/testing/selftests/bpf/progs/bloom_filter_map.c
new file mode 100644
index 000000000000..f245fcfe0c61
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bloom_filter_map.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct bpf_map;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1000);
+} map_random_data SEC(".maps");
+
+struct map_bloom_type {
+ __uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
+ __type(value, __u32);
+ __uint(max_entries, 10000);
+ __uint(map_extra, 5);
+} map_bloom SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+ __array(values, struct map_bloom_type);
+} outer_map SEC(".maps");
+
+struct callback_ctx {
+ struct bpf_map *map;
+};
+
+int error = 0;
+
+static __u64
+check_elem(struct bpf_map *map, __u32 *key, __u32 *val,
+ struct callback_ctx *data)
+{
+ int err;
+
+ err = bpf_map_peek_elem(data->map, val);
+ if (err) {
+ error |= 1;
+ return 1; /* stop the iteration */
+ }
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int inner_map(void *ctx)
+{
+ struct bpf_map *inner_map;
+ struct callback_ctx data;
+ int key = 0;
+
+ inner_map = bpf_map_lookup_elem(&outer_map, &key);
+ if (!inner_map) {
+ error |= 2;
+ return 0;
+ }
+
+ data.map = inner_map;
+ bpf_for_each_map_elem(&map_random_data, check_elem, &data, 0);
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int check_bloom(void *ctx)
+{
+ struct callback_ctx data;
+
+ data.map = (struct bpf_map *)&map_bloom;
+ bpf_for_each_map_elem(&map_random_data, check_elem, &data, 0);
+
+ return 0;
+}
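
For reference, bloom filter maps are keyless: bpf_map_push_elem() adds a value and bpf_map_peek_elem() queries membership (false positives are possible, false negatives are not). From userspace the same operations take a NULL key; a hedged sketch:

	__u32 val = 42;
	int err;

	bpf_map_update_elem(map_fd, NULL, &val, BPF_ANY);	/* add */
	/* 0: possibly present; -ENOENT: definitely absent */
	err = bpf_map_lookup_elem(map_fd, NULL, &val);
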
diff --git a/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h b/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
new file mode 100644
index 000000000000..f90531cf3ee5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#ifndef BPF_ARENA_SPIN_LOCK_H
+#define BPF_ARENA_SPIN_LOCK_H
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_atomic.h"
+
+#define arch_mcs_spin_lock_contended_label(l, label) smp_cond_load_acquire_label(l, VAL, label)
+#define arch_mcs_spin_unlock_contended(l) smp_store_release((l), 1)
+
+#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+
+#define EBUSY 16
+#define EOPNOTSUPP 95
+#define ETIMEDOUT 110
+
+#ifndef __arena
+#define __arena __attribute__((address_space(1)))
+#endif
+
+extern unsigned long CONFIG_NR_CPUS __kconfig;
+
+/*
+ * Typically, we'd just rely on the definition in vmlinux.h for qspinlock, but
+ * PowerPC overrides the definition to define lock->val as u32 instead of
+ * atomic_t, leading to compilation errors. Import a local definition below so
+ * that we don't depend on the vmlinux.h version.
+ */
+
+struct __qspinlock {
+ union {
+ atomic_t val;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ struct {
+ u8 locked;
+ u8 pending;
+ };
+ struct {
+ u16 locked_pending;
+ u16 tail;
+ };
+#else
+ struct {
+ u16 tail;
+ u16 locked_pending;
+ };
+ struct {
+ u8 reserved[2];
+ u8 pending;
+ u8 locked;
+ };
+#endif
+ };
+};
+
+#define arena_spinlock_t struct __qspinlock
+/* FIXME: Using typedef causes CO-RE relocation error */
+/* typedef struct qspinlock arena_spinlock_t; */
+
+struct arena_mcs_spinlock {
+ struct arena_mcs_spinlock __arena *next;
+ int locked;
+ int count;
+};
+
+struct arena_qnode {
+ struct arena_mcs_spinlock mcs;
+};
+
+#define _Q_MAX_NODES 4
+#define _Q_PENDING_LOOPS 1
+
+/*
+ * Bitfields in the atomic value:
+ *
+ * 0- 7: locked byte
+ * 8: pending
+ * 9-15: not used
+ * 16-17: tail index
+ * 18-31: tail cpu (+1)
+ */
+#define _Q_MAX_CPUS 1024
+
+#define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\
+ << _Q_ ## type ## _OFFSET)
+#define _Q_LOCKED_OFFSET 0
+#define _Q_LOCKED_BITS 8
+#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED)
+
+#define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
+#define _Q_PENDING_BITS 8
+#define _Q_PENDING_MASK _Q_SET_MASK(PENDING)
+
+#define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
+#define _Q_TAIL_IDX_BITS 2
+#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX)
+
+#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
+#define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET)
+#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU)
+
+#define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET
+#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
+#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)
+#define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET)
+
+struct arena_qnode __arena qnodes[_Q_MAX_CPUS][_Q_MAX_NODES];
+
+static inline u32 encode_tail(int cpu, int idx)
+{
+ u32 tail;
+
+ tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
+ tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */
+
+ return tail;
+}
+
+static inline struct arena_mcs_spinlock __arena *decode_tail(u32 tail)
+{
+ u32 cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
+ u32 idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
+
+ return &qnodes[cpu][idx].mcs;
+}
+
+static inline
+struct arena_mcs_spinlock __arena *grab_mcs_node(struct arena_mcs_spinlock __arena *base, int idx)
+{
+ return &((struct arena_qnode __arena *)base + idx)->mcs;
+}
+
+#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
+
+/**
+ * xchg_tail - Put in the new queue tail code word & retrieve previous one
+ * @lock : Pointer to queued spinlock structure
+ * @tail : The new queue tail code word
+ * Return: The previous queue tail code word
+ *
+ * xchg(lock, tail)
+ *
+ * p,*,* -> n,*,* ; prev = xchg(lock, node)
+ */
+static __always_inline u32 xchg_tail(arena_spinlock_t __arena *lock, u32 tail)
+{
+ u32 old, new;
+
+ old = atomic_read(&lock->val);
+ do {
+ new = (old & _Q_LOCKED_PENDING_MASK) | tail;
+ /*
+ * We can use relaxed semantics since the caller ensures that
+ * the MCS node is properly initialized before updating the
+ * tail.
+ */
+ /* These loops are not expected to stall, but we still need to
+ * prove to the verifier they will terminate eventually.
+ */
+ cond_break_label(out);
+ } while (!atomic_try_cmpxchg_relaxed(&lock->val, &old, new));
+
+ return old;
+out:
+ bpf_printk("RUNTIME ERROR: %s unexpected cond_break exit!!!", __func__);
+ return old;
+}
+
+/**
+ * clear_pending - clear the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,1,* -> *,0,*
+ */
+static __always_inline void clear_pending(arena_spinlock_t __arena *lock)
+{
+ WRITE_ONCE(lock->pending, 0);
+}
+
+/**
+ * clear_pending_set_locked - take ownership and clear the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,1,0 -> *,0,1
+ *
+ * Lock stealing is not allowed if this function is used.
+ */
+static __always_inline void clear_pending_set_locked(arena_spinlock_t __arena *lock)
+{
+ WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
+}
+
+/**
+ * set_locked - Set the lock bit and own the lock
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,*,0 -> *,0,1
+ */
+static __always_inline void set_locked(arena_spinlock_t __arena *lock)
+{
+ WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
+}
+
+static __always_inline
+u32 arena_fetch_set_pending_acquire(arena_spinlock_t __arena *lock)
+{
+ u32 old, new;
+
+ old = atomic_read(&lock->val);
+ do {
+ new = old | _Q_PENDING_VAL;
+ /*
+ * These loops are not expected to stall, but we still need to
+ * prove to the verifier they will terminate eventually.
+ */
+ cond_break_label(out);
+ } while (!atomic_try_cmpxchg_acquire(&lock->val, &old, new));
+
+ return old;
+out:
+ bpf_printk("RUNTIME ERROR: %s unexpected cond_break exit!!!", __func__);
+ return old;
+}
+
+/**
+ * arena_spin_trylock - try to acquire the queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int arena_spin_trylock(arena_spinlock_t __arena *lock)
+{
+ int val = atomic_read(&lock->val);
+
+ if (unlikely(val))
+ return 0;
+
+ return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
+}
+
+__noinline
+int arena_spin_lock_slowpath(arena_spinlock_t __arena __arg_arena *lock, u32 val)
+{
+ struct arena_mcs_spinlock __arena *prev, *next, *node0, *node;
+ int ret = -ETIMEDOUT;
+ u32 old, tail;
+ int idx;
+
+ /*
+ * Wait for in-progress pending->locked hand-overs with a bounded
+ * number of spins so that we guarantee forward progress.
+ *
+ * 0,1,0 -> 0,0,1
+ */
+ if (val == _Q_PENDING_VAL) {
+ int cnt = _Q_PENDING_LOOPS;
+ val = atomic_cond_read_relaxed_label(&lock->val,
+ (VAL != _Q_PENDING_VAL) || !cnt--,
+ release_err);
+ }
+
+ /*
+ * If we observe any contention; queue.
+ */
+ if (val & ~_Q_LOCKED_MASK)
+ goto queue;
+
+ /*
+ * trylock || pending
+ *
+ * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock
+ */
+ val = arena_fetch_set_pending_acquire(lock);
+
+ /*
+ * If we observe contention, there is a concurrent locker.
+ *
+ * Undo and queue; our setting of PENDING might have made the
+ * n,0,0 -> 0,0,0 transition fail and it will now be waiting
+ * on @next to become !NULL.
+ */
+ if (unlikely(val & ~_Q_LOCKED_MASK)) {
+
+ /* Undo PENDING if we set it. */
+ if (!(val & _Q_PENDING_MASK))
+ clear_pending(lock);
+
+ goto queue;
+ }
+
+ /*
+ * We're pending, wait for the owner to go away.
+ *
+ * 0,1,1 -> *,1,0
+ *
+ * this wait loop must be a load-acquire such that we match the
+ * store-release that clears the locked bit and create lock
+ * sequentiality; this is because not all
+ * clear_pending_set_locked() implementations imply full
+ * barriers.
+ */
+ if (val & _Q_LOCKED_MASK)
+ (void)smp_cond_load_acquire_label(&lock->locked, !VAL, release_err);
+
+ /*
+ * take ownership and clear the pending bit.
+ *
+ * 0,1,0 -> 0,0,1
+ */
+ clear_pending_set_locked(lock);
+ return 0;
+
+ /*
+ * End of pending bit optimistic spinning and beginning of MCS
+ * queuing.
+ */
+queue:
+ node0 = &(qnodes[bpf_get_smp_processor_id()])[0].mcs;
+ idx = node0->count++;
+ tail = encode_tail(bpf_get_smp_processor_id(), idx);
+
+ /*
+	 * 4 nodes are allocated based on the assumption that there will not be
+	 * nested NMIs taking spinlocks. That may not be true on some
+	 * architectures, even though the chance of needing more than 4 nodes
+	 * is still extremely unlikely. When that happens, we simply return
+	 * an error. The original qspinlock has a trylock fallback in this case.
+ */
+ if (unlikely(idx >= _Q_MAX_NODES)) {
+ ret = -EBUSY;
+ goto release_node_err;
+ }
+
+ node = grab_mcs_node(node0, idx);
+
+ /*
+ * Ensure that we increment the head node->count before initialising
+ * the actual node. If the compiler is kind enough to reorder these
+ * stores, then an IRQ could overwrite our assignments.
+ */
+ barrier();
+
+ node->locked = 0;
+ node->next = NULL;
+
+ /*
+ * We touched a (possibly) cold cacheline in the per-cpu queue node;
+ * attempt the trylock once more in the hope someone let go while we
+ * weren't watching.
+ */
+ if (arena_spin_trylock(lock))
+ goto release;
+
+ /*
+ * Ensure that the initialisation of @node is complete before we
+ * publish the updated tail via xchg_tail() and potentially link
+ * @node into the waitqueue via WRITE_ONCE(prev->next, node) below.
+ */
+ smp_wmb();
+
+ /*
+ * Publish the updated tail.
+ * We have already touched the queueing cacheline; don't bother with
+ * pending stuff.
+ *
+ * p,*,* -> n,*,*
+ */
+ old = xchg_tail(lock, tail);
+ next = NULL;
+
+ /*
+ * if there was a previous node; link it and wait until reaching the
+ * head of the waitqueue.
+ */
+ if (old & _Q_TAIL_MASK) {
+ prev = decode_tail(old);
+
+ /* Link @node into the waitqueue. */
+ WRITE_ONCE(prev->next, node);
+
+ (void)arch_mcs_spin_lock_contended_label(&node->locked, release_node_err);
+
+ /*
+ * While waiting for the MCS lock, the next pointer may have
+ * been set by another lock waiter. We cannot prefetch here
+		 * due to the lack of an equivalent instruction in the BPF ISA.
+ */
+ next = READ_ONCE(node->next);
+ }
+
+ /*
+ * we're at the head of the waitqueue, wait for the owner & pending to
+ * go away.
+ *
+ * *,x,y -> *,0,0
+ *
+ * this wait loop must use a load-acquire such that we match the
+ * store-release that clears the locked bit and create lock
+ * sequentiality; this is because the set_locked() function below
+ * does not imply a full barrier.
+ */
+ val = atomic_cond_read_acquire_label(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK),
+ release_node_err);
+
+ /*
+ * claim the lock:
+ *
+ * n,0,0 -> 0,0,1 : lock, uncontended
+ * *,*,0 -> *,*,1 : lock, contended
+ *
+ * If the queue head is the only one in the queue (lock value == tail)
+ * and nobody is pending, clear the tail code and grab the lock.
+ * Otherwise, we only need to grab the lock.
+ */
+
+ /*
+ * In the PV case we might already have _Q_LOCKED_VAL set, because
+ * of lock stealing; therefore we must also allow:
+ *
+ * n,0,1 -> 0,0,1
+ *
+ * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
+ * above wait condition, therefore any concurrent setting of
+ * PENDING will make the uncontended transition fail.
+ */
+ if ((val & _Q_TAIL_MASK) == tail) {
+ if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
+ goto release; /* No contention */
+ }
+
+ /*
+ * Either somebody is queued behind us or _Q_PENDING_VAL got set
+ * which will then detect the remaining tail and queue behind us
+ * ensuring we'll see a @next.
+ */
+ set_locked(lock);
+
+ /*
+ * contended path; wait for next if not observed yet, release.
+ */
+ if (!next)
+ next = smp_cond_load_relaxed_label(&node->next, (VAL), release_node_err);
+
+ arch_mcs_spin_unlock_contended(&next->locked);
+
+release:;
+ /*
+ * release the node
+ *
+	 * Doing a normal dec vs this_cpu_dec is fine. An upper context always
+	 * decrements the count it incremented before returning, so we're fine.
+	 * Contexts interrupting us either observe our dec or they don't.
+	 * Just ensure the compiler doesn't reorder this statement, as
+	 * this_cpu_dec would have implied that ordering implicitly.
+ */
+ barrier();
+ node0->count--;
+ return 0;
+release_node_err:
+ barrier();
+ node0->count--;
+ goto release_err;
+release_err:
+ return ret;
+}
+
+/**
+ * arena_spin_lock - acquire a queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ *
+ * On error, returned value will be negative.
+ * On success, zero is returned.
+ *
+ * The return value _must_ be tested against zero for success,
+ * rather than against a negative value, so that the program
+ * passes the BPF verifier.
+ *
+ * The user should do:
+ * if (arena_spin_lock(...) != 0) // failure
+ * or
+ * if (arena_spin_lock(...) == 0) // success
+ * or
+ * if (arena_spin_lock(...)) // failure
+ * or
+ * if (!arena_spin_lock(...)) // success
+ * instead of:
+ * if (arena_spin_lock(...) < 0) // failure
+ *
+ * The return value can still be inspected later.
+ */
+static __always_inline int arena_spin_lock(arena_spinlock_t __arena *lock)
+{
+ int val = 0;
+
+ if (CONFIG_NR_CPUS > 1024)
+ return -EOPNOTSUPP;
+
+ bpf_preempt_disable();
+ if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
+ return 0;
+
+ val = arena_spin_lock_slowpath(lock, val);
+ /* FIXME: bpf_assert_range(-MAX_ERRNO, 0) once we have it working for all cases. */
+ if (val)
+ bpf_preempt_enable();
+ return val;
+}
+
+/**
+ * arena_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static __always_inline void arena_spin_unlock(arena_spinlock_t __arena *lock)
+{
+ /*
+ * unlock() needs release semantics:
+ */
+ smp_store_release(&lock->locked, 0);
+ bpf_preempt_enable();
+}
+
+#define arena_spin_lock_irqsave(lock, flags) \
+ ({ \
+ int __ret; \
+ bpf_local_irq_save(&(flags)); \
+ __ret = arena_spin_lock((lock)); \
+ if (__ret) \
+ bpf_local_irq_restore(&(flags)); \
+ (__ret); \
+ })
+
+#define arena_spin_unlock_irqrestore(lock, flags) \
+ ({ \
+ arena_spin_unlock((lock)); \
+ bpf_local_irq_restore(&(flags)); \
+ })
+
+#endif
+
+#endif /* BPF_ARENA_SPIN_LOCK_H */
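
A worked example of the tail code word, using the bit layout documented above (with _Q_TAIL_IDX_OFFSET == 16 and _Q_TAIL_CPU_OFFSET == 18); not part of the patch:

	u32 tail = encode_tail(5, 2);	/* (5 + 1) << 18 | 2 << 16 == 0x1a0000 */

	u32 cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;			/* 5 */
	u32 idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;	/* 2 */
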
diff --git a/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c
new file mode 100644
index 000000000000..9af19dfe4e80
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Highlights:
+ * 1. The major difference between this bpf program and tcp_cubic.c
+ * is that this bpf program relies on `cong_control` rather than
+ * `cong_avoid` in the struct tcp_congestion_ops.
+ * 2. Logic such as tcp_cwnd_reduction, tcp_cong_avoid, and
+ *    tcp_update_pacing_rate is bypassed when `cong_control` is
+ *    defined, so that logic is moved into `cong_control` here.
+ * 3. WARNING: This bpf program is NOT the same as tcp_cubic.c.
+ *    The main purpose is to show use cases of the arguments in
+ *    `cong_control`. For simplicity's sake, it reuses TCP CUBIC's
+ *    kernel functions.
+ */
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define USEC_PER_SEC 1000000UL
+#define TCP_PACING_SS_RATIO (200)
+#define TCP_PACING_CA_RATIO (120)
+#define TCP_REORDERING (12)
+
+extern void cubictcp_init(struct sock *sk) __ksym;
+extern void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) __ksym;
+extern __u32 cubictcp_recalc_ssthresh(struct sock *sk) __ksym;
+extern void cubictcp_state(struct sock *sk, __u8 new_state) __ksym;
+extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
+extern void cubictcp_acked(struct sock *sk, const struct ack_sample *sample) __ksym;
+extern void cubictcp_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
+
+static __u64 div64_u64(__u64 dividend, __u64 divisor)
+{
+ return dividend / divisor;
+}
+
+static void tcp_update_pacing_rate(struct sock *sk)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ __u64 rate;
+
+ /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
+ rate = (__u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3);
+
+ /* current rate is (cwnd * mss) / srtt
+	 * In Slow Start [1], set sk_pacing_rate to 200 % of the current rate.
+	 * In the Congestion Avoidance phase, set it to 120 % of the current rate.
+	 *
+	 * [1] : The normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh).
+	 *	 If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching the
+	 *	 end of slow start and should slow down.
+ */
+ if (tp->snd_cwnd < tp->snd_ssthresh / 2)
+ rate *= TCP_PACING_SS_RATIO;
+ else
+ rate *= TCP_PACING_CA_RATIO;
+
+ rate *= max(tp->snd_cwnd, tp->packets_out);
+
+ if (tp->srtt_us)
+ rate = div64_u64(rate, (__u64)tp->srtt_us);
+
+ sk->sk_pacing_rate = min(rate, sk->sk_max_pacing_rate);
+}
+
+static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
+ int newly_lost, int flag)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int sndcnt = 0;
+ __u32 pkts_in_flight = tp->packets_out - (tp->sacked_out + tp->lost_out) + tp->retrans_out;
+ int delta = tp->snd_ssthresh - pkts_in_flight;
+
+ if (newly_acked_sacked <= 0 || !tp->prior_cwnd)
+ return;
+
+ __u32 prr_delivered = tp->prr_delivered + newly_acked_sacked;
+
+ if (delta < 0) {
+ __u64 dividend =
+ (__u64)tp->snd_ssthresh * prr_delivered + tp->prior_cwnd - 1;
+ sndcnt = (__u32)div64_u64(dividend, (__u64)tp->prior_cwnd) - tp->prr_out;
+ } else {
+ sndcnt = max(prr_delivered - tp->prr_out, newly_acked_sacked);
+ if (flag & FLAG_SND_UNA_ADVANCED && !newly_lost)
+ sndcnt++;
+ sndcnt = min(delta, sndcnt);
+ }
+ /* Force a fast retransmit upon entering fast recovery */
+ sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1));
+ tp->snd_cwnd = pkts_in_flight + sndcnt;
+}
+
+/* Decide whether to run the increase function of congestion control. */
+static bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
+{
+ if (tcp_sk(sk)->reordering > TCP_REORDERING)
+ return flag & FLAG_FORWARD_PROGRESS;
+
+ return flag & FLAG_DATA_ACKED;
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_init, struct sock *sk)
+{
+ cubictcp_init(sk);
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
+{
+ cubictcp_cwnd_event(sk, event);
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_cong_control, struct sock *sk, __u32 ack, int flag,
+ const struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (((1<<TCP_CA_CWR) | (1<<TCP_CA_Recovery)) &
+ (1 << inet_csk(sk)->icsk_ca_state)) {
+ /* Reduce cwnd if state mandates */
+ tcp_cwnd_reduction(sk, rs->acked_sacked, rs->losses, flag);
+
+ if (!before(tp->snd_una, tp->high_seq)) {
+ /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
+ if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
+ inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
+ tp->snd_cwnd = tp->snd_ssthresh;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
+ }
+ }
+ } else if (tcp_may_raise_cwnd(sk, flag)) {
+ /* Advance cwnd if state allows */
+ cubictcp_cong_avoid(sk, ack, rs->acked_sacked);
+ tp->snd_cwnd_stamp = tcp_jiffies32;
+ }
+
+ tcp_update_pacing_rate(sk);
+}
+
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_recalc_ssthresh, struct sock *sk)
+{
+ return cubictcp_recalc_ssthresh(sk);
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_state, struct sock *sk, __u8 new_state)
+{
+ cubictcp_state(sk, new_state);
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_acked, struct sock *sk, const struct ack_sample *sample)
+{
+ cubictcp_acked(sk, sample);
+}
+
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_undo_cwnd, struct sock *sk)
+{
+ return tcp_reno_undo_cwnd(sk);
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops cc_cubic = {
+ .init = (void *)bpf_cubic_init,
+ .ssthresh = (void *)bpf_cubic_recalc_ssthresh,
+ .cong_control = (void *)bpf_cubic_cong_control,
+ .set_state = (void *)bpf_cubic_state,
+ .undo_cwnd = (void *)bpf_cubic_undo_cwnd,
+ .cwnd_event = (void *)bpf_cubic_cwnd_event,
+ .pkts_acked = (void *)bpf_cubic_acked,
+ .name = "bpf_cc_cubic",
+};
+
+char _license[] SEC("license") = "GPL";
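
A hedged sketch of how a struct_ops CC like this is typically exercised from userspace: register it through libbpf, then select it per socket (the skeleton handle is an assumption):

	struct bpf_link *link;

	link = bpf_map__attach_struct_ops(skel->maps.cc_cubic);	/* registers "bpf_cc_cubic" */
	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
		   "bpf_cc_cubic", strlen("bpf_cc_cubic"));
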
diff --git a/tools/testing/selftests/bpf/progs/bpf_compiler.h b/tools/testing/selftests/bpf/progs/bpf_compiler.h
new file mode 100644
index 000000000000..a7c343dc82e6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_compiler.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BPF_COMPILER_H__
+#define __BPF_COMPILER_H__
+
+#define DO_PRAGMA_(X) _Pragma(#X)
+
+#if __clang__
+#define __pragma_loop_unroll DO_PRAGMA_(clang loop unroll(enable))
+#else
+/* In GCC, -funroll-loops, which is enabled with -O2, should have the
+   same effect as the loop-unroll-enable pragma above. */
+#define __pragma_loop_unroll
+#endif
+
+#if __clang__
+#define __pragma_loop_unroll_count(N) DO_PRAGMA_(clang loop unroll_count(N))
+#else
+#define __pragma_loop_unroll_count(N) DO_PRAGMA_(GCC unroll N)
+#endif
+
+#if __clang__
+#define __pragma_loop_unroll_full DO_PRAGMA_(clang loop unroll(full))
+#else
+#define __pragma_loop_unroll_full DO_PRAGMA_(GCC unroll 65534)
+#endif
+
+#if __clang__
+#define __pragma_loop_no_unroll DO_PRAGMA_(clang loop unroll(disable))
+#else
+#define __pragma_loop_no_unroll DO_PRAGMA_(GCC unroll 1)
+#endif
+
+#endif
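
Usage is uniform across compilers: place the macro directly before the loop statement. A minimal sketch, assuming this header is on the include path:

	#include "bpf_compiler.h"

	int sum16(const int *a)
	{
		int i, s = 0;

		__pragma_loop_unroll_full
		for (i = 0; i < 16; i++)
			s += a[i];
		return s;
	}
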
diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c
index 6939bfd8690f..46fb2b37d3a7 100644
--- a/tools/testing/selftests/bpf/progs/bpf_cubic.c
+++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* WARNING: This implemenation is not necessarily the same
+/* WARNING: This implementation is not necessarily the same
* as the tcp_cubic.c. The purpose is mainly for testing
* the kernel BPF logic.
*
@@ -14,15 +14,16 @@
* "ca->ack_cnt / delta" operation.
*/
-#include <linux/bpf.h>
-#include <linux/stddef.h>
-#include <linux/tcp.h>
-#include "bpf_tcp_helpers.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
+extern __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked) __ksym;
+extern void tcp_cong_avoid_ai(struct tcp_sock *tp, __u32 w, __u32 acked) __ksym;
+
#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation
* max_cwnd = snd_cwnd * beta
*/
@@ -70,7 +71,7 @@ static const __u64 cube_factor = (__u64)(1ull << (10+3*BICTCP_HZ))
/ (bic_scale * 10);
/* BIC TCP Parameters */
-struct bictcp {
+struct bpf_bictcp {
__u32 cnt; /* increase cwnd by 1 after ACKs */
__u32 last_max_cwnd; /* last maximum snd_cwnd */
__u32 last_cwnd; /* the last snd_cwnd */
@@ -91,7 +92,7 @@ struct bictcp {
__u32 curr_rtt; /* the minimum rtt of current round */
};
-static inline void bictcp_reset(struct bictcp *ca)
+static void bictcp_reset(struct bpf_bictcp *ca)
{
ca->cnt = 0;
ca->last_max_cwnd = 0;
@@ -112,7 +113,7 @@ extern unsigned long CONFIG_HZ __kconfig;
#define USEC_PER_SEC 1000000UL
#define USEC_PER_JIFFY (USEC_PER_SEC / HZ)
-static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
+static __u64 div64_u64(__u64 dividend, __u64 divisor)
{
return dividend / divisor;
}
@@ -120,7 +121,7 @@ static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
#define div64_ul div64_u64
#define BITS_PER_U64 (sizeof(__u64) * 8)
-static __always_inline int fls64(__u64 x)
+static int fls64(__u64 x)
{
int num = BITS_PER_U64 - 1;
@@ -153,15 +154,15 @@ static __always_inline int fls64(__u64 x)
return num + 1;
}
-static __always_inline __u32 bictcp_clock_us(const struct sock *sk)
+static __u32 bictcp_clock_us(const struct sock *sk)
{
return tcp_sk(sk)->tcp_mstamp;
}
-static __always_inline void bictcp_hystart_reset(struct sock *sk)
+static void bictcp_hystart_reset(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
+ struct bpf_bictcp *ca = inet_csk_ca(sk);
ca->round_start = ca->last_ack = bictcp_clock_us(sk);
ca->end_seq = tp->snd_nxt;
@@ -169,15 +170,10 @@ static __always_inline void bictcp_hystart_reset(struct sock *sk)
ca->sample_cnt = 0;
}
-/* "struct_ops/" prefix is not a requirement
- * It will be recognized as BPF_PROG_TYPE_STRUCT_OPS
- * as long as it is used in one of the func ptr
- * under SEC(".struct_ops").
- */
-SEC("struct_ops/bictcp_init")
-void BPF_PROG(bictcp_init, struct sock *sk)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_init, struct sock *sk)
{
- struct bictcp *ca = inet_csk_ca(sk);
+ struct bpf_bictcp *ca = inet_csk_ca(sk);
bictcp_reset(ca);
@@ -188,14 +184,11 @@ void BPF_PROG(bictcp_init, struct sock *sk)
tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}
-/* No prefix in SEC will also work.
- * The remaining tcp-cubic functions have an easier way.
- */
-SEC("no-sec-prefix-bictcp_cwnd_event")
-void BPF_PROG(bictcp_cwnd_event, struct sock *sk, enum tcp_ca_event event)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
{
if (event == CA_EVENT_TX_START) {
- struct bictcp *ca = inet_csk_ca(sk);
+ struct bpf_bictcp *ca = inet_csk_ca(sk);
__u32 now = tcp_jiffies32;
__s32 delta;
@@ -236,7 +229,7 @@ static const __u8 v[] = {
* Newton-Raphson iteration.
* Avg err ~= 0.195%
*/
-static __always_inline __u32 cubic_root(__u64 a)
+static __u32 cubic_root(__u64 a)
{
__u32 x, b, shift;
@@ -269,8 +262,7 @@ static __always_inline __u32 cubic_root(__u64 a)
/*
* Compute congestion window to use.
*/
-static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
- __u32 acked)
+static void bictcp_update(struct bpf_bictcp *ca, __u32 cwnd, __u32 acked)
{
__u32 delta, bic_target, max_cnt;
__u64 offs, t;
@@ -315,7 +307,7 @@ static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
* (so time^3 is done by using 64 bit)
* and without the support of division of 64bit numbers
* (so all divisions are done by using 32 bit)
- * also NOTE the unit of those veriables
+ * also NOTE the unit of those variables
* time = (t - K) / 2^bictcp_HZ
* c = bic_scale >> 10
* rtt = (srtt >> 3) / HZ
@@ -383,11 +375,11 @@ tcp_friendliness:
ca->cnt = max(ca->cnt, 2U);
}
-/* Or simply use the BPF_STRUCT_OPS to avoid the SEC boiler plate. */
-void BPF_STRUCT_OPS(bictcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
+ struct bpf_bictcp *ca = inet_csk_ca(sk);
if (!tcp_is_cwnd_limited(sk))
return;
@@ -403,10 +395,11 @@ void BPF_STRUCT_OPS(bictcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
tcp_cong_avoid_ai(tp, ca->cnt, acked);
}
-__u32 BPF_STRUCT_OPS(bictcp_recalc_ssthresh, struct sock *sk)
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_recalc_ssthresh, struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
+ struct bpf_bictcp *ca = inet_csk_ca(sk);
ca->epoch_start = 0; /* end of epoch */
@@ -420,7 +413,8 @@ __u32 BPF_STRUCT_OPS(bictcp_recalc_ssthresh, struct sock *sk)
return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}
-void BPF_STRUCT_OPS(bictcp_state, struct sock *sk, __u8 new_state)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_state, struct sock *sk, __u8 new_state)
{
if (new_state == TCP_CA_Loss) {
bictcp_reset(inet_csk_ca(sk));
@@ -439,7 +433,7 @@ void BPF_STRUCT_OPS(bictcp_state, struct sock *sk, __u8 new_state)
* We apply another 100% factor because @rate is doubled at this point.
* We cap the cushion to 1ms.
*/
-static __always_inline __u32 hystart_ack_delay(struct sock *sk)
+static __u32 hystart_ack_delay(struct sock *sk)
{
unsigned long rate;
@@ -450,10 +444,10 @@ static __always_inline __u32 hystart_ack_delay(struct sock *sk)
div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
}
-static __always_inline void hystart_update(struct sock *sk, __u32 delay)
+static void hystart_update(struct sock *sk, __u32 delay)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
+ struct bpf_bictcp *ca = inet_csk_ca(sk);
__u32 threshold;
if (hystart_detect & HYSTART_ACK_TRAIN) {
@@ -496,14 +490,17 @@ static __always_inline void hystart_update(struct sock *sk, __u32 delay)
}
}
-void BPF_STRUCT_OPS(bictcp_acked, struct sock *sk,
- const struct ack_sample *sample)
+int bpf_cubic_acked_called = 0;
+
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_acked, struct sock *sk, const struct ack_sample *sample)
{
const struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
+ struct bpf_bictcp *ca = inet_csk_ca(sk);
__u32 delay;
- /* Some calls are for duplicates without timetamps */
+ bpf_cubic_acked_called = 1;
+ /* Some calls are for duplicates without timestamps */
if (sample->rtt_us < 0)
return;
@@ -525,21 +522,22 @@ void BPF_STRUCT_OPS(bictcp_acked, struct sock *sk,
hystart_update(sk, delay);
}
-__u32 BPF_STRUCT_OPS(tcp_reno_undo_cwnd, struct sock *sk)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
+extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
- return max(tp->snd_cwnd, tp->prior_cwnd);
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_undo_cwnd, struct sock *sk)
+{
+ return tcp_reno_undo_cwnd(sk);
}
SEC(".struct_ops")
struct tcp_congestion_ops cubic = {
- .init = (void *)bictcp_init,
- .ssthresh = (void *)bictcp_recalc_ssthresh,
- .cong_avoid = (void *)bictcp_cong_avoid,
- .set_state = (void *)bictcp_state,
- .undo_cwnd = (void *)tcp_reno_undo_cwnd,
- .cwnd_event = (void *)bictcp_cwnd_event,
- .pkts_acked = (void *)bictcp_acked,
+ .init = (void *)bpf_cubic_init,
+ .ssthresh = (void *)bpf_cubic_recalc_ssthresh,
+ .cong_avoid = (void *)bpf_cubic_cong_avoid,
+ .set_state = (void *)bpf_cubic_state,
+ .undo_cwnd = (void *)bpf_cubic_undo_cwnd,
+ .cwnd_event = (void *)bpf_cubic_cwnd_event,
+ .pkts_acked = (void *)bpf_cubic_acked,
.name = "bpf_cubic",
};
diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
index 4dc1a967776a..1cc83140849f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c
+++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
@@ -1,23 +1,32 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
-/* WARNING: This implemenation is not necessarily the same
+/* WARNING: This implementation is not necessarily the same
* as the tcp_dctcp.c. The purpose is mainly for testing
* the kernel BPF logic.
*/
-#include <stddef.h>
-#include <linux/bpf.h>
-#include <linux/types.h>
-#include <linux/stddef.h>
-#include <linux/tcp.h>
+#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "bpf_tcp_helpers.h"
+
+#ifndef EBUSY
+#define EBUSY 16
+#endif
+#define min_not_zero(x, y) ({ \
+ typeof(x) __x = (x); \
+ typeof(y) __y = (y); \
+ __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
char _license[] SEC("license") = "GPL";
+volatile const char fallback_cc[TCP_CA_NAME_MAX];
+const char bpf_dctcp[] = "bpf_dctcp";
+const char tcp_cdg[] = "cdg";
+char cc_res[TCP_CA_NAME_MAX];
+int tcp_cdg_res = 0;
int stg_result = 0;
+int ebusy_cnt = 0;
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
@@ -28,7 +37,7 @@ struct {
#define DCTCP_MAX_ALPHA 1024U
-struct dctcp {
+struct bpf_dctcp {
__u32 old_delivered;
__u32 old_delivered_ce;
__u32 prior_rcv_nxt;
@@ -41,8 +50,7 @@ struct dctcp {
static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */
static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA;
-static __always_inline void dctcp_reset(const struct tcp_sock *tp,
- struct dctcp *ca)
+static void dctcp_reset(const struct tcp_sock *tp, struct bpf_dctcp *ca)
{
ca->next_seq = tp->snd_nxt;
@@ -50,13 +58,40 @@ static __always_inline void dctcp_reset(const struct tcp_sock *tp,
ca->old_delivered_ce = tp->delivered_ce;
}
-SEC("struct_ops/dctcp_init")
-void BPF_PROG(dctcp_init, struct sock *sk)
+SEC("struct_ops")
+void BPF_PROG(bpf_dctcp_init, struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
- struct dctcp *ca = inet_csk_ca(sk);
+ struct bpf_dctcp *ca = inet_csk_ca(sk);
int *stg;
+ if (!(tp->ecn_flags & TCP_ECN_OK) && fallback_cc[0]) {
+ /* Switch to fallback */
+ if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+ (void *)fallback_cc, sizeof(fallback_cc)) == -EBUSY)
+ ebusy_cnt++;
+
+	/* Switch back to myself; the recursively invoked bpf_dctcp_init()
+	 * will get -EBUSY for every bpf_setsockopt(TCP_CONGESTION),
+ * except the last "cdg" one.
+ */
+ if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+ (void *)bpf_dctcp, sizeof(bpf_dctcp)) == -EBUSY)
+ ebusy_cnt++;
+
+ /* Switch back to fallback */
+ if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+ (void *)fallback_cc, sizeof(fallback_cc)) == -EBUSY)
+ ebusy_cnt++;
+
+ /* Expecting -ENOTSUPP for tcp_cdg_res */
+ tcp_cdg_res = bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+ (void *)tcp_cdg, sizeof(tcp_cdg));
+ bpf_getsockopt(sk, SOL_TCP, TCP_CONGESTION,
+ (void *)cc_res, sizeof(cc_res));
+ return;
+ }
+
ca->prior_rcv_nxt = tp->rcv_nxt;
ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
ca->loss_cwnd = 0;
@@ -70,21 +105,21 @@ void BPF_PROG(dctcp_init, struct sock *sk)
dctcp_reset(tp, ca);
}
-SEC("struct_ops/dctcp_ssthresh")
-__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_dctcp_ssthresh, struct sock *sk)
{
- struct dctcp *ca = inet_csk_ca(sk);
+ struct bpf_dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
ca->loss_cwnd = tp->snd_cwnd;
return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
}
-SEC("struct_ops/dctcp_update_alpha")
-void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
+SEC("struct_ops")
+void BPF_PROG(bpf_dctcp_update_alpha, struct sock *sk, __u32 flags)
{
const struct tcp_sock *tp = tcp_sk(sk);
- struct dctcp *ca = inet_csk_ca(sk);
+ struct bpf_dctcp *ca = inet_csk_ca(sk);
/* Expired RTT */
if (!before(tp->snd_una, ca->next_seq)) {
@@ -110,27 +145,27 @@ void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
}
}
-static __always_inline void dctcp_react_to_loss(struct sock *sk)
+static void dctcp_react_to_loss(struct sock *sk)
{
- struct dctcp *ca = inet_csk_ca(sk);
+ struct bpf_dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
ca->loss_cwnd = tp->snd_cwnd;
tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
}
-SEC("struct_ops/dctcp_state")
-void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
+SEC("struct_ops")
+void BPF_PROG(bpf_dctcp_state, struct sock *sk, __u8 new_state)
{
if (new_state == TCP_CA_Recovery &&
new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state))
dctcp_react_to_loss(sk);
- /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
+ /* We handle RTO in bpf_dctcp_cwnd_event to ensure that we perform only
* one loss-adjustment per RTT.
*/
}
-static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
+static void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -145,9 +180,8 @@ static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
* S: 0 <- last pkt was non-CE
* 1 <- last pkt was CE
*/
-static __always_inline
-void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
- __u32 *prior_rcv_nxt, __u32 *ce_state)
+static void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
+ __u32 *prior_rcv_nxt, __u32 *ce_state)
{
__u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;
@@ -167,10 +201,10 @@ void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
dctcp_ece_ack_cwr(sk, new_ce_state);
}
-SEC("struct_ops/dctcp_cwnd_event")
-void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
+SEC("struct_ops")
+void BPF_PROG(bpf_dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
{
- struct dctcp *ca = inet_csk_ca(sk);
+ struct bpf_dctcp *ca = inet_csk_ca(sk);
switch (ev) {
case CA_EVENT_ECN_IS_CE:
@@ -186,49 +220,39 @@ void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
}
}
-SEC("struct_ops/dctcp_cwnd_undo")
-__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_dctcp_cwnd_undo, struct sock *sk)
{
- const struct dctcp *ca = inet_csk_ca(sk);
+ const struct bpf_dctcp *ca = inet_csk_ca(sk);
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}
-SEC("struct_ops/tcp_reno_cong_avoid")
-void BPF_PROG(tcp_reno_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- if (!tcp_is_cwnd_limited(sk))
- return;
+extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
- /* In "safe" area, increase. */
- if (tcp_in_slow_start(tp)) {
- acked = tcp_slow_start(tp, acked);
- if (!acked)
- return;
- }
- /* In dangerous area, increase slowly. */
- tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
+SEC("struct_ops")
+void BPF_PROG(bpf_dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
+{
+ tcp_reno_cong_avoid(sk, ack, acked);
}
SEC(".struct_ops")
struct tcp_congestion_ops dctcp_nouse = {
- .init = (void *)dctcp_init,
- .set_state = (void *)dctcp_state,
+ .init = (void *)bpf_dctcp_init,
+ .set_state = (void *)bpf_dctcp_state,
.flags = TCP_CONG_NEEDS_ECN,
.name = "bpf_dctcp_nouse",
};
SEC(".struct_ops")
struct tcp_congestion_ops dctcp = {
- .init = (void *)dctcp_init,
- .in_ack_event = (void *)dctcp_update_alpha,
- .cwnd_event = (void *)dctcp_cwnd_event,
- .ssthresh = (void *)dctcp_ssthresh,
- .cong_avoid = (void *)tcp_reno_cong_avoid,
- .undo_cwnd = (void *)dctcp_cwnd_undo,
- .set_state = (void *)dctcp_state,
+ .init = (void *)bpf_dctcp_init,
+ .in_ack_event = (void *)bpf_dctcp_update_alpha,
+ .cwnd_event = (void *)bpf_dctcp_cwnd_event,
+ .ssthresh = (void *)bpf_dctcp_ssthresh,
+ .cong_avoid = (void *)bpf_dctcp_cong_avoid,
+ .undo_cwnd = (void *)bpf_dctcp_cwnd_undo,
+ .set_state = (void *)bpf_dctcp_state,
.flags = TCP_CONG_NEEDS_ECN,
.name = "bpf_dctcp",
};
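
Because fallback_cc above is const volatile, it lands in the skeleton's read-only data and must be written before load. A hedged sketch of the userspace side (skeleton handle assumed):

	/* before the skeleton's __load() step: */
	strncpy((char *)skel->rodata->fallback_cc, "cubic", TCP_CA_NAME_MAX - 1);
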
diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp_release.c b/tools/testing/selftests/bpf/progs/bpf_dctcp_release.c
new file mode 100644
index 000000000000..c91763f248b2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_dctcp_release.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+const char cubic[] = "cubic";
+
+SEC("struct_ops")
+void BPF_PROG(dctcp_nouse_release, struct sock *sk)
+{
+ bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+ (void *)cubic, sizeof(cubic));
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops dctcp_rel = {
+ .release = (void *)dctcp_nouse_release,
+ .name = "bpf_dctcp_rel",
+};
diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index 95a5a0778ed7..b04e092fac94 100644
--- a/tools/testing/selftests/bpf/progs/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
@@ -19,9 +19,10 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-int _version SEC("version") = 1;
#define PROG(F) PROG_(F, _##F)
-#define PROG_(NUM, NAME) SEC("flow_dissector/"#NUM) int bpf_func##NAME
+#define PROG_(NUM, NAME) SEC("flow_dissector") int flow_dissector_##NUM
+
+#define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */
/* These are the identifiers of the BPF programs that will be used in tail
* calls. Name is limited to 16 characters, with the terminating character and
@@ -144,6 +145,19 @@ int _dissect(struct __sk_buff *skb)
{
struct bpf_flow_keys *keys = skb->flow_keys;
+ if (keys->n_proto == bpf_htons(ETH_P_IP)) {
+ /* IP traffic from FLOW_CONTINUE_SADDR falls-back to
+ * standard dissector
+ */
+ struct iphdr *iph, _iph;
+
+ iph = bpf_flow_dissect_get_header(skb, sizeof(*iph), &_iph);
+ if (iph && iph->ihl == 5 &&
+ iph->saddr == bpf_htonl(FLOW_CONTINUE_SADDR)) {
+ return BPF_FLOW_DISSECTOR_CONTINUE;
+ }
+ }
+
return parse_eth_proto(skb, keys->n_proto);
}
@@ -323,7 +337,7 @@ PROG(IPV6)(struct __sk_buff *skb)
keys->ip_proto = ip6h->nexthdr;
keys->flow_label = ip6_flowlabel(ip6h);
- if (keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
+ if (keys->flow_label && keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
return export_flow_keys(keys, BPF_OK);
return parse_ipv6_proto(skb, ip6h->nexthdr);
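
As a quick check of the constant above: 0x7f00007f is 127.0.0.127 byte for byte, and because the value is a palindrome it reads the same in host and network byte order. A small sketch:

	#include <arpa/inet.h>
	#include <assert.h>

	int main(void)
	{
		/* inet_addr() returns network byte order; the palindrome
		 * makes the comparison hold on either endianness.
		 */
		assert(inet_addr("127.0.0.127") == htonl(0x7f00007f));
		return 0;
	}
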
diff --git a/tools/testing/selftests/bpf/progs/bpf_gotox.c b/tools/testing/selftests/bpf/progs/bpf_gotox.c
new file mode 100644
index 000000000000..216c71b94c64
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_gotox.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+__u64 in_user;
+__u64 ret_user;
+
+int pid;
+
+/*
+ * Skip all the tests if the compiler doesn't support indirect jumps.
+ *
+ * If the tests are skipped, all functions below are compiled as
+ * dummies, so that the skeleton looks the same and the userspace
+ * program needs no checks other than whether data->skip is set.
+ */
+#ifdef __BPF_FEATURE_GOTOX
+__u64 skip SEC(".data") = 0;
+#else
+__u64 skip = 1;
+#endif
+
+struct simple_ctx {
+ __u64 x;
+};
+
+#ifdef __BPF_FEATURE_GOTOX
+__u64 some_var;
+
+/*
+ * This function adds code that the verifier replaces with a different
+ * number of instructions. This puts additional stress on the
+ * insn_array maps corresponding to indirect jumps.
+ */
+static __always_inline void adjust_insns(__u64 x)
+{
+ some_var ^= x + bpf_jiffies64();
+}
+
+SEC("syscall")
+int one_switch(struct simple_ctx *ctx)
+{
+ switch (ctx->x) {
+ case 0:
+ adjust_insns(ctx->x + 1);
+ ret_user = 2;
+ break;
+ case 1:
+ adjust_insns(ctx->x + 7);
+ ret_user = 3;
+ break;
+ case 2:
+ adjust_insns(ctx->x + 9);
+ ret_user = 4;
+ break;
+ case 3:
+ adjust_insns(ctx->x + 11);
+ ret_user = 5;
+ break;
+ case 4:
+ adjust_insns(ctx->x + 17);
+ ret_user = 7;
+ break;
+ default:
+ adjust_insns(ctx->x + 177);
+ ret_user = 19;
+ break;
+ }
+
+ return 0;
+}
+
+SEC("syscall")
+int one_switch_non_zero_sec_off(struct simple_ctx *ctx)
+{
+ switch (ctx->x) {
+ case 0:
+ adjust_insns(ctx->x + 1);
+ ret_user = 2;
+ break;
+ case 1:
+ adjust_insns(ctx->x + 7);
+ ret_user = 3;
+ break;
+ case 2:
+ adjust_insns(ctx->x + 9);
+ ret_user = 4;
+ break;
+ case 3:
+ adjust_insns(ctx->x + 11);
+ ret_user = 5;
+ break;
+ case 4:
+ adjust_insns(ctx->x + 17);
+ ret_user = 7;
+ break;
+ default:
+ adjust_insns(ctx->x + 177);
+ ret_user = 19;
+ break;
+ }
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int simple_test_other_sec(struct pt_regs *ctx)
+{
+ __u64 x = in_user;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ switch (x) {
+ case 0:
+ adjust_insns(x + 1);
+ ret_user = 2;
+ break;
+ case 1:
+ adjust_insns(x + 7);
+ ret_user = 3;
+ break;
+ case 2:
+ adjust_insns(x + 9);
+ ret_user = 4;
+ break;
+ case 3:
+ adjust_insns(x + 11);
+ ret_user = 5;
+ break;
+ case 4:
+ adjust_insns(x + 17);
+ ret_user = 7;
+ break;
+ default:
+ adjust_insns(x + 177);
+ ret_user = 19;
+ break;
+ }
+
+ return 0;
+}
+
+SEC("syscall")
+int two_switches(struct simple_ctx *ctx)
+{
+ switch (ctx->x) {
+ case 0:
+ adjust_insns(ctx->x + 1);
+ ret_user = 2;
+ break;
+ case 1:
+ adjust_insns(ctx->x + 7);
+ ret_user = 3;
+ break;
+ case 2:
+ adjust_insns(ctx->x + 9);
+ ret_user = 4;
+ break;
+ case 3:
+ adjust_insns(ctx->x + 11);
+ ret_user = 5;
+ break;
+ case 4:
+ adjust_insns(ctx->x + 17);
+ ret_user = 7;
+ break;
+ default:
+ adjust_insns(ctx->x + 177);
+ ret_user = 19;
+ break;
+ }
+
+ switch (ctx->x + !!ret_user) {
+ case 1:
+ adjust_insns(ctx->x + 7);
+ ret_user = 103;
+ break;
+ case 2:
+ adjust_insns(ctx->x + 9);
+ ret_user = 104;
+ break;
+ case 3:
+ adjust_insns(ctx->x + 11);
+ ret_user = 107;
+ break;
+ case 4:
+ adjust_insns(ctx->x + 11);
+ ret_user = 205;
+ break;
+ case 5:
+ adjust_insns(ctx->x + 11);
+ ret_user = 115;
+ break;
+ default:
+ adjust_insns(ctx->x + 177);
+ ret_user = 1019;
+ break;
+ }
+
+ return 0;
+}
+
+SEC("syscall")
+int big_jump_table(struct simple_ctx *ctx __attribute__((unused)))
+{
+ const void *const jt[256] = {
+ [0 ... 255] = &&default_label,
+ [0] = &&l0,
+ [11] = &&l11,
+ [27] = &&l27,
+ [31] = &&l31,
+ };
+
+ goto *jt[ctx->x & 0xff];
+
+l0:
+ adjust_insns(ctx->x + 1);
+ ret_user = 2;
+ return 0;
+
+l11:
+ adjust_insns(ctx->x + 7);
+ ret_user = 3;
+ return 0;
+
+l27:
+ adjust_insns(ctx->x + 9);
+ ret_user = 4;
+ return 0;
+
+l31:
+ adjust_insns(ctx->x + 11);
+ ret_user = 5;
+ return 0;
+
+default_label:
+ adjust_insns(ctx->x + 177);
+ ret_user = 19;
+ return 0;
+}
+
+SEC("syscall")
+int one_jump_two_maps(struct simple_ctx *ctx __attribute__((unused)))
+{
+ __label__ l1, l2, l3, l4;
+ void *jt1[2] = { &&l1, &&l2 };
+ void *jt2[2] = { &&l3, &&l4 };
+ unsigned int a = ctx->x % 2;
+ unsigned int b = (ctx->x / 2) % 2;
+ volatile int ret = 0;
+
+ if (!(a < 2 && b < 2))
+ return 19;
+
+ if (ctx->x % 2)
+ goto *jt1[a];
+ else
+ goto *jt2[b];
+
+ l1: ret += 1;
+ l2: ret += 3;
+ l3: ret += 5;
+ l4: ret += 7;
+
+ ret_user = ret;
+ return ret;
+}
+
+SEC("syscall")
+int one_map_two_jumps(struct simple_ctx *ctx __attribute__((unused)))
+{
+ __label__ l1, l2, l3;
+ void *jt[3] = { &&l1, &&l2, &&l3 };
+ unsigned int a = (ctx->x >> 2) & 1;
+ unsigned int b = (ctx->x >> 3) & 1;
+ volatile int ret = 0;
+
+ if (ctx->x % 2)
+ goto *jt[a];
+
+ if (ctx->x % 3)
+ goto *jt[a + b];
+
+ l1: ret += 3;
+ l2: ret += 5;
+ l3: ret += 7;
+
+ ret_user = ret;
+ return ret;
+}
+
+/* Just to introduce some non-zero offsets in .text */
+static __noinline int f0(volatile struct simple_ctx *ctx __arg_ctx)
+{
+ if (ctx)
+ return 1;
+ else
+ return 13;
+}
+
+SEC("syscall") int f1(struct simple_ctx *ctx)
+{
+ ret_user = 0;
+ return f0(ctx);
+}
+
+static __noinline int __static_global(__u64 x)
+{
+ switch (x) {
+ case 0:
+ adjust_insns(x + 1);
+ ret_user = 2;
+ break;
+ case 1:
+ adjust_insns(x + 7);
+ ret_user = 3;
+ break;
+ case 2:
+ adjust_insns(x + 9);
+ ret_user = 4;
+ break;
+ case 3:
+ adjust_insns(x + 11);
+ ret_user = 5;
+ break;
+ case 4:
+ adjust_insns(x + 17);
+ ret_user = 7;
+ break;
+ default:
+ adjust_insns(x + 177);
+ ret_user = 19;
+ break;
+ }
+
+ return 0;
+}
+
+SEC("syscall")
+int use_static_global1(struct simple_ctx *ctx)
+{
+ ret_user = 0;
+ return __static_global(ctx->x);
+}
+
+SEC("syscall")
+int use_static_global2(struct simple_ctx *ctx)
+{
+ ret_user = 0;
+ adjust_insns(ctx->x + 1);
+ return __static_global(ctx->x);
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int use_static_global_other_sec(void *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ return __static_global(in_user);
+}
+
+__noinline int __nonstatic_global(__u64 x)
+{
+ switch (x) {
+ case 0:
+ adjust_insns(x + 1);
+ ret_user = 2;
+ break;
+ case 1:
+ adjust_insns(x + 7);
+ ret_user = 3;
+ break;
+ case 2:
+ adjust_insns(x + 9);
+ ret_user = 4;
+ break;
+ case 3:
+ adjust_insns(x + 11);
+ ret_user = 5;
+ break;
+ case 4:
+ adjust_insns(x + 17);
+ ret_user = 7;
+ break;
+ default:
+ adjust_insns(x + 177);
+ ret_user = 19;
+ break;
+ }
+
+ return 0;
+}
+
+SEC("syscall")
+int use_nonstatic_global1(struct simple_ctx *ctx)
+{
+ ret_user = 0;
+ return __nonstatic_global(ctx->x);
+}
+
+SEC("syscall")
+int use_nonstatic_global2(struct simple_ctx *ctx)
+{
+ ret_user = 0;
+ adjust_insns(ctx->x + 1);
+ return __nonstatic_global(ctx->x);
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int use_nonstatic_global_other_sec(void *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ return __nonstatic_global(in_user);
+}
+
+#else /* __BPF_FEATURE_GOTOX */
+
+#define SKIP_TEST(TEST_NAME) \
+ SEC("syscall") int TEST_NAME(void *ctx) \
+ { \
+ return 0; \
+ }
+
+SKIP_TEST(one_switch);
+SKIP_TEST(one_switch_non_zero_sec_off);
+SKIP_TEST(simple_test_other_sec);
+SKIP_TEST(two_switches);
+SKIP_TEST(big_jump_table);
+SKIP_TEST(one_jump_two_maps);
+SKIP_TEST(one_map_two_jumps);
+SKIP_TEST(use_static_global1);
+SKIP_TEST(use_static_global2);
+SKIP_TEST(use_static_global_other_sec);
+SKIP_TEST(use_nonstatic_global1);
+SKIP_TEST(use_nonstatic_global2);
+SKIP_TEST(use_nonstatic_global_other_sec);
+
+#endif /* __BPF_FEATURE_GOTOX */
+
+char _license[] SEC("license") = "GPL";
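
The SEC("syscall") programs above are driven via BPF_PROG_TEST_RUN. A hedged sketch of invoking one of them (the skeleton handle and a mirrored simple_ctx definition are assumptions):

	struct simple_ctx in = { .x = 3 };
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.ctx_in = &in,
		.ctx_size_in = sizeof(in),
	);

	bpf_prog_test_run_opts(bpf_program__fd(skel->progs.one_switch), &opts);
	/* one_switch writes its result to the global ret_user (5 for x == 3) */
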
diff --git a/tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c b/tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c
new file mode 100644
index 000000000000..56957557e3e1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Bytedance */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define MAX_ENTRIES 1000
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, MAX_ENTRIES);
+} hash_map_bench SEC(".maps");
+
+u64 __attribute__((__aligned__(256))) percpu_time[256];
+u64 nr_loops;
+
+static int loop_update_callback(__u32 index, u32 *key)
+{
+ u64 init_val = 1;
+
+ bpf_map_update_elem(&hash_map_bench, key, &init_val, BPF_ANY);
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int benchmark(void *ctx)
+{
+ u32 cpu = bpf_get_smp_processor_id();
+ u32 key = cpu + MAX_ENTRIES;
+ u64 start_time = bpf_ktime_get_ns();
+
+ bpf_loop(nr_loops, loop_update_callback, &key, 0);
+ percpu_time[cpu & 255] = bpf_ktime_get_ns() - start_time;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c b/tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c
new file mode 100644
index 000000000000..1eb74ddca414
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+} hash_map_bench SEC(".maps");
+
+/* The number of slots to store times */
+#define NR_SLOTS 32
+#define NR_CPUS 256
+#define CPU_MASK (NR_CPUS-1)
+
+/* Configured by userspace */
+u64 nr_entries;
+u64 nr_loops;
+u32 __attribute__((__aligned__(8))) key[NR_CPUS];
+
+/* Filled by us */
+u64 __attribute__((__aligned__(256))) percpu_times_index[NR_CPUS];
+u64 __attribute__((__aligned__(256))) percpu_times[NR_CPUS][NR_SLOTS];
+
+static inline void patch_key(u32 i)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ key[0] = i + 1;
+#else
+ key[0] = __builtin_bswap32(i + 1);
+#endif
+	/* the rest of the key is random and is configured by userspace */
+}
+
+static int lookup_callback(__u32 index, u32 *unused)
+{
+ patch_key(index);
+ return bpf_map_lookup_elem(&hash_map_bench, key) ? 0 : 1;
+}
+
+static int loop_lookup_callback(__u32 index, u32 *unused)
+{
+ return bpf_loop(nr_entries, lookup_callback, NULL, 0) ? 0 : 1;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int benchmark(void *ctx)
+{
+ u32 cpu = bpf_get_smp_processor_id();
+ u32 times_index;
+ u64 start_time;
+
+ times_index = percpu_times_index[cpu & CPU_MASK] % NR_SLOTS;
+ start_time = bpf_ktime_get_ns();
+ bpf_loop(nr_loops, loop_lookup_callback, NULL, 0);
+ percpu_times[cpu & CPU_MASK][times_index] = bpf_ktime_get_ns() - start_time;
+ percpu_times_index[cpu & CPU_MASK] += 1;
+ return 0;
+}
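
Each percpu_times slot written above covers nr_loops * nr_entries individual lookups, so the per-lookup cost is obtained by division afterwards. An illustrative post-processing helper (not part of the patch):

/* Illustrative only: average ns per lookup derived from one slot. */
static double ns_per_lookup(unsigned long long slot_ns,
			    unsigned long long nr_loops,
			    unsigned long long nr_entries)
{
	return (double)slot_ns / (double)(nr_loops * nr_entries);
}
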
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h
deleted file mode 100644
index 6a1255465fd6..000000000000
--- a/tools/testing/selftests/bpf/progs/bpf_iter.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2020 Facebook */
-/* "undefine" structs in vmlinux.h, because we "override" them below */
-#define bpf_iter_meta bpf_iter_meta___not_used
-#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used
-#define bpf_iter__ipv6_route bpf_iter__ipv6_route___not_used
-#define bpf_iter__netlink bpf_iter__netlink___not_used
-#define bpf_iter__task bpf_iter__task___not_used
-#define bpf_iter__task_file bpf_iter__task_file___not_used
-#define bpf_iter__tcp bpf_iter__tcp___not_used
-#define tcp6_sock tcp6_sock___not_used
-#define bpf_iter__udp bpf_iter__udp___not_used
-#define udp6_sock udp6_sock___not_used
-#define bpf_iter__bpf_map_elem bpf_iter__bpf_map_elem___not_used
-#define bpf_iter__bpf_sk_storage_map bpf_iter__bpf_sk_storage_map___not_used
-#define bpf_iter__sockmap bpf_iter__sockmap___not_used
-#define btf_ptr btf_ptr___not_used
-#define BTF_F_COMPACT BTF_F_COMPACT___not_used
-#define BTF_F_NONAME BTF_F_NONAME___not_used
-#define BTF_F_PTR_RAW BTF_F_PTR_RAW___not_used
-#define BTF_F_ZERO BTF_F_ZERO___not_used
-#include "vmlinux.h"
-#undef bpf_iter_meta
-#undef bpf_iter__bpf_map
-#undef bpf_iter__ipv6_route
-#undef bpf_iter__netlink
-#undef bpf_iter__task
-#undef bpf_iter__task_file
-#undef bpf_iter__tcp
-#undef tcp6_sock
-#undef bpf_iter__udp
-#undef udp6_sock
-#undef bpf_iter__bpf_map_elem
-#undef bpf_iter__bpf_sk_storage_map
-#undef bpf_iter__sockmap
-#undef btf_ptr
-#undef BTF_F_COMPACT
-#undef BTF_F_NONAME
-#undef BTF_F_PTR_RAW
-#undef BTF_F_ZERO
-
-struct bpf_iter_meta {
- struct seq_file *seq;
- __u64 session_id;
- __u64 seq_num;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__ipv6_route {
- struct bpf_iter_meta *meta;
- struct fib6_info *rt;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__netlink {
- struct bpf_iter_meta *meta;
- struct netlink_sock *sk;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__task {
- struct bpf_iter_meta *meta;
- struct task_struct *task;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__task_file {
- struct bpf_iter_meta *meta;
- struct task_struct *task;
- __u32 fd;
- struct file *file;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__bpf_map {
- struct bpf_iter_meta *meta;
- struct bpf_map *map;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__tcp {
- struct bpf_iter_meta *meta;
- struct sock_common *sk_common;
- uid_t uid;
-} __attribute__((preserve_access_index));
-
-struct tcp6_sock {
- struct tcp_sock tcp;
- struct ipv6_pinfo inet6;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__udp {
- struct bpf_iter_meta *meta;
- struct udp_sock *udp_sk;
- uid_t uid __attribute__((aligned(8)));
- int bucket __attribute__((aligned(8)));
-} __attribute__((preserve_access_index));
-
-struct udp6_sock {
- struct udp_sock udp;
- struct ipv6_pinfo inet6;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__bpf_map_elem {
- struct bpf_iter_meta *meta;
- struct bpf_map *map;
- void *key;
- void *value;
-};
-
-struct bpf_iter__bpf_sk_storage_map {
- struct bpf_iter_meta *meta;
- struct bpf_map *map;
- struct sock *sk;
- void *value;
-};
-
-struct bpf_iter__sockmap {
- struct bpf_iter_meta *meta;
- struct bpf_map *map;
- void *key;
- struct sock *sk;
-};
-
-struct btf_ptr {
- void *ptr;
- __u32 type_id;
- __u32 flags;
-};
-
-enum {
- BTF_F_COMPACT = (1ULL << 0),
- BTF_F_NONAME = (1ULL << 1),
- BTF_F_PTR_RAW = (1ULL << 2),
- BTF_F_ZERO = (1ULL << 3),
-};
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
index 6286023fd62b..19710cc0f250 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
@@ -1,17 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
-struct key_t {
- int a;
- int b;
- int c;
-};
-
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 3);
@@ -19,13 +13,20 @@ struct {
__type(value, __u64);
} arraymap1 SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 10);
+ __type(key, __u64);
+ __type(value, __u32);
+} hashmap1 SEC(".maps");
+
__u32 key_sum = 0;
__u64 val_sum = 0;
SEC("iter/bpf_map_elem")
int dump_bpf_array_map(struct bpf_iter__bpf_map_elem *ctx)
{
- __u32 *key = ctx->key;
+ __u32 *hmap_val, *key = ctx->key;
__u64 *val = ctx->value;
if (key == (void *)0 || val == (void *)0)
@@ -35,6 +36,18 @@ int dump_bpf_array_map(struct bpf_iter__bpf_map_elem *ctx)
bpf_seq_write(ctx->meta->seq, val, sizeof(__u64));
key_sum += *key;
val_sum += *val;
+
+ /* workaround - It's necessary to do this convoluted (val, key)
+ * write into hashmap1, instead of simply doing
+ * bpf_map_update_elem(&hashmap1, val, key, BPF_ANY);
+	 * because key has the MEM_RDONLY flag and bpf_map_update_elem()
+	 * expects types without this flag.
+ */
+ bpf_map_update_elem(&hashmap1, val, val, BPF_ANY);
+ hmap_val = bpf_map_lookup_elem(&hashmap1, val);
+ if (hmap_val)
+ *hmap_val = *key;
+
*val = *key;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
index 6dfce3fd68bc..f47da665f7e0 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
@@ -113,3 +112,12 @@ int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
return 0;
}
+
+SEC("iter.s/bpf_map_elem")
+int sleepable_dummy_dump(struct bpf_iter__bpf_map_elem *ctx)
+{
+ if (ctx->meta->seq_num == 0)
+ BPF_SEQ_PRINTF(ctx->meta->seq, "map dump starts\n");
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c
new file mode 100644
index 000000000000..7b69e1887705
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Red Hat, Inc. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("iter/bpf_link")
+int dump_bpf_link(struct bpf_iter__bpf_link *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct bpf_link *link = ctx->link;
+ int link_id;
+
+ if (!link)
+ return 0;
+
+ link_id = link->id;
+ bpf_seq_write(seq, &link_id, sizeof(link_id));
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
index b83b5d2e17dc..c868ffb8080f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
index 85fa710fad90..9fdea8cd4c6f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
@@ -1,17 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
-struct key_t {
- int a;
- int b;
- int c;
-};
-
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 3);
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c
index feaaa2b89c57..aa529f76c7fc 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -20,7 +20,7 @@ struct {
} hashmap1 SEC(".maps");
/* will set before prog run */
-volatile const __u32 num_cpus = 0;
+volatile const __s32 num_cpus = 0;
/* will collect results during prog run */
__u32 key_sum_a = 0, key_sum_b = 0, key_sum_c = 0;
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
index 6cecab2b32ba..e88dab196e0f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Google LLC. */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
index 6b70ccaba301..eb9642923e1c 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -16,19 +16,37 @@ struct {
__u32 val_sum = 0;
__u32 ipv6_sk_count = 0;
+__u32 to_add_val = 0;
SEC("iter/bpf_sk_storage_map")
-int dump_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
+int rw_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
{
struct sock *sk = ctx->sk;
__u32 *val = ctx->value;
- if (sk == (void *)0 || val == (void *)0)
+ if (sk == NULL || val == NULL)
return 0;
if (sk->sk_family == AF_INET6)
ipv6_sk_count++;
val_sum += *val;
+
+ *val += to_add_val;
+
+ return 0;
+}
+
+SEC("iter/bpf_sk_storage_map")
+int oob_write_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
+{
+ struct sock *sk = ctx->sk;
+ __u32 *val = ctx->value;
+
+ if (sk == NULL || val == NULL)
+ return 0;
+
+ *(val + 1) = 0xdeadbeef;
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c
index d58d9f1642b5..73a5cf3ba3d3 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
new file mode 100644
index 000000000000..3e725b1fce37
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Oracle and/or its affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+unsigned long last_sym_value = 0;
+
+static inline char to_lower(char c)
+{
+ if (c >= 'A' && c <= 'Z')
+ c += ('a' - 'A');
+ return c;
+}
+
+static inline char to_upper(char c)
+{
+ if (c >= 'a' && c <= 'z')
+ c -= ('a' - 'A');
+ return c;
+}
+
+/* Dump symbols with their max size; the latter is calculated by caching the
+ * value of symbol N so that, when iterating on symbol N+1, the max size of
+ * symbol N can be printed as address of N+1 - address of N.
+ */
+SEC("iter/ksym")
+int dump_ksym(struct bpf_iter__ksym *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct kallsym_iter *iter = ctx->ksym;
+ __u32 seq_num = ctx->meta->seq_num;
+ unsigned long value;
+ char type;
+
+ if (!iter)
+ return 0;
+
+ if (seq_num == 0) {
+ BPF_SEQ_PRINTF(seq, "ADDR TYPE NAME MODULE_NAME KIND MAX_SIZE\n");
+ return 0;
+ }
+ if (last_sym_value)
+ BPF_SEQ_PRINTF(seq, "0x%x\n", iter->value - last_sym_value);
+ else
+ BPF_SEQ_PRINTF(seq, "\n");
+
+ value = iter->show_value ? iter->value : 0;
+
+ last_sym_value = value;
+
+ type = iter->type;
+
+ if (iter->module_name[0]) {
+ type = iter->exported ? to_upper(type) : to_lower(type);
+ BPF_SEQ_PRINTF(seq, "0x%llx %c %s [ %s ] ",
+ value, type, iter->name, iter->module_name);
+ } else {
+ BPF_SEQ_PRINTF(seq, "0x%llx %c %s ", value, type, iter->name);
+ }
+ if (!iter->pos_mod_end || iter->pos_mod_end > iter->pos)
+ BPF_SEQ_PRINTF(seq, "MOD ");
+ else if (!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > iter->pos)
+ BPF_SEQ_PRINTF(seq, "FTRACE_MOD ");
+ else if (!iter->pos_bpf_end || iter->pos_bpf_end > iter->pos)
+ BPF_SEQ_PRINTF(seq, "BPF ");
+ else
+ BPF_SEQ_PRINTF(seq, "KPROBE ");
+ return 0;
+}
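
Like the other iterator programs in this series, dump_ksym emits its output through a seq_file that userspace streams from an iterator fd. A minimal read-side sketch using standard libbpf calls (the skeleton name is assumed, error paths abbreviated):

#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_iter_ksym.skel.h"	/* generated skeleton; name assumed */

static void dump_ksyms(struct bpf_iter_ksym *skel)
{
	struct bpf_link *link;
	char buf[4096];
	ssize_t n;
	int iter_fd;

	link = bpf_program__attach_iter(skel->progs.dump_ksym, NULL);
	if (!link)
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd >= 0) {
		while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
			write(STDOUT_FILENO, buf, n);	/* one line per symbol */
		close(iter_fd);
	}
	bpf_link__destroy(link);
}
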
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c b/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c
new file mode 100644
index 000000000000..2f20485e0de3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 value_sum = 0;
+
+SEC("iter/bpf_map_elem")
+int dump_bpf_map_values(struct bpf_iter__bpf_map_elem *ctx)
+{
+ __u32 value = 0;
+
+ if (ctx->value == (void *)0)
+ return 0;
+
+ bpf_probe_read_kernel(&value, sizeof(value), ctx->value);
+ value_sum += value;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
index 95989f4c99b5..00b2ceae81fb 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c
new file mode 100644
index 000000000000..a8aa5a71d846
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <vmlinux.h>
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define bpf_tcp_sk(skc) ({ \
+ struct sock_common *_skc = skc; \
+ sk = NULL; \
+ tp = NULL; \
+ if (_skc) { \
+ tp = bpf_skc_to_tcp_sock(_skc); \
+ sk = (struct sock *)tp; \
+ } \
+ tp; \
+})
+
+unsigned short reuse_listen_hport = 0;
+unsigned short listen_hport = 0;
+const char cubic_cc[] = "bpf_cubic";
+char dctcp_cc[TCP_CA_NAME_MAX] = "bpf_dctcp";
+bool random_retry = false;
+
+
+SEC("iter/tcp")
+int change_tcp_cc(struct bpf_iter__tcp *ctx)
+{
+ char cur_cc[TCP_CA_NAME_MAX];
+ struct tcp_sock *tp;
+ struct sock *sk;
+
+ if (!bpf_tcp_sk(ctx->sk_common))
+ return 0;
+
+ if (sk->sk_family != AF_INET6 ||
+ (sk->sk_state != TCP_LISTEN &&
+ sk->sk_state != TCP_ESTABLISHED) ||
+ (sk->sk_num != reuse_listen_hport &&
+ sk->sk_num != listen_hport &&
+ bpf_ntohs(sk->sk_dport) != listen_hport))
+ return 0;
+
+ if (bpf_getsockopt(tp, SOL_TCP, TCP_CONGESTION,
+ cur_cc, sizeof(cur_cc)))
+ return 0;
+
+ if (bpf_strncmp(cur_cc, TCP_CA_NAME_MAX, cubic_cc))
+ return 0;
+
+ if (random_retry && bpf_get_prandom_u32() % 4 == 1)
+ return 1;
+
+ bpf_setsockopt(tp, SOL_TCP, TCP_CONGESTION, dctcp_cc, sizeof(dctcp_cc));
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
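
The bpf_tcp_sk() statement expression above deliberately assigns to sk and tp in the caller's scope so the verifier sees fully typed pointers after the cast; roughly, `if (!bpf_tcp_sk(ctx->sk_common))` expands to the following fragment:

/* Expansion of the macro use in change_tcp_cc(), for illustration: */
struct sock_common *_skc = ctx->sk_common;

sk = NULL;
tp = NULL;
if (_skc) {
	tp = bpf_skc_to_tcp_sock(_skc);	/* NULL unless this is a TCP socket */
	sk = (struct sock *)tp;
}
if (!tp)				/* the statement expression evaluates to tp */
	return 0;
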
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c
new file mode 100644
index 000000000000..d92631ec6161
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+#include <vmlinux.h>
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <limits.h>
+
+#define AUTOBIND_LEN 6
+char sun_path[AUTOBIND_LEN];
+
+#define NR_CASES 5
+int sndbuf_setsockopt[NR_CASES] = {-1, 0, 8192, INT_MAX / 2, INT_MAX};
+int sndbuf_getsockopt[NR_CASES] = {-1, -1, -1, -1, -1};
+int sndbuf_getsockopt_expected[NR_CASES];
+
+static inline int cmpname(struct unix_sock *unix_sk)
+{
+ int i;
+
+ for (i = 0; i < AUTOBIND_LEN; i++) {
+ if (unix_sk->addr->name->sun_path[i] != sun_path[i])
+ return -1;
+ }
+
+ return 0;
+}
+
+SEC("iter/unix")
+int change_sndbuf(struct bpf_iter__unix *ctx)
+{
+ struct unix_sock *unix_sk = ctx->unix_sk;
+ int i, err;
+
+ if (!unix_sk || !unix_sk->addr)
+ return 0;
+
+ if (unix_sk->addr->name->sun_path[0])
+ return 0;
+
+ if (cmpname(unix_sk))
+ return 0;
+
+ for (i = 0; i < NR_CASES; i++) {
+ err = bpf_setsockopt(unix_sk, SOL_SOCKET, SO_SNDBUF,
+ &sndbuf_setsockopt[i],
+ sizeof(sndbuf_setsockopt[i]));
+ if (err)
+ break;
+
+ err = bpf_getsockopt(unix_sk, SOL_SOCKET, SO_SNDBUF,
+ &sndbuf_getsockopt[i],
+ sizeof(sndbuf_getsockopt[i]));
+ if (err)
+ break;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c b/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c
index f3af0e30cead..317fe49760cc 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Cloudflare */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task.c b/tools/testing/selftests/bpf/progs/bpf_iter_task.c
deleted file mode 100644
index b7f32c160f4e..000000000000
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
-
-char _license[] SEC("license") = "GPL";
-
-SEC("iter/task")
-int dump_task(struct bpf_iter__task *ctx)
-{
- struct seq_file *seq = ctx->meta->seq;
- struct task_struct *task = ctx->task;
- static char info[] = " === END ===";
-
- if (task == (void *)0) {
- BPF_SEQ_PRINTF(seq, "%s\n", info);
- return 0;
- }
-
- if (ctx->meta->seq_num == 0)
- BPF_SEQ_PRINTF(seq, " tgid gid\n");
-
- BPF_SEQ_PRINTF(seq, "%8d %8d\n", task->tgid, task->pid);
- return 0;
-}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
index a1ddc36f13ec..ef2f7c8d9373 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Oracle and/or its affiliates. */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <errno.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
index b2f7c7c5f952..959a8d899eaf 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
@@ -1,21 +1,22 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
int count = 0;
int tgid = 0;
+int last_tgid = 0;
+int unique_tgid_count = 0;
SEC("iter/task_file")
int dump_task_file(struct bpf_iter__task_file *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct task_struct *task = ctx->task;
- __u32 fd = ctx->fd;
struct file *file = ctx->file;
+ __u32 fd = ctx->fd;
if (task == (void *)0 || file == (void *)0)
return 0;
@@ -28,6 +29,11 @@ int dump_task_file(struct bpf_iter__task_file *ctx)
if (tgid == task->tgid && task->tgid != task->pid)
count++;
+ if (last_tgid != task->tgid) {
+ last_tgid = task->tgid;
+ unique_tgid_count++;
+ }
+
BPF_SEQ_PRINTF(seq, "%8d %8d %8d %lx\n", task->tgid, task->pid, fd,
(long)file->f_op);
return 0;
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
index 50e59a2e142e..f5a309455490 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
@@ -35,3 +34,35 @@ int dump_task_stack(struct bpf_iter__task *ctx)
return 0;
}
+
+int num_user_stacks = 0;
+
+SEC("iter/task")
+int get_task_user_stacks(struct bpf_iter__task *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct task_struct *task = ctx->task;
+ uint64_t buf_sz = 0;
+ int64_t res;
+
+ if (task == (void *)0)
+ return 0;
+
+ res = bpf_get_task_stack(task, entries,
+ MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, BPF_F_USER_STACK);
+ if (res <= 0)
+ return 0;
+
+ /* Only one task, the current one, should succeed */
+ ++num_user_stacks;
+
+ buf_sz += res;
+
+ /* If the verifier doesn't refine bpf_get_task_stack res, and instead
+ * assumes res is entirely unknown, this program will fail to load as
+	 * the verifier will believe that the max buf_sz value allows reading
+	 * past the end of entries in the bpf_seq_write call.
+ */
+ bpf_seq_write(seq, &entries, buf_sz);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c
new file mode 100644
index 000000000000..d64ba7ddaed5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* Copied from mm.h */
+#define VM_READ 0x00000001
+#define VM_WRITE 0x00000002
+#define VM_EXEC 0x00000004
+#define VM_MAYSHARE 0x00000080
+
+/* Copied from kdev_t.h */
+#define MINORBITS 20
+#define MINORMASK ((1U << MINORBITS) - 1)
+#define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS))
+#define MINOR(dev) ((unsigned int) ((dev) & MINORMASK))
+
+#define D_PATH_BUF_SIZE 1024
+char d_path_buf[D_PATH_BUF_SIZE] = {};
+__u32 pid = 0;
+__u32 one_task = 0;
+__u32 one_task_error = 0;
+
+SEC("iter/task_vma") int proc_maps(struct bpf_iter__task_vma *ctx)
+{
+ struct vm_area_struct *vma = ctx->vma;
+ struct seq_file *seq = ctx->meta->seq;
+ struct task_struct *task = ctx->task;
+ struct file *file;
+ char perm_str[] = "----";
+
+ if (task == (void *)0 || vma == (void *)0)
+ return 0;
+
+ file = vma->vm_file;
+ if (task->tgid != (pid_t)pid) {
+ if (one_task)
+ one_task_error = 1;
+ return 0;
+ }
+ perm_str[0] = (vma->vm_flags & VM_READ) ? 'r' : '-';
+ perm_str[1] = (vma->vm_flags & VM_WRITE) ? 'w' : '-';
+ perm_str[2] = (vma->vm_flags & VM_EXEC) ? 'x' : '-';
+ perm_str[3] = (vma->vm_flags & VM_MAYSHARE) ? 's' : 'p';
+ BPF_SEQ_PRINTF(seq, "%08llx-%08llx %s ", vma->vm_start, vma->vm_end, perm_str);
+
+ if (file) {
+ __u32 dev = file->f_inode->i_sb->s_dev;
+
+ bpf_d_path(&file->f_path, d_path_buf, D_PATH_BUF_SIZE);
+
+ BPF_SEQ_PRINTF(seq, "%08llx ", vma->vm_pgoff << 12);
+ BPF_SEQ_PRINTF(seq, "%02x:%02x %u", MAJOR(dev), MINOR(dev),
+ file->f_inode->i_ino);
+ BPF_SEQ_PRINTF(seq, "\t%s\n", d_path_buf);
+ } else {
+ BPF_SEQ_PRINTF(seq, "%08llx 00:00 0\n", 0ULL);
+ }
+ return 0;
+}
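
The kdev_t.h macros copied above split a device number into a 12-bit major and a 20-bit minor. A self-contained sanity check (the 8:3 device is just an example value):

#include <assert.h>

#define MINORBITS 20
#define MINORMASK ((1U << MINORBITS) - 1)
#define MAJOR(dev) ((unsigned int)((dev) >> MINORBITS))
#define MINOR(dev) ((unsigned int)((dev) & MINORMASK))

int main(void)
{
	unsigned int dev = (8U << MINORBITS) | 3;	/* block device 8:3 */

	assert(MAJOR(dev) == 8);	/* top 12 bits */
	assert(MINOR(dev) == 3);	/* low 20 bits */
	return 0;
}
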
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c b/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
new file mode 100644
index 000000000000..966ee5a7b066
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+uint32_t tid = 0;
+int num_unknown_tid = 0;
+int num_known_tid = 0;
+void *user_ptr = 0;
+void *user_ptr_long = 0;
+uint32_t pid = 0;
+
+static char big_str1[5000];
+static char big_str2[5005];
+static char big_str3[4996];
+
+SEC("iter/task")
+int dump_task(struct bpf_iter__task *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct task_struct *task = ctx->task;
+ static char info[] = " === END ===";
+
+ if (task == (void *)0) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ if (task->pid != (pid_t)tid)
+ num_unknown_tid++;
+ else
+ num_known_tid++;
+
+ if (ctx->meta->seq_num == 0)
+ BPF_SEQ_PRINTF(seq, " tgid gid\n");
+
+ BPF_SEQ_PRINTF(seq, "%8d %8d\n", task->tgid, task->pid);
+ return 0;
+}
+
+int num_expected_failure_copy_from_user_task = 0;
+int num_expected_failure_copy_from_user_task_str = 0;
+int num_success_copy_from_user_task = 0;
+int num_success_copy_from_user_task_str = 0;
+
+SEC("iter.s/task")
+int dump_task_sleepable(struct bpf_iter__task *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct task_struct *task = ctx->task;
+ static const char info[] = " === END ===";
+ struct pt_regs *regs;
+ char task_str1[10] = "aaaaaaaaaa";
+ char task_str2[10], task_str3[10];
+ char task_str4[20] = "aaaaaaaaaaaaaaaaaaaa";
+ void *ptr;
+ uint32_t user_data = 0;
+ int ret;
+
+ if (task == (void *)0) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Read an invalid pointer and ensure we get an error */
+ ptr = NULL;
+ ret = bpf_copy_from_user_task(&user_data, sizeof(uint32_t), ptr, task, 0);
+ if (ret) {
+ ++num_expected_failure_copy_from_user_task;
+ } else {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Try to read the contents of the task's instruction pointer from the
+ * remote task's address space.
+ */
+ regs = (struct pt_regs *)bpf_task_pt_regs(task);
+ if (regs == (void *)0) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+ ptr = (void *)PT_REGS_IP(regs);
+
+ ret = bpf_copy_from_user_task(&user_data, sizeof(uint32_t), ptr, task, 0);
+ if (ret) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ ++num_success_copy_from_user_task;
+
+ /* Read an invalid pointer and ensure we get an error */
+ ptr = NULL;
+ ret = bpf_copy_from_user_task_str((char *)task_str1, sizeof(task_str1), ptr, task, 0);
+ if (ret >= 0 || task_str1[9] != 'a' || task_str1[0] != '\0') {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Read an invalid pointer and ensure we get error with pad zeros flag */
+ ptr = NULL;
+ ret = bpf_copy_from_user_task_str((char *)task_str1, sizeof(task_str1),
+ ptr, task, BPF_F_PAD_ZEROS);
+ if (ret >= 0 || task_str1[9] != '\0' || task_str1[0] != '\0') {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ ++num_expected_failure_copy_from_user_task_str;
+
+ /* Same length as the string */
+ ret = bpf_copy_from_user_task_str((char *)task_str2, 10, user_ptr, task, 0);
+ /* only need to do the task pid check once */
+ if (bpf_strncmp(task_str2, 10, "test_data\0") != 0 || ret != 10 || task->tgid != pid) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Shorter length than the string */
+ ret = bpf_copy_from_user_task_str((char *)task_str3, 2, user_ptr, task, 0);
+ if (bpf_strncmp(task_str3, 2, "t\0") != 0 || ret != 2) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Longer length than the string */
+ ret = bpf_copy_from_user_task_str((char *)task_str4, 20, user_ptr, task, 0);
+ if (bpf_strncmp(task_str4, 10, "test_data\0") != 0 || ret != 10
+ || task_str4[sizeof(task_str4) - 1] != 'a') {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Longer length than the string with pad zeros flag */
+ ret = bpf_copy_from_user_task_str((char *)task_str4, 20, user_ptr, task, BPF_F_PAD_ZEROS);
+ if (bpf_strncmp(task_str4, 10, "test_data\0") != 0 || ret != 10
+ || task_str4[sizeof(task_str4) - 1] != '\0') {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Longer length than the string past a page boundary */
+ ret = bpf_copy_from_user_task_str(big_str1, 5000, user_ptr, task, 0);
+ if (bpf_strncmp(big_str1, 10, "test_data\0") != 0 || ret != 10) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* String that crosses a page boundary */
+ ret = bpf_copy_from_user_task_str(big_str1, 5000, user_ptr_long, task, BPF_F_PAD_ZEROS);
+ if (bpf_strncmp(big_str1, 4, "baba") != 0 || ret != 5000
+ || bpf_strncmp(big_str1 + 4996, 4, "bab\0") != 0) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ for (int i = 0; i < 4999; ++i) {
+ if (i % 2 == 0) {
+ if (big_str1[i] != 'b') {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+ } else {
+ if (big_str1[i] != 'a') {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+ }
+ }
+
+ /* Longer length than the string that crosses a page boundary */
+ ret = bpf_copy_from_user_task_str(big_str2, 5005, user_ptr_long, task, BPF_F_PAD_ZEROS);
+ if (bpf_strncmp(big_str2, 4, "baba") != 0 || ret != 5000
+ || bpf_strncmp(big_str2 + 4996, 5, "bab\0\0") != 0) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ /* Shorter length than the string that crosses a page boundary */
+ ret = bpf_copy_from_user_task_str(big_str3, 4996, user_ptr_long, task, 0);
+ if (bpf_strncmp(big_str3, 4, "baba") != 0 || ret != 4996
+ || bpf_strncmp(big_str3 + 4992, 4, "bab\0") != 0) {
+ BPF_SEQ_PRINTF(seq, "%s\n", info);
+ return 0;
+ }
+
+ ++num_success_copy_from_user_task_str;
+
+ if (ctx->meta->seq_num == 0)
+ BPF_SEQ_PRINTF(seq, " tgid gid data\n");
+
+ BPF_SEQ_PRINTF(seq, "%8d %8d %8d\n", task->tgid, task->pid, user_data);
+ return 0;
+}
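
The page-boundary checks above assume a specific user buffer; the userspace half of the test is expected to place something equivalent to the following pattern behind user_ptr_long (sketch, not the actual test code):

/* Sketch: the alternating buffer the page-boundary checks expect. */
static void fill_page_crossing_pattern(char buf[5000])
{
	int i;

	for (i = 0; i < 4999; i++)
		buf[i] = (i % 2 == 0) ? 'b' : 'a';	/* "baba...bab" */
	buf[4999] = '\0';	/* strlen == 4999, so a 5000-byte copy returns 5000 */
}
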
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
index 54380c5e1069..b1e509b231cd 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
@@ -100,13 +99,13 @@ static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp,
icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
- timer_expires = icsk->icsk_timeout;
+ timer_expires = sp->tcp_retransmit_timer.expires;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
- timer_expires = icsk->icsk_timeout;
- } else if (timer_pending(&sp->sk_timer)) {
+ timer_expires = sp->tcp_retransmit_timer.expires;
+ } else if (timer_pending(&icsk->icsk_keepalive_timer)) {
timer_active = 2;
- timer_expires = sp->sk_timer.expires;
+ timer_expires = icsk->icsk_keepalive_timer.expires;
} else {
timer_active = 0;
timer_expires = bpf_jiffies64();
@@ -122,7 +121,7 @@ static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp,
}
BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ",
- seq_num, src, srcp, destp, destp);
+ seq_num, src, srcp, dest, destp);
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ",
state,
tp->write_seq - tp->snd_una, rx_queue,
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
index b4fbddfa4e10..dbc7166aee91 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
@@ -100,13 +99,13 @@ static int dump_tcp6_sock(struct seq_file *seq, struct tcp6_sock *tp,
icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
- timer_expires = icsk->icsk_timeout;
+ timer_expires = sp->tcp_retransmit_timer.expires;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
- timer_expires = icsk->icsk_timeout;
- } else if (timer_pending(&sp->sk_timer)) {
+ timer_expires = sp->tcp_retransmit_timer.expires;
+ } else if (timer_pending(&icsk->icsk_keepalive_timer)) {
timer_active = 2;
- timer_expires = sp->sk_timer.expires;
+ timer_expires = icsk->icsk_keepalive_timer.expires;
} else {
timer_active = 0;
timer_expires = bpf_jiffies64();
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c
index 2a4647f20c46..6b17e7e86a48 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
index ee49493dc125..56177508798f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
@@ -9,8 +9,8 @@ __u32 map1_id = 0, map2_id = 0;
__u32 map1_accessed = 0, map2_accessed = 0;
__u64 map1_seqnum = 0, map2_seqnum1 = 0, map2_seqnum2 = 0;
-static volatile const __u32 print_len;
-static volatile const __u32 ret1;
+volatile const __u32 print_len;
+volatile const __u32 ret1;
SEC("iter/bpf_map")
int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
@@ -45,7 +45,7 @@ int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
}
/* fill seq_file buffer */
- for (i = 0; i < print_len; i++)
+ for (i = 0; i < (int)print_len; i++)
bpf_seq_write(seq, &seq_num, sizeof(seq_num));
return ret;
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c
index e3a7575e81d2..9d8b7310d2c2 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c
index 1c7304f56b1e..b150bd468824 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h
index d5e3df66ad9a..6a4c50497c5e 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
index f258583afbbd..23b2aa2604de 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
@@ -65,7 +64,8 @@ int dump_udp4(struct bpf_iter__udp *ctx)
0, 0L, 0, ctx->uid, 0,
sock_i_ino(&inet->sk),
inet->sk.sk_refcnt.refs.counter, udp_sk,
- inet->sk.sk_drops.counter);
+ udp_sk->drop_counters.drops0.counter +
+ udp_sk->drop_counters.drops1.counter);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
index 65f93bb03f0f..c48b05aa2a4b 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
@@ -73,7 +72,7 @@ int dump_udp6(struct bpf_iter__udp *ctx)
0, 0L, 0, ctx->uid, 0,
sock_i_ino(&inet->sk),
inet->sk.sk_refcnt.refs.counter, udp_sk,
- inet->sk.sk_drops.counter);
-
+ udp_sk->drop_counters.drops0.counter +
+ udp_sk->drop_counters.drops1.counter);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_unix.c b/tools/testing/selftests/bpf/progs/bpf_iter_unix.c
new file mode 100644
index 000000000000..fea275df9e22
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_unix.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+#include <vmlinux.h>
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+char _license[] SEC("license") = "GPL";
+
+static long sock_i_ino(const struct sock *sk)
+{
+ const struct socket *sk_socket = sk->sk_socket;
+ const struct inode *inode;
+ unsigned long ino;
+
+ if (!sk_socket)
+ return 0;
+
+ inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode;
+ bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
+ return ino;
+}
+
+SEC("iter/unix")
+int dump_unix(struct bpf_iter__unix *ctx)
+{
+ struct unix_sock *unix_sk = ctx->unix_sk;
+ struct sock *sk = (struct sock *)unix_sk;
+ struct seq_file *seq;
+ __u32 seq_num;
+
+ if (!unix_sk)
+ return 0;
+
+ seq = ctx->meta->seq;
+ seq_num = ctx->meta->seq_num;
+ if (seq_num == 0)
+ BPF_SEQ_PRINTF(seq, "Num RefCount Protocol Flags Type St Inode Path\n");
+
+ BPF_SEQ_PRINTF(seq, "%pK: %08X %08X %08X %04X %02X %8lu",
+ unix_sk,
+ sk->sk_refcnt.refs.counter,
+ 0,
+ sk->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
+ sk->sk_type,
+ sk->sk_socket ?
+ (sk->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
+ (sk->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
+ sock_i_ino(sk));
+
+ if (unix_sk->addr) {
+ if (unix_sk->addr->name->sun_path[0]) {
+ BPF_SEQ_PRINTF(seq, " %s", unix_sk->addr->name->sun_path);
+ } else {
+ /* The name of the abstract UNIX domain socket starts
+ * with '\0' and can contain '\0'. The null bytes
+ * should be escaped as done in unix_seq_show().
+ */
+ __u64 i, len;
+
+ len = unix_sk->addr->len - sizeof(short);
+
+ BPF_SEQ_PRINTF(seq, " @");
+
+ for (i = 1; i < len; i++) {
+ /* unix_validate_addr() tests this upper bound. */
+ if (i >= sizeof(struct sockaddr_un))
+ break;
+
+ BPF_SEQ_PRINTF(seq, "%c",
+ unix_sk->addr->name->sun_path[i] ?:
+ '@');
+ }
+ }
+ }
+
+ BPF_SEQ_PRINTF(seq, "\n");
+
+ return 0;
+}
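
The '@' substitution above mirrors unix_seq_show(): the leading NUL of an abstract name prints as '@', as does every embedded NUL. A plain-C illustration of the same escaping:

#include <stdio.h>

int main(void)
{
	/* abstract name as stored in sun_path: leading NUL, may embed NULs */
	const char sun_path[] = { '\0', 's', 'o', 'c', 'k', '\0', 'x' };
	int len = sizeof(sun_path);
	int i;

	putchar('@');			/* stands in for the leading NUL */
	for (i = 1; i < len; i++)
		putchar(sun_path[i] ? sun_path[i] : '@');
	putchar('\n');			/* prints "@sock@x" */
	return 0;
}
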
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c b/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c
new file mode 100644
index 000000000000..174298e122d3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 unique_tgid_cnt = 0;
+uintptr_t address = 0;
+uintptr_t offset = 0;
+__u32 last_tgid = 0;
+__u32 pid = 0;
+__u32 page_shift = 0;
+
+SEC("iter/task_vma")
+int get_vma_offset(struct bpf_iter__task_vma *ctx)
+{
+ struct vm_area_struct *vma = ctx->vma;
+ struct seq_file *seq = ctx->meta->seq;
+ struct task_struct *task = ctx->task;
+
+ if (task == NULL || vma == NULL)
+ return 0;
+
+ if (last_tgid != task->tgid)
+ unique_tgid_cnt++;
+ last_tgid = task->tgid;
+
+ if (task->tgid != pid)
+ return 0;
+
+ if (vma->vm_start <= address && vma->vm_end > address) {
+ offset = address - vma->vm_start + (vma->vm_pgoff << page_shift);
+ BPF_SEQ_PRINTF(seq, "OK\n");
+ }
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_loop.c b/tools/testing/selftests/bpf/progs/bpf_loop.c
new file mode 100644
index 000000000000..1d194455b109
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_loop.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct callback_ctx {
+ int output;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 32);
+ __type(key, int);
+ __type(value, int);
+} map1 SEC(".maps");
+
+/* These should be set by the user program */
+u32 nested_callback_nr_loops;
+u32 stop_index = -1;
+u32 nr_loops;
+int pid;
+int callback_selector;
+
+/* Making these global variables so that the userspace program
+ * can verify the output through the skeleton
+ */
+int nr_loops_returned;
+int g_output;
+int err;
+
+static int callback(__u32 index, void *data)
+{
+ struct callback_ctx *ctx = data;
+
+ if (index >= stop_index)
+ return 1;
+
+ ctx->output += index;
+
+ return 0;
+}
+
+static int empty_callback(__u32 index, void *data)
+{
+ return 0;
+}
+
+static int nested_callback2(__u32 index, void *data)
+{
+ nr_loops_returned += bpf_loop(nested_callback_nr_loops, callback, data, 0);
+
+ return 0;
+}
+
+static int nested_callback1(__u32 index, void *data)
+{
+ bpf_loop(nested_callback_nr_loops, nested_callback2, data, 0);
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int test_prog(void *ctx)
+{
+ struct callback_ctx data = {};
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ nr_loops_returned = bpf_loop(nr_loops, callback, &data, 0);
+
+ if (nr_loops_returned < 0)
+ err = nr_loops_returned;
+ else
+ g_output = data.output;
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int prog_null_ctx(void *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ nr_loops_returned = bpf_loop(nr_loops, empty_callback, NULL, 0);
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int prog_invalid_flags(void *ctx)
+{
+ struct callback_ctx data = {};
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ err = bpf_loop(nr_loops, callback, &data, 1);
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int prog_nested_calls(void *ctx)
+{
+ struct callback_ctx data = {};
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ nr_loops_returned = 0;
+ bpf_loop(nr_loops, nested_callback1, &data, 0);
+
+ g_output = data.output;
+
+ return 0;
+}
+
+static int callback_set_f0(int i, void *ctx)
+{
+ g_output = 0xF0;
+ return 0;
+}
+
+static int callback_set_0f(int i, void *ctx)
+{
+ g_output = 0x0F;
+ return 0;
+}
+
+/*
+ * A non-constant callback is a corner case for the bpf_loop inlining logic.
+ */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int prog_non_constant_callback(void *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ int (*callback)(int i, void *ctx);
+
+ g_output = 0;
+
+ if (callback_selector == 0x0F)
+ callback = callback_set_0f;
+ else
+ callback = callback_set_f0;
+
+ bpf_loop(1, callback, NULL, 0);
+
+ return 0;
+}
+
+static int stack_check_inner_callback(void *ctx)
+{
+ return 0;
+}
+
+static int map1_lookup_elem(int key)
+{
+ int *val = bpf_map_lookup_elem(&map1, &key);
+
+ return val ? *val : -1;
+}
+
+static void map1_update_elem(int key, int val)
+{
+ bpf_map_update_elem(&map1, &key, &val, BPF_ANY);
+}
+
+static int stack_check_outer_callback(void *ctx)
+{
+ int a = map1_lookup_elem(1);
+ int b = map1_lookup_elem(2);
+ int c = map1_lookup_elem(3);
+ int d = map1_lookup_elem(4);
+ int e = map1_lookup_elem(5);
+ int f = map1_lookup_elem(6);
+
+ bpf_loop(1, stack_check_inner_callback, NULL, 0);
+
+ map1_update_elem(1, a + 1);
+ map1_update_elem(2, b + 1);
+ map1_update_elem(3, c + 1);
+ map1_update_elem(4, d + 1);
+ map1_update_elem(5, e + 1);
+ map1_update_elem(6, f + 1);
+
+ return 0;
+}
+
+/* Some of the local variables in stack_check and
+ * stack_check_outer_callback are allocated on the stack by the
+ * compiler. This test verifies that the stack content for these
+ * variables is preserved between calls to bpf_loop (which might be an
+ * if loop inlining allocates stack slots incorrectly).
+ */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int stack_check(void *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ int a = map1_lookup_elem(7);
+ int b = map1_lookup_elem(8);
+ int c = map1_lookup_elem(9);
+ int d = map1_lookup_elem(10);
+ int e = map1_lookup_elem(11);
+ int f = map1_lookup_elem(12);
+
+ bpf_loop(1, stack_check_outer_callback, NULL, 0);
+
+ map1_update_elem(7, a + 1);
+ map1_update_elem(8, b + 1);
+ map1_update_elem(9, c + 1);
+ map1_update_elem(10, d + 1);
+ map1_update_elem(11, e + 1);
+ map1_update_elem(12, f + 1);
+
+ return 0;
+}
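
These tests lean on the documented bpf_loop() contract: the callback runs with indices 0..nr_loops-1, a nonzero return stops the loop, and the helper returns the number of iterations performed. A plain-C model of that contract (illustration only, not the kernel implementation):

static long bpf_loop_model(unsigned int nr_loops,
			   int (*callback)(unsigned int, void *), void *ctx)
{
	unsigned int i;

	for (i = 0; i < nr_loops; i++)
		if (callback(i, ctx))
			return i + 1;	/* iterations performed, incl. the stopping one */
	return i;
}
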
diff --git a/tools/testing/selftests/bpf/progs/bpf_loop_bench.c b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c
new file mode 100644
index 000000000000..d461746fd3c1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+u32 nr_loops;
+long hits;
+
+static int empty_callback(__u32 index, void *data)
+{
+ return 0;
+}
+
+static int outer_loop(__u32 index, void *data)
+{
+ bpf_loop(nr_loops, empty_callback, NULL, 0);
+ __sync_add_and_fetch(&hits, nr_loops);
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int benchmark(void *ctx)
+{
+ bpf_loop(1000, outer_loop, NULL, 0);
+ return 0;
+}
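
hits accumulates the total number of inner iterations across CPUs; a bench harness typically samples it periodically and reports a rate, along these lines (illustrative helper, not part of the patch):

/* Illustrative only: how a harness turns 'hits' samples into a rate. */
static double ops_per_sec(long hits_now, long hits_before, double interval_sec)
{
	return (double)(hits_now - hits_before) / interval_sec;
}
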
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
new file mode 100644
index 000000000000..c9bfbe1bafc1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BPF_MISC_H__
+#define __BPF_MISC_H__
+
+#define XSTR(s) STR(s)
+#define STR(s) #s
+
+/* Expand a macro and then stringize the expansion */
+#define QUOTE(str) #str
+#define EXPAND_QUOTE(str) QUOTE(str)
+
+/* This set of attributes controls behavior of the
+ * test_loader.c:test_loader__run_subtests().
+ *
+ * The test_loader sequentially loads each program in a skeleton.
+ * Programs could be loaded in privileged and unprivileged modes.
+ * - __success, __failure, __msg imply privileged mode;
+ * - __success_unpriv, __failure_unpriv, __msg_unpriv
+ *   imply unprivileged mode.
+ * If a combination of privileged and unprivileged attributes is present,
+ * both modes are used. If none are present, privileged mode is implied.
+ *
+ * See test_loader.c:drop_capabilities() for exact set of capabilities
+ * that differ between privileged and unprivileged modes.
+ *
+ * For test filtering purposes the name of the program loaded in
+ * unprivileged mode is derived from the usual program name by adding
+ * `@unpriv' suffix.
+ *
+ * __msg Message expected to be found in the verifier log.
+ * Multiple __msg attributes could be specified.
+ * To match a regular expression use "{{" "}}" brackets,
+ * e.g. "foo{{[0-9]+}}" matches strings like "foo007".
+ * Extended POSIX regular expression syntax is allowed
+ * inside the brackets.
+ * __not_msg Message not expected to be found in verifier log.
+ *                     If __not_msg is situated between __msg tags,
+ *                     the framework matches the __msg tags first, and then
+ *                     checks that __not_msg is not present in the portion
+ *                     of the log between the bracketing __msg tags.
+ * Same regex syntax as for __msg is supported.
+ * __msg_unpriv Same as __msg but for unprivileged mode.
+ * __not_msg_unpriv Same as __not_msg but for unprivileged mode.
+ *
+ * __stderr Message expected to be found in bpf stderr stream. The
+ *                     same regex rules apply as for __msg.
+ * __stderr_unpriv     Same as __stderr but for unprivileged mode.
+ * __stdout            Same as __stderr but for the stdout stream.
+ * __stdout_unpriv     Same as __stdout but for unprivileged mode.
+ *
+ * __xlated Expect a line in a disassembly log after verifier applies rewrites.
+ * Multiple __xlated attributes could be specified.
+ *                     Regular expressions could be specified the same way as in __msg.
+ * __xlated_unpriv Same as __xlated but for unprivileged mode.
+ *
+ * __jited Match a line in a disassembly of the jited BPF program.
+ * Has to be used after __arch_* macro.
+ * For example:
+ *
+ * __arch_x86_64
+ * __jited(" endbr64")
+ * __jited(" nopl (%rax,%rax)")
+ * __jited(" xorq %rax, %rax")
+ * ...
+ * __naked void some_test(void)
+ * {
+ * asm volatile (... ::: __clobber_all);
+ * }
+ *
+ *                    Regular expressions could be included in patterns the same way
+ * as in __msg.
+ *
+ * By default assume that each pattern has to be matched on the
+ * next consecutive line of disassembly, e.g.:
+ *
+ * __jited(" endbr64") # matched on line N
+ * __jited(" nopl (%rax,%rax)") # matched on line N+1
+ *
+ *                    If a match occurs on the wrong line, an error is reported.
+ *                    To override this behaviour, use literal "...", e.g.:
+ *
+ * __jited(" endbr64") # matched on line N
+ * __jited("...") # not matched
+ * __jited(" nopl (%rax,%rax)") # matched on any line >= N
+ *
+ * __jited_unpriv Same as __jited but for unprivileged mode.
+ *
+ *
+ * __success Expect program load success in privileged mode.
+ * __success_unpriv Expect program load success in unprivileged mode.
+ *
+ * __failure Expect program load failure in privileged mode.
+ * __failure_unpriv Expect program load failure in unprivileged mode.
+ *
+ * __retval Execute the program using BPF_PROG_TEST_RUN command,
+ * expect return value to match passed parameter:
+ * - a decimal number
+ * - a hexadecimal number, when starts from 0x
+ * - a macro which expands to one of the above
+ * - literal _INT_MIN (expands to INT_MIN)
+ * In addition, two special macros are defined below:
+ * - POINTER_VALUE
+ * - TEST_DATA_LEN
+ * __retval_unpriv Same, but load program in unprivileged mode.
+ *
+ * __description Text to be used instead of a program name for display
+ * and filtering purposes.
+ *
+ * __log_level Log level to use for the program, numeric value expected.
+ *
+ * __flag                  Adds one flag for the program; the following values are valid:
+ * - BPF_F_STRICT_ALIGNMENT;
+ * - BPF_F_TEST_RND_HI32;
+ * - BPF_F_TEST_STATE_FREQ;
+ * - BPF_F_SLEEPABLE;
+ * - BPF_F_XDP_HAS_FRAGS;
+ * - A numeric value.
+ * Multiple __flag attributes could be specified, the final flags
+ * value is derived by applying binary "or" to all specified values.
+ *
+ * __auxiliary Annotated program is not a separate test, but used as auxiliary
+ * for some other test cases and should always be loaded.
+ * __auxiliary_unpriv Same, but load program in unprivileged mode.
+ *
+ * __arch_* Specify on which architecture the test case should be tested.
+ * Several __arch_* annotations could be specified at once.
+ *                  When the test case is not run on the current arch, it is marked as skipped.
+ * __caps_unpriv Specify the capabilities that should be set when running the test.
+ *
+ * __linear_size Specify the size of the linear area of non-linear skbs, or
+ * 0 for linear skbs.
+ */
+#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" XSTR(__COUNTER__) "=" msg)))
+#define __not_msg(msg) __attribute__((btf_decl_tag("comment:test_expect_not_msg=" XSTR(__COUNTER__) "=" msg)))
+#define __xlated(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated=" XSTR(__COUNTER__) "=" msg)))
+#define __jited(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg)))
+#define __failure __attribute__((btf_decl_tag("comment:test_expect_failure")))
+#define __success __attribute__((btf_decl_tag("comment:test_expect_success")))
+#define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc)))
+#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" XSTR(__COUNTER__) "=" msg)))
+#define __not_msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_not_msg_unpriv=" XSTR(__COUNTER__) "=" msg)))
+#define __xlated_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated_unpriv=" XSTR(__COUNTER__) "=" msg)))
+#define __jited_unpriv(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg)))
+#define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv")))
+#define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv")))
+#define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl)))
+#define __flag(flag) __attribute__((btf_decl_tag("comment:test_prog_flags="#flag)))
+#define __retval(val) __attribute__((btf_decl_tag("comment:test_retval="XSTR(val))))
+#define __retval_unpriv(val) __attribute__((btf_decl_tag("comment:test_retval_unpriv="XSTR(val))))
+#define __auxiliary __attribute__((btf_decl_tag("comment:test_auxiliary")))
+#define __auxiliary_unpriv __attribute__((btf_decl_tag("comment:test_auxiliary_unpriv")))
+#define __btf_path(path) __attribute__((btf_decl_tag("comment:test_btf_path=" path)))
+#define __arch(arch) __attribute__((btf_decl_tag("comment:test_arch=" arch)))
+#define __arch_x86_64 __arch("X86_64")
+#define __arch_arm64 __arch("ARM64")
+#define __arch_riscv64 __arch("RISCV64")
+#define __arch_s390x __arch("s390x")
+#define __caps_unpriv(caps) __attribute__((btf_decl_tag("comment:test_caps_unpriv=" EXPAND_QUOTE(caps))))
+#define __load_if_JITed() __attribute__((btf_decl_tag("comment:load_mode=jited")))
+#define __load_if_no_JITed() __attribute__((btf_decl_tag("comment:load_mode=no_jited")))
+#define __stderr(msg) __attribute__((btf_decl_tag("comment:test_expect_stderr=" XSTR(__COUNTER__) "=" msg)))
+#define __stderr_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_stderr_unpriv=" XSTR(__COUNTER__) "=" msg)))
+#define __stdout(msg) __attribute__((btf_decl_tag("comment:test_expect_stdout=" XSTR(__COUNTER__) "=" msg)))
+#define __stdout_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_stdout_unpriv=" XSTR(__COUNTER__) "=" msg)))
+#define __linear_size(sz) __attribute__((btf_decl_tag("comment:test_linear_size=" XSTR(sz))))
+
+/* Define common capabilities tested using __caps_unpriv */
+#define CAP_NET_ADMIN 12
+#define CAP_SYS_ADMIN 21
+#define CAP_PERFMON 38
+#define CAP_BPF 39
+
+/* Convenience macros for use with 'asm volatile' blocks */
+#define __naked __attribute__((naked))
+#define __clobber_all "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "memory"
+#define __clobber_common "r0", "r1", "r2", "r3", "r4", "r5", "memory"
+#define __imm(name) [name]"i"(name)
+#define __imm_const(name, expr) [name]"i"(expr)
+#define __imm_addr(name) [name]"i"(&name)
+#define __imm_ptr(name) [name]"r"(&name)
+#define __imm_insn(name, expr) [name]"i"(*(long *)&(expr))
+
+#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+#define offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
+
+/* Magic constants used with __retval() */
+#define POINTER_VALUE 0xbadcafe
+#define TEST_DATA_LEN 64
+
+#ifndef __used
+#define __used __attribute__((used))
+#endif
+
+#if defined(__TARGET_ARCH_x86)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX "__x64_"
+#elif defined(__TARGET_ARCH_s390)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX "__s390x_"
+#elif defined(__TARGET_ARCH_arm64)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX "__arm64_"
+#elif defined(__TARGET_ARCH_riscv)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX "__riscv_"
+#elif defined(__TARGET_ARCH_powerpc)
+#define SYSCALL_WRAPPER 1
+#define SYS_PREFIX ""
+#else
+#define SYSCALL_WRAPPER 0
+#define SYS_PREFIX "__se_"
+#endif
+
+/* How many arguments are passed to a function in registers */
+#if defined(__TARGET_ARCH_x86) || defined(__x86_64__)
+#define FUNC_REG_ARG_CNT 6
+#elif defined(__i386__)
+#define FUNC_REG_ARG_CNT 3
+#elif defined(__TARGET_ARCH_s390) || defined(__s390x__)
+#define FUNC_REG_ARG_CNT 5
+#elif defined(__TARGET_ARCH_arm) || defined(__arm__)
+#define FUNC_REG_ARG_CNT 4
+#elif defined(__TARGET_ARCH_arm64) || defined(__aarch64__)
+#define FUNC_REG_ARG_CNT 8
+#elif defined(__TARGET_ARCH_mips) || defined(__mips__)
+#define FUNC_REG_ARG_CNT 8
+#elif defined(__TARGET_ARCH_powerpc) || defined(__powerpc__) || defined(__powerpc64__)
+#define FUNC_REG_ARG_CNT 8
+#elif defined(__TARGET_ARCH_sparc) || defined(__sparc__)
+#define FUNC_REG_ARG_CNT 6
+#elif defined(__TARGET_ARCH_riscv) || defined(__riscv__)
+#define FUNC_REG_ARG_CNT 8
+#else
+/* default to 5 for others */
+#define FUNC_REG_ARG_CNT 5
+#endif
+
+/* make it look to the compiler like the value is both read and written */
+#define __sink(expr) asm volatile("" : "+g"(expr))
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+ defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
+#define CAN_USE_GOTOL
+#endif
+
+#if __clang_major__ >= 18
+#define CAN_USE_BPF_ST
+#endif
+
+#if __clang_major__ >= 18 && defined(ENABLE_ATOMICS_TESTS) && \
+ (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+	 (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+	 defined(__TARGET_ARCH_powerpc))
+#define CAN_USE_LOAD_ACQ_STORE_REL
+#endif
+
+#if defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86)
+#define SPEC_V1
+#endif
+
+#if defined(__TARGET_ARCH_x86)
+#define SPEC_V4
+#endif
+
+#endif
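
A minimal sketch of how the annotation macros above are typically consumed: a test program declares its expectations through the decl tags, and the test harness reads them back from BTF when loading the object. The program name below is hypothetical; the expected verifier message is the standard one for reading an uninitialized R0.

/* Illustrative only, not part of the patch: a verifier test annotated
 * with the macros above. The load is expected to be rejected.
 */
SEC("socket")
__description("read of uninitialized R0 must be rejected")
__failure __msg("R0 !read_ok")
__naked void uninit_r0(void)
{
	asm volatile ("exit;" ::: __clobber_all);
}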
diff --git a/tools/testing/selftests/bpf/progs/bpf_mod_race.c b/tools/testing/selftests/bpf/progs/bpf_mod_race.c
new file mode 100644
index 000000000000..82a5c6c6ba83
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_mod_race.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+const volatile struct {
+ /* thread to activate trace programs for */
+ pid_t tgid;
+ /* return error from __init function */
+ int inject_error;
+ /* uffd monitored range start address */
+ void *fault_addr;
+} bpf_mod_race_config = { -1 };
+
+int bpf_blocking = 0;
+int res_try_get_module = -1;
+
+static __always_inline bool check_thread_id(void)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+
+ return task->tgid == bpf_mod_race_config.tgid;
+}
+
+/* The trace of execution is something like this:
+ *
+ * finit_module()
+ * load_module()
+ * prepare_coming_module()
+ * notifier_call(MODULE_STATE_COMING)
+ * btf_parse_module()
+ * btf_alloc_id() // Visible to userspace at this point
+ * list_add(btf_mod->list, &btf_modules)
+ * do_init_module()
+ * freeinit = kmalloc()
+ * ret = mod->init()
+ * bpf_prog_widen_race()
+ * bpf_copy_from_user()
+ * ...<sleep>...
+ * if (ret < 0)
+ * ...
+ * free_module()
+ * return ret
+ *
+ * At this point, the module loading thread is blocked; we now load the program:
+ *
+ * bpf_check
+ * add_kfunc_call/check_pseudo_btf_id
+ * btf_try_get_module
+ * try_get_module_live == false
+ * return -ENXIO
+ *
+ * Without the fix (try_get_module_live in btf_try_get_module):
+ *
+ * bpf_check
+ * add_kfunc_call/check_pseudo_btf_id
+ * btf_try_get_module
+ * try_get_module == true
+ * <store module reference in btf_kfunc_tab or used_btf array>
+ * ...
+ * return fd
+ *
+ * Now, if we inject an error into the blocked program, our module will be freed
+ * (going straight from MODULE_STATE_COMING to MODULE_STATE_GOING).
+ * Later, when the bpf program is freed, it will try to module_put the already
+ * freed module. This is why try_get_module_live returns false if mod->state is
+ * not MODULE_STATE_LIVE.
+ */
+
+SEC("fmod_ret.s/bpf_fentry_test1")
+int BPF_PROG(widen_race, int a, int ret)
+{
+ char dst;
+
+ if (!check_thread_id())
+ return 0;
+ /* Indicate that we will attempt to block */
+ bpf_blocking = 1;
+ bpf_copy_from_user(&dst, 1, bpf_mod_race_config.fault_addr);
+ return bpf_mod_race_config.inject_error;
+}
+
+SEC("fexit/do_init_module")
+int BPF_PROG(fexit_init_module, struct module *mod, int ret)
+{
+ if (!check_thread_id())
+ return 0;
+ /* Indicate that we finished blocking */
+ bpf_blocking = 2;
+ return 0;
+}
+
+SEC("fexit/btf_try_get_module")
+int BPF_PROG(fexit_module_get, const struct btf *btf, struct module *mod)
+{
+ res_try_get_module = !!mod;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
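
For context, a hedged sketch of how the userspace side of this test might wire up the const volatile config above through the generated skeleton. The skeleton API names follow the usual bpftool gen conventions; the uffd_region variable and the injected error value are assumptions, not taken from this patch.

/* Hypothetical userspace sketch: fill in the config via the skeleton's
 * rodata before load, then race finit_module() against program load.
 */
struct bpf_mod_race *skel = bpf_mod_race__open();

skel->rodata->bpf_mod_race_config.tgid = getpid();
skel->rodata->bpf_mod_race_config.inject_error = -4242;     /* fail mod->init() */
skel->rodata->bpf_mod_race_config.fault_addr = uffd_region; /* uffd-armed page (assumed) */
if (bpf_mod_race__load(skel))
	/* handle error */;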
diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_common.h b/tools/testing/selftests/bpf/progs/bpf_qdisc_common.h
new file mode 100644
index 000000000000..3754f581b328
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_common.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BPF_QDISC_COMMON_H
+#define _BPF_QDISC_COMMON_H
+
+#define NET_XMIT_SUCCESS 0x00
+#define NET_XMIT_DROP 0x01 /* skb dropped */
+#define NET_XMIT_CN 0x02 /* congestion notification */
+
+#define TC_PRIO_CONTROL 7
+#define TC_PRIO_MAX 15
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+
+struct bpf_sk_buff_ptr;
+
+static struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
+{
+ return (struct qdisc_skb_cb *)skb->cb;
+}
+
+static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
+{
+ return qdisc_skb_cb(skb)->pkt_len;
+}
+
+#endif
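
The private() macro above is what lets several global qdisc objects share one lock scope: everything declared under the same name lands in the same hidden, 8-byte-aligned .data.<name> section, which the verifier uses to associate a lock with the objects it protects. A small sketch of the intended usage (section and variable names are illustrative):

/* Illustrative: a lock and the list it protects must live in the same
 * private section for the verifier to tie them together.
 */
private(FLOWS) struct bpf_spin_lock flows_lock;
private(FLOWS) struct bpf_list_head flows __contains(skb_node, node);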
diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__incompl_ops.c b/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__incompl_ops.c
new file mode 100644
index 000000000000..f188062ed730
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_fail__incompl_ops.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include "bpf_experimental.h"
+#include "bpf_qdisc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops")
+int BPF_PROG(bpf_qdisc_test_enqueue, struct sk_buff *skb, struct Qdisc *sch,
+ struct bpf_sk_buff_ptr *to_free)
+{
+ bpf_qdisc_skb_drop(skb, to_free);
+ return NET_XMIT_DROP;
+}
+
+SEC("struct_ops")
+struct sk_buff *BPF_PROG(bpf_qdisc_test_dequeue, struct Qdisc *sch)
+{
+ return NULL;
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_qdisc_test_reset, struct Qdisc *sch)
+{
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_qdisc_test_destroy, struct Qdisc *sch)
+{
+}
+
+SEC(".struct_ops")
+struct Qdisc_ops test = {
+ .enqueue = (void *)bpf_qdisc_test_enqueue,
+ .dequeue = (void *)bpf_qdisc_test_dequeue,
+ .reset = (void *)bpf_qdisc_test_reset,
+ .destroy = (void *)bpf_qdisc_test_destroy,
+ .id = "bpf_qdisc_test",
+};
+
diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c b/tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
new file mode 100644
index 000000000000..1de2be3e370b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include "bpf_experimental.h"
+#include "bpf_qdisc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct skb_node {
+ struct sk_buff __kptr * skb;
+ struct bpf_list_node node;
+};
+
+private(A) struct bpf_spin_lock q_fifo_lock;
+private(A) struct bpf_list_head q_fifo __contains(skb_node, node);
+
+bool init_called;
+
+SEC("struct_ops/bpf_fifo_enqueue")
+int BPF_PROG(bpf_fifo_enqueue, struct sk_buff *skb, struct Qdisc *sch,
+ struct bpf_sk_buff_ptr *to_free)
+{
+ struct skb_node *skbn;
+ u32 pkt_len;
+
+ if (sch->q.qlen == sch->limit)
+ goto drop;
+
+ skbn = bpf_obj_new(typeof(*skbn));
+ if (!skbn)
+ goto drop;
+
+ pkt_len = qdisc_pkt_len(skb);
+
+ sch->q.qlen++;
+ skb = bpf_kptr_xchg(&skbn->skb, skb);
+ if (skb)
+ bpf_qdisc_skb_drop(skb, to_free);
+
+ bpf_spin_lock(&q_fifo_lock);
+ bpf_list_push_back(&q_fifo, &skbn->node);
+ bpf_spin_unlock(&q_fifo_lock);
+
+ sch->qstats.backlog += pkt_len;
+ return NET_XMIT_SUCCESS;
+drop:
+ bpf_qdisc_skb_drop(skb, to_free);
+ return NET_XMIT_DROP;
+}
+
+SEC("struct_ops/bpf_fifo_dequeue")
+struct sk_buff *BPF_PROG(bpf_fifo_dequeue, struct Qdisc *sch)
+{
+ struct bpf_list_node *node;
+ struct sk_buff *skb = NULL;
+ struct skb_node *skbn;
+
+ bpf_spin_lock(&q_fifo_lock);
+ node = bpf_list_pop_front(&q_fifo);
+ bpf_spin_unlock(&q_fifo_lock);
+ if (!node)
+ return NULL;
+
+ skbn = container_of(node, struct skb_node, node);
+ skb = bpf_kptr_xchg(&skbn->skb, skb);
+ bpf_obj_drop(skbn);
+ if (!skb)
+ return NULL;
+
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ bpf_qdisc_bstats_update(sch, skb);
+ sch->q.qlen--;
+
+ return skb;
+}
+
+SEC("struct_ops/bpf_fifo_init")
+int BPF_PROG(bpf_fifo_init, struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ sch->limit = 1000;
+ init_called = true;
+ return 0;
+}
+
+SEC("struct_ops/bpf_fifo_reset")
+void BPF_PROG(bpf_fifo_reset, struct Qdisc *sch)
+{
+ struct bpf_list_node *node;
+ struct skb_node *skbn;
+ int i;
+
+ bpf_for(i, 0, sch->q.qlen) {
+ struct sk_buff *skb = NULL;
+
+ bpf_spin_lock(&q_fifo_lock);
+ node = bpf_list_pop_front(&q_fifo);
+ bpf_spin_unlock(&q_fifo_lock);
+
+ if (!node)
+ break;
+
+ skbn = container_of(node, struct skb_node, node);
+ skb = bpf_kptr_xchg(&skbn->skb, skb);
+ if (skb)
+ bpf_kfree_skb(skb);
+ bpf_obj_drop(skbn);
+ }
+ sch->q.qlen = 0;
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_fifo_destroy, struct Qdisc *sch)
+{
+}
+
+SEC(".struct_ops")
+struct Qdisc_ops fifo = {
+ .enqueue = (void *)bpf_fifo_enqueue,
+ .dequeue = (void *)bpf_fifo_dequeue,
+ .init = (void *)bpf_fifo_init,
+ .reset = (void *)bpf_fifo_reset,
+ .destroy = (void *)bpf_fifo_destroy,
+ .id = "bpf_fifo",
+};
+
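The fifo above leans on a recurring ownership pattern: a referenced skb kptr is moved into the node with bpf_kptr_xchg(), and whatever the exchange hands back must be released rather than leaked. A condensed sketch of that invariant, mirroring the enqueue path (variable names as in the file):

/* Move ownership of skb into the node; if the slot already held an skb
 * (it should not), the exchange returns it and it must be dropped.
 */
struct sk_buff *old = bpf_kptr_xchg(&skbn->skb, skb);

if (old)
	bpf_qdisc_skb_drop(old, to_free);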
diff --git a/tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c b/tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
new file mode 100644
index 000000000000..1a3233a275c7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
@@ -0,0 +1,756 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* bpf_fq is intended for testing the bpf qdisc infrastructure and is not a
+ * direct copy of sch_fq. bpf_fq implements the scheduling algorithm of sch_fq
+ * before 29f834aa326e ("net_sched: sch_fq: add 3 bands and WRR scheduling")
+ * was introduced. It gives each flow a fair chance to transmit packets in a
+ * round-robin fashion. Note that for flow pacing, bpf_fq currently only
+ * respects skb->tstamp but not skb->sk->sk_pacing_rate. In addition, if there
+ * are multiple bpf_fq instances, they will have a shared view of flows and
+ * configuration since some key data structures, such as fq_prio_flows,
+ * fq_nonprio_flows, and fq_bpf_data, are global.
+ *
+ * To use bpf_fq alone without running selftests, use the following commands.
+ *
+ * 1. Register bpf_fq to the kernel
+ * bpftool struct_ops register bpf_qdisc_fq.bpf.o /sys/fs/bpf
+ * 2. Add bpf_fq to an interface
+ * tc qdisc add dev <interface name> root handle <handle> bpf_fq
+ * 3. Delete bpf_fq attached to the interface
+ * tc qdisc delete dev <interface name> root
+ * 4. Unregister bpf_fq
+ * bpftool struct_ops unregister name fq
+ *
+ * The qdisc name, bpf_fq, used in tc commands is defined by Qdisc_ops.id.
+ * The struct_ops map name, fq, used in the bpftool command, is the variable
+ * name of the Qdisc_ops instance.
+ *
+ * SEC(".struct_ops")
+ * struct Qdisc_ops fq = {
+ * ...
+ * .id = "bpf_fq",
+ * };
+ */
+
+#include <vmlinux.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_experimental.h"
+#include "bpf_qdisc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define NSEC_PER_USEC 1000L
+#define NSEC_PER_SEC 1000000000L
+
+#define NUM_QUEUE (1 << 20)
+
+struct fq_bpf_data {
+ u32 quantum;
+ u32 initial_quantum;
+ u32 flow_refill_delay;
+ u32 flow_plimit;
+ u64 horizon;
+ u32 orphan_mask;
+ u32 timer_slack;
+ u64 time_next_delayed_flow;
+ u64 unthrottle_latency_ns;
+ u8 horizon_drop;
+ u32 new_flow_cnt;
+ u32 old_flow_cnt;
+ u64 ktime_cache;
+};
+
+enum {
+ CLS_RET_PRIO = 0,
+ CLS_RET_NONPRIO = 1,
+ CLS_RET_ERR = 2,
+};
+
+struct skb_node {
+ u64 tstamp;
+ struct sk_buff __kptr * skb;
+ struct bpf_rb_node node;
+};
+
+struct fq_flow_node {
+ int credit;
+ u32 qlen;
+ u64 age;
+ u64 time_next_packet;
+ struct bpf_list_node list_node;
+ struct bpf_rb_node rb_node;
+ struct bpf_rb_root queue __contains(skb_node, node);
+ struct bpf_spin_lock lock;
+ struct bpf_refcount refcount;
+};
+
+struct dequeue_nonprio_ctx {
+ bool stop_iter;
+ u64 expire;
+ u64 now;
+};
+
+struct remove_flows_ctx {
+ bool gc_only;
+ u32 reset_cnt;
+ u32 reset_max;
+};
+
+struct unset_throttled_flows_ctx {
+ bool unset_all;
+ u64 now;
+};
+
+struct fq_stashed_flow {
+ struct fq_flow_node __kptr * flow;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u64);
+ __type(value, struct fq_stashed_flow);
+ __uint(max_entries, NUM_QUEUE);
+} fq_nonprio_flows SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u64);
+ __type(value, struct fq_stashed_flow);
+ __uint(max_entries, 1);
+} fq_prio_flows SEC(".maps");
+
+private(A) struct bpf_spin_lock fq_delayed_lock;
+private(A) struct bpf_rb_root fq_delayed __contains(fq_flow_node, rb_node);
+
+private(B) struct bpf_spin_lock fq_new_flows_lock;
+private(B) struct bpf_list_head fq_new_flows __contains(fq_flow_node, list_node);
+
+private(C) struct bpf_spin_lock fq_old_flows_lock;
+private(C) struct bpf_list_head fq_old_flows __contains(fq_flow_node, list_node);
+
+private(D) struct fq_bpf_data q;
+
+/* Wrapper for bpf_kptr_xchg that expects NULL dst */
+static void bpf_kptr_xchg_back(void *map_val, void *ptr)
+{
+ void *ret;
+
+ ret = bpf_kptr_xchg(map_val, ptr);
+ if (ret)
+ bpf_obj_drop(ret);
+}
+
+static bool skbn_tstamp_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct skb_node *skbn_a;
+ struct skb_node *skbn_b;
+
+ skbn_a = container_of(a, struct skb_node, node);
+ skbn_b = container_of(b, struct skb_node, node);
+
+ return skbn_a->tstamp < skbn_b->tstamp;
+}
+
+static bool fn_time_next_packet_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct fq_flow_node *flow_a;
+ struct fq_flow_node *flow_b;
+
+ flow_a = container_of(a, struct fq_flow_node, rb_node);
+ flow_b = container_of(b, struct fq_flow_node, rb_node);
+
+ return flow_a->time_next_packet < flow_b->time_next_packet;
+}
+
+static void
+fq_flows_add_head(struct bpf_list_head *head, struct bpf_spin_lock *lock,
+ struct fq_flow_node *flow, u32 *flow_cnt)
+{
+ bpf_spin_lock(lock);
+ bpf_list_push_front(head, &flow->list_node);
+ bpf_spin_unlock(lock);
+ *flow_cnt += 1;
+}
+
+static void
+fq_flows_add_tail(struct bpf_list_head *head, struct bpf_spin_lock *lock,
+ struct fq_flow_node *flow, u32 *flow_cnt)
+{
+ bpf_spin_lock(lock);
+ bpf_list_push_back(head, &flow->list_node);
+ bpf_spin_unlock(lock);
+ *flow_cnt += 1;
+}
+
+static void
+fq_flows_remove_front(struct bpf_list_head *head, struct bpf_spin_lock *lock,
+ struct bpf_list_node **node, u32 *flow_cnt)
+{
+ bpf_spin_lock(lock);
+ *node = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ *flow_cnt -= 1;
+}
+
+static bool
+fq_flows_is_empty(struct bpf_list_head *head, struct bpf_spin_lock *lock)
+{
+ struct bpf_list_node *node;
+
+ bpf_spin_lock(lock);
+ node = bpf_list_pop_front(head);
+ if (node) {
+ bpf_list_push_front(head, node);
+ bpf_spin_unlock(lock);
+ return false;
+ }
+ bpf_spin_unlock(lock);
+
+ return true;
+}
+
+/* flow->age is used to denote the state of the flow (not-detached, detached, throttled)
+ * as well as the timestamp when the flow is detached.
+ *
+ * 0: not-detached
+ * 1 - (~0ULL-1): detached
+ * ~0ULL: throttled
+ */
+static void fq_flow_set_detached(struct fq_flow_node *flow)
+{
+ flow->age = bpf_jiffies64();
+}
+
+static bool fq_flow_is_detached(struct fq_flow_node *flow)
+{
+ return flow->age != 0 && flow->age != ~0ULL;
+}
+
+static bool sk_listener(struct sock *sk)
+{
+ return (1 << sk->__sk_common.skc_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
+}
+
+static void fq_gc(void);
+
+static int fq_new_flow(void *flow_map, struct fq_stashed_flow **sflow, u64 hash)
+{
+ struct fq_stashed_flow tmp = {};
+ struct fq_flow_node *flow;
+ int ret;
+
+ flow = bpf_obj_new(typeof(*flow));
+ if (!flow)
+ return -ENOMEM;
+
+	flow->credit = q.initial_quantum;
+	flow->qlen = 0;
+	flow->age = 1;
+	flow->time_next_packet = 0;
+
+ ret = bpf_map_update_elem(flow_map, &hash, &tmp, 0);
+ if (ret == -ENOMEM || ret == -E2BIG) {
+ fq_gc();
+ bpf_map_update_elem(&fq_nonprio_flows, &hash, &tmp, 0);
+ }
+
+ *sflow = bpf_map_lookup_elem(flow_map, &hash);
+ if (!*sflow) {
+ bpf_obj_drop(flow);
+ return -ENOMEM;
+ }
+
+ bpf_kptr_xchg_back(&(*sflow)->flow, flow);
+ return 0;
+}
+
+static int
+fq_classify(struct sk_buff *skb, struct fq_stashed_flow **sflow)
+{
+ struct sock *sk = skb->sk;
+ int ret = CLS_RET_NONPRIO;
+ u64 hash = 0;
+
+ if ((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL) {
+ *sflow = bpf_map_lookup_elem(&fq_prio_flows, &hash);
+ ret = CLS_RET_PRIO;
+ } else {
+ if (!sk || sk_listener(sk)) {
+ hash = bpf_skb_get_hash(skb) & q.orphan_mask;
+ /* Avoid collision with an existing flow hash, which
+ * only uses the lower 32 bits of hash, by setting the
+ * upper half of hash to 1.
+ */
+ hash |= (1ULL << 32);
+ } else if (sk->__sk_common.skc_state == TCP_CLOSE) {
+ hash = bpf_skb_get_hash(skb) & q.orphan_mask;
+ hash |= (1ULL << 32);
+ } else {
+ hash = sk->__sk_common.skc_hash;
+ }
+ *sflow = bpf_map_lookup_elem(&fq_nonprio_flows, &hash);
+ }
+
+ if (!*sflow)
+ ret = fq_new_flow(&fq_nonprio_flows, sflow, hash) < 0 ?
+ CLS_RET_ERR : CLS_RET_NONPRIO;
+
+ return ret;
+}
+
+static bool fq_packet_beyond_horizon(struct sk_buff *skb)
+{
+ return (s64)skb->tstamp > (s64)(q.ktime_cache + q.horizon);
+}
+
+SEC("struct_ops/bpf_fq_enqueue")
+int BPF_PROG(bpf_fq_enqueue, struct sk_buff *skb, struct Qdisc *sch,
+ struct bpf_sk_buff_ptr *to_free)
+{
+ struct fq_flow_node *flow = NULL, *flow_copy;
+ struct fq_stashed_flow *sflow;
+ u64 time_to_send, jiffies;
+ struct skb_node *skbn;
+ int ret;
+
+ if (sch->q.qlen >= sch->limit)
+ goto drop;
+
+ if (!skb->tstamp) {
+ time_to_send = q.ktime_cache = bpf_ktime_get_ns();
+ } else {
+ if (fq_packet_beyond_horizon(skb)) {
+ q.ktime_cache = bpf_ktime_get_ns();
+ if (fq_packet_beyond_horizon(skb)) {
+ if (q.horizon_drop)
+ goto drop;
+
+ skb->tstamp = q.ktime_cache + q.horizon;
+ }
+ }
+ time_to_send = skb->tstamp;
+ }
+
+ ret = fq_classify(skb, &sflow);
+ if (ret == CLS_RET_ERR)
+ goto drop;
+
+ flow = bpf_kptr_xchg(&sflow->flow, flow);
+ if (!flow)
+ goto drop;
+
+ if (ret == CLS_RET_NONPRIO) {
+ if (flow->qlen >= q.flow_plimit) {
+ bpf_kptr_xchg_back(&sflow->flow, flow);
+ goto drop;
+ }
+
+ if (fq_flow_is_detached(flow)) {
+ flow_copy = bpf_refcount_acquire(flow);
+
+ jiffies = bpf_jiffies64();
+ if ((s64)(jiffies - (flow_copy->age + q.flow_refill_delay)) > 0) {
+ if (flow_copy->credit < q.quantum)
+ flow_copy->credit = q.quantum;
+ }
+ flow_copy->age = 0;
+ fq_flows_add_tail(&fq_new_flows, &fq_new_flows_lock, flow_copy,
+ &q.new_flow_cnt);
+ }
+ }
+
+ skbn = bpf_obj_new(typeof(*skbn));
+ if (!skbn) {
+ bpf_kptr_xchg_back(&sflow->flow, flow);
+ goto drop;
+ }
+
+ skbn->tstamp = skb->tstamp = time_to_send;
+
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+
+ skb = bpf_kptr_xchg(&skbn->skb, skb);
+ if (skb)
+ bpf_qdisc_skb_drop(skb, to_free);
+
+ bpf_spin_lock(&flow->lock);
+ bpf_rbtree_add(&flow->queue, &skbn->node, skbn_tstamp_less);
+ bpf_spin_unlock(&flow->lock);
+
+ flow->qlen++;
+ bpf_kptr_xchg_back(&sflow->flow, flow);
+
+ sch->q.qlen++;
+ return NET_XMIT_SUCCESS;
+
+drop:
+ bpf_qdisc_skb_drop(skb, to_free);
+ sch->qstats.drops++;
+ return NET_XMIT_DROP;
+}
+
+static int fq_unset_throttled_flows(u32 index, struct unset_throttled_flows_ctx *ctx)
+{
+ struct bpf_rb_node *node = NULL;
+ struct fq_flow_node *flow;
+
+ bpf_spin_lock(&fq_delayed_lock);
+
+ node = bpf_rbtree_first(&fq_delayed);
+ if (!node) {
+ bpf_spin_unlock(&fq_delayed_lock);
+ return 1;
+ }
+
+ flow = container_of(node, struct fq_flow_node, rb_node);
+ if (!ctx->unset_all && flow->time_next_packet > ctx->now) {
+ q.time_next_delayed_flow = flow->time_next_packet;
+ bpf_spin_unlock(&fq_delayed_lock);
+ return 1;
+ }
+
+ node = bpf_rbtree_remove(&fq_delayed, &flow->rb_node);
+
+ bpf_spin_unlock(&fq_delayed_lock);
+
+ if (!node)
+ return 1;
+
+ flow = container_of(node, struct fq_flow_node, rb_node);
+ flow->age = 0;
+ fq_flows_add_tail(&fq_old_flows, &fq_old_flows_lock, flow, &q.old_flow_cnt);
+
+ return 0;
+}
+
+static void fq_flow_set_throttled(struct fq_flow_node *flow)
+{
+ flow->age = ~0ULL;
+
+ if (q.time_next_delayed_flow > flow->time_next_packet)
+ q.time_next_delayed_flow = flow->time_next_packet;
+
+ bpf_spin_lock(&fq_delayed_lock);
+ bpf_rbtree_add(&fq_delayed, &flow->rb_node, fn_time_next_packet_less);
+ bpf_spin_unlock(&fq_delayed_lock);
+}
+
+static void fq_check_throttled(u64 now)
+{
+ struct unset_throttled_flows_ctx ctx = {
+ .unset_all = false,
+ .now = now,
+ };
+ unsigned long sample;
+
+ if (q.time_next_delayed_flow > now)
+ return;
+
+ sample = (unsigned long)(now - q.time_next_delayed_flow);
+ q.unthrottle_latency_ns -= q.unthrottle_latency_ns >> 3;
+ q.unthrottle_latency_ns += sample >> 3;
+
+ q.time_next_delayed_flow = ~0ULL;
+ bpf_loop(NUM_QUEUE, fq_unset_throttled_flows, &ctx, 0);
+}
+
+static struct sk_buff*
+fq_dequeue_nonprio_flows(u32 index, struct dequeue_nonprio_ctx *ctx)
+{
+ u64 time_next_packet, time_to_send;
+ struct bpf_rb_node *rb_node;
+ struct sk_buff *skb = NULL;
+ struct bpf_list_head *head;
+ struct bpf_list_node *node;
+ struct bpf_spin_lock *lock;
+ struct fq_flow_node *flow;
+ struct skb_node *skbn;
+ bool is_empty;
+ u32 *cnt;
+
+ if (q.new_flow_cnt) {
+ head = &fq_new_flows;
+ lock = &fq_new_flows_lock;
+ cnt = &q.new_flow_cnt;
+ } else if (q.old_flow_cnt) {
+ head = &fq_old_flows;
+ lock = &fq_old_flows_lock;
+ cnt = &q.old_flow_cnt;
+ } else {
+ if (q.time_next_delayed_flow != ~0ULL)
+ ctx->expire = q.time_next_delayed_flow;
+ goto break_loop;
+ }
+
+ fq_flows_remove_front(head, lock, &node, cnt);
+ if (!node)
+ goto break_loop;
+
+ flow = container_of(node, struct fq_flow_node, list_node);
+ if (flow->credit <= 0) {
+ flow->credit += q.quantum;
+ fq_flows_add_tail(&fq_old_flows, &fq_old_flows_lock, flow, &q.old_flow_cnt);
+ return NULL;
+ }
+
+ bpf_spin_lock(&flow->lock);
+ rb_node = bpf_rbtree_first(&flow->queue);
+ if (!rb_node) {
+ bpf_spin_unlock(&flow->lock);
+ is_empty = fq_flows_is_empty(&fq_old_flows, &fq_old_flows_lock);
+ if (head == &fq_new_flows && !is_empty) {
+ fq_flows_add_tail(&fq_old_flows, &fq_old_flows_lock, flow, &q.old_flow_cnt);
+ } else {
+ fq_flow_set_detached(flow);
+ bpf_obj_drop(flow);
+ }
+ return NULL;
+ }
+
+ skbn = container_of(rb_node, struct skb_node, node);
+ time_to_send = skbn->tstamp;
+
+ time_next_packet = (time_to_send > flow->time_next_packet) ?
+ time_to_send : flow->time_next_packet;
+ if (ctx->now < time_next_packet) {
+ bpf_spin_unlock(&flow->lock);
+ flow->time_next_packet = time_next_packet;
+ fq_flow_set_throttled(flow);
+ return NULL;
+ }
+
+ rb_node = bpf_rbtree_remove(&flow->queue, rb_node);
+ bpf_spin_unlock(&flow->lock);
+
+ if (!rb_node)
+ goto add_flow_and_break;
+
+ skbn = container_of(rb_node, struct skb_node, node);
+ skb = bpf_kptr_xchg(&skbn->skb, skb);
+ bpf_obj_drop(skbn);
+
+ if (!skb)
+ goto add_flow_and_break;
+
+ flow->credit -= qdisc_skb_cb(skb)->pkt_len;
+ flow->qlen--;
+
+add_flow_and_break:
+ fq_flows_add_head(head, lock, flow, cnt);
+
+break_loop:
+ ctx->stop_iter = true;
+ return skb;
+}
+
+static struct sk_buff *fq_dequeue_prio(void)
+{
+ struct fq_flow_node *flow = NULL;
+ struct fq_stashed_flow *sflow;
+ struct bpf_rb_node *rb_node;
+ struct sk_buff *skb = NULL;
+ struct skb_node *skbn;
+ u64 hash = 0;
+
+ sflow = bpf_map_lookup_elem(&fq_prio_flows, &hash);
+ if (!sflow)
+ return NULL;
+
+ flow = bpf_kptr_xchg(&sflow->flow, flow);
+ if (!flow)
+ return NULL;
+
+ bpf_spin_lock(&flow->lock);
+ rb_node = bpf_rbtree_first(&flow->queue);
+ if (!rb_node) {
+ bpf_spin_unlock(&flow->lock);
+ goto out;
+ }
+
+ skbn = container_of(rb_node, struct skb_node, node);
+ rb_node = bpf_rbtree_remove(&flow->queue, &skbn->node);
+ bpf_spin_unlock(&flow->lock);
+
+ if (!rb_node)
+ goto out;
+
+ skbn = container_of(rb_node, struct skb_node, node);
+ skb = bpf_kptr_xchg(&skbn->skb, skb);
+ bpf_obj_drop(skbn);
+
+out:
+ bpf_kptr_xchg_back(&sflow->flow, flow);
+
+ return skb;
+}
+
+SEC("struct_ops/bpf_fq_dequeue")
+struct sk_buff *BPF_PROG(bpf_fq_dequeue, struct Qdisc *sch)
+{
+ struct dequeue_nonprio_ctx cb_ctx = {};
+ struct sk_buff *skb = NULL;
+ int i;
+
+ if (!sch->q.qlen)
+ goto out;
+
+ skb = fq_dequeue_prio();
+ if (skb)
+ goto dequeue;
+
+ q.ktime_cache = cb_ctx.now = bpf_ktime_get_ns();
+ fq_check_throttled(q.ktime_cache);
+ bpf_for(i, 0, sch->limit) {
+ skb = fq_dequeue_nonprio_flows(i, &cb_ctx);
+ if (cb_ctx.stop_iter)
+ break;
+	}
+
+ if (skb) {
+dequeue:
+ sch->q.qlen--;
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ bpf_qdisc_bstats_update(sch, skb);
+ return skb;
+ }
+
+ if (cb_ctx.expire)
+ bpf_qdisc_watchdog_schedule(sch, cb_ctx.expire, q.timer_slack);
+out:
+ return NULL;
+}
+
+static int fq_remove_flows_in_list(u32 index, void *ctx)
+{
+ struct bpf_list_node *node;
+ struct fq_flow_node *flow;
+
+ bpf_spin_lock(&fq_new_flows_lock);
+ node = bpf_list_pop_front(&fq_new_flows);
+ bpf_spin_unlock(&fq_new_flows_lock);
+ if (!node) {
+ bpf_spin_lock(&fq_old_flows_lock);
+ node = bpf_list_pop_front(&fq_old_flows);
+ bpf_spin_unlock(&fq_old_flows_lock);
+ if (!node)
+ return 1;
+ }
+
+ flow = container_of(node, struct fq_flow_node, list_node);
+ bpf_obj_drop(flow);
+
+ return 0;
+}
+
+extern unsigned CONFIG_HZ __kconfig;
+
+/* limit number of collected flows per round */
+#define FQ_GC_MAX 8
+#define FQ_GC_AGE (3*CONFIG_HZ)
+
+static bool fq_gc_candidate(struct fq_flow_node *flow)
+{
+ u64 jiffies = bpf_jiffies64();
+
+ return fq_flow_is_detached(flow) &&
+ ((s64)(jiffies - (flow->age + FQ_GC_AGE)) > 0);
+}
+
+static int
+fq_remove_flows(struct bpf_map *flow_map, u64 *hash,
+ struct fq_stashed_flow *sflow, struct remove_flows_ctx *ctx)
+{
+ if (sflow->flow &&
+ (!ctx->gc_only || fq_gc_candidate(sflow->flow))) {
+ bpf_map_delete_elem(flow_map, hash);
+ ctx->reset_cnt++;
+ }
+
+ return ctx->reset_cnt < ctx->reset_max ? 0 : 1;
+}
+
+static void fq_gc(void)
+{
+ struct remove_flows_ctx cb_ctx = {
+ .gc_only = true,
+ .reset_cnt = 0,
+ .reset_max = FQ_GC_MAX,
+ };
+
+ bpf_for_each_map_elem(&fq_nonprio_flows, fq_remove_flows, &cb_ctx, 0);
+}
+
+SEC("struct_ops/bpf_fq_reset")
+void BPF_PROG(bpf_fq_reset, struct Qdisc *sch)
+{
+ struct unset_throttled_flows_ctx utf_ctx = {
+ .unset_all = true,
+ };
+ struct remove_flows_ctx rf_ctx = {
+ .gc_only = false,
+ .reset_cnt = 0,
+ .reset_max = NUM_QUEUE,
+ };
+ struct fq_stashed_flow *sflow;
+ u64 hash = 0;
+
+ sch->q.qlen = 0;
+ sch->qstats.backlog = 0;
+
+ bpf_for_each_map_elem(&fq_nonprio_flows, fq_remove_flows, &rf_ctx, 0);
+
+ rf_ctx.reset_cnt = 0;
+ bpf_for_each_map_elem(&fq_prio_flows, fq_remove_flows, &rf_ctx, 0);
+ fq_new_flow(&fq_prio_flows, &sflow, hash);
+
+ bpf_loop(NUM_QUEUE, fq_remove_flows_in_list, NULL, 0);
+ q.new_flow_cnt = 0;
+ q.old_flow_cnt = 0;
+
+ bpf_loop(NUM_QUEUE, fq_unset_throttled_flows, &utf_ctx, 0);
+}
+
+SEC("struct_ops/bpf_fq_init")
+int BPF_PROG(bpf_fq_init, struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev = sch->dev_queue->dev;
+ u32 psched_mtu = dev->mtu + dev->hard_header_len;
+ struct fq_stashed_flow *sflow;
+ u64 hash = 0;
+
+ if (fq_new_flow(&fq_prio_flows, &sflow, hash) < 0)
+ return -ENOMEM;
+
+ sch->limit = 10000;
+ q.initial_quantum = 10 * psched_mtu;
+ q.quantum = 2 * psched_mtu;
+ q.flow_refill_delay = 40;
+ q.flow_plimit = 100;
+ q.horizon = 10ULL * NSEC_PER_SEC;
+ q.horizon_drop = 1;
+ q.orphan_mask = 1024 - 1;
+ q.timer_slack = 10 * NSEC_PER_USEC;
+ q.time_next_delayed_flow = ~0ULL;
+ q.unthrottle_latency_ns = 0ULL;
+ q.new_flow_cnt = 0;
+ q.old_flow_cnt = 0;
+
+ return 0;
+}
+
+SEC("struct_ops")
+void BPF_PROG(bpf_fq_destroy, struct Qdisc *sch)
+{
+}
+
+SEC(".struct_ops")
+struct Qdisc_ops fq = {
+ .enqueue = (void *)bpf_fq_enqueue,
+ .dequeue = (void *)bpf_fq_dequeue,
+ .reset = (void *)bpf_fq_reset,
+ .init = (void *)bpf_fq_init,
+ .destroy = (void *)bpf_fq_destroy,
+ .id = "bpf_fq",
+};
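
The flow->age encoding documented near the top of the file (0 = not-detached, ~0ULL = throttled, anything in between = the detach timestamp) implies one more predicate that the file never needs to spell out. A sketch for completeness (not part of the patch):

/* Complements fq_flow_set_detached()/fq_flow_is_detached() above. */
static bool fq_flow_is_throttled(struct fq_flow_node *flow)
{
	return flow->age == ~0ULL;
}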
diff --git a/tools/testing/selftests/bpf/progs/bpf_smc.c b/tools/testing/selftests/bpf/progs/bpf_smc.c
new file mode 100644
index 000000000000..70d8b08f5914
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_smc.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_tracing_net.h"
+
+char _license[] SEC("license") = "GPL";
+
+enum {
+ BPF_SMC_LISTEN = 10,
+};
+
+struct smc_sock___local {
+ struct sock sk;
+ struct smc_sock *listen_smc;
+ bool use_fallback;
+} __attribute__((preserve_access_index));
+
+int smc_cnt = 0;
+int fallback_cnt = 0;
+
+SEC("fentry/smc_release")
+int BPF_PROG(bpf_smc_release, struct socket *sock)
+{
+ /* only count from one side (client) */
+ if (sock->sk->__sk_common.skc_state == BPF_SMC_LISTEN)
+ return 0;
+ smc_cnt++;
+ return 0;
+}
+
+SEC("fentry/smc_switch_to_fallback")
+int BPF_PROG(bpf_smc_switch_to_fallback, struct smc_sock___local *smc)
+{
+ /* only count from one side (client) */
+ if (smc && !smc->listen_smc)
+ fallback_cnt++;
+ return 0;
+}
+
+/* fall back to the default value if no strategy was found */
+bool default_ip_strat_value = true;
+
+struct smc_policy_ip_key {
+ __u32 sip;
+ __u32 dip;
+};
+
+struct smc_policy_ip_value {
+ __u8 mode;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(struct smc_policy_ip_key));
+ __uint(value_size, sizeof(struct smc_policy_ip_value));
+ __uint(max_entries, 128);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} smc_policy_ip SEC(".maps");
+
+static bool smc_check(__u32 src, __u32 dst)
+{
+ struct smc_policy_ip_value *value;
+ struct smc_policy_ip_key key = {
+ .sip = src,
+ .dip = dst,
+ };
+
+ value = bpf_map_lookup_elem(&smc_policy_ip, &key);
+ return value ? value->mode : default_ip_strat_value;
+}
+
+SEC("fmod_ret/update_socket_protocol")
+int BPF_PROG(smc_run, int family, int type, int protocol)
+{
+ struct task_struct *task;
+
+ if (family != AF_INET && family != AF_INET6)
+ return protocol;
+
+ if ((type & 0xf) != SOCK_STREAM)
+ return protocol;
+
+ if (protocol != 0 && protocol != IPPROTO_TCP)
+ return protocol;
+
+ task = bpf_get_current_task_btf();
+	/* Prevent this from affecting other tests */
+ if (!task || !task->nsproxy->net_ns->smc.hs_ctrl)
+ return protocol;
+
+ return IPPROTO_SMC;
+}
+
+SEC("struct_ops")
+int BPF_PROG(bpf_smc_set_tcp_option_cond, const struct tcp_sock *tp,
+ struct inet_request_sock *ireq)
+{
+ return smc_check(ireq->req.__req_common.skc_daddr,
+ ireq->req.__req_common.skc_rcv_saddr);
+}
+
+SEC("struct_ops")
+int BPF_PROG(bpf_smc_set_tcp_option, struct tcp_sock *tp)
+{
+ return smc_check(tp->inet_conn.icsk_inet.sk.__sk_common.skc_rcv_saddr,
+ tp->inet_conn.icsk_inet.sk.__sk_common.skc_daddr);
+}
+
+SEC(".struct_ops")
+struct smc_hs_ctrl linkcheck = {
+ .name = "linkcheck",
+ .syn_option = (void *)bpf_smc_set_tcp_option,
+ .synack_option = (void *)bpf_smc_set_tcp_option_cond,
+};
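
The smc_policy_ip map gives userspace per-(saddr, daddr) control over whether the linkcheck strategy advertises SMC. A hedged sketch of how a controller might seed it; the pin path and addresses are assumptions, while the key/value layout comes from the file above.

/* Hypothetical userspace sketch: disable SMC for one address pair.
 * Assumes the map was pinned at the path below; mode 0 means "use TCP".
 */
int map_fd = bpf_obj_get("/sys/fs/bpf/smc_policy_ip");
struct smc_policy_ip_key key = {
	.sip = inet_addr("192.0.2.1"),
	.dip = inet_addr("192.0.2.2"),
};
struct smc_policy_ip_value val = { .mode = 0 };

if (map_fd >= 0)
	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);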
diff --git a/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c
new file mode 100644
index 000000000000..9e7d9674ce2a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2022 Sony Group Corporation */
+#include <vmlinux.h>
+
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+int arg1 = 0;
+unsigned long arg2 = 0;
+unsigned long arg3 = 0;
+unsigned long arg4_cx = 0;
+unsigned long arg4 = 0;
+unsigned long arg5 = 0;
+
+int arg1_core = 0;
+unsigned long arg2_core = 0;
+unsigned long arg3_core = 0;
+unsigned long arg4_core_cx = 0;
+unsigned long arg4_core = 0;
+unsigned long arg5_core = 0;
+
+int option_syscall = 0;
+unsigned long arg2_syscall = 0;
+unsigned long arg3_syscall = 0;
+unsigned long arg4_syscall = 0;
+unsigned long arg5_syscall = 0;
+
+const volatile pid_t filter_pid = 0;
+
+SEC("kprobe/" SYS_PREFIX "sys_prctl")
+int BPF_KPROBE(handle_sys_prctl)
+{
+ struct pt_regs *real_regs;
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+ unsigned long tmp = 0;
+
+ if (pid != filter_pid)
+ return 0;
+
+ real_regs = PT_REGS_SYSCALL_REGS(ctx);
+
+ /* test for PT_REGS_PARM */
+
+ bpf_probe_read_kernel(&tmp, sizeof(tmp), &PT_REGS_PARM1_SYSCALL(real_regs));
+ arg1 = tmp;
+ bpf_probe_read_kernel(&arg2, sizeof(arg2), &PT_REGS_PARM2_SYSCALL(real_regs));
+ bpf_probe_read_kernel(&arg3, sizeof(arg3), &PT_REGS_PARM3_SYSCALL(real_regs));
+ bpf_probe_read_kernel(&arg4_cx, sizeof(arg4_cx), &PT_REGS_PARM4(real_regs));
+ bpf_probe_read_kernel(&arg4, sizeof(arg4), &PT_REGS_PARM4_SYSCALL(real_regs));
+ bpf_probe_read_kernel(&arg5, sizeof(arg5), &PT_REGS_PARM5_SYSCALL(real_regs));
+
+ /* test for the CORE variant of PT_REGS_PARM */
+ arg1_core = PT_REGS_PARM1_CORE_SYSCALL(real_regs);
+ arg2_core = PT_REGS_PARM2_CORE_SYSCALL(real_regs);
+ arg3_core = PT_REGS_PARM3_CORE_SYSCALL(real_regs);
+ arg4_core_cx = PT_REGS_PARM4_CORE(real_regs);
+ arg4_core = PT_REGS_PARM4_CORE_SYSCALL(real_regs);
+ arg5_core = PT_REGS_PARM5_CORE_SYSCALL(real_regs);
+
+ return 0;
+}
+
+SEC("ksyscall/prctl")
+int BPF_KSYSCALL(prctl_enter, int option, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != filter_pid)
+ return 0;
+
+ option_syscall = option;
+ arg2_syscall = arg2;
+ arg3_syscall = arg3;
+ arg4_syscall = arg4;
+ arg5_syscall = arg5;
+ return 0;
+}
+
+__u64 splice_fd_in;
+__u64 splice_off_in;
+__u64 splice_fd_out;
+__u64 splice_off_out;
+__u64 splice_len;
+__u64 splice_flags;
+
+SEC("ksyscall/splice")
+int BPF_KSYSCALL(splice_enter, int fd_in, loff_t *off_in, int fd_out,
+ loff_t *off_out, size_t len, unsigned int flags)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != filter_pid)
+ return 0;
+
+ splice_fd_in = fd_in;
+ splice_off_in = (__u64)off_in;
+ splice_fd_out = fd_out;
+ splice_off_out = (__u64)off_out;
+ splice_len = len;
+ splice_flags = flags;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c b/tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c
new file mode 100644
index 000000000000..8a7a4c1b54e8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "X";
+
+SEC("struct_ops")
+void BPF_PROG(nogpltcp_init, struct sock *sk)
+{
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops bpf_nogpltcp = {
+ .init = (void *)nogpltcp_init,
+ .name = "bpf_nogpltcp",
+};
diff --git a/tools/testing/selftests/bpf/progs/bpf_test_utils.h b/tools/testing/selftests/bpf/progs/bpf_test_utils.h
new file mode 100644
index 000000000000..f4e67b492dd2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_test_utils.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BPF_TEST_UTILS_H__
+#define __BPF_TEST_UTILS_H__
+
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+/* Clobber as many native registers and stack slots as possible. */
+static __always_inline void clobber_regs_stack(void)
+{
+ char tmp_str[] = "123456789";
+ unsigned long tmp;
+
+ bpf_strtoul(tmp_str, sizeof(tmp_str), 0, &tmp);
+ __sink(tmp);
+}
+
+#endif
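
A sketch of the intended call pattern: a test program clobbers registers and stack first, so that whatever state the code under test depends on must survive the clobber. The program shape and the stand-in assertion are illustrative, not from the patch.

/* Illustrative caller: clobber registers/stack first, then run the check. */
SEC("tc")
int test_prog(struct __sk_buff *skb)
{
	clobber_regs_stack();
	return skb->len > 0 ? 0 : 1; /* stand-in for the real assertion */
}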
diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
index 01378911252b..d8dacef37c16 100644
--- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
+++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
@@ -2,14 +2,93 @@
#ifndef __BPF_TRACING_NET_H__
#define __BPF_TRACING_NET_H__
+#include <vmlinux.h>
+#include <bpf/bpf_core_read.h>
+
#define AF_INET 2
#define AF_INET6 10
+#define SOL_SOCKET 1
+#define SO_REUSEADDR 2
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+#define SO_KEEPALIVE 9
+#define SO_PRIORITY 12
+#define SO_REUSEPORT 15
+#if defined(__TARGET_ARCH_powerpc)
+#define SO_RCVLOWAT 16
+#else
+#define SO_RCVLOWAT 18
+#endif
+#define SO_BINDTODEVICE 25
+#define SO_MARK 36
+#define SO_MAX_PACING_RATE 47
+#define SO_BINDTOIFINDEX 62
+#define SO_TXREHASH 74
+#define __SO_ACCEPTCON (1 << 16)
+
+#define IP_TOS 1
+
+#define SOL_IPV6 41
+#define IPV6_TCLASS 67
+#define IPV6_AUTOFLOWLABEL 70
+
+#define TC_ACT_UNSPEC (-1)
+#define TC_ACT_OK 0
+#define TC_ACT_SHOT 2
+
+#define SOL_TCP 6
+#define TCP_NODELAY 1
+#define TCP_MAXSEG 2
+#define TCP_KEEPIDLE 4
+#define TCP_KEEPINTVL 5
+#define TCP_KEEPCNT 6
+#define TCP_SYNCNT 7
+#define TCP_WINDOW_CLAMP 10
+#define TCP_CONGESTION 13
+#define TCP_THIN_LINEAR_TIMEOUTS 16
+#define TCP_USER_TIMEOUT 18
+#define TCP_NOTSENT_LOWAT 25
+#define TCP_SAVE_SYN 27
+#define TCP_SAVED_SYN 28
+#define TCP_CA_NAME_MAX 16
+#define TCP_NAGLE_OFF 1
+#define TCP_RTO_MAX_MS 44
+
+#define TCP_ECN_OK 1
+#define TCP_ECN_QUEUE_CWR 2
+#define TCP_ECN_DEMAND_CWR 4
+#define TCP_ECN_SEEN 8
+
+#define TCP_CONG_NEEDS_ECN 0x2
+
#define ICSK_TIME_RETRANS 1
#define ICSK_TIME_PROBE0 3
#define ICSK_TIME_LOSS_PROBE 5
#define ICSK_TIME_REO_TIMEOUT 6
+#define ETH_ALEN 6
+#define ETH_HLEN 14
+#define ETH_P_IP 0x0800
+#define ETH_P_IPV6 0x86DD
+
+#define NEXTHDR_TCP 6
+
+#define TCPOPT_NOP 1
+#define TCPOPT_EOL 0
+#define TCPOPT_MSS 2
+#define TCPOPT_WINDOW 3
+#define TCPOPT_TIMESTAMP 8
+#define TCPOPT_SACK_PERM 4
+
+#define TCPOLEN_MSS 4
+#define TCPOLEN_WINDOW 3
+#define TCPOLEN_TIMESTAMP 10
+#define TCPOLEN_SACK_PERM 2
+
+#define CHECKSUM_NONE 0
+#define CHECKSUM_PARTIAL 3
+
#define IFNAMSIZ 16
#define RTF_GATEWAY 0x0002
@@ -17,6 +96,14 @@
#define TCP_INFINITE_SSTHRESH 0x7fffffff
#define TCP_PINGPONG_THRESH 3
+#define FLAG_DATA_ACKED 0x04 /* This ACK acknowledged new data. */
+#define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */
+#define FLAG_DATA_SACKED 0x20 /* New SACK. */
+#define FLAG_SND_UNA_ADVANCED \
+ 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
+#define FLAG_ACKED (FLAG_DATA_ACKED | FLAG_SYN_ACKED)
+#define FLAG_FORWARD_PROGRESS (FLAG_ACKED | FLAG_DATA_SACKED)
+
#define fib_nh_dev nh_common.nhc_dev
#define fib_nh_gw_family nh_common.nhc_gw_family
#define fib_nh_gw6 nh_common.nhc_gw.ipv6
@@ -25,6 +112,8 @@
#define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr
#define inet_dport sk.__sk_common.skc_dport
+#define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1]
+
#define ir_loc_addr req.__req_common.skc_rcv_saddr
#define ir_num req.__req_common.skc_num
#define ir_rmt_addr req.__req_common.skc_daddr
@@ -32,12 +121,19 @@
#define ir_v6_rmt_addr req.__req_common.skc_v6_daddr
#define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr
+#define sk_num __sk_common.skc_num
+#define sk_dport __sk_common.skc_dport
#define sk_family __sk_common.skc_family
#define sk_rmem_alloc sk_backlog.rmem_alloc
#define sk_refcnt __sk_common.skc_refcnt
#define sk_state __sk_common.skc_state
+#define sk_net __sk_common.skc_net
+#define sk_rcv_saddr __sk_common.skc_rcv_saddr
#define sk_v6_daddr __sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
+#define sk_flags __sk_common.skc_flags
+#define sk_reuse __sk_common.skc_reuse
+#define sk_cookie __sk_common.skc_cookie
#define s6_addr32 in6_u.u6_addr32
@@ -48,4 +144,51 @@
#define tw_v6_daddr __tw_common.skc_v6_daddr
#define tw_v6_rcv_saddr __tw_common.skc_v6_rcv_saddr
+#define tcp_jiffies32 ((__u32)bpf_jiffies64())
+
+#ifndef min
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#endif
+#ifndef max
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#endif
+
+static inline bool before(__u32 seq1, __u32 seq2)
+{
+ return (__s32)(seq1 - seq2) < 0;
+}
+
+#define after(seq2, seq1) before(seq1, seq2)
+
+static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
+{
+ return (struct inet_connection_sock *)sk;
+}
+
+static inline void *inet_csk_ca(const struct sock *sk)
+{
+ return (void *)inet_csk(sk)->icsk_ca_priv;
+}
+
+static inline struct tcp_sock *tcp_sk(const struct sock *sk)
+{
+ return (struct tcp_sock *)sk;
+}
+
+static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
+{
+ return tp->snd_cwnd < tp->snd_ssthresh;
+}
+
+static inline bool tcp_is_cwnd_limited(const struct sock *sk)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+ /* If in slow start, ensure cwnd grows to twice what was ACKed. */
+ if (tcp_in_slow_start(tp))
+ return tp->snd_cwnd < 2 * tp->max_packets_out;
+
+ return !!BPF_CORE_READ_BITFIELD(tp, is_cwnd_limited);
+}
+
#endif
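
before()/after() above implement TCP's wrap-safe sequence comparison: subtracting in 32 bits and testing the sign of the result handles sequence-number wraparound correctly. A one-line illustration:

/* Wrap-around safe: sequence 0x00000001 is "after" 0xffffffff because
 * (__s32)(0xffffffff - 0x00000001) < 0.
 */
__u32 a = 0xffffffff, b = 0x00000001;
bool b_is_newer = after(b, a); /* true, despite b < a numerically */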
diff --git a/tools/testing/selftests/bpf/progs/bprm_opts.c b/tools/testing/selftests/bpf/progs/bprm_opts.c
index 5bfef2887e70..418d9c6d4952 100644
--- a/tools/testing/selftests/bpf/progs/bprm_opts.c
+++ b/tools/testing/selftests/bpf/progs/bprm_opts.c
@@ -4,7 +4,7 @@
* Copyright 2020 Google LLC.
*/
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_signed_arr_elem_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_signed_arr_elem_sz.c
new file mode 100644
index 000000000000..21a560427b10
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_bad_signed_arr_elem_sz.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_arrays___err_bad_signed_arr_elem_sz x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val.c
new file mode 100644
index 000000000000..888e79db6a77
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enum64val x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___diff.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___diff.c
new file mode 100644
index 000000000000..194749130d87
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___diff.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enum64val___diff x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___err_missing.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___err_missing.c
new file mode 100644
index 000000000000..3d732d4193e4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___err_missing.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enum64val___err_missing x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___val3_missing.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___val3_missing.c
new file mode 100644
index 000000000000..17cf5d6a848d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___val3_missing.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enum64val___val3_missing x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
deleted file mode 100644
index dd0ffa518f36..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_arr_kind x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
deleted file mode 100644
index bc83372088ad..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_arr_value_type x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
deleted file mode 100644
index 917bec41be08..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_int_kind x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
deleted file mode 100644
index 6ec7e6ec1c91..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_int_sz x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
deleted file mode 100644
index 7bbcacf2b0d1..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_int_type x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
deleted file mode 100644
index f384dd38ec70..000000000000
--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "core_reloc_types.h"
-
-void f(struct core_reloc_existence___err_wrong_struct_type x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
new file mode 100644
index 000000000000..d14b496190c3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_existence___wrong_field_defs x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_offs.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_offs.c
new file mode 100644
index 000000000000..3824345d82ab
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_offs.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_size___diff_offs x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff.c
new file mode 100644
index 000000000000..57ae2c258928
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_type_based___diff x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c
index 8f44767a75fa..e01690618e1e 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c
@@ -11,7 +11,7 @@
/*
*struct bitfields_only_mixed_types {
* int a: 3;
- * long int b: 2;
+ * long b: 2;
* _Bool c: 1;
* enum {
* A = 0,
@@ -27,7 +27,7 @@
struct bitfields_only_mixed_types {
int a: 3;
- long int b: 2;
+ long b: 2;
bool c: 1; /* it's really a _Bool type */
enum {
A, /* A = 0, dumper is very explicit */
@@ -44,8 +44,8 @@ struct bitfields_only_mixed_types {
* char: 4;
* int a: 4;
* short b;
- * long int c;
- * long int d: 8;
+ * long c;
+ * long d: 8;
* int e;
* int f;
*};
@@ -53,7 +53,7 @@ struct bitfields_only_mixed_types {
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct bitfield_mixed_with_others {
- long: 4; /* char is enough as a backing field */
+ char: 4; /* char is enough as a backing field */
int a: 4;
/* 8-bit implicit padding */
short b; /* combined with previous bitfield */
@@ -71,7 +71,7 @@ struct bitfield_mixed_with_others {
*struct bitfield_flushed {
* int a: 4;
* long: 60;
- * long int b: 16;
+ * long b: 16;
*};
*
*/
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c
index ba97165bdb28..a657651eba52 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c
@@ -14,9 +14,9 @@ typedef int *ptr_arr_t[6];
typedef int *ptr_multiarr_t[7][8][9][10];
-typedef int * (*fn_ptr_arr_t[11])();
+typedef int * (*fn_ptr_arr_t[11])(void);
-typedef int * (*fn_ptr_multiarr_t[12][13])();
+typedef int * (*fn_ptr_multiarr_t[12][13])(void);
struct root_struct {
arr_t _1;
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c
index 1cef3bec1dc7..7998f27df7dd 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c
@@ -29,7 +29,7 @@ struct non_packed_fields {
struct nested_packed {
char: 4;
int a: 4;
- long int b;
+ long b;
struct {
char c;
int d;
@@ -44,7 +44,7 @@ union union_is_never_packed {
union union_does_not_need_packing {
struct {
- long int a;
+ long a;
int b;
} __attribute__((packed));
int c;
@@ -58,7 +58,81 @@ union jump_code_union {
} __attribute__((packed));
};
-/*------ END-EXPECTED-OUTPUT ------ */
+/* ----- START-EXPECTED-OUTPUT ----- */
+/*
+ *struct nested_packed_but_aligned_struct {
+ * int x1;
+ * int x2;
+ *};
+ *
+ *struct outer_implicitly_packed_struct {
+ * char y1;
+ * struct nested_packed_but_aligned_struct y2;
+ *} __attribute__((packed));
+ *
+ */
+/* ------ END-EXPECTED-OUTPUT ------ */
+
+struct nested_packed_but_aligned_struct {
+ int x1;
+ int x2;
+} __attribute__((packed));
+
+struct outer_implicitly_packed_struct {
+ char y1;
+ struct nested_packed_but_aligned_struct y2;
+};
+/* ----- START-EXPECTED-OUTPUT ----- */
+/*
+ *struct usb_ss_ep_comp_descriptor {
+ * char: 8;
+ * char bDescriptorType;
+ * char bMaxBurst;
+ * short wBytesPerInterval;
+ *};
+ *
+ *struct usb_host_endpoint {
+ * long: 64;
+ * char: 8;
+ * struct usb_ss_ep_comp_descriptor ss_ep_comp;
+ * long: 0;
+ *} __attribute__((packed));
+ *
+ */
+/* ------ END-EXPECTED-OUTPUT ------ */
+
+struct usb_ss_ep_comp_descriptor {
+ char: 8;
+ char bDescriptorType;
+ char bMaxBurst;
+ int: 0;
+ short wBytesPerInterval;
+} __attribute__((packed));
+
+struct usb_host_endpoint {
+ long: 64;
+ char: 8;
+ struct usb_ss_ep_comp_descriptor ss_ep_comp;
+ long: 0;
+};
+
+/* ----- START-EXPECTED-OUTPUT ----- */
+struct nested_packed_struct {
+ int a;
+ char b;
+} __attribute__((packed));
+
+struct outer_nonpacked_struct {
+ short a;
+ struct nested_packed_struct b;
+};
+
+struct outer_packed_struct {
+ short a;
+ struct nested_packed_struct b;
+} __attribute__((packed));
+
+/* ------ END-EXPECTED-OUTPUT ------ */
int f(struct {
struct packed_trailing_space _1;
@@ -69,6 +143,10 @@ int f(struct {
union union_is_never_packed _6;
union union_does_not_need_packing _7;
union jump_code_union _8;
+ struct outer_implicitly_packed_struct _9;
+ struct usb_host_endpoint _10;
+ struct outer_nonpacked_struct _11;
+ struct outer_packed_struct _12;
} *_)
{
return 0;
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
index 35c512818a56..79276fbe454a 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
@@ -9,7 +9,7 @@
/* ----- START-EXPECTED-OUTPUT ----- */
struct padded_implicitly {
int a;
- long int b;
+ long b;
char c;
};
@@ -19,7 +19,7 @@ struct padded_implicitly {
/*
*struct padded_explicitly {
* int a;
- * int: 32;
+ * long: 0;
* int b;
*};
*
@@ -28,41 +28,28 @@ struct padded_implicitly {
struct padded_explicitly {
int a;
- int: 1; /* algo will explicitly pad with full 32 bits here */
+ int: 1; /* algo will emit aligning `long: 0;` here */
int b;
};
/* ----- START-EXPECTED-OUTPUT ----- */
-/*
- *struct padded_a_lot {
- * int a;
- * long: 32;
- * long: 64;
- * long: 64;
- * int b;
- *};
- *
- */
-/* ------ END-EXPECTED-OUTPUT ------ */
-
struct padded_a_lot {
int a;
- /* 32 bit of implicit padding here, which algo will make explicit */
long: 64;
long: 64;
int b;
};
+/* ------ END-EXPECTED-OUTPUT ------ */
+
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct padded_cache_line {
* int a;
- * long: 32;
* long: 64;
* long: 64;
* long: 64;
* int b;
- * long: 32;
* long: 64;
* long: 64;
* long: 64;
@@ -85,7 +72,7 @@ struct padded_cache_line {
*struct zone {
* int a;
* short b;
- * short: 16;
+ * long: 0;
* struct zone_padding __pad__;
*};
*
@@ -102,12 +89,160 @@ struct zone {
struct zone_padding __pad__;
};
+/* ----- START-EXPECTED-OUTPUT ----- */
+struct padding_wo_named_members {
+ long: 64;
+ long: 64;
+};
+
+struct padding_weird_1 {
+ int a;
+ long: 64;
+ short: 16;
+ short b;
+};
+
+/* ------ END-EXPECTED-OUTPUT ------ */
+
+/* ----- START-EXPECTED-OUTPUT ----- */
+/*
+ *struct padding_weird_2 {
+ * long: 56;
+ * char a;
+ * long: 56;
+ * char b;
+ * char: 8;
+ *};
+ *
+ */
+/* ------ END-EXPECTED-OUTPUT ------ */
+struct padding_weird_2 {
+ int: 32; /* these paddings will be collapsed into `long: 56;` */
+ short: 16;
+ char: 8;
+ char a;
+ int: 32; /* these paddings will be collapsed into `long: 56;` */
+ short: 16;
+ char: 8;
+ char b;
+ char: 8;
+};
+
+/* ----- START-EXPECTED-OUTPUT ----- */
+struct exact_1byte {
+ char x;
+};
+
+struct padded_1byte {
+ char: 8;
+};
+
+struct exact_2bytes {
+ short x;
+};
+
+struct padded_2bytes {
+ short: 16;
+};
+
+struct exact_4bytes {
+ int x;
+};
+
+struct padded_4bytes {
+ int: 32;
+};
+
+struct exact_8bytes {
+ long x;
+};
+
+struct padded_8bytes {
+ long: 64;
+};
+
+struct ff_periodic_effect {
+ int: 32;
+ short magnitude;
+ long: 0;
+ short phase;
+ long: 0;
+ int: 32;
+ int custom_len;
+ short *custom_data;
+};
+
+struct ib_wc {
+ long: 64;
+ long: 64;
+ int: 32;
+ int byte_len;
+ void *qp;
+ union {} ex;
+ long: 64;
+ int slid;
+ int wc_flags;
+ long: 64;
+ char smac[6];
+ long: 0;
+ char network_hdr_type;
+};
+
+struct acpi_object_method {
+ long: 64;
+ char: 8;
+ char type;
+ short reference_count;
+ char flags;
+ short: 0;
+ char: 8;
+ char sync_level;
+ long: 64;
+ void *node;
+ void *aml_start;
+ union {} dispatch;
+ long: 64;
+ int aml_length;
+};
+
+struct nested_unpacked {
+ int x;
+};
+
+struct nested_packed {
+ struct nested_unpacked a;
+ char c;
+} __attribute__((packed));
+
+struct outer_mixed_but_unpacked {
+ struct nested_packed b1;
+ short a1;
+ struct nested_packed b2;
+};
+
+/* ------ END-EXPECTED-OUTPUT ------ */
+
int f(struct {
struct padded_implicitly _1;
struct padded_explicitly _2;
struct padded_a_lot _3;
struct padded_cache_line _4;
struct zone _5;
+ struct padding_wo_named_members _6;
+ struct padding_weird_1 _7;
+ struct padding_weird_2 _8;
+ struct exact_1byte _100;
+ struct padded_1byte _101;
+ struct exact_2bytes _102;
+ struct padded_2bytes _103;
+ struct exact_4bytes _104;
+ struct padded_4bytes _105;
+ struct exact_8bytes _106;
+ struct padded_8bytes _107;
+ struct ff_periodic_effect _200;
+ struct ib_wc _201;
+ struct acpi_object_method _202;
+ struct outer_mixed_but_unpacked _203;
} *_)
{
return 0;
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
index 31975c96e2c9..29d01fff32bd 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
@@ -25,6 +25,39 @@ typedef enum {
H = 2,
} e3_t;
+/* ----- START-EXPECTED-OUTPUT ----- */
+/*
+ *enum e_byte {
+ * EBYTE_1 = 0,
+ * EBYTE_2 = 1,
+ *} __attribute__((mode(byte)));
+ *
+ */
+/* ----- END-EXPECTED-OUTPUT ----- */
+enum e_byte {
+ EBYTE_1,
+ EBYTE_2,
+} __attribute__((mode(byte)));
+
+/* ----- START-EXPECTED-OUTPUT ----- */
+/*
+ *enum e_word {
+ * EWORD_1 = 0LL,
+ * EWORD_2 = 1LL,
+ *} __attribute__((mode(word)));
+ *
+ */
+/* ----- END-EXPECTED-OUTPUT ----- */
+enum e_word {
+ EWORD_1,
+ EWORD_2,
+} __attribute__((mode(word))); /* force to use 8-byte backing for this enum */
+
+/* ----- START-EXPECTED-OUTPUT ----- */
+enum e_big {
+ EBIG_1 = 1000000000000ULL,
+};
+
typedef int int_t;
typedef volatile const int * volatile const crazy_ptr_t;
@@ -51,7 +84,7 @@ typedef void (*printf_fn_t)(const char *, ...);
* typedef int (*fn_t)(int);
* typedef char * const * (*fn_ptr2_t)(s_t, fn_t);
*
- * - `fn_complext_t`: pointer to a function returning struct and accepting
+ * - `fn_complex_t`: pointer to a function returning struct and accepting
* union and struct. All structs and enum are anonymous and defined inline.
*
 * - `signal_t`: pointer to a function accepting a pointer to a function as an
@@ -67,7 +100,7 @@ typedef void (*printf_fn_t)(const char *, ...);
* `int -> char *` function and returns pointer to a char. Equivalent:
* typedef char * (*fn_input_t)(int);
* typedef char * (*fn_output_outer_t)(fn_input_t);
- * typedef const fn_output_outer_t (* fn_output_inner_t)();
+ * typedef const fn_output_outer_t (* fn_output_inner_t)(void);
* typedef const fn_output_inner_t fn_ptr_arr2_t[5];
*/
/* ----- START-EXPECTED-OUTPUT ----- */
@@ -94,7 +127,7 @@ typedef void (* (*signal_t)(int, void (*)(int)))(int);
typedef char * (*fn_ptr_arr1_t[10])(int **);
-typedef char * (* const (* const fn_ptr_arr2_t[5])())(char * (*)(int));
+typedef char * (* (* const fn_ptr_arr2_t[5])(void))(char * (*)(int));
struct struct_w_typedefs {
int_t a;
@@ -174,6 +207,12 @@ struct struct_in_struct {
};
};
+struct struct_in_array {};
+
+struct struct_in_array_typed {};
+
+typedef struct struct_in_array_typed struct_in_array_t[2];
+
struct struct_with_embedded_stuff {
int a;
struct {
@@ -183,7 +222,7 @@ struct struct_with_embedded_stuff {
const char *d;
} e;
union {
- volatile long int f;
+ volatile long f;
void * restrict g;
};
};
@@ -203,6 +242,14 @@ struct struct_with_embedded_stuff {
} r[5];
struct struct_in_struct s[10];
int t[11];
+ struct struct_in_array (*u)[2];
+ struct_in_array_t *v;
+};
+
+struct float_struct {
+ float f;
+ const double *d;
+ volatile long double *ld;
};
struct root_struct {
@@ -210,6 +257,9 @@ struct root_struct {
enum e2 _2;
e2_t _2_1;
e3_t _2_2;
+ enum e_byte _100;
+ enum e_word _101;
+ enum e_big _102;
struct struct_w_typedefs _3;
anon_struct_t _7;
struct struct_fwd *_8;
@@ -219,6 +269,7 @@ struct root_struct {
union_fwd_t *_12;
union_fwd_ptr_t _13;
struct struct_with_embedded_stuff _14;
+ struct float_struct _15;
};
/* ------ END-EXPECTED-OUTPUT ------ */
diff --git a/tools/testing/selftests/bpf/progs/btf_type_tag.c b/tools/testing/selftests/bpf/progs/btf_type_tag.c
new file mode 100644
index 000000000000..1d488da7e920
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf_type_tag.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#if __has_attribute(btf_type_tag)
+#define __tag1 __attribute__((btf_type_tag("tag1")))
+#define __tag2 __attribute__((btf_type_tag("tag2")))
+volatile const bool skip_tests = false;
+#else
+#define __tag1
+#define __tag2
+volatile const bool skip_tests = true;
+#endif
+
+struct btf_type_tag_test {
+ int __tag1 * __tag1 __tag2 *p;
+} g;
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(sub, int x)
+{
+ return 0;
+}
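
With compiler support, the conditional macros above expand the struct declaration to the form below; this is the mechanical expansion of __tag1/__tag2, nothing beyond what the macros produce. Without support, both macros expand to nothing and the skip_tests flag tells the userspace runner to skip the test.

struct btf_type_tag_test {
	int __attribute__((btf_type_tag("tag1"))) *
	    __attribute__((btf_type_tag("tag1")))
	    __attribute__((btf_type_tag("tag2"))) *p;
};
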
diff --git a/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c
new file mode 100644
index 000000000000..d93f68024cc6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Google */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct bpf_testmod_btf_type_tag_1 {
+ int a;
+};
+
+struct bpf_testmod_btf_type_tag_2 {
+ struct bpf_testmod_btf_type_tag_1 *p;
+};
+
+__u64 g;
+
+SEC("fentry/bpf_testmod_test_btf_type_tag_percpu_1")
+int BPF_PROG(test_percpu1, struct bpf_testmod_btf_type_tag_1 *arg)
+{
+ g = arg->a;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_btf_type_tag_percpu_2")
+int BPF_PROG(test_percpu2, struct bpf_testmod_btf_type_tag_2 *arg)
+{
+ g = arg->p->a;
+ return 0;
+}
+
+/* trace_cgroup_mkdir(struct cgroup *cgrp, const char *path)
+ *
+ * struct css_rstat_cpu {
+ * ...
+ * struct cgroup_subsys_state *updated_children;
+ * ...
+ * };
+ *
+ * struct cgroup_subsys_state {
+ * ...
+ * struct css_rstat_cpu __percpu *rstat_cpu;
+ * ...
+ * };
+ *
+ * struct cgroup {
+ * struct cgroup_subsys_state self;
+ * ...
+ * };
+ */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_percpu_load, struct cgroup *cgrp, const char *path)
+{
+ g = (__u64)cgrp->self.rstat_cpu->updated_children;
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_percpu_helper, struct cgroup *cgrp, const char *path)
+{
+ struct css_rstat_cpu *rstat;
+ __u32 cpu;
+
+ cpu = bpf_get_smp_processor_id();
+ rstat = (struct css_rstat_cpu *)bpf_per_cpu_ptr(
+ cgrp->self.rstat_cpu, cpu);
+ if (rstat) {
+		/* READ_ONCE-style load: force a dereference of the per-CPU pointer */
+ *(volatile long *)rstat;
+ }
+
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/btf_type_tag_user.c b/tools/testing/selftests/bpf/progs/btf_type_tag_user.c
new file mode 100644
index 000000000000..5523f77c5a44
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf_type_tag_user.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct bpf_testmod_btf_type_tag_1 {
+ int a;
+};
+
+struct bpf_testmod_btf_type_tag_2 {
+ struct bpf_testmod_btf_type_tag_1 *p;
+};
+
+int g;
+
+SEC("fentry/bpf_testmod_test_btf_type_tag_user_1")
+int BPF_PROG(test_user1, struct bpf_testmod_btf_type_tag_1 *arg)
+{
+ g = arg->a;
+ return 0;
+}
+
+SEC("fentry/bpf_testmod_test_btf_type_tag_user_2")
+int BPF_PROG(test_user2, struct bpf_testmod_btf_type_tag_2 *arg)
+{
+ g = arg->p->a;
+ return 0;
+}
+
+/* int __sys_getsockname(int fd, struct sockaddr __user *usockaddr,
+ * int __user *usockaddr_len);
+ */
+SEC("fentry/__sys_getsockname")
+int BPF_PROG(test_sys_getsockname, int fd, struct sockaddr *usockaddr,
+ int *usockaddr_len)
+{
+ g = usockaddr->sa_family;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cb_refs.c b/tools/testing/selftests/bpf/progs/cb_refs.c
new file mode 100644
index 000000000000..5d6fc7f01ebb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cb_refs.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+struct map_value {
+ struct prog_test_ref_kfunc __kptr *ptr;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 16);
+} array_map SEC(".maps");
+
+static __noinline int cb1(void *map, void *key, void *value, void *ctx)
+{
+ void *p = *(void **)ctx;
+ bpf_kfunc_call_test_release(p);
+	/* Without the fix this would cause a refcount underflow */
+ return 0;
+}
+
+SEC("?tc")
+int underflow_prog(void *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ unsigned long sl = 0;
+
+ p = bpf_kfunc_call_test_acquire(&sl);
+ if (!p)
+ return 0;
+ bpf_for_each_map_elem(&array_map, cb1, &p, 0);
+ bpf_kfunc_call_test_release(p);
+ return 0;
+}
+
+static __always_inline int cb2(void *map, void *key, void *value, void *ctx)
+{
+ unsigned long sl = 0;
+
+ *(void **)ctx = bpf_kfunc_call_test_acquire(&sl);
+ /* Without the fix this would leak memory */
+ return 0;
+}
+
+SEC("?tc")
+int leak_prog(void *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+
+ v = bpf_map_lookup_elem(&array_map, &(int){0});
+ if (!v)
+ return 0;
+
+ p = NULL;
+ bpf_for_each_map_elem(&array_map, cb2, &p, 0);
+ p = bpf_kptr_xchg(&v->ptr, p);
+ if (p)
+ bpf_kfunc_call_test_release(p);
+ return 0;
+}
+
+static __always_inline int cb(void *map, void *key, void *value, void *ctx)
+{
+ return 0;
+}
+
+static __always_inline int cb3(void *map, void *key, void *value, void *ctx)
+{
+ unsigned long sl = 0;
+ void *p;
+
+ bpf_kfunc_call_test_acquire(&sl);
+ bpf_for_each_map_elem(&array_map, cb, &p, 0);
+	/* The verifier should only complain here, not in cb. This is why we
+	 * need callback_ref to be set to frameno.
+	 */
+ return 0;
+}
+
+SEC("?tc")
+int nested_cb(void *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ unsigned long sl = 0;
+ int sp = 0;
+
+ p = bpf_kfunc_call_test_acquire(&sl);
+ if (!p)
+ return 0;
+ bpf_for_each_map_elem(&array_map, cb3, &sp, 0);
+ bpf_kfunc_call_test_release(p);
+ return 0;
+}
+
+SEC("?tc")
+int non_cb_transfer_ref(void *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ unsigned long sl = 0;
+
+ p = bpf_kfunc_call_test_acquire(&sl);
+ if (!p)
+ return 0;
+ cb1(NULL, NULL, NULL, &p);
+ bpf_kfunc_call_test_acquire(&sl);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi.h b/tools/testing/selftests/bpf/progs/cg_storage_multi.h
index a0778fe7857a..41d59f0ee606 100644
--- a/tools/testing/selftests/bpf/progs/cg_storage_multi.h
+++ b/tools/testing/selftests/bpf/progs/cg_storage_multi.h
@@ -3,8 +3,6 @@
#ifndef __PROGS_CG_STORAGE_MULTI_H
#define __PROGS_CG_STORAGE_MULTI_H
-#include <asm/types.h>
-
struct cgroup_value {
__u32 egress_pkts;
__u32 ingress_pkts;
diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c b/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c
index a25373002055..3f81ff92184c 100644
--- a/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c
+++ b/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c
@@ -20,7 +20,7 @@ struct {
__u32 invocations = 0;
-SEC("cgroup_skb/egress/1")
+SEC("cgroup_skb/egress")
int egress1(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
@@ -32,7 +32,7 @@ int egress1(struct __sk_buff *skb)
return 1;
}
-SEC("cgroup_skb/egress/2")
+SEC("cgroup_skb/egress")
int egress2(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c b/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c
index a149f33bc533..d662db27fe4a 100644
--- a/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c
+++ b/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c
@@ -20,7 +20,7 @@ struct {
__u32 invocations = 0;
-SEC("cgroup_skb/egress/1")
+SEC("cgroup_skb/egress")
int egress1(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
@@ -32,7 +32,7 @@ int egress1(struct __sk_buff *skb)
return 1;
}
-SEC("cgroup_skb/egress/2")
+SEC("cgroup_skb/egress")
int egress2(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
diff --git a/tools/testing/selftests/bpf/progs/cgroup_ancestor.c b/tools/testing/selftests/bpf/progs/cgroup_ancestor.c
new file mode 100644
index 000000000000..8c2deb4fc493
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_ancestor.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_tracing_net.h"
+#define NUM_CGROUP_LEVELS 4
+
+__u64 cgroup_ids[NUM_CGROUP_LEVELS];
+__u16 dport;
+
+static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level)
+{
+	/* [1] &level is passed to an external function that may change it,
+	 * which is incompatible with loop unrolling.
+	 */
+ cgroup_ids[level] = bpf_skb_ancestor_cgroup_id(skb, level);
+}
+
+SEC("tc")
+int log_cgroup_id(struct __sk_buff *skb)
+{
+ struct sock *sk = (void *)skb->sk;
+
+ if (!sk)
+ return TC_ACT_OK;
+
+ sk = bpf_core_cast(sk, struct sock);
+ if (sk->sk_protocol == IPPROTO_UDP && sk->sk_dport == dport) {
+ log_nth_level(skb, 0);
+ log_nth_level(skb, 1);
+ log_nth_level(skb, 2);
+ log_nth_level(skb, 3);
+ }
+
+ return TC_ACT_OK;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c
new file mode 100644
index 000000000000..932b8ecd4ae3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright 2021 Google LLC.
+ */
+
+#include <errno.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+__u32 invocations = 0;
+__u32 assertion_error = 0;
+__u32 retval_value = 0;
+__u32 ctx_retval_value = 0;
+__u32 page_size = 0;
+
+SEC("cgroup/getsockopt")
+int get_retval(struct bpf_sockopt *ctx)
+{
+ retval_value = bpf_get_retval();
+ ctx_retval_value = ctx->retval;
+ __sync_fetch_and_add(&invocations, 1);
+
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int set_eisconn(struct bpf_sockopt *ctx)
+{
+ __sync_fetch_and_add(&invocations, 1);
+
+ if (bpf_set_retval(-EISCONN))
+ assertion_error = 1;
+
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int clear_retval(struct bpf_sockopt *ctx)
+{
+ __sync_fetch_and_add(&invocations, 1);
+
+ ctx->retval = 0;
+
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_hooks.c b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_hooks.c
new file mode 100644
index 000000000000..13dfb4bbfd28
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_hooks.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+#define BPF_RETVAL_HOOK(name, section, ctx, expected_err) \
+ __attribute__((__section__("?" section))) \
+ int name(struct ctx *_ctx) \
+ { \
+ bpf_set_retval(bpf_get_retval()); \
+ return 1; \
+ }
+
+#include "cgroup_getset_retval_hooks.h"
+
+#undef BPF_RETVAL_HOOK
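
cgroup_getset_retval_hooks.h is not part of this diff; the X-macro pattern implies it holds one BPF_RETVAL_HOOK() invocation per attach type. A hypothetical entry (program name and expected_err assumed) and its expansion under the macro above:

/* hypothetical entry in cgroup_getset_retval_hooks.h */
BPF_RETVAL_HOOK(skb_ingress, "cgroup_skb/ingress", __sk_buff, -EINVAL)

/* ...which expands to: */
__attribute__((__section__("?cgroup_skb/ingress")))
int skb_ingress(struct __sk_buff *_ctx)
{
	bpf_set_retval(bpf_get_retval());
	return 1;
}

The "?" section prefix keeps the generated programs from autoloading, so the test harness can enable and exercise them one at a time.
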
diff --git a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c
new file mode 100644
index 000000000000..45a0e9f492a9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright 2021 Google LLC.
+ */
+
+#include <errno.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+__u32 invocations = 0;
+__u32 assertion_error = 0;
+__u32 retval_value = 0;
+__s32 page_size = 0;
+
+SEC("cgroup/setsockopt")
+int get_retval(struct bpf_sockopt *ctx)
+{
+ retval_value = bpf_get_retval();
+ __sync_fetch_and_add(&invocations, 1);
+
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
+ return 1;
+}
+
+SEC("cgroup/setsockopt")
+int set_eunatch(struct bpf_sockopt *ctx)
+{
+ __sync_fetch_and_add(&invocations, 1);
+
+ if (bpf_set_retval(-EUNATCH))
+ assertion_error = 1;
+
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
+ return 0;
+}
+
+SEC("cgroup/setsockopt")
+int set_eisconn(struct bpf_sockopt *ctx)
+{
+ __sync_fetch_and_add(&invocations, 1);
+
+ if (bpf_set_retval(-EISCONN))
+ assertion_error = 1;
+
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
+ return 0;
+}
+
+SEC("cgroup/setsockopt")
+int legacy_eperm(struct bpf_sockopt *ctx)
+{
+ __sync_fetch_and_add(&invocations, 1);
+
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+
+ return 0;
+}
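
Inferred from the program names (the matching prog_tests driver is not shown in this diff), the userspace-visible behaviour when each program is attached on its own would be roughly:

/* get_retval   - setsockopt() proceeds; the prog only records
 *                bpf_get_retval() and returns 1 (allow).
 * set_eunatch  - setsockopt() fails with errno EUNATCH
 *                (bpf_set_retval(-EUNATCH), then return 0).
 * set_eisconn  - setsockopt() fails with errno EISCONN.
 * legacy_eperm - setsockopt() fails with errno EPERM: returning 0
 *                without bpf_set_retval() keeps the legacy reject.
 */
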
diff --git a/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c b/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c
new file mode 100644
index 000000000000..ff189a736ad8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022 Google LLC.
+ */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct percpu_attach_counter {
+ /* Previous percpu state, to figure out if we have new updates */
+ __u64 prev;
+ /* Current percpu state */
+ __u64 state;
+};
+
+struct attach_counter {
+ /* State propagated through children, pending aggregation */
+ __u64 pending;
+ /* Total state, including all cpus and all children */
+ __u64 state;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(max_entries, 1024);
+ __type(key, __u64);
+ __type(value, struct percpu_attach_counter);
+} percpu_attach_counters SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1024);
+ __type(key, __u64);
+ __type(value, struct attach_counter);
+} attach_counters SEC(".maps");
+
+extern void css_rstat_updated(
+ struct cgroup_subsys_state *css, int cpu) __ksym;
+extern void css_rstat_flush(struct cgroup_subsys_state *css) __ksym;
+
+static uint64_t cgroup_id(struct cgroup *cgrp)
+{
+ return cgrp->kn->id;
+}
+
+static int create_percpu_attach_counter(__u64 cg_id, __u64 state)
+{
+ struct percpu_attach_counter pcpu_init = {.state = state, .prev = 0};
+
+ return bpf_map_update_elem(&percpu_attach_counters, &cg_id,
+ &pcpu_init, BPF_NOEXIST);
+}
+
+static int create_attach_counter(__u64 cg_id, __u64 state, __u64 pending)
+{
+ struct attach_counter init = {.state = state, .pending = pending};
+
+ return bpf_map_update_elem(&attach_counters, &cg_id,
+ &init, BPF_NOEXIST);
+}
+
+SEC("fentry/cgroup_attach_task")
+int BPF_PROG(counter, struct cgroup *dst_cgrp, struct task_struct *leader,
+ bool threadgroup)
+{
+ __u64 cg_id = cgroup_id(dst_cgrp);
+ struct percpu_attach_counter *pcpu_counter = bpf_map_lookup_elem(
+ &percpu_attach_counters,
+ &cg_id);
+
+ if (pcpu_counter)
+ pcpu_counter->state += 1;
+ else if (create_percpu_attach_counter(cg_id, 1))
+ return 0;
+
+ css_rstat_updated(&dst_cgrp->self, bpf_get_smp_processor_id());
+ return 0;
+}
+
+SEC("fentry/bpf_rstat_flush")
+int BPF_PROG(flusher, struct cgroup *cgrp, struct cgroup *parent, int cpu)
+{
+ struct percpu_attach_counter *pcpu_counter;
+ struct attach_counter *total_counter, *parent_counter;
+ __u64 cg_id = cgroup_id(cgrp);
+ __u64 parent_cg_id = parent ? cgroup_id(parent) : 0;
+ __u64 state;
+ __u64 delta = 0;
+
+ /* Add CPU changes on this level since the last flush */
+ pcpu_counter = bpf_map_lookup_percpu_elem(&percpu_attach_counters,
+ &cg_id, cpu);
+ if (pcpu_counter) {
+ state = pcpu_counter->state;
+ delta += state - pcpu_counter->prev;
+ pcpu_counter->prev = state;
+ }
+
+ total_counter = bpf_map_lookup_elem(&attach_counters, &cg_id);
+ if (!total_counter) {
+ if (create_attach_counter(cg_id, delta, 0))
+ return 0;
+ goto update_parent;
+ }
+
+ /* Collect pending stats from subtree */
+ if (total_counter->pending) {
+ delta += total_counter->pending;
+ total_counter->pending = 0;
+ }
+
+ /* Propagate changes to this cgroup's total */
+ total_counter->state += delta;
+
+update_parent:
+ /* Skip if there are no changes to propagate, or no parent */
+ if (!delta || !parent_cg_id)
+ return 0;
+
+ /* Propagate changes to cgroup's parent */
+ parent_counter = bpf_map_lookup_elem(&attach_counters,
+ &parent_cg_id);
+ if (parent_counter)
+ parent_counter->pending += delta;
+ else
+ create_attach_counter(parent_cg_id, 0, delta);
+ return 0;
+}
+
+SEC("iter.s/cgroup")
+int BPF_PROG(dumper, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+{
+ struct seq_file *seq = meta->seq;
+ struct attach_counter *total_counter;
+ __u64 cg_id = cgrp ? cgroup_id(cgrp) : 0;
+
+ /* Do nothing for the terminal call */
+ if (!cg_id)
+ return 1;
+
+ /* Flush the stats to make sure we get the most updated numbers */
+ css_rstat_flush(&cgrp->self);
+
+ total_counter = bpf_map_lookup_elem(&attach_counters, &cg_id);
+ if (!total_counter) {
+ BPF_SEQ_PRINTF(seq, "cg_id: %llu, attach_counter: 0\n",
+ cg_id);
+ } else {
+ BPF_SEQ_PRINTF(seq, "cg_id: %llu, attach_counter: %llu\n",
+ cg_id, total_counter->state);
+ }
+ return 0;
+}
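
The pending/state split in the flusher mirrors the kernel's rstat design: per-CPU deltas are folded into a node and punted upward as pending, so no update ever walks the whole subtree; the work is deferred until a flush. A standalone model of that propagation, for intuition only (not part of the selftest):

#include <stdio.h>

struct node {
	unsigned long long state, pending;
	struct node *parent;
};

/* Mirrors the flusher above: fold per-CPU deltas plus pending child
 * contributions into this node, then punt the delta to the parent.
 */
static void flush(struct node *n, unsigned long long percpu_delta)
{
	unsigned long long delta = percpu_delta + n->pending;

	n->pending = 0;
	n->state += delta;
	if (delta && n->parent)
		n->parent->pending += delta;
}

int main(void)
{
	struct node parent = { 0 }, child = { .parent = &parent };

	flush(&child, 6);	/* e.g. two CPUs recorded 3 attaches each */
	flush(&parent, 0);	/* parent folds in the pending child delta */
	printf("child=%llu parent=%llu\n", child.state, parent.state); /* 6 6 */
	return 0;
}
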
diff --git a/tools/testing/selftests/bpf/progs/cgroup_iter.c b/tools/testing/selftests/bpf/progs/cgroup_iter.c
new file mode 100644
index 000000000000..f30841997a8d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_iter.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Google */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+int terminate_early = 0;
+u64 terminal_cgroup = 0;
+
+static inline u64 cgroup_id(struct cgroup *cgrp)
+{
+ return cgrp->kn->id;
+}
+
+SEC("iter/cgroup")
+int cgroup_id_printer(struct bpf_iter__cgroup *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct cgroup *cgrp = ctx->cgroup;
+
+ /* epilogue */
+ if (cgrp == NULL) {
+ BPF_SEQ_PRINTF(seq, "epilogue\n");
+ return 0;
+ }
+
+ /* prologue */
+ if (ctx->meta->seq_num == 0)
+ BPF_SEQ_PRINTF(seq, "prologue\n");
+
+ BPF_SEQ_PRINTF(seq, "%8llu\n", cgroup_id(cgrp));
+
+ if (terminal_cgroup == cgroup_id(cgrp))
+ return 1;
+
+ return terminate_early ? 1 : 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgroup_mprog.c b/tools/testing/selftests/bpf/progs/cgroup_mprog.c
new file mode 100644
index 000000000000..6a0ea02c4de2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_mprog.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("cgroup/getsockopt")
+int getsockopt_1(struct bpf_sockopt *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int getsockopt_2(struct bpf_sockopt *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int getsockopt_3(struct bpf_sockopt *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int getsockopt_4(struct bpf_sockopt *ctx)
+{
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgroup_preorder.c b/tools/testing/selftests/bpf/progs/cgroup_preorder.c
new file mode 100644
index 000000000000..4ef6202baa0a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_preorder.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+unsigned int idx;
+__u8 result[4];
+
+SEC("cgroup/getsockopt")
+int child(struct bpf_sockopt *ctx)
+{
+ if (idx < 4)
+ result[idx++] = 1;
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int child_2(struct bpf_sockopt *ctx)
+{
+ if (idx < 4)
+ result[idx++] = 2;
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int parent(struct bpf_sockopt *ctx)
+{
+ if (idx < 4)
+ result[idx++] = 3;
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int parent_2(struct bpf_sockopt *ctx)
+{
+ if (idx < 4)
+ result[idx++] = 4;
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgroup_read_xattr.c b/tools/testing/selftests/bpf/progs/cgroup_read_xattr.c
new file mode 100644
index 000000000000..88e13e17ec9e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_read_xattr.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+char value[16];
+
+static __always_inline void read_xattr(struct cgroup *cgroup)
+{
+ struct bpf_dynptr value_ptr;
+
+ bpf_dynptr_from_mem(value, sizeof(value), 0, &value_ptr);
+ bpf_cgroup_read_xattr(cgroup, "user.bpf_test",
+ &value_ptr);
+}
+
+SEC("lsm.s/socket_connect")
+__success
+int BPF_PROG(trusted_cgroup_ptr_sleepable)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ read_xattr(cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("lsm/socket_connect")
+__success
+int BPF_PROG(trusted_cgroup_ptr_non_sleepable)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ read_xattr(cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("lsm/socket_connect")
+__success
+int BPF_PROG(use_css_iter_non_sleepable)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup_subsys_state *css;
+ struct cgroup *cgrp;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ bpf_for_each(css, css, &cgrp->self, BPF_CGROUP_ITER_ANCESTORS_UP)
+ read_xattr(css->cgroup);
+
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("lsm.s/socket_connect")
+__failure __msg("kernel func bpf_iter_css_new requires RCU critical section protection")
+int BPF_PROG(use_css_iter_sleepable_missing_rcu_lock)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup_subsys_state *css;
+ struct cgroup *cgrp;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ bpf_for_each(css, css, &cgrp->self, BPF_CGROUP_ITER_ANCESTORS_UP)
+ read_xattr(css->cgroup);
+
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("lsm.s/socket_connect")
+__success
+int BPF_PROG(use_css_iter_sleepable_with_rcu_lock)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup_subsys_state *css;
+ struct cgroup *cgrp;
+
+ bpf_rcu_read_lock();
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ goto out;
+
+ bpf_for_each(css, css, &cgrp->self, BPF_CGROUP_ITER_ANCESTORS_UP)
+ read_xattr(css->cgroup);
+
+ bpf_cgroup_release(cgrp);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("lsm/socket_connect")
+__success
+int BPF_PROG(use_bpf_cgroup_ancestor)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp, *ancestor;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ ancestor = bpf_cgroup_ancestor(cgrp, 1);
+ if (!ancestor)
+ goto out;
+
+ read_xattr(cgrp);
+ bpf_cgroup_release(ancestor);
+out:
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("cgroup/sendmsg4")
+__success
+int BPF_PROG(cgroup_skb)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp, *ancestor;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ ancestor = bpf_cgroup_ancestor(cgrp, 1);
+ if (!ancestor)
+ goto out;
+
+ read_xattr(cgrp);
+ bpf_cgroup_release(ancestor);
+out:
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
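
The user.bpf_test attribute these programs read has to be placed on the cgroup directory beforehand. The matching prog_tests file is not shown here, but the setup presumably amounts to something like this (path and value are illustrative):

#include <sys/xattr.h>

/* Tag a cgroup directory so bpf_cgroup_read_xattr() finds the value. */
static int tag_cgroup(const char *cgrp_path)
{
	return setxattr(cgrp_path, "user.bpf_test", "hello", 5, 0);
}
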
diff --git a/tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c b/tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c
new file mode 100644
index 000000000000..e32b07d802bb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+__u32 data_end;
+
+SEC("cgroup_skb/ingress")
+int direct_packet_access(struct __sk_buff *skb)
+{
+ data_end = skb->data_end;
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c
index 3f757e30d7a0..ac86a8a61605 100644
--- a/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c
@@ -14,7 +14,6 @@
#include <sys/types.h>
#include <sys/socket.h>
-int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
__u16 g_serv_port = 0;
@@ -67,7 +66,6 @@ static inline int is_allowed_peer_cg(struct __sk_buff *skb,
SEC("cgroup_skb/ingress")
int ingress_lookup(struct __sk_buff *skb)
{
- __u32 serv_port_key = 0;
struct ipv6hdr ip6h;
struct tcphdr tcph;
diff --git a/tools/testing/selftests/bpf/progs/cgroup_storage.c b/tools/testing/selftests/bpf/progs/cgroup_storage.c
new file mode 100644
index 000000000000..db1e4d2d3281
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_storage.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
+ __type(key, struct bpf_cgroup_storage_key);
+ __type(value, __u64);
+} cgroup_storage SEC(".maps");
+
+SEC("cgroup_skb/egress")
+int bpf_prog(struct __sk_buff *skb)
+{
+ __u64 *counter;
+
+ counter = bpf_get_local_storage(&cgroup_storage, 0);
+ __sync_fetch_and_add(counter, 1);
+
+ /* Drop one out of every two packets */
+ return (*counter & 1);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/cgroup_tcp_skb.c b/tools/testing/selftests/bpf/progs/cgroup_tcp_skb.c
new file mode 100644
index 000000000000..1e2e73f3b749
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_tcp_skb.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#include <linux/bpf.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include "cgroup_tcp_skb.h"
+
+char _license[] SEC("license") = "GPL";
+
+__u16 g_sock_port = 0;
+__u32 g_sock_state = 0;
+int g_unexpected = 0;
+__u32 g_packet_count = 0;
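
The socket-state constants used below (INIT, SYN_RECV_SENDING_SYN_ACK, CLOSE_WAIT, ...) come from cgroup_tcp_skb.h, which is not part of this diff. A plausible sketch of that header, inferred purely from the states this file references (ordering and values assumed):

/* hypothetical reconstruction of cgroup_tcp_skb.h */
enum {
	INIT,
	ESTABLISHED,
	SYN_SENT,
	SYN_RECV_SENDING_SYN_ACK,
	SYN_RECV,
	FIN_WAIT1,
	FIN_WAIT2,
	TIME_WAIT_SENDING_ACK,
	TIME_WAIT,
	CLOSE_WAIT_SENDING_ACK,
	CLOSE_WAIT,
	LAST_ACK,
	CLOSED,
};
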
+
+int needed_tcp_pkt(struct __sk_buff *skb, struct tcphdr *tcph)
+{
+ struct ipv6hdr ip6h;
+
+ if (skb->protocol != bpf_htons(ETH_P_IPV6))
+ return 0;
+ if (bpf_skb_load_bytes(skb, 0, &ip6h, sizeof(ip6h)))
+ return 0;
+
+ if (ip6h.nexthdr != IPPROTO_TCP)
+ return 0;
+
+ if (bpf_skb_load_bytes(skb, sizeof(ip6h), tcph, sizeof(*tcph)))
+ return 0;
+
+ if (tcph->source != bpf_htons(g_sock_port) &&
+ tcph->dest != bpf_htons(g_sock_port))
+ return 0;
+
+ return 1;
+}
+
+/* Run accept() on a socket in the cgroup to receive a new connection. */
+static int egress_accept(struct tcphdr *tcph)
+{
+ if (g_sock_state == SYN_RECV_SENDING_SYN_ACK) {
+ if (tcph->fin || !tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = SYN_RECV;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int ingress_accept(struct tcphdr *tcph)
+{
+ switch (g_sock_state) {
+ case INIT:
+ if (!tcph->syn || tcph->fin || tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = SYN_RECV_SENDING_SYN_ACK;
+ break;
+ case SYN_RECV:
+ if (tcph->fin || tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = ESTABLISHED;
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Run connect() on a socket in the cgroup to start a new connection. */
+static int egress_connect(struct tcphdr *tcph)
+{
+ if (g_sock_state == INIT) {
+ if (!tcph->syn || tcph->fin || tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = SYN_SENT;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int ingress_connect(struct tcphdr *tcph)
+{
+ if (g_sock_state == SYN_SENT) {
+ if (tcph->fin || !tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = ESTABLISHED;
+ return 1;
+ }
+
+ return 0;
+}
+
+/* The connection is closed by the peer outside the cgroup. */
+static int egress_close_remote(struct tcphdr *tcph)
+{
+ switch (g_sock_state) {
+ case ESTABLISHED:
+ break;
+ case CLOSE_WAIT_SENDING_ACK:
+ if (tcph->fin || tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = CLOSE_WAIT;
+ break;
+ case CLOSE_WAIT:
+ if (!tcph->fin)
+ g_unexpected++;
+ else
+ g_sock_state = LAST_ACK;
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static int ingress_close_remote(struct tcphdr *tcph)
+{
+ switch (g_sock_state) {
+ case ESTABLISHED:
+ if (tcph->fin)
+ g_sock_state = CLOSE_WAIT_SENDING_ACK;
+ break;
+ case LAST_ACK:
+ if (tcph->fin || tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = CLOSED;
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+/* The connection is closed by the endpoint inside the cgroup. */
+static int egress_close_local(struct tcphdr *tcph)
+{
+ switch (g_sock_state) {
+ case ESTABLISHED:
+ if (tcph->fin)
+ g_sock_state = FIN_WAIT1;
+ break;
+ case TIME_WAIT_SENDING_ACK:
+ if (tcph->fin || tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = TIME_WAIT;
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static int ingress_close_local(struct tcphdr *tcph)
+{
+ switch (g_sock_state) {
+ case ESTABLISHED:
+ break;
+ case FIN_WAIT1:
+ if (tcph->fin || tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = FIN_WAIT2;
+ break;
+ case FIN_WAIT2:
+ if (!tcph->fin || tcph->syn || !tcph->ack)
+ g_unexpected++;
+ else
+ g_sock_state = TIME_WAIT_SENDING_ACK;
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Check the types of outgoing packets of a server socket to make sure they
+ * are consistent with the state of the server socket.
+ *
+ * The connection is closed by the client side.
+ */
+SEC("cgroup_skb/egress")
+int server_egress(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+ /* Egress of the server socket. */
+ if (egress_accept(&tcph) || egress_close_remote(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
+
+/* Check the types of incoming packets of a server socket to make sure they
+ * are consistent with the state of the server socket.
+ *
+ * The connection is closed by the client side.
+ */
+SEC("cgroup_skb/ingress")
+int server_ingress(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+ /* Ingress of the server socket. */
+ if (ingress_accept(&tcph) || ingress_close_remote(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
+
+/* Check the types of outgoing packets of a server socket to make sure they
+ * are consistent with the state of the server socket.
+ *
+ * The connection is closed by the server side.
+ */
+SEC("cgroup_skb/egress")
+int server_egress_srv(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+ /* Egress of the server socket. */
+ if (egress_accept(&tcph) || egress_close_local(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
+
+/* Check the types of incoming packets of a server socket to make sure they
+ * are consistent with the state of the server socket.
+ *
+ * The connection is closed by the server side.
+ */
+SEC("cgroup_skb/ingress")
+int server_ingress_srv(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+ /* Ingress of the server socket. */
+ if (ingress_accept(&tcph) || ingress_close_local(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
+
+/* Check the types of outgoing packets of a client socket to make sure they
+ * are consistent with the state of the client socket.
+ *
+ * The connection is closed by the server side.
+ */
+SEC("cgroup_skb/egress")
+int client_egress_srv(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+	/* Egress of the client socket. */
+ if (egress_connect(&tcph) || egress_close_remote(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
+
+/* Check the types of incoming packets of a client socket to make sure they
+ * are consistent with the state of the client socket.
+ *
+ * The connection is closed by the server side.
+ */
+SEC("cgroup_skb/ingress")
+int client_ingress_srv(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+	/* Ingress of the client socket. */
+ if (ingress_connect(&tcph) || ingress_close_remote(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
+
+/* Check the types of outgoing packets of a client socket to make sure they
+ * are consistent with the state of the client socket.
+ *
+ * The connection is closed by the client side.
+ */
+SEC("cgroup_skb/egress")
+int client_egress(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+	/* Egress of the client socket. */
+ if (egress_connect(&tcph) || egress_close_local(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
+
+/* Check the types of incoming packets of a client socket to make sure they
+ * are consistent with the state of the client socket.
+ *
+ * The connection is closed by the client side.
+ */
+SEC("cgroup_skb/ingress")
+int client_ingress(struct __sk_buff *skb)
+{
+ struct tcphdr tcph;
+
+ if (!needed_tcp_pkt(skb, &tcph))
+ return 1;
+
+ g_packet_count++;
+
+	/* Ingress of the client socket. */
+ if (ingress_connect(&tcph) || ingress_close_local(&tcph))
+ return 1;
+
+ g_unexpected++;
+ return 1;
+}
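
Taken together, each egress/ingress pair encodes one scenario (who connects, who closes). The driver side is not in this diff, but presumably it attaches one pair, runs a socket through the scenario, and then checks the globals; a sketch with assumed skeleton field names:

/* hypothetical post-run checks in the prog_tests driver */
ASSERT_EQ(skel->bss->g_unexpected, 0, "unexpected packets");
ASSERT_GT(skel->bss->g_packet_count, 0, "packets observed");
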
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h
new file mode 100644
index 000000000000..73ba32e9a693
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _CGRP_KFUNC_COMMON_H
+#define _CGRP_KFUNC_COMMON_H
+
+#include <errno.h>
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct __cgrps_kfunc_map_value {
+ struct cgroup __kptr * cgrp;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct __cgrps_kfunc_map_value);
+ __uint(max_entries, 1);
+} __cgrps_kfunc_map SEC(".maps");
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+static inline struct __cgrps_kfunc_map_value *cgrps_kfunc_map_value_lookup(struct cgroup *cgrp)
+{
+ s32 id;
+ long status;
+
+ status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
+ if (status)
+ return NULL;
+
+ return bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
+}
+
+static inline int cgrps_kfunc_map_insert(struct cgroup *cgrp)
+{
+ struct __cgrps_kfunc_map_value local, *v;
+ long status;
+ struct cgroup *acquired, *old;
+ s32 id;
+
+ status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
+ if (status)
+ return status;
+
+ local.cgrp = NULL;
+ status = bpf_map_update_elem(&__cgrps_kfunc_map, &id, &local, BPF_NOEXIST);
+ if (status)
+ return status;
+
+ v = bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
+ if (!v) {
+ bpf_map_delete_elem(&__cgrps_kfunc_map, &id);
+ return -ENOENT;
+ }
+
+ acquired = bpf_cgroup_acquire(cgrp);
+ if (!acquired) {
+ bpf_map_delete_elem(&__cgrps_kfunc_map, &id);
+ return -ENOENT;
+ }
+
+ old = bpf_kptr_xchg(&v->cgrp, acquired);
+ if (old) {
+ bpf_cgroup_release(old);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+#endif /* _CGRP_KFUNC_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
new file mode 100644
index 000000000000..9fe9c4a4e8f6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+#include "cgrp_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(cgroup_mkdir,
+ * TP_PROTO(struct cgroup *cgrp, const char *path),
+ * TP_ARGS(cgrp, path)
+ */
+
+static struct __cgrps_kfunc_map_value *insert_lookup_cgrp(struct cgroup *cgrp)
+{
+ int status;
+
+ status = cgrps_kfunc_map_insert(cgrp);
+ if (status)
+ return NULL;
+
+ return cgrps_kfunc_map_value_lookup(cgrp);
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+ struct __cgrps_kfunc_map_value *v;
+
+ v = insert_lookup_cgrp(cgrp);
+ if (!v)
+ return 0;
+
+ /* Can't invoke bpf_cgroup_acquire() on an untrusted pointer. */
+ acquired = bpf_cgroup_acquire(v->cgrp);
+ if (acquired)
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(cgrp_kfunc_acquire_no_null_check, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+
+ acquired = bpf_cgroup_acquire(cgrp);
+ /*
+ * Can't invoke bpf_cgroup_release() without checking the return value
+ * of bpf_cgroup_acquire().
+ */
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("arg#0 pointer type STRUCT cgroup must point")
+int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired, *stack_cgrp = (struct cgroup *)&path;
+
+ /* Can't invoke bpf_cgroup_acquire() on a random frame pointer. */
+ acquired = bpf_cgroup_acquire((struct cgroup *)&stack_cgrp);
+ if (acquired)
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("kretprobe/cgroup_destroy_locked")
+__failure __msg("calling kernel function bpf_cgroup_acquire is not allowed")
+int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp)
+{
+ struct cgroup *acquired;
+
+ /* Can't acquire an untrusted struct cgroup * pointer. */
+ acquired = bpf_cgroup_acquire(cgrp);
+ if (acquired)
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("cgrp_kfunc_acquire_trusted_walked")
+int BPF_PROG(cgrp_kfunc_acquire_trusted_walked, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+
+ /* Can't invoke bpf_cgroup_acquire() on a pointer obtained from walking a trusted cgroup. */
+ acquired = bpf_cgroup_acquire(cgrp->old_dom_cgrp);
+ if (acquired)
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(cgrp_kfunc_acquire_null, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+
+ /* Can't invoke bpf_cgroup_acquire() on a NULL pointer. */
+ acquired = bpf_cgroup_acquire(NULL);
+ if (acquired)
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Unreleased reference")
+int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+
+ acquired = bpf_cgroup_acquire(cgrp);
+
+ /* Acquired cgroup is never released. */
+ __sink(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Unreleased reference")
+int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *kptr;
+ struct __cgrps_kfunc_map_value *v;
+
+ v = insert_lookup_cgrp(cgrp);
+ if (!v)
+ return 0;
+
+ kptr = bpf_kptr_xchg(&v->cgrp, NULL);
+ if (!kptr)
+ return 0;
+
+ /* Kptr retrieved from map is never released. */
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("must be referenced or trusted")
+int BPF_PROG(cgrp_kfunc_rcu_get_release, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *kptr;
+ struct __cgrps_kfunc_map_value *v;
+
+ v = insert_lookup_cgrp(cgrp);
+ if (!v)
+ return 0;
+
+ bpf_rcu_read_lock();
+ kptr = v->cgrp;
+ if (kptr)
+ /* Can't release a cgroup kptr stored in a map. */
+ bpf_cgroup_release(kptr);
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path)
+{
+ struct __cgrps_kfunc_map_value *v;
+
+ v = insert_lookup_cgrp(cgrp);
+ if (!v)
+ return 0;
+
+ /* Can't invoke bpf_cgroup_release() on an untrusted pointer. */
+ bpf_cgroup_release(v->cgrp);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("arg#0 pointer type STRUCT cgroup must point")
+int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired = (struct cgroup *)&path;
+
+ /* Cannot release random frame pointer. */
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path)
+{
+ struct __cgrps_kfunc_map_value local, *v;
+ long status;
+ struct cgroup *acquired, *old;
+ s32 id;
+
+ status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
+ if (status)
+ return 0;
+
+ local.cgrp = NULL;
+ status = bpf_map_update_elem(&__cgrps_kfunc_map, &id, &local, BPF_NOEXIST);
+ if (status)
+ return status;
+
+ v = bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
+ if (!v)
+ return -ENOENT;
+
+ acquired = bpf_cgroup_acquire(cgrp);
+ if (!acquired)
+ return -ENOENT;
+
+ old = bpf_kptr_xchg(&v->cgrp, acquired);
+
+ /* old cannot be passed to bpf_cgroup_release() without a NULL check. */
+ bpf_cgroup_release(old);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+__failure __msg("release kernel function bpf_cgroup_release expects")
+int BPF_PROG(cgrp_kfunc_release_unacquired, struct cgroup *cgrp, const char *path)
+{
+ /* Cannot release trusted cgroup pointer which was not acquired. */
+ bpf_cgroup_release(cgrp);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c
new file mode 100644
index 000000000000..02d8f160ca0e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "cgrp_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+int err, pid, invocations;
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(cgroup_mkdir,
+ * TP_PROTO(struct cgroup *cgrp, const char *path),
+ * TP_ARGS(cgrp, path)
+ */
+
+static bool is_test_kfunc_task(void)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+ bool same = pid == cur_pid;
+
+ if (same)
+ __sync_fetch_and_add(&invocations, 1);
+
+ return same;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_acquire_release_argument, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *acquired;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ acquired = bpf_cgroup_acquire(cgrp);
+ if (!acquired)
+ err = 1;
+ else
+ bpf_cgroup_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_acquire_leave_in_map, struct cgroup *cgrp, const char *path)
+{
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = cgrps_kfunc_map_insert(cgrp);
+ if (status)
+ err = 1;
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_xchg_release, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *kptr, *cg;
+ struct __cgrps_kfunc_map_value *v;
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = cgrps_kfunc_map_insert(cgrp);
+ if (status) {
+ err = 1;
+ return 0;
+ }
+
+ v = cgrps_kfunc_map_value_lookup(cgrp);
+ if (!v) {
+ err = 2;
+ return 0;
+ }
+
+ kptr = v->cgrp;
+ if (!kptr) {
+ err = 4;
+ return 0;
+ }
+
+ cg = bpf_cgroup_ancestor(kptr, 1);
+	if (cg) /* verifier-only check */
+ bpf_cgroup_release(cg);
+
+ kptr = bpf_kptr_xchg(&v->cgrp, NULL);
+ if (!kptr) {
+ err = 3;
+ return 0;
+ }
+
+ bpf_cgroup_release(kptr);
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_get_release, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *kptr;
+ struct __cgrps_kfunc_map_value *v;
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = cgrps_kfunc_map_insert(cgrp);
+ if (status) {
+ err = 1;
+ return 0;
+ }
+
+ v = cgrps_kfunc_map_value_lookup(cgrp);
+ if (!v) {
+ err = 2;
+ return 0;
+ }
+
+ bpf_rcu_read_lock();
+ kptr = v->cgrp;
+ if (!kptr)
+ err = 3;
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_get_ancestors, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *self, *ancestor1, *invalid;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ self = bpf_cgroup_ancestor(cgrp, cgrp->level);
+ if (!self) {
+ err = 1;
+ return 0;
+ }
+
+ if (self->self.id != cgrp->self.id) {
+ bpf_cgroup_release(self);
+ err = 2;
+ return 0;
+ }
+ bpf_cgroup_release(self);
+
+ ancestor1 = bpf_cgroup_ancestor(cgrp, cgrp->level - 1);
+ if (!ancestor1) {
+ err = 3;
+ return 0;
+ }
+ bpf_cgroup_release(ancestor1);
+
+ invalid = bpf_cgroup_ancestor(cgrp, 10000);
+ if (invalid) {
+ bpf_cgroup_release(invalid);
+ err = 4;
+ return 0;
+ }
+
+ invalid = bpf_cgroup_ancestor(cgrp, -1);
+ if (invalid) {
+ bpf_cgroup_release(invalid);
+ err = 5;
+ return 0;
+ }
+
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_from_id, struct cgroup *cgrp, const char *path)
+{
+ struct cgroup *parent, *res;
+ u64 parent_cgid;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ /* @cgrp's ID is not visible yet, let's test with the parent */
+ parent = bpf_cgroup_ancestor(cgrp, cgrp->level - 1);
+ if (!parent) {
+ err = 1;
+ return 0;
+ }
+
+ parent_cgid = parent->kn->id;
+ bpf_cgroup_release(parent);
+
+ res = bpf_cgroup_from_id(parent_cgid);
+ if (!res) {
+ err = 2;
+ return 0;
+ }
+
+ bpf_cgroup_release(res);
+
+ if (res != parent) {
+ err = 3;
+ return 0;
+ }
+
+ res = bpf_cgroup_from_id((u64)-1);
+ if (res) {
+ bpf_cgroup_release(res);
+ err = 4;
+ return 0;
+ }
+
+ return 0;
+}
+
+SEC("syscall")
+int test_cgrp_from_id_ns(void *ctx)
+{
+ struct cgroup *cg;
+
+ cg = bpf_cgroup_from_id(1);
+ if (!cg)
+ return 42;
+ bpf_cgroup_release(cg);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c b/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c
new file mode 100644
index 000000000000..8aeba1b75c83
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_tracing_net.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct socket_cookie {
+ __u64 cookie_key;
+ __u64 cookie_value;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct socket_cookie);
+} socket_cookies SEC(".maps");
+
+SEC("cgroup/connect6")
+int set_cookie(struct bpf_sock_addr *ctx)
+{
+ struct socket_cookie *p;
+ struct tcp_sock *tcp_sk;
+ struct bpf_sock *sk;
+
+ if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6)
+ return 1;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 1;
+
+ tcp_sk = bpf_skc_to_tcp_sock(sk);
+ if (!tcp_sk)
+ return 1;
+
+ p = bpf_cgrp_storage_get(&socket_cookies,
+ tcp_sk->inet_conn.icsk_inet.sk.sk_cgrp_data.cgroup, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!p)
+ return 1;
+
+ p->cookie_value = 0xF;
+ p->cookie_key = bpf_get_socket_cookie(ctx);
+ return 1;
+}
+
+SEC("sockops")
+int update_cookie_sockops(struct bpf_sock_ops *ctx)
+{
+ struct socket_cookie *p;
+ struct tcp_sock *tcp_sk;
+ struct bpf_sock *sk;
+
+ if (ctx->family != AF_INET6 || ctx->op != BPF_SOCK_OPS_TCP_CONNECT_CB)
+ return 1;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 1;
+
+ tcp_sk = bpf_skc_to_tcp_sock(sk);
+ if (!tcp_sk)
+ return 1;
+
+ p = bpf_cgrp_storage_get(&socket_cookies,
+ tcp_sk->inet_conn.icsk_inet.sk.sk_cgrp_data.cgroup, 0, 0);
+ if (!p)
+ return 1;
+
+ if (p->cookie_key != bpf_get_socket_cookie(ctx))
+ return 1;
+
+ p->cookie_value |= (ctx->local_port << 8);
+ return 1;
+}
+
+SEC("fexit/inet_stream_connect")
+int BPF_PROG(update_cookie_tracing, struct socket *sock,
+ struct sockaddr *uaddr, int addr_len, int flags)
+{
+ struct socket_cookie *p;
+
+ if (uaddr->sa_family != AF_INET6)
+ return 0;
+
+ p = bpf_cgrp_storage_get(&socket_cookies, sock->sk->sk_cgrp_data.cgroup, 0, 0);
+ if (!p)
+ return 0;
+
+ if (p->cookie_key != bpf_get_socket_cookie(sock->sk))
+ return 0;
+
+ p->cookie_value |= 0xF0;
+ return 0;
+}
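
The three hooks build cookie_value cooperatively, which makes the final value easy to assert. Working it through from the code above (the driver itself is not shown):

/* connect6:  cookie_value  = 0x0F
 * sockops:   cookie_value |= local_port << 8
 * fexit:     cookie_value |= 0xF0
 * result:    cookie_value == (local_port << 8) | 0xFF
 * and cookie_key must match the socket cookie at every step,
 * otherwise a hook bails out early.
 */
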
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_negative.c b/tools/testing/selftests/bpf/progs/cgrp_ls_negative.c
new file mode 100644
index 000000000000..d41f90e2ab64
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_negative.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+
+ task = bpf_get_current_task_btf();
+ (void)bpf_cgrp_storage_get(&map_a, (struct cgroup *)task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
new file mode 100644
index 000000000000..3500e4b69ebe
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_b SEC(".maps");
+
+int target_hid = 0;
+bool is_cgroup1 = 0;
+
+struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+
+static void __on_update(struct cgroup *cgrp)
+{
+ long *ptr;
+
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr += 1;
+
+ ptr = bpf_cgrp_storage_get(&map_b, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr += 1;
+}
+
+SEC("fentry/bpf_local_storage_update")
+int BPF_PROG(on_update)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct cgroup *cgrp;
+
+ if (is_cgroup1) {
+ cgrp = bpf_task_get_cgroup1(task, target_hid);
+ if (!cgrp)
+ return 0;
+
+ __on_update(cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+ }
+
+ __on_update(task->cgroups->dfl_cgrp);
+ return 0;
+}
+
+static void __on_enter(struct pt_regs *regs, long id, struct cgroup *cgrp)
+{
+ long *ptr;
+
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr = 200;
+
+ ptr = bpf_cgrp_storage_get(&map_b, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr = 100;
+}
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct cgroup *cgrp;
+
+ if (is_cgroup1) {
+ cgrp = bpf_task_get_cgroup1(task, target_hid);
+ if (!cgrp)
+ return 0;
+
+ __on_enter(regs, id, cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+ }
+
+ __on_enter(regs, id, task->cgroups->dfl_cgrp);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
new file mode 100644
index 000000000000..a2de95f85648
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+__s32 target_pid;
+__u64 cgroup_id;
+int target_hid;
+bool is_cgroup1;
+
+struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+SEC("?iter.s/cgroup")
+int cgroup_iter(struct bpf_iter__cgroup *ctx)
+{
+ struct cgroup *cgrp = ctx->cgroup;
+ long *ptr;
+
+ if (cgrp == NULL)
+ return 0;
+
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ cgroup_id = cgrp->kn->id;
+ return 0;
+}
+
+static void __no_rcu_lock(struct cgroup *cgrp)
+{
+ long *ptr;
+
+ /* Note that tasks-trace RCU protection is held across a sleepable
+ * prog, so bpf_cgrp_storage_get() can be used here as well.
+ */
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ cgroup_id = cgrp->kn->id;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int cgrp1_no_rcu_lock(void *ctx)
+{
+ struct task_struct *task;
+ struct cgroup *cgrp;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ /* bpf_task_get_cgroup1() can be called from a sleepable prog */
+ cgrp = bpf_task_get_cgroup1(task, target_hid);
+ if (!cgrp)
+ return 0;
+
+ __no_rcu_lock(cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int no_rcu_lock(void *ctx)
+{
+ struct task_struct *task;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ /* task->cgroups is untrusted in sleepable prog outside of RCU CS */
+ __no_rcu_lock(task->cgroups->dfl_cgrp);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int yes_rcu_lock(void *ctx)
+{
+ struct task_struct *task;
+ struct cgroup *cgrp;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ if (is_cgroup1) {
+ bpf_rcu_read_lock();
+ cgrp = bpf_task_get_cgroup1(task, target_hid);
+ if (!cgrp) {
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ cgroup_id = cgrp->kn->id;
+ bpf_cgroup_release(cgrp);
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ bpf_rcu_read_lock();
+ cgrp = task->cgroups->dfl_cgrp;
+ /* cgrp is trusted under RCU CS */
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ cgroup_id = cgrp->kn->id;
+ bpf_rcu_read_unlock();
+ return 0;
+}
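
All entry points here use "?"-prefixed section names, so libbpf leaves them with autoload disabled and the runner enables exactly one per subtest. A hedged sketch of that selection, assuming the generated skeleton exposes the underlying bpf_object as skel->obj:

    #include "cgrp_ls_sleepable.skel.h"

    static struct cgrp_ls_sleepable *load_one(const char *prog_name)
    {
        struct cgrp_ls_sleepable *skel = cgrp_ls_sleepable__open();
        struct bpf_program *prog;

        if (!skel)
            return NULL;
        /* "?" sections default to autoload off; turn one back on */
        prog = bpf_object__find_program_by_name(skel->obj, prog_name);
        if (!prog || bpf_program__set_autoload(prog, true) ||
            cgrp_ls_sleepable__load(skel)) {
            cgrp_ls_sleepable__destroy(skel);
            return NULL;
        }
        return skel;
    }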
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c b/tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
new file mode 100644
index 000000000000..1c348f000f38
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_b SEC(".maps");
+
+#define MAGIC_VALUE 0xabcd1234
+
+pid_t target_pid = 0;
+int mismatch_cnt = 0;
+int enter_cnt = 0;
+int exit_cnt = 0;
+int target_hid = 0;
+bool is_cgroup1 = false;
+
+struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+
+static void __on_enter(struct pt_regs *regs, long id, struct cgroup *cgrp)
+{
+ long *ptr;
+ int err;
+
+ /* populate value 0 */
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return;
+
+ /* delete value 0 */
+ err = bpf_cgrp_storage_delete(&map_a, cgrp);
+ if (err)
+ return;
+
+ /* value is not available */
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, 0);
+ if (ptr)
+ return;
+
+ /* re-populate the value */
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return;
+ __sync_fetch_and_add(&enter_cnt, 1);
+ *ptr = MAGIC_VALUE + enter_cnt;
+}
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ struct cgroup *cgrp;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ if (is_cgroup1) {
+ cgrp = bpf_task_get_cgroup1(task, target_hid);
+ if (!cgrp)
+ return 0;
+
+ __on_enter(regs, id, cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+ }
+
+ __on_enter(regs, id, task->cgroups->dfl_cgrp);
+ return 0;
+}
+
+static void __on_exit(struct pt_regs *regs, long id, struct cgroup *cgrp)
+{
+ long *ptr;
+
+ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return;
+
+ __sync_fetch_and_add(&exit_cnt, 1);
+ if (*ptr != MAGIC_VALUE + exit_cnt)
+ __sync_fetch_and_add(&mismatch_cnt, 1);
+}
+
+SEC("tp_btf/sys_exit")
+int BPF_PROG(on_exit, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ struct cgroup *cgrp;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ if (is_cgroup1) {
+ cgrp = bpf_task_get_cgroup1(task, target_hid);
+ if (!cgrp)
+ return 0;
+
+ __on_exit(regs, id, cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+ }
+
+ __on_exit(regs, id, task->cgroups->dfl_cgrp);
+ return 0;
+}
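
From userspace, a BPF_MAP_TYPE_CGRP_STORAGE map is keyed by a cgroup fd, so the counter written by on_enter/on_exit can be read back directly. A hedged sketch (cgroup path and skeleton layout assumed):

    #include <fcntl.h>
    #include <unistd.h>
    #include <bpf/bpf.h>
    #include "cgrp_ls_tp_btf.skel.h"

    static long read_cgrp_counter(struct cgrp_ls_tp_btf *skel, const char *path)
    {
        long val = 0;
        int cg_fd = open(path, O_RDONLY);

        if (cg_fd < 0)
            return -1;
        /* cgrp storage maps are keyed by a cgroup fd from userspace */
        bpf_map_lookup_elem(bpf_map__fd(skel->maps.map_a), &cg_fd, &val);
        close(cg_fd);
        return val;
    }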
diff --git a/tools/testing/selftests/bpf/progs/compute_live_registers.c b/tools/testing/selftests/bpf/progs/compute_live_registers.c
new file mode 100644
index 000000000000..6884ab99a421
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/compute_live_registers.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_arena_common.h"
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} test_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 1);
+} arena SEC(".maps");
+
+SEC("socket")
+__log_level(2)
+__msg(" 0: .......... (b7) r0 = 42")
+__msg(" 1: 0......... (bf) r1 = r0")
+__msg(" 2: .1........ (bf) r2 = r1")
+__msg(" 3: ..2....... (bf) r3 = r2")
+__msg(" 4: ...3...... (bf) r4 = r3")
+__msg(" 5: ....4..... (bf) r5 = r4")
+__msg(" 6: .....5.... (bf) r6 = r5")
+__msg(" 7: ......6... (bf) r7 = r6")
+__msg(" 8: .......7.. (bf) r8 = r7")
+__msg(" 9: ........8. (bf) r9 = r8")
+__msg("10: .........9 (bf) r0 = r9")
+__msg("11: 0......... (95) exit")
+__naked void assign_chain(void)
+{
+ asm volatile (
+ "r0 = 42;"
+ "r1 = r0;"
+ "r2 = r1;"
+ "r3 = r2;"
+ "r4 = r3;"
+ "r5 = r4;"
+ "r6 = r5;"
+ "r7 = r6;"
+ "r8 = r7;"
+ "r9 = r8;"
+ "r0 = r9;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("0: .......... (b7) r1 = 7")
+__msg("1: .1........ (07) r1 += 7")
+__msg("2: .......... (b7) r2 = 7")
+__msg("3: ..2....... (b7) r3 = 42")
+__msg("4: ..23...... (0f) r2 += r3")
+__msg("5: .......... (b7) r0 = 0")
+__msg("6: 0......... (95) exit")
+__naked void arithmetics(void)
+{
+ asm volatile (
+ "r1 = 7;"
+ "r1 += 7;"
+ "r2 = 7;"
+ "r3 = 42;"
+ "r2 += r3;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+#ifdef CAN_USE_BPF_ST
+SEC("socket")
+__log_level(2)
+__msg(" 1: .1........ (07) r1 += -8")
+__msg(" 2: .1........ (7a) *(u64 *)(r1 +0) = 7")
+__msg(" 3: .1........ (b7) r2 = 42")
+__msg(" 4: .12....... (7b) *(u64 *)(r1 +0) = r2")
+__msg(" 5: .12....... (7b) *(u64 *)(r1 +0) = r2")
+__msg(" 6: .......... (b7) r0 = 0")
+__naked void store(void)
+{
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -8;"
+ "*(u64 *)(r1 +0) = 7;"
+ "r2 = 42;"
+ "*(u64 *)(r1 +0) = r2;"
+ "*(u64 *)(r1 +0) = r2;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+#endif
+
+SEC("socket")
+__log_level(2)
+__msg("1: ....4..... (07) r4 += -8")
+__msg("2: ....4..... (79) r5 = *(u64 *)(r4 +0)")
+__msg("3: ....45.... (07) r4 += -8")
+__naked void load(void)
+{
+ asm volatile (
+ "r4 = r10;"
+ "r4 += -8;"
+ "r5 = *(u64 *)(r4 +0);"
+ "r4 += -8;"
+ "r0 = r5;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("0: .1........ (61) r2 = *(u32 *)(r1 +0)")
+__msg("1: ..2....... (d4) r2 = le64 r2")
+__msg("2: ..2....... (bf) r0 = r2")
+__naked void endian(void)
+{
+ asm volatile (
+ "r2 = *(u32 *)(r1 +0);"
+ "r2 = le64 r2;"
+ "r0 = r2;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg(" 8: 0......... (b7) r1 = 1")
+__msg(" 9: 01........ (db) r1 = atomic64_fetch_add((u64 *)(r0 +0), r1)")
+__msg("10: 01........ (c3) lock *(u32 *)(r0 +0) += r1")
+__msg("11: 01........ (db) r1 = atomic64_xchg((u64 *)(r0 +0), r1)")
+__msg("12: 01........ (bf) r2 = r0")
+__msg("13: .12....... (bf) r0 = r1")
+__msg("14: 012....... (db) r0 = atomic64_cmpxchg((u64 *)(r2 +0), r0, r1)")
+__naked void atomic(void)
+{
+ asm volatile (
+ "r2 = r10;"
+ "r2 += -8;"
+ "r1 = 0;"
+ "*(u64 *)(r2 +0) = r1;"
+ "r1 = %[test_map] ll;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 == 0 goto 1f;"
+ "r1 = 1;"
+ "r1 = atomic_fetch_add((u64 *)(r0 +0), r1);"
+ ".8byte %[add_nofetch];" /* same as "lock *(u32 *)(r0 +0) += r1;" */
+ "r1 = xchg_64(r0 + 0, r1);"
+ "r2 = r0;"
+ "r0 = r1;"
+ "r0 = cmpxchg_64(r2 + 0, r0, r1);"
+ "1: exit;"
+ :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(test_map),
+ __imm_insn(add_nofetch, BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 0))
+ : __clobber_all);
+}
+
+#ifdef CAN_USE_LOAD_ACQ_STORE_REL
+
+SEC("socket")
+__log_level(2)
+__msg("2: .12....... (db) store_release((u64 *)(r2 -8), r1)")
+__msg("3: .......... (bf) r3 = r10")
+__msg("4: ...3...... (db) r4 = load_acquire((u64 *)(r3 -8))")
+__naked void atomic_load_acq_store_rel(void)
+{
+ asm volatile (
+ "r1 = 42;"
+ "r2 = r10;"
+ ".8byte %[store_release_insn];" /* store_release((u64 *)(r2 - 8), r1); */
+ "r3 = r10;"
+ ".8byte %[load_acquire_insn];" /* r4 = load_acquire((u64 *)(r3 + 0)); */
+ "r0 = r4;"
+ "exit;"
+ :
+ : __imm_insn(store_release_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_STORE_REL, BPF_REG_2, BPF_REG_1, -8)),
+ __imm_insn(load_acquire_insn,
+ BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_4, BPF_REG_3, -8))
+ : __clobber_all);
+}
+
+#endif /* CAN_USE_LOAD_ACQ_STORE_REL */
+
+SEC("socket")
+__log_level(2)
+__msg("4: .12....7.. (85) call bpf_trace_printk#6")
+__msg("5: 0......7.. (0f) r0 += r7")
+__naked void regular_call(void)
+{
+ asm volatile (
+ "r7 = 1;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 1;"
+ "call %[bpf_trace_printk];"
+ "r0 += r7;"
+ "exit;"
+ :
+ : __imm(bpf_trace_printk)
+ : __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("2: 012....... (25) if r1 > 0x7 goto pc+1")
+__msg("3: ..2....... (bf) r0 = r2")
+__naked void if1(void)
+{
+ asm volatile (
+ "r0 = 1;"
+ "r2 = 2;"
+ "if r1 > 0x7 goto +1;"
+ "r0 = r2;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("3: 0123...... (2d) if r1 > r3 goto pc+1")
+__msg("4: ..2....... (bf) r0 = r2")
+__naked void if2(void)
+{
+ asm volatile (
+ "r0 = 1;"
+ "r2 = 2;"
+ "r3 = 7;"
+ "if r1 > r3 goto +1;"
+ "r0 = r2;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* Verifier misses that r2 is alive if jset is not handled properly */
+SEC("socket")
+__log_level(2)
+__msg("2: 012....... (45) if r1 & 0x7 goto pc+1")
+__naked void if3_jset_bug(void)
+{
+ asm volatile (
+ "r0 = 1;"
+ "r2 = 2;"
+ "if r1 & 0x7 goto +1;"
+ "exit;"
+ "r0 = r2;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("0: .......... (b7) r1 = 0")
+__msg("1: .1........ (b7) r2 = 7")
+__msg("2: .12....... (25) if r1 > 0x7 goto pc+4")
+__msg("3: .12....... (07) r1 += 1")
+__msg("4: .12....... (27) r2 *= 2")
+__msg("5: .12....... (05) goto pc+0")
+__msg("6: .12....... (05) goto pc-5")
+__msg("7: .......... (b7) r0 = 0")
+__msg("8: 0......... (95) exit")
+__naked void loop(void)
+{
+ asm volatile (
+ "r1 = 0;"
+ "r2 = 7;"
+ "if r1 > 0x7 goto +4;"
+ "r1 += 1;"
+ "r2 *= 2;"
+ "goto +0;"
+ "goto -5;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_trace_printk)
+ : __clobber_all);
+}
+
+#ifdef CAN_USE_GOTOL
+SEC("socket")
+__log_level(2)
+__msg("2: .123...... (25) if r1 > 0x7 goto pc+2")
+__msg("3: ..2....... (bf) r0 = r2")
+__msg("4: 0......... (06) gotol pc+1")
+__msg("5: ...3...... (bf) r0 = r3")
+__msg("6: 0......... (95) exit")
+__naked void gotol(void)
+{
+ asm volatile (
+ "r2 = 42;"
+ "r3 = 24;"
+ "if r1 > 0x7 goto +2;"
+ "r0 = r2;"
+ "gotol +1;"
+ "r0 = r3;"
+ "exit;"
+ :
+ : __imm(bpf_trace_printk)
+ : __clobber_all);
+}
+#endif
+
+SEC("socket")
+__log_level(2)
+__msg("0: .......... (b7) r1 = 1")
+__msg("1: .1........ (e5) may_goto pc+1")
+__msg("2: .......... (05) goto pc-3")
+__msg("3: .1........ (bf) r0 = r1")
+__msg("4: 0......... (95) exit")
+__naked void may_goto(void)
+{
+ asm volatile (
+ "1: r1 = 1;"
+ ".8byte %[may_goto];"
+ "goto 1b;"
+ "r0 = r1;"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id),
+ __imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, +1 /* offset */, 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("1: 0......... (18) r2 = 0x7")
+__msg("3: 0.2....... (0f) r0 += r2")
+__naked void ldimm64(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "r2 = 0x7 ll;"
+ "r0 += r2;"
+ "exit;"
+ :
+ :: __clobber_all);
+}
+
+/* No rules specific to LD_ABS/LD_IND; the default behaviour kicks in */
+SEC("socket")
+__log_level(2)
+__msg("2: 0123456789 (30) r0 = *(u8 *)skb[42]")
+__msg("3: 012.456789 (0f) r7 += r0")
+__msg("4: 012.456789 (b7) r3 = 42")
+__msg("5: 0123456789 (50) r0 = *(u8 *)skb[r3 + 0]")
+__msg("6: 0......7.. (0f) r7 += r0")
+__naked void ldabs(void)
+{
+ asm volatile (
+ "r6 = r1;"
+ "r7 = 0;"
+ "r0 = *(u8 *)skb[42];"
+ "r7 += r0;"
+ "r3 = 42;"
+ ".8byte %[ld_ind];" /* same as "r0 = *(u8 *)skb[r3];" */
+ "r7 += r0;"
+ "r0 = r7;"
+ "exit;"
+ :
+ : __imm_insn(ld_ind, BPF_LD_IND(BPF_B, BPF_REG_3, 0))
+ : __clobber_all);
+}
+
+#ifdef __BPF_FEATURE_ADDR_SPACE_CAST
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__log_level(2)
+__msg(" 6: .12345.... (85) call bpf_arena_alloc_pages")
+__msg(" 7: 0......... (bf) r1 = addr_space_cast(r0, 0, 1)")
+__msg(" 8: .1........ (b7) r2 = 42")
+__naked void addr_space_cast(void)
+{
+ asm volatile (
+ "r1 = %[arena] ll;"
+ "r2 = 0;"
+ "r3 = 1;"
+ "r4 = 0;"
+ "r5 = 0;"
+ "call %[bpf_arena_alloc_pages];"
+ "r1 = addr_space_cast(r0, 0, 1);"
+ "r2 = 42;"
+ "*(u64 *)(r1 +0) = r2;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_arena_alloc_pages),
+ __imm_addr(arena)
+ : __clobber_all);
+}
+#endif
+
+static __used __naked int aux1(void)
+{
+ asm volatile (
+ "r0 = r1;"
+ "r0 += r2;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("0: ....45.... (b7) r1 = 1")
+__msg("1: .1..45.... (b7) r2 = 2")
+__msg("2: .12.45.... (b7) r3 = 3")
+/* Conservative liveness for subprog parameters. */
+__msg("3: .12345.... (85) call pc+2")
+__msg("4: .......... (b7) r0 = 0")
+__msg("5: 0......... (95) exit")
+__msg("6: .12....... (bf) r0 = r1")
+__msg("7: 0.2....... (0f) r0 += r2")
+/* Conservative liveness for subprog return value. */
+__msg("8: 0......... (95) exit")
+__naked void subprog1(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "r2 = 2;"
+ "r3 = 3;"
+ "call aux1;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+/* to retain debug info for BTF generation */
+void kfunc_root(void)
+{
+ bpf_arena_alloc_pages(0, 0, 0, 0, 0);
+}
+
+char _license[] SEC("license") = "GPL";
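
The expected strings above encode the verifier's register-liveness log: after the instruction index, column k (0 through 9) prints the digit k when rk is live before that instruction and '.' otherwise. A hypothetical extra case in the same style, to make the encoding concrete:

    SEC("socket")
    __log_level(2)
    /* r1 is live before insn 1 (it is read there); r0 before exit */
    __msg("0: .......... (b7) r1 = 5")
    __msg("1: .1........ (bf) r0 = r1")
    __msg("2: 0......... (95) exit")
    __naked void live_minimal(void)
    {
        asm volatile (
            "r1 = 5;"
            "r0 = r1;"
            "exit;"
            ::: __clobber_all);
    }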
diff --git a/tools/testing/selftests/bpf/progs/connect4_dropper.c b/tools/testing/selftests/bpf/progs/connect4_dropper.c
new file mode 100644
index 000000000000..a3819a5d09c8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/connect4_dropper.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <string.h>
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+
+#include <sys/socket.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define VERDICT_REJECT 0
+#define VERDICT_PROCEED 1
+
+int port;
+
+SEC("cgroup/connect4")
+int connect_v4_dropper(struct bpf_sock_addr *ctx)
+{
+ if (ctx->type != SOCK_STREAM)
+ return VERDICT_PROCEED;
+ if (ctx->user_port == bpf_htons(port))
+ return VERDICT_REJECT;
+ return VERDICT_PROCEED;
+}
+
+char _license[] SEC("license") = "GPL";
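
A plausible userspace counterpart, assuming the bpftool-generated skeleton has been loaded into skel (port value and cgroup path are arbitrary here): write the global, then attach to a cgroup so TCP connects to that port fail for every process inside it.

    int cg_fd = open("/sys/fs/cgroup/test", O_RDONLY);
    struct bpf_link *link;

    skel->bss->port = 60120; /* the port connect_v4_dropper rejects */
    link = bpf_program__attach_cgroup(skel->progs.connect_v4_dropper, cg_fd);
    if (!link)
        fprintf(stderr, "attach failed\n");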
diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c
index a943d394fd3a..9d158cfad981 100644
--- a/tools/testing/selftests/bpf/progs/connect4_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect4_prog.c
@@ -7,8 +7,7 @@
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/in6.h>
-#include <sys/socket.h>
-#include <netinet/tcp.h>
+#include <linux/tcp.h>
#include <linux/if.h>
#include <errno.h>
@@ -31,9 +30,14 @@
#define IFNAMSIZ 16
#endif
-int _version SEC("version") = 1;
+#ifndef SOL_TCP
+#define SOL_TCP 6
+#endif
+
+const char reno[] = "reno";
+const char cubic[] = "cubic";
-__attribute__ ((noinline))
+__attribute__ ((noinline)) __weak
int do_bind(struct bpf_sock_addr *ctx)
{
struct sockaddr_in sa = {};
@@ -49,35 +53,27 @@ int do_bind(struct bpf_sock_addr *ctx)
}
static __inline int verify_cc(struct bpf_sock_addr *ctx,
- char expected[TCP_CA_NAME_MAX])
+ const char expected[])
{
char buf[TCP_CA_NAME_MAX];
- int i;
if (bpf_getsockopt(ctx, SOL_TCP, TCP_CONGESTION, &buf, sizeof(buf)))
return 1;
- for (i = 0; i < TCP_CA_NAME_MAX; i++) {
- if (buf[i] != expected[i])
- return 1;
- if (buf[i] == 0)
- break;
- }
+ if (bpf_strncmp(buf, TCP_CA_NAME_MAX, expected))
+ return 1;
return 0;
}
static __inline int set_cc(struct bpf_sock_addr *ctx)
{
- char reno[TCP_CA_NAME_MAX] = "reno";
- char cubic[TCP_CA_NAME_MAX] = "cubic";
-
- if (bpf_setsockopt(ctx, SOL_TCP, TCP_CONGESTION, &reno, sizeof(reno)))
+ if (bpf_setsockopt(ctx, SOL_TCP, TCP_CONGESTION, (void *)reno, sizeof(reno)))
return 1;
if (verify_cc(ctx, reno))
return 1;
- if (bpf_setsockopt(ctx, SOL_TCP, TCP_CONGESTION, &cubic, sizeof(cubic)))
+ if (bpf_setsockopt(ctx, SOL_TCP, TCP_CONGESTION, (void *)cubic, sizeof(cubic)))
return 1;
if (verify_cc(ctx, cubic))
return 1;
@@ -198,4 +194,10 @@ int connect_v4_prog(struct bpf_sock_addr *ctx)
return do_bind(ctx) ? 1 : 0;
}
+SEC("cgroup/connect4")
+int connect_v4_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
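
verify_cc() now relies on the bpf_strncmp() helper, which compares a buffer of known size against a read-only constant string and returns 0 on equality, replacing the open-coded byte loop removed above. A minimal hedged sketch of the same idiom (helper name cc_matches is hypothetical):

    static __inline int cc_matches(struct bpf_sock_addr *ctx, const char *name)
    {
        char buf[TCP_CA_NAME_MAX];

        if (bpf_getsockopt(ctx, SOL_TCP, TCP_CONGESTION, buf, sizeof(buf)))
            return 0;
        /* name must resolve to a constant string for the helper */
        return bpf_strncmp(buf, TCP_CA_NAME_MAX, name) == 0;
    }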
diff --git a/tools/testing/selftests/bpf/progs/connect6_prog.c b/tools/testing/selftests/bpf/progs/connect6_prog.c
index 506d0f81a375..e98573b00ddb 100644
--- a/tools/testing/selftests/bpf/progs/connect6_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect6_prog.c
@@ -24,8 +24,6 @@
#define DST_REWRITE_PORT6 6666
-int _version SEC("version") = 1;
-
SEC("cgroup/connect6")
int connect_v6_prog(struct bpf_sock_addr *ctx)
{
@@ -92,4 +90,10 @@ int connect_v6_prog(struct bpf_sock_addr *ctx)
return 1;
}
+SEC("cgroup/connect6")
+int connect_v6_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/connect_force_port4.c b/tools/testing/selftests/bpf/progs/connect_force_port4.c
index 7396308677a3..27a632dd382e 100644
--- a/tools/testing/selftests/bpf/progs/connect_force_port4.c
+++ b/tools/testing/selftests/bpf/progs/connect_force_port4.c
@@ -10,8 +10,9 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include <bpf_sockopt_helpers.h>
+
char _license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
struct svc_addr {
__be32 addr;
@@ -58,6 +59,9 @@ int connect4(struct bpf_sock_addr *ctx)
SEC("cgroup/getsockname4")
int getsockname4(struct bpf_sock_addr *ctx)
{
+ if (!get_set_sk_priority(ctx))
+ return 1;
+
/* Expose local server as 1.2.3.4:60000 to client. */
if (ctx->user_port == bpf_htons(60123)) {
ctx->user_ip4 = bpf_htonl(0x01020304);
@@ -71,6 +75,9 @@ int getpeername4(struct bpf_sock_addr *ctx)
{
struct svc_addr *orig;
+ if (!get_set_sk_priority(ctx))
+ return 1;
+
/* Expose service 1.2.3.4:60000 as peer instead of backend. */
if (ctx->user_port == bpf_htons(60123)) {
orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0, 0);
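
get_set_sk_priority() comes from bpf_sockopt_helpers.h, which is not part of this patch; plausibly it round-trips SO_PRIORITY through bpf_getsockopt()/bpf_setsockopt() to prove sockopt access works from getsockname/getpeername hooks, along these lines (an assumption, not the verbatim header):

    static inline int get_set_sk_priority(void *ctx)
    {
        int prio;

        /* returns 0 when sockopt access fails from this hook */
        if (bpf_getsockopt(ctx, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
            return 0;
        if (bpf_setsockopt(ctx, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
            return 0;
        return 1;
    }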
diff --git a/tools/testing/selftests/bpf/progs/connect_force_port6.c b/tools/testing/selftests/bpf/progs/connect_force_port6.c
index c1a2b555e9ad..19cad93e612f 100644
--- a/tools/testing/selftests/bpf/progs/connect_force_port6.c
+++ b/tools/testing/selftests/bpf/progs/connect_force_port6.c
@@ -9,8 +9,9 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include <bpf_sockopt_helpers.h>
+
char _license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
struct svc_addr {
__be32 addr[4];
@@ -63,6 +64,9 @@ int connect6(struct bpf_sock_addr *ctx)
SEC("cgroup/getsockname6")
int getsockname6(struct bpf_sock_addr *ctx)
{
+ if (!get_set_sk_priority(ctx))
+ return 1;
+
/* Expose local server as [fc00::1]:60000 to client. */
if (ctx->user_port == bpf_htons(60124)) {
ctx->user_ip6[0] = bpf_htonl(0xfc000000);
@@ -79,6 +83,9 @@ int getpeername6(struct bpf_sock_addr *ctx)
{
struct svc_addr *orig;
+ if (!get_set_sk_priority(ctx))
+ return 1;
+
/* Expose service [fc00::1]:60000 as peer instead of backend. */
if (ctx->user_port == bpf_htons(60124)) {
orig = bpf_sk_storage_get(&service_mapping, ctx->sk, 0, 0);
diff --git a/tools/testing/selftests/bpf/progs/connect_ping.c b/tools/testing/selftests/bpf/progs/connect_ping.c
new file mode 100644
index 000000000000..60178192b672
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/connect_ping.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright 2022 Google LLC.
+ */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+
+/* 2001:db8::1 */
+#define BINDADDR_V6 { { { 0x20,0x01,0x0d,0xb8,0,0,0,0,0,0,0,0,0,0,0,1 } } }
+
+__u32 do_bind = 0;
+__u32 has_error = 0;
+__u32 invocations_v4 = 0;
+__u32 invocations_v6 = 0;
+
+SEC("cgroup/connect4")
+int connect_v4_prog(struct bpf_sock_addr *ctx)
+{
+ struct sockaddr_in sa = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = bpf_htonl(0x01010101),
+ };
+
+ __sync_fetch_and_add(&invocations_v4, 1);
+
+ if (do_bind && bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
+ has_error = 1;
+
+ return 1;
+}
+
+SEC("cgroup/connect6")
+int connect_v6_prog(struct bpf_sock_addr *ctx)
+{
+ struct sockaddr_in6 sa = {
+ .sin6_family = AF_INET6,
+ .sin6_addr = BINDADDR_V6,
+ };
+
+ __sync_fetch_and_add(&invocations_v6, 1);
+
+ if (do_bind && bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
+ has_error = 1;
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
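
These hooks fire for ICMP "ping" sockets, which unprivileged processes may create when net.ipv4.ping_group_range covers their group. A hypothetical trigger from the test side:

    /* connect() on a ping socket runs the cgroup/connect4 hook above */
    int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
    struct sockaddr_in dst = {
        .sin_family = AF_INET,
        .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
    };

    if (fd >= 0)
        connect(fd, (struct sockaddr *)&dst, sizeof(dst));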
diff --git a/tools/testing/selftests/bpf/progs/connect_unix_prog.c b/tools/testing/selftests/bpf/progs/connect_unix_prog.c
new file mode 100644
index 000000000000..ba60adadb335
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/connect_unix_prog.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite";
+
+SEC("cgroup/connect_unix")
+int connect_unix_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);
+ struct sockaddr_un *sa_kern_unaddr;
+ __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) +
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1;
+ int ret;
+
+ /* Rewrite destination. */
+ ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1);
+ if (ret)
+ return 0;
+
+ if (sa_kern->uaddrlen != unaddrlen)
+ return 0;
+
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
+ if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
+ return 0;
+
+ return 1;
+}
+
+SEC("cgroup/connect_unix")
+int connect_unix_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
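
SERVUN_REWRITE_ADDRESS starts with '\0', i.e. it names an abstract-namespace socket, and every length computation drops the array's trailing NUL. A matching server-side bind would look roughly like this sketch (fd is an AF_UNIX socket; the constant mirrors the BPF side):

    struct sockaddr_un addr = { .sun_family = AF_UNIX };
    socklen_t len = offsetof(struct sockaddr_un, sun_path) +
                    sizeof(SERVUN_REWRITE_ADDRESS) - 1;

    /* leading '\0' kept, trailing NUL dropped: abstract address */
    memcpy(addr.sun_path, SERVUN_REWRITE_ADDRESS,
           sizeof(SERVUN_REWRITE_ADDRESS) - 1);
    bind(fd, (struct sockaddr *)&addr, len);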
diff --git a/tools/testing/selftests/bpf/progs/core_kern.c b/tools/testing/selftests/bpf/progs/core_kern.c
new file mode 100644
index 000000000000..004f2acef2eb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/core_kern.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+#define ATTR __always_inline
+#include "test_jhash.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u32);
+ __uint(max_entries, 256);
+} array1 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u32);
+ __uint(max_entries, 256);
+} array2 SEC(".maps");
+
+static __noinline int randmap(int v, const struct net_device *dev)
+{
+ struct bpf_map *map = (struct bpf_map *)&array1;
+ int key = bpf_get_prandom_u32() & 0xff;
+ int *val;
+
+ if (bpf_get_prandom_u32() & 1)
+ map = (struct bpf_map *)&array2;
+
+ val = bpf_map_lookup_elem(map, &key);
+ if (val)
+ *val = bpf_get_prandom_u32() + v + dev->mtu;
+
+ return 0;
+}
+
+SEC("tp_btf/xdp_devmap_xmit")
+int BPF_PROG(tp_xdp_devmap_xmit_multi, const struct net_device
+ *from_dev, const struct net_device *to_dev, int sent, int drops,
+ int err)
+{
+ return randmap(from_dev->ifindex, from_dev);
+}
+
+SEC("fentry/eth_type_trans")
+int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb,
+ struct net_device *dev, unsigned short protocol)
+{
+ return randmap(dev->ifindex + skb->len, dev);
+}
+
+SEC("fexit/eth_type_trans")
+int BPF_PROG(fexit_eth_type_trans, struct sk_buff *skb,
+ struct net_device *dev, unsigned short protocol)
+{
+ return randmap(dev->ifindex + skb->len, dev);
+}
+
+volatile const int never;
+
+struct __sk_bUfF /* it will not exist in vmlinux */ {
+ int len;
+} __attribute__((preserve_access_index));
+
+struct bpf_testmod_test_read_ctx /* it exists in bpf_testmod */ {
+ size_t len;
+} __attribute__((preserve_access_index));
+
+SEC("tc")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ void *ptr;
+ int nh_off, i = 0;
+
+ nh_off = 14;
+
+ /* pragma unroll doesn't work on large loops */
+#define C do { \
+ ptr = data + i; \
+ if (ptr + nh_off > data_end) \
+ break; \
+ ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
+ if (never) { \
+ /* below is dead code with an unresolvable CO-RE relo */ \
+ i += ((struct __sk_bUfF *)ctx)->len; \
+ /* this CO-RE relo may or may not resolve
+ * depending on whether bpf_testmod is loaded.
+ */ \
+ i += ((struct bpf_testmod_test_read_ctx *)ctx)->len; \
+ } \
+ } while (0);
+#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
+ C30;C30;C30; /* 90 calls */
+ return 0;
+}
+
+typedef int (*func_proto_typedef___match)(long);
+typedef int (*func_proto_typedef___doesnt_match)(char *);
+typedef int (*func_proto_typedef_nested1)(func_proto_typedef___match);
+
+int proto_out[3];
+
+SEC("raw_tracepoint/sys_enter")
+int core_relo_proto(void *ctx)
+{
+ proto_out[0] = bpf_core_type_exists(func_proto_typedef___match);
+ proto_out[1] = bpf_core_type_exists(func_proto_typedef___doesnt_match);
+ proto_out[2] = bpf_core_type_exists(func_proto_typedef_nested1);
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
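
The __sk_bUfF and bpf_testmod_test_read_ctx shadow structs rely on __attribute__((preserve_access_index)): field accesses through such local definitions emit CO-RE relocations that libbpf resolves (or marks poisoned) against the running kernel's BTF instead of hard-coding offsets. A hedged standalone example of the same mechanism, assuming the includes used by the file above:

    /* local "shadow" of a kernel type; only the fields we touch matter */
    struct task_struct___shadow {
        int pid;
    } __attribute__((preserve_access_index));

    int out_pid;

    SEC("tp_btf/sys_enter")
    int read_pid(u64 *ctx)
    {
        struct task_struct___shadow *t =
            (void *)bpf_get_current_task_btf();

        /* probe-read at an offset fixed up by CO-RE at load time */
        bpf_probe_read_kernel(&out_pid, sizeof(out_pid), &t->pid);
        return 0;
    }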
diff --git a/tools/testing/selftests/bpf/progs/core_kern_overflow.c b/tools/testing/selftests/bpf/progs/core_kern_overflow.c
new file mode 100644
index 000000000000..f0d5652256ba
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/core_kern_overflow.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+typedef int (*func_proto_typedef)(long);
+typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
+typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);
+
+int proto_out;
+
+SEC("raw_tracepoint/sys_enter")
+int core_relo_proto(void *ctx)
+{
+ proto_out = bpf_core_type_exists(func_proto_typedef_nested2);
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
index 9a2850850121..5760ae015e09 100644
--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
+++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
@@ -13,6 +13,7 @@ struct core_reloc_kernel_output {
int valid[10];
char comm[sizeof("test_progs")];
int comm_len;
+ bool local_task_struct_matches;
};
/*
@@ -346,6 +347,7 @@ struct core_reloc_nesting___err_too_deep {
*/
struct core_reloc_arrays_output {
int a2;
+ int a3;
char b123;
int c1c;
int d00d;
@@ -454,6 +456,15 @@ struct core_reloc_arrays___err_bad_zero_sz_arr {
struct core_reloc_arrays_substruct d[1][2];
};
+struct core_reloc_arrays___err_bad_signed_arr_elem_sz {
+ /* int -> short (signed!): not supported case */
+ short a[5];
+ char b[2][3][4];
+ struct core_reloc_arrays_substruct c[3];
+ struct core_reloc_arrays_substruct d[1][2];
+ struct core_reloc_arrays_substruct f[][2];
+};
+
/*
* PRIMITIVES
*/
@@ -700,27 +711,11 @@ struct core_reloc_existence___minimal {
int a;
};
-struct core_reloc_existence___err_wrong_int_sz {
- short a;
-};
-
-struct core_reloc_existence___err_wrong_int_type {
+struct core_reloc_existence___wrong_field_defs {
+ void *a;
int b[1];
-};
-
-struct core_reloc_existence___err_wrong_int_kind {
struct{ int x; } c;
-};
-
-struct core_reloc_existence___err_wrong_arr_kind {
int arr;
-};
-
-struct core_reloc_existence___err_wrong_arr_value_type {
- short arr[1];
-};
-
-struct core_reloc_existence___err_wrong_struct_type {
int s;
};
@@ -801,12 +796,21 @@ struct core_reloc_bitfields___err_too_big_bitfield {
*/
struct core_reloc_size_output {
int int_sz;
+ int int_off;
int struct_sz;
+ int struct_off;
int union_sz;
+ int union_off;
int arr_sz;
+ int arr_off;
int arr_elem_sz;
+ int arr_elem_off;
int ptr_sz;
+ int ptr_off;
int enum_sz;
+ int enum_off;
+ int float_sz;
+ int float_off;
};
struct core_reloc_size {
@@ -816,6 +820,7 @@ struct core_reloc_size {
int arr_field[4];
void *ptr_field;
enum { VALUE = 123 } enum_field;
+ float float_field;
};
struct core_reloc_size___diff_sz {
@@ -825,6 +830,17 @@ struct core_reloc_size___diff_sz {
char arr_field[10];
void *ptr_field;
enum { OTHER_VALUE = 0xFFFFFFFFFFFFFFFF } enum_field;
+ double float_field;
+};
+
+struct core_reloc_size___diff_offs {
+ float float_field;
+ enum { YET_OTHER_VALUE = 123 } enum_field;
+ void *ptr_field;
+ int arr_field[4];
+ union { int x; } union_field;
+ struct { int x; } struct_field;
+ int int_field;
};
/* Error case of two candidates with the fields (int_field) at the same
@@ -839,6 +855,7 @@ struct core_reloc_size___err_ambiguous1 {
int arr_field[4];
void *ptr_field;
enum { VALUE___1 = 123 } enum_field;
+ float float_field;
};
struct core_reloc_size___err_ambiguous2 {
@@ -850,13 +867,15 @@ struct core_reloc_size___err_ambiguous2 {
int arr_field[4];
void *ptr_field;
enum { VALUE___2 = 123 } enum_field;
+ float float_field;
};
/*
- * TYPE EXISTENCE & SIZE
+ * TYPE EXISTENCE, MATCH & SIZE
*/
struct core_reloc_type_based_output {
bool struct_exists;
+ bool complex_struct_exists;
bool union_exists;
bool enum_exists;
bool typedef_named_struct_exists;
@@ -865,9 +884,24 @@ struct core_reloc_type_based_output {
bool typedef_int_exists;
bool typedef_enum_exists;
bool typedef_void_ptr_exists;
+ bool typedef_restrict_ptr_exists;
bool typedef_func_proto_exists;
bool typedef_arr_exists;
+ bool struct_matches;
+ bool complex_struct_matches;
+ bool union_matches;
+ bool enum_matches;
+ bool typedef_named_struct_matches;
+ bool typedef_anon_struct_matches;
+ bool typedef_struct_ptr_matches;
+ bool typedef_int_matches;
+ bool typedef_enum_matches;
+ bool typedef_void_ptr_matches;
+ bool typedef_restrict_ptr_matches;
+ bool typedef_func_proto_matches;
+ bool typedef_arr_matches;
+
int struct_sz;
int union_sz;
int enum_sz;
@@ -885,6 +919,14 @@ struct a_struct {
int x;
};
+struct a_complex_struct {
+ union {
+ struct a_struct * restrict a;
+ void *b;
+ } x;
+ volatile long y;
+};
+
union a_union {
int y;
int z;
@@ -909,6 +951,7 @@ typedef int int_typedef;
typedef enum { TYPEDEF_ENUM_VAL1, TYPEDEF_ENUM_VAL2 } enum_typedef;
typedef void *void_ptr_typedef;
+typedef int *restrict restrict_ptr_typedef;
typedef int (*func_proto_typedef)(long);
@@ -916,22 +959,86 @@ typedef char arr_typedef[20];
struct core_reloc_type_based {
struct a_struct f1;
- union a_union f2;
- enum an_enum f3;
- named_struct_typedef f4;
- anon_struct_typedef f5;
- struct_ptr_typedef f6;
- int_typedef f7;
- enum_typedef f8;
- void_ptr_typedef f9;
- func_proto_typedef f10;
- arr_typedef f11;
+ struct a_complex_struct f2;
+ union a_union f3;
+ enum an_enum f4;
+ named_struct_typedef f5;
+ anon_struct_typedef f6;
+ struct_ptr_typedef f7;
+ int_typedef f8;
+ enum_typedef f9;
+ void_ptr_typedef f10;
+ restrict_ptr_typedef f11;
+ func_proto_typedef f12;
+ arr_typedef f13;
};
/* no types in target */
struct core_reloc_type_based___all_missing {
};
+/* different member orders, enum variant values, signedness, etc */
+struct a_struct___diff {
+ int x;
+ int a;
+};
+
+struct a_struct___forward;
+
+struct a_complex_struct___diff {
+ union {
+ struct a_struct___forward *a;
+ void *b;
+ } x;
+ volatile long y;
+};
+
+union a_union___diff {
+ int z;
+ int y;
+};
+
+typedef struct a_struct___diff named_struct_typedef___diff;
+
+typedef struct { int z, x, y; } anon_struct_typedef___diff;
+
+typedef struct {
+ int c;
+ int b;
+ int a;
+} *struct_ptr_typedef___diff;
+
+enum an_enum___diff {
+ AN_ENUM_VAL2___diff = 0,
+ AN_ENUM_VAL1___diff = 42,
+ AN_ENUM_VAL3___diff = 1,
+};
+
+typedef unsigned int int_typedef___diff;
+
+typedef enum { TYPEDEF_ENUM_VAL2___diff, TYPEDEF_ENUM_VAL1___diff = 50 } enum_typedef___diff;
+
+typedef const void *void_ptr_typedef___diff;
+
+typedef int_typedef___diff (*func_proto_typedef___diff)(long);
+
+typedef char arr_typedef___diff[3];
+
+struct core_reloc_type_based___diff {
+ struct a_struct___diff f1;
+ struct a_complex_struct___diff f2;
+ union a_union___diff f3;
+ enum an_enum___diff f4;
+ named_struct_typedef___diff f5;
+ anon_struct_typedef___diff f6;
+ struct_ptr_typedef___diff f7;
+ int_typedef___diff f8;
+ enum_typedef___diff f9;
+ void_ptr_typedef___diff f10;
+ func_proto_typedef___diff f11;
+ arr_typedef___diff f12;
+};
+
/* different type sizes, extra modifiers, anon vs named enums, etc */
struct a_struct___diff_sz {
long x;
@@ -1110,6 +1217,20 @@ struct core_reloc_enumval_output {
int anon_val2;
};
+struct core_reloc_enum64val_output {
+ bool unsigned_val1_exists;
+ bool unsigned_val2_exists;
+ bool unsigned_val3_exists;
+ bool signed_val1_exists;
+ bool signed_val2_exists;
+ bool signed_val3_exists;
+
+ long unsigned_val1;
+ long unsigned_val2;
+ long signed_val1;
+ long signed_val2;
+};
+
enum named_enum {
NAMED_ENUM_VAL1 = 1,
NAMED_ENUM_VAL2 = 2,
@@ -1127,6 +1248,23 @@ struct core_reloc_enumval {
anon_enum f2;
};
+enum named_unsigned_enum64 {
+ UNSIGNED_ENUM64_VAL1 = 0x1ffffffffULL,
+ UNSIGNED_ENUM64_VAL2 = 0x2,
+ UNSIGNED_ENUM64_VAL3 = 0x3ffffffffULL,
+};
+
+enum named_signed_enum64 {
+ SIGNED_ENUM64_VAL1 = 0x1ffffffffLL,
+ SIGNED_ENUM64_VAL2 = -2,
+ SIGNED_ENUM64_VAL3 = 0x3ffffffffLL,
+};
+
+struct core_reloc_enum64val {
+ enum named_unsigned_enum64 f1;
+ enum named_signed_enum64 f2;
+};
+
/* differing enumerator values */
enum named_enum___diff {
NAMED_ENUM_VAL1___diff = 101,
@@ -1145,6 +1283,23 @@ struct core_reloc_enumval___diff {
anon_enum___diff f2;
};
+enum named_unsigned_enum64___diff {
+ UNSIGNED_ENUM64_VAL1___diff = 0x101ffffffffULL,
+ UNSIGNED_ENUM64_VAL2___diff = 0x202ffffffffULL,
+ UNSIGNED_ENUM64_VAL3___diff = 0x303ffffffffULL,
+};
+
+enum named_signed_enum64___diff {
+ SIGNED_ENUM64_VAL1___diff = -101,
+ SIGNED_ENUM64_VAL2___diff = -202,
+ SIGNED_ENUM64_VAL3___diff = -303,
+};
+
+struct core_reloc_enum64val___diff {
+ enum named_unsigned_enum64___diff f1;
+ enum named_signed_enum64___diff f2;
+};
+
/* missing (optional) third enum value */
enum named_enum___val3_missing {
NAMED_ENUM_VAL1___val3_missing = 111,
@@ -1161,6 +1316,21 @@ struct core_reloc_enumval___val3_missing {
anon_enum___val3_missing f2;
};
+enum named_unsigned_enum64___val3_missing {
+ UNSIGNED_ENUM64_VAL1___val3_missing = 0x111ffffffffULL,
+ UNSIGNED_ENUM64_VAL2___val3_missing = 0x222,
+};
+
+enum named_signed_enum64___val3_missing {
+ SIGNED_ENUM64_VAL1___val3_missing = 0x111ffffffffLL,
+ SIGNED_ENUM64_VAL2___val3_missing = -222,
+};
+
+struct core_reloc_enum64val___val3_missing {
+ enum named_unsigned_enum64___val3_missing f1;
+ enum named_signed_enum64___val3_missing f2;
+};
+
/* missing (mandatory) second enum value, should fail */
enum named_enum___err_missing {
NAMED_ENUM_VAL1___err_missing = 1,
@@ -1176,3 +1346,18 @@ struct core_reloc_enumval___err_missing {
enum named_enum___err_missing f1;
anon_enum___err_missing f2;
};
+
+enum named_unsigned_enum64___err_missing {
+ UNSIGNED_ENUM64_VAL1___err_missing = 0x1ffffffffULL,
+ UNSIGNED_ENUM64_VAL3___err_missing = 0x3ffffffffULL,
+};
+
+enum named_signed_enum64___err_missing {
+ SIGNED_ENUM64_VAL1___err_missing = 0x1ffffffffLL,
+ SIGNED_ENUM64_VAL3___err_missing = -3,
+};
+
+struct core_reloc_enum64val___err_missing {
+ enum named_unsigned_enum64___err_missing f1;
+ enum named_signed_enum64___err_missing f2;
+};
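
The ___suffix ("flavor") convention does the heavy lifting in this header: libbpf strips everything from the triple underscore onward when matching a local type against target BTF, so a_struct, a_struct___diff and a_struct___diff_sz all resolve against a target type named a_struct. A hypothetical extra flavor follows the same pattern:

    /* matches target "a_struct"; the layout difference is what gets tested */
    struct a_struct___one_more_flavor {
        long padding;
        int x;
    };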
diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h
new file mode 100644
index 000000000000..86085b79f5ca
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cpumask_common.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _CPUMASK_COMMON_H
+#define _CPUMASK_COMMON_H
+
+#include "errno.h"
+#include <stdbool.h>
+
+/* Should use BTF_FIELDS_MAX, but it is not always available in vmlinux.h,
+ * so use the hard-coded number as a workaround.
+ */
+#define CPUMASK_KPTR_FIELDS_MAX 11
+
+int err;
+
+#define private(name) SEC(".bss." #name) __attribute__((aligned(8)))
+private(MASK) static struct bpf_cpumask __kptr * global_mask;
+
+struct __cpumask_map_value {
+ struct bpf_cpumask __kptr * cpumask;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct __cpumask_map_value);
+ __uint(max_entries, 1);
+} __cpumask_map SEC(".maps");
+
+struct bpf_cpumask *bpf_cpumask_create(void) __ksym __weak;
+void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym __weak;
+struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym __weak;
+u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym __weak;
+u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym __weak;
+u32 bpf_cpumask_first_and(const struct cpumask *src1,
+ const struct cpumask *src2) __ksym __weak;
+void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_and(struct bpf_cpumask *cpumask,
+ const struct cpumask *src1,
+ const struct cpumask *src2) __ksym __weak;
+void bpf_cpumask_or(struct bpf_cpumask *cpumask,
+ const struct cpumask *src1,
+ const struct cpumask *src2) __ksym __weak;
+void bpf_cpumask_xor(struct bpf_cpumask *cpumask,
+ const struct cpumask *src1,
+ const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym __weak;
+u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym __weak;
+u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
+ const struct cpumask *src2) __ksym __weak;
+u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym __weak;
+int bpf_cpumask_populate(struct cpumask *cpumask, void *src, size_t src__sz) __ksym __weak;
+
+void bpf_rcu_read_lock(void) __ksym __weak;
+void bpf_rcu_read_unlock(void) __ksym __weak;
+
+static inline const struct cpumask *cast(struct bpf_cpumask *cpumask)
+{
+ return (const struct cpumask *)cpumask;
+}
+
+static inline struct bpf_cpumask *create_cpumask(void)
+{
+ struct bpf_cpumask *cpumask;
+
+ cpumask = bpf_cpumask_create();
+ if (!cpumask) {
+ err = 1;
+ return NULL;
+ }
+
+ if (!bpf_cpumask_empty(cast(cpumask))) {
+ err = 2;
+ bpf_cpumask_release(cpumask);
+ return NULL;
+ }
+
+ return cpumask;
+}
+
+static inline struct __cpumask_map_value *cpumask_map_value_lookup(void)
+{
+ u32 key = 0;
+
+ return bpf_map_lookup_elem(&__cpumask_map, &key);
+}
+
+static inline int cpumask_map_insert(struct bpf_cpumask *mask)
+{
+ struct __cpumask_map_value local, *v;
+ long status;
+ struct bpf_cpumask *old;
+ u32 key = 0;
+
+ local.cpumask = NULL;
+ status = bpf_map_update_elem(&__cpumask_map, &key, &local, 0);
+ if (status) {
+ bpf_cpumask_release(mask);
+ return status;
+ }
+
+ v = bpf_map_lookup_elem(&__cpumask_map, &key);
+ if (!v) {
+ bpf_cpumask_release(mask);
+ return -ENOENT;
+ }
+
+ old = bpf_kptr_xchg(&v->cpumask, mask);
+ if (old) {
+ bpf_cpumask_release(old);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+#endif /* _CPUMASK_COMMON_H */
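
cpumask_map_insert() consumes the caller's reference on every path: on success the mask ends up stored behind the map's kptr field, and on error the helper either releases the new mask or has already stashed it in the map. A hedged caller sketch, for a prog that includes this header:

    SEC("tp_btf/task_newtask")
    int BPF_PROG(stash_mask, struct task_struct *task, u64 clone_flags)
    {
        struct bpf_cpumask *mask = create_cpumask();

        if (!mask)
            return 0;
        bpf_cpumask_set_cpu(0, mask);
        /* our reference is consumed on every path, success or not */
        if (cpumask_map_insert(mask))
            err = 6;
        return 0;
    }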
diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c b/tools/testing/selftests/bpf/progs/cpumask_failure.c
new file mode 100644
index 000000000000..8a2fd596c8a3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#include "cpumask_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct kptr_nested_array_2 {
+ struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_array_1 {
+ /* Make btf_parse_fields() in map_create() return -E2BIG */
+ struct kptr_nested_array_2 d_2[CPUMASK_KPTR_FIELDS_MAX + 1];
+};
+
+struct kptr_nested_array {
+ struct kptr_nested_array_1 d_1;
+};
+
+private(MASK_NESTED) static struct kptr_nested_array global_mask_nested_arr;
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(task_newtask,
+ * TP_PROTO(struct task_struct *p, u64 clone_flags)
+ */
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Unreleased reference")
+int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ cpumask = create_cpumask();
+ __sink(cpumask);
+
+ /* cpumask is never released. */
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("NULL pointer passed to trusted arg0")
+int BPF_PROG(test_alloc_double_release, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ cpumask = create_cpumask();
+
+ /* cpumask is released twice. */
+ bpf_cpumask_release(cpumask);
+ bpf_cpumask_release(cpumask);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("must be referenced")
+int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ /* Can't acquire a non-struct bpf_cpumask. */
+ cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr);
+ __sink(cpumask);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("bpf_cpumask_set_cpu args#1 expected pointer to STRUCT bpf_cpumask")
+int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags)
+{
+ /* Can't set the CPU of a non-struct bpf_cpumask. */
+ bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Unreleased reference")
+int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+ struct __cpumask_map_value *v;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (cpumask_map_insert(cpumask))
+ return 0;
+
+ v = cpumask_map_value_lookup();
+ if (!v)
+ return 0;
+
+ cpumask = bpf_kptr_xchg(&v->cpumask, NULL);
+
+ /* cpumask is never released. */
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("NULL pointer passed to trusted arg0")
+int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags)
+{
+ /* NULL passed to KF_TRUSTED_ARGS kfunc. */
+ bpf_cpumask_empty(NULL);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("R2 must be a rcu pointer")
+int BPF_PROG(test_global_mask_out_of_rcu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local, *prev;
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ prev = bpf_kptr_xchg(&global_mask, local);
+ if (prev) {
+ bpf_cpumask_release(prev);
+ err = 3;
+ return 0;
+ }
+
+ bpf_rcu_read_lock();
+ local = global_mask;
+ if (!local) {
+ err = 4;
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ bpf_rcu_read_unlock();
+
+ /* RCU region is exited before calling KF_RCU kfunc. */
+
+ bpf_cpumask_test_cpu(0, (const struct cpumask *)local);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("NULL pointer passed to trusted arg1")
+int BPF_PROG(test_global_mask_no_null_check, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local, *prev;
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ prev = bpf_kptr_xchg(&global_mask, local);
+ if (prev) {
+ bpf_cpumask_release(prev);
+ err = 3;
+ return 0;
+ }
+
+ bpf_rcu_read_lock();
+ local = global_mask;
+
+ /* No NULL check is performed on global cpumask kptr. */
+ bpf_cpumask_test_cpu(0, (const struct cpumask *)local);
+
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to helper arg2")
+int BPF_PROG(test_global_mask_rcu_no_null_check, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *prev, *curr;
+
+ curr = bpf_cpumask_create();
+ if (!curr)
+ return 0;
+
+ prev = bpf_kptr_xchg(&global_mask, curr);
+ if (prev)
+ bpf_cpumask_release(prev);
+
+ bpf_rcu_read_lock();
+ curr = global_mask;
+ /* PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU passed to bpf_kptr_xchg() */
+ prev = bpf_kptr_xchg(&global_mask, curr);
+ bpf_rcu_read_unlock();
+ if (prev)
+ bpf_cpumask_release(prev);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("has no valid kptr")
+int BPF_PROG(test_invalid_nested_array, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local, *prev;
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ prev = bpf_kptr_xchg(&global_mask_nested_arr.d_1.d_2[CPUMASK_KPTR_FIELDS_MAX].mask, local);
+ if (prev) {
+ bpf_cpumask_release(prev);
+ err = 3;
+ return 0;
+ }
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("type=scalar expected=fp")
+int BPF_PROG(test_populate_invalid_destination, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *invalid = (struct bpf_cpumask *)0x123456;
+ u64 bits;
+ int ret;
+
+ ret = bpf_cpumask_populate((struct cpumask *)invalid, &bits, sizeof(bits));
+ if (!ret)
+ err = 2;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("leads to invalid memory access")
+int BPF_PROG(test_populate_invalid_source, struct task_struct *task, u64 clone_flags)
+{
+ void *garbage = (void *)0x123456;
+ struct bpf_cpumask *local;
+ int ret;
+
+ local = create_cpumask();
+ if (!local) {
+ err = 1;
+ return 0;
+ }
+
+ ret = bpf_cpumask_populate((struct cpumask *)local, garbage, 8);
+ if (!ret)
+ err = 2;
+
+ bpf_cpumask_release(local);
+
+ return 0;
+}
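
Each prog here is annotated with __failure plus a __msg() substring from bpf_misc.h; the test_loader runner loads the prog expecting the verifier to reject it and greps the log for the quoted text. The minimal shape of such a case, shown as a hedged sketch rather than a new test:

    SEC("tp_btf/task_newtask")
    __failure __msg("Unreleased reference")
    int BPF_PROG(sketch_leak, struct task_struct *task, u64 clone_flags)
    {
        struct bpf_cpumask *mask = bpf_cpumask_create();

        __sink(mask); /* acquired but never released: must be rejected */
        return 0;
    }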
diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c
new file mode 100644
index 000000000000..0e04c31b91c0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cpumask_success.c
@@ -0,0 +1,890 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+#include "cpumask_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+int pid, nr_cpus;
+
+struct kptr_nested {
+ struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_pair {
+ struct bpf_cpumask __kptr * mask_1;
+ struct bpf_cpumask __kptr * mask_2;
+};
+
+struct kptr_nested_mid {
+ int dummy;
+ struct kptr_nested m;
+};
+
+struct kptr_nested_deep {
+ struct kptr_nested_mid ptrs[2];
+ struct kptr_nested_pair ptr_pairs[3];
+};
+
+struct kptr_nested_deep_array_1_2 {
+ int dummy;
+ struct bpf_cpumask __kptr * mask[CPUMASK_KPTR_FIELDS_MAX];
+};
+
+struct kptr_nested_deep_array_1_1 {
+ int dummy;
+ struct kptr_nested_deep_array_1_2 d_2;
+};
+
+struct kptr_nested_deep_array_1 {
+ long dummy;
+ struct kptr_nested_deep_array_1_1 d_1;
+};
+
+struct kptr_nested_deep_array_2_2 {
+ long dummy[2];
+ struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_deep_array_2_1 {
+ int dummy;
+ struct kptr_nested_deep_array_2_2 d_2[CPUMASK_KPTR_FIELDS_MAX];
+};
+
+struct kptr_nested_deep_array_2 {
+ long dummy;
+ struct kptr_nested_deep_array_2_1 d_1;
+};
+
+struct kptr_nested_deep_array_3_2 {
+ long dummy[2];
+ struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_deep_array_3_1 {
+ int dummy;
+ struct kptr_nested_deep_array_3_2 d_2;
+};
+
+struct kptr_nested_deep_array_3 {
+ long dummy;
+ struct kptr_nested_deep_array_3_1 d_1[CPUMASK_KPTR_FIELDS_MAX];
+};
+
+private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2];
+private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1];
+private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1];
+private(MASK) static struct kptr_nested global_mask_nested[2];
+private(MASK_DEEP) static struct kptr_nested_deep global_mask_nested_deep;
+private(MASK_1) static struct kptr_nested_deep_array_1 global_mask_nested_deep_array_1;
+private(MASK_2) static struct kptr_nested_deep_array_2 global_mask_nested_deep_array_2;
+private(MASK_3) static struct kptr_nested_deep_array_3 global_mask_nested_deep_array_3;
+
+static bool is_test_task(void)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+
+ return pid == cur_pid;
+}
+
+static bool create_cpumask_set(struct bpf_cpumask **out1,
+ struct bpf_cpumask **out2,
+ struct bpf_cpumask **out3,
+ struct bpf_cpumask **out4)
+{
+ struct bpf_cpumask *mask1, *mask2, *mask3, *mask4;
+
+ mask1 = create_cpumask();
+ if (!mask1)
+ return false;
+
+ mask2 = create_cpumask();
+ if (!mask2) {
+ bpf_cpumask_release(mask1);
+ err = 3;
+ return false;
+ }
+
+ mask3 = create_cpumask();
+ if (!mask3) {
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ err = 4;
+ return false;
+ }
+
+ mask4 = create_cpumask();
+ if (!mask4) {
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ bpf_cpumask_release(mask3);
+ err = 5;
+ return false;
+ }
+
+ *out1 = mask1;
+ *out2 = mask2;
+ *out3 = mask3;
+ *out4 = mask4;
+
+ return true;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_alloc_free_cpumask, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ if (!is_test_task())
+ return 0;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ bpf_cpumask_release(cpumask);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ if (!is_test_task())
+ return 0;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ bpf_cpumask_set_cpu(0, cpumask);
+ if (!bpf_cpumask_test_cpu(0, cast(cpumask))) {
+ err = 3;
+ goto release_exit;
+ }
+
+ bpf_cpumask_clear_cpu(0, cpumask);
+ if (bpf_cpumask_test_cpu(0, cast(cpumask))) {
+ err = 4;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(cpumask);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_setall_clear_cpu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ if (!is_test_task())
+ return 0;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ bpf_cpumask_setall(cpumask);
+ if (!bpf_cpumask_full(cast(cpumask))) {
+ err = 3;
+ goto release_exit;
+ }
+
+ bpf_cpumask_clear(cpumask);
+ if (!bpf_cpumask_empty(cast(cpumask))) {
+ err = 4;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(cpumask);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_first_firstzero_cpu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ if (!is_test_task())
+ return 0;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (bpf_cpumask_first(cast(cpumask)) < nr_cpus) {
+ err = 3;
+ goto release_exit;
+ }
+
+ if (bpf_cpumask_first_zero(cast(cpumask)) != 0) {
+ bpf_printk("first zero: %d", bpf_cpumask_first_zero(cast(cpumask)));
+ err = 4;
+ goto release_exit;
+ }
+
+ bpf_cpumask_set_cpu(0, cpumask);
+ if (bpf_cpumask_first(cast(cpumask)) != 0) {
+ err = 5;
+ goto release_exit;
+ }
+
+ if (bpf_cpumask_first_zero(cast(cpumask)) != 1) {
+ err = 6;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(cpumask);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_firstand_nocpu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask1, *mask2;
+ u32 first;
+
+ if (!is_test_task())
+ return 0;
+
+ mask1 = create_cpumask();
+ if (!mask1)
+ return 0;
+
+ mask2 = create_cpumask();
+ if (!mask2)
+ goto release_exit;
+
+ bpf_cpumask_set_cpu(0, mask1);
+ bpf_cpumask_set_cpu(1, mask2);
+
+ first = bpf_cpumask_first_and(cast(mask1), cast(mask2));
+ if (first <= 1)
+ err = 3;
+
+release_exit:
+ if (mask1)
+ bpf_cpumask_release(mask1);
+ if (mask2)
+ bpf_cpumask_release(mask2);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ if (!is_test_task())
+ return 0;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (bpf_cpumask_test_and_set_cpu(0, cpumask)) {
+ err = 3;
+ goto release_exit;
+ }
+
+ if (!bpf_cpumask_test_and_set_cpu(0, cpumask)) {
+ err = 4;
+ goto release_exit;
+ }
+
+ if (!bpf_cpumask_test_and_clear_cpu(0, cpumask)) {
+ err = 5;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(cpumask);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
+
+ if (!is_test_task())
+ return 0;
+
+ if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
+ return 0;
+
+ bpf_cpumask_set_cpu(0, mask1);
+ bpf_cpumask_set_cpu(1, mask2);
+
+ if (bpf_cpumask_and(dst1, cast(mask1), cast(mask2))) {
+ err = 6;
+ goto release_exit;
+ }
+ if (!bpf_cpumask_empty(cast(dst1))) {
+ err = 7;
+ goto release_exit;
+ }
+
+ bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
+ if (!bpf_cpumask_test_cpu(0, cast(dst1))) {
+ err = 8;
+ goto release_exit;
+ }
+ if (!bpf_cpumask_test_cpu(1, cast(dst1))) {
+ err = 9;
+ goto release_exit;
+ }
+
+ bpf_cpumask_xor(dst2, cast(mask1), cast(mask2));
+ if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
+ err = 10;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ bpf_cpumask_release(dst1);
+ bpf_cpumask_release(dst2);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_intersects_subset, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
+
+ if (!is_test_task())
+ return 0;
+
+ if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
+ return 0;
+
+ bpf_cpumask_set_cpu(0, mask1);
+ bpf_cpumask_set_cpu(1, mask2);
+ if (bpf_cpumask_intersects(cast(mask1), cast(mask2))) {
+ err = 6;
+ goto release_exit;
+ }
+
+ bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
+ if (!bpf_cpumask_subset(cast(mask1), cast(dst1))) {
+ err = 7;
+ goto release_exit;
+ }
+
+ if (!bpf_cpumask_subset(cast(mask2), cast(dst1))) {
+ err = 8;
+ goto release_exit;
+ }
+
+ if (bpf_cpumask_subset(cast(dst1), cast(mask1))) {
+ err = 9;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ bpf_cpumask_release(dst1);
+ bpf_cpumask_release(dst2);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
+ int cpu;
+
+ if (!is_test_task())
+ return 0;
+
+ if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
+ return 0;
+
+ bpf_cpumask_set_cpu(0, mask1);
+ bpf_cpumask_set_cpu(1, mask2);
+ bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
+
+ cpu = bpf_cpumask_any_distribute(cast(mask1));
+ if (cpu != 0) {
+ err = 6;
+ goto release_exit;
+ }
+
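+	/* dst2 is still empty here, so any_distribute() must return a CPU
+	 * number >= nr_cpus.
+	 */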
+ cpu = bpf_cpumask_any_distribute(cast(dst2));
+ if (cpu < nr_cpus) {
+ err = 7;
+ goto release_exit;
+ }
+
+ bpf_cpumask_copy(dst2, cast(dst1));
+ if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
+ err = 8;
+ goto release_exit;
+ }
+
+ cpu = bpf_cpumask_any_distribute(cast(dst2));
+ if (cpu > 1) {
+ err = 9;
+ goto release_exit;
+ }
+
+ cpu = bpf_cpumask_any_and_distribute(cast(mask1), cast(mask2));
+ if (cpu < nr_cpus) {
+ err = 10;
+ goto release_exit;
+ }
+
+release_exit:
+ bpf_cpumask_release(mask1);
+ bpf_cpumask_release(mask2);
+ bpf_cpumask_release(dst1);
+ bpf_cpumask_release(dst2);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (cpumask_map_insert(cpumask))
+ err = 3;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *cpumask;
+ struct __cpumask_map_value *v;
+
+ cpumask = create_cpumask();
+ if (!cpumask)
+ return 0;
+
+ if (cpumask_map_insert(cpumask)) {
+ err = 3;
+ return 0;
+ }
+
+ v = cpumask_map_value_lookup();
+ if (!v) {
+ err = 4;
+ return 0;
+ }
+
+ cpumask = bpf_kptr_xchg(&v->cpumask, NULL);
+ if (cpumask)
+ bpf_cpumask_release(cpumask);
+ else
+ err = 5;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local, *prev;
+
+ if (!is_test_task())
+ return 0;
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ prev = bpf_kptr_xchg(&global_mask, local);
+ if (prev) {
+ bpf_cpumask_release(prev);
+ err = 3;
+ return 0;
+ }
+
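+	/* A global kptr may only be dereferenced under bpf_rcu_read_lock(). */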
+ bpf_rcu_read_lock();
+ local = global_mask;
+ if (!local) {
+ err = 4;
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ bpf_cpumask_test_cpu(0, (const struct cpumask *)local);
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_array_one_rcu, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local, *prev;
+
+ if (!is_test_task())
+ return 0;
+
+ /* Kptr arrays with one element are special cased, being treated
+ * just like a single pointer.
+ */
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ prev = bpf_kptr_xchg(&global_mask_array_one[0], local);
+ if (prev) {
+ bpf_cpumask_release(prev);
+ err = 3;
+ return 0;
+ }
+
+ bpf_rcu_read_lock();
+ local = global_mask_array_one[0];
+ if (!local) {
+ err = 4;
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+static int _global_mask_array_rcu(struct bpf_cpumask **mask0,
+ struct bpf_cpumask **mask1)
+{
+ struct bpf_cpumask *local;
+
+ if (!is_test_task())
+ return 0;
+
+	/* Check that two kptrs in the array work correctly and independently */
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ bpf_rcu_read_lock();
+
+ local = bpf_kptr_xchg(mask0, local);
+ if (local) {
+ err = 1;
+ goto err_exit;
+ }
+
+ /* [<mask 0>, *] */
+ if (!*mask0) {
+ err = 2;
+ goto err_exit;
+ }
+
+ if (!mask1)
+ goto err_exit;
+
+ /* [*, NULL] */
+ if (*mask1) {
+ err = 3;
+ goto err_exit;
+ }
+
+ local = create_cpumask();
+ if (!local) {
+ err = 9;
+ goto err_exit;
+ }
+
+ local = bpf_kptr_xchg(mask1, local);
+ if (local) {
+ err = 10;
+ goto err_exit;
+ }
+
+ /* [<mask 0>, <mask 1>] */
+ if (!*mask0 || !*mask1 || *mask0 == *mask1) {
+ err = 11;
+ goto err_exit;
+ }
+
+err_exit:
+ if (local)
+ bpf_cpumask_release(local);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_array_rcu, struct task_struct *task, u64 clone_flags)
+{
+ return _global_mask_array_rcu(&global_mask_array[0], &global_mask_array[1]);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_array_l2_rcu, struct task_struct *task, u64 clone_flags)
+{
+ return _global_mask_array_rcu(&global_mask_array_l2[0][0], &global_mask_array_l2[1][0]);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_nested_rcu, struct task_struct *task, u64 clone_flags)
+{
+ return _global_mask_array_rcu(&global_mask_nested[0].mask, &global_mask_nested[1].mask);
+}
+
+/* Ensure that the field->offset has been correctly advanced from one
+ * nested struct or array sub-tree to another. In the case of
+ * kptr_nested_deep, it comprises two sub-trees: kptr_1 and kptr_2. By
+ * calling bpf_kptr_xchg() on every single kptr in both nested sub-trees,
+ * the verifier should reject the program if the field->offset of any kptr
+ * is incorrect.
+ *
+ * For instance, if we have 10 kptrs in a nested struct and a program that
+ * accesses each kptr individually with bpf_kptr_xchg(), the compiler
+ * should emit instructions that access 10 different offsets if it works
+ * correctly. If the field->offset values of any pair of them are
+ * incorrectly the same, the number of unique offsets in the btf_record
+ * for this nested struct will be less than 10, and the verifier will fail
+ * to find some of the offsets emitted by the compiler.
+ *
+ * Even if the field->offset values of the kptrs are not duplicated, the
+ * verifier should fail to find a btf_field for an instruction accessing a
+ * kptr if the corresponding field->offset points to a random incorrect
+ * offset.
+ */
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags)
+{
+ int r, i;
+
+ r = _global_mask_array_rcu(&global_mask_nested_deep.ptrs[0].m.mask,
+ &global_mask_nested_deep.ptrs[1].m.mask);
+ if (r)
+ return r;
+
+ for (i = 0; i < 3; i++) {
+ r = _global_mask_array_rcu(&global_mask_nested_deep.ptr_pairs[i].mask_1,
+ &global_mask_nested_deep.ptr_pairs[i].mask_2);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_nested_deep_array_rcu, struct task_struct *task, u64 clone_flags)
+{
+ int i;
+
+ for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
+ _global_mask_array_rcu(&global_mask_nested_deep_array_1.d_1.d_2.mask[i], NULL);
+
+ for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
+ _global_mask_array_rcu(&global_mask_nested_deep_array_2.d_1.d_2[i].mask, NULL);
+
+ for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
+ _global_mask_array_rcu(&global_mask_nested_deep_array_3.d_1[i].d_2.mask, NULL);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local;
+
+ if (!is_test_task())
+ return 0;
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ if (bpf_cpumask_weight(cast(local)) != 0) {
+ err = 3;
+ goto out;
+ }
+
+ bpf_cpumask_set_cpu(0, local);
+ if (bpf_cpumask_weight(cast(local)) != 1) {
+ err = 4;
+ goto out;
+ }
+
+	/*
+	 * Make sure that setting an additional CPU changes the weight. Check
+	 * that the CPU was actually set first, to account for running on UP
+	 * machines where CPU 1 does not exist.
+	 */
+ bpf_cpumask_set_cpu(1, local);
+ if (bpf_cpumask_test_cpu(1, cast(local)) && bpf_cpumask_weight(cast(local)) != 2) {
+ err = 5;
+ goto out;
+ }
+
+ bpf_cpumask_clear(local);
+ if (bpf_cpumask_weight(cast(local)) != 0) {
+ err = 6;
+ goto out;
+ }
+out:
+ bpf_cpumask_release(local);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask1, *mask2;
+
+ mask1 = bpf_cpumask_create();
+ mask2 = bpf_cpumask_create();
+
+ if (!mask1 || !mask2)
+ goto free_masks_return;
+
+ bpf_cpumask_test_cpu(0, (const struct cpumask *)mask1);
+ bpf_cpumask_test_cpu(0, (const struct cpumask *)mask2);
+
+free_masks_return:
+ if (mask1)
+ bpf_cpumask_release(mask1);
+ if (mask2)
+ bpf_cpumask_release(mask2);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_populate_reject_small_mask, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *local;
+ u8 toofewbits;
+ int ret;
+
+ if (!is_test_task())
+ return 0;
+
+ local = create_cpumask();
+ if (!local)
+ return 0;
+
+ /* The kfunc should prevent this operation */
+ ret = bpf_cpumask_populate((struct cpumask *)local, &toofewbits, sizeof(toofewbits));
+ if (ret != -EACCES)
+ err = 2;
+
+ bpf_cpumask_release(local);
+
+ return 0;
+}
+
+/* Mask is guaranteed to be large enough for bpf_cpumask_t. */
+#define CPUMASK_TEST_MASKLEN (sizeof(cpumask_t))
+
+/* Add an extra word for the test_populate_reject_unaligned test. */
+u64 bits[CPUMASK_TEST_MASKLEN / 8 + 1];
+extern bool CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS __kconfig __weak;
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_populate_reject_unaligned, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask;
+ char *src;
+ int ret;
+
+ if (!is_test_task())
+ return 0;
+
+ /* Skip if unaligned accesses are fine for this arch. */
+ if (CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return 0;
+
+ mask = bpf_cpumask_create();
+ if (!mask) {
+ err = 1;
+ return 0;
+ }
+
+ /* Misalign the source array by a byte. */
+ src = &((char *)bits)[1];
+
+ ret = bpf_cpumask_populate((struct cpumask *)mask, src, CPUMASK_TEST_MASKLEN);
+ if (ret != -EINVAL)
+ err = 2;
+
+ bpf_cpumask_release(mask);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_populate, struct task_struct *task, u64 clone_flags)
+{
+ struct bpf_cpumask *mask;
+ bool bit;
+ int ret;
+ int i;
+
+ if (!is_test_task())
+ return 0;
+
+ /* Set only odd bits. */
+ __builtin_memset(bits, 0xaa, CPUMASK_TEST_MASKLEN);
+
+ mask = bpf_cpumask_create();
+ if (!mask) {
+ err = 1;
+ return 0;
+ }
+
+	/* Pass the entire bits array; the kfunc will only copy the valid bits. */
+ ret = bpf_cpumask_populate((struct cpumask *)mask, bits, CPUMASK_TEST_MASKLEN);
+ if (ret) {
+ err = 2;
+ goto out;
+ }
+
+	/*
+	 * This check is here to appease the verifier. We cannot directly
+	 * access NR_CPUS, the upper bound for nr_cpus, so we infer it from
+	 * the size of cpumask_t.
+	 */
+ if (nr_cpus < 0 || nr_cpus >= CPUMASK_TEST_MASKLEN * 8) {
+ err = 3;
+ goto out;
+ }
+
+ bpf_for(i, 0, nr_cpus) {
+ /* Odd-numbered bits should be set, even ones unset. */
+ bit = bpf_cpumask_test_cpu(i, (const struct cpumask *)mask);
+ if (bit == (i % 2 != 0))
+ continue;
+
+ err = 4;
+ break;
+ }
+
+out:
+ bpf_cpumask_release(mask);
+
+ return 0;
+}
+
+#undef CPUMASK_TEST_MASKLEN
diff --git a/tools/testing/selftests/bpf/progs/crypto_basic.c b/tools/testing/selftests/bpf/progs/crypto_basic.c
new file mode 100644
index 000000000000..8cf7168b42d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/crypto_basic.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+#include "crypto_common.h"
+
+int status;
+SEC("syscall")
+int crypto_release(void *ctx)
+{
+ struct bpf_crypto_params params = {
+ .type = "skcipher",
+ .algo = "ecb(aes)",
+ .key_len = 16,
+ };
+
+ struct bpf_crypto_ctx *cctx;
+ int err = 0;
+
+ status = 0;
+
+ cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
+
+ if (!cctx) {
+ status = err;
+ return 0;
+ }
+
+ bpf_crypto_ctx_release(cctx);
+
+ return 0;
+}
+
+SEC("syscall")
+__failure __msg("Unreleased reference")
+int crypto_acquire(void *ctx)
+{
+ struct bpf_crypto_params params = {
+ .type = "skcipher",
+ .algo = "ecb(aes)",
+ .key_len = 16,
+ };
+ struct bpf_crypto_ctx *cctx;
+ int err = 0;
+
+ status = 0;
+
+ cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
+
+ if (!cctx) {
+ status = err;
+ return 0;
+ }
+
+ cctx = bpf_crypto_ctx_acquire(cctx);
+ if (!cctx)
+ return -EINVAL;
+
+ bpf_crypto_ctx_release(cctx);
+
+ return 0;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/crypto_bench.c b/tools/testing/selftests/bpf/progs/crypto_bench.c
new file mode 100644
index 000000000000..4ac956b26240
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/crypto_bench.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+#include "crypto_common.h"
+
+const volatile unsigned int len = 16;
+char cipher[128] = {};
+u32 key_len, authsize;
+char dst[256] = {};
+u8 key[256] = {};
+long hits = 0;
+int status;
+
+SEC("syscall")
+int crypto_setup(void *args)
+{
+ struct bpf_crypto_ctx *cctx;
+ struct bpf_crypto_params params = {
+ .type = "skcipher",
+ .key_len = key_len,
+ .authsize = authsize,
+ };
+ int err = 0;
+
+ status = 0;
+
+ if (!cipher[0] || !key_len || key_len > 256) {
+ status = -EINVAL;
+ return 0;
+ }
+
+ __builtin_memcpy(&params.algo, cipher, sizeof(cipher));
+ __builtin_memcpy(&params.key, key, sizeof(key));
+ cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
+
+ if (!cctx) {
+ status = err;
+ return 0;
+ }
+
+ err = crypto_ctx_insert(cctx);
+ if (err && err != -EEXIST)
+ status = err;
+
+ return 0;
+}
+
+SEC("tc")
+int crypto_encrypt(struct __sk_buff *skb)
+{
+ struct __crypto_ctx_value *v;
+ struct bpf_crypto_ctx *ctx;
+ struct bpf_dynptr psrc, pdst;
+
+ v = crypto_ctx_value_lookup();
+ if (!v) {
+ status = -ENOENT;
+ return 0;
+ }
+
+ ctx = v->ctx;
+ if (!ctx) {
+ status = -ENOENT;
+ return 0;
+ }
+
+ bpf_dynptr_from_skb(skb, 0, &psrc);
+ bpf_dynptr_from_mem(dst, len, 0, &pdst);
+
+ status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);
+ __sync_add_and_fetch(&hits, 1);
+
+ return 0;
+}
+
+SEC("tc")
+int crypto_decrypt(struct __sk_buff *skb)
+{
+ struct bpf_dynptr psrc, pdst;
+ struct __crypto_ctx_value *v;
+ struct bpf_crypto_ctx *ctx;
+
+ v = crypto_ctx_value_lookup();
+ if (!v)
+ return -ENOENT;
+
+ ctx = v->ctx;
+ if (!ctx)
+ return -ENOENT;
+
+ bpf_dynptr_from_skb(skb, 0, &psrc);
+ bpf_dynptr_from_mem(dst, len, 0, &pdst);
+
+ status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL);
+ __sync_add_and_fetch(&hits, 1);
+
+ return 0;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/crypto_common.h b/tools/testing/selftests/bpf/progs/crypto_common.h
new file mode 100644
index 000000000000..57dd7a68a8c3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/crypto_common.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _CRYPTO_COMMON_H
+#define _CRYPTO_COMMON_H
+
+#include "errno.h"
+#include <stdbool.h>
+
+struct bpf_crypto_ctx *bpf_crypto_ctx_create(const struct bpf_crypto_params *params,
+ u32 params__sz, int *err) __ksym;
+struct bpf_crypto_ctx *bpf_crypto_ctx_acquire(struct bpf_crypto_ctx *ctx) __ksym;
+void bpf_crypto_ctx_release(struct bpf_crypto_ctx *ctx) __ksym;
+int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx, const struct bpf_dynptr *src,
+ const struct bpf_dynptr *dst, const struct bpf_dynptr *iv) __ksym;
+int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx, const struct bpf_dynptr *src,
+ const struct bpf_dynptr *dst, const struct bpf_dynptr *iv) __ksym;
+
+struct __crypto_ctx_value {
+	struct bpf_crypto_ctx __kptr *ctx;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct __crypto_ctx_value);
+ __uint(max_entries, 1);
+} __crypto_ctx_map SEC(".maps");
+
+static inline struct __crypto_ctx_value *crypto_ctx_value_lookup(void)
+{
+ u32 key = 0;
+
+ return bpf_map_lookup_elem(&__crypto_ctx_map, &key);
+}
+
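+/* Insert a zeroed value into the map, then transfer ownership of ctx into
+ * it with bpf_kptr_xchg(). Every failure path releases the reference so
+ * the caller never leaks it.
+ */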
+static inline int crypto_ctx_insert(struct bpf_crypto_ctx *ctx)
+{
+ struct __crypto_ctx_value local, *v;
+ struct bpf_crypto_ctx *old;
+ u32 key = 0;
+ int err;
+
+ local.ctx = NULL;
+ err = bpf_map_update_elem(&__crypto_ctx_map, &key, &local, 0);
+ if (err) {
+ bpf_crypto_ctx_release(ctx);
+ return err;
+ }
+
+ v = bpf_map_lookup_elem(&__crypto_ctx_map, &key);
+ if (!v) {
+ bpf_crypto_ctx_release(ctx);
+ return -ENOENT;
+ }
+
+ old = bpf_kptr_xchg(&v->ctx, ctx);
+ if (old) {
+ bpf_crypto_ctx_release(old);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+#endif /* _CRYPTO_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/crypto_sanity.c b/tools/testing/selftests/bpf/progs/crypto_sanity.c
new file mode 100644
index 000000000000..dfd8a258f14a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/crypto_sanity.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+#include "crypto_common.h"
+
+unsigned char key[256] = {};
+u16 udp_test_port = 7777;
+u32 authsize, key_len;
+char algo[128] = {};
+char dst[16] = {}, dst_bad[8] = {};
+int status;
+
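+/* Validate that the skb is an IPv6 UDP packet to udp_test_port, then set
+ * up a dynptr covering the first 16 bytes of its payload.
+ */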
+static int skb_dynptr_validate(struct __sk_buff *skb, struct bpf_dynptr *psrc)
+{
+ struct ipv6hdr ip6h;
+ struct udphdr udph;
+ u32 offset;
+
+ if (skb->protocol != __bpf_constant_htons(ETH_P_IPV6))
+ return -1;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h)))
+ return -1;
+
+ if (ip6h.nexthdr != IPPROTO_UDP)
+ return -1;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(ip6h), &udph, sizeof(udph)))
+ return -1;
+
+ if (udph.dest != __bpf_htons(udp_test_port))
+ return -1;
+
+ offset = ETH_HLEN + sizeof(ip6h) + sizeof(udph);
+ if (skb->len < offset + 16)
+ return -1;
+
+	/* Make sure that 16 bytes of payload are in the linear part of the skb */
+ bpf_skb_pull_data(skb, offset + 16);
+ bpf_dynptr_from_skb(skb, 0, psrc);
+ bpf_dynptr_adjust(psrc, offset, offset + 16);
+
+ return 0;
+}
+
+SEC("syscall")
+int skb_crypto_setup(void *ctx)
+{
+ struct bpf_crypto_params params = {
+ .type = "skcipher",
+ .key_len = key_len,
+ .authsize = authsize,
+ };
+ struct bpf_crypto_ctx *cctx;
+ int err;
+
+ status = 0;
+ if (key_len > 256) {
+ status = -EINVAL;
+ return 0;
+ }
+
+ __builtin_memcpy(&params.algo, algo, sizeof(algo));
+ __builtin_memcpy(&params.key, key, sizeof(key));
+
+ cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
+ if (!cctx) {
+ status = err;
+ return 0;
+ }
+
+ err = crypto_ctx_insert(cctx);
+ if (err && err != -EEXIST)
+ status = err;
+ return 0;
+}
+
+SEC("tc")
+int decrypt_sanity(struct __sk_buff *skb)
+{
+ struct __crypto_ctx_value *v;
+ struct bpf_crypto_ctx *ctx;
+ struct bpf_dynptr psrc, pdst;
+ int err;
+
+ status = 0;
+ err = skb_dynptr_validate(skb, &psrc);
+ if (err < 0) {
+ status = err;
+ return TC_ACT_SHOT;
+ }
+
+ v = crypto_ctx_value_lookup();
+ if (!v) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+ ctx = v->ctx;
+ if (!ctx) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+	/* Also check the bad case where the dst buffer is smaller than the
+	 * skb's linear section.
+	 */
+ bpf_dynptr_from_mem(dst_bad, sizeof(dst_bad), 0, &pdst);
+ status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL);
+ if (!status)
+ status = -EIO;
+ if (status != -EINVAL)
+ goto err;
+
+	/* dst is a global variable to make the test result easier to check.
+	 * In real production code, a percpu map should be used to store
+	 * the result.
+	 */
+ bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
+ status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL);
+err:
+ return TC_ACT_SHOT;
+}
+
+SEC("tc")
+int encrypt_sanity(struct __sk_buff *skb)
+{
+ struct __crypto_ctx_value *v;
+ struct bpf_crypto_ctx *ctx;
+ struct bpf_dynptr psrc, pdst;
+ int err;
+
+ status = 0;
+ err = skb_dynptr_validate(skb, &psrc);
+ if (err < 0) {
+ status = err;
+ return TC_ACT_SHOT;
+ }
+
+ v = crypto_ctx_value_lookup();
+ if (!v) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+ ctx = v->ctx;
+ if (!ctx) {
+ status = -ENOENT;
+ return TC_ACT_SHOT;
+ }
+
+	/* Also check the bad case where the dst buffer is smaller than the
+	 * skb's linear section.
+	 */
+ bpf_dynptr_from_mem(dst_bad, sizeof(dst_bad), 0, &pdst);
+ status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);
+ if (!status)
+ status = -EIO;
+ if (status != -EINVAL)
+ goto err;
+
+	/* dst is a global variable to make the test result easier to check.
+	 * In real production code, a percpu map should be used to store
+	 * the result.
+	 */
+ bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
+ status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);
+err:
+ return TC_ACT_SHOT;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/csum_diff_test.c b/tools/testing/selftests/bpf/progs/csum_diff_test.c
new file mode 100644
index 000000000000..9438f1773a58
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/csum_diff_test.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates */
+#include <linux/types.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define BUFF_SZ 512
+
+/* Will be updated by benchmark before program loading */
+char to_buff[BUFF_SZ];
+const volatile unsigned int to_buff_len = 0;
+char from_buff[BUFF_SZ];
+const volatile unsigned int from_buff_len = 0;
+unsigned short seed = 0;
+
+short result;
+
+char _license[] SEC("license") = "GPL";
+
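+/* bpf_csum_diff() results can be chained: checksumming a buffer in one
+ * call must match checksumming it in two halves, feeding the first
+ * result in as the seed of the second call.
+ */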
+SEC("tc")
+int compute_checksum(void *ctx)
+{
+ int to_len_half = to_buff_len / 2;
+ int from_len_half = from_buff_len / 2;
+ short result2;
+
+ /* Calculate checksum in one go */
+ result2 = bpf_csum_diff((void *)from_buff, from_buff_len,
+ (void *)to_buff, to_buff_len, seed);
+
+	/* Calculate the checksum by chaining two bpf_csum_diff() calls */
+ result = bpf_csum_diff((void *)from_buff, from_buff_len - from_len_half,
+ (void *)to_buff, to_buff_len - to_len_half, seed);
+
+ result = bpf_csum_diff((void *)from_buff + (from_buff_len - from_len_half), from_len_half,
+ (void *)to_buff + (to_buff_len - to_len_half), to_len_half, result);
+
+ result = (result == result2) ? result : 0;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/decap_sanity.c b/tools/testing/selftests/bpf/progs/decap_sanity.c
new file mode 100644
index 000000000000..bd3c657c58a7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/decap_sanity.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define UDP_TEST_PORT 7777
+
+void *bpf_cast_to_kern_ctx(void *) __ksym;
+bool init_csum_partial = false;
+bool final_csum_none = false;
+bool broken_csum_start = false;
+
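+/* Minimal local copies of kernel skb helpers, which BPF programs cannot
+ * call directly.
+ */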
+static unsigned int skb_headlen(const struct sk_buff *skb)
+{
+ return skb->len - skb->data_len;
+}
+
+static unsigned int skb_headroom(const struct sk_buff *skb)
+{
+ return skb->data - skb->head;
+}
+
+static int skb_checksum_start_offset(const struct sk_buff *skb)
+{
+ return skb->csum_start - skb_headroom(skb);
+}
+
+SEC("tc")
+int decap_sanity(struct __sk_buff *skb)
+{
+ struct sk_buff *kskb;
+ struct ipv6hdr ip6h;
+ struct udphdr udph;
+ int err;
+
+ if (skb->protocol != __bpf_constant_htons(ETH_P_IPV6))
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h)))
+ return TC_ACT_SHOT;
+
+ if (ip6h.nexthdr != IPPROTO_UDP)
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(ip6h), &udph, sizeof(udph)))
+ return TC_ACT_SHOT;
+
+ if (udph.dest != __bpf_constant_htons(UDP_TEST_PORT))
+ return TC_ACT_SHOT;
+
+ kskb = bpf_cast_to_kern_ctx(skb);
+ init_csum_partial = (kskb->ip_summed == CHECKSUM_PARTIAL);
+ err = bpf_skb_adjust_room(skb, -(s32)(ETH_HLEN + sizeof(ip6h) + sizeof(udph)),
+ 1, BPF_F_ADJ_ROOM_FIXED_GSO);
+ if (err)
+ return TC_ACT_SHOT;
+ final_csum_none = (kskb->ip_summed == CHECKSUM_NONE);
+ if (kskb->ip_summed == CHECKSUM_PARTIAL &&
+ (unsigned int)skb_checksum_start_offset(kskb) >= skb_headlen(kskb))
+ broken_csum_start = true;
+
+ return TC_ACT_SHOT;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/dev_cgroup.c b/tools/testing/selftests/bpf/progs/dev_cgroup.c
index 8924e06bdef0..c1dfbd2b56fc 100644
--- a/tools/testing/selftests/bpf/progs/dev_cgroup.c
+++ b/tools/testing/selftests/bpf/progs/dev_cgroup.c
@@ -41,14 +41,14 @@ int bpf_prog1(struct bpf_cgroup_dev_ctx *ctx)
bpf_trace_printk(fmt, sizeof(fmt), ctx->major, ctx->minor);
#endif
- /* Allow access to /dev/zero and /dev/random.
+ /* Allow access to /dev/null and /dev/urandom.
* Forbid everything else.
*/
if (ctx->major != 1 || type != BPF_DEVCG_DEV_CHAR)
return 0;
switch (ctx->minor) {
- case 5: /* 1:5 /dev/zero */
+ case 3: /* 1:3 /dev/null */
case 9: /* 1:9 /dev/urandom */
return 1;
}
@@ -57,4 +57,3 @@ int bpf_prog1(struct bpf_cgroup_dev_ctx *ctx)
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/tools/testing/selftests/bpf/progs/dmabuf_iter.c b/tools/testing/selftests/bpf/progs/dmabuf_iter.c
new file mode 100644
index 000000000000..13cdb11fdeb2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/dmabuf_iter.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Google LLC */
+#include <vmlinux.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+
+/* From uapi/linux/dma-buf.h */
+#define DMA_BUF_NAME_LEN 32
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, DMA_BUF_NAME_LEN);
+ __type(value, bool);
+ __uint(max_entries, 5);
+} testbuf_hash SEC(".maps");
+
+/*
+ * Fields output by this iterator are delimited by newlines. Convert any
+ * newlines in user-provided printed strings to spaces.
+ */
+static void sanitize_string(char *src, size_t size)
+{
+ for (char *c = src; (size_t)(c - src) < size && *c; ++c)
+ if (*c == '\n')
+ *c = ' ';
+}
+
+SEC("iter/dmabuf")
+int dmabuf_collector(struct bpf_iter__dmabuf *ctx)
+{
+ const struct dma_buf *dmabuf = ctx->dmabuf;
+ struct seq_file *seq = ctx->meta->seq;
+ unsigned long inode = 0;
+ size_t size;
+ const char *pname, *exporter;
+ char name[DMA_BUF_NAME_LEN] = {'\0'};
+
+ if (!dmabuf)
+ return 0;
+
+ if (BPF_CORE_READ_INTO(&inode, dmabuf, file, f_inode, i_ino) ||
+ bpf_core_read(&size, sizeof(size), &dmabuf->size) ||
+ bpf_core_read(&pname, sizeof(pname), &dmabuf->name) ||
+ bpf_core_read(&exporter, sizeof(exporter), &dmabuf->exp_name))
+ return 1;
+
+ /* Buffers are not required to be named */
+ if (pname) {
+ if (bpf_probe_read_kernel(name, sizeof(name), pname))
+ return 1;
+
+ /* Name strings can be provided by userspace */
+ sanitize_string(name, sizeof(name));
+ }
+
+ BPF_SEQ_PRINTF(seq, "%lu\n%llu\n%s\n%s\n", inode, size, name, exporter);
+ return 0;
+}
+
+SEC("syscall")
+int iter_dmabuf_for_each(const void *ctx)
+{
+ struct dma_buf *d;
+
+ bpf_for_each(dmabuf, d) {
+ char name[DMA_BUF_NAME_LEN];
+ const char *pname;
+ bool *found;
+ long len;
+ int i;
+
+ if (bpf_core_read(&pname, sizeof(pname), &d->name))
+ return 1;
+
+ /* Buffers are not required to be named */
+ if (!pname)
+ continue;
+
+ len = bpf_probe_read_kernel_str(name, sizeof(name), pname);
+ if (len < 0)
+ return 1;
+
+ /*
+ * The entire name buffer is used as a map key.
+ * Zeroize any uninitialized trailing bytes after the NUL.
+ */
+ bpf_for(i, len, DMA_BUF_NAME_LEN)
+ name[i] = 0;
+
+ found = bpf_map_lookup_elem(&testbuf_hash, name);
+ if (found) {
+ bool t = true;
+
+ bpf_map_update_elem(&testbuf_hash, name, &t, BPF_EXIST);
+ }
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c b/tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c
new file mode 100644
index 000000000000..0bf969a0b5ed
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops.s/test_2")
+__failure __msg("attach to unsupported member test_2 of struct bpf_dummy_ops")
+int BPF_PROG(test_unsupported_field_sleepable,
+ struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
+ char a3, unsigned long a4)
+{
+	/* Tries to mark a non-sleepable member of struct bpf_dummy_ops as sleepable. */
+ return 0;
+}
+
+SEC(".struct_ops")
+struct bpf_dummy_ops dummy_1 = {
+ .test_1 = NULL,
+ .test_2 = (void *)test_unsupported_field_sleepable,
+ .test_sleepable = (void *)NULL,
+};
diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
new file mode 100644
index 000000000000..ec0c595d47af
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1, struct bpf_dummy_ops_state *state)
+{
+ int ret;
+
+	/* Check that the nullable status of 'state' is detected correctly.
+	 * If the 'state' argument were assumed non-null by the verifier,
+	 * the code below would be deleted as dead (which it shouldn't be).
+	 * Hide it from the compiler behind an 'asm' block to avoid
+	 * unnecessary optimizations.
+	 */
+ asm volatile (
+ "if %[state] != 0 goto +2;"
+ "r0 = 0xf2f3f4f5;"
+ "exit;"
+ ::[state]"p"(state));
+
+ ret = state->val;
+ state->val = 0x5a;
+ return ret;
+}
+
+__u64 test_2_args[5];
+
+SEC("struct_ops/test_2")
+int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
+ char a3, unsigned long a4)
+{
+ test_2_args[0] = state->val;
+ test_2_args[1] = a1;
+ test_2_args[2] = a2;
+ test_2_args[3] = a3;
+ test_2_args[4] = a4;
+ return 0;
+}
+
+SEC("struct_ops.s/test_sleepable")
+int BPF_PROG(test_sleepable, struct bpf_dummy_ops_state *state)
+{
+ return 0;
+}
+
+SEC(".struct_ops")
+struct bpf_dummy_ops dummy_1 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2,
+ .test_sleepable = (void *)test_sleepable,
+};
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
new file mode 100644
index 000000000000..dda6a8dada82
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -0,0 +1,1995 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include <errno.h>
+#include <string.h>
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <linux/if_ether.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct test_info {
+ int x;
+ struct bpf_dynptr ptr;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct bpf_dynptr);
+} array_map1 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct test_info);
+} array_map2 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} array_map3 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} array_map4 SEC(".maps");
+
+struct sample {
+ int pid;
+ long value;
+ char comm[16];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096);
+} ringbuf SEC(".maps");
+
+int err, val;
+
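+/* Set up a dynptr over array_map3's value; returns 0 on success. */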
+static int get_map_val_dynptr(struct bpf_dynptr *ptr)
+{
+ __u32 key = 0, *map_val;
+
+ bpf_map_update_elem(&array_map3, &key, &val, 0);
+
+ map_val = bpf_map_lookup_elem(&array_map3, &key);
+ if (!map_val)
+ return -ENOENT;
+
+ bpf_dynptr_from_mem(map_val, sizeof(*map_val), 0, ptr);
+
+ return 0;
+}
+
+/* Every bpf_ringbuf_reserve_dynptr call must have a corresponding
+ * bpf_ringbuf_submit/discard_dynptr call
+ */
+SEC("?raw_tp")
+__failure __msg("Unreleased reference id=2")
+int ringbuf_missing_release1(void *ctx)
+{
+ struct bpf_dynptr ptr = {};
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ /* missing a call to bpf_ringbuf_discard/submit_dynptr */
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("Unreleased reference id=4")
+int ringbuf_missing_release2(void *ctx)
+{
+ struct bpf_dynptr ptr1, ptr2;
+ struct sample *sample;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr1);
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr2);
+
+ sample = bpf_dynptr_data(&ptr1, 0, sizeof(*sample));
+ if (!sample) {
+ bpf_ringbuf_discard_dynptr(&ptr1, 0);
+ bpf_ringbuf_discard_dynptr(&ptr2, 0);
+ return 0;
+ }
+
+ bpf_ringbuf_submit_dynptr(&ptr1, 0);
+
+ /* missing a call to bpf_ringbuf_discard/submit_dynptr on ptr2 */
+
+ return 0;
+}
+
+static int missing_release_callback_fn(__u32 index, void *data)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ /* missing a call to bpf_ringbuf_discard/submit_dynptr */
+
+ return 0;
+}
+
+/* Any dynptr initialized within a callback must have
+ * bpf_ringbuf_submit/discard_dynptr called on it before the callback returns
+ */
+SEC("?raw_tp")
+__failure __msg("Unreleased reference id")
+int ringbuf_missing_release_callback(void *ctx)
+{
+ bpf_loop(10, missing_release_callback_fn, NULL, 0);
+ return 0;
+}
+
+/* Can't call bpf_ringbuf_submit/discard_dynptr on a non-initialized dynptr */
+SEC("?raw_tp")
+__failure __msg("arg 1 is an unacquired reference")
+int ringbuf_release_uninit_dynptr(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* A dynptr can't be used after it has been invalidated */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #2")
+int use_after_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ char read_data[64];
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(read_data), 0, &ptr);
+
+ bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ return 0;
+}
+
+/* Can't call non-dynptr ringbuf APIs on a dynptr ringbuf sample */
+SEC("?raw_tp")
+__failure __msg("type=mem expected=ringbuf_mem")
+int ringbuf_invalid_api(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct sample *sample;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr);
+ sample = bpf_dynptr_data(&ptr, 0, sizeof(*sample));
+ if (!sample)
+ goto done;
+
+ sample->pid = 123;
+
+ /* invalid API use. need to use dynptr API to submit/discard */
+ bpf_ringbuf_submit(sample, 0);
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* Can't add a dynptr to a map */
+SEC("?raw_tp")
+__failure __msg("invalid read from stack")
+int add_dynptr_to_map1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ int key = 0;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ /* this should fail */
+ bpf_map_update_elem(&array_map1, &key, &ptr, 0);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* Can't add a struct with an embedded dynptr to a map */
+SEC("?raw_tp")
+__failure __msg("invalid read from stack")
+int add_dynptr_to_map2(void *ctx)
+{
+ struct test_info x;
+ int key = 0;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &x.ptr);
+
+ /* this should fail */
+ bpf_map_update_elem(&array_map2, &key, &x, 0);
+
+ bpf_ringbuf_submit_dynptr(&x.ptr, 0);
+
+ return 0;
+}
+
+/* A data slice can't be accessed out of bounds */
+SEC("?raw_tp")
+__failure __msg("value is outside of the allowed memory range")
+int data_slice_out_of_bounds_ringbuf(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ void *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
+
+ data = bpf_dynptr_data(&ptr, 0, 8);
+ if (!data)
+ goto done;
+
+ /* can't index out of bounds of the data slice */
+ val = *((char *)data + 8);
+
+done:
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* A data slice can't be accessed out of bounds */
+SEC("?tc")
+__failure __msg("value is outside of the allowed memory range")
+int data_slice_out_of_bounds_skb(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+ return SK_DROP;
+
+ /* this should fail */
+ *(__u8*)(hdr + 1) = 1;
+
+ return SK_PASS;
+}
+
+/* A metadata slice can't be accessed out of bounds */
+SEC("?tc")
+__failure __msg("value is outside of the allowed memory range")
+int data_slice_out_of_bounds_skb_meta(struct __sk_buff *skb)
+{
+ struct bpf_dynptr meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice_rdwr(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ /* this should fail */
+ *(md + 1) = 42;
+
+ return SK_PASS;
+}
+
+SEC("?raw_tp")
+__failure __msg("value is outside of the allowed memory range")
+int data_slice_out_of_bounds_map_value(void *ctx)
+{
+ __u32 map_val;
+ struct bpf_dynptr ptr;
+ void *data;
+
+ get_map_val_dynptr(&ptr);
+
+ data = bpf_dynptr_data(&ptr, 0, sizeof(map_val));
+ if (!data)
+ return 0;
+
+ /* can't index out of bounds of the data slice */
+ val = *((char *)data + (sizeof(map_val) + 1));
+
+ return 0;
+}
+
+/* A data slice can't be used after it has been released */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int data_slice_use_after_release1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct sample *sample;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr);
+ sample = bpf_dynptr_data(&ptr, 0, sizeof(*sample));
+ if (!sample)
+ goto done;
+
+ sample->pid = 123;
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ /* this should fail */
+ val = sample->pid;
+
+ return 0;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* A data slice can't be used after it has been released.
+ *
+ * This tests the case where the data slice tracks a dynptr (ptr2)
+ * that is at a non-zero offset from the frame pointer (ptr1 is at fp,
+ * ptr2 is at fp - 16).
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int data_slice_use_after_release2(void *ctx)
+{
+ struct bpf_dynptr ptr1, ptr2;
+ struct sample *sample;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr1);
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr2);
+
+ sample = bpf_dynptr_data(&ptr2, 0, sizeof(*sample));
+ if (!sample)
+ goto done;
+
+ sample->pid = 23;
+
+ bpf_ringbuf_submit_dynptr(&ptr2, 0);
+
+ /* this should fail */
+ sample->pid = 23;
+
+ bpf_ringbuf_submit_dynptr(&ptr1, 0);
+
+ return 0;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr2, 0);
+ bpf_ringbuf_discard_dynptr(&ptr1, 0);
+ return 0;
+}
+
+/* A data slice must be first checked for NULL */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'mem_or_null'")
+int data_slice_missing_null_check1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ void *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
+
+ data = bpf_dynptr_data(&ptr, 0, 8);
+
+ /* missing if (!data) check */
+
+ /* this should fail */
+ *(__u8 *)data = 3;
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* A data slice can't be dereferenced if it wasn't checked for null */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'mem_or_null'")
+int data_slice_missing_null_check2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u64 *data1, *data2;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);
+
+ data1 = bpf_dynptr_data(&ptr, 0, 8);
+ data2 = bpf_dynptr_data(&ptr, 0, 8);
+ if (data1)
+ /* this should fail */
+ *data2 = 3;
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* Can't pass in a dynptr as an arg to a helper function that doesn't take in a
+ * dynptr argument
+ */
+SEC("?raw_tp")
+__failure __msg("invalid read from stack")
+int invalid_helper1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ get_map_val_dynptr(&ptr);
+
+ /* this should fail */
+ bpf_strncmp((const char *)&ptr, sizeof(ptr), "hello!");
+
+ return 0;
+}
+
+/* A dynptr can't be passed into a helper function at a non-zero offset */
+SEC("?raw_tp")
+__failure __msg("cannot pass in dynptr at an offset=-8")
+int invalid_helper2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ char read_data[64];
+
+ get_map_val_dynptr(&ptr);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 8, 0, 0);
+ return 0;
+}
+
+/* A bpf_dynptr is invalidated if it's been written into */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #0")
+int invalid_write1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ void *data;
+ __u8 x = 0;
+
+ get_map_val_dynptr(&ptr);
+
+ memcpy(&ptr, &x, sizeof(x));
+
+ /* this should fail */
+ data = bpf_dynptr_data(&ptr, 0, 1);
+ __sink(data);
+
+ return 0;
+}
+
+/*
+ * A bpf_dynptr can't be used as a dynptr if it has been written into at a fixed
+ * offset
+ */
+SEC("?raw_tp")
+__failure __msg("cannot overwrite referenced dynptr")
+int invalid_write2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ char read_data[64];
+ __u8 x = 0;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+
+ memcpy((void *)&ptr + 8, &x, sizeof(x));
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/*
+ * A bpf_dynptr can't be used as a dynptr if it has been written into at a
+ * non-const offset
+ */
+SEC("?raw_tp")
+__failure __msg("cannot overwrite referenced dynptr")
+int invalid_write3(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ char stack_buf[16];
+ unsigned long len;
+ __u8 x = 0;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
+
+ memcpy(stack_buf, &val, sizeof(val));
+ len = stack_buf[0] & 0xf;
+
+ memcpy((void *)&ptr + len, &x, sizeof(x));
+
+ /* this should fail */
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+static int invalid_write4_callback(__u32 index, void *data)
+{
+ *(__u32 *)data = 123;
+
+ return 0;
+}
+
+/* If the dynptr is written into in a callback function, it should
+ * be invalidated as a dynptr
+ */
+SEC("?raw_tp")
+__failure __msg("cannot overwrite referenced dynptr")
+int invalid_write4(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+
+ bpf_loop(10, invalid_write4_callback, &ptr, 0);
+
+ /* this should fail */
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* A globally-defined bpf_dynptr can't be used (it must reside on the stack) */
+struct bpf_dynptr global_dynptr;
+
+SEC("?raw_tp")
+__failure __msg("type=map_value expected=fp")
+int global(void *ctx)
+{
+ /* this should fail */
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &global_dynptr);
+
+ bpf_ringbuf_discard_dynptr(&global_dynptr, 0);
+
+ return 0;
+}
+
+/* A direct read should fail */
+SEC("?raw_tp")
+__failure __msg("invalid read from stack")
+int invalid_read1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+
+ /* this should fail */
+ val = *(int *)&ptr;
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* A direct read at an offset should fail */
+SEC("?raw_tp")
+__failure __msg("cannot pass in dynptr at an offset")
+int invalid_read2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ char read_data[64];
+
+ get_map_val_dynptr(&ptr);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 1, 0, 0);
+
+ return 0;
+}
+
+/* A direct read at an offset into the lower stack slot should fail */
+SEC("?raw_tp")
+__failure __msg("invalid read from stack")
+int invalid_read3(void *ctx)
+{
+ struct bpf_dynptr ptr1, ptr2;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr1);
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr2);
+
+ /* this should fail */
+ memcpy(&val, (void *)&ptr1 + 8, sizeof(val));
+
+ bpf_ringbuf_discard_dynptr(&ptr1, 0);
+ bpf_ringbuf_discard_dynptr(&ptr2, 0);
+
+ return 0;
+}
+
+static int invalid_read4_callback(__u32 index, void *data)
+{
+ /* this should fail */
+ val = *(__u32 *)data;
+
+ return 0;
+}
+
+/* A direct read within a callback function should fail */
+SEC("?raw_tp")
+__failure __msg("invalid read from stack")
+int invalid_read4(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+
+ bpf_loop(10, invalid_read4_callback, &ptr, 0);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* Initializing a dynptr at an offset should fail */
+SEC("?raw_tp")
+__failure __msg("cannot pass in dynptr at an offset=0")
+int invalid_offset(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr + 1);
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* Can't release a dynptr twice */
+SEC("?raw_tp")
+__failure __msg("arg 1 is an unacquired reference")
+int release_twice(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ /* this second release should fail */
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+static int release_twice_callback_fn(__u32 index, void *data)
+{
+ /* this should fail */
+ bpf_ringbuf_discard_dynptr(data, 0);
+
+ return 0;
+}
+
+/* Test that releasing a dynptr twice, where one of the releases happens
+ * within a callback function, fails
+ */
+SEC("?raw_tp")
+__failure __msg("arg 1 is an unacquired reference")
+int release_twice_callback(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 32, 0, &ptr);
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ bpf_loop(10, release_twice_callback_fn, &ptr, 0);
+
+ return 0;
+}
+
+/* Reject unsupported local mem types for dynptr_from_mem API */
+SEC("?raw_tp")
+__failure __msg("Unsupported reg type fp for bpf_dynptr_from_mem data")
+int dynptr_from_mem_invalid_api(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ int x = 0;
+
+ /* this should fail */
+ bpf_dynptr_from_mem(&x, sizeof(x), 0, &ptr);
+
+ return 0;
+}
+
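+/* Reserve a ringbuf dynptr at fp-16, then overwrite its stack slot with a
+ * scalar on one branch before both branches converge on the discard. The
+ * verifier must not prune away the overwriting path.
+ */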
+SEC("?tc")
+__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
+int dynptr_pruning_overwrite(struct __sk_buff *ctx)
+{
+ asm volatile (
+ "r9 = 0xeB9F; \
+ r6 = %[ringbuf] ll; \
+ r1 = r6; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -16; \
+ call %[bpf_ringbuf_reserve_dynptr]; \
+ if r0 == 0 goto pjmp1; \
+ goto pjmp2; \
+ pjmp1: \
+ *(u64 *)(r10 - 16) = r9; \
+ pjmp2: \
+ r1 = r10; \
+ r1 += -16; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard_dynptr]; "
+ :
+ : __imm(bpf_ringbuf_reserve_dynptr),
+ __imm(bpf_ringbuf_discard_dynptr),
+ __imm_addr(ringbuf)
+ : __clobber_all
+ );
+ return 0;
+}
+
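+/* Same shape as above, but the divergent branch never touches the dynptr
+ * slot, so pruning at the merge point is safe.
+ */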
+SEC("?tc")
+__success __msg("12: safe") __log_level(2)
+int dynptr_pruning_stacksafe(struct __sk_buff *ctx)
+{
+ asm volatile (
+ "r9 = 0xeB9F; \
+ r6 = %[ringbuf] ll; \
+ r1 = r6; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -16; \
+ call %[bpf_ringbuf_reserve_dynptr]; \
+ if r0 == 0 goto stjmp1; \
+ goto stjmp2; \
+ stjmp1: \
+ r9 = r9; \
+ stjmp2: \
+ r1 = r10; \
+ r1 += -16; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard_dynptr]; "
+ :
+ : __imm(bpf_ringbuf_reserve_dynptr),
+ __imm(bpf_ringbuf_discard_dynptr),
+ __imm_addr(ringbuf)
+ : __clobber_all
+ );
+ return 0;
+}
+
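+/* Reserve a ringbuf dynptr at fp-16 on one branch and initialize a mem
+ * dynptr in the same slot on the other. State pruning must not confuse
+ * the two dynptr types.
+ */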
+SEC("?tc")
+__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
+int dynptr_pruning_type_confusion(struct __sk_buff *ctx)
+{
+ asm volatile (
+ "r6 = %[array_map4] ll; \
+ r7 = %[ringbuf] ll; \
+ r1 = r6; \
+ r2 = r10; \
+ r2 += -8; \
+ r9 = 0; \
+ *(u64 *)(r2 + 0) = r9; \
+ r3 = r10; \
+ r3 += -24; \
+ r9 = 0xeB9FeB9F; \
+ *(u64 *)(r10 - 16) = r9; \
+ *(u64 *)(r10 - 24) = r9; \
+ r9 = 0; \
+ r4 = 0; \
+ r8 = r2; \
+ call %[bpf_map_update_elem]; \
+ r1 = r6; \
+ r2 = r8; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto tjmp1; \
+ exit; \
+ tjmp1: \
+ r8 = r0; \
+ r1 = r7; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -16; \
+ r0 = *(u64 *)(r0 + 0); \
+ call %[bpf_ringbuf_reserve_dynptr]; \
+ if r0 == 0 goto tjmp2; \
+ r8 = r8; \
+ r8 = r8; \
+ r8 = r8; \
+ r8 = r8; \
+ r8 = r8; \
+ r8 = r8; \
+ r8 = r8; \
+ goto tjmp3; \
+ tjmp2: \
+ *(u64 *)(r10 - 8) = r9; \
+ *(u64 *)(r10 - 16) = r9; \
+ r1 = r8; \
+ r1 += 8; \
+ r2 = 0; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -16; \
+ call %[bpf_dynptr_from_mem]; \
+ tjmp3: \
+ r1 = r10; \
+ r1 += -16; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard_dynptr]; "
+ :
+ : __imm(bpf_map_update_elem),
+ __imm(bpf_map_lookup_elem),
+ __imm(bpf_ringbuf_reserve_dynptr),
+ __imm(bpf_dynptr_from_mem),
+ __imm(bpf_ringbuf_discard_dynptr),
+ __imm_addr(array_map4),
+ __imm_addr(ringbuf)
+ : __clobber_all
+ );
+ return 0;
+}
+
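+/* Spill a dynptr at a variable stack offset (fp-32+r8). Dynptrs must live
+ * at a constant offset, so the verifier rejects this.
+ */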
+SEC("?tc")
+__failure __msg("dynptr has to be at a constant offset") __log_level(2)
+int dynptr_var_off_overwrite(struct __sk_buff *ctx)
+{
+ asm volatile (
+ "r9 = 16; \
+ *(u32 *)(r10 - 4) = r9; \
+ r8 = *(u32 *)(r10 - 4); \
+ if r8 >= 0 goto vjmp1; \
+ r0 = 1; \
+ exit; \
+ vjmp1: \
+ if r8 <= 16 goto vjmp2; \
+ r0 = 1; \
+ exit; \
+ vjmp2: \
+ r8 &= 16; \
+ r1 = %[ringbuf] ll; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -32; \
+ r4 += r8; \
+ call %[bpf_ringbuf_reserve_dynptr]; \
+ r9 = 0xeB9F; \
+ *(u64 *)(r10 - 16) = r9; \
+ r1 = r10; \
+ r1 += -32; \
+ r1 += r8; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard_dynptr]; "
+ :
+ : __imm(bpf_ringbuf_reserve_dynptr),
+ __imm(bpf_ringbuf_discard_dynptr),
+ __imm_addr(ringbuf)
+ : __clobber_all
+ );
+ return 0;
+}
+
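+/* Initialize a mem dynptr at fp-16, partially overlapping the ringbuf
+ * dynptr reserved at fp-24. Partially overwriting a referenced dynptr's
+ * slots must be rejected.
+ */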
+SEC("?tc")
+__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
+int dynptr_partial_slot_invalidate(struct __sk_buff *ctx)
+{
+ asm volatile (
+ "r6 = %[ringbuf] ll; \
+ r7 = %[array_map4] ll; \
+ r1 = r7; \
+ r2 = r10; \
+ r2 += -8; \
+ r9 = 0; \
+ *(u64 *)(r2 + 0) = r9; \
+ r3 = r2; \
+ r4 = 0; \
+ r8 = r2; \
+ call %[bpf_map_update_elem]; \
+ r1 = r7; \
+ r2 = r8; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto sjmp1; \
+ exit; \
+ sjmp1: \
+ r7 = r0; \
+ r1 = r6; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -24; \
+ call %[bpf_ringbuf_reserve_dynptr]; \
+ *(u64 *)(r10 - 16) = r9; \
+ r1 = r7; \
+ r2 = 8; \
+ r3 = 0; \
+ r4 = r10; \
+ r4 += -16; \
+ call %[bpf_dynptr_from_mem]; \
+ r1 = r10; \
+ r1 += -512; \
+ r2 = 488; \
+ r3 = r10; \
+ r3 += -24; \
+ r4 = 0; \
+ r5 = 0; \
+ call %[bpf_dynptr_read]; \
+ r8 = 1; \
+ if r0 != 0 goto sjmp2; \
+ r8 = 0; \
+ sjmp2: \
+ r1 = r10; \
+ r1 += -24; \
+ r2 = 0; \
+ call %[bpf_ringbuf_discard_dynptr]; "
+ :
+ : __imm(bpf_map_update_elem),
+ __imm(bpf_map_lookup_elem),
+ __imm(bpf_ringbuf_reserve_dynptr),
+ __imm(bpf_ringbuf_discard_dynptr),
+ __imm(bpf_dynptr_from_mem),
+ __imm(bpf_dynptr_read),
+ __imm_addr(ringbuf),
+ __imm_addr(array_map4)
+ : __clobber_all
+ );
+ return 0;
+}
+
+/* Test that it is allowed to overwrite an unreferenced dynptr. */
+SEC("?raw_tp")
+__success
+int dynptr_overwrite_unref(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+
+ return 0;
+}
+
+/* Test that slices are invalidated on reinitializing a dynptr. */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int dynptr_invalidate_slice_reinit(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u8 *p;
+
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+ p = bpf_dynptr_data(&ptr, 0, 1);
+ if (!p)
+ return 0;
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+ /* this should fail */
+ return *p;
+}
+
+/* Invalidation of dynptr slices on destruction of dynptr should not miss
+ * mem_or_null pointers.
+ */
+SEC("?raw_tp")
+__failure __msg("R{{[0-9]+}} type=scalar expected=percpu_ptr_")
+int dynptr_invalidate_slice_or_null(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u8 *p;
+
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+
+ p = bpf_dynptr_data(&ptr, 0, 1);
+ *(__u8 *)&ptr = 0;
+ /* this should fail */
+ bpf_this_cpu_ptr(p);
+ return 0;
+}
+
+/* Destruction of a dynptr should also invalidate any slices obtained from it */
+SEC("?raw_tp")
+__failure __msg("R{{[0-9]+}} invalid mem access 'scalar'")
+int dynptr_invalidate_slice_failure(void *ctx)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ __u8 *p1, *p2;
+
+ if (get_map_val_dynptr(&ptr1))
+ return 0;
+ if (get_map_val_dynptr(&ptr2))
+ return 0;
+
+ p1 = bpf_dynptr_data(&ptr1, 0, 1);
+ if (!p1)
+ return 0;
+ p2 = bpf_dynptr_data(&ptr2, 0, 1);
+ if (!p2)
+ return 0;
+
+ *(__u8 *)&ptr1 = 0;
+ /* this should fail */
+ return *p1;
+}
+
+/* Invalidation of slices should be scoped and should not prevent dereferencing
+ * slices of another dynptr after destroying an unrelated dynptr
+ */
+SEC("?raw_tp")
+__success
+int dynptr_invalidate_slice_success(void *ctx)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ __u8 *p1, *p2;
+
+ if (get_map_val_dynptr(&ptr1))
+ return 1;
+ if (get_map_val_dynptr(&ptr2))
+ return 1;
+
+ p1 = bpf_dynptr_data(&ptr1, 0, 1);
+ if (!p1)
+ return 1;
+ p2 = bpf_dynptr_data(&ptr2, 0, 1);
+ if (!p2)
+ return 1;
+
+ *(__u8 *)&ptr1 = 0;
+ return *p2;
+}
+
+/* Overwriting referenced dynptr should be rejected */
+SEC("?raw_tp")
+__failure __msg("cannot overwrite referenced dynptr")
+int dynptr_overwrite_ref(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+ /* this should fail */
+ if (get_map_val_dynptr(&ptr))
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* Reject writes to dynptr slot from bpf_dynptr_read */
+SEC("?raw_tp")
+__failure __msg("potential write to dynptr at off=-16")
+int dynptr_read_into_slot(void *ctx)
+{
+ union {
+ struct {
+ char _pad[48];
+ struct bpf_dynptr ptr;
+ };
+ char buf[64];
+ } data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &data.ptr);
+ /* this should fail */
+ bpf_dynptr_read(data.buf, sizeof(data.buf), &data.ptr, 0, 0);
+
+ return 0;
+}
+
+/* Slices returned by bpf_dynptr_slice() are read-only and cannot be written to */
+SEC("?tc")
+__failure __msg("R{{[0-9]+}} cannot write into rdonly_mem")
+int skb_invalid_slice_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+ return SK_DROP;
+
+ /* this should fail */
+ hdr->h_proto = 1;
+
+ return SK_PASS;
+}
+
+/* Slices returned by bpf_dynptr_slice() are read-only and cannot be written to */
+SEC("?tc")
+__failure __msg("R{{[0-9]+}} cannot write into rdonly_mem")
+int skb_meta_invalid_slice_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ /* this should fail */
+ *md = 42;
+
+ return SK_PASS;
+}
+
+/* The read-only data slice is invalidated whenever a helper changes packet data */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int skb_invalid_data_slice1(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+ return SK_DROP;
+
+ val = hdr->h_proto;
+
+ if (bpf_skb_pull_data(skb, skb->len))
+ return SK_DROP;
+
+ /* this should fail */
+ val = hdr->h_proto;
+
+ return SK_PASS;
+}
+
+/* The read-write data slice is invalidated whenever a helper changes packet data */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int skb_invalid_data_slice2(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+ return SK_DROP;
+
+ hdr->h_proto = 123;
+
+ if (bpf_skb_pull_data(skb, skb->len))
+ return SK_DROP;
+
+ /* this should fail */
+ hdr->h_proto = 1;
+
+ return SK_PASS;
+}
+
+/* The read-only data slice is invalidated whenever bpf_dynptr_write() is called */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int skb_invalid_data_slice3(struct __sk_buff *skb)
+{
+ char write_data[64] = "hello there, world!!";
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+ return SK_DROP;
+
+ val = hdr->h_proto;
+
+ bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
+
+ /* this should fail */
+ val = hdr->h_proto;
+
+ return SK_PASS;
+}
+
+/* The read-write data slice is invalidated whenever bpf_dynptr_write() is called */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int skb_invalid_data_slice4(struct __sk_buff *skb)
+{
+ char write_data[64] = "hello there, world!!";
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+ hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+ return SK_DROP;
+
+ hdr->h_proto = 123;
+
+ bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
+
+ /* this should fail */
+ hdr->h_proto = 1;
+
+ return SK_PASS;
+}
+
+/* Read-only skb data slice is invalidated on write to skb metadata */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int ro_skb_slice_invalid_after_metadata_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data, meta;
+ __u8 *d;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ d = bpf_dynptr_slice(&data, 0, NULL, sizeof(*d));
+ if (!d)
+ return SK_DROP;
+
+ bpf_dynptr_write(&meta, 0, "x", 1, 0);
+
+ /* this should fail */
+ val = *d;
+
+ return SK_PASS;
+}
+
+/* Read-write skb data slice is invalidated on write to skb metadata */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int rw_skb_slice_invalid_after_metadata_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data, meta;
+ __u8 *d;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ d = bpf_dynptr_slice_rdwr(&data, 0, NULL, sizeof(*d));
+ if (!d)
+ return SK_DROP;
+
+ bpf_dynptr_write(&meta, 0, "x", 1, 0);
+
+ /* this should fail */
+ *d = 42;
+
+ return SK_PASS;
+}
+
+/* Read-only skb metadata slice is invalidated on write to skb data */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int ro_skb_meta_slice_invalid_after_payload_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data, meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ bpf_dynptr_write(&data, 0, "x", 1, 0);
+
+ /* this should fail */
+ val = *md;
+
+ return SK_PASS;
+}
+
+/* Read-write skb metadata slice is invalidated on write to skb data slice */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int rw_skb_meta_slice_invalid_after_payload_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data, meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice_rdwr(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ bpf_dynptr_write(&data, 0, "x", 1, 0);
+
+ /* this should fail */
+ *md = 42;
+
+ return SK_PASS;
+}
+
+/* Read-only skb metadata slice is invalidated whenever a helper changes packet data */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int ro_skb_meta_slice_invalid_after_payload_helper(struct __sk_buff *skb)
+{
+ struct bpf_dynptr meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ if (bpf_skb_pull_data(skb, skb->len))
+ return SK_DROP;
+
+ /* this should fail */
+ val = *md;
+
+ return SK_PASS;
+}
+
+/* Read-write skb metadata slice is invalidated whenever a helper changes packet data */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int rw_skb_meta_slice_invalid_after_payload_helper(struct __sk_buff *skb)
+{
+ struct bpf_dynptr meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice_rdwr(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ if (bpf_skb_pull_data(skb, skb->len))
+ return SK_DROP;
+
+ /* this should fail */
+ *md = 42;
+
+ return SK_PASS;
+}
+
+/* Read-only skb metadata slice is invalidated on write to skb metadata */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int ro_skb_meta_slice_invalid_after_metadata_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ bpf_dynptr_write(&meta, 0, "x", 1, 0);
+
+ /* this should fail */
+ val = *md;
+
+ return SK_PASS;
+}
+
+/* Read-write skb metadata slice is invalidated on write to skb metadata */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int rw_skb_meta_slice_invalid_after_metadata_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice_rdwr(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ bpf_dynptr_write(&meta, 0, "x", 1, 0);
+
+ /* this should fail */
+ *md = 42;
+
+ return SK_PASS;
+}
+
+/* The read-only data slice is invalidated whenever a helper changes packet data */
+SEC("?xdp")
+__failure __msg("invalid mem access 'scalar'")
+int xdp_invalid_data_slice1(struct xdp_md *xdp)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_xdp(xdp, 0, &ptr);
+ hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+		return XDP_DROP;
+
+ val = hdr->h_proto;
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
+ return XDP_DROP;
+
+ /* this should fail */
+ val = hdr->h_proto;
+
+ return XDP_PASS;
+}
+
+/* The read-write data slice is invalidated whenever a helper changes packet data */
+SEC("?xdp")
+__failure __msg("invalid mem access 'scalar'")
+int xdp_invalid_data_slice2(struct xdp_md *xdp)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_xdp(xdp, 0, &ptr);
+ hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
+ if (!hdr)
+		return XDP_DROP;
+
+ hdr->h_proto = 9;
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
+ return XDP_DROP;
+
+ /* this should fail */
+ hdr->h_proto = 1;
+
+ return XDP_PASS;
+}
+
+/* Only supported prog type can create skb-type dynptrs */
+SEC("?raw_tp")
+__failure __msg("calling kernel function bpf_dynptr_from_skb is not allowed")
+int skb_invalid_ctx(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_from_skb(ctx, 0, &ptr);
+
+ return 0;
+}
+
+/* Only supported prog type can create skb_meta-type dynptrs */
+SEC("?raw_tp")
+__failure __msg("calling kernel function bpf_dynptr_from_skb_meta is not allowed")
+int skb_meta_invalid_ctx(void *ctx)
+{
+ struct bpf_dynptr meta;
+
+ /* this should fail */
+ bpf_dynptr_from_skb_meta(ctx, 0, &meta);
+
+ return 0;
+}
+
+SEC("fentry/skb_tx_error")
+__failure __msg("must be referenced or trusted")
+int BPF_PROG(skb_invalid_ctx_fentry, void *skb)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ return 0;
+}
+
+SEC("fexit/skb_tx_error")
+__failure __msg("must be referenced or trusted")
+int BPF_PROG(skb_invalid_ctx_fexit, void *skb)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ return 0;
+}
+
+/* Reject writes to dynptr slot for uninit arg */
+SEC("?raw_tp")
+__failure __msg("potential write to dynptr at off=-16")
+int uninit_write_into_slot(void *ctx)
+{
+ struct {
+ char buf[64];
+ struct bpf_dynptr ptr;
+ } data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 80, 0, &data.ptr);
+ /* this should fail */
+ bpf_get_current_comm(data.buf, 80);
+
+ return 0;
+}
+
+/* Only supported prog type can create xdp-type dynptrs */
+SEC("?raw_tp")
+__failure __msg("calling kernel function bpf_dynptr_from_xdp is not allowed")
+int xdp_invalid_ctx(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_from_xdp(ctx, 0, &ptr);
+
+ return 0;
+}
+
+__u32 hdr_size = sizeof(struct ethhdr);
+/* Can't pass in variable-sized len to bpf_dynptr_slice */
+SEC("?tc")
+__failure __msg("unbounded memory access")
+int dynptr_slice_var_len1(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ char buffer[sizeof(*hdr)] = {};
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ /* this should fail */
+ hdr = bpf_dynptr_slice(&ptr, 0, buffer, hdr_size);
+ if (!hdr)
+ return SK_DROP;
+
+ return SK_PASS;
+}
+
+/* Can't pass in variable-sized len to bpf_dynptr_slice_rdwr */
+SEC("?tc")
+__failure __msg("must be a known constant")
+int dynptr_slice_var_len2(struct __sk_buff *skb)
+{
+ char buffer[sizeof(struct ethhdr)] = {};
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ if (hdr_size <= sizeof(buffer)) {
+ /* this should fail */
+ hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, hdr_size);
+ if (!hdr)
+ return SK_DROP;
+ hdr->h_proto = 12;
+ }
+
+ return SK_PASS;
+}
+
+static int callback(__u32 index, void *data)
+{
+ *(__u32 *)data = 123;
+
+ return 0;
+}
+
+/* If the dynptr is written to in a callback function, its data
+ * slices should be invalidated as well.
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int invalid_data_slices(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u32 *slice;
+
+ if (get_map_val_dynptr(&ptr))
+ return 0;
+
+ slice = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
+ if (!slice)
+ return 0;
+
+ bpf_loop(10, callback, &ptr, 0);
+
+ /* this should fail */
+ *slice = 1;
+
+ return 0;
+}
+
+/* Program types that don't allow writes to packet data should fail if
+ * bpf_dynptr_slice_rdwr is called
+ */
+SEC("cgroup_skb/ingress")
+__failure __msg("the prog does not allow writes to packet data")
+int invalid_slice_rdwr_rdonly(struct __sk_buff *skb)
+{
+ char buffer[sizeof(struct ethhdr)] = {};
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ /* this should fail since cgroup_skb doesn't allow
+ * changing packet data
+ */
+ hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
+ __sink(hdr);
+
+ return 0;
+}
+
+/* bpf_dynptr_adjust can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #0")
+int dynptr_adjust_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr = {};
+
+ /* this should fail */
+ bpf_dynptr_adjust(&ptr, 1, 2);
+
+ return 0;
+}
+
+/* bpf_dynptr_is_null can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #0")
+int dynptr_is_null_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr = {};
+
+ /* this should fail */
+ bpf_dynptr_is_null(&ptr);
+
+ return 0;
+}
+
+/* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #0")
+int dynptr_is_rdonly_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr = {};
+
+ /* this should fail */
+ bpf_dynptr_is_rdonly(&ptr);
+
+ return 0;
+}
+
+/* bpf_dynptr_size can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #0")
+int dynptr_size_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr = {};
+
+ /* this should fail */
+ bpf_dynptr_size(&ptr);
+
+ return 0;
+}
+
+/* Only initialized dynptrs can be cloned */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #0")
+int clone_invalid1(void *ctx)
+{
+ struct bpf_dynptr ptr1 = {};
+ struct bpf_dynptr ptr2;
+
+ /* this should fail */
+ bpf_dynptr_clone(&ptr1, &ptr2);
+
+ return 0;
+}
+
+/* Can't overwrite an existing dynptr when cloning */
+SEC("?xdp")
+__failure __msg("cannot overwrite referenced dynptr")
+int clone_invalid2(struct xdp_md *xdp)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr clone;
+
+ bpf_dynptr_from_xdp(xdp, 0, &ptr1);
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &clone);
+
+ /* this should fail */
+ bpf_dynptr_clone(&ptr1, &clone);
+
+ bpf_ringbuf_submit_dynptr(&clone, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate its clones */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #2")
+int clone_invalidate1(void *ctx)
+{
+ struct bpf_dynptr clone;
+ struct bpf_dynptr ptr;
+ char read_data[64];
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &clone, 0, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate its parent */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #2")
+int clone_invalidate2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone;
+ char read_data[64];
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ bpf_ringbuf_submit_dynptr(&clone, 0);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate its siblings */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #2")
+int clone_invalidate3(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone1;
+ struct bpf_dynptr clone2;
+ char read_data[64];
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone1);
+
+ bpf_dynptr_clone(&ptr, &clone2);
+
+ bpf_ringbuf_submit_dynptr(&clone2, 0);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &clone1, 0, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate any data slices
+ * of its clones
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_invalidate4(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone;
+ int *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+ data = bpf_dynptr_data(&clone, 0, sizeof(val));
+ if (!data)
+ return 0;
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate any data slices
+ * of its parent
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_invalidate5(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone;
+ int *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+ data = bpf_dynptr_data(&ptr, 0, sizeof(val));
+ if (!data)
+ return 0;
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ bpf_ringbuf_submit_dynptr(&clone, 0);
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate any data slices
+ * of its sibling
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_invalidate6(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone1;
+ struct bpf_dynptr clone2;
+ int *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone1);
+
+ bpf_dynptr_clone(&ptr, &clone2);
+
+ data = bpf_dynptr_data(&clone1, 0, sizeof(val));
+ if (!data)
+ return 0;
+
+ bpf_ringbuf_submit_dynptr(&clone2, 0);
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* An skb clone's data slices should be invalid anytime packet data changes */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int clone_skb_packet_data(struct __sk_buff *skb)
+{
+ char buffer[sizeof(__u32)] = {};
+ struct bpf_dynptr clone;
+ struct bpf_dynptr ptr;
+ __u32 *data;
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+ data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
+ if (!data)
+		return SK_DROP;
+
+ if (bpf_skb_pull_data(skb, skb->len))
+ return SK_DROP;
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* An skb clone's metadata slice becomes invalid anytime packet data changes */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int clone_skb_packet_meta(struct __sk_buff *skb)
+{
+ struct bpf_dynptr clone, meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+ bpf_dynptr_clone(&meta, &clone);
+ md = bpf_dynptr_slice_rdwr(&clone, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ if (bpf_skb_pull_data(skb, skb->len))
+ return SK_DROP;
+
+ /* this should fail */
+ *md = 42;
+
+ return 0;
+}
+
+/* An xdp clone's data slices should be invalid anytime packet data changes */
+SEC("?xdp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_xdp_packet_data(struct xdp_md *xdp)
+{
+ char buffer[sizeof(__u32)] = {};
+ struct bpf_dynptr clone;
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ __u32 *data;
+
+ bpf_dynptr_from_xdp(xdp, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+ data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
+ if (!data)
+ return XDP_DROP;
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
+ return XDP_DROP;
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* Buffers that are provided must be sufficiently long */
+SEC("?cgroup_skb/egress")
+__failure __msg("memory, len pair leads to invalid memory access")
+int test_dynptr_skb_small_buff(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ char buffer[8] = {};
+ __u64 *data;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+	/* This should fail to load: the 8-byte buffer is too small for len 9 */
+ data = bpf_dynptr_slice(&ptr, 0, buffer, 9);
+
+ return !!data;
+}
+
+__noinline long global_call_bpf_dynptr(const struct bpf_dynptr *dynptr)
+{
+ long ret = 0;
+	/* Keep this global function non-empty so the compiler cannot
+	 * optimize away the call to it.
+	 */
+ __sink(ret);
+ return ret;
+}
+
+SEC("?raw_tp")
+__failure __msg("arg#0 expected pointer to stack or const struct bpf_dynptr")
+int test_dynptr_reg_type(void *ctx)
+{
+ struct task_struct *current = NULL;
+ /* R1 should be holding a PTR_TO_BTF_ID, so this shouldn't be a
+ * reg->type that can be passed to a function accepting a
+ * ARG_PTR_TO_DYNPTR | MEM_RDONLY. process_dynptr_func() should catch
+ * this.
+ */
+ global_call_bpf_dynptr((const struct bpf_dynptr *)current);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c
new file mode 100644
index 000000000000..e0d672d93adf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/dynptr_success.c
@@ -0,0 +1,1137 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include <vmlinux.h>
+#include <string.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "errno.h"
+
+#define PAGE_SIZE_64K 65536
+
+char _license[] SEC("license") = "GPL";
+
+int pid, err, val;
+
+struct ringbuf_sample {
+ int pid;
+ int seq;
+ long value;
+ char comm[16];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096);
+} ringbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} array_map SEC(".maps");
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_read_write(void *ctx)
+{
+ char write_data[64] = "hello there, world!!";
+ char read_data[64] = {};
+ struct bpf_dynptr ptr;
+ int i;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(write_data), 0, &ptr);
+
+ /* Write data into the dynptr */
+ err = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
+
+ /* Read the data that was written into the dynptr */
+ err = err ?: bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ /* Ensure the data we read matches the data we wrote */
+ for (i = 0; i < sizeof(read_data); i++) {
+ if (read_data[i] != write_data[i]) {
+ err = 1;
+ break;
+ }
+ }
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_data(void *ctx)
+{
+ __u32 key = 0, val = 235, *map_val;
+ struct bpf_dynptr ptr;
+ __u32 map_val_size;
+ void *data;
+
+ map_val_size = sizeof(*map_val);
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ bpf_map_update_elem(&array_map, &key, &val, 0);
+
+ map_val = bpf_map_lookup_elem(&array_map, &key);
+ if (!map_val) {
+ err = 1;
+ return 0;
+ }
+
+ bpf_dynptr_from_mem(map_val, map_val_size, 0, &ptr);
+
+ /* Try getting a data slice that is out of range */
+ data = bpf_dynptr_data(&ptr, map_val_size + 1, 1);
+ if (data) {
+ err = 2;
+ return 0;
+ }
+
+ /* Try getting more bytes than available */
+ data = bpf_dynptr_data(&ptr, 0, map_val_size + 1);
+ if (data) {
+ err = 3;
+ return 0;
+ }
+
+ data = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
+ if (!data) {
+ err = 4;
+ return 0;
+ }
+
+ *(__u32 *)data = 999;
+
+ err = bpf_probe_read_kernel(&val, sizeof(val), data);
+ if (err)
+ return 0;
+
+ if (val != *(int *)data)
+ err = 5;
+
+ return 0;
+}
+
+static int ringbuf_callback(__u32 index, void *data)
+{
+ struct ringbuf_sample *sample;
+
+ struct bpf_dynptr *ptr = (struct bpf_dynptr *)data;
+
+ sample = bpf_dynptr_data(ptr, 0, sizeof(*sample));
+ if (!sample)
+ err = 2;
+ else
+ sample->pid += index;
+
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_ringbuf(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct ringbuf_sample *sample;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ val = 100;
+
+	/* Check that a dynamically sized reservation can be made */
+ err = bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ sample = err ? NULL : bpf_dynptr_data(&ptr, 0, sizeof(*sample));
+ if (!sample) {
+ err = 1;
+ goto done;
+ }
+
+ sample->pid = 10;
+
+ /* Can pass dynptr to callback functions */
+ bpf_loop(10, ringbuf_callback, &ptr, 0);
+
+ if (sample->pid != 55)
+ err = 2;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("?cgroup_skb/egress")
+int test_skb_readonly(struct __sk_buff *skb)
+{
+ __u8 write_data[2] = {1, 2};
+ struct bpf_dynptr ptr;
+ int ret;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+	/* since cgroup skbs are read-only, writes should fail */
+ ret = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
+ if (ret != -EINVAL) {
+ err = 2;
+ return 1;
+ }
+
+ return 1;
+}
+
+SEC("?cgroup_skb/egress")
+int test_dynptr_skb_data(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ __u64 *data;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+ /* This should return NULL. Must use bpf_dynptr_slice API */
+ data = bpf_dynptr_data(&ptr, 0, 1);
+ if (data) {
+ err = 2;
+ return 1;
+ }
+
+ return 1;
+}
+
+SEC("?tc")
+int test_dynptr_skb_meta_data(struct __sk_buff *skb)
+{
+ struct bpf_dynptr meta;
+ __u8 *md;
+ int ret;
+
+ err = 1;
+ ret = bpf_dynptr_from_skb_meta(skb, 0, &meta);
+ if (ret)
+ return 1;
+
+ /* This should return NULL. Must use bpf_dynptr_slice API */
+ err = 2;
+ md = bpf_dynptr_data(&meta, 0, sizeof(*md));
+ if (md)
+ return 1;
+
+ err = 0;
+ return 1;
+}
+
+/* Check that skb metadata dynptr ops don't accept any flags. */
+SEC("?tc")
+int test_dynptr_skb_meta_flags(struct __sk_buff *skb)
+{
+ const __u64 INVALID_FLAGS = ~0ULL;
+ struct bpf_dynptr meta;
+ __u8 buf;
+ int ret;
+
+ err = 1;
+ ret = bpf_dynptr_from_skb_meta(skb, INVALID_FLAGS, &meta);
+ if (ret != -EINVAL)
+ return 1;
+
+ err = 2;
+ ret = bpf_dynptr_from_skb_meta(skb, 0, &meta);
+ if (ret)
+ return 1;
+
+ err = 3;
+ ret = bpf_dynptr_read(&buf, 0, &meta, 0, INVALID_FLAGS);
+ if (ret != -EINVAL)
+ return 1;
+
+ err = 4;
+ ret = bpf_dynptr_write(&meta, 0, &buf, 0, INVALID_FLAGS);
+ if (ret != -EINVAL)
+ return 1;
+
+ err = 0;
+ return 1;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_adjust(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u32 bytes = 64;
+ __u32 off = 10;
+ __u32 trim = 15;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ err = bpf_ringbuf_reserve_dynptr(&ringbuf, bytes, 0, &ptr);
+ if (err) {
+ err = 1;
+ goto done;
+ }
+
+ if (bpf_dynptr_size(&ptr) != bytes) {
+ err = 2;
+ goto done;
+ }
+
+ /* Advance the dynptr by off */
+ err = bpf_dynptr_adjust(&ptr, off, bpf_dynptr_size(&ptr));
+ if (err) {
+ err = 3;
+ goto done;
+ }
+
+ if (bpf_dynptr_size(&ptr) != bytes - off) {
+ err = 4;
+ goto done;
+ }
+
+	/* Trim the dynptr */
+	err = bpf_dynptr_adjust(&ptr, off, trim);
+ if (err) {
+ err = 5;
+ goto done;
+ }
+
+ /* Check that the size was adjusted correctly */
+ if (bpf_dynptr_size(&ptr) != trim - off) {
+ err = 6;
+ goto done;
+ }
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_adjust_err(void *ctx)
+{
+ char write_data[45] = "hello there, world!!";
+ struct bpf_dynptr ptr;
+ __u32 size = 64;
+ __u32 off = 20;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
+ err = 1;
+ goto done;
+ }
+
+ /* Check that start can't be greater than end */
+ if (bpf_dynptr_adjust(&ptr, 5, 1) != -EINVAL) {
+ err = 2;
+ goto done;
+ }
+
+ /* Check that start can't be greater than size */
+ if (bpf_dynptr_adjust(&ptr, size + 1, size + 1) != -ERANGE) {
+ err = 3;
+ goto done;
+ }
+
+ /* Check that end can't be greater than size */
+ if (bpf_dynptr_adjust(&ptr, 0, size + 1) != -ERANGE) {
+ err = 4;
+ goto done;
+ }
+
+ if (bpf_dynptr_adjust(&ptr, off, size)) {
+ err = 5;
+ goto done;
+ }
+
+ /* Check that you can't write more bytes than available into the dynptr
+ * after you've adjusted it
+ */
+ if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
+ err = 6;
+ goto done;
+ }
+
+ /* Check that even after adjusting, submitting/discarding
+ * a ringbuf dynptr works
+ */
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+ return 0;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_zero_size_dynptr(void *ctx)
+{
+ char write_data = 'x', read_data;
+ struct bpf_dynptr ptr;
+ __u32 size = 64;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
+ err = 1;
+ goto done;
+ }
+
+ /* After this, the dynptr has a size of 0 */
+ if (bpf_dynptr_adjust(&ptr, size, size)) {
+ err = 2;
+ goto done;
+ }
+
+ /* Test that reading + writing non-zero bytes is not ok */
+ if (bpf_dynptr_read(&read_data, sizeof(read_data), &ptr, 0, 0) != -E2BIG) {
+ err = 3;
+ goto done;
+ }
+
+ if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
+ err = 4;
+ goto done;
+ }
+
+ /* Test that reading + writing 0 bytes from a 0-size dynptr is ok */
+ if (bpf_dynptr_read(&read_data, 0, &ptr, 0, 0)) {
+ err = 5;
+ goto done;
+ }
+
+ if (bpf_dynptr_write(&ptr, 0, &write_data, 0, 0)) {
+ err = 6;
+ goto done;
+ }
+
+ err = 0;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_is_null(void *ctx)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ __u64 size = 4;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ /* Pass in invalid flags, get back an invalid dynptr */
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 123, &ptr1) != -EINVAL) {
+ err = 1;
+ goto exit_early;
+ }
+
+ /* Test that the invalid dynptr is null */
+ if (!bpf_dynptr_is_null(&ptr1)) {
+ err = 2;
+ goto exit_early;
+ }
+
+ /* Get a valid dynptr */
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr2)) {
+ err = 3;
+ goto exit;
+ }
+
+ /* Test that the valid dynptr is not null */
+ if (bpf_dynptr_is_null(&ptr2)) {
+ err = 4;
+ goto exit;
+ }
+
+exit:
+ bpf_ringbuf_discard_dynptr(&ptr2, 0);
+exit_early:
+ bpf_ringbuf_discard_dynptr(&ptr1, 0);
+ return 0;
+}
+
+SEC("cgroup_skb/egress")
+int test_dynptr_is_rdonly(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ struct bpf_dynptr ptr3;
+
+ /* Pass in invalid flags, get back an invalid dynptr */
+ if (bpf_dynptr_from_skb(skb, 123, &ptr1) != -EINVAL) {
+ err = 1;
+ return 0;
+ }
+
+ /* Test that an invalid dynptr is_rdonly returns false */
+ if (bpf_dynptr_is_rdonly(&ptr1)) {
+ err = 2;
+ return 0;
+ }
+
+ /* Get a read-only dynptr */
+ if (bpf_dynptr_from_skb(skb, 0, &ptr2)) {
+ err = 3;
+ return 0;
+ }
+
+ /* Test that the dynptr is read-only */
+ if (!bpf_dynptr_is_rdonly(&ptr2)) {
+ err = 4;
+ return 0;
+ }
+
+ /* Get a read-writeable dynptr */
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr3)) {
+ err = 5;
+ goto done;
+ }
+
+	/* Test that the dynptr is not read-only */
+ if (bpf_dynptr_is_rdonly(&ptr3)) {
+ err = 6;
+ goto done;
+ }
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr3, 0);
+ return 0;
+}
+
+SEC("cgroup_skb/egress")
+int test_dynptr_clone(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ __u32 off = 2, size;
+
+ /* Get a dynptr */
+ if (bpf_dynptr_from_skb(skb, 0, &ptr1)) {
+ err = 1;
+ return 0;
+ }
+
+ if (bpf_dynptr_adjust(&ptr1, off, bpf_dynptr_size(&ptr1))) {
+ err = 2;
+ return 0;
+ }
+
+ /* Clone the dynptr */
+ if (bpf_dynptr_clone(&ptr1, &ptr2)) {
+ err = 3;
+ return 0;
+ }
+
+ size = bpf_dynptr_size(&ptr1);
+
+	/* Check that the clone has the same size and read-only status */
+ if (bpf_dynptr_size(&ptr2) != size) {
+ err = 4;
+ return 0;
+ }
+
+ if (bpf_dynptr_is_rdonly(&ptr2) != bpf_dynptr_is_rdonly(&ptr1)) {
+ err = 5;
+ return 0;
+ }
+
+ /* Advance and trim the original dynptr */
+ bpf_dynptr_adjust(&ptr1, 5, 5);
+
+	/* Check that only the original dynptr was affected, and the clone wasn't */
+ if (bpf_dynptr_size(&ptr2) != size) {
+ err = 6;
+ return 0;
+ }
+
+ return 0;
+}
+
+SEC("?cgroup_skb/egress")
+int test_dynptr_skb_no_buff(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ __u64 *data;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+ /* This may return NULL. SKB may require a buffer */
+ data = bpf_dynptr_slice(&ptr, 0, NULL, 1);
+
+ return !!data;
+}
+
+SEC("?cgroup_skb/egress")
+int test_dynptr_skb_strcmp(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ char *data;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+ /* This may return NULL. SKB may require a buffer */
+ data = bpf_dynptr_slice(&ptr, 0, NULL, 10);
+ if (data) {
+ bpf_strncmp(data, 10, "foo");
+ return 1;
+ }
+
+ return 1;
+}
+
+SEC("tp_btf/kfree_skb")
+int BPF_PROG(test_dynptr_skb_tp_btf, void *skb, void *location)
+{
+ __u8 write_data[2] = {1, 2};
+ struct bpf_dynptr ptr;
+ int ret;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+	/* since tp_btf skbs are read-only, writes should fail */
+ ret = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
+ if (ret != -EINVAL) {
+ err = 2;
+ return 1;
+ }
+
+ return 1;
+}
+
+static inline int bpf_memcmp(const char *a, const char *b, u32 size)
+{
+ int i;
+
+ bpf_for(i, 0, size) {
+ if (a[i] != b[i])
+ return a[i] < b[i] ? -1 : 1;
+ }
+ return 0;
+}
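+/* Like memcmp(), bpf_memcmp() above returns a negative, zero, or positive
+ * value. bpf_for() is used instead of a plain loop to keep the iteration
+ * bounded and verifier-friendly.
+ */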
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_copy(void *ctx)
+{
+ char data[] = "hello there, world!!";
+ char buf[32] = {'\0'};
+ __u32 sz = sizeof(data);
+ struct bpf_dynptr src, dst;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sz, 0, &src);
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sz, 0, &dst);
+
+ /* Test basic case of copying contiguous memory backed dynptrs */
+ err = bpf_dynptr_write(&src, 0, data, sz, 0);
+ err = err ?: bpf_dynptr_copy(&dst, 0, &src, 0, sz);
+ err = err ?: bpf_dynptr_read(buf, sz, &dst, 0, 0);
+ err = err ?: bpf_memcmp(data, buf, sz);
+
+ /* Test that offsets are handled correctly */
+ err = err ?: bpf_dynptr_copy(&dst, 3, &src, 5, sz - 5);
+ err = err ?: bpf_dynptr_read(buf, sz - 5, &dst, 3, 0);
+ err = err ?: bpf_memcmp(data + 5, buf, sz - 5);
+
+ bpf_ringbuf_discard_dynptr(&src, 0);
+ bpf_ringbuf_discard_dynptr(&dst, 0);
+ return 0;
+}
+
+SEC("xdp")
+int test_dynptr_copy_xdp(struct xdp_md *xdp)
+{
+ struct bpf_dynptr ptr_buf, ptr_xdp;
+ char data[] = "qwertyuiopasdfghjkl";
+ char buf[32] = {'\0'};
+ __u32 len = sizeof(data), xdp_data_size;
+ int i, chunks = 200;
+
+ /* ptr_xdp is backed by non-contiguous memory */
+ bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
+ xdp_data_size = bpf_dynptr_size(&ptr_xdp);
+ bpf_ringbuf_reserve_dynptr(&ringbuf, len * chunks, 0, &ptr_buf);
+
+ /* Destination dynptr is backed by non-contiguous memory */
+ bpf_for(i, 0, chunks) {
+ err = bpf_dynptr_write(&ptr_buf, i * len, data, len, 0);
+ if (err)
+ goto out;
+ }
+
+ err = bpf_dynptr_copy(&ptr_xdp, 0, &ptr_buf, 0, len * chunks);
+ if (err)
+ goto out;
+
+ bpf_for(i, 0, chunks) {
+ __builtin_memset(buf, 0, sizeof(buf));
+ err = bpf_dynptr_read(&buf, len, &ptr_xdp, i * len, 0);
+ if (err)
+ goto out;
+ if (bpf_memcmp(data, buf, len) != 0)
+ goto out;
+ }
+
+ /* Source dynptr is backed by non-contiguous memory */
+ __builtin_memset(buf, 0, sizeof(buf));
+ bpf_for(i, 0, chunks) {
+ err = bpf_dynptr_write(&ptr_buf, i * len, buf, len, 0);
+ if (err)
+ goto out;
+ }
+
+ err = bpf_dynptr_copy(&ptr_buf, 0, &ptr_xdp, 0, len * chunks);
+ if (err)
+ goto out;
+
+ bpf_for(i, 0, chunks) {
+ __builtin_memset(buf, 0, sizeof(buf));
+ err = bpf_dynptr_read(&buf, len, &ptr_buf, i * len, 0);
+ if (err)
+ goto out;
+ if (bpf_memcmp(data, buf, len) != 0)
+ goto out;
+ }
+
+ /* Both source and destination dynptrs are backed by non-contiguous memory */
+ err = bpf_dynptr_copy(&ptr_xdp, 2, &ptr_xdp, len, len * (chunks - 1));
+ if (err)
+ goto out;
+
+ bpf_for(i, 0, chunks - 1) {
+ __builtin_memset(buf, 0, sizeof(buf));
+ err = bpf_dynptr_read(&buf, len, &ptr_xdp, 2 + i * len, 0);
+ if (err)
+ goto out;
+ if (bpf_memcmp(data, buf, len) != 0)
+ goto out;
+ }
+
+ if (bpf_dynptr_copy(&ptr_xdp, xdp_data_size - 3000, &ptr_xdp, 0, len * chunks) != -E2BIG)
+ err = 1;
+
+out:
+ bpf_ringbuf_discard_dynptr(&ptr_buf, 0);
+ return XDP_DROP;
+}
+
+char memset_zero_data[] = "data to be zeroed";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_zero(void *ctx)
+{
+ __u32 data_sz = sizeof(memset_zero_data);
+ char zeroes[32] = {'\0'};
+ struct bpf_dynptr ptr;
+
+ err = bpf_dynptr_from_mem(memset_zero_data, data_sz, 0, &ptr);
+ err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, 0);
+ err = err ?: bpf_memcmp(zeroes, memset_zero_data, data_sz);
+
+ return 0;
+}
+
+#define DYNPTR_MEMSET_VAL 42
+
+char memset_notzero_data[] = "data to be overwritten";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_notzero(void *ctx)
+{
+ u32 data_sz = sizeof(memset_notzero_data);
+ struct bpf_dynptr ptr;
+ char expected[32];
+
+ __builtin_memset(expected, DYNPTR_MEMSET_VAL, data_sz);
+
+ err = bpf_dynptr_from_mem(memset_notzero_data, data_sz, 0, &ptr);
+ err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, DYNPTR_MEMSET_VAL);
+ err = err ?: bpf_memcmp(expected, memset_notzero_data, data_sz);
+
+ return 0;
+}
+
+char memset_zero_offset_data[] = "data to be zeroed partially";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_zero_offset(void *ctx)
+{
+ char expected[] = "data to \0\0\0\0eroed partially";
+ __u32 data_sz = sizeof(memset_zero_offset_data);
+ struct bpf_dynptr ptr;
+
+ err = bpf_dynptr_from_mem(memset_zero_offset_data, data_sz, 0, &ptr);
+ err = err ?: bpf_dynptr_memset(&ptr, 8, 4, 0);
+ err = err ?: bpf_memcmp(expected, memset_zero_offset_data, data_sz);
+
+ return 0;
+}
+
+char memset_zero_adjusted_data[] = "data to be zeroed partially";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_zero_adjusted(void *ctx)
+{
+ char expected[] = "data\0\0\0\0be zeroed partially";
+ __u32 data_sz = sizeof(memset_zero_adjusted_data);
+ struct bpf_dynptr ptr;
+
+ err = bpf_dynptr_from_mem(memset_zero_adjusted_data, data_sz, 0, &ptr);
+ err = err ?: bpf_dynptr_adjust(&ptr, 4, 8);
+ err = err ?: bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0);
+ err = err ?: bpf_memcmp(expected, memset_zero_adjusted_data, data_sz);
+
+ return 0;
+}
+
+char memset_overflow_data[] = "memset overflow data";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_overflow(void *ctx)
+{
+ __u32 data_sz = sizeof(memset_overflow_data);
+ struct bpf_dynptr ptr;
+ int ret;
+
+ err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr);
+ ret = bpf_dynptr_memset(&ptr, 0, data_sz + 1, 0);
+ if (ret != -E2BIG)
+ err = 1;
+
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_overflow_offset(void *ctx)
+{
+ __u32 data_sz = sizeof(memset_overflow_data);
+ struct bpf_dynptr ptr;
+ int ret;
+
+ err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr);
+ ret = bpf_dynptr_memset(&ptr, 1, data_sz, 0);
+ if (ret != -E2BIG)
+ err = 1;
+
+ return 0;
+}
+
+SEC("?cgroup_skb/egress")
+int test_dynptr_memset_readonly(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ int ret;
+
+ err = bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ /* cgroup skbs are read only, memset should fail */
+ ret = bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0);
+ if (ret != -EINVAL)
+ err = 1;
+
+ return 0;
+}
+
+#define min_t(type, x, y) ({ \
+ type __x = (x); \
+ type __y = (y); \
+ __x < __y ? __x : __y; })
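+/* For example, min_t(u32, sizeof(buf), data_sz - offset) below clamps the
+ * final chunk so that the last read never runs past the end of the packet.
+ */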
+
+SEC("xdp")
+int test_dynptr_memset_xdp_chunks(struct xdp_md *xdp)
+{
+ u32 data_sz, chunk_sz, offset = 0;
+ const int max_chunks = 200;
+ struct bpf_dynptr ptr_xdp;
+ char expected_buf[32];
+ char buf[32];
+ int i;
+
+ __builtin_memset(expected_buf, DYNPTR_MEMSET_VAL, sizeof(expected_buf));
+
+ /* ptr_xdp is backed by non-contiguous memory */
+ bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
+ data_sz = bpf_dynptr_size(&ptr_xdp);
+
+ err = bpf_dynptr_memset(&ptr_xdp, 0, data_sz, DYNPTR_MEMSET_VAL);
+ if (err) {
+		/* bpf_dynptr_memset() eventually calls bpf_xdp_pointer(),
+		 * which returns -EFAULT when data_sz is greater than
+		 * 0xffff. With a 64K page size, data_sz exceeds that
+		 * limit, so the error is expected: zero it out and
+		 * return success.
+		 */
+ if (data_sz >= PAGE_SIZE_64K)
+ err = 0;
+ goto out;
+ }
+
+ bpf_for(i, 0, max_chunks) {
+ offset = i * sizeof(buf);
+ if (offset >= data_sz)
+ goto out;
+ chunk_sz = min_t(u32, sizeof(buf), data_sz - offset);
+ err = bpf_dynptr_read(&buf, chunk_sz, &ptr_xdp, offset, 0);
+ if (err)
+ goto out;
+ err = bpf_memcmp(buf, expected_buf, sizeof(buf));
+ if (err)
+ goto out;
+ }
+out:
+ return XDP_DROP;
+}
+
+void *user_ptr;
+/* Contains a copy of the data pointed to by user_ptr.
+ * The size of 384 is large enough not to fit into a single kernel chunk
+ * when copying, yet smaller than the maximum BPF stack size (512).
+ */
+char expected_str[384];
+__u32 test_len[7] = {0/* placeholder */, 0, 1, 2, 255, 256, 257};
+
+typedef int (*bpf_read_dynptr_fn_t)(struct bpf_dynptr *dptr, u64 off,
+ u64 size, const void *unsafe_ptr);
+
+/* Returns the offset just before the end of the maximum-sized xdp fragment.
+ * Any write larger than 32 bytes will be split across two fragments.
+ */
+__u32 xdp_near_frag_end_offset(void)
+{
+ const __u32 headroom = 256;
+ const __u32 max_frag_size = __PAGE_SIZE - headroom - sizeof(struct skb_shared_info);
+
+ /* 32 bytes before the approximate end of the fragment */
+ return max_frag_size - 32;
+}
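+/* A worked example, assuming a 4 KiB page and a 320-byte struct
+ * skb_shared_info (both of which vary by architecture and config):
+ * max_frag_size = 4096 - 256 - 320 = 3520, so the returned offset is
+ * 3488 and any write longer than 32 bytes starting there crosses into
+ * the next fragment.
+ */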
+
+/* Use __always_inline on test_dynptr_probe[_str][_xdp]() and on callbacks
+ * of type bpf_read_dynptr_fn_t to prevent the compiler from generating
+ * indirect calls, which would make the program fail to load with an
+ * "unknown opcode" error.
+ */
+static __always_inline void test_dynptr_probe(void *ptr, bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
+{
+ char buf[sizeof(expected_str)];
+ struct bpf_dynptr ptr_buf;
+ int i;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return;
+
+ err = bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf);
+
+ bpf_for(i, 0, ARRAY_SIZE(test_len)) {
+ __u32 len = test_len[i];
+
+ err = err ?: bpf_read_dynptr_fn(&ptr_buf, 0, test_len[i], ptr);
+ if (len > sizeof(buf))
+ break;
+ err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0);
+
+ if (err || bpf_memcmp(expected_str, buf, len))
+ err = 1;
+
+ /* Reset buffer and dynptr */
+ __builtin_memset(buf, 0, sizeof(buf));
+ err = err ?: bpf_dynptr_write(&ptr_buf, 0, buf, len, 0);
+ }
+ bpf_ringbuf_discard_dynptr(&ptr_buf, 0);
+}
+
+static __always_inline void test_dynptr_probe_str(void *ptr,
+ bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
+{
+ char buf[sizeof(expected_str)];
+ struct bpf_dynptr ptr_buf;
+ __u32 cnt, i;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf);
+
+ bpf_for(i, 0, ARRAY_SIZE(test_len)) {
+ __u32 len = test_len[i];
+
+ cnt = bpf_read_dynptr_fn(&ptr_buf, 0, len, ptr);
+ if (cnt != len)
+ err = 1;
+
+ if (len > sizeof(buf))
+ continue;
+ err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0);
+ if (!len)
+ continue;
+ if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0')
+ err = 1;
+ }
+ bpf_ringbuf_discard_dynptr(&ptr_buf, 0);
+}
+
+static __always_inline void test_dynptr_probe_xdp(struct xdp_md *xdp, void *ptr,
+ bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
+{
+ struct bpf_dynptr ptr_xdp;
+ char buf[sizeof(expected_str)];
+ __u32 off, i;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return;
+
+ off = xdp_near_frag_end_offset();
+ err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
+
+ bpf_for(i, 0, ARRAY_SIZE(test_len)) {
+ __u32 len = test_len[i];
+
+ err = err ?: bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr);
+ if (len > sizeof(buf))
+ continue;
+ err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0);
+ if (err || bpf_memcmp(expected_str, buf, len))
+ err = 1;
+ /* Reset buffer and dynptr */
+ __builtin_memset(buf, 0, sizeof(buf));
+ err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0);
+ }
+}
+
+static __always_inline void test_dynptr_probe_str_xdp(struct xdp_md *xdp, void *ptr,
+ bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
+{
+ struct bpf_dynptr ptr_xdp;
+ char buf[sizeof(expected_str)];
+ __u32 cnt, off, i;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return;
+
+ off = xdp_near_frag_end_offset();
+ err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
+ if (err)
+ return;
+
+ bpf_for(i, 0, ARRAY_SIZE(test_len)) {
+ __u32 len = test_len[i];
+
+ cnt = bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr);
+ if (cnt != len)
+ err = 1;
+
+ if (len > sizeof(buf))
+ continue;
+ err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0);
+
+ if (!len)
+ continue;
+ if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0')
+ err = 1;
+
+ __builtin_memset(buf, 0, sizeof(buf));
+ err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0);
+ }
+}
+
+SEC("xdp")
+int test_probe_read_user_dynptr(struct xdp_md *xdp)
+{
+ test_dynptr_probe(user_ptr, bpf_probe_read_user_dynptr);
+ if (!err)
+ test_dynptr_probe_xdp(xdp, user_ptr, bpf_probe_read_user_dynptr);
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int test_probe_read_kernel_dynptr(struct xdp_md *xdp)
+{
+ test_dynptr_probe(expected_str, bpf_probe_read_kernel_dynptr);
+ if (!err)
+ test_dynptr_probe_xdp(xdp, expected_str, bpf_probe_read_kernel_dynptr);
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int test_probe_read_user_str_dynptr(struct xdp_md *xdp)
+{
+ test_dynptr_probe_str(user_ptr, bpf_probe_read_user_str_dynptr);
+ if (!err)
+ test_dynptr_probe_str_xdp(xdp, user_ptr, bpf_probe_read_user_str_dynptr);
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int test_probe_read_kernel_str_dynptr(struct xdp_md *xdp)
+{
+ test_dynptr_probe_str(expected_str, bpf_probe_read_kernel_str_dynptr);
+ if (!err)
+ test_dynptr_probe_str_xdp(xdp, expected_str, bpf_probe_read_kernel_str_dynptr);
+ return XDP_PASS;
+}
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int test_copy_from_user_dynptr(void *ctx)
+{
+ test_dynptr_probe(user_ptr, bpf_copy_from_user_dynptr);
+ return 0;
+}
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int test_copy_from_user_str_dynptr(void *ctx)
+{
+ test_dynptr_probe_str(user_ptr, bpf_copy_from_user_str_dynptr);
+ return 0;
+}
+
+static int bpf_copy_data_from_user_task(struct bpf_dynptr *dptr, u64 off,
+ u64 size, const void *unsafe_ptr)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+
+ return bpf_copy_from_user_task_dynptr(dptr, off, size, unsafe_ptr, task);
+}
+
+static int bpf_copy_data_from_user_task_str(struct bpf_dynptr *dptr, u64 off,
+ u64 size, const void *unsafe_ptr)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+
+ return bpf_copy_from_user_task_str_dynptr(dptr, off, size, unsafe_ptr, task);
+}
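+/* The two wrappers above only bind the extra task argument so that the
+ * *_from_user_task_*_dynptr kfuncs match the bpf_read_dynptr_fn_t
+ * signature expected by the shared test helpers.
+ */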
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int test_copy_from_user_task_dynptr(void *ctx)
+{
+ test_dynptr_probe(user_ptr, bpf_copy_data_from_user_task);
+ return 0;
+}
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int test_copy_from_user_task_str_dynptr(void *ctx)
+{
+ test_dynptr_probe_str(user_ptr, bpf_copy_data_from_user_task_str);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/empty_skb.c b/tools/testing/selftests/bpf/progs/empty_skb.c
new file mode 100644
index 000000000000..4b0cd6753251
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/empty_skb.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+char _license[] SEC("license") = "GPL";
+
+int ifindex;
+int ret;
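+/* ifindex is presumably set by the userspace test harness before the run;
+ * ret reports the bpf_clone_redirect() result for each program below.
+ */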
+
+SEC("lwt_xmit")
+int redirect_ingress(struct __sk_buff *skb)
+{
+ ret = bpf_clone_redirect(skb, ifindex, BPF_F_INGRESS);
+ return 0;
+}
+
+SEC("lwt_xmit")
+int redirect_egress(struct __sk_buff *skb)
+{
+ ret = bpf_clone_redirect(skb, ifindex, 0);
+ return 0;
+}
+
+SEC("tc")
+int tc_redirect_ingress(struct __sk_buff *skb)
+{
+ ret = bpf_clone_redirect(skb, ifindex, BPF_F_INGRESS);
+ return 0;
+}
+
+SEC("tc")
+int tc_redirect_egress(struct __sk_buff *skb)
+{
+ ret = bpf_clone_redirect(skb, ifindex, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/epilogue_exit.c b/tools/testing/selftests/bpf/progs/epilogue_exit.c
new file mode 100644
index 000000000000..35fec7c75bef
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/epilogue_exit.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__success
+/* save __u64 *ctx to stack */
+__xlated("0: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("1: r1 = *(u64 *)(r1 +0)")
+__xlated("2: r2 = *(u64 *)(r1 +0)")
+__xlated("3: r3 = 0")
+__xlated("4: r4 = 1")
+__xlated("5: if r2 == 0x0 goto pc+10")
+__xlated("6: r0 = 0")
+__xlated("7: *(u64 *)(r1 +0) = r3")
+/* epilogue */
+__xlated("8: r1 = *(u64 *)(r10 -8)")
+__xlated("9: r1 = *(u64 *)(r1 +0)")
+__xlated("10: r6 = *(u64 *)(r1 +0)")
+__xlated("11: r6 += 10000")
+__xlated("12: *(u64 *)(r1 +0) = r6")
+__xlated("13: r0 = r6")
+__xlated("14: r0 *= 2")
+__xlated("15: exit")
+/* 2nd part of the main prog after the first exit */
+__xlated("16: *(u64 *)(r1 +0) = r4")
+__xlated("17: r0 = 1")
+/* Clear r1 so that an off-by-one error in the jump would be
+ * caught: the exit must jump back to the very beginning of the
+ * epilogue, which re-initializes r1 with the ctx ptr.
+ */
+__xlated("18: r1 = 0")
+__xlated("19: gotol pc-12")
+SEC("struct_ops/test_epilogue_exit")
+__naked int test_epilogue_exit(void)
+{
+ asm volatile (
+ "r1 = *(u64 *)(r1 +0);"
+ "r2 = *(u64 *)(r1 +0);"
+ "r3 = 0;"
+ "r4 = 1;"
+ "if r2 == 0 goto +3;"
+ "r0 = 0;"
+ "*(u64 *)(r1 + 0) = r3;"
+ "exit;"
+ "*(u64 *)(r1 + 0) = r4;"
+ "r0 = 1;"
+ "r1 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops epilogue_exit = {
+ .test_epilogue = (void *)test_epilogue_exit,
+};
+
+SEC("syscall")
+__retval(20000)
+int syscall_epilogue_exit0(void *ctx)
+{
+ struct st_ops_args args = { .a = 1 };
+
+ return bpf_kfunc_st_ops_test_epilogue(&args);
+}
+
+SEC("syscall")
+__retval(20002)
+int syscall_epilogue_exit1(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_epilogue(&args);
+}
diff --git a/tools/testing/selftests/bpf/progs/epilogue_tailcall.c b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c
new file mode 100644
index 000000000000..153514691ba4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+static __noinline __used int subprog(struct st_ops_args *args)
+{
+ args->a += 1;
+ return args->a;
+}
+
+SEC("struct_ops/test_epilogue_subprog")
+int BPF_PROG(test_epilogue_subprog, struct st_ops_args *args)
+{
+ subprog(args);
+ return args->a;
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+ __array(values, void (void));
+} epilogue_map SEC(".maps") = {
+ .values = {
+ [0] = (void *)&test_epilogue_subprog,
+ }
+};
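+/* epilogue_map holds test_epilogue_subprog at slot 0 so that
+ * test_epilogue_tailcall below can tail-call into it.
+ */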
+
+SEC("struct_ops/test_epilogue_tailcall")
+int test_epilogue_tailcall(unsigned long long *ctx)
+{
+ bpf_tail_call(ctx, &epilogue_map, 0);
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops epilogue_tailcall = {
+ .test_epilogue = (void *)test_epilogue_tailcall,
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops epilogue_subprog = {
+ .test_epilogue = (void *)test_epilogue_subprog,
+};
+
+SEC("syscall")
+int syscall_epilogue_tailcall(struct st_ops_args *args)
+{
+ return bpf_kfunc_st_ops_test_epilogue(args);
+}
diff --git a/tools/testing/selftests/bpf/progs/err.h b/tools/testing/selftests/bpf/progs/err.h
new file mode 100644
index 000000000000..38529779a236
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/err.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ERR_H__
+#define __ERR_H__
+
+#define MAX_ERRNO 4095
+#define IS_ERR_VALUE(x) ((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
+
+#define __STR(x) #x
+
+#define set_if_not_errno_or_zero(x, y) \
+({ \
+ asm volatile ("if %0 s< -4095 goto +1\n" \
+ "if %0 s<= 0 goto +1\n" \
+ "%0 = " __STR(y) "\n" \
+ : "+r"(x)); \
+})
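+/* The macro above keeps x as-is when it is zero or a valid -errno value
+ * (i.e. in [-4095, 0]) and replaces it with y otherwise. A minimal,
+ * hypothetical usage sketch:
+ *
+ *	long ret = some_kfunc();	// some_kfunc() is a stand-in
+ *	set_if_not_errno_or_zero(ret, -EINVAL);
+ */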
+
+static inline int IS_ERR_OR_NULL(const void *ptr)
+{
+ return !ptr || IS_ERR_VALUE((unsigned long)ptr);
+}
+
+static inline long PTR_ERR(const void *ptr)
+{
+ return (long) ptr;
+}
+
+#endif /* __ERR_H__ */
diff --git a/tools/testing/selftests/bpf/progs/exceptions.c b/tools/testing/selftests/bpf/progs/exceptions.c
new file mode 100644
index 000000000000..f09cd14d8e04
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+#ifndef ETH_P_IP
+#define ETH_P_IP 0x0800
+#endif
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 4);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+static __noinline int static_func(u64 i)
+{
+ bpf_throw(32);
+ return i;
+}
+
+__noinline int global2static_simple(u64 i)
+{
+ static_func(i + 2);
+ return i - 1;
+}
+
+__noinline int global2static(u64 i)
+{
+ if (i == ETH_P_IP)
+ bpf_throw(16);
+ return static_func(i);
+}
+
+static __noinline int static2global(u64 i)
+{
+ return global2static(i) + i;
+}
+
+SEC("tc")
+int exception_throw_always_1(struct __sk_buff *ctx)
+{
+ bpf_throw(64);
+ return 0;
+}
+
+/* In this case, the global function will never be observed executing after
+ * the call to the static subprog, hence the verifier will DCE the remaining
+ * instructions. Ensure we are resilient to that.
+ */
+SEC("tc")
+int exception_throw_always_2(struct __sk_buff *ctx)
+{
+ return global2static_simple(ctx->protocol);
+}
+
+SEC("tc")
+int exception_throw_unwind_1(struct __sk_buff *ctx)
+{
+ return static2global(bpf_ntohs(ctx->protocol));
+}
+
+SEC("tc")
+int exception_throw_unwind_2(struct __sk_buff *ctx)
+{
+ return static2global(bpf_ntohs(ctx->protocol) - 1);
+}
+
+SEC("tc")
+int exception_throw_default(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 1;
+}
+
+SEC("tc")
+int exception_throw_default_value(struct __sk_buff *ctx)
+{
+ bpf_throw(5);
+ return 1;
+}
+
+SEC("tc")
+int exception_tail_call_target(struct __sk_buff *ctx)
+{
+ bpf_throw(16);
+ return 0;
+}
+
+static __noinline
+int exception_tail_call_subprog(struct __sk_buff *ctx)
+{
+ volatile int ret = 10;
+
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return ret;
+}
+
+SEC("tc")
+int exception_tail_call(struct __sk_buff *ctx) {
+ volatile int ret = 0;
+
+ ret = exception_tail_call_subprog(ctx);
+ return ret + 8;
+}
+
+__noinline int exception_ext_global(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ return ret;
+}
+
+static __noinline int exception_ext_static(struct __sk_buff *ctx)
+{
+ return exception_ext_global(ctx);
+}
+
+SEC("tc")
+int exception_ext(struct __sk_buff *ctx)
+{
+ return exception_ext_static(ctx);
+}
+
+__noinline int exception_cb_mod_global(u64 cookie)
+{
+ volatile int ret = 0;
+
+ return ret;
+}
+
+/* Example of how the exception callback supplied during verification can
+ * still introduce extensions by calling into dummy global functions, and
+ * thereby alter runtime behavior.
+ *
+ * Right now we don't allow freplace attachment to the exception callback
+ * itself, but if the need arises it is technically feasible to relax this
+ * restriction in the future.
+ */
+__noinline int exception_cb_mod(u64 cookie)
+{
+ return exception_cb_mod_global(cookie) + cookie + 10;
+}
+
+SEC("tc")
+__exception_cb(exception_cb_mod)
+int exception_ext_mod_cb_runtime(struct __sk_buff *ctx)
+{
+ bpf_throw(25);
+ return 0;
+}
+
+__noinline static int subprog(struct __sk_buff *ctx)
+{
+ return bpf_ktime_get_ns();
+}
+
+__noinline static int throwing_subprog(struct __sk_buff *ctx)
+{
+ if (ctx->tstamp)
+ bpf_throw(0);
+ return bpf_ktime_get_ns();
+}
+
+__noinline int global_subprog(struct __sk_buff *ctx)
+{
+ return bpf_ktime_get_ns();
+}
+
+__noinline int throwing_global_subprog(struct __sk_buff *ctx)
+{
+ if (ctx->tstamp)
+ bpf_throw(0);
+ return bpf_ktime_get_ns();
+}
+
+SEC("tc")
+int exception_throw_subprog(struct __sk_buff *ctx)
+{
+ switch (ctx->protocol) {
+ case 1:
+ return subprog(ctx);
+ case 2:
+ return global_subprog(ctx);
+ case 3:
+ return throwing_subprog(ctx);
+ case 4:
+ return throwing_global_subprog(ctx);
+ default:
+ break;
+ }
+ bpf_throw(1);
+ return 0;
+}
+
+__noinline int assert_nz_gfunc(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert(cookie != 0);
+ return 0;
+}
+
+__noinline int assert_zero_gfunc(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert(bpf_cmp_unlikely(cookie, ==, 0));
+ return 0;
+}
+
+__noinline int assert_neg_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert(bpf_cmp_unlikely(cookie, <, 0));
+ return 0;
+}
+
+__noinline int assert_pos_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert(bpf_cmp_unlikely(cookie, >, 0));
+ return 0;
+}
+
+__noinline int assert_negeq_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert(bpf_cmp_unlikely(cookie, <=, -1));
+ return 0;
+}
+
+__noinline int assert_poseq_gfunc(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert(bpf_cmp_unlikely(cookie, >=, 1));
+ return 0;
+}
+
+__noinline int assert_nz_gfunc_with(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert_with(cookie != 0, cookie + 100);
+ return 0;
+}
+
+__noinline int assert_zero_gfunc_with(u64 c)
+{
+ volatile u64 cookie = c;
+
+ bpf_assert_with(bpf_cmp_unlikely(cookie, ==, 0), cookie + 100);
+ return 0;
+}
+
+__noinline int assert_neg_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_with(bpf_cmp_unlikely(cookie, <, 0), cookie + 100);
+ return 0;
+}
+
+__noinline int assert_pos_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_with(bpf_cmp_unlikely(cookie, >, 0), cookie + 100);
+ return 0;
+}
+
+__noinline int assert_negeq_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_with(bpf_cmp_unlikely(cookie, <=, -1), cookie + 100);
+ return 0;
+}
+
+__noinline int assert_poseq_gfunc_with(s64 c)
+{
+ volatile s64 cookie = c;
+
+ bpf_assert_with(bpf_cmp_unlikely(cookie, >=, 1), cookie + 100);
+ return 0;
+}
+
+#define check_assert(name, cookie, tag) \
+SEC("tc") \
+int exception##tag##name(struct __sk_buff *ctx) \
+{ \
+ return name(cookie) + 1; \
+}
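+/* For illustration, check_assert(assert_nz_gfunc, 5, _) expands to:
+ *
+ *	SEC("tc")
+ *	int exception_assert_nz_gfunc(struct __sk_buff *ctx)
+ *	{
+ *		return assert_nz_gfunc(5) + 1;
+ *	}
+ */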
+
+check_assert(assert_nz_gfunc, 5, _);
+check_assert(assert_zero_gfunc, 0, _);
+check_assert(assert_neg_gfunc, -100, _);
+check_assert(assert_pos_gfunc, 100, _);
+check_assert(assert_negeq_gfunc, -1, _);
+check_assert(assert_poseq_gfunc, 1, _);
+
+check_assert(assert_nz_gfunc_with, 5, _);
+check_assert(assert_zero_gfunc_with, 0, _);
+check_assert(assert_neg_gfunc_with, -100, _);
+check_assert(assert_pos_gfunc_with, 100, _);
+check_assert(assert_negeq_gfunc_with, -1, _);
+check_assert(assert_poseq_gfunc_with, 1, _);
+
+check_assert(assert_nz_gfunc, 0, _bad_);
+check_assert(assert_zero_gfunc, 5, _bad_);
+check_assert(assert_neg_gfunc, 100, _bad_);
+check_assert(assert_pos_gfunc, -100, _bad_);
+check_assert(assert_negeq_gfunc, 1, _bad_);
+check_assert(assert_poseq_gfunc, -1, _bad_);
+
+check_assert(assert_nz_gfunc_with, 0, _bad_);
+check_assert(assert_zero_gfunc_with, 5, _bad_);
+check_assert(assert_neg_gfunc_with, 100, _bad_);
+check_assert(assert_pos_gfunc_with, -100, _bad_);
+check_assert(assert_negeq_gfunc_with, 1, _bad_);
+check_assert(assert_poseq_gfunc_with, -1, _bad_);
+
+SEC("tc")
+int exception_assert_range(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range(time, 0, ~0ULL);
+ return 1;
+}
+
+SEC("tc")
+int exception_assert_range_with(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range_with(time, 0, ~0ULL, 10);
+ return 1;
+}
+
+SEC("tc")
+int exception_bad_assert_range(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range(time, -100, 100);
+ return 1;
+}
+
+SEC("tc")
+int exception_bad_assert_range_with(struct __sk_buff *ctx)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ bpf_assert_range_with(time, -1000, 1000, 10);
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/exceptions_assert.c b/tools/testing/selftests/bpf/progs/exceptions_assert.c
new file mode 100644
index 000000000000..a01c2736890f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions_assert.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <limits.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+#define check_assert(type, op, name, value) \
+ SEC("?tc") \
+ __log_level(2) __failure \
+ int check_assert_##name(void *ctx) \
+ { \
+ type num = bpf_ktime_get_ns(); \
+ bpf_assert(bpf_cmp_unlikely(num, op, value)); \
+ return *(u64 *)num; \
+ }
+
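+/* An illustrative expansion of the macro above (a sketch, not part of
+ * the test): check_assert(s64, ==, eq_zero, 0) becomes
+ *
+ *	SEC("?tc")
+ *	__log_level(2) __failure
+ *	int check_assert_eq_zero(void *ctx)
+ *	{
+ *		s64 num = bpf_ktime_get_ns();
+ *		bpf_assert(bpf_cmp_unlikely(num, ==, 0));
+ *		return *(u64 *)num;
+ *	}
+ *
+ * Dereferencing a scalar guarantees a verifier failure, and the __msg()
+ * annotations below match the register state that the preceding
+ * assertion is expected to pin in the log_level(2) output.
+ */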
+__msg(": R0=0xffffffff80000000")
+check_assert(s64, ==, eq_int_min, INT_MIN);
+__msg(": R0=0x7fffffff")
+check_assert(s64, ==, eq_int_max, INT_MAX);
+__msg(": R0=0")
+check_assert(s64, ==, eq_zero, 0);
+__msg(": R0=0x8000000000000000 R1=0x8000000000000000")
+check_assert(s64, ==, eq_llong_min, LLONG_MIN);
+__msg(": R0=0x7fffffffffffffff R1=0x7fffffffffffffff")
+check_assert(s64, ==, eq_llong_max, LLONG_MAX);
+
+__msg(": R0=scalar(id=1,smax=0x7ffffffe)")
+check_assert(s64, <, lt_pos, INT_MAX);
+__msg(": R0=scalar(id=1,smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
+check_assert(s64, <, lt_zero, 0);
+__msg(": R0=scalar(id=1,smax=0xffffffff7fffffff")
+check_assert(s64, <, lt_neg, INT_MIN);
+
+__msg(": R0=scalar(id=1,smax=0x7fffffff)")
+check_assert(s64, <=, le_pos, INT_MAX);
+__msg(": R0=scalar(id=1,smax=0)")
+check_assert(s64, <=, le_zero, 0);
+__msg(": R0=scalar(id=1,smax=0xffffffff80000000")
+check_assert(s64, <=, le_neg, INT_MIN);
+
+__msg(": R0=scalar(id=1,smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, >, gt_pos, INT_MAX);
+__msg(": R0=scalar(id=1,smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, >, gt_zero, 0);
+__msg(": R0=scalar(id=1,smin=0xffffffff80000001")
+check_assert(s64, >, gt_neg, INT_MIN);
+
+__msg(": R0=scalar(id=1,smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, >=, ge_pos, INT_MAX);
+__msg(": R0=scalar(id=1,smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, >=, ge_zero, 0);
+__msg(": R0=scalar(id=1,smin=0xffffffff80000000")
+check_assert(s64, >=, ge_neg, INT_MIN);
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R0=0 R1=ctx() R2=scalar(smin=0xffffffff80000002,smax=smax32=0x7ffffffd,smin32=0x80000002) R10=fp0")
+int check_assert_range_s64(struct __sk_buff *ctx)
+{
+ struct bpf_sock *sk = ctx->sk;
+ s64 num;
+
+ _Static_assert(_Generic((sk->rx_queue_mapping), s32: 1, default: 0), "type match");
+ if (!sk)
+ return 0;
+ num = sk->rx_queue_mapping;
+ bpf_assert_range(num, INT_MIN + 2, INT_MAX - 2);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R1=ctx() R2=scalar(smin=umin=smin32=umin32=4096,smax=umax=smax32=umax32=8192,var_off=(0x0; 0x3fff))")
+int check_assert_range_u64(struct __sk_buff *ctx)
+{
+ u64 num = ctx->len;
+
+ bpf_assert_range(num, 4096, 8192);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R0=0 R1=ctx() R2=4096 R10=fp0")
+int check_assert_single_range_s64(struct __sk_buff *ctx)
+{
+ struct bpf_sock *sk = ctx->sk;
+ s64 num;
+
+ _Static_assert(_Generic((sk->rx_queue_mapping), s32: 1, default: 0), "type match");
+ if (!sk)
+ return 0;
+ num = sk->rx_queue_mapping;
+
+ bpf_assert_range(num, 4096, 4096);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R1=ctx() R2=4096 R10=fp0")
+int check_assert_single_range_u64(struct __sk_buff *ctx)
+{
+ u64 num = ctx->len;
+
+ bpf_assert_range(num, 4096, 4096);
+ return *((u8 *)ctx + num);
+}
+
+SEC("?tc")
+__log_level(2) __failure
+__msg(": R1=pkt(off=64,r=64) R2=pkt_end() R6=pkt(r=64) R10=fp0")
+int check_assert_generic(struct __sk_buff *ctx)
+{
+ u8 *data_end = (void *)(long)ctx->data_end;
+ u8 *data = (void *)(long)ctx->data;
+
+ bpf_assert(data + 64 <= data_end);
+ return data[128];
+}
+
+SEC("?fentry/bpf_check")
+__failure __msg("At program exit the register R1 has smin=64 smax=64")
+int check_assert_with_return(void *ctx)
+{
+ bpf_assert_with(!ctx, 64);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/exceptions_ext.c b/tools/testing/selftests/bpf/progs/exceptions_ext.c
new file mode 100644
index 000000000000..743c05185d9b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions_ext.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_experimental.h"
+
+SEC("?fentry")
+int pfentry(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fentry")
+int throwing_fentry(void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline int exception_cb(u64 cookie)
+{
+ return cookie + 64;
+}
+
+SEC("?freplace")
+int extension(struct __sk_buff *ctx)
+{
+ return 0;
+}
+
+SEC("?freplace")
+__exception_cb(exception_cb)
+int throwing_exception_cb_extension(u64 cookie)
+{
+ bpf_throw(32);
+ return 0;
+}
+
+SEC("?freplace")
+__exception_cb(exception_cb)
+int throwing_extension(struct __sk_buff *ctx)
+{
+ bpf_throw(64);
+ return 0;
+}
+
+SEC("?fexit")
+int pfexit(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fexit")
+int throwing_fexit(void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?fmod_ret")
+int pfmod_ret(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fmod_ret")
+int throwing_fmod_ret(void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/exceptions_fail.c b/tools/testing/selftests/bpf/progs/exceptions_fail.c
new file mode 100644
index 000000000000..8a0fdff89927
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exceptions_fail.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+extern void bpf_rcu_read_lock(void) __ksym;
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+
+struct foo {
+ struct bpf_rb_node node;
+};
+
+struct hmap_elem {
+ struct bpf_timer timer;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 64);
+ __type(key, int);
+ __type(value, struct hmap_elem);
+} hmap SEC(".maps");
+
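+/* Both variables live in the same ".bss.A" section via private(A), so the
+ * spin lock and the rbtree root end up in the same map and the verifier
+ * can associate the lock with the tree it protects.
+ */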
+private(A) struct bpf_spin_lock lock;
+private(A) struct bpf_rb_root rbtree __contains(foo, node);
+
+__noinline void *exception_cb_bad_ret_type(u64 cookie)
+{
+ return NULL;
+}
+
+__noinline int exception_cb_bad_arg_0(void)
+{
+ return 0;
+}
+
+__noinline int exception_cb_bad_arg_2(int a, int b)
+{
+ return 0;
+}
+
+__noinline int exception_cb_ok_arg_small(int a)
+{
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_bad_ret_type)
+__failure __msg("Global function exception_cb_bad_ret_type() doesn't return scalar.")
+int reject_exception_cb_type_1(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_bad_arg_0)
+__failure __msg("exception cb only supports single integer argument")
+int reject_exception_cb_type_2(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_bad_arg_2)
+__failure __msg("exception cb only supports single integer argument")
+int reject_exception_cb_type_3(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb_ok_arg_small)
+__success
+int reject_exception_cb_type_4(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline
+static int timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback subprog")
+int reject_async_callback_throw(struct __sk_buff *ctx)
+{
+ struct hmap_elem *elem;
+
+ elem = bpf_map_lookup_elem(&hmap, &(int){0});
+ if (!elem)
+ return 0;
+ return bpf_timer_set_callback(&elem->timer, timer_cb);
+}
+
+__noinline static int subprog_lock(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_spin_lock(&lock);
+ if (ctx->len)
+ bpf_throw(0);
+ return ret;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_with_lock(void *ctx)
+{
+ bpf_spin_lock(&lock);
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_subprog_with_lock(void *ctx)
+{
+ return subprog_lock(ctx);
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region")
+int reject_with_rcu_read_lock(void *ctx)
+{
+ bpf_rcu_read_lock();
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int throwing_subprog(struct __sk_buff *ctx)
+{
+ if (ctx->len)
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region")
+int reject_subprog_with_rcu_read_lock(void *ctx)
+{
+ bpf_rcu_read_lock();
+ return throwing_subprog(ctx);
+}
+
+static bool rbless(struct bpf_rb_node *n1, const struct bpf_rb_node *n2)
+{
+ bpf_throw(0);
+ return true;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_with_rbtree_add_throw(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&lock);
+ bpf_rbtree_add(&rbtree, &f->node, rbless);
+ bpf_spin_unlock(&lock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference")
+int reject_with_reference(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int subprog_ref(struct __sk_buff *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int subprog_cb_ref(u32 i, void *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference")
+int reject_with_cb_reference(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_loop(5, subprog_cb_ref, NULL, 0);
+ bpf_obj_drop(f);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback")
+int reject_with_cb(void *ctx)
+{
+ bpf_loop(5, subprog_cb_ref, NULL, 0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference")
+int reject_with_subprog_reference(void *ctx)
+{
+ return subprog_ref(ctx) + 1;
+}
+
+__noinline int throwing_exception_cb(u64 c)
+{
+ bpf_throw(0);
+ return c;
+}
+
+__noinline int exception_cb1(u64 c)
+{
+ return c;
+}
+
+__noinline int exception_cb2(u64 c)
+{
+ return c;
+}
+
+static __noinline int static_func(struct __sk_buff *ctx)
+{
+ return exception_cb1(ctx->tstamp);
+}
+
+__noinline int global_func(struct __sk_buff *ctx)
+{
+ return exception_cb1(ctx->tstamp);
+}
+
+SEC("?tc")
+__exception_cb(throwing_exception_cb)
+__failure __msg("cannot be called from callback subprog")
+int reject_throwing_exception_cb(struct __sk_buff *ctx)
+{
+ return 0;
+}
+
+SEC("?tc")
+__exception_cb(exception_cb1)
+__failure __msg("cannot call exception cb directly")
+int reject_exception_cb_call_global_func(struct __sk_buff *ctx)
+{
+ return global_func(ctx);
+}
+
+SEC("?tc")
+__exception_cb(exception_cb1)
+__failure __msg("cannot call exception cb directly")
+int reject_exception_cb_call_static_func(struct __sk_buff *ctx)
+{
+ return static_func(ctx);
+}
+
+SEC("?tc")
+__exception_cb(exception_cb1)
+__exception_cb(exception_cb2)
+__failure __msg("multiple exception callback tags for main subprog")
+int reject_multiple_exception_cb(struct __sk_buff *ctx)
+{
+ bpf_throw(0);
+ return 16;
+}
+
+__noinline int exception_cb_bad_ret(u64 c)
+{
+ return c;
+}
+
+SEC("?fentry/bpf_check")
+__exception_cb(exception_cb_bad_ret)
+__failure __msg("At program exit the register R0 has unknown scalar value should")
+int reject_set_exception_cb_bad_ret1(void *ctx)
+{
+ return 0;
+}
+
+SEC("?fentry/bpf_check")
+__failure __msg("At program exit the register R1 has smin=64 smax=64 should")
+int reject_set_exception_cb_bad_ret2(void *ctx)
+{
+ bpf_throw(64);
+ return 0;
+}
+
+__noinline static int loop_cb1(u32 index, int *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+__noinline static int loop_cb2(u32 index, int *ctx)
+{
+ bpf_throw(0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback")
+int reject_exception_throw_cb(struct __sk_buff *ctx)
+{
+ bpf_loop(5, loop_cb1, NULL, 0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot be called from callback")
+int reject_exception_throw_cb_diff(struct __sk_buff *ctx)
+{
+ if (ctx->protocol)
+ bpf_loop(5, loop_cb1, NULL, 0);
+ else
+ bpf_loop(5, loop_cb2, NULL, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/exhandler_kern.c b/tools/testing/selftests/bpf/progs/exhandler_kern.c
new file mode 100644
index 000000000000..20d009e2d266
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exhandler_kern.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021, Oracle and/or its affiliates. */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+unsigned int exception_triggered;
+int test_pid;
+
+/* TRACE_EVENT(task_newtask,
+ * TP_PROTO(struct task_struct *p, u64 clone_flags)
+ */
+SEC("tp_btf/task_newtask")
+int BPF_PROG(trace_task_newtask, struct task_struct *task, u64 clone_flags)
+{
+ int pid = bpf_get_current_pid_tgid() >> 32;
+ struct callback_head *work;
+ void *func;
+
+ if (test_pid != pid)
+ return 0;
+
+	/* To verify we hit an exception, we dereference task->task_works->func.
+	 * If task work has been added,
+	 * - task->task_works is non-NULL; and
+	 * - task->task_works->func is non-NULL also (the callback function
+	 *   must be specified for the task work).
+	 *
+	 * However, for a newly-created task, task->task_works is NULL,
+	 * so we know the exception handler triggered if task_works is
+	 * NULL and func is NULL.
+ */
+ work = task->task_works;
+ func = work->func;
+	/* Currently the verifier will fail on a `btf_ptr |= btf_ptr` instruction.
+	 * To work around the issue, use barrier_var() and rewrite as below to
+	 * prevent the compiler from generating verifier-unfriendly code.
+ */
+ barrier_var(work);
+ if (work)
+ return 0;
+ barrier_var(func);
+ if (func)
+ return 0;
+ exception_triggered++;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/fd_htab_lookup.c b/tools/testing/selftests/bpf/progs/fd_htab_lookup.c
new file mode 100644
index 000000000000..a4a9e1db626f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fd_htab_lookup.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025. Huawei Technologies Co., Ltd */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct inner_map_type {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, 4);
+ __uint(value_size, 4);
+ __uint(max_entries, 1);
+} inner_map SEC(".maps");
+
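+/* Slot 0 of the outer map is statically initialized below; libbpf
+ * resolves &inner_map to the inner map's fd at object load time.
+ */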
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 64);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct inner_map_type);
+} outer_map SEC(".maps") = {
+ .values = {
+ [0] = &inner_map,
+ },
+};
diff --git a/tools/testing/selftests/bpf/progs/fentry_many_args.c b/tools/testing/selftests/bpf/progs/fentry_many_args.c
new file mode 100644
index 000000000000..b61bb92fee2c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fentry_many_args.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Tencent */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 test1_result = 0;
+SEC("fentry/bpf_testmod_fentry_test7")
+int BPF_PROG(test1, __u64 a, void *b, short c, int d, void *e, char f,
+ int g)
+{
+ test1_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
+ e == (void *)20 && f == 21 && g == 22;
+ return 0;
+}
+
+__u64 test2_result = 0;
+SEC("fentry/bpf_testmod_fentry_test11")
+int BPF_PROG(test2, __u64 a, void *b, short c, int d, void *e, char f,
+ int g, unsigned int h, long i, __u64 j, unsigned long k)
+{
+ test2_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
+ e == (void *)20 && f == 21 && g == 22 && h == 23 &&
+ i == 24 && j == 25 && k == 26;
+ return 0;
+}
+
+__u64 test3_result = 0;
+SEC("fentry/bpf_testmod_fentry_test11")
+int BPF_PROG(test3, __u64 a, __u64 b, __u64 c, __u64 d, __u64 e, __u64 f,
+ __u64 g, __u64 h, __u64 i, __u64 j, __u64 k)
+{
+ test3_result = a == 16 && b == 17 && c == 18 && d == 19 &&
+ e == 20 && f == 21 && g == 22 && h == 23 &&
+ i == 24 && j == 25 && k == 26;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/fentry_recursive.c b/tools/testing/selftests/bpf/progs/fentry_recursive.c
new file mode 100644
index 000000000000..2c9fb5ac42b2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fentry_recursive.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Red Hat, Inc. */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* Dummy fentry bpf prog for testing fentry attachment chains */
+SEC("fentry/XXX")
+int BPF_PROG(recursive_attach, int a)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/fentry_recursive_target.c b/tools/testing/selftests/bpf/progs/fentry_recursive_target.c
new file mode 100644
index 000000000000..267c876d0aba
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fentry_recursive_target.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Red Hat, Inc. */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* Dummy fentry bpf prog for testing fentry attachment chains. It's going to be
+ * the start of the chain.
+ */
+SEC("fentry/bpf_testmod_fentry_test1")
+int BPF_PROG(test1, int a)
+{
+ return 0;
+}
+
+/* Dummy bpf prog for testing attach_btf presence when attaching an fentry
+ * program.
+ */
+SEC("raw_tp/sys_enter")
+int BPF_PROG(fentry_target, struct pt_regs *regs, long id)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/fentry_test.c b/tools/testing/selftests/bpf/progs/fentry_test.c
index 5f645fdaba6f..52a550d281d9 100644
--- a/tools/testing/selftests/bpf/progs/fentry_test.c
+++ b/tools/testing/selftests/bpf/progs/fentry_test.c
@@ -64,7 +64,7 @@ __u64 test7_result = 0;
SEC("fentry/bpf_fentry_test7")
int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
{
- if (arg == 0)
+ if (!arg)
test7_result = 1;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
index 49a84a3a2306..983b7c233382 100644
--- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
@@ -73,10 +73,10 @@ int test_subprog2(struct args_subprog2 *ctx)
__builtin_preserve_access_index(&skb->len));
ret = ctx->ret;
- /* bpf_prog_load() loads "test_pkt_access.o" with BPF_F_TEST_RND_HI32
- * which randomizes upper 32 bits after BPF_ALU32 insns.
- * Hence after 'w0 <<= 1' upper bits of $rax are random.
- * That is expected and correct. Trim them.
+ /* bpf_prog_test_load() loads "test_pkt_access.bpf.o" with
+ * BPF_F_TEST_RND_HI32 which randomizes upper 32 bits after BPF_ALU32
+ * insns. Hence after 'w0 <<= 1' upper bits of $rax are random. That is
+ * expected and correct. Trim them.
*/
ret = (__u32) ret;
if (len != 74 || ret != 148)
@@ -120,8 +120,6 @@ int new_get_skb_ifindex(int val, struct __sk_buff *skb, int var)
void *data = (void *)(long)skb->data;
struct ipv6hdr ip6, *ip6p;
int ifindex = skb->ifindex;
- __u32 eth_proto;
- __u32 nh_off;
/* check that BPF extension can read packet via direct packet access */
if (data + 14 + sizeof(ip6) > data_end)
diff --git a/tools/testing/selftests/bpf/progs/fexit_many_args.c b/tools/testing/selftests/bpf/progs/fexit_many_args.c
new file mode 100644
index 000000000000..53b335c2dafb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fexit_many_args.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Tencent */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 test1_result = 0;
+SEC("fexit/bpf_testmod_fentry_test7")
+int BPF_PROG(test1, __u64 a, void *b, short c, int d, void *e, char f,
+ int g, int ret)
+{
+ test1_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
+ e == (void *)20 && f == 21 && g == 22 && ret == 133;
+ return 0;
+}
+
+__u64 test2_result = 0;
+SEC("fexit/bpf_testmod_fentry_test11")
+int BPF_PROG(test2, __u64 a, void *b, short c, int d, void *e, char f,
+ int g, unsigned int h, long i, __u64 j, unsigned long k,
+ int ret)
+{
+ test2_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
+ e == (void *)20 && f == 21 && g == 22 && h == 23 &&
+ i == 24 && j == 25 && k == 26 && ret == 231;
+ return 0;
+}
+
+__u64 test3_result = 0;
+SEC("fexit/bpf_testmod_fentry_test11")
+int BPF_PROG(test3, __u64 a, __u64 b, __u64 c, __u64 d, __u64 e, __u64 f,
+ __u64 g, __u64 h, __u64 i, __u64 j, __u64 k, __u64 ret)
+{
+ test3_result = a == 16 && b == 17 && c == 18 && d == 19 &&
+ e == 20 && f == 21 && g == 22 && h == 23 &&
+ i == 24 && j == 25 && k == 26 && ret == 231;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/fexit_sleep.c b/tools/testing/selftests/bpf/progs/fexit_sleep.c
new file mode 100644
index 000000000000..106dc75efcc4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fexit_sleep.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char LICENSE[] SEC("license") = "GPL";
+
+int pid = 0;
+int fentry_cnt = 0;
+int fexit_cnt = 0;
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int nanosleep_fentry(void *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ fentry_cnt++;
+ return 0;
+}
+
+SEC("fexit/" SYS_PREFIX "sys_nanosleep")
+int nanosleep_fexit(void *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ fexit_cnt++;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/fexit_test.c b/tools/testing/selftests/bpf/progs/fexit_test.c
index 0952affb22a6..8f1ccb7302e1 100644
--- a/tools/testing/selftests/bpf/progs/fexit_test.c
+++ b/tools/testing/selftests/bpf/progs/fexit_test.c
@@ -65,7 +65,7 @@ __u64 test7_result = 0;
SEC("fexit/bpf_fentry_test7")
int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
{
- if (arg == 0)
+ if (!arg)
test7_result = 1;
return 0;
}
@@ -74,7 +74,7 @@ __u64 test8_result = 0;
SEC("fexit/bpf_fentry_test8")
int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
{
- if (arg->a == 0)
+ if (!arg->a)
test8_result = 1;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/fib_lookup.c b/tools/testing/selftests/bpf/progs/fib_lookup.c
new file mode 100644
index 000000000000..7b5dd2214ff4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fib_lookup.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/types.h>
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+#include <bpf/bpf_helpers.h>
+
+struct bpf_fib_lookup fib_params = {};
+int fib_lookup_ret = 0;
+int lookup_flags = 0;
+
+SEC("tc")
+int fib_lookup(struct __sk_buff *skb)
+{
+ fib_lookup_ret = bpf_fib_lookup(skb, &fib_params, sizeof(fib_params),
+ lookup_flags);
+
+ return TC_ACT_SHOT;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/file_reader.c b/tools/testing/selftests/bpf/progs/file_reader.c
new file mode 100644
index 000000000000..4d756b623557
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/file_reader.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <string.h>
+#include <stdbool.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "errno.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} arrmap SEC(".maps");
+
+struct elem {
+ struct file *file;
+ struct bpf_task_work tw;
+};
+
+char user_buf[256000];
+char tmp_buf[256000];
+
+int pid = 0;
+int err, run_success = 0;
+
+static int validate_file_read(struct file *file);
+static int task_work_callback(struct bpf_map *map, void *key, void *value);
+
+SEC("lsm/file_open")
+int on_open_expect_fault(void *c)
+{
+ struct bpf_dynptr dynptr;
+ struct file *file;
+ int local_err = 1;
+ __u32 user_buf_sz = sizeof(user_buf);
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ file = bpf_get_task_exe_file(bpf_get_current_task_btf());
+ if (!file)
+ return 0;
+
+ if (bpf_dynptr_from_file(file, 0, &dynptr))
+ goto out;
+
+ local_err = bpf_dynptr_read(tmp_buf, user_buf_sz, &dynptr, user_buf_sz, 0);
+ if (local_err == -EFAULT) { /* Expect page fault */
+ local_err = 0;
+ run_success = 1;
+ }
+out:
+ bpf_dynptr_file_discard(&dynptr);
+ if (local_err)
+ err = local_err;
+ bpf_put_file(file);
+ return 0;
+}
+
+SEC("lsm/file_open")
+int on_open_validate_file_read(void *c)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct elem *work;
+ int key = 0;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ work = bpf_map_lookup_elem(&arrmap, &key);
+ if (!work) {
+ err = 1;
+ return 0;
+ }
+ bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, task_work_callback, NULL);
+ return 0;
+}
+
+/* Called in a sleepable context: read 256K bytes and cross-check against the data read from user space */
+static int task_work_callback(struct bpf_map *map, void *key, void *value)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct file *file = bpf_get_task_exe_file(task);
+
+ if (!file)
+ return 0;
+
+ err = validate_file_read(file);
+ if (!err)
+ run_success = 1;
+ bpf_put_file(file);
+ return 0;
+}
+
+static int verify_dynptr_read(struct bpf_dynptr *ptr, u32 off, char *user_buf, u32 len)
+{
+ int i;
+
+ if (bpf_dynptr_read(tmp_buf, len, ptr, off, 0))
+ return 1;
+
+	/* Verify the file contents read from BPF are the same as those read from userspace */
+ bpf_for(i, 0, len)
+ {
+ if (tmp_buf[i] != user_buf[i])
+ return 1;
+ }
+ return 0;
+}
+
+static int validate_file_read(struct file *file)
+{
+ struct bpf_dynptr dynptr;
+ int loc_err = 1, off;
+ __u32 user_buf_sz = sizeof(user_buf);
+
+ if (bpf_dynptr_from_file(file, 0, &dynptr))
+ goto cleanup;
+
+ loc_err = verify_dynptr_read(&dynptr, 0, user_buf, user_buf_sz);
+ off = 1;
+ loc_err = loc_err ?: verify_dynptr_read(&dynptr, off, user_buf + off, user_buf_sz - off);
+ off = user_buf_sz - 1;
+ loc_err = loc_err ?: verify_dynptr_read(&dynptr, off, user_buf + off, user_buf_sz - off);
+ /* Read file with random offset and length */
+ off = 4097;
+ loc_err = loc_err ?: verify_dynptr_read(&dynptr, off, user_buf + off, 100);
+
+ /* Adjust dynptr, verify read */
+ loc_err = loc_err ?: bpf_dynptr_adjust(&dynptr, off, off + 1);
+ loc_err = loc_err ?: verify_dynptr_read(&dynptr, 0, user_buf + off, 1);
+ /* Can't read more than 1 byte */
+ loc_err = loc_err ?: verify_dynptr_read(&dynptr, 0, user_buf + off, 2) == 0;
+ /* Can't read with far offset */
+ loc_err = loc_err ?: verify_dynptr_read(&dynptr, 1, user_buf + off, 1) == 0;
+
+cleanup:
+ bpf_dynptr_file_discard(&dynptr);
+ return loc_err;
+}
diff --git a/tools/testing/selftests/bpf/progs/file_reader_fail.c b/tools/testing/selftests/bpf/progs/file_reader_fail.c
new file mode 100644
index 000000000000..32fe28ed2439
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/file_reader_fail.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <string.h>
+#include <stdbool.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+int err;
+void *user_ptr;
+
+SEC("lsm/file_open")
+__failure
+__msg("Unreleased reference id=")
+int on_nanosleep_unreleased_ref(void *ctx)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct file *file = bpf_get_task_exe_file(task);
+ struct bpf_dynptr dynptr;
+
+ if (!file)
+ return 0;
+
+ err = bpf_dynptr_from_file(file, 0, &dynptr);
+ return err ? 1 : 0;
+}
+
+SEC("xdp")
+__failure
+__msg("Expected a dynptr of type file as arg #0")
+int xdp_wrong_dynptr_type(struct xdp_md *xdp)
+{
+ struct bpf_dynptr dynptr;
+
+ bpf_dynptr_from_xdp(xdp, 0, &dynptr);
+ bpf_dynptr_file_discard(&dynptr);
+ return 0;
+}
+
+SEC("xdp")
+__failure
+__msg("Expected an initialized dynptr as arg #0")
+int xdp_no_dynptr_type(struct xdp_md *xdp)
+{
+ struct bpf_dynptr dynptr;
+
+ bpf_dynptr_file_discard(&dynptr);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/find_vma.c b/tools/testing/selftests/bpf/progs/find_vma.c
new file mode 100644
index 000000000000..02b82774469c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/find_vma.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct callback_ctx {
+ int dummy;
+};
+
+#define VM_EXEC 0x00000004
+#define DNAME_INLINE_LEN 32
+
+pid_t target_pid = 0;
+char d_iname[DNAME_INLINE_LEN] = {0};
+__u32 found_vm_exec = 0;
+__u64 addr = 0;
+int find_zero_ret = -1;
+int find_addr_ret = -1;
+
+static long check_vma(struct task_struct *task, struct vm_area_struct *vma,
+ struct callback_ctx *data)
+{
+ if (vma->vm_file)
+ bpf_probe_read_kernel_str(d_iname, DNAME_INLINE_LEN - 1,
+ vma->vm_file->f_path.dentry->d_shortname.string);
+
+ /* check for VM_EXEC */
+ if (vma->vm_flags & VM_EXEC)
+ found_vm_exec = 1;
+
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_getpid(void)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct callback_ctx data = {};
+
+ if (task->pid != target_pid)
+ return 0;
+
+ find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0);
+
+ /* this should return -ENOENT */
+ find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0);
+ return 0;
+}
+
+SEC("perf_event")
+int handle_pe(void)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct callback_ctx data = {};
+
+ if (task->pid != target_pid)
+ return 0;
+
+ find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0);
+
+ /* In NMI, this should return -EBUSY, as the previous call is using
+ * the irq_work.
+ */
+ find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/find_vma_fail1.c b/tools/testing/selftests/bpf/progs/find_vma_fail1.c
new file mode 100644
index 000000000000..7ba9a428f228
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/find_vma_fail1.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#define vm_flags vm_start
+
+char _license[] SEC("license") = "GPL";
+
+struct callback_ctx {
+ int dummy;
+};
+
+static long write_vma(struct task_struct *task, struct vm_area_struct *vma,
+ struct callback_ctx *data)
+{
+ /* writing to vma, which is illegal */
+ vma->vm_start = 0xffffffffff600000;
+
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_getpid(void)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct callback_ctx data = {};
+
+ bpf_find_vma(task, 0, write_vma, &data, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/find_vma_fail2.c b/tools/testing/selftests/bpf/progs/find_vma_fail2.c
new file mode 100644
index 000000000000..9bcf3203e26b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/find_vma_fail2.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct callback_ctx {
+ int dummy;
+};
+
+static long write_task(struct task_struct *task, struct vm_area_struct *vma,
+ struct callback_ctx *data)
+{
+ /* writing to task, which is illegal */
+ task->mm = NULL;
+
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_getpid(void)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct callback_ctx data = {};
+
+ bpf_find_vma(task, 0, write_task, &data, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
new file mode 100644
index 000000000000..52f6995ff29c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/for_each_array_map_elem.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 3);
+ __type(key, __u32);
+ __type(value, __u64);
+} arraymap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} percpu_map SEC(".maps");
+
+struct callback_ctx {
+ int output;
+};
+
+const volatile int bypass_unused = 1;
+
+static __u64
+unused_subprog(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *data)
+{
+ data->output = 0;
+ return 1;
+}
+
+static __u64
+check_array_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *data)
+{
+ data->output += *val;
+ if (*key == 1)
+ return 1; /* stop the iteration */
+ return 0;
+}
+
+__u32 cpu = 0;
+__u64 percpu_val = 0;
+
+static __u64
+check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *data)
+{
+ cpu = bpf_get_smp_processor_id();
+ percpu_val = *val;
+ return 0;
+}
+
+u32 arraymap_output = 0;
+
+SEC("tc")
+int test_pkt_access(struct __sk_buff *skb)
+{
+ struct callback_ctx data;
+
+ data.output = 0;
+ bpf_for_each_map_elem(&arraymap, check_array_elem, &data, 0);
+ if (!bypass_unused)
+ bpf_for_each_map_elem(&arraymap, unused_subprog, &data, 0);
+ arraymap_output = data.output;
+
+ bpf_for_each_map_elem(&percpu_map, check_percpu_elem, (void *)0, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
new file mode 100644
index 000000000000..276994d5c0c7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 3);
+ __type(key, __u32);
+ __type(value, __u64);
+} hashmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} percpu_map SEC(".maps");
+
+struct callback_ctx {
+ struct __sk_buff *ctx;
+ int input;
+ int output;
+};
+
+static __u64
+check_hash_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *data)
+{
+ struct __sk_buff *skb = data->ctx;
+ __u32 k;
+ __u64 v;
+
+ if (skb) {
+ k = *key;
+ v = *val;
+ if (skb->len == 10000 && k == 10 && v == 10)
+ data->output = 3; /* impossible path */
+ else
+ data->output = 4;
+ } else {
+ data->output = data->input;
+ bpf_map_delete_elem(map, key);
+ }
+
+ return 0;
+}
+
+__u32 cpu = 0;
+__u32 percpu_called = 0;
+__u32 percpu_key = 0;
+__u64 percpu_val = 0;
+int percpu_output = 0;
+
+static __u64
+check_percpu_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *unused)
+{
+ struct callback_ctx data;
+
+ percpu_called++;
+ cpu = bpf_get_smp_processor_id();
+ percpu_key = *key;
+ percpu_val = *val;
+
+ data.ctx = 0;
+ data.input = 100;
+ data.output = 0;
+ bpf_for_each_map_elem(&hashmap, check_hash_elem, &data, 0);
+ percpu_output = data.output;
+
+ return 0;
+}
+
+int hashmap_output = 0;
+int hashmap_elems = 0;
+int percpu_map_elems = 0;
+
+SEC("tc")
+int test_pkt_access(struct __sk_buff *skb)
+{
+ struct callback_ctx data;
+
+ data.ctx = skb;
+ data.input = 10;
+ data.output = 0;
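+	/* bpf_for_each_map_elem() returns the number of elements iterated */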
+ hashmap_elems = bpf_for_each_map_elem(&hashmap, check_hash_elem, &data, 0);
+ hashmap_output = data.output;
+
+ percpu_map_elems = bpf_for_each_map_elem(&percpu_map, check_percpu_elem,
+ (void *)0, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/for_each_hash_modify.c b/tools/testing/selftests/bpf/progs/for_each_hash_modify.c
new file mode 100644
index 000000000000..82307166f789
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/for_each_hash_modify.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Intel Corporation */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 128);
+ __type(key, __u64);
+ __type(value, __u64);
+} hashmap SEC(".maps");
+
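+/* Delete and immediately re-insert the element currently being visited,
+ * modifying the hash map in the middle of the iteration.
+ */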
+static int cb(struct bpf_map *map, __u64 *key, __u64 *val, void *arg)
+{
+ bpf_map_delete_elem(map, key);
+ bpf_map_update_elem(map, key, val, 0);
+ return 0;
+}
+
+SEC("tc")
+int test_pkt_access(struct __sk_buff *skb)
+{
+ (void)skb;
+
+ bpf_for_each_map_elem(&hashmap, cb, NULL, 0);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c b/tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c
new file mode 100644
index 000000000000..8e545865ea33
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} array_map SEC(".maps");
+
+static __u64
+check_array_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ void *data)
+{
+ bpf_get_current_comm(key, sizeof(*key));
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int test_map_key_write(const void *ctx)
+{
+ bpf_for_each_map_elem(&array_map, check_array_elem, NULL, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/for_each_multi_maps.c b/tools/testing/selftests/bpf/progs/for_each_multi_maps.c
new file mode 100644
index 000000000000..ff0bed7d4459
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/for_each_multi_maps.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 3);
+ __type(key, __u32);
+ __type(value, __u64);
+} arraymap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 5);
+ __type(key, __u32);
+ __type(value, __u64);
+} hashmap SEC(".maps");
+
+struct callback_ctx {
+ int output;
+};
+
+u32 data_output = 0;
+int use_array = 0;
+
+static __u64
+check_map_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ struct callback_ctx *data)
+{
+ data->output += *val;
+ return 0;
+}
+
+SEC("tc")
+int test_pkt_access(struct __sk_buff *skb)
+{
+ struct callback_ctx data;
+
+ data.output = 0;
+ if (use_array)
+ bpf_for_each_map_elem(&arraymap, check_map_elem, &data, 0);
+ else
+ bpf_for_each_map_elem(&hashmap, check_map_elem, &data, 0);
+ data_output = data.output;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/free_timer.c b/tools/testing/selftests/bpf/progs/free_timer.c
new file mode 100644
index 000000000000..4501ae8fc414
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/free_timer.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025. Huawei Technologies Co., Ltd */
+#include <linux/bpf.h>
+#include <time.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
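+/* The two syscall programs below race on purpose: start_timer arms a
+ * CPU-pinned timer for each map entry, while overwrite_timer overwrites
+ * the same entries, freeing timers that may still be running on another
+ * CPU.
+ */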
+#define MAX_ENTRIES 8
+
+struct map_value {
+ struct bpf_timer timer;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, MAX_ENTRIES);
+} map SEC(".maps");
+
+static int timer_cb(void *map, void *key, struct map_value *value)
+{
+ volatile int sum = 0;
+ int i;
+
+ bpf_for(i, 0, 1024 * 1024) sum += i;
+
+ return 0;
+}
+
+static int start_cb(int key)
+{
+ struct map_value *value;
+
+ value = bpf_map_lookup_elem(&map, (void *)&key);
+ if (!value)
+ return 0;
+
+ bpf_timer_init(&value->timer, &map, CLOCK_MONOTONIC);
+ bpf_timer_set_callback(&value->timer, timer_cb);
+	/* Hope 100us will be enough to wake up and run the overwrite thread */
+ bpf_timer_start(&value->timer, 100000, BPF_F_TIMER_CPU_PIN);
+
+ return 0;
+}
+
+static int overwrite_cb(int key)
+{
+ struct map_value zero = {};
+
+	/* Free the timer, which may be running on another CPU */
+ bpf_map_update_elem(&map, (void *)&key, &zero, BPF_ANY);
+
+ return 0;
+}
+
+SEC("syscall")
+int BPF_PROG(start_timer)
+{
+ bpf_loop(MAX_ENTRIES, start_cb, NULL, 0);
+ return 0;
+}
+
+SEC("syscall")
+int BPF_PROG(overwrite_timer)
+{
+ bpf_loop(MAX_ENTRIES, overwrite_cb, NULL, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/freplace_attach_probe.c b/tools/testing/selftests/bpf/progs/freplace_attach_probe.c
index bb2a77c5b62b..370a0e1922e0 100644
--- a/tools/testing/selftests/bpf/progs/freplace_attach_probe.c
+++ b/tools/testing/selftests/bpf/progs/freplace_attach_probe.c
@@ -23,7 +23,7 @@ struct {
SEC("freplace/handle_kprobe")
int new_handle_kprobe(struct pt_regs *ctx)
{
- struct hmap_elem zero = {}, *val;
+ struct hmap_elem *val;
int key = 0;
val = bpf_map_lookup_elem(&hash_map, &key);
diff --git a/tools/testing/selftests/bpf/progs/freplace_cls_redirect.c b/tools/testing/selftests/bpf/progs/freplace_cls_redirect.c
index 68a5a9db928a..7e94412d47a5 100644
--- a/tools/testing/selftests/bpf/progs/freplace_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/freplace_cls_redirect.c
@@ -7,12 +7,12 @@
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
-struct bpf_map_def SEC("maps") sock_map = {
- .type = BPF_MAP_TYPE_SOCKMAP,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 2,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 2);
+} sock_map SEC(".maps");
SEC("freplace/cls_redirect")
int freplace_cls_redirect_test(struct __sk_buff *skb)
diff --git a/tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c b/tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c
index 544e5ac90461..d09bbd8ae8a8 100644
--- a/tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c
+++ b/tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c
@@ -12,7 +12,7 @@
SEC("freplace/connect_v4_prog")
int new_connect_v4_prog(struct bpf_sock_addr *ctx)
{
- // return value thats in invalid range
+	// return value that's in an invalid range
return 255;
}
diff --git a/tools/testing/selftests/bpf/progs/freplace_dead_global_func.c b/tools/testing/selftests/bpf/progs/freplace_dead_global_func.c
new file mode 100644
index 000000000000..e6a75f86cac6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_dead_global_func.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("freplace")
+int freplace_prog(void)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/freplace_global_func.c b/tools/testing/selftests/bpf/progs/freplace_global_func.c
new file mode 100644
index 000000000000..96cb61a6ce87
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_global_func.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+__noinline
+int test_ctx_global_func(struct __sk_buff *skb)
+{
+ volatile int retval = 1;
+ return retval;
+}
+
+SEC("freplace/test_pkt_access")
+int new_test_pkt_access(struct __sk_buff *skb)
+{
+ return test_ctx_global_func(skb);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/freplace_progmap.c b/tools/testing/selftests/bpf/progs/freplace_progmap.c
new file mode 100644
index 000000000000..81b56b9aa7d6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_progmap.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CPUMAP);
+ __type(key, __u32);
+ __type(value, struct bpf_cpumap_val);
+ __uint(max_entries, 1);
+} cpu_map SEC(".maps");
+
+SEC("xdp/cpumap")
+int xdp_drop_prog(struct xdp_md *ctx)
+{
+ return XDP_DROP;
+}
+
+SEC("freplace")
+int xdp_cpumap_prog(struct xdp_md *ctx)
+{
+ return bpf_redirect_map(&cpu_map, 0, XDP_PASS);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/freplace_unreliable_prog.c b/tools/testing/selftests/bpf/progs/freplace_unreliable_prog.c
new file mode 100644
index 000000000000..624078abf3de
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_unreliable_prog.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+SEC("freplace/btf_unreliable_kprobe")
+/* The context type is what the BPF verifier expects for a kprobe context,
+ * but the target program has a `struct whatever *ctx` argument, so the
+ * freplace operation will be rejected with the following message:
+ *
+ * arg0 replace_btf_unreliable_kprobe(struct pt_regs *) doesn't match btf_unreliable_kprobe(struct whatever *)
+ */
+int replace_btf_unreliable_kprobe(bpf_user_pt_regs_t *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/get_branch_snapshot.c b/tools/testing/selftests/bpf/progs/get_branch_snapshot.c
new file mode 100644
index 000000000000..511ac634eef0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/get_branch_snapshot.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 test1_hits = 0;
+__u64 address_low = 0;
+__u64 address_high = 0;
+int wasted_entries = 0;
+long total_entries = 0;
+
+#define ENTRY_CNT 32
+struct perf_branch_entry entries[ENTRY_CNT] = {};
+
+static inline bool gbs_in_range(__u64 val)
+{
+ return (val >= address_low) && (val < address_high);
+}
+
+SEC("fexit/bpf_testmod_loop_test")
+int BPF_PROG(test1, int n, int ret)
+{
+ long i;
+
+ total_entries = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
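+	/* the helper returns the number of bytes written; convert to entries */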
+ total_entries /= sizeof(struct perf_branch_entry);
+
+ for (i = 0; i < ENTRY_CNT; i++) {
+ if (i >= total_entries)
+ break;
+ if (gbs_in_range(entries[i].from) && gbs_in_range(entries[i].to))
+ test1_hits++;
+ else if (!test1_hits)
+ wasted_entries++;
+ }
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
index 6b42db2fe391..30fd504856c7 100644
--- a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
+++ b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
@@ -4,37 +4,18 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 1);
- __type(key, __u32);
- __type(value, __u64);
-} cg_ids SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 1);
- __type(key, __u32);
- __type(value, __u32);
-} pidmap SEC(".maps");
+__u64 cg_id;
+__u64 expected_pid;
SEC("tracepoint/syscalls/sys_enter_nanosleep")
int trace(void *ctx)
{
__u32 pid = bpf_get_current_pid_tgid();
- __u32 key = 0, *expected_pid;
- __u64 *val;
-
- expected_pid = bpf_map_lookup_elem(&pidmap, &key);
- if (!expected_pid || *expected_pid != pid)
- return 0;
- val = bpf_map_lookup_elem(&cg_ids, &key);
- if (val)
- *val = bpf_get_current_cgroup_id();
+ if (expected_pid == pid)
+ cg_id = bpf_get_current_cgroup_id();
return 0;
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/progs/get_func_args_test.c b/tools/testing/selftests/bpf/progs/get_func_args_test.c
new file mode 100644
index 000000000000..e0f34a55e697
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/get_func_args_test.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <errno.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 test1_result = 0;
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test1)
+{
+ __u64 cnt = bpf_get_func_arg_cnt(ctx);
+ __u64 a = 0, z = 0, ret = 0;
+ __s64 err;
+
+ test1_result = cnt == 1;
+
+ /* valid arguments */
+ err = bpf_get_func_arg(ctx, 0, &a);
+
+	/* Accesses to traced function argument values need a proper type
+	 * cast, because the trampoline uses a type-specific instruction to
+	 * save each argument, e.g. for 'int a' a 32-bit mov:
+	 *
+	 *   mov %edi,-0x8(%rbp)
+	 *
+	 * so the upper 4 bytes are not zeroed.
+	 */
+ test1_result &= err == 0 && ((int) a == 1);
+
+ /* not valid argument */
+ err = bpf_get_func_arg(ctx, 1, &z);
+ test1_result &= err == -EINVAL;
+
+ /* return value fails in fentry */
+ err = bpf_get_func_ret(ctx, &ret);
+ test1_result &= err == -EOPNOTSUPP;
+ return 0;
+}
+
+__u64 test2_result = 0;
+SEC("fexit/bpf_fentry_test2")
+int BPF_PROG(test2)
+{
+ __u64 cnt = bpf_get_func_arg_cnt(ctx);
+ __u64 a = 0, b = 0, z = 0, ret = 0;
+ __s64 err;
+
+ test2_result = cnt == 2;
+
+ /* valid arguments */
+ err = bpf_get_func_arg(ctx, 0, &a);
+ test2_result &= err == 0 && (int) a == 2;
+
+ err = bpf_get_func_arg(ctx, 1, &b);
+ test2_result &= err == 0 && b == 3;
+
+ /* not valid argument */
+ err = bpf_get_func_arg(ctx, 2, &z);
+ test2_result &= err == -EINVAL;
+
+ /* return value */
+ err = bpf_get_func_ret(ctx, &ret);
+ test2_result &= err == 0 && ret == 5;
+ return 0;
+}
+
+__u64 test3_result = 0;
+SEC("fmod_ret/bpf_modify_return_test")
+int BPF_PROG(fmod_ret_test, int _a, int *_b, int _ret)
+{
+ __u64 cnt = bpf_get_func_arg_cnt(ctx);
+ __u64 a = 0, b = 0, z = 0, ret = 0;
+ __s64 err;
+
+ test3_result = cnt == 2;
+
+ /* valid arguments */
+ err = bpf_get_func_arg(ctx, 0, &a);
+ test3_result &= err == 0 && ((int) a == 1);
+
+ err = bpf_get_func_arg(ctx, 1, &b);
+ test3_result &= err == 0 && ((int *) b == _b);
+
+ /* not valid argument */
+ err = bpf_get_func_arg(ctx, 2, &z);
+ test3_result &= err == -EINVAL;
+
+ /* return value */
+ err = bpf_get_func_ret(ctx, &ret);
+ test3_result &= err == 0 && ret == 0;
+
+ /* change return value, it's checked in fexit_test program */
+ return 1234;
+}
+
+__u64 test4_result = 0;
+SEC("fexit/bpf_modify_return_test")
+int BPF_PROG(fexit_test, int _a, int *_b, int _ret)
+{
+ __u64 cnt = bpf_get_func_arg_cnt(ctx);
+ __u64 a = 0, b = 0, z = 0, ret = 0;
+ __s64 err;
+
+ test4_result = cnt == 2;
+
+ /* valid arguments */
+ err = bpf_get_func_arg(ctx, 0, &a);
+ test4_result &= err == 0 && ((int) a == 1);
+
+ err = bpf_get_func_arg(ctx, 1, &b);
+ test4_result &= err == 0 && ((int *) b == _b);
+
+ /* not valid argument */
+ err = bpf_get_func_arg(ctx, 2, &z);
+ test4_result &= err == -EINVAL;
+
+ /* return value */
+ err = bpf_get_func_ret(ctx, &ret);
+ test4_result &= err == 0 && ret == 1234;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
new file mode 100644
index 000000000000..2011cacdeb18
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+extern int bpf_fentry_test1(int a) __ksym;
+extern int bpf_modify_return_test(int a, int *b) __ksym;
+
+extern const void bpf_fentry_test2 __ksym;
+extern const void bpf_fentry_test3 __ksym;
+extern const void bpf_fentry_test4 __ksym;
+
+extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
+
+/* This function is here to have CONFIG_X86_KERNEL_IBT
+ * used and added to object BTF.
+ */
+int unused(void)
+{
+ return CONFIG_X86_KERNEL_IBT ? 0 : 1;
+}
+
+__u64 test1_result = 0;
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test1, int a)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test1_result = (const void *) addr == &bpf_fentry_test1;
+ return 0;
+}
+
+__u64 test2_result = 0;
+SEC("fexit/bpf_fentry_test2")
+int BPF_PROG(test2, int a)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test2_result = (const void *) addr == &bpf_fentry_test2;
+ return 0;
+}
+
+__u64 test3_result = 0;
+SEC("kprobe/bpf_fentry_test3")
+int test3(struct pt_regs *ctx)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test3_result = (const void *) addr == &bpf_fentry_test3;
+ return 0;
+}
+
+__u64 test4_result = 0;
+SEC("kretprobe/bpf_fentry_test4")
+int BPF_KRETPROBE(test4)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test4_result = (const void *) addr == &bpf_fentry_test4;
+ return 0;
+}
+
+__u64 test5_result = 0;
+SEC("fmod_ret/bpf_modify_return_test")
+int BPF_PROG(test5, int a, int *b, int ret)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test5_result = (const void *) addr == &bpf_modify_return_test;
+ return ret;
+}
+
+__u64 test6_result = 0;
+SEC("?kprobe")
+int test6(struct pt_regs *ctx)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test6_result = (const void *) addr == 0;
+ return 0;
+}
+
+unsigned long uprobe_trigger;
+
+__u64 test7_result = 0;
+SEC("uprobe//proc/self/exe:uprobe_trigger")
+int BPF_UPROBE(test7)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test7_result = (const void *) addr == (const void *) uprobe_trigger;
+ return 0;
+}
+
+__u64 test8_result = 0;
+SEC("uretprobe//proc/self/exe:uprobe_trigger")
+int BPF_URETPROBE(test8, int ret)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test8_result = (const void *) addr == (const void *) uprobe_trigger;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_uprobe_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_uprobe_test.c
new file mode 100644
index 000000000000..052f8a4345a8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/get_func_ip_uprobe_test.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+unsigned long uprobe_trigger_body;
+
+__u64 test1_result = 0;
+SEC("uprobe//proc/self/exe:uprobe_trigger_body+1")
+int BPF_UPROBE(test1)
+{
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ test1_result = (const void *) addr == (const void *) uprobe_trigger_body + 1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/getpeername4_prog.c b/tools/testing/selftests/bpf/progs/getpeername4_prog.c
new file mode 100644
index 000000000000..4c97208cd25d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/getpeername4_prog.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+#define REWRITE_ADDRESS_IP4 0xc0a801fe // 192.168.1.254
+#define REWRITE_ADDRESS_PORT4 4040
+
+SEC("cgroup/getpeername4")
+int getpeername_v4_prog(struct bpf_sock_addr *ctx)
+{
+ ctx->user_ip4 = bpf_htonl(REWRITE_ADDRESS_IP4);
+ ctx->user_port = bpf_htons(REWRITE_ADDRESS_PORT4);
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/getpeername6_prog.c b/tools/testing/selftests/bpf/progs/getpeername6_prog.c
new file mode 100644
index 000000000000..070e4d7f636c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/getpeername6_prog.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+#define REWRITE_ADDRESS_IP6_0 0xfaceb00c
+#define REWRITE_ADDRESS_IP6_1 0x12345678
+#define REWRITE_ADDRESS_IP6_2 0x00000000
+#define REWRITE_ADDRESS_IP6_3 0x0000abcd
+
+#define REWRITE_ADDRESS_PORT6 6060
+
+SEC("cgroup/getpeername6")
+int getpeername_v6_prog(struct bpf_sock_addr *ctx)
+{
+ ctx->user_ip6[0] = bpf_htonl(REWRITE_ADDRESS_IP6_0);
+ ctx->user_ip6[1] = bpf_htonl(REWRITE_ADDRESS_IP6_1);
+ ctx->user_ip6[2] = bpf_htonl(REWRITE_ADDRESS_IP6_2);
+ ctx->user_ip6[3] = bpf_htonl(REWRITE_ADDRESS_IP6_3);
+ ctx->user_port = bpf_htons(REWRITE_ADDRESS_PORT6);
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c
new file mode 100644
index 000000000000..5a76754f846b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
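+/* The leading '\0' makes this an abstract unix socket name; the "sizeof - 1"
+ * used below drops the implicit trailing NUL, so only the address bytes
+ * themselves are counted.
+ */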
+__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite";
+
+SEC("cgroup/getpeername_unix")
+int getpeername_unix_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);
+ struct sockaddr_un *sa_kern_unaddr;
+ __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) +
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1;
+ int ret;
+
+ ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1);
+ if (ret)
+ return 1;
+
+ if (sa_kern->uaddrlen != unaddrlen)
+ return 1;
+
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
+ if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
+ return 1;
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/getsockname4_prog.c b/tools/testing/selftests/bpf/progs/getsockname4_prog.c
new file mode 100644
index 000000000000..e298487c6347
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/getsockname4_prog.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+#define REWRITE_ADDRESS_IP4 0xc0a801fe // 192.168.1.254
+#define REWRITE_ADDRESS_PORT4 4040
+
+SEC("cgroup/getsockname4")
+int getsockname_v4_prog(struct bpf_sock_addr *ctx)
+{
+ ctx->user_ip4 = bpf_htonl(REWRITE_ADDRESS_IP4);
+ ctx->user_port = bpf_htons(REWRITE_ADDRESS_PORT4);
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/getsockname6_prog.c b/tools/testing/selftests/bpf/progs/getsockname6_prog.c
new file mode 100644
index 000000000000..811d10cd5525
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/getsockname6_prog.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+#define REWRITE_ADDRESS_IP6_0 0xfaceb00c
+#define REWRITE_ADDRESS_IP6_1 0x12345678
+#define REWRITE_ADDRESS_IP6_2 0x00000000
+#define REWRITE_ADDRESS_IP6_3 0x0000abcd
+
+#define REWRITE_ADDRESS_PORT6 6060
+
+SEC("cgroup/getsockname6")
+int getsockname_v6_prog(struct bpf_sock_addr *ctx)
+{
+ ctx->user_ip6[0] = bpf_htonl(REWRITE_ADDRESS_IP6_0);
+ ctx->user_ip6[1] = bpf_htonl(REWRITE_ADDRESS_IP6_1);
+ ctx->user_ip6[2] = bpf_htonl(REWRITE_ADDRESS_IP6_2);
+ ctx->user_ip6[3] = bpf_htonl(REWRITE_ADDRESS_IP6_3);
+ ctx->user_port = bpf_htons(REWRITE_ADDRESS_PORT6);
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c
new file mode 100644
index 000000000000..7867113c696f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite";
+
+SEC("cgroup/getsockname_unix")
+int getsockname_unix_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);
+ struct sockaddr_un *sa_kern_unaddr;
+ __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) +
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1;
+ int ret;
+
+ ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1);
+ if (ret)
+ return 1;
+
+ if (sa_kern->uaddrlen != unaddrlen)
+ return 1;
+
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
+ if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
+ return 1;
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/htab_mem_bench.c b/tools/testing/selftests/bpf/progs/htab_mem_bench.c
new file mode 100644
index 000000000000..b1b721b14d67
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/htab_mem_bench.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <stdbool.h>
+#include <errno.h>
+#include <linux/types.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define OP_BATCH 64
+
+struct update_ctx {
+ unsigned int from;
+ unsigned int step;
+};
+
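+/* No value_size/max_entries here: the user-space benchmark harness is
+ * expected to size the map (e.g. via bpf_map__set_value_size()) before the
+ * skeleton is loaded.
+ */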
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, 4);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} htab SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
+unsigned char zeroed_value[4096];
+unsigned int nr_thread = 0;
+long op_cnt = 0;
+
+static int write_htab(unsigned int i, struct update_ctx *ctx, unsigned int flags)
+{
+ bpf_map_update_elem(&htab, &ctx->from, zeroed_value, flags);
+ ctx->from += ctx->step;
+
+ return 0;
+}
+
+static int overwrite_htab(unsigned int i, struct update_ctx *ctx)
+{
+ return write_htab(i, ctx, 0);
+}
+
+static int newwrite_htab(unsigned int i, struct update_ctx *ctx)
+{
+ return write_htab(i, ctx, BPF_NOEXIST);
+}
+
+static int del_htab(unsigned int i, struct update_ctx *ctx)
+{
+ bpf_map_delete_elem(&htab, &ctx->from);
+ ctx->from += ctx->step;
+
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_getpgid")
+int overwrite(void *ctx)
+{
+ struct update_ctx update;
+
+ update.from = bpf_get_smp_processor_id();
+ update.step = nr_thread;
+ bpf_loop(OP_BATCH, overwrite_htab, &update, 0);
+ __sync_fetch_and_add(&op_cnt, 1);
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_getpgid")
+int batch_add_batch_del(void *ctx)
+{
+ struct update_ctx update;
+
+ update.from = bpf_get_smp_processor_id();
+ update.step = nr_thread;
+ bpf_loop(OP_BATCH, overwrite_htab, &update, 0);
+
+ update.from = bpf_get_smp_processor_id();
+ bpf_loop(OP_BATCH, del_htab, &update, 0);
+
+ __sync_fetch_and_add(&op_cnt, 2);
+ return 0;
+}
+
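+/* add_only/del_only are meant to run in tandem on different tracepoints:
+ * with from = cpu/2 and step = nr_thread/2, an adder on CPU 2k and a deleter
+ * on CPU 2k+1 presumably walk the same key sequence, so deletions chase
+ * additions.
+ */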
+SEC("?tp/syscalls/sys_enter_getpgid")
+int add_only(void *ctx)
+{
+ struct update_ctx update;
+
+ update.from = bpf_get_smp_processor_id() / 2;
+ update.step = nr_thread / 2;
+ bpf_loop(OP_BATCH, newwrite_htab, &update, 0);
+ __sync_fetch_and_add(&op_cnt, 1);
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_getppid")
+int del_only(void *ctx)
+{
+ struct update_ctx update;
+
+ update.from = bpf_get_smp_processor_id() / 2;
+ update.step = nr_thread / 2;
+ bpf_loop(OP_BATCH, del_htab, &update, 0);
+ __sync_fetch_and_add(&op_cnt, 1);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/htab_reuse.c b/tools/testing/selftests/bpf/progs/htab_reuse.c
new file mode 100644
index 000000000000..7f7368cb3095
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/htab_reuse.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct htab_val {
+ struct bpf_spin_lock lock;
+ unsigned int data;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 64);
+ __type(key, unsigned int);
+ __type(value, struct htab_val);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} htab SEC(".maps");
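+
+/* A hedged illustration (not part of the original test, which exercises
+ * element reuse from user space): a program taking the per-value spin lock
+ * would look roughly like this. The section, program name, and increment are
+ * hypothetical; bpf_spin_lock() is only permitted in program types such as tc.
+ */
+SEC("?tc")
+int htab_val_lock_sketch(struct __sk_buff *skb)
+{
+	unsigned int key = 0;
+	struct htab_val *val;
+
+	val = bpf_map_lookup_elem(&htab, &key);
+	if (!val)
+		return 0;
+
+	bpf_spin_lock(&val->lock);
+	val->data++;
+	bpf_spin_unlock(&val->lock);
+	return 0;
+}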
diff --git a/tools/testing/selftests/bpf/progs/htab_update.c b/tools/testing/selftests/bpf/progs/htab_update.c
new file mode 100644
index 000000000000..195d3b2fba00
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/htab_update.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022. Huawei Technologies Co., Ltd */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* Map value type: has BTF-managed field (bpf_timer) */
+struct val {
+ struct bpf_timer t;
+ __u64 payload;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct val);
+} htab SEC(".maps");
+
+int pid = 0;
+int update_err = 0;
+
+SEC("?fentry/bpf_obj_free_fields")
+int bpf_obj_free_fields(void *ctx)
+{
+ __u32 key = 0;
+ struct val value = { .payload = 1 };
+
+ if ((bpf_get_current_pid_tgid() >> 32) != pid)
+ return 0;
+
+ update_err = bpf_map_update_elem(&htab, &key, &value, BPF_ANY);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/ima.c b/tools/testing/selftests/bpf/progs/ima.c
index 86b21aff4bc5..e16a2c208481 100644
--- a/tools/testing/selftests/bpf/progs/ima.c
+++ b/tools/testing/selftests/bpf/progs/ima.c
@@ -9,20 +9,95 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-long ima_hash_ret = -1;
-u64 ima_hash = 0;
u32 monitored_pid = 0;
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 1 << 12);
+} ringbuf SEC(".maps");
+
char _license[] SEC("license") = "GPL";
+bool use_ima_file_hash;
+bool enable_bprm_creds_for_exec;
+bool enable_kernel_read_file;
+bool test_deny;
+
+static void ima_test_common(struct file *file)
+{
+ u64 ima_hash = 0;
+ u64 *sample;
+ int ret;
+ u32 pid;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid == monitored_pid) {
+ if (!use_ima_file_hash)
+ ret = bpf_ima_inode_hash(file->f_inode, &ima_hash,
+ sizeof(ima_hash));
+ else
+ ret = bpf_ima_file_hash(file, &ima_hash,
+ sizeof(ima_hash));
+ if (ret < 0 || ima_hash == 0)
+ return;
+
+ sample = bpf_ringbuf_reserve(&ringbuf, sizeof(u64), 0);
+ if (!sample)
+ return;
+
+ *sample = ima_hash;
+ bpf_ringbuf_submit(sample, 0);
+ }
+
+ return;
+}
+
+static int ima_test_deny(void)
+{
+ u32 pid;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid == monitored_pid && test_deny)
+ return -EPERM;
+
+ return 0;
+}
+
SEC("lsm.s/bprm_committed_creds")
-int BPF_PROG(ima, struct linux_binprm *bprm)
+void BPF_PROG(bprm_committed_creds, struct linux_binprm *bprm)
{
- u32 pid = bpf_get_current_pid_tgid() >> 32;
+ ima_test_common(bprm->file);
+}
+
+SEC("lsm.s/bprm_creds_for_exec")
+int BPF_PROG(bprm_creds_for_exec, struct linux_binprm *bprm)
+{
+ if (!enable_bprm_creds_for_exec)
+ return 0;
+
+ ima_test_common(bprm->file);
+ return 0;
+}
+
+SEC("lsm.s/kernel_read_file")
+int BPF_PROG(kernel_read_file, struct file *file, enum kernel_read_file_id id,
+ bool contents)
+{
+ int ret;
+
+ if (!enable_kernel_read_file)
+ return 0;
+
+ if (!contents)
+ return 0;
+
+ if (id != READING_POLICY)
+ return 0;
- if (pid == monitored_pid)
- ima_hash_ret = bpf_ima_inode_hash(bprm->file->f_inode,
- &ima_hash, sizeof(ima_hash));
+ ret = ima_test_deny();
+ if (ret < 0)
+ return ret;
+ ima_test_common(file);
return 0;
}
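+
+/* Hedged illustration (not part of this patch): user space would drain the
+ * ringbuf with libbpf's ring buffer API, roughly as below; handle_sample and
+ * the skeleton name are hypothetical.
+ *
+ *	static int handle_sample(void *ctx, void *data, size_t len)
+ *	{
+ *		__u64 hash = *(__u64 *)data;
+ *		// record/compare the IMA hash here
+ *		return 0;
+ *	}
+ *
+ *	struct ring_buffer *rb;
+ *
+ *	rb = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf),
+ *			      handle_sample, NULL, NULL);
+ *	ring_buffer__poll(rb, timeout_ms);
+ */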
diff --git a/tools/testing/selftests/bpf/progs/inner_array_lookup.c b/tools/testing/selftests/bpf/progs/inner_array_lookup.c
new file mode 100644
index 000000000000..c2c8f2fa451d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/inner_array_lookup.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct inner_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 5);
+ __type(key, int);
+ __type(value, int);
+} inner_map1 SEC(".maps");
+
+struct outer_map {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 3);
+ __type(key, int);
+ __array(values, struct inner_map);
+} outer_map1 SEC(".maps") = {
+ .values = {
+ [2] = &inner_map1,
+ },
+};
+
+SEC("raw_tp/sys_enter")
+int handle__sys_enter(void *ctx)
+{
+ int outer_key = 2, inner_key = 3;
+ int *val;
+ void *map;
+
+ map = bpf_map_lookup_elem(&outer_map1, &outer_key);
+ if (!map)
+ return 1;
+
+ val = bpf_map_lookup_elem(map, &inner_key);
+ if (!val)
+ return 1;
+
+ if (*val == 1)
+ *val = 2;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/ip_check_defrag.c b/tools/testing/selftests/bpf/progs/ip_check_defrag.c
new file mode 100644
index 000000000000..0e87ad1ebcfa
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/ip_check_defrag.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_tracing_net.h"
+
+#define NF_DROP 0
+#define NF_ACCEPT 1
+#define ETH_P_IP 0x0800
+#define ETH_P_IPV6 0x86DD
+#define IP_MF 0x2000
+#define IP_OFFSET 0x1FFF
+#define NEXTHDR_FRAGMENT 44
+
+volatile int shootdowns = 0;
+
+static bool is_frag_v4(struct iphdr *iph)
+{
+ int offset;
+ int flags;
+
+ offset = bpf_ntohs(iph->frag_off);
+ flags = offset & ~IP_OFFSET;
+ offset &= IP_OFFSET;
+ offset <<= 3;
+
+ return (flags & IP_MF) || offset;
+}
+
+static bool is_frag_v6(struct ipv6hdr *ip6h)
+{
+	/* Simplifying assumption: there are no extension headers between the
+	 * fixed header and the fragmentation header. This only holds for this
+	 * test case, but it saves us the hassle of walking all potential
+	 * extension headers.
+ */
+ return ip6h->nexthdr == NEXTHDR_FRAGMENT;
+}
+
+static int handle_v4(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ u8 iph_buf[20] = {};
+ struct iphdr *iph;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr))
+ return NF_DROP;
+
+ iph = bpf_dynptr_slice(&ptr, 0, iph_buf, sizeof(iph_buf));
+ if (!iph)
+ return NF_DROP;
+
+ /* Shootdown any frags */
+ if (is_frag_v4(iph)) {
+ shootdowns++;
+ return NF_DROP;
+ }
+
+ return NF_ACCEPT;
+}
+
+static int handle_v6(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ struct ipv6hdr *ip6h;
+ u8 ip6h_buf[40] = {};
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr))
+ return NF_DROP;
+
+ ip6h = bpf_dynptr_slice(&ptr, 0, ip6h_buf, sizeof(ip6h_buf));
+ if (!ip6h)
+ return NF_DROP;
+
+ /* Shootdown any frags */
+ if (is_frag_v6(ip6h)) {
+ shootdowns++;
+ return NF_DROP;
+ }
+
+ return NF_ACCEPT;
+}
+
+SEC("netfilter")
+int defrag(struct bpf_nf_ctx *ctx)
+{
+ struct __sk_buff *skb = (struct __sk_buff *)ctx->skb;
+
+ switch (bpf_ntohs(ctx->skb->protocol)) {
+ case ETH_P_IP:
+ return handle_v4(skb);
+ case ETH_P_IPV6:
+ return handle_v6(skb);
+ default:
+ return NF_ACCEPT;
+ }
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/irq.c b/tools/testing/selftests/bpf/progs/irq.c
new file mode 100644
index 000000000000..74d912b22de9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/irq.c
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+unsigned long global_flags;
+
+extern void bpf_local_irq_save(unsigned long *) __weak __ksym;
+extern void bpf_local_irq_restore(unsigned long *) __weak __ksym;
+extern int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void *unsafe_ptr__ign, u64 flags) __weak __ksym;
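+/* These are kernel kfuncs, not UAPI helpers: declaring them __weak __ksym
+ * lets libbpf resolve the calls against kernel BTF at load time.
+ */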
+
+struct bpf_res_spin_lock lockA __hidden SEC(".data.A");
+struct bpf_res_spin_lock lockB __hidden SEC(".data.B");
+
+SEC("?tc")
+__failure __msg("arg#0 doesn't point to an irq flag on stack")
+int irq_save_bad_arg(struct __sk_buff *ctx)
+{
+ bpf_local_irq_save(&global_flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("arg#0 doesn't point to an irq flag on stack")
+int irq_restore_bad_arg(struct __sk_buff *ctx)
+{
+ bpf_local_irq_restore(&global_flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_2(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags2);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_3(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags2);
+ bpf_local_irq_save(&flags3);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_3_minus_2(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags2);
+ bpf_local_irq_save(&flags3);
+ bpf_local_irq_restore(&flags3);
+ bpf_local_irq_restore(&flags2);
+ return 0;
+}
+
+static __noinline void local_irq_save(unsigned long *flags)
+{
+ bpf_local_irq_save(flags);
+}
+
+static __noinline void local_irq_restore(unsigned long *flags)
+{
+ bpf_local_irq_restore(flags);
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_1_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags;
+
+ local_irq_save(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_2_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+
+ local_irq_save(&flags1);
+ local_irq_save(&flags2);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_3_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save(&flags1);
+ local_irq_save(&flags2);
+ local_irq_save(&flags3);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
+int irq_restore_missing_3_minus_2_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save(&flags1);
+ local_irq_save(&flags2);
+ local_irq_save(&flags3);
+ local_irq_restore(&flags3);
+ local_irq_restore(&flags2);
+ return 0;
+}
+
+SEC("?tc")
+__success
+int irq_balance(struct __sk_buff *ctx)
+{
+ unsigned long flags;
+
+ local_irq_save(&flags);
+ local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__success
+int irq_balance_n(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save(&flags1);
+ local_irq_save(&flags2);
+ local_irq_save(&flags3);
+ local_irq_restore(&flags3);
+ local_irq_restore(&flags2);
+ local_irq_restore(&flags1);
+ return 0;
+}
+
+static __noinline void local_irq_balance(void)
+{
+ unsigned long flags;
+
+ local_irq_save(&flags);
+ local_irq_restore(&flags);
+}
+
+static __noinline void local_irq_balance_n(void)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save(&flags1);
+ local_irq_save(&flags2);
+ local_irq_save(&flags3);
+ local_irq_restore(&flags3);
+ local_irq_restore(&flags2);
+ local_irq_restore(&flags1);
+}
+
+SEC("?tc")
+__success
+int irq_balance_subprog(struct __sk_buff *ctx)
+{
+ local_irq_balance();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("sleepable helper bpf_copy_from_user#")
+int irq_sleepable_helper(void *ctx)
+{
+ unsigned long flags;
+ u32 data;
+
+ local_irq_save(&flags);
+ bpf_copy_from_user(&data, sizeof(data), NULL);
+ local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("kernel func bpf_copy_from_user_str is sleepable within IRQ-disabled region")
+int irq_sleepable_kfunc(void *ctx)
+{
+ unsigned long flags;
+ u32 data;
+
+ local_irq_save(&flags);
+ bpf_copy_from_user_str(&data, sizeof(data), NULL, 0);
+ local_irq_restore(&flags);
+ return 0;
+}
+
+int __noinline global_local_irq_balance(void)
+{
+ local_irq_balance_n();
+ return 0;
+}
+
+SEC("?tc")
+__success
+int irq_global_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags;
+
+ bpf_local_irq_save(&flags);
+ global_local_irq_balance();
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_restore_ooo(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags2);
+ bpf_local_irq_restore(&flags1);
+ bpf_local_irq_restore(&flags2);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_restore_ooo_3(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags2);
+ bpf_local_irq_restore(&flags2);
+ bpf_local_irq_save(&flags3);
+ bpf_local_irq_restore(&flags1);
+ bpf_local_irq_restore(&flags3);
+ return 0;
+}
+
+static __noinline void local_irq_save_3(unsigned long *flags1, unsigned long *flags2,
+ unsigned long *flags3)
+{
+ local_irq_save(flags1);
+ local_irq_save(flags2);
+ local_irq_save(flags3);
+}
+
+SEC("?tc")
+__success
+int irq_restore_3_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save_3(&flags1, &flags2, &flags3);
+ bpf_local_irq_restore(&flags3);
+ bpf_local_irq_restore(&flags2);
+ bpf_local_irq_restore(&flags1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_restore_4_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+ unsigned long flags4;
+
+ local_irq_save_3(&flags1, &flags2, &flags3);
+ bpf_local_irq_restore(&flags3);
+ bpf_local_irq_save(&flags4);
+ bpf_local_irq_restore(&flags4);
+ bpf_local_irq_restore(&flags1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_restore_ooo_3_subprog(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags2;
+ unsigned long flags3;
+
+ local_irq_save_3(&flags1, &flags2, &flags3);
+ bpf_local_irq_restore(&flags3);
+ bpf_local_irq_restore(&flags2);
+ bpf_local_irq_save(&flags3);
+ bpf_local_irq_restore(&flags1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("expected an initialized")
+int irq_restore_invalid(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+ unsigned long flags = 0xfaceb00c;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("expected uninitialized")
+int irq_save_invalid(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+
+ bpf_local_irq_save(&flags1);
+ bpf_local_irq_save(&flags1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("expected an initialized")
+int irq_restore_iter(struct __sk_buff *ctx)
+{
+ struct bpf_iter_num it;
+
+ bpf_iter_num_new(&it, 0, 42);
+ bpf_local_irq_restore((unsigned long *)&it);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference id=1")
+int irq_save_iter(struct __sk_buff *ctx)
+{
+ struct bpf_iter_num it;
+
+ /* Ensure same sized slot has st->ref_obj_id set, so we reject based on
+ * slot_type != STACK_IRQ_FLAG...
+ */
+ _Static_assert(sizeof(it) == sizeof(unsigned long), "broken iterator size");
+
+ bpf_iter_num_new(&it, 0, 42);
+ bpf_local_irq_save((unsigned long *)&it);
+ bpf_local_irq_restore((unsigned long *)&it);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("expected an initialized")
+int irq_flag_overwrite(struct __sk_buff *ctx)
+{
+ unsigned long flags;
+
+ bpf_local_irq_save(&flags);
+ flags = 0xdeadbeef;
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("expected an initialized")
+int irq_flag_overwrite_partial(struct __sk_buff *ctx)
+{
+ unsigned long flags;
+
+ bpf_local_irq_save(&flags);
+ *(((char *)&flags) + 1) = 0xff;
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_ooo_refs_array(struct __sk_buff *ctx)
+{
+ unsigned long flags[4];
+ struct { int i; } *p;
+
+ /* refs=1 */
+ bpf_local_irq_save(&flags[0]);
+
+ /* refs=1,2 */
+ p = bpf_obj_new(typeof(*p));
+ if (!p) {
+ bpf_local_irq_restore(&flags[0]);
+ return 0;
+ }
+
+ /* refs=1,2,3 */
+ bpf_local_irq_save(&flags[1]);
+
+ /* refs=1,2,3,4 */
+ bpf_local_irq_save(&flags[2]);
+
+ /* Now when we remove ref=2, the verifier must not break the ordering in
+ * the refs array between 1,3,4. With an older implementation, the
+ * verifier would swap the last element with the removed element, but to
+ * maintain the stack property we need to use memmove.
+ */
+ bpf_obj_drop(p);
+
+ /* Save and restore to reset active_irq_id to 3, as the ordering is now
+	 * refs=1,4,3. When restoring, the linear scan will find prev_id as
+	 * 3 instead of 4.
+ */
+ bpf_local_irq_save(&flags[3]);
+ bpf_local_irq_restore(&flags[3]);
+
+ /* With the incorrect implementation, we can release flags[1], flags[2],
+ * and flags[0], i.e. in the wrong order.
+ */
+ bpf_local_irq_restore(&flags[1]);
+ bpf_local_irq_restore(&flags[2]);
+ bpf_local_irq_restore(&flags[0]);
+ return 0;
+}
+
+int __noinline
+global_subprog(int i)
+{
+ if (i)
+ bpf_printk("%p", &i);
+ return i;
+}
+
+int __noinline
+global_sleepable_helper_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user(&i, sizeof(i), NULL);
+ return i;
+}
+
+int __noinline
+global_sleepable_kfunc_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user_str(&i, sizeof(i), NULL, 0);
+ global_subprog(i);
+ return i;
+}
+
+int __noinline
+global_subprog_calling_sleepable_global(int i)
+{
+ if (!i)
+ global_sleepable_kfunc_subprog(i);
+ return i;
+}
+
+SEC("?syscall")
+__success
+int irq_non_sleepable_global_subprog(void *ctx)
+{
+ unsigned long flags;
+
+ bpf_local_irq_save(&flags);
+ global_subprog(0);
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?syscall")
+__failure __msg("global functions that may sleep are not allowed in non-sleepable context")
+int irq_sleepable_helper_global_subprog(void *ctx)
+{
+ unsigned long flags;
+
+ bpf_local_irq_save(&flags);
+ global_sleepable_helper_subprog(0);
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?syscall")
+__failure __msg("global functions that may sleep are not allowed in non-sleepable context")
+int irq_sleepable_global_subprog_indirect(void *ctx)
+{
+ unsigned long flags;
+
+ bpf_local_irq_save(&flags);
+ global_subprog_calling_sleepable_global(0);
+ bpf_local_irq_restore(&flags);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot restore irq state out of order")
+int irq_ooo_lock_cond_inv(struct __sk_buff *ctx)
+{
+ unsigned long flags1, flags2;
+
+ if (bpf_res_spin_lock_irqsave(&lockA, &flags1))
+ return 0;
+ if (bpf_res_spin_lock_irqsave(&lockB, &flags2)) {
+ bpf_res_spin_unlock_irqrestore(&lockA, &flags1);
+ return 0;
+ }
+
+ bpf_res_spin_unlock_irqrestore(&lockB, &flags1);
+ bpf_res_spin_unlock_irqrestore(&lockA, &flags2);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed")
+int irq_wrong_kfunc_class_1(struct __sk_buff *ctx)
+{
+ unsigned long flags1;
+
+ if (bpf_res_spin_lock_irqsave(&lockA, &flags1))
+ return 0;
+	/* For now, bpf_local_irq_restore is not allowed in a critical section,
+	 * but this test ensures the error will be caught via kfunc_class when
+	 * it's opened up. Tested by temporarily permitting this kfunc in a
+	 * critical section.
+ */
+ bpf_local_irq_restore(&flags1);
+ bpf_res_spin_unlock_irqrestore(&lockA, &flags1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("function calls are not allowed")
+int irq_wrong_kfunc_class_2(struct __sk_buff *ctx)
+{
+ unsigned long flags1, flags2;
+
+ bpf_local_irq_save(&flags1);
+ if (bpf_res_spin_lock_irqsave(&lockA, &flags2))
+ return 0;
+ bpf_local_irq_restore(&flags2);
+ bpf_res_spin_unlock_irqrestore(&lockA, &flags1);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
new file mode 100644
index 000000000000..7dd92a303bf6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -0,0 +1,1929 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_compiler.h"
+
+static volatile int zero = 0;
+
+int my_pid;
+int arr[256];
+int small_arr[16] SEC(".data.small_arr");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 10);
+ __type(key, int);
+ __type(value, int);
+} amap SEC(".maps");
+
+#ifdef REAL_TEST
+#define MY_PID_GUARD() if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0
+#else
+#define MY_PID_GUARD() ({ })
+#endif
+
+SEC("?raw_tp")
+__failure __msg("math between map_value pointer and register with unbounded min value is not allowed")
+int iter_err_unsafe_c_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, i = zero; /* obscure initial value of i */
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 1000);
+ while ((v = bpf_iter_num_next(&it))) {
+ i++;
+ }
+ bpf_iter_num_destroy(&it);
+
+ small_arr[i] = 123; /* invalid */
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("unbounded memory access")
+int iter_err_unsafe_asm_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+
+ MY_PID_GUARD();
+
+ asm volatile (
+ "r6 = %[zero];" /* iteration counter */
+ "r1 = %[it];" /* iterator state */
+ "r2 = 0;"
+ "r3 = 1000;"
+ "r4 = 1;"
+ "call %[bpf_iter_num_new];"
+ "loop:"
+ "r1 = %[it];"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto out;"
+ "r6 += 1;"
+ "goto loop;"
+ "out:"
+ "r1 = %[it];"
+ "call %[bpf_iter_num_destroy];"
+ "r1 = %[small_arr];"
+ "r2 = r6;"
+ "r2 <<= 2;"
+ "r1 += r2;"
+ "*(u32 *)(r1 + 0) = r6;" /* invalid */
+ :
+ : [it]"r"(&it),
+ [small_arr]"r"(small_arr),
+ [zero]"r"(zero),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_common, "r6"
+ );
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_while_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 3);
+ while ((v = bpf_iter_num_next(&it))) {
+ bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
+ }
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_while_loop_auto_cleanup(const void *ctx)
+{
+ __attribute__((cleanup(bpf_iter_num_destroy))) struct bpf_iter_num it;
+ int *v;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 3);
+ while ((v = bpf_iter_num_next(&it))) {
+ bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
+ }
+ /* (!) no explicit bpf_iter_num_destroy() */
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_for_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 5, 10);
+ for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) {
+ bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
+ }
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_bpf_for_each_macro(const void *ctx)
+{
+ int *v;
+
+ MY_PID_GUARD();
+
+ bpf_for_each(num, v, 5, 10) {
+ bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
+ }
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_bpf_for_macro(const void *ctx)
+{
+ int i;
+
+ MY_PID_GUARD();
+
+ bpf_for(i, 5, 10) {
+ bpf_printk("ITER_BASIC: E2 VAL: v=%d", i);
+ }
+
+ return 0;
+}
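+
+/* Roughly what the bpf_for() macro from bpf_misc.h expands to, sketched here
+ * for clarity; the real macro uses hidden identifiers and a cleanup attribute
+ * so the iterator is destroyed on every exit path:
+ *
+ *	struct bpf_iter_num ___it __attribute__((cleanup(bpf_iter_num_destroy)));
+ *	int *___t;
+ *
+ *	bpf_iter_num_new(&___it, 5, 10);
+ *	while ((___t = bpf_iter_num_next(&___it))) {
+ *		i = *___t;
+ *		... loop body ...
+ *	}
+ */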
+
+SEC("raw_tp")
+__success
+int iter_pragma_unroll_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, i;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 2);
+ __pragma_loop_no_unroll
+ for (i = 0; i < 3; i++) {
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
+ }
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_manual_unroll_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 100, 200);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1);
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_multiple_sequential_loops(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, i;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 3);
+ while ((v = bpf_iter_num_next(&it))) {
+ bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
+ }
+ bpf_iter_num_destroy(&it);
+
+ bpf_iter_num_new(&it, 5, 10);
+ for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) {
+ bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
+ }
+ bpf_iter_num_destroy(&it);
+
+ bpf_iter_num_new(&it, 0, 2);
+ __pragma_loop_no_unroll
+ for (i = 0; i < 3; i++) {
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
+ }
+ bpf_iter_num_destroy(&it);
+
+ bpf_iter_num_new(&it, 100, 200);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
+ v = bpf_iter_num_next(&it);
+ bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1);
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_limit_cond_break_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, i = 0, sum = 0;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 10);
+ while ((v = bpf_iter_num_next(&it))) {
+ bpf_printk("ITER_SIMPLE: i=%d v=%d", i, *v);
+ sum += *v;
+
+ i++;
+ if (i > 3)
+ break;
+ }
+ bpf_iter_num_destroy(&it);
+
+ bpf_printk("ITER_SIMPLE: sum=%d\n", sum);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_obfuscate_counter(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, sum = 0;
+	/* Make i's initial value unknowable to the verifier to prevent it from
+	 * pruning the if/else branch inside the loop body and marking i as precise.
+ */
+ int i = zero;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 10);
+ while ((v = bpf_iter_num_next(&it))) {
+ int x;
+
+ i += 1;
+
+		/* If we initialized i as `int i = 0;` above, the verifier would
+		 * track that i becomes 1 on the first iteration after the
+		 * increment above, eagerly prune the else branch here, and mark
+		 * i as precise, ruining the open-coded iterator logic
+		 * completely: each subsequent iteration would have a different
+		 * *precise* value of i, so states would never converge and the
+		 * maximum instruction limit would be reached, no matter what
+		 * that limit is.
+ */
+ if (i == 1)
+ x = 123;
+ else
+ x = i * 3 + 1;
+
+ bpf_printk("ITER_OBFUSCATE_COUNTER: i=%d v=%d x=%d", i, *v, x);
+
+ sum += x;
+ }
+ bpf_iter_num_destroy(&it);
+
+ bpf_printk("ITER_OBFUSCATE_COUNTER: sum=%d\n", sum);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_search_loop(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int *v, *elem = NULL;
+ bool found = false;
+
+ MY_PID_GUARD();
+
+ bpf_iter_num_new(&it, 0, 10);
+
+ while ((v = bpf_iter_num_next(&it))) {
+ bpf_printk("ITER_SEARCH_LOOP: v=%d", *v);
+
+ if (*v == 2) {
+ found = true;
+ elem = v;
+ barrier_var(elem);
+ }
+ }
+
+ /* should fail to verify if bpf_iter_num_destroy() is here */
+
+ if (found)
+		/* the found element's value will be stale here (we should have
+		 * copied it out to a variable), but we dereference it anyway to
+		 * make sure we can still access this memory after the loop
+ */
+ bpf_printk("ITER_SEARCH_LOOP: FOUND IT = %d!\n", *elem);
+ else
+ bpf_printk("ITER_SEARCH_LOOP: NOT FOUND IT!\n");
+
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_array_fill(const void *ctx)
+{
+ int sum, i;
+
+ MY_PID_GUARD();
+
+ bpf_for(i, 0, ARRAY_SIZE(arr)) {
+ arr[i] = i * 2;
+ }
+
+ sum = 0;
+ bpf_for(i, 0, ARRAY_SIZE(arr)) {
+ sum += arr[i];
+ }
+
+ bpf_printk("ITER_ARRAY_FILL: sum=%d (should be %d)\n", sum, 255 * 256);
+
+ return 0;
+}
+
+static int arr2d[4][5];
+static int arr2d_row_sums[4];
+static int arr2d_col_sums[5];
+
+SEC("raw_tp")
+__success
+int iter_nested_iters(const void *ctx)
+{
+ int sum, row, col;
+
+ MY_PID_GUARD();
+
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+		bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ arr2d[row][col] = row * col;
+ }
+ }
+
+ /* zero-initialize sums */
+ sum = 0;
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+ arr2d_row_sums[row] = 0;
+ }
+ bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ arr2d_col_sums[col] = 0;
+ }
+
+ /* calculate sums */
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+ bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ sum += arr2d[row][col];
+ arr2d_row_sums[row] += arr2d[row][col];
+ arr2d_col_sums[col] += arr2d[row][col];
+ }
+ }
+
+ bpf_printk("ITER_NESTED_ITERS: total sum=%d", sum);
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+ bpf_printk("ITER_NESTED_ITERS: row #%d sum=%d", row, arr2d_row_sums[row]);
+ }
+ bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ bpf_printk("ITER_NESTED_ITERS: col #%d sum=%d%s",
+ col, arr2d_col_sums[col],
+ col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : "");
+ }
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_nested_deeply_iters(const void *ctx)
+{
+ int sum = 0;
+
+ MY_PID_GUARD();
+
+ bpf_repeat(10) {
+ bpf_repeat(10) {
+ bpf_repeat(10) {
+ bpf_repeat(10) {
+ bpf_repeat(10) {
+ sum += 1;
+ }
+ }
+ }
+ }
+ /* validate that we can break from inside bpf_repeat() */
+ break;
+ }
+
+ return sum;
+}
+
+static __noinline void fill_inner_dimension(int row)
+{
+ int col;
+
+ bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ arr2d[row][col] = row * col;
+ }
+}
+
+static __noinline int sum_inner_dimension(int row)
+{
+ int sum = 0, col;
+
+ bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ sum += arr2d[row][col];
+ arr2d_row_sums[row] += arr2d[row][col];
+ arr2d_col_sums[col] += arr2d[row][col];
+ }
+
+ return sum;
+}
+
+SEC("raw_tp")
+__success
+int iter_subprog_iters(const void *ctx)
+{
+ int sum, row, col;
+
+ MY_PID_GUARD();
+
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+ fill_inner_dimension(row);
+ }
+
+ /* zero-initialize sums */
+ sum = 0;
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+ arr2d_row_sums[row] = 0;
+ }
+ bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ arr2d_col_sums[col] = 0;
+ }
+
+ /* calculate sums */
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+ sum += sum_inner_dimension(row);
+ }
+
+ bpf_printk("ITER_SUBPROG_ITERS: total sum=%d", sum);
+ bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
+ bpf_printk("ITER_SUBPROG_ITERS: row #%d sum=%d",
+ row, arr2d_row_sums[row]);
+ }
+ bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
+ bpf_printk("ITER_SUBPROG_ITERS: col #%d sum=%d%s",
+ col, arr2d_col_sums[col],
+ col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : "");
+ }
+
+ return 0;
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1000);
+} hash_map SEC(".maps");
+
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int iter_err_too_permissive1(const void *ctx)
+{
+ int *map_val = NULL;
+ int key = 0;
+
+ MY_PID_GUARD();
+
+ map_val = bpf_map_lookup_elem(&hash_map, &key);
+ if (!map_val)
+ return 0;
+
+ bpf_repeat(1000000) {
+ map_val = NULL;
+ }
+
+ *map_val = 123;
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'map_value_or_null'")
+int iter_err_too_permissive2(const void *ctx)
+{
+ int *map_val = NULL;
+ int key = 0;
+
+ MY_PID_GUARD();
+
+ map_val = bpf_map_lookup_elem(&hash_map, &key);
+ if (!map_val)
+ return 0;
+
+ bpf_repeat(1000000) {
+ map_val = bpf_map_lookup_elem(&hash_map, &key);
+ }
+
+ *map_val = 123;
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'map_value_or_null'")
+int iter_err_too_permissive3(const void *ctx)
+{
+ int *map_val = NULL;
+ int key = 0;
+ bool found = false;
+
+ MY_PID_GUARD();
+
+ bpf_repeat(1000000) {
+ map_val = bpf_map_lookup_elem(&hash_map, &key);
+ found = true;
+ }
+
+ if (found)
+ *map_val = 123;
+
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int iter_tricky_but_fine(const void *ctx)
+{
+ int *map_val = NULL;
+ int key = 0;
+ bool found = false;
+
+ MY_PID_GUARD();
+
+ bpf_repeat(1000000) {
+ map_val = bpf_map_lookup_elem(&hash_map, &key);
+ if (map_val) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ *map_val = 123;
+
+ return 0;
+}
+
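+/* bpf_probe_read_kernel() from a NULL source fails with -EFAULT but, per the
+ * helper contract, zero-fills the destination buffer; the verifier only
+ * learns that the stack was written, not that it is all zeros.
+ */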
+#define __bpf_memzero(p, sz) bpf_probe_read_kernel((p), (sz), 0)
+
+SEC("raw_tp")
+__success
+int iter_stack_array_loop(const void *ctx)
+{
+ long arr1[16], arr2[16], sum = 0;
+ int i;
+
+ MY_PID_GUARD();
+
+	/* zero-init arr1 and arr2 in such a way that the verifier doesn't know
+	 * they are all zeros; if we don't do that, we'll make the BPF verifier track
+	 * all combinations of zero/non-zero stack slots for arr1/arr2, which
+ * will lead to O(2^(ARRAY_SIZE(arr1)+ARRAY_SIZE(arr2))) different
+ * states
+ */
+ __bpf_memzero(arr1, sizeof(arr1));
+	__bpf_memzero(arr2, sizeof(arr2));
+
+ /* validate that we can break and continue when using bpf_for() */
+ bpf_for(i, 0, ARRAY_SIZE(arr1)) {
+ if (i & 1) {
+ arr1[i] = i;
+ continue;
+ } else {
+ arr2[i] = i;
+ break;
+ }
+ }
+
+ bpf_for(i, 0, ARRAY_SIZE(arr1)) {
+ sum += arr1[i] + arr2[i];
+ }
+
+ return sum;
+}
+
+static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul)
+{
+ int *t, i;
+
+ while ((t = bpf_iter_num_next(it))) {
+ i = *t;
+ if (i >= n)
+ break;
+ arr[i] = i * mul;
+ }
+}
+
+static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n)
+{
+ int *t, i, sum = 0;
+
+ while ((t = bpf_iter_num_next(it))) {
+ i = *t;
+ if ((__u32)i >= n)
+ break;
+ sum += arr[i];
+ }
+
+ return sum;
+}
+
+SEC("raw_tp")
+__success
+int iter_pass_iter_ptr_to_subprog(const void *ctx)
+{
+ int arr1[16], arr2[32];
+ struct bpf_iter_num it;
+ int n, sum1, sum2;
+
+ MY_PID_GUARD();
+
+ /* fill arr1 */
+ n = ARRAY_SIZE(arr1);
+ bpf_iter_num_new(&it, 0, n);
+ fill(&it, arr1, n, 2);
+ bpf_iter_num_destroy(&it);
+
+ /* fill arr2 */
+ n = ARRAY_SIZE(arr2);
+ bpf_iter_num_new(&it, 0, n);
+ fill(&it, arr2, n, 10);
+ bpf_iter_num_destroy(&it);
+
+ /* sum arr1 */
+ n = ARRAY_SIZE(arr1);
+ bpf_iter_num_new(&it, 0, n);
+ sum1 = sum(&it, arr1, n);
+ bpf_iter_num_destroy(&it);
+
+ /* sum arr2 */
+ n = ARRAY_SIZE(arr2);
+ bpf_iter_num_new(&it, 0, n);
+ sum2 = sum(&it, arr2, n);
+ bpf_iter_num_destroy(&it);
+
+ bpf_printk("sum1=%d, sum2=%d", sum1, sum2);
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure
+__msg("R1 type=scalar expected=fp")
+__naked int delayed_read_mark(void)
+{
+ /* This is equivalent to C program below.
+ * The call to bpf_iter_num_next() is reachable with r7 values &fp[-16] and 0xdead.
+ * State with r7=&fp[-16] is visited first and follows r6 != 42 ... continue branch.
+ * At this point iterator next() call is reached with r7 that has no read mark.
+ * Loop body with r7=0xdead would only be visited if verifier would decide to continue
+	 * The loop body with r7=0xdead is only visited if the verifier decides to continue
+	 * with a second loop iteration. Absence of a read mark on r7 might affect the state
+	 * equivalence logic used for iterator convergence tracking.
+ * r7 = &fp[-16]
+ * fp[-16] = 0
+ * r6 = bpf_get_prandom_u32()
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * r6++
+ * if (r6 != 42) {
+ * r7 = 0xdead
+ * continue;
+ * }
+ * bpf_probe_read_user(r7, 8, 0xdeadbeef); // this is not safe
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return 0
+ */
+ asm volatile (
+ "r7 = r10;"
+ "r7 += -16;"
+ "r0 = 0;"
+ "*(u64 *)(r7 + 0) = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "1:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto 2f;"
+ "r6 += 1;"
+ "if r6 != 42 goto 3f;"
+ "r7 = 0xdead;"
+ "goto 1b;"
+ "3:"
+ "r1 = r7;"
+ "r2 = 8;"
+ "r3 = 0xdeadbeef;"
+ "call %[bpf_probe_read_user];"
+ "goto 1b;"
+ "2:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_probe_read_user)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__failure
+__msg("math between fp pointer and register with unbounded")
+__naked int delayed_precision_mark(void)
+{
+ /* This is equivalent to C program below.
+	 * The test is similar to delayed_read_mark but verifies that incomplete
+	 * precision marks don't fool the verifier.
+ * The call to bpf_iter_num_next() is reachable with r7 values -16 and -32.
+ * State with r7=-16 is visited first and follows r6 != 42 ... continue branch.
+ * At this point iterator next() call is reached with r7 that has no read
+ * and precision marks.
+	 * The loop body with r7=-32 is only visited if the verifier decides to continue
+	 * with a second loop iteration. Absence of a precision mark on r7 might affect the
+	 * state equivalence logic used for iterator convergence tracking.
+ *
+ * r8 = 0
+ * fp[-16] = 0
+ * r7 = -16
+ * r6 = bpf_get_prandom_u32()
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * if (r6 != 42) {
+ * r7 = -32
+ * r6 = bpf_get_prandom_u32()
+ * continue;
+ * }
+ * r0 = r10
+ * r0 += r7
+ * r8 = *(u64 *)(r0 + 0) // this is not safe
+ * r6 = bpf_get_prandom_u32()
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return r8
+ */
+ asm volatile (
+ "r8 = 0;"
+ "*(u64 *)(r10 - 16) = r8;"
+ "r7 = -16;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "1:"
+ "r1 = r10;"
+	"r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto 2f;"
+ "if r6 != 42 goto 3f;"
+	"r7 = -32;"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+	"goto 1b;"
+ "3:"
+ "r0 = r10;"
+ "r0 += r7;"
+ "r8 = *(u64 *)(r0 + 0);"
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+	"goto 1b;"
+ "2:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = r8;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_probe_read_user)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__failure
+__msg("math between fp pointer and register with unbounded")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked int loop_state_deps1(void)
+{
+ /* This is equivalent to C program below.
+ *
+ * The case turns out to be tricky in a sense that:
+ * - states with c=-25 are explored only on a second iteration
+ * of the outer loop;
+ * - states with read+precise mark on c are explored only on
+ * second iteration of the inner loop and in a state which
+ * is pushed to states stack first.
+ *
+ * Depending on the details of iterator convergence logic
+ * verifier might stop states traversal too early and miss
+ * unsafe c=-25 memory access.
+ *
+ * j = iter_new(); // fp[-16]
+ * a = 0; // r6
+ * b = 0; // r7
+ * c = -24; // r8
+ * while (iter_next(j)) {
+ * i = iter_new(); // fp[-8]
+ * a = 0; // r6
+ * b = 0; // r7
+ * while (iter_next(i)) {
+ * if (a == 1) {
+ * a = 0;
+ * b = 1;
+ * } else if (a == 0) {
+ * a = 1;
+ * if (random() == 42)
+ * continue;
+ * if (b == 1) {
+ * *(r10 + c) = 7; // this is not safe
+ * iter_destroy(i);
+ * iter_destroy(j);
+ * return;
+ * }
+ * }
+ * }
+ * iter_destroy(i);
+ * a = 0;
+ * b = 0;
+ * c = -25;
+ * }
+ * iter_destroy(j);
+ * return;
+ */
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -16;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "r8 = -24;"
+ "j_loop_%=:"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto j_loop_end_%=;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "i_loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto i_loop_end_%=;"
+ "check_one_r6_%=:"
+ "if r6 != 1 goto check_zero_r6_%=;"
+ "r6 = 0;"
+ "r7 = 1;"
+ "goto i_loop_%=;"
+ "check_zero_r6_%=:"
+ "if r6 != 0 goto i_loop_%=;"
+ "r6 = 1;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto check_one_r7_%=;"
+ "goto i_loop_%=;"
+ "check_one_r7_%=:"
+ "if r7 != 1 goto i_loop_%=;"
+ "r0 = r10;"
+ "r0 += r8;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ "i_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "r8 = -25;"
+ "goto j_loop_%=;"
+ "j_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__failure
+__msg("math between fp pointer and register with unbounded")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked int loop_state_deps2(void)
+{
+ /* This is equivalent to C program below.
+ *
+ * The case turns out to be tricky in a sense that:
+ * - states with read+precise mark on c are explored only on a second
+ * iteration of the first inner loop and in a state which is pushed to
+ * states stack first.
+ * - states with c=-25 are explored only on a second iteration of the
+ * second inner loop and in a state which is pushed to states stack
+ * first.
+ *
+ * Depending on the details of iterator convergence logic
+ * verifier might stop states traversal too early and miss
+ * unsafe c=-25 memory access.
+ *
+ * j = iter_new(); // fp[-16]
+ * a = 0; // r6
+ * b = 0; // r7
+ * c = -24; // r8
+ * while (iter_next(j)) {
+ * i = iter_new(); // fp[-8]
+ * a = 0; // r6
+ * b = 0; // r7
+ * while (iter_next(i)) {
+ * if (a == 1) {
+ * a = 0;
+ * b = 1;
+ * } else if (a == 0) {
+ * a = 1;
+ * if (random() == 42)
+ * continue;
+ * if (b == 1) {
+ * *(r10 + c) = 7; // this is not safe
+ * iter_destroy(i);
+ * iter_destroy(j);
+ * return;
+ * }
+ * }
+ * }
+ * iter_destroy(i);
+ * i = iter_new(); // fp[-8]
+ * a = 0; // r6
+ * b = 0; // r7
+ * while (iter_next(i)) {
+ * if (a == 1) {
+ * a = 0;
+ * b = 1;
+ * } else if (a == 0) {
+ * a = 1;
+ * if (random() == 42)
+ * continue;
+ * if (b == 1) {
+ * a = 0;
+ * c = -25;
+ * }
+ * }
+ * }
+ * iter_destroy(i);
+ * }
+ * iter_destroy(j);
+ * return;
+ */
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -16;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "r8 = -24;"
+ "j_loop_%=:"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto j_loop_end_%=;"
+
+ /* first inner loop */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "i_loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto i_loop_end_%=;"
+ "check_one_r6_%=:"
+ "if r6 != 1 goto check_zero_r6_%=;"
+ "r6 = 0;"
+ "r7 = 1;"
+ "goto i_loop_%=;"
+ "check_zero_r6_%=:"
+ "if r6 != 0 goto i_loop_%=;"
+ "r6 = 1;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto check_one_r7_%=;"
+ "goto i_loop_%=;"
+ "check_one_r7_%=:"
+ "if r7 != 1 goto i_loop_%=;"
+ "r0 = r10;"
+ "r0 += r8;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ "i_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+
+ /* second inner loop */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r6 = 0;"
+ "r7 = 0;"
+ "i2_loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto i2_loop_end_%=;"
+ "check2_one_r6_%=:"
+ "if r6 != 1 goto check2_zero_r6_%=;"
+ "r6 = 0;"
+ "r7 = 1;"
+ "goto i2_loop_%=;"
+ "check2_zero_r6_%=:"
+ "if r6 != 0 goto i2_loop_%=;"
+ "r6 = 1;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto check2_one_r7_%=;"
+ "goto i2_loop_%=;"
+ "check2_one_r7_%=:"
+ "if r7 != 1 goto i2_loop_%=;"
+ "r6 = 0;"
+ "r8 = -25;"
+ "goto i2_loop_%=;"
+ "i2_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+
+ "r6 = 0;"
+ "r7 = 0;"
+ "goto j_loop_%=;"
+ "j_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__failure
+__msg("math between fp pointer and register with unbounded")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked int loop_state_deps3(void)
+{
+ /* This is equivalent to a C program below.
+ *
+ * if (random() != 24) { // assume false branch is placed first
+ * i = iter_new(); // fp[-8]
+ * while (iter_next(i));
+ * iter_destroy(i);
+ * return;
+ * }
+ *
+ * for (i = 10; i > 0; i--); // increase dfs_depth for child states
+ *
+ * i = iter_new(); // fp[-8]
+ * b = -24; // r8
+ * for (;;) { // checkpoint (L)
+ * if (iter_next(i)) // checkpoint (N)
+ * break;
+ * if (random() == 77) { // assume false branch is placed first
+ * *(u64 *)(r10 + b) = 7; // this is not safe when b == -25
+ * iter_destroy(i);
+ * return;
+ * }
+ * if (random() == 42) { // assume false branch is placed first
+ * b = -25;
+ * }
+ * }
+ * iter_destroy(i);
+ *
+	 * In case of a buggy verifier the first loop might poison
+	 * env->cur_state->loop_entry with a state having 0 branches
+	 * and a small dfs_depth. This would trigger NOT_EXACT state
+	 * comparisons for some states within the second loop.
+ * Specifically, checkpoint (L) might be problematic if:
+ * - branch with '*(u64 *)(r10 + b) = 7' is not explored yet;
+ * - checkpoint (L) is first reached in state {b=-24};
+ * - traversal is pruned at checkpoint (N) setting checkpoint's (L)
+ * branch count to 0, thus making it eligible for use in pruning;
+ * - checkpoint (L) is next reached in state {b=-25},
+ * this would cause NOT_EXACT comparison with a state {b=-24}
+ * while 'b' is not marked precise yet.
+ */
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 24 goto 2f;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 5;"
+ "call %[bpf_iter_num_new];"
+ "1:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 != 0 goto 1b;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ "2:"
+ /* loop to increase dfs_depth */
+ "r0 = 10;"
+ "3:"
+ "r0 -= 1;"
+ "if r0 != 0 goto 3b;"
+ /* end of loop */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r8 = -24;"
+ "main_loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto main_loop_end_%=;"
+ /* first if */
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 77 goto unsafe_write_%=;"
+ /* second if */
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 42 goto poison_r8_%=;"
+ /* iterate */
+ "goto main_loop_%=;"
+ "main_loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+
+ "unsafe_write_%=:"
+ "r0 = r10;"
+ "r0 += r8;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "goto main_loop_end_%=;"
+
+ "poison_r8_%=:"
+ "r8 = -25;"
+ "goto main_loop_%=;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__success
+__naked int triple_continue(void)
+{
+ /* This is equivalent to C program below.
+ * High branching factor of the loop body turned out to be
+ * problematic for one of the iterator convergence tracking
+ * algorithms explored.
+ *
+ * r6 = bpf_get_prandom_u32()
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * if (bpf_get_prandom_u32() != 42)
+ * continue;
+ * if (bpf_get_prandom_u32() != 42)
+ * continue;
+ * if (bpf_get_prandom_u32() != 42)
+ * continue;
+ * r0 += 0;
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return 0
+ */
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto loop_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto loop_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto loop_%=;"
+ "r0 += 0;"
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__success
+__naked int widen_spill(void)
+{
+	/* This is equivalent to the C program below.
+	 * The counter is stored in fp[-16]; if this counter is not widened,
+	 * verifier states representing loop iterations would never converge.
+ *
+ * fp[-16] = 0
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * r0 = fp[-16];
+ * r0 += 1;
+ * fp[-16] = r0;
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return 0
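+	 *
+	 * (Note: widening replaces the exact value of the spilled counter
+	 * with an unknown scalar, which lets states from different
+	 * iterations match.)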
+ */
+ asm volatile (
+ "r0 = 0;"
+ "*(u64 *)(r10 - 16) = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+ "r0 = *(u64 *)(r10 - 16);"
+ "r0 += 1;"
+ "*(u64 *)(r10 - 16) = r0;"
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy)
+ : __clobber_all
+ );
+}
+
+SEC("raw_tp")
+__success
+__naked int checkpoint_states_deletion(void)
+{
+	/* This is equivalent to the C program below.
+ *
+ * int *a, *b, *c, *d, *e, *f;
+ * int i, sum = 0;
+ * bpf_for(i, 0, 10) {
+ * a = bpf_map_lookup_elem(&amap, &i);
+ * b = bpf_map_lookup_elem(&amap, &i);
+ * c = bpf_map_lookup_elem(&amap, &i);
+ * d = bpf_map_lookup_elem(&amap, &i);
+ * e = bpf_map_lookup_elem(&amap, &i);
+ * f = bpf_map_lookup_elem(&amap, &i);
+ * if (a) sum += 1;
+ * if (b) sum += 1;
+ * if (c) sum += 1;
+ * if (d) sum += 1;
+ * if (e) sum += 1;
+ * if (f) sum += 1;
+ * }
+ * return 0;
+ *
+	 * The body of the loop spawns multiple simulation paths
+	 * with different combinations of NULL/non-NULL information for a/b/c/d/e/f.
+	 * Each combination is unique from the states_equal() point of view.
+	 * An explored-states checkpoint is created after each iterator next call.
+	 * Iterator convergence logic expects the current state to eventually
+	 * become equal to one of the explored states, at which point loop
+	 * exploration is finished (at least for a specific path).
+	 * The verifier evicts explored states with a high miss-to-hit ratio
+	 * to avoid comparing the current state with too many explored
+	 * states per instruction.
+	 * This test is designed to "stress test" the eviction policy defined by the formula:
+ *
+ * sl->miss_cnt > sl->hit_cnt * N + N // if true sl->state is evicted
+ *
+ * Currently N is set to 64, which allows for 6 variables in this test.
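+	 * For example, with N = 64 a checkpoint that is never hit is evicted
+	 * after its 65th miss, while each hit buys it another 64 misses.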
+ */
+ asm volatile (
+ "r6 = 0;" /* a */
+ "r7 = 0;" /* b */
+ "r8 = 0;" /* c */
+ "*(u64 *)(r10 - 24) = r6;" /* d */
+ "*(u64 *)(r10 - 32) = r6;" /* e */
+ "*(u64 *)(r10 - 40) = r6;" /* f */
+ "r9 = 0;" /* sum */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+
+ "*(u64 *)(r10 - 16) = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "r6 = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "r7 = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "r8 = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "*(u64 *)(r10 - 24) = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "*(u64 *)(r10 - 32) = r0;"
+
+ "r1 = %[amap] ll;"
+ "r2 = r10;"
+ "r2 += -16;"
+ "call %[bpf_map_lookup_elem];"
+ "*(u64 *)(r10 - 40) = r0;"
+
+ "if r6 == 0 goto +1;"
+ "r9 += 1;"
+ "if r7 == 0 goto +1;"
+ "r9 += 1;"
+ "if r8 == 0 goto +1;"
+ "r9 += 1;"
+ "r0 = *(u64 *)(r10 - 24);"
+ "if r0 == 0 goto +1;"
+ "r9 += 1;"
+ "r0 = *(u64 *)(r10 - 32);"
+ "if r0 == 0 goto +1;"
+ "r9 += 1;"
+ "r0 = *(u64 *)(r10 - 40);"
+ "if r0 == 0 goto +1;"
+ "r9 += 1;"
+
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm_addr(amap)
+ : __clobber_all
+ );
+}
+
+struct {
+ int data[32];
+ int n;
+} loop_data;
+
+SEC("raw_tp")
+__success
+int iter_arr_with_actual_elem_count(const void *ctx)
+{
+ int i, n = loop_data.n, sum = 0;
+
+ if (n > ARRAY_SIZE(loop_data.data))
+ return 0;
+
+ bpf_for(i, 0, n) {
+		/* no rechecking of i against ARRAY_SIZE(loop_data.data) */
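+		/* (the verifier already bounds i below n, and n was checked
+		 * against ARRAY_SIZE(loop_data.data) above, so this access is
+		 * provably safe)
+		 */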
+ sum += loop_data.data[i];
+ }
+
+ return sum;
+}
+
+__u32 upper, select_n, result;
+__u64 global;
+
+static __noinline bool nest_2(char *str)
+{
+	/* some insns (including branch insns) to ensure stacksafe() is triggered
+	 * in nest_2(). This way, stacksafe() can compare the frame associated
+	 * with nest_1().
+	 */
+ if (str[0] == 't')
+ return true;
+ if (str[1] == 'e')
+ return true;
+ if (str[2] == 's')
+ return true;
+ if (str[3] == 't')
+ return true;
+ return false;
+}
+
+static __noinline bool nest_1(int n)
+{
+	/* case 0: allocates stack, case 1: allocates no stack */
+ switch (n) {
+ case 0: {
+ char comm[16];
+
+ if (bpf_get_current_comm(comm, 16))
+ return false;
+ return nest_2(comm);
+ }
+ case 1:
+ return nest_2((char *)&global);
+ default:
+ return false;
+ }
+}
+
+SEC("raw_tp")
+__success
+int iter_subprog_check_stacksafe(const void *ctx)
+{
+ long i;
+
+ bpf_for(i, 0, upper) {
+ if (!nest_1(select_n)) {
+ result = 1;
+ return 0;
+ }
+ }
+
+ result = 2;
+ return 0;
+}
+
+struct bpf_iter_num global_it;
+
+SEC("raw_tp")
+__failure __msg("arg#0 expected pointer to an iterator on stack")
+int iter_new_bad_arg(const void *ctx)
+{
+ bpf_iter_num_new(&global_it, 0, 1);
+ return 0;
+}
+
+SEC("raw_tp")
+__failure __msg("arg#0 expected pointer to an iterator on stack")
+int iter_next_bad_arg(const void *ctx)
+{
+ bpf_iter_num_next(&global_it);
+ return 0;
+}
+
+SEC("raw_tp")
+__failure __msg("arg#0 expected pointer to an iterator on stack")
+int iter_destroy_bad_arg(const void *ctx)
+{
+ bpf_iter_num_destroy(&global_it);
+ return 0;
+}
+
+SEC("raw_tp")
+__success
+int clean_live_states(const void *ctx)
+{
+ char buf[1];
+ int i, j, k, l, m, n, o;
+
+ bpf_for(i, 0, 10)
+ bpf_for(j, 0, 10)
+ bpf_for(k, 0, 10)
+ bpf_for(l, 0, 10)
+ bpf_for(m, 0, 10)
+ bpf_for(n, 0, 10)
+ bpf_for(o, 0, 10) {
+ if (unlikely(bpf_get_prandom_u32()))
+ buf[0] = 42;
+ bpf_printk("%s", buf);
+ }
+ return 0;
+}
+
+SEC("?raw_tp")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("misaligned stack access off 0+-31+0 size 8")
+__naked int absent_mark_in_the_middle_state(void)
+{
+	/* This is equivalent to the C program below.
+ *
+ * r8 = bpf_get_prandom_u32();
+ * r6 = -32;
+ * bpf_iter_num_new(&fp[-8], 0, 10);
+ * if (unlikely(bpf_get_prandom_u32()))
+ * r6 = -31;
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * if (unlikely(bpf_get_prandom_u32()))
+ * *(fp + r6) = 7;
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return 0
+ */
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r8 = r0;"
+ "r7 = 0;"
+ "r6 = -32;"
+ "r0 = 0;"
+ "*(u64 *)(r10 - 16) = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto change_r6_%=;"
+ "loop_%=:"
+ "call noop;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto use_r6_%=;"
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ "use_r6_%=:"
+ "r0 = r10;"
+ "r0 += r6;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "goto loop_%=;"
+ "change_r6_%=:"
+ "r6 = -31;"
+ "goto loop_%=;"
+ :
+ : __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+__used __naked
+static int noop(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ );
+}
+
+SEC("?raw_tp")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("misaligned stack access off 0+-31+0 size 8")
+__naked int absent_mark_in_the_middle_state2(void)
+{
+	/* This is equivalent to the C program below.
+ *
+ * r8 = bpf_get_prandom_u32();
+ * r6 = -32;
+ * bpf_iter_num_new(&fp[-8], 0, 10);
+ * if (unlikely(bpf_get_prandom_u32())) {
+ * r6 = -31;
+ * jump_into_loop:
+ * goto +0;
+ * goto loop;
+ * }
+ * if (unlikely(bpf_get_prandom_u32()))
+ * goto jump_into_loop;
+ * loop:
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * if (unlikely(bpf_get_prandom_u32()))
+ * *(fp + r6) = 7;
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return 0
+ */
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r8 = r0;"
+ "r7 = 0;"
+ "r6 = -32;"
+ "r0 = 0;"
+ "*(u64 *)(r10 - 16) = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto change_r6_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto jump_into_loop_%=;"
+ "loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto use_r6_%=;"
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ "use_r6_%=:"
+ "r0 = r10;"
+ "r0 += r6;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "goto loop_%=;"
+ "change_r6_%=:"
+ "r6 = -31;"
+ "jump_into_loop_%=: "
+ "goto +0;"
+ "goto loop_%=;"
+ :
+ : __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("misaligned stack access off 0+-31+0 size 8")
+__naked int absent_mark_in_the_middle_state3(void)
+{
+ /*
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * loop1(-32, &fp[-8])
+ * loop1_wrapper(&fp[-8])
+ * bpf_iter_num_destroy(&fp[-8])
+ */
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ /* call #1 */
+ "r1 = -32;"
+ "r2 = r10;"
+ "r2 += -8;"
+ "call loop1;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ /* call #2 */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call loop1_wrapper;"
+ /* return */
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+__used __naked
+static int loop1(void)
+{
+ /*
+ * int loop1(num, iter) {
+ * r6 = num;
+ * r7 = iter;
+ * while (bpf_iter_num_next(r7)) {
+ * if (unlikely(bpf_get_prandom_u32()))
+ * *(fp + r6) = 7;
+ * }
+ * return 0
+ * }
+ */
+ asm volatile (
+ "r6 = r1;"
+ "r7 = r2;"
+ "call %[bpf_get_prandom_u32];"
+ "r8 = r0;"
+ "loop_%=:"
+ "r1 = r7;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto use_r6_%=;"
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r0 = 0;"
+ "exit;"
+ "use_r6_%=:"
+ "r0 = r10;"
+ "r0 += r6;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "goto loop_%=;"
+ :
+ : __imm(bpf_iter_num_next),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+__used __naked
+static int loop1_wrapper(void)
+{
+ /*
+ * int loop1_wrapper(iter) {
+ * r6 = -32;
+ * r7 = iter;
+ * if (unlikely(bpf_get_prandom_u32()))
+ * r6 = -31;
+ * loop1(r6, r7);
+ * return 0;
+ * }
+ */
+ asm volatile (
+ "r6 = -32;"
+ "r7 = r1;"
+ "call %[bpf_get_prandom_u32];"
+ "r8 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto change_r6_%=;"
+ "loop_%=:"
+ "r1 = r6;"
+ "r2 = r7;"
+ "call loop1;"
+ "r0 = 0;"
+ "exit;"
+ "change_r6_%=:"
+ "r6 = -31;"
+ "goto loop_%=;"
+ :
+ : __imm(bpf_iter_num_next),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/iters_css.c b/tools/testing/selftests/bpf/progs/iters_css.c
new file mode 100644
index 000000000000..ec1f6c2f590b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_css.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t target_pid;
+u64 root_cg_id, leaf_cg_id;
+u64 first_cg_id, last_cg_id;
+
+int pre_order_cnt, post_order_cnt, tree_high;
+
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+SEC("fentry.s/" SYS_PREFIX "sys_getpgid")
+int iter_css_for_each(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct cgroup_subsys_state *root_css, *leaf_css, *pos;
+ struct cgroup *root_cgrp, *leaf_cgrp, *cur_cgrp;
+
+ if (cur_task->pid != target_pid)
+ return 0;
+
+ root_cgrp = bpf_cgroup_from_id(root_cg_id);
+
+ if (!root_cgrp)
+ return 0;
+
+ leaf_cgrp = bpf_cgroup_from_id(leaf_cg_id);
+
+ if (!leaf_cgrp) {
+ bpf_cgroup_release(root_cgrp);
+ return 0;
+ }
+ root_css = &root_cgrp->self;
+ leaf_css = &leaf_cgrp->self;
+ pre_order_cnt = post_order_cnt = tree_high = 0;
+ first_cg_id = last_cg_id = 0;
+
+ bpf_rcu_read_lock();
+ bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) {
+ cur_cgrp = pos->cgroup;
+ post_order_cnt++;
+ last_cg_id = cur_cgrp->kn->id;
+ }
+
+ bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_PRE) {
+ cur_cgrp = pos->cgroup;
+ pre_order_cnt++;
+ if (!first_cg_id)
+ first_cg_id = cur_cgrp->kn->id;
+ }
+
+ bpf_for_each(css, pos, leaf_css, BPF_CGROUP_ITER_ANCESTORS_UP)
+ tree_high++;
+
+ bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_ANCESTORS_UP)
+ tree_high--;
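+	/* the two ancestor walks cancel out everything above root_css, so
+	 * tree_high ends up as the leaf's depth relative to the root cgroup
+	 */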
+ bpf_rcu_read_unlock();
+ bpf_cgroup_release(root_cgrp);
+ bpf_cgroup_release(leaf_cgrp);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_css_task.c b/tools/testing/selftests/bpf/progs/iters_css_task.c
new file mode 100644
index 000000000000..9ac758649cb8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_css_task.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+
+pid_t target_pid;
+int css_task_cnt;
+u64 cg_id;
+
+SEC("lsm/file_mprotect")
+int BPF_PROG(iter_css_task_for_each, struct vm_area_struct *vma,
+ unsigned long reqprot, unsigned long prot, int ret)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct cgroup_subsys_state *css;
+ struct task_struct *task;
+ struct cgroup *cgrp;
+
+ if (cur_task->pid != target_pid)
+ return ret;
+
+ cgrp = bpf_cgroup_from_id(cg_id);
+
+ if (!cgrp)
+ return -EPERM;
+
+ css = &cgrp->self;
+ css_task_cnt = 0;
+
+ bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS)
+ if (task->pid == target_pid)
+ css_task_cnt++;
+
+ bpf_cgroup_release(cgrp);
+
+ return -EPERM;
+}
+
+static inline u64 cgroup_id(struct cgroup *cgrp)
+{
+ return cgrp->kn->id;
+}
+
+SEC("?iter/cgroup")
+int cgroup_id_printer(struct bpf_iter__cgroup *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct cgroup *cgrp = ctx->cgroup;
+ struct cgroup_subsys_state *css;
+ struct task_struct *task;
+
+ /* epilogue */
+ if (cgrp == NULL) {
+ BPF_SEQ_PRINTF(seq, "epilogue\n");
+ return 0;
+ }
+
+ /* prologue */
+ if (ctx->meta->seq_num == 0)
+ BPF_SEQ_PRINTF(seq, "prologue\n");
+
+ BPF_SEQ_PRINTF(seq, "%8llu\n", cgroup_id(cgrp));
+
+ css = &cgrp->self;
+ css_task_cnt = 0;
+ bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
+ if (task->pid == target_pid)
+ css_task_cnt++;
+ }
+
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int BPF_PROG(iter_css_task_for_each_sleep)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp = bpf_cgroup_from_id(cgrp_id);
+ struct cgroup_subsys_state *css;
+ struct task_struct *task;
+
+ if (cgrp == NULL)
+ return 0;
+ css = &cgrp->self;
+
+ bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
+
+ }
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_looping.c b/tools/testing/selftests/bpf/progs/iters_looping.c
new file mode 100644
index 000000000000..d00fd570255a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_looping.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <errno.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define ITER_HELPERS \
+ __imm(bpf_iter_num_new), \
+ __imm(bpf_iter_num_next), \
+ __imm(bpf_iter_num_destroy)
+
+SEC("?raw_tp")
+__success
+int force_clang_to_emit_btf_for_externs(void *ctx)
+{
+	/* we need this as a workaround to force the compiler to emit BTF
+	 * information for the bpf_iter_num_{new,next,destroy}() kfuncs,
+	 * as it apparently doesn't emit BTF for symbols that are only
+	 * referenced from assembly (or from a cleanup attribute, for that matter)
+	 */
+ bpf_repeat(0);
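+	/* (bpf_repeat() is a macro built on top of the bpf_iter_num_* kfuncs,
+	 * so this single call references all three of them from C code)
+	 */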
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__success
+int consume_first_item_only(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* consume first item */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+
+ "if r0 == 0 goto +1;"
+ "r0 = *(u32 *)(r0 + 0);"
+
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("R0 invalid mem access 'scalar'")
+int missing_null_check_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* consume first element */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+
+ /* FAIL: deref with no NULL check */
+ "r1 = *(u32 *)(r0 + 0);"
+
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure
+__msg("invalid access to memory, mem_size=4 off=0 size=8")
+__msg("R0 min value is outside of the allowed memory range")
+int wrong_sized_read_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* consume first element */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+
+ "if r0 == 0 goto +1;"
+ /* FAIL: deref more than available 4 bytes */
+ "r0 = *(u64 *)(r0 + 0);"
+
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+int simplest_loop(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ "r6 = 0;" /* init sum */
+
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+
+ "1:"
+ /* consume next item */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+
+ "if r0 == 0 goto 2f;"
+ "r0 = *(u32 *)(r0 + 0);"
+ "r6 += r0;" /* accumulate sum */
+ "goto 1b;"
+
+ "2:"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common, "r6"
+ );
+
+ return 0;
+}
+
+__used
+static void iterator_with_diff_stack_depth(int x)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ "if r1 == 42 goto 0f;"
+ "*(u64 *)(r10 - 128) = 0;"
+ "0:"
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "1:"
+ /* consume next item */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto 2f;"
+ "goto 1b;"
+ "2:"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("socket")
+__success
+__naked int widening_stack_size_bug(void *ctx)
+{
+ /*
+	 * Depending on the iterator_with_diff_stack_depth() parameter value,
+	 * the subprogram stack depth is either 8 or 128 bytes. Arrange values
+	 * so that it is 128 on the first call and 8 on the second. This
+	 * triggered a bug in the verifier's widen_imprecise_scalars() logic.
+ */
+ asm volatile (
+ "r6 = 0;"
+ "r1 = 0;"
+ "1:"
+ "call iterator_with_diff_stack_depth;"
+ "r1 = 42;"
+ "r6 += 1;"
+ "if r6 < 2 goto 1b;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_num.c b/tools/testing/selftests/bpf/progs/iters_num.c
new file mode 100644
index 000000000000..7a77a8daee0d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_num.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <limits.h>
+#include <linux/errno.h>
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+const volatile __s64 exp_empty_zero = 0 + 1;
+__s64 res_empty_zero;
+
+SEC("raw_tp/sys_enter")
+int num_empty_zero(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, 0, 0) sum += i;
+ res_empty_zero = 1 + sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_empty_int_min = 0 + 2;
+__s64 res_empty_int_min;
+
+SEC("raw_tp/sys_enter")
+int num_empty_int_min(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, INT_MIN, INT_MIN) sum += i;
+ res_empty_int_min = 2 + sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_empty_int_max = 0 + 3;
+__s64 res_empty_int_max;
+
+SEC("raw_tp/sys_enter")
+int num_empty_int_max(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, INT_MAX, INT_MAX) sum += i;
+ res_empty_int_max = 3 + sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_empty_minus_one = 0 + 4;
+__s64 res_empty_minus_one;
+
+SEC("raw_tp/sys_enter")
+int num_empty_minus_one(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, -1, -1) sum += i;
+ res_empty_minus_one = 4 + sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_simple_sum = 9 * 10 / 2;
+__s64 res_simple_sum;
+
+SEC("raw_tp/sys_enter")
+int num_simple_sum(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, 0, 10) sum += i;
+ res_simple_sum = sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_neg_sum = -11 * 10 / 2;
+__s64 res_neg_sum;
+
+SEC("raw_tp/sys_enter")
+int num_neg_sum(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, -10, 0) sum += i;
+ res_neg_sum = sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_very_neg_sum = INT_MIN + (__s64)(INT_MIN + 1);
+__s64 res_very_neg_sum;
+
+SEC("raw_tp/sys_enter")
+int num_very_neg_sum(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, INT_MIN, INT_MIN + 2) sum += i;
+ res_very_neg_sum = sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_very_big_sum = (__s64)(INT_MAX - 1) + (__s64)(INT_MAX - 2);
+__s64 res_very_big_sum;
+
+SEC("raw_tp/sys_enter")
+int num_very_big_sum(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, INT_MAX - 2, INT_MAX) sum += i;
+ res_very_big_sum = sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_neg_pos_sum = -3;
+__s64 res_neg_pos_sum;
+
+SEC("raw_tp/sys_enter")
+int num_neg_pos_sum(const void *ctx)
+{
+ __s64 sum = 0, i;
+
+ bpf_for(i, -3, 3) sum += i;
+ res_neg_pos_sum = sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_invalid_range = -EINVAL;
+__s64 res_invalid_range;
+
+SEC("raw_tp/sys_enter")
+int num_invalid_range(const void *ctx)
+{
+ struct bpf_iter_num it;
+
+ res_invalid_range = bpf_iter_num_new(&it, 1, 0);
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+const volatile __s64 exp_max_range = 0 + 10;
+__s64 res_max_range;
+
+SEC("raw_tp/sys_enter")
+int num_max_range(const void *ctx)
+{
+ struct bpf_iter_num it;
+
+ res_max_range = 10 + bpf_iter_num_new(&it, 0, BPF_MAX_LOOPS);
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+const volatile __s64 exp_e2big_range = -E2BIG;
+__s64 res_e2big_range;
+
+SEC("raw_tp/sys_enter")
+int num_e2big_range(const void *ctx)
+{
+ struct bpf_iter_num it;
+
+ res_e2big_range = bpf_iter_num_new(&it, -1, BPF_MAX_LOOPS);
+ bpf_iter_num_destroy(&it);
+
+ return 0;
+}
+
+const volatile __s64 exp_succ_elem_cnt = 10;
+__s64 res_succ_elem_cnt;
+
+SEC("raw_tp/sys_enter")
+int num_succ_elem_cnt(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int cnt = 0, *v;
+
+ bpf_iter_num_new(&it, 0, 10);
+ while ((v = bpf_iter_num_next(&it))) {
+ cnt++;
+ }
+ bpf_iter_num_destroy(&it);
+
+ res_succ_elem_cnt = cnt;
+
+ return 0;
+}
+
+const volatile __s64 exp_overfetched_elem_cnt = 5;
+__s64 res_overfetched_elem_cnt;
+
+SEC("raw_tp/sys_enter")
+int num_overfetched_elem_cnt(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int cnt = 0, *v, i;
+
+ bpf_iter_num_new(&it, 0, 5);
+ for (i = 0; i < 10; i++) {
+ v = bpf_iter_num_next(&it);
+ if (v)
+ cnt++;
+ }
+ bpf_iter_num_destroy(&it);
+
+ res_overfetched_elem_cnt = cnt;
+
+ return 0;
+}
+
+const volatile __s64 exp_fail_elem_cnt = 20 + 0;
+__s64 res_fail_elem_cnt;
+
+SEC("raw_tp/sys_enter")
+int num_fail_elem_cnt(const void *ctx)
+{
+ struct bpf_iter_num it;
+ int cnt = 0, *v, i;
+
+ bpf_iter_num_new(&it, 100, 10);
+ for (i = 0; i < 10; i++) {
+ v = bpf_iter_num_next(&it);
+ if (v)
+ cnt++;
+ }
+ bpf_iter_num_destroy(&it);
+
+ res_fail_elem_cnt = 20 + cnt;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/iters_state_safety.c b/tools/testing/selftests/bpf/progs/iters_state_safety.c
new file mode 100644
index 000000000000..d273b46dfc7c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_state_safety.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include <errno.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define ITER_HELPERS \
+ __imm(bpf_iter_num_new), \
+ __imm(bpf_iter_num_next), \
+ __imm(bpf_iter_num_destroy)
+
+SEC("?raw_tp")
+__success
+int force_clang_to_emit_btf_for_externs(void *ctx)
+{
+	/* we need this as a workaround to force the compiler to emit BTF
+	 * information for the bpf_iter_num_{new,next,destroy}() kfuncs,
+	 * as it apparently doesn't emit BTF for symbols that are only
+	 * referenced from assembly (or from a cleanup attribute, for that matter)
+	 */
+ bpf_repeat(0);
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("fp-8=iter_num(ref_id=1,state=active,depth=0)")
+int create_and_destroy(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("Unreleased reference id=1")
+int create_and_forget_to_destroy_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("expected an initialized iter_num as arg #0")
+int destroy_without_creating_fail(void *ctx)
+{
+	/* the iterator is deliberately left uninitialized here */
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("expected an initialized iter_num as arg #0")
+int compromise_iter_w_direct_write_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* directly write over first half of iter state */
+ "*(u64 *)(%[iter] + 0) = r0;"
+
+ /* (attempt to) destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("Unreleased reference id=1")
+int compromise_iter_w_direct_write_and_skip_destroy_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* directly write over first half of iter state */
+ "*(u64 *)(%[iter] + 0) = r0;"
+
+ /* don't destroy iter, leaking ref, which should fail */
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("expected an initialized iter_num as arg #0")
+int compromise_iter_w_helper_write_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* overwrite 8th byte with bpf_probe_read_kernel() */
+ "r1 = %[iter];"
+ "r1 += 7;"
+ "r2 = 1;"
+ "r3 = 0;" /* NULL */
+ "call %[bpf_probe_read_kernel];"
+
+ /* (attempt to) destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS, __imm(bpf_probe_read_kernel)
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+static __noinline void subprog_with_iter(void)
+{
+ struct bpf_iter_num iter;
+
+ bpf_iter_num_new(&iter, 0, 1);
+
+ return;
+}
+
+SEC("?raw_tp")
+__failure
+/* ensure there was an actual call to the subprog, which might not happen without __noinline */
+__msg("returning from callee:")
+__msg("Unreleased reference id=1")
+int leak_iter_from_subprog_fail(void *ctx)
+{
+ subprog_with_iter();
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("fp-8=iter_num(ref_id=1,state=active,depth=0)")
+int valid_stack_reuse(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+
+ /* now reuse same stack slots */
+
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("expected uninitialized iter_num as arg #0")
+int double_create_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ /* (attempt to) create iterator again */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("expected an initialized iter_num as arg #0")
+int double_destroy_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ /* (attempt to) destroy iterator again */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("expected an initialized iter_num as arg #0")
+int next_without_new_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+	/* don't create an iterator and try to iterate */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("expected an initialized iter_num as arg #0")
+int next_after_destroy_fail(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+	/* the iterator has been destroyed, try to iterate anyway */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common
+ );
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure __msg("invalid read from stack")
+int __naked read_from_iter_slot_fail(void)
+{
+ asm volatile (
+ /* r6 points to struct bpf_iter_num on the stack */
+ "r6 = r10;"
+ "r6 += -24;"
+
+ /* create iterator */
+ "r1 = r6;"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* attempt to leak bpf_iter_num state */
+ "r7 = *(u64 *)(r6 + 0);"
+ "r8 = *(u64 *)(r6 + 8);"
+
+ /* destroy iterator */
+ "r1 = r6;"
+ "call %[bpf_iter_num_destroy];"
+
+ /* leak bpf_iter_num state */
+ "r0 = r7;"
+ "if r7 > r8 goto +1;"
+ "r0 = r8;"
+ "exit;"
+ :
+ : ITER_HELPERS
+ : __clobber_common, "r6", "r7", "r8"
+ );
+}
+
+int zero;
+
+SEC("?raw_tp")
+__failure
+__flag(BPF_F_TEST_STATE_FREQ)
+__msg("Unreleased reference")
+int stacksafe_should_not_conflate_stack_spill_and_iter(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ /* Create a fork in logic, with general setup as follows:
+ * - fallthrough (first) path is valid;
+ * - branch (second) path is invalid.
+ * Then depending on what we do in fallthrough vs branch path,
+ * we try to detect bugs in func_states_equal(), regsafe(),
+ * refsafe(), stack_safe(), and similar by tricking verifier
+ * into believing that branch state is a valid subset of
+ * a fallthrough state. Verifier should reject overall
+ * validation, unless there is a bug somewhere in verifier
+ * logic.
+ */
+ "call %[bpf_get_prandom_u32];"
+ "r6 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+
+ "if r6 > r7 goto bad;" /* fork */
+
+ /* spill r6 into stack slot of bpf_iter_num var */
+ "*(u64 *)(%[iter] + 0) = r6;"
+
+ "goto skip_bad;"
+
+ "bad:"
+ /* create iterator in the same stack slot */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 1000;"
+ "call %[bpf_iter_num_new];"
+
+ /* but then forget about it and overwrite it back to r6 spill */
+ "*(u64 *)(%[iter] + 0) = r6;"
+
+ "skip_bad:"
+ "goto +0;" /* force checkpoint */
+
+	/* corrupt the stack slots, if they really hold iterator state */
+ "*(u64 *)(%[iter] + 0) = r6;"
+ :
+ : __imm_ptr(iter),
+ __imm_addr(zero),
+ __imm(bpf_get_prandom_u32),
+ __imm(bpf_dynptr_from_mem),
+ ITER_HELPERS
+ : __clobber_common, "r6", "r7"
+ );
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_task.c b/tools/testing/selftests/bpf/progs/iters_task.c
new file mode 100644
index 000000000000..e4d53e40ff20
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_task.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t target_pid;
+int procs_cnt, threads_cnt, proc_threads_cnt, invalid_cnt;
+
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+SEC("fentry.s/" SYS_PREFIX "sys_getpgid")
+int iter_task_for_each_sleep(void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct task_struct *pos;
+
+ if (cur_task->pid != target_pid)
+ return 0;
+ procs_cnt = threads_cnt = proc_threads_cnt = 0;
+
+ bpf_rcu_read_lock();
+ bpf_for_each(task, pos, NULL, ~0U) {
+	/* The instructions below shouldn't be executed for invalid flags */
+ invalid_cnt++;
+ }
+
+ bpf_for_each(task, pos, NULL, BPF_TASK_ITER_PROC_THREADS) {
+	/* The instructions below shouldn't be executed for an invalid task__nullable */
+ invalid_cnt++;
+ }
+
+ bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS)
+ if (pos->pid == target_pid)
+ procs_cnt++;
+
+ bpf_for_each(task, pos, cur_task, BPF_TASK_ITER_PROC_THREADS)
+ proc_threads_cnt++;
+
+ bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_THREADS)
+ if (pos->tgid == target_pid)
+ threads_cnt++;
+ bpf_rcu_read_unlock();
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_task_failure.c b/tools/testing/selftests/bpf/progs/iters_task_failure.c
new file mode 100644
index 000000000000..fe3663dedbe1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_task_failure.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("kernel func bpf_iter_task_new requires RCU critical section protection")
+int BPF_PROG(iter_tasks_without_lock)
+{
+ struct task_struct *pos;
+
+ bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) {
+
+ }
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("kernel func bpf_iter_css_new requires RCU critical section protection")
+int BPF_PROG(iter_css_without_lock)
+{
+ u64 cg_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
+ struct cgroup_subsys_state *root_css, *pos;
+
+ if (!cgrp)
+ return 0;
+ root_css = &cgrp->self;
+
+ bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) {
+
+ }
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("expected an RCU CS when using bpf_iter_task_next")
+int BPF_PROG(iter_tasks_lock_and_unlock)
+{
+ struct task_struct *pos;
+
+ bpf_rcu_read_lock();
+ bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) {
+ bpf_rcu_read_unlock();
+
+ bpf_rcu_read_lock();
+ }
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("expected an RCU CS when using bpf_iter_css_next")
+int BPF_PROG(iter_css_lock_and_unlock)
+{
+ u64 cg_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
+ struct cgroup_subsys_state *root_css, *pos;
+
+ if (!cgrp)
+ return 0;
+ root_css = &cgrp->self;
+
+ bpf_rcu_read_lock();
+ bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) {
+ bpf_rcu_read_unlock();
+
+ bpf_rcu_read_lock();
+ }
+ bpf_rcu_read_unlock();
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+__failure __msg("css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs")
+int BPF_PROG(iter_css_task_for_each)
+{
+ u64 cg_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
+ struct cgroup_subsys_state *css;
+ struct task_struct *task;
+
+ if (cgrp == NULL)
+ return 0;
+ css = &cgrp->self;
+
+ bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
+
+ }
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_task_vma.c b/tools/testing/selftests/bpf/progs/iters_task_vma.c
new file mode 100644
index 000000000000..dc0c3691dcc2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_task_vma.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_experimental.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+pid_t target_pid = 0;
+unsigned int vmas_seen = 0;
+
+struct {
+ __u64 vm_start;
+ __u64 vm_end;
+} vm_ranges[1000];
+
+SEC("raw_tp/sys_enter")
+int iter_task_vma_for_each(const void *ctx)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct vm_area_struct *vma;
+ unsigned int seen = 0;
+
+ if (task->pid != target_pid)
+ return 0;
+
+ if (vmas_seen)
+ return 0;
+
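+	/* bpf_cmp_unlikely() emits the comparison directly in inline asm, so
+	 * the verifier is guaranteed to see the bound on 'seen' before the
+	 * array accesses below
+	 */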
+ bpf_for_each(task_vma, vma, task, 0) {
+ if (bpf_cmp_unlikely(seen, >=, 1000))
+ break;
+
+ vm_ranges[seen].vm_start = vma->vm_start;
+ vm_ranges[seen].vm_end = vma->vm_end;
+ seen++;
+ }
+
+ vmas_seen = seen;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/iters_testmod.c b/tools/testing/selftests/bpf/progs/iters_testmod.c
new file mode 100644
index 000000000000..5379e9960ffd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_testmod.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include "bpf_experimental.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("raw_tp/sys_enter")
+__success
+int iter_next_trusted(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct bpf_iter_task_vma vma_it;
+ struct vm_area_struct *vma_ptr;
+
+ bpf_iter_task_vma_new(&vma_it, cur_task, 0);
+
+ vma_ptr = bpf_iter_task_vma_next(&vma_it);
+ if (vma_ptr == NULL)
+ goto out;
+
+ bpf_kfunc_trusted_vma_test(vma_ptr);
+out:
+ bpf_iter_task_vma_destroy(&vma_it);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int iter_next_trusted_or_null(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct bpf_iter_task_vma vma_it;
+ struct vm_area_struct *vma_ptr;
+
+ bpf_iter_task_vma_new(&vma_it, cur_task, 0);
+
+ vma_ptr = bpf_iter_task_vma_next(&vma_it);
+
+ bpf_kfunc_trusted_vma_test(vma_ptr);
+
+ bpf_iter_task_vma_destroy(&vma_it);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+__success
+int iter_next_rcu(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct bpf_iter_task task_it;
+ struct task_struct *task_ptr;
+
+ bpf_iter_task_new(&task_it, cur_task, 0);
+
+ task_ptr = bpf_iter_task_next(&task_it);
+ if (task_ptr == NULL)
+ goto out;
+
+ bpf_kfunc_rcu_task_test(task_ptr);
+out:
+ bpf_iter_task_destroy(&task_it);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int iter_next_rcu_or_null(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct bpf_iter_task task_it;
+ struct task_struct *task_ptr;
+
+ bpf_iter_task_new(&task_it, cur_task, 0);
+
+ task_ptr = bpf_iter_task_next(&task_it);
+
+ bpf_kfunc_rcu_task_test(task_ptr);
+
+ bpf_iter_task_destroy(&task_it);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+__failure __msg("R1 must be referenced or trusted")
+int iter_next_rcu_not_trusted(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct bpf_iter_task task_it;
+ struct task_struct *task_ptr;
+
+ bpf_iter_task_new(&task_it, cur_task, 0);
+
+ task_ptr = bpf_iter_task_next(&task_it);
+ if (task_ptr == NULL)
+ goto out;
+
+ bpf_kfunc_trusted_task_test(task_ptr);
+out:
+ bpf_iter_task_destroy(&task_it);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+__failure __msg("R1 cannot write into rdonly_mem")
+/* Message should not be 'R1 cannot write into rdonly_trusted_mem' */
+int iter_next_ptr_mem_not_trusted(const void *ctx)
+{
+ struct bpf_iter_num num_it;
+ int *num_ptr;
+
+ bpf_iter_num_new(&num_it, 0, 10);
+
+ num_ptr = bpf_iter_num_next(&num_it);
+ if (num_ptr == NULL)
+ goto out;
+
+ bpf_kfunc_trusted_num_test(num_ptr);
+out:
+ bpf_iter_num_destroy(&num_it);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("kernel func bpf_kfunc_ret_rcu_test requires RCU critical section protection")
+int iter_ret_rcu_test_protected(const void *ctx)
+{
+ struct task_struct *p;
+
+ p = bpf_kfunc_ret_rcu_test();
+ return p->pid;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("R1 type=rcu_ptr_or_null_ expected=")
+int iter_ret_rcu_test_type(const void *ctx)
+{
+ struct task_struct *p;
+
+ bpf_rcu_read_lock();
+ p = bpf_kfunc_ret_rcu_test();
+ bpf_this_cpu_ptr(p);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("kernel func bpf_kfunc_ret_rcu_test_nostruct requires RCU critical section protection")
+int iter_ret_rcu_test_protected_nostruct(const void *ctx)
+{
+ void *p;
+
+ p = bpf_kfunc_ret_rcu_test_nostruct(4);
+ return *(int *)p;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("R1 type=rdonly_rcu_mem_or_null expected=")
+int iter_ret_rcu_test_type_nostruct(const void *ctx)
+{
+ void *p;
+
+ bpf_rcu_read_lock();
+ p = bpf_kfunc_ret_rcu_test_nostruct(4);
+ bpf_this_cpu_ptr(p);
+ bpf_rcu_read_unlock();
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
new file mode 100644
index 000000000000..83791348bed5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
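+/* opaque iterator state: the anonymous u64 bitfields just reserve
+ * 16 bytes so the layout matches the kernel-side iterator struct
+ */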
+struct bpf_iter_testmod_seq {
+ u64 :64;
+ u64 :64;
+};
+
+extern int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt) __ksym;
+extern s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it) __ksym;
+extern s64 bpf_iter_testmod_seq_value(int blah, struct bpf_iter_testmod_seq *it) __ksym;
+extern void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it) __ksym;
+
+const volatile __s64 exp_empty = 0 + 1;
+__s64 res_empty;
+
+SEC("raw_tp/sys_enter")
+__success __log_level(2)
+__msg("fp-16=iter_testmod_seq(ref_id=1,state=active,depth=0)")
+__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)")
+__msg("call bpf_iter_testmod_seq_destroy")
+int testmod_seq_empty(const void *ctx)
+{
+ __s64 sum = 0, *i;
+
+ bpf_for_each(testmod_seq, i, 1000, 0) sum += *i;
+ res_empty = 1 + sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_full = 1000000;
+__s64 res_full;
+
+SEC("raw_tp/sys_enter")
+__success __log_level(2)
+__msg("fp-16=iter_testmod_seq(ref_id=1,state=active,depth=0)")
+__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)")
+__msg("call bpf_iter_testmod_seq_destroy")
+int testmod_seq_full(const void *ctx)
+{
+ __s64 sum = 0, *i;
+
+ bpf_for_each(testmod_seq, i, 1000, 1000) sum += *i;
+ res_full = sum;
+
+ return 0;
+}
+
+const volatile __s64 exp_truncated = 10 * 1000000;
+__s64 res_truncated;
+
+static volatile int zero = 0;
+
+SEC("raw_tp/sys_enter")
+__success __log_level(2)
+__msg("fp-16=iter_testmod_seq(ref_id=1,state=active,depth=0)")
+__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)")
+__msg("call bpf_iter_testmod_seq_destroy")
+int testmod_seq_truncated(const void *ctx)
+{
+ __s64 sum = 0, *i;
+ int cnt = zero;
+
+ bpf_for_each(testmod_seq, i, 10, 2000000) {
+ sum += *i;
+ cnt++;
+ if (cnt >= 1000000)
+ break;
+ }
+ res_truncated = sum;
+
+ return 0;
+}
+
+SEC("?raw_tp")
+__failure
+__msg("expected an initialized iter_testmod_seq as arg #1")
+int testmod_seq_getter_before_bad(const void *ctx)
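+	/* a range of exactly BPF_MAX_LOOPS elements is still accepted, so
+	 * the constructor should return 0 and res_max_range should stay 10
+	 */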
+{
+ struct bpf_iter_testmod_seq it;
+
+ return bpf_iter_testmod_seq_value(0, &it);
+}
+
+SEC("?raw_tp")
+__failure
+__msg("expected an initialized iter_testmod_seq as arg #1")
+int testmod_seq_getter_after_bad(const void *ctx)
+{
+ struct bpf_iter_testmod_seq it;
+ s64 sum = 0, *v;
+
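+	/* [-1, BPF_MAX_LOOPS) spans BPF_MAX_LOOPS + 1 elements, one more
+	 * than supported, so the constructor should return -E2BIG
+	 */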
+ bpf_iter_testmod_seq_new(&it, 100, 100);
+
+ while ((v = bpf_iter_testmod_seq_next(&it))) {
+ sum += *v;
+ }
+
+ bpf_iter_testmod_seq_destroy(&it);
+
+ return sum + bpf_iter_testmod_seq_value(0, &it);
+}
+
+SEC("?socket")
+__success __retval(1000000)
+int testmod_seq_getter_good(const void *ctx)
+{
+ struct bpf_iter_testmod_seq it;
+ s64 sum = 0, *v;
+
+ bpf_iter_testmod_seq_new(&it, 100, 100);
+
+ while ((v = bpf_iter_testmod_seq_next(&it))) {
+ sum += *v;
+ }
+
+ sum *= bpf_iter_testmod_seq_value(0, &it);
+
+ bpf_iter_testmod_seq_destroy(&it);
+
+ return sum;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
new file mode 100644
index 000000000000..4d619bea9c75
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#ifndef __clang__
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#endif
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, u64);
+ __type(value, u64);
+} m_hash SEC(".maps");
+
+SEC("?raw_tp")
+__failure __msg("R8 invalid mem access 'map_value_or_null")
+int jeq_infer_not_null_ptr_to_btfid(void *ctx)
+{
+ struct bpf_map *map = (struct bpf_map *)&m_hash;
+ struct bpf_map *inner_map = map->inner_map_meta;
+ u64 key = 0, ret = 0, *val;
+
+ val = bpf_map_lookup_elem(map, &key);
+	/* Do not mark the pointer as non-null when one of the compared
+	 * registers is PTR_TO_BTF_ID (R9); reject the program because of
+	 * the invalid access to the map value (R8).
+	 *
+	 * Here, we need to inline these insns to access
+	 * R8 directly, since the compiler may use another register
+	 * once it figures out that val == inner_map.
+	 */
+ asm volatile("r8 = %[val];\n"
+ "r9 = %[inner_map];\n"
+ "if r8 != r9 goto +1;\n"
+ "%[ret] = *(u64 *)(r8 +0);\n"
+ : [ret] "+r"(ret)
+ : [inner_map] "r"(inner_map), [val] "r"(val)
+ : "r8", "r9");
+
+ return ret;
+}
diff --git a/tools/testing/selftests/bpf/progs/jit_probe_mem.c b/tools/testing/selftests/bpf/progs/jit_probe_mem.c
new file mode 100644
index 000000000000..82190d79de37
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/jit_probe_mem.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+static struct prog_test_ref_kfunc __kptr *v;
+long total_sum = -1;
+
+SEC("tc")
+int test_jit_probe_mem(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ unsigned long zero = 0, sum;
+
+ p = bpf_kfunc_call_test_acquire(&zero);
+ if (!p)
+ return 1;
+
+ p = bpf_kptr_xchg(&v, p);
+ if (p)
+ goto release_out;
+
+ /* Direct map value access of kptr, should be PTR_UNTRUSTED */
+ p = v;
+ if (!p)
+ return 1;
+
+ asm volatile (
+ "r9 = %[p];"
+ "%[sum] = 0;"
+
+ /* r8 = p->a */
+ "r8 = *(u32 *)(r9 + 0);"
+ "%[sum] += r8;"
+
+ /* r8 = p->b */
+ "r8 = *(u32 *)(r9 + 4);"
+ "%[sum] += r8;"
+
+ "r9 += 8;"
+ /* r9 = p->a */
+ "r9 = *(u32 *)(r9 - 8);"
+ "%[sum] += r9;"
+
+ : [sum] "=r"(sum)
+ : [p] "r"(p)
+ : "r8", "r9"
+ );
+
+ total_sum = sum;
+ return 0;
+release_out:
+ bpf_kfunc_call_test_release(p);
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c
index a46a264ce24e..7236da72ce80 100644
--- a/tools/testing/selftests/bpf/progs/kfree_skb.c
+++ b/tools/testing/selftests/bpf/progs/kfree_skb.c
@@ -9,8 +9,8 @@
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} perf_buf_map SEC(".maps");
#define _(P) (__builtin_preserve_access_index(P))
@@ -109,10 +109,10 @@ int BPF_PROG(trace_kfree_skb, struct sk_buff *skb, void *location)
return 0;
}
-static volatile struct {
+struct {
bool fentry_test_ok;
bool fexit_test_ok;
-} result;
+} result = {};
SEC("fentry/eth_type_trans")
int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb, struct net_device *dev,
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c
new file mode 100644
index 000000000000..b9670e9a6e3d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+SEC("tc")
+int kfunc_destructive_test(void)
+{
+ bpf_kfunc_call_test_destructive();
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c
new file mode 100644
index 000000000000..a1963497f0bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+struct syscall_test_args {
+ __u8 data[16];
+ size_t size;
+};
+
+SEC("?syscall")
+int kfunc_syscall_test_fail(struct syscall_test_args *args)
+{
+ bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args) + 1);
+
+ return 0;
+}
+
+SEC("?syscall")
+int kfunc_syscall_test_null_fail(struct syscall_test_args *args)
+{
+	/* Must be called with args as a NULL pointer.
+	 * We deliberately skip the NULL check so that the verifier
+	 * considers the pointer as possibly non-NULL and lets us load it.
+	 *
+	 * So the following cannot be added:
+	 *
+	 * if (args)
+	 *	return -22;
+	 */
+
+ bpf_kfunc_call_test_mem_len_pass1(args, sizeof(*args));
+
+ return 0;
+}
+
+SEC("?tc")
+int kfunc_call_test_get_mem_fail_rdonly(struct __sk_buff *skb)
+{
+ struct prog_test_ref_kfunc *pt;
+ unsigned long s = 0;
+ int *p = NULL;
+ int ret = 0;
+
+ pt = bpf_kfunc_call_test_acquire(&s);
+ if (pt) {
+ p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
+ if (p)
+ p[0] = 42; /* this is a read-only buffer, so -EACCES */
+ else
+ ret = -1;
+
+ bpf_kfunc_call_test_release(pt);
+ }
+ return ret;
+}
+
+SEC("?tc")
+int kfunc_call_test_get_mem_fail_use_after_free(struct __sk_buff *skb)
+{
+ struct prog_test_ref_kfunc *pt;
+ unsigned long s = 0;
+ int *p = NULL;
+ int ret = 0;
+
+ pt = bpf_kfunc_call_test_acquire(&s);
+ if (pt) {
+ p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int));
+ if (p) {
+ p[0] = 42;
+ ret = p[1]; /* 108 */
+ } else {
+ ret = -1;
+ }
+
+ bpf_kfunc_call_test_release(pt);
+ }
+ if (p)
+ ret = p[0]; /* p is not valid anymore */
+
+ return ret;
+}
+
+SEC("?tc")
+int kfunc_call_test_get_mem_fail_oob(struct __sk_buff *skb)
+{
+ struct prog_test_ref_kfunc *pt;
+ unsigned long s = 0;
+ int *p = NULL;
+ int ret = 0;
+
+ pt = bpf_kfunc_call_test_acquire(&s);
+ if (pt) {
+ p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
+ if (p)
+ ret = p[2 * sizeof(int)]; /* oob access, so -EACCES */
+ else
+ ret = -1;
+
+ bpf_kfunc_call_test_release(pt);
+ }
+ return ret;
+}
+
+int not_const_size = 2 * sizeof(int);
+
+SEC("?tc")
+int kfunc_call_test_get_mem_fail_not_const(struct __sk_buff *skb)
+{
+ struct prog_test_ref_kfunc *pt;
+ unsigned long s = 0;
+ int *p = NULL;
+ int ret = 0;
+
+ pt = bpf_kfunc_call_test_acquire(&s);
+ if (pt) {
+ p = bpf_kfunc_call_test_get_rdonly_mem(pt, not_const_size); /* non const size, -EINVAL */
+ if (p)
+ ret = p[0];
+ else
+ ret = -1;
+
+ bpf_kfunc_call_test_release(pt);
+ }
+ return ret;
+}
+
+SEC("?tc")
+int kfunc_call_test_mem_acquire_fail(struct __sk_buff *skb)
+{
+ struct prog_test_ref_kfunc *pt;
+ unsigned long s = 0;
+ int *p = NULL;
+ int ret = 0;
+
+ pt = bpf_kfunc_call_test_acquire(&s);
+ if (pt) {
+		/* this one fails because we are not acquiring a PTR_TO_BTF_ID (a struct ptr) */
+ p = bpf_kfunc_call_test_acq_rdonly_mem(pt, 2 * sizeof(int));
+ if (p)
+ ret = p[0];
+ else
+ ret = -1;
+
+ bpf_kfunc_call_int_mem_release(p);
+
+ bpf_kfunc_call_test_release(pt);
+ }
+ return ret;
+}
+
+SEC("?tc")
+int kfunc_call_test_pointer_arg_type_mismatch(struct __sk_buff *skb)
+{
+ bpf_kfunc_call_test_pass_ctx((void *)10);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_race.c b/tools/testing/selftests/bpf/progs/kfunc_call_race.c
new file mode 100644
index 000000000000..48f64827cd93
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_race.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+SEC("tc")
+int kfunc_call_fail(struct __sk_buff *ctx)
+{
+ bpf_testmod_test_mod_kfunc(0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
new file mode 100644
index 000000000000..8b86113a0126
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+SEC("tc")
+int kfunc_call_test4(struct __sk_buff *skb)
+{
+ struct bpf_sock *sk = skb->sk;
+ long tmp;
+
+ if (!sk)
+ return -1;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk)
+ return -1;
+
+ tmp = bpf_kfunc_call_test4(-3, -30, -200, -1000);
+ return (tmp >> 32) + tmp;
+}
+
+SEC("tc")
+int kfunc_call_test2(struct __sk_buff *skb)
+{
+ struct bpf_sock *sk = skb->sk;
+
+ if (!sk)
+ return -1;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk)
+ return -1;
+
+ return bpf_kfunc_call_test2((struct sock *)sk, 1, 2);
+}
+
+SEC("tc")
+int kfunc_call_test1(struct __sk_buff *skb)
+{
+ struct bpf_sock *sk = skb->sk;
+ __u64 a = 1ULL << 32;
+ __u32 ret;
+
+ if (!sk)
+ return -1;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk)
+ return -1;
+
+ a = bpf_kfunc_call_test1((struct sock *)sk, 1, a | 2, 3, a | 4);
+ ret = a >> 32; /* ret should be 2 */
+ ret += (__u32)a; /* ret should be 12 */
+
+ return ret;
+}
+
+SEC("tc")
+int kfunc_call_test_ref_btf_id(struct __sk_buff *skb)
+{
+ struct prog_test_ref_kfunc *pt;
+ unsigned long s = 0;
+ int ret = 0;
+
+ pt = bpf_kfunc_call_test_acquire(&s);
+ if (pt) {
+ if (pt->a != 42 || pt->b != 108)
+ ret = -1;
+ bpf_kfunc_call_test_release(pt);
+ }
+ return ret;
+}
+
+SEC("tc")
+int kfunc_call_test_pass(struct __sk_buff *skb)
+{
+ struct prog_test_pass1 p1 = {};
+ struct prog_test_pass2 p2 = {};
+ short a = 0;
+ __u64 b = 0;
+ long c = 0;
+ char d = 0;
+ int e = 0;
+
+ bpf_kfunc_call_test_pass_ctx(skb);
+ bpf_kfunc_call_test_pass1(&p1);
+ bpf_kfunc_call_test_pass2(&p2);
+
+ bpf_kfunc_call_test_mem_len_pass1(&a, sizeof(a));
+ bpf_kfunc_call_test_mem_len_pass1(&b, sizeof(b));
+ bpf_kfunc_call_test_mem_len_pass1(&c, sizeof(c));
+ bpf_kfunc_call_test_mem_len_pass1(&d, sizeof(d));
+ bpf_kfunc_call_test_mem_len_pass1(&e, sizeof(e));
+ bpf_kfunc_call_test_mem_len_fail2(&b, -1);
+
+ return 0;
+}
+
+struct syscall_test_args {
+ __u8 data[16];
+ size_t size;
+};
+
+SEC("syscall")
+int kfunc_syscall_test(struct syscall_test_args *args)
+{
+ const long size = args->size;
+
+ if (size > sizeof(args->data))
+ return -7; /* -E2BIG */
+
+ bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(args->data));
+ bpf_kfunc_call_test_mem_len_pass1(&args->data, sizeof(*args));
+ bpf_kfunc_call_test_mem_len_pass1(&args->data, size);
+
+ return 0;
+}
+
+SEC("syscall")
+int kfunc_syscall_test_null(struct syscall_test_args *args)
+{
+ /* Must be called with args as a NULL pointer.
+ * We deliberately skip the NULL check so that the verifier still
+ * considers the pointer possibly non-NULL and lets us load from it.
+ *
+ * So the following must not be added:
+ *
+ * if (args)
+ * return -22;
+ */
+
+ bpf_kfunc_call_test_mem_len_pass1(args, 0);
+
+ return 0;
+}
+
+SEC("tc")
+int kfunc_call_test_get_mem(struct __sk_buff *skb)
+{
+ struct prog_test_ref_kfunc *pt;
+ unsigned long s = 0;
+ int *p = NULL;
+ int ret = 0;
+
+ pt = bpf_kfunc_call_test_acquire(&s);
+ if (pt) {
+ p = bpf_kfunc_call_test_get_rdwr_mem(pt, 2 * sizeof(int));
+ if (p) {
+ p[0] = 42;
+ ret = p[1]; /* 108 */
+ } else {
+ ret = -1;
+ }
+
+ if (ret >= 0) {
+ p = bpf_kfunc_call_test_get_rdonly_mem(pt, 2 * sizeof(int));
+ if (p)
+ ret = p[0]; /* 42 */
+ else
+ ret = -1;
+ }
+
+ bpf_kfunc_call_test_release(pt);
+ }
+ return ret;
+}
+
+SEC("tc")
+int kfunc_call_test_static_unused_arg(struct __sk_buff *skb)
+{
+ u32 expected = 5, actual;
+
+ actual = bpf_kfunc_call_test_static_unused_arg(expected, 0xdeadbeef);
+ return actual != expected ? -1 : 0;
+}
+
+struct ctx_val {
+ struct bpf_testmod_ctx __kptr *ctx;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct ctx_val);
+} ctx_map SEC(".maps");
+
+SEC("tc")
+int kfunc_call_ctx(struct __sk_buff *skb)
+{
+ struct bpf_testmod_ctx *ctx;
+ int err = 0;
+
+ ctx = bpf_testmod_ctx_create(&err);
+ if (!ctx && !err)
+ err = -1;
+ if (ctx) {
+ int key = 0;
+ struct ctx_val *ctx_val = bpf_map_lookup_elem(&ctx_map, &key);
+
+ /* Transfer ctx to map to be freed via implicit dtor call
+ * on cleanup.
+ */
+ if (ctx_val)
+ ctx = bpf_kptr_xchg(&ctx_val->ctx, ctx);
+ if (ctx) {
+ bpf_testmod_ctx_release(ctx);
+ err = -1;
+ }
+ }
+ return err;
+}
+
+char _license[] SEC("license") = "GPL";
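
The SEC("syscall") programs above are driven from userspace through BPF_PROG_TEST_RUN, with the argument struct passed in as the program context. A hedged sketch of such a runner, mirroring the syscall_test_args layout (the wrapper name is an assumption):

#include <stddef.h>
#include <linux/types.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

struct syscall_test_args {
	__u8 data[16];
	size_t size;
};

/* Sketch: run a SEC("syscall") program with args as its context. */
static int run_syscall_prog(int prog_fd)
{
	struct syscall_test_args args = { .size = 10 };
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.ctx_in = &args,
		.ctx_size_in = sizeof(args),
	);
	int err;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	return err ?: (int)topts.retval;	/* 0 expected on success */
}
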
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
new file mode 100644
index 000000000000..8e150e85b50d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+extern const int bpf_prog_active __ksym;
+int active_res = -1;
+int sk_state_res = -1;
+
+int __noinline f1(struct __sk_buff *skb)
+{
+ struct bpf_sock *sk = skb->sk;
+ int *active;
+
+ if (!sk)
+ return -1;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk)
+ return -1;
+
+ active = (int *)bpf_per_cpu_ptr(&bpf_prog_active,
+ bpf_get_smp_processor_id());
+ if (active)
+ active_res = *active;
+
+ sk_state_res = bpf_kfunc_call_test3((struct sock *)sk)->__sk_common.skc_state;
+
+ return (__u32)bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4);
+}
+
+SEC("tc")
+int kfunc_call_test1(struct __sk_buff *skb)
+{
+ return f1(skb);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfunc_module_order.c b/tools/testing/selftests/bpf/progs/kfunc_module_order.c
new file mode 100644
index 000000000000..76003d04c95f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kfunc_module_order.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+extern int bpf_test_modorder_retx(void) __ksym;
+extern int bpf_test_modorder_rety(void) __ksym;
+
+SEC("classifier")
+int call_kfunc_xy(struct __sk_buff *skb)
+{
+ int ret1, ret2;
+
+ ret1 = bpf_test_modorder_retx();
+ ret2 = bpf_test_modorder_rety();
+
+ return ret1 == 'x' && ret2 == 'y' ? 0 : -1;
+}
+
+SEC("classifier")
+int call_kfunc_yx(struct __sk_buff *skb)
+{
+ int ret1, ret2;
+
+ ret1 = bpf_test_modorder_rety();
+ ret2 = bpf_test_modorder_retx();
+
+ return ret1 == 'y' && ret2 == 'x' ? 0 : -1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
new file mode 100644
index 000000000000..b9c8f9457492
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define SLAB_NAME_MAX 32
+
+struct kmem_cache_result {
+ char name[SLAB_NAME_MAX];
+ long obj_size;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(void *));
+ __uint(value_size, SLAB_NAME_MAX);
+ __uint(max_entries, 1);
+} slab_hash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(struct kmem_cache_result));
+ __uint(max_entries, 1024);
+} slab_result SEC(".maps");
+
+extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym;
+
+/* Result, will be checked by userspace */
+int task_struct_found;
+int kmem_cache_seen;
+int open_coded_seen;
+
+SEC("iter/kmem_cache")
+int slab_info_collector(struct bpf_iter__kmem_cache *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct kmem_cache *s = ctx->s;
+ struct kmem_cache_result *r;
+ int idx;
+
+ if (s) {
+ /* Check that the slab iterator implements the seq interface
+ * properly; this is also useful for debugging.
+ */
+ BPF_SEQ_PRINTF(seq, "%s: %u\n", s->name, s->size);
+
+ idx = kmem_cache_seen;
+ r = bpf_map_lookup_elem(&slab_result, &idx);
+ if (r == NULL)
+ return 0;
+
+ kmem_cache_seen++;
+
+ /* Save name and size to match /proc/slabinfo */
+ bpf_probe_read_kernel_str(r->name, sizeof(r->name), s->name);
+ r->obj_size = s->size;
+
+ if (!bpf_strncmp(r->name, 11, "task_struct"))
+ bpf_map_update_elem(&slab_hash, &s, r->name, BPF_NOEXIST);
+ }
+
+ return 0;
+}
+
+SEC("raw_tp/bpf_test_finish")
+int BPF_PROG(check_task_struct)
+{
+ u64 curr = bpf_get_current_task();
+ struct kmem_cache *s;
+ char *name;
+
+ s = bpf_get_kmem_cache(curr);
+ if (s == NULL) {
+ task_struct_found = -1;
+ return 0;
+ }
+ name = bpf_map_lookup_elem(&slab_hash, &s);
+ if (name && !bpf_strncmp(name, 11, "task_struct"))
+ task_struct_found = 1;
+ else
+ task_struct_found = -2;
+ return 0;
+}
+
+SEC("syscall")
+int open_coded_iter(const void *ctx)
+{
+ struct kmem_cache *s;
+
+ bpf_for_each(kmem_cache, s) {
+ struct kmem_cache_result *r;
+
+ r = bpf_map_lookup_elem(&slab_result, &open_coded_seen);
+ if (!r)
+ break;
+
+ if (r->obj_size != s->size)
+ break;
+
+ open_coded_seen++;
+ }
+ return 0;
+}
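
The iter/kmem_cache program above emits one line per slab cache via BPF_SEQ_PRINTF; userspace consumes that output by attaching the program as an iterator and reading from an iterator fd. A hedged sketch of such a consumer (error handling abbreviated):

#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Sketch: attach the iterator program and stream its seq output. */
static int dump_slab_info(struct bpf_program *prog)
{
	struct bpf_link *link;
	char buf[4096];
	ssize_t n;
	int iter_fd;

	link = bpf_program__attach_iter(prog, NULL);
	if (!link)
		return -1;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0) {
		bpf_link__destroy(link);
		return -1;
	}
	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* "<name>: <size>" lines */
	close(iter_fd);
	bpf_link__destroy(link);
	return 0;
}
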
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi.c b/tools/testing/selftests/bpf/progs/kprobe_multi.c
new file mode 100644
index 000000000000..9e1ca8e34913
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+
+char _license[] SEC("license") = "GPL";
+
+extern const void bpf_fentry_test1 __ksym;
+extern const void bpf_fentry_test2 __ksym;
+extern const void bpf_fentry_test3 __ksym;
+extern const void bpf_fentry_test4 __ksym;
+extern const void bpf_fentry_test5 __ksym;
+extern const void bpf_fentry_test6 __ksym;
+extern const void bpf_fentry_test7 __ksym;
+extern const void bpf_fentry_test8 __ksym;
+
+int pid = 0;
+bool test_cookie = false;
+
+__u64 kprobe_test1_result = 0;
+__u64 kprobe_test2_result = 0;
+__u64 kprobe_test3_result = 0;
+__u64 kprobe_test4_result = 0;
+__u64 kprobe_test5_result = 0;
+__u64 kprobe_test6_result = 0;
+__u64 kprobe_test7_result = 0;
+__u64 kprobe_test8_result = 0;
+
+__u64 kretprobe_test1_result = 0;
+__u64 kretprobe_test2_result = 0;
+__u64 kretprobe_test3_result = 0;
+__u64 kretprobe_test4_result = 0;
+__u64 kretprobe_test5_result = 0;
+__u64 kretprobe_test6_result = 0;
+__u64 kretprobe_test7_result = 0;
+__u64 kretprobe_test8_result = 0;
+
+static void kprobe_multi_check(void *ctx, bool is_return)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return;
+
+ __u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0;
+ __u64 addr = bpf_get_func_ip(ctx);
+
+#define SET(__var, __addr, __cookie) ({ \
+ if (((const void *) addr == __addr) && \
+ (!test_cookie || (cookie == __cookie))) \
+ __var = 1; \
+})
+
+ if (is_return) {
+ SET(kretprobe_test1_result, &bpf_fentry_test1, 8);
+ SET(kretprobe_test2_result, &bpf_fentry_test2, 2);
+ SET(kretprobe_test3_result, &bpf_fentry_test3, 7);
+ SET(kretprobe_test4_result, &bpf_fentry_test4, 6);
+ SET(kretprobe_test5_result, &bpf_fentry_test5, 5);
+ SET(kretprobe_test6_result, &bpf_fentry_test6, 4);
+ SET(kretprobe_test7_result, &bpf_fentry_test7, 3);
+ SET(kretprobe_test8_result, &bpf_fentry_test8, 1);
+ } else {
+ SET(kprobe_test1_result, &bpf_fentry_test1, 1);
+ SET(kprobe_test2_result, &bpf_fentry_test2, 7);
+ SET(kprobe_test3_result, &bpf_fentry_test3, 2);
+ SET(kprobe_test4_result, &bpf_fentry_test4, 3);
+ SET(kprobe_test5_result, &bpf_fentry_test5, 4);
+ SET(kprobe_test6_result, &bpf_fentry_test6, 5);
+ SET(kprobe_test7_result, &bpf_fentry_test7, 6);
+ SET(kprobe_test8_result, &bpf_fentry_test8, 8);
+ }
+
+#undef SET
+}
+
+/*
+ * No tests in here, just to trigger 'bpf_fentry_test*'
+ * through tracing test_run
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe.multi/bpf_fentry_tes??")
+int test_kprobe(struct pt_regs *ctx)
+{
+ kprobe_multi_check(ctx, false);
+ return 0;
+}
+
+SEC("kretprobe.multi/bpf_fentry_test*")
+int test_kretprobe(struct pt_regs *ctx)
+{
+ kprobe_multi_check(ctx, true);
+ return 0;
+}
+
+SEC("kprobe.multi")
+int test_kprobe_manual(struct pt_regs *ctx)
+{
+ kprobe_multi_check(ctx, false);
+ return 0;
+}
+
+SEC("kretprobe.multi")
+int test_kretprobe_manual(struct pt_regs *ctx)
+{
+ kprobe_multi_check(ctx, true);
+ return 0;
+}
+
+extern const void bpf_testmod_fentry_test1 __ksym;
+extern const void bpf_testmod_fentry_test2 __ksym;
+extern const void bpf_testmod_fentry_test3 __ksym;
+
+__u64 kprobe_testmod_test1_result = 0;
+__u64 kprobe_testmod_test2_result = 0;
+__u64 kprobe_testmod_test3_result = 0;
+
+__u64 kretprobe_testmod_test1_result = 0;
+__u64 kretprobe_testmod_test2_result = 0;
+__u64 kretprobe_testmod_test3_result = 0;
+
+static void kprobe_multi_testmod_check(void *ctx, bool is_return)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return;
+
+ __u64 addr = bpf_get_func_ip(ctx);
+
+ if (is_return) {
+ if ((const void *) addr == &bpf_testmod_fentry_test1)
+ kretprobe_testmod_test1_result = 1;
+ if ((const void *) addr == &bpf_testmod_fentry_test2)
+ kretprobe_testmod_test2_result = 1;
+ if ((const void *) addr == &bpf_testmod_fentry_test3)
+ kretprobe_testmod_test3_result = 1;
+ } else {
+ if ((const void *) addr == &bpf_testmod_fentry_test1)
+ kprobe_testmod_test1_result = 1;
+ if ((const void *) addr == &bpf_testmod_fentry_test2)
+ kprobe_testmod_test2_result = 1;
+ if ((const void *) addr == &bpf_testmod_fentry_test3)
+ kprobe_testmod_test3_result = 1;
+ }
+}
+
+SEC("kprobe.multi")
+int test_kprobe_testmod(struct pt_regs *ctx)
+{
+ kprobe_multi_testmod_check(ctx, false);
+ return 0;
+}
+
+SEC("kretprobe.multi")
+int test_kretprobe_testmod(struct pt_regs *ctx)
+{
+ kprobe_multi_testmod_check(ctx, true);
+ return 0;
+}
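
kprobe_multi_check() above verifies both the resolved function address (via bpf_get_func_ip) and the per-symbol attach cookie. Cookies are supplied at attach time; a hedged sketch of such an attach for the first two symbols (cookie values mirror the entry-probe table above):

#include <stdbool.h>
#include <linux/types.h>
#include <bpf/libbpf.h>

/* Sketch: attach a kprobe.multi program to explicit symbols with cookies. */
static struct bpf_link *attach_multi(struct bpf_program *prog, bool retprobe)
{
	const char *syms[] = { "bpf_fentry_test1", "bpf_fentry_test2" };
	__u64 cookies[] = { 1, 7 };
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
		.syms = syms,
		.cookies = cookies,
		.cnt = 2,
		.retprobe = retprobe,
	);

	return bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
}
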
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_empty.c b/tools/testing/selftests/bpf/progs/kprobe_multi_empty.c
new file mode 100644
index 000000000000..e76e499aca39
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_empty.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("kprobe.multi/")
+int test_kprobe_empty(struct pt_regs *ctx)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_override.c b/tools/testing/selftests/bpf/progs/kprobe_multi_override.c
new file mode 100644
index 000000000000..28f8487c9059
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_override.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("kprobe.multi")
+int test_override(struct pt_regs *ctx)
+{
+ bpf_override_return(ctx, 123);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_session.c b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
new file mode 100644
index 000000000000..bd8b7fb7061e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+extern const void bpf_fentry_test1 __ksym;
+extern const void bpf_fentry_test2 __ksym;
+extern const void bpf_fentry_test3 __ksym;
+extern const void bpf_fentry_test4 __ksym;
+extern const void bpf_fentry_test5 __ksym;
+extern const void bpf_fentry_test6 __ksym;
+extern const void bpf_fentry_test7 __ksym;
+extern const void bpf_fentry_test8 __ksym;
+
+int pid = 0;
+
+__u64 kprobe_session_result[8];
+
+static int session_check(void *ctx)
+{
+ unsigned int i;
+ __u64 addr;
+ const void *kfuncs[] = {
+ &bpf_fentry_test1,
+ &bpf_fentry_test2,
+ &bpf_fentry_test3,
+ &bpf_fentry_test4,
+ &bpf_fentry_test5,
+ &bpf_fentry_test6,
+ &bpf_fentry_test7,
+ &bpf_fentry_test8,
+ };
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 1;
+
+ addr = bpf_get_func_ip(ctx);
+
+ for (i = 0; i < ARRAY_SIZE(kfuncs); i++) {
+ if (kfuncs[i] == (void *) addr) {
+ kprobe_session_result[i]++;
+ break;
+ }
+ }
+
+ /*
+ * Return 1 for bpf_fentry_test[5-8] so that their return
+ * probes are not installed and executed.
+ */
+ if (((const void *) addr == &bpf_fentry_test5) ||
+ ((const void *) addr == &bpf_fentry_test6) ||
+ ((const void *) addr == &bpf_fentry_test7) ||
+ ((const void *) addr == &bpf_fentry_test8))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * No tests in here, just to trigger 'bpf_fentry_test*'
+ * through tracing test_run
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe.session/bpf_fentry_test*")
+int test_kprobe(struct pt_regs *ctx)
+{
+ return session_check(ctx);
+}
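
A kprobe.session program runs on both entry and return of each matched function, and a non-zero return from the entry invocation, as in session_check() above, suppresses the return invocation for that call. Attaching from userspace is analogous to kprobe.multi; a hedged sketch (the .session opt assumes a libbpf version recent enough to support sessions):

#include <bpf/libbpf.h>

/* Sketch: attach a kprobe.session program to a symbol pattern. */
static struct bpf_link *attach_session(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
		.session = true,	/* assumption: libbpf with session support */
	);

	return bpf_program__attach_kprobe_multi_opts(prog, "bpf_fentry_test*", &opts);
}
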
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c b/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c
new file mode 100644
index 000000000000..0835b5edf685
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+
+__u64 test_kprobe_1_result = 0;
+__u64 test_kprobe_2_result = 0;
+__u64 test_kprobe_3_result = 0;
+
+/*
+ * No tests in here, just to trigger 'bpf_fentry_test*'
+ * through tracing test_run
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+static int check_cookie(__u64 val, __u64 *result)
+{
+ __u64 *cookie;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 1;
+
+ cookie = bpf_session_cookie();
+
+ if (bpf_session_is_return())
+ *result = *cookie == val ? val : 0;
+ else
+ *cookie = val;
+ return 0;
+}
+
+SEC("kprobe.session/bpf_fentry_test1")
+int test_kprobe_1(struct pt_regs *ctx)
+{
+ return check_cookie(1, &test_kprobe_1_result);
+}
+
+SEC("kprobe.session/bpf_fentry_test1")
+int test_kprobe_2(struct pt_regs *ctx)
+{
+ return check_cookie(2, &test_kprobe_2_result);
+}
+
+SEC("kprobe.session/bpf_fentry_test1")
+int test_kprobe_3(struct pt_regs *ctx)
+{
+ return check_cookie(3, &test_kprobe_3_result);
+}
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c b/tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c
new file mode 100644
index 000000000000..288577e81deb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/usdt.bpf.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("kprobe.session")
+__success
+int kprobe_session_return_0(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe.session")
+__success
+int kprobe_session_return_1(struct pt_regs *ctx)
+{
+ return 1;
+}
+
+SEC("kprobe.session")
+__failure
+__msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]")
+int kprobe_session_return_2(struct pt_regs *ctx)
+{
+ return 2;
+}
diff --git a/tools/testing/selftests/bpf/progs/kprobe_write_ctx.c b/tools/testing/selftests/bpf/progs/kprobe_write_ctx.c
new file mode 100644
index 000000000000..f77aef0474d3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_write_ctx.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#if defined(__TARGET_ARCH_x86)
+SEC("kprobe")
+int kprobe_write_ctx(struct pt_regs *ctx)
+{
+ ctx->ax = 0;
+ return 0;
+}
+
+SEC("kprobe.multi")
+int kprobe_multi_write_ctx(struct pt_regs *ctx)
+{
+ ctx->ax = 0;
+ return 0;
+}
+#endif
diff --git a/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c
new file mode 100644
index 000000000000..2414ac20b6d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <linux/types.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct bin_data {
+ char blob[32];
+};
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+private(kptr) struct bin_data __kptr * ptr;
+
+SEC("tc")
+__naked int kptr_xchg_inline(void)
+{
+ asm volatile (
+ "r1 = %[ptr] ll;"
+ "r2 = 0;"
+ "call %[bpf_kptr_xchg];"
+ "if r0 == 0 goto 1f;"
+ "r1 = r0;"
+ "r2 = 0;"
+ "call %[bpf_obj_drop_impl];"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_addr(ptr),
+ __imm(bpf_kptr_xchg),
+ __imm(bpf_obj_drop_impl)
+ : __clobber_all
+ );
+}
+
+/* BTF FUNC records are not generated for kfuncs referenced
+ * from inline assembly. These records are necessary for
+ * libbpf to link the program. The function below is a hack
+ * to ensure that BTF FUNC records are generated.
+ */
+void __btf_root(void)
+{
+ bpf_obj_drop(NULL);
+}
diff --git a/tools/testing/selftests/bpf/progs/ksym_race.c b/tools/testing/selftests/bpf/progs/ksym_race.c
new file mode 100644
index 000000000000..def97f2fed90
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/ksym_race.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+extern int bpf_testmod_ksym_percpu __ksym;
+
+SEC("tc")
+int ksym_fail(struct __sk_buff *ctx)
+{
+ return *(int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs1.c b/tools/testing/selftests/bpf/progs/linked_funcs1.c
new file mode 100644
index 000000000000..049a1f78de3f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_funcs1.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+/* weak and shared between two files */
+const volatile __u32 my_tid __weak;
+long syscall_id __weak;
+
+int output_val1;
+int output_ctx1;
+int output_weak1;
+
+/* same "subprog" name in all files, but it's ok because they all are static */
+static __noinline int subprog(int x)
+{
+ /* but different formula */
+ return x * 1;
+}
+
+/* Global functions can't be void */
+int set_output_val1(int x)
+{
+ output_val1 = x + subprog(x);
+ return x;
+}
+
+/* This function can't be verified as global, as it assumes raw_tp/sys_enter
+ * context and accesses syscall id (second argument). So we mark it as
+ * __hidden, so that libbpf will mark it as static in the final object file,
+ * right before verifying it in the kernel.
+ *
+ * But we don't mark it as __hidden here, rather at the extern site. __hidden
+ * visibility is "contaminating": it gets propagated from either the extern or
+ * the actual definition (including from the losing __weak definition).
+ */
+void set_output_ctx1(__u64 *ctx)
+{
+ output_ctx1 = ctx[1]; /* long id, same as in BPF_PROG below */
+}
+
+/* this weak instance should win because it's the first one */
+__weak int set_output_weak(int x)
+{
+ static volatile int whatever;
+
+ /* make sure we use CO-RE relocations in a weak function, this used to
+ * cause problems for BPF static linker
+ */
+ whatever = bpf_core_type_size(struct task_struct);
+ __sink(whatever);
+
+ output_weak1 = x;
+ return x;
+}
+
+extern int set_output_val2(int x);
+
+/* here we'll force set_output_ctx2() to be __hidden in the final obj file */
+__hidden extern void set_output_ctx2(__u64 *ctx);
+
+void *bpf_cast_to_kern_ctx(void *obj) __ksym;
+
+SEC("?raw_tp/sys_enter")
+int BPF_PROG(handler1, struct pt_regs *regs, long id)
+{
+ static volatile int whatever;
+
+ if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
+ return 0;
+
+ /* make sure we have CO-RE relocations in main program */
+ whatever = bpf_core_type_size(struct task_struct);
+ __sink(whatever);
+
+ set_output_val2(1000);
+ set_output_ctx2(ctx); /* ctx definition is hidden in BPF_PROG macro */
+
+ /* keep input value the same across both files to avoid dependency on
+ * handler call order; differentiate by output_weak1 vs output_weak2.
+ */
+ set_output_weak(42);
+
+ return 0;
+}
+
+/* Generate BTF FUNC record and test linking with duplicate extern functions */
+void kfunc_gen1(void)
+{
+ bpf_cast_to_kern_ctx(0);
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs2.c b/tools/testing/selftests/bpf/progs/linked_funcs2.c
new file mode 100644
index 000000000000..96850759fd8d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_funcs2.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+/* weak and shared between both files */
+const volatile int my_tid __weak;
+long syscall_id __weak;
+
+int output_val2;
+int output_ctx2;
+int output_weak2; /* should stay zero */
+
+/* same "subprog" name in all files, but it's ok because they all are static */
+static __noinline int subprog(int x)
+{
+ /* but different formula */
+ return x * 2;
+}
+
+/* Global functions can't be void */
+int set_output_val2(int x)
+{
+ output_val2 = 2 * x + 2 * subprog(x);
+ return 2 * x;
+}
+
+/* This function can't be verified as global, as it assumes raw_tp/sys_enter
+ * context and accesses syscall id (second argument). So we mark it as
+ * __hidden, so that libbpf will mark it as static in the final object file,
+ * right before verifying it in the kernel.
+ *
+ * But we don't mark it as __hidden here, rather at the extern site. __hidden
+ * visibility is "contaminating": it gets propagated from either the extern or
+ * the actual definition (including from the losing __weak definition).
+ */
+void set_output_ctx2(__u64 *ctx)
+{
+ output_ctx2 = ctx[1]; /* long id, same as in BPF_PROG below */
+}
+
+/* this weak instance should lose, because it will be processed second */
+__weak int set_output_weak(int x)
+{
+ static volatile int whatever;
+
+ /* make sure we use CO-RE relocations in a weak function, this used to
+ * cause problems for BPF static linker
+ */
+ whatever = 2 * bpf_core_type_size(struct task_struct);
+ __sink(whatever);
+
+ output_weak2 = x;
+ return 2 * x;
+}
+
+extern int set_output_val1(int x);
+
+/* here we'll force set_output_ctx1() to be __hidden in the final obj file */
+__hidden extern void set_output_ctx1(__u64 *ctx);
+
+void *bpf_cast_to_kern_ctx(void *obj) __ksym;
+
+SEC("?raw_tp/sys_enter")
+int BPF_PROG(handler2, struct pt_regs *regs, long id)
+{
+ static volatile int whatever;
+
+ if (my_tid != (s32)bpf_get_current_pid_tgid() || id != syscall_id)
+ return 0;
+
+ /* make sure we have CO-RE relocations in main program */
+ whatever = bpf_core_type_size(struct task_struct);
+ __sink(whatever);
+
+ set_output_val1(2000);
+ set_output_ctx1(ctx); /* ctx definition is hidden in BPF_PROG macro */
+
+ /* keep input value the same across both files to avoid dependency on
+ * handler call order; differentiate by output_weak1 vs output_weak2.
+ */
+ set_output_weak(42);
+
+ return 0;
+}
+
+/* Generate BTF FUNC record and test linking with duplicate extern functions */
+void kfunc_gen2(void)
+{
+ bpf_cast_to_kern_ctx(0);
+}
+
+char LICENSE[] SEC("license") = "GPL";
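
linked_funcs1.c and linked_funcs2.c only become meaningful once they are statically linked into a single object, at which point the __weak/__hidden resolution described in the comments above takes place. A hedged sketch of that step using libbpf's linker API (the file names are assumptions):

#include <bpf/libbpf.h>

/* Sketch: statically link the two objects before loading. */
static int link_objects(void)
{
	struct bpf_linker *linker;
	int err;

	linker = bpf_linker__new("linked_funcs.bpf.o", NULL);
	if (!linker)
		return -1;
	err = bpf_linker__add_file(linker, "linked_funcs1.bpf.o", NULL);
	if (!err)
		err = bpf_linker__add_file(linker, "linked_funcs2.bpf.o", NULL);
	if (!err)
		err = bpf_linker__finalize(linker);
	bpf_linker__free(linker);
	return err;
}
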
diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c
new file mode 100644
index 000000000000..421f40835acd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_list.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+#include "linked_list.h"
+
+struct head_nested_inner {
+ struct bpf_spin_lock lock;
+ struct bpf_list_head head __contains(foo, node2);
+};
+
+struct head_nested {
+ int dummy;
+ struct head_nested_inner inner;
+};
+
+private(C) struct bpf_spin_lock glock_c;
+private(C) struct bpf_list_head ghead_array[2] __contains(foo, node2);
+private(C) struct bpf_list_head ghead_array_one[1] __contains(foo, node2);
+
+private(D) struct head_nested ghead_nested;
+
+static __always_inline
+int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
+{
+ struct bpf_list_node *n;
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 2;
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node2));
+ bpf_obj_drop(f);
+ return 3;
+ }
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_back(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node2));
+ bpf_obj_drop(f);
+ return 4;
+ }
+
+
+ bpf_spin_lock(lock);
+ f->data = 42;
+ bpf_list_push_front(head, &f->node2);
+ bpf_spin_unlock(lock);
+ if (leave_in_map)
+ return 0;
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_back(head);
+ bpf_spin_unlock(lock);
+ if (!n)
+ return 5;
+ f = container_of(n, struct foo, node2);
+ if (f->data != 42) {
+ bpf_obj_drop(f);
+ return 6;
+ }
+
+ bpf_spin_lock(lock);
+ f->data = 13;
+ bpf_list_push_front(head, &f->node2);
+ bpf_spin_unlock(lock);
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (!n)
+ return 7;
+ f = container_of(n, struct foo, node2);
+ if (f->data != 13) {
+ bpf_obj_drop(f);
+ return 8;
+ }
+ bpf_obj_drop(f);
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node2));
+ return 9;
+ }
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_back(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node2));
+ return 10;
+ }
+ return 0;
+}
+
+
+static __always_inline
+int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
+{
+ struct bpf_list_node *n;
+ struct foo *f[200], *pf;
+ int i;
+
+ /* The loop following this check adds nodes two at a time in order
+ * to validate the release_on_unlock logic for multiple releases.
+ */
+ if (ARRAY_SIZE(f) % 2)
+ return 10;
+
+ for (i = 0; i < ARRAY_SIZE(f); i += 2) {
+ f[i] = bpf_obj_new(typeof(**f));
+ if (!f[i])
+ return 2;
+ f[i]->data = i;
+
+ f[i + 1] = bpf_obj_new(typeof(**f));
+ if (!f[i + 1]) {
+ bpf_obj_drop(f[i]);
+ return 9;
+ }
+ f[i + 1]->data = i + 1;
+
+ bpf_spin_lock(lock);
+ bpf_list_push_front(head, &f[i]->node2);
+ bpf_list_push_front(head, &f[i + 1]->node2);
+ bpf_spin_unlock(lock);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(f); i++) {
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (!n)
+ return 3;
+ pf = container_of(n, struct foo, node2);
+ if (pf->data != (ARRAY_SIZE(f) - i - 1)) {
+ bpf_obj_drop(pf);
+ return 4;
+ }
+ bpf_spin_lock(lock);
+ bpf_list_push_back(head, &pf->node2);
+ bpf_spin_unlock(lock);
+ }
+
+ if (leave_in_map)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(f); i++) {
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_back(head);
+ bpf_spin_unlock(lock);
+ if (!n)
+ return 5;
+ pf = container_of(n, struct foo, node2);
+ if (pf->data != i) {
+ bpf_obj_drop(pf);
+ return 6;
+ }
+ bpf_obj_drop(pf);
+ }
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_back(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node2));
+ return 7;
+ }
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (n) {
+ bpf_obj_drop(container_of(n, struct foo, node2));
+ return 8;
+ }
+ return 0;
+}
+
+static __always_inline
+int list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
+{
+ struct bpf_list_node *n;
+ struct bar *ba[8], *b;
+ struct foo *f;
+ int i;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 2;
+ for (i = 0; i < ARRAY_SIZE(ba); i++) {
+ b = bpf_obj_new(typeof(*b));
+ if (!b) {
+ bpf_obj_drop(f);
+ return 3;
+ }
+ b->data = i;
+ bpf_spin_lock(&f->lock);
+ bpf_list_push_back(&f->head, &b->node);
+ bpf_spin_unlock(&f->lock);
+ }
+
+ bpf_spin_lock(lock);
+ f->data = 42;
+ bpf_list_push_front(head, &f->node2);
+ bpf_spin_unlock(lock);
+
+ if (leave_in_map)
+ return 0;
+
+ bpf_spin_lock(lock);
+ n = bpf_list_pop_front(head);
+ bpf_spin_unlock(lock);
+ if (!n)
+ return 4;
+ f = container_of(n, struct foo, node2);
+ if (f->data != 42) {
+ bpf_obj_drop(f);
+ return 5;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ba); i++) {
+ bpf_spin_lock(&f->lock);
+ n = bpf_list_pop_front(&f->head);
+ bpf_spin_unlock(&f->lock);
+ if (!n) {
+ bpf_obj_drop(f);
+ return 6;
+ }
+ b = container_of(n, struct bar, node);
+ if (b->data != i) {
+ bpf_obj_drop(f);
+ bpf_obj_drop(b);
+ return 7;
+ }
+ bpf_obj_drop(b);
+ }
+ bpf_spin_lock(&f->lock);
+ n = bpf_list_pop_front(&f->head);
+ bpf_spin_unlock(&f->lock);
+ if (n) {
+ bpf_obj_drop(f);
+ bpf_obj_drop(container_of(n, struct bar, node));
+ return 8;
+ }
+ bpf_obj_drop(f);
+ return 0;
+}
+
+static __always_inline
+int test_list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head)
+{
+ int ret;
+
+ ret = list_push_pop(lock, head, false);
+ if (ret)
+ return ret;
+ return list_push_pop(lock, head, true);
+}
+
+static __always_inline
+int test_list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head)
+{
+ int ret;
+
+ ret = list_push_pop_multiple(lock, head, false);
+ if (ret)
+ return ret;
+ return list_push_pop_multiple(lock, head, true);
+}
+
+static __always_inline
+int test_list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head)
+{
+ int ret;
+
+ ret = list_in_list(lock, head, false);
+ if (ret)
+ return ret;
+ return list_in_list(lock, head, true);
+}
+
+SEC("tc")
+int map_list_push_pop(void *ctx)
+{
+ struct map_value *v;
+
+ v = bpf_map_lookup_elem(&array_map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_push_pop(&v->lock, &v->head);
+}
+
+SEC("tc")
+int inner_map_list_push_pop(void *ctx)
+{
+ struct map_value *v;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
+ if (!map)
+ return 1;
+ v = bpf_map_lookup_elem(map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_push_pop(&v->lock, &v->head);
+}
+
+SEC("tc")
+int global_list_push_pop(void *ctx)
+{
+ return test_list_push_pop(&glock, &ghead);
+}
+
+SEC("tc")
+int global_list_push_pop_nested(void *ctx)
+{
+ return test_list_push_pop(&ghead_nested.inner.lock, &ghead_nested.inner.head);
+}
+
+SEC("tc")
+int global_list_array_push_pop(void *ctx)
+{
+ int r;
+
+ r = test_list_push_pop(&glock_c, &ghead_array[0]);
+ if (r)
+ return r;
+
+ r = test_list_push_pop(&glock_c, &ghead_array[1]);
+ if (r)
+ return r;
+
+ /* An array with only one element is a special case: the verifier
+ * treats it just like a plain bpf_list_head variable rather than
+ * an array.
+ */
+ return test_list_push_pop(&glock_c, &ghead_array_one[0]);
+}
+
+SEC("tc")
+int map_list_push_pop_multiple(void *ctx)
+{
+ struct map_value *v;
+
+ v = bpf_map_lookup_elem(&array_map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_push_pop_multiple(&v->lock, &v->head);
+}
+
+SEC("tc")
+int inner_map_list_push_pop_multiple(void *ctx)
+{
+ struct map_value *v;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
+ if (!map)
+ return 1;
+ v = bpf_map_lookup_elem(map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_push_pop_multiple(&v->lock, &v->head);
+}
+
+SEC("tc")
+int global_list_push_pop_multiple(void *ctx)
+{
+ int ret;
+
+ ret = list_push_pop_multiple(&glock, &ghead, false);
+ if (ret)
+ return ret;
+ return list_push_pop_multiple(&glock, &ghead, true);
+}
+
+SEC("tc")
+int map_list_in_list(void *ctx)
+{
+ struct map_value *v;
+
+ v = bpf_map_lookup_elem(&array_map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_in_list(&v->lock, &v->head);
+}
+
+SEC("tc")
+int inner_map_list_in_list(void *ctx)
+{
+ struct map_value *v;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
+ if (!map)
+ return 1;
+ v = bpf_map_lookup_elem(map, &(int){0});
+ if (!v)
+ return 1;
+ return test_list_in_list(&v->lock, &v->head);
+}
+
+SEC("tc")
+int global_list_in_list(void *ctx)
+{
+ return test_list_in_list(&glock, &ghead);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_list.h b/tools/testing/selftests/bpf/progs/linked_list.h
new file mode 100644
index 000000000000..c0f3609a7ffa
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_list.h
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef LINKED_LIST_H
+#define LINKED_LIST_H
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_experimental.h"
+
+struct bar {
+ struct bpf_list_node node;
+ int data;
+};
+
+struct foo {
+ struct bpf_list_node node;
+ struct bpf_list_head head __contains(bar, node);
+ struct bpf_spin_lock lock;
+ int data;
+ struct bpf_list_node node2;
+};
+
+struct map_value {
+ struct bpf_spin_lock lock;
+ int data;
+ struct bpf_list_head head __contains(foo, node2);
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+};
+
+struct array_map array_map SEC(".maps");
+struct array_map inner_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct array_map);
+} map_of_maps SEC(".maps") = {
+ .values = {
+ [0] = &inner_map,
+ },
+};
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_list_head ghead __contains(foo, node2);
+private(B) struct bpf_spin_lock glock2;
+
+#endif
diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c
new file mode 100644
index 000000000000..ddd26d1a083f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+#include "linked_list.h"
+
+#define INIT \
+ struct map_value *v, *v2, *iv, *iv2; \
+ struct foo *f, *f1, *f2; \
+ struct bar *b; \
+ void *map; \
+ \
+ map = bpf_map_lookup_elem(&map_of_maps, &(int){ 0 }); \
+ if (!map) \
+ return 0; \
+ v = bpf_map_lookup_elem(&array_map, &(int){ 0 }); \
+ if (!v) \
+ return 0; \
+ v2 = bpf_map_lookup_elem(&array_map, &(int){ 0 }); \
+ if (!v2) \
+ return 0; \
+ iv = bpf_map_lookup_elem(map, &(int){ 0 }); \
+ if (!iv) \
+ return 0; \
+ iv2 = bpf_map_lookup_elem(map, &(int){ 0 }); \
+ if (!iv2) \
+ return 0; \
+ f = bpf_obj_new(typeof(*f)); \
+ if (!f) \
+ return 0; \
+ f1 = f; \
+ f2 = bpf_obj_new(typeof(*f2)); \
+ if (!f2) { \
+ bpf_obj_drop(f1); \
+ return 0; \
+ } \
+ b = bpf_obj_new(typeof(*b)); \
+ if (!b) { \
+ bpf_obj_drop(f2); \
+ bpf_obj_drop(f1); \
+ return 0; \
+ }
+
+#define CHECK(test, op, hexpr) \
+ SEC("?tc") \
+ int test##_missing_lock_##op(void *ctx) \
+ { \
+ INIT; \
+ void (*p)(void *) = (void *)&bpf_list_##op; \
+ p(hexpr); \
+ return 0; \
+ }
+
+CHECK(kptr, pop_front, &f->head);
+CHECK(kptr, pop_back, &f->head);
+
+CHECK(global, pop_front, &ghead);
+CHECK(global, pop_back, &ghead);
+
+CHECK(map, pop_front, &v->head);
+CHECK(map, pop_back, &v->head);
+
+CHECK(inner_map, pop_front, &iv->head);
+CHECK(inner_map, pop_back, &iv->head);
+
+#undef CHECK
+
+#define CHECK(test, op, hexpr, nexpr) \
+ SEC("?tc") \
+ int test##_missing_lock_##op(void *ctx) \
+ { \
+ INIT; \
+ bpf_list_##op(hexpr, nexpr); \
+ return 0; \
+ }
+
+CHECK(kptr, push_front, &f->head, &b->node);
+CHECK(kptr, push_back, &f->head, &b->node);
+
+CHECK(global, push_front, &ghead, &f->node2);
+CHECK(global, push_back, &ghead, &f->node2);
+
+CHECK(map, push_front, &v->head, &f->node2);
+CHECK(map, push_back, &v->head, &f->node2);
+
+CHECK(inner_map, push_front, &iv->head, &f->node2);
+CHECK(inner_map, push_back, &iv->head, &f->node2);
+
+#undef CHECK
+
+#define CHECK(test, op, lexpr, hexpr) \
+ SEC("?tc") \
+ int test##_incorrect_lock_##op(void *ctx) \
+ { \
+ INIT; \
+ void (*p)(void *) = (void *)&bpf_list_##op; \
+ bpf_spin_lock(lexpr); \
+ p(hexpr); \
+ return 0; \
+ }
+
+#define CHECK_OP(op) \
+ CHECK(kptr_kptr, op, &f1->lock, &f2->head); \
+ CHECK(kptr_global, op, &f1->lock, &ghead); \
+ CHECK(kptr_map, op, &f1->lock, &v->head); \
+ CHECK(kptr_inner_map, op, &f1->lock, &iv->head); \
+ \
+ CHECK(global_global, op, &glock2, &ghead); \
+ CHECK(global_kptr, op, &glock, &f1->head); \
+ CHECK(global_map, op, &glock, &v->head); \
+ CHECK(global_inner_map, op, &glock, &iv->head); \
+ \
+ CHECK(map_map, op, &v->lock, &v2->head); \
+ CHECK(map_kptr, op, &v->lock, &f2->head); \
+ CHECK(map_global, op, &v->lock, &ghead); \
+ CHECK(map_inner_map, op, &v->lock, &iv->head); \
+ \
+ CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head); \
+ CHECK(inner_map_kptr, op, &iv->lock, &f2->head); \
+ CHECK(inner_map_global, op, &iv->lock, &ghead); \
+ CHECK(inner_map_map, op, &iv->lock, &v->head);
+
+CHECK_OP(pop_front);
+CHECK_OP(pop_back);
+
+#undef CHECK
+#undef CHECK_OP
+
+#define CHECK(test, op, lexpr, hexpr, nexpr) \
+ SEC("?tc") \
+ int test##_incorrect_lock_##op(void *ctx) \
+ { \
+ INIT; \
+ bpf_spin_lock(lexpr); \
+ bpf_list_##op(hexpr, nexpr); \
+ return 0; \
+ }
+
+#define CHECK_OP(op) \
+ CHECK(kptr_kptr, op, &f1->lock, &f2->head, &b->node); \
+ CHECK(kptr_global, op, &f1->lock, &ghead, &f->node2); \
+ CHECK(kptr_map, op, &f1->lock, &v->head, &f->node2); \
+ CHECK(kptr_inner_map, op, &f1->lock, &iv->head, &f->node2); \
+ \
+ CHECK(global_global, op, &glock2, &ghead, &f->node2); \
+ CHECK(global_kptr, op, &glock, &f1->head, &b->node); \
+ CHECK(global_map, op, &glock, &v->head, &f->node2); \
+ CHECK(global_inner_map, op, &glock, &iv->head, &f->node2); \
+ \
+ CHECK(map_map, op, &v->lock, &v2->head, &f->node2); \
+ CHECK(map_kptr, op, &v->lock, &f2->head, &b->node); \
+ CHECK(map_global, op, &v->lock, &ghead, &f->node2); \
+ CHECK(map_inner_map, op, &v->lock, &iv->head, &f->node2); \
+ \
+ CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head, &f->node2);\
+ CHECK(inner_map_kptr, op, &iv->lock, &f2->head, &b->node); \
+ CHECK(inner_map_global, op, &iv->lock, &ghead, &f->node2); \
+ CHECK(inner_map_map, op, &iv->lock, &v->head, &f->node2);
+
+CHECK_OP(push_front);
+CHECK_OP(push_back);
+
+#undef CHECK
+#undef CHECK_OP
+#undef INIT
+
+SEC("?kprobe/xyz")
+int map_compat_kprobe(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?kretprobe/xyz")
+int map_compat_kretprobe(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?tracepoint/xyz")
+int map_compat_tp(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?perf_event")
+int map_compat_perf(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?raw_tp/xyz")
+int map_compat_raw_tp(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?raw_tp.w/xyz")
+int map_compat_raw_tp_w(void *ctx)
+{
+ bpf_list_push_front(&ghead, NULL);
+ return 0;
+}
+
+SEC("?tc")
+int obj_type_id_oor(void *ctx)
+{
+ bpf_obj_new_impl(~0UL, NULL);
+ return 0;
+}
+
+SEC("?tc")
+int obj_new_no_composite(void *ctx)
+{
+ bpf_obj_new_impl(bpf_core_type_id_local(int), (void *)42);
+ return 0;
+}
+
+SEC("?tc")
+int obj_new_no_struct(void *ctx)
+{
+ (void)bpf_obj_new(union { int data; unsigned udata; });
+ return 0;
+}
+
+SEC("?tc")
+int obj_drop_non_zero_off(void *ctx)
+{
+ void *f;
+
+ f = bpf_obj_new(struct foo);
+ if (!f)
+ return 0;
+ bpf_obj_drop(f+1);
+ return 0;
+}
+
+SEC("?tc")
+int new_null_ret(void *ctx)
+{
+ return bpf_obj_new(struct foo)->data;
+}
+
+SEC("?tc")
+int obj_new_acq(void *ctx)
+{
+ (void)bpf_obj_new(struct foo);
+ return 0;
+}
+
+SEC("?tc")
+int use_after_drop(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_obj_drop(f);
+ return f->data;
+}
+
+SEC("?tc")
+int ptr_walk_scalar(void *ctx)
+{
+ struct test1 {
+ struct test2 {
+ struct test2 *next;
+ } *ptr;
+ } *p;
+
+ p = bpf_obj_new(typeof(*p));
+ if (!p)
+ return 0;
+ bpf_this_cpu_ptr(p->ptr);
+ return 0;
+}
+
+SEC("?tc")
+int direct_read_lock(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ return *(int *)&f->lock;
+}
+
+SEC("?tc")
+int direct_write_lock(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ *(int *)&f->lock = 0;
+ return 0;
+}
+
+SEC("?tc")
+int direct_read_head(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ return *(int *)&f->head;
+}
+
+SEC("?tc")
+int direct_write_head(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ *(int *)&f->head = 0;
+ return 0;
+}
+
+SEC("?tc")
+int direct_read_node(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ return *(int *)&f->node2;
+}
+
+SEC("?tc")
+int direct_write_node(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ *(int *)&f->node2 = 0;
+ return 0;
+}
+
+static __always_inline
+int use_after_unlock(bool push_front)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ f->data = 42;
+ if (push_front)
+ bpf_list_push_front(&ghead, &f->node2);
+ else
+ bpf_list_push_back(&ghead, &f->node2);
+ bpf_spin_unlock(&glock);
+
+ return f->data;
+}
+
+SEC("?tc")
+int use_after_unlock_push_front(void *ctx)
+{
+ return use_after_unlock(true);
+}
+
+SEC("?tc")
+int use_after_unlock_push_back(void *ctx)
+{
+ return use_after_unlock(false);
+}
+
+static __always_inline
+int list_double_add(bool push_front)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ if (push_front) {
+ bpf_list_push_front(&ghead, &f->node2);
+ bpf_list_push_front(&ghead, &f->node2);
+ } else {
+ bpf_list_push_back(&ghead, &f->node2);
+ bpf_list_push_back(&ghead, &f->node2);
+ }
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int double_push_front(void *ctx)
+{
+ return list_double_add(true);
+}
+
+SEC("?tc")
+int double_push_back(void *ctx)
+{
+ return list_double_add(false);
+}
+
+SEC("?tc")
+int no_node_value_type(void *ctx)
+{
+ void *p;
+
+ p = bpf_obj_new(struct { int data; });
+ if (!p)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(&ghead, p);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_value_type(void *ctx)
+{
+ struct bar *b;
+
+ b = bpf_obj_new(typeof(*b));
+ if (!b)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(&ghead, &b->node);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_node_var_off(struct __sk_buff *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(&ghead, (void *)&f->node2 + ctx->protocol);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_node_off1(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(&ghead, (void *)&f->node2 + 1);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_node_off2(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(&ghead, &f->node);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int no_head_type(void *ctx)
+{
+ void *p;
+
+ p = bpf_obj_new(typeof(struct { int data; }));
+ if (!p)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front(p, NULL);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_head_var_off1(struct __sk_buff *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front((void *)&ghead + ctx->protocol, &f->node2);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_head_var_off2(struct __sk_buff *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_spin_lock(&glock);
+ bpf_list_push_front((void *)&f->head + ctx->protocol, &f->node2);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_head_off1(void *ctx)
+{
+ struct foo *f;
+ struct bar *b;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ b = bpf_obj_new(typeof(*b));
+ if (!b) {
+ bpf_obj_drop(f);
+ return 0;
+ }
+
+ bpf_spin_lock(&f->lock);
+ bpf_list_push_front((void *)&f->head + 1, &b->node);
+ bpf_spin_unlock(&f->lock);
+
+ return 0;
+}
+
+SEC("?tc")
+int incorrect_head_off2(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+
+ bpf_spin_lock(&glock);
+ bpf_list_push_front((void *)&ghead + 1, &f->node2);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
+static __always_inline
+int pop_ptr_off(void *(*op)(void *head))
+{
+ struct {
+ struct bpf_list_head head __contains(foo, node2);
+ struct bpf_spin_lock lock;
+ } *p;
+ struct bpf_list_node *n;
+
+ p = bpf_obj_new(typeof(*p));
+ if (!p)
+ return 0;
+ bpf_spin_lock(&p->lock);
+ n = op(&p->head);
+ bpf_spin_unlock(&p->lock);
+
+ if (!n)
+ return 0;
+ bpf_spin_lock((void *)n);
+ return 0;
+}
+
+SEC("?tc")
+int pop_front_off(void *ctx)
+{
+ return pop_ptr_off((void *)bpf_list_pop_front);
+}
+
+SEC("?tc")
+int pop_back_off(void *ctx)
+{
+ return pop_ptr_off((void *)bpf_list_pop_back);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_list_peek.c b/tools/testing/selftests/bpf/progs/linked_list_peek.c
new file mode 100644
index 000000000000..264e81bfb287
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_list_peek.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct node_data {
+ struct bpf_list_node l;
+ int key;
+};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_list_head ghead __contains(node_data, l);
+
+#define list_entry(ptr, type, member) container_of(ptr, type, member)
+#define NR_NODES 16
+
+int zero = 0;
+
+SEC("syscall")
+__retval(0)
+long list_peek(void *ctx)
+{
+ struct bpf_list_node *l_n;
+ struct node_data *n;
+ int i, err = 0;
+
+ bpf_spin_lock(&glock);
+ l_n = bpf_list_front(&ghead);
+ bpf_spin_unlock(&glock);
+ if (l_n)
+ return __LINE__;
+
+ bpf_spin_lock(&glock);
+ l_n = bpf_list_back(&ghead);
+ bpf_spin_unlock(&glock);
+ if (l_n)
+ return __LINE__;
+
+ for (i = zero; i < NR_NODES && can_loop; i++) {
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return __LINE__;
+ n->key = i;
+ bpf_spin_lock(&glock);
+ bpf_list_push_back(&ghead, &n->l);
+ bpf_spin_unlock(&glock);
+ }
+
+ bpf_spin_lock(&glock);
+
+ l_n = bpf_list_front(&ghead);
+ if (!l_n) {
+ err = __LINE__;
+ goto done;
+ }
+
+ n = list_entry(l_n, struct node_data, l);
+ if (n->key != 0) {
+ err = __LINE__;
+ goto done;
+ }
+
+ l_n = bpf_list_back(&ghead);
+ if (!l_n) {
+ err = __LINE__;
+ goto done;
+ }
+
+ n = list_entry(l_n, struct node_data, l);
+ if (n->key != NR_NODES - 1) {
+ err = __LINE__;
+ goto done;
+ }
+
+done:
+ bpf_spin_unlock(&glock);
+ return err;
+}
+
+#define TEST_FB(op, dolock) \
+SEC("syscall") \
+__failure __msg(MSG) \
+long test_##op##_spinlock_##dolock(void *ctx) \
+{ \
+ struct bpf_list_node *l_n; \
+ __u64 jiffies = 0; \
+ \
+ if (dolock) \
+ bpf_spin_lock(&glock); \
+ l_n = bpf_list_##op(&ghead); \
+ if (l_n) \
+ jiffies = bpf_jiffies64(); \
+ if (dolock) \
+ bpf_spin_unlock(&glock); \
+ \
+ return !!jiffies; \
+}
+
+#define MSG "call bpf_list_{{(front|back).+}}; R0{{(_w)?}}=ptr_or_null_node_data(id={{[0-9]+}},non_own_ref"
+TEST_FB(front, true)
+TEST_FB(back, true)
+#undef MSG
+
+#define MSG "bpf_spin_lock at off=0 must be held for bpf_list_head"
+TEST_FB(front, false)
+TEST_FB(back, false)
+#undef MSG
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_maps1.c b/tools/testing/selftests/bpf/progs/linked_maps1.c
new file mode 100644
index 000000000000..00bf1ca95986
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_maps1.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct my_key { long x; };
+struct my_value { long x; };
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct my_key);
+ __type(value, struct my_value);
+ __uint(max_entries, 16);
+} map1 SEC(".maps");
+
+ /* Matches map2 definition in linked_maps2.c. Order of the attributes doesn't
+ * matter.
+ */
+typedef struct {
+ __uint(max_entries, 8);
+ __type(key, int);
+ __type(value, int);
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+} map2_t;
+
+extern map2_t map2 SEC(".maps");
+
+/* This should be the winning map definition, but we have no way of verifying,
+ * so we just make sure that it links and works without errors
+ */
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 16);
+} map_weak __weak SEC(".maps");
+
+int output_first1;
+int output_second1;
+int output_weak1;
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handler_enter1)
+{
+ /* update values with key = 1 */
+ int key = 1, val = 1;
+ struct my_key key_struct = { .x = 1 };
+ struct my_value val_struct = { .x = 1000 };
+
+ bpf_map_update_elem(&map1, &key_struct, &val_struct, 0);
+ bpf_map_update_elem(&map2, &key, &val, 0);
+ bpf_map_update_elem(&map_weak, &key, &val, 0);
+
+ return 0;
+}
+
+SEC("raw_tp/sys_exit")
+int BPF_PROG(handler_exit1)
+{
+ /* lookup values with key = 2, set in another file */
+ int key = 2, *val;
+ struct my_key key_struct = { .x = 2 };
+ struct my_value *value_struct;
+
+ value_struct = bpf_map_lookup_elem(&map1, &key_struct);
+ if (value_struct)
+ output_first1 = value_struct->x;
+
+ val = bpf_map_lookup_elem(&map2, &key);
+ if (val)
+ output_second1 = *val;
+
+ val = bpf_map_lookup_elem(&map_weak, &key);
+ if (val)
+ output_weak1 = *val;
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_maps2.c b/tools/testing/selftests/bpf/progs/linked_maps2.c
new file mode 100644
index 000000000000..0693687474ed
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_maps2.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* modifiers and typedefs are ignored when comparing key/value types */
+typedef struct my_key { long x; } key_type;
+typedef struct my_value { long x; } value_type;
+
+extern struct {
+ __uint(max_entries, 16);
+ __type(key, key_type);
+ __type(value, value_type);
+ __uint(type, BPF_MAP_TYPE_HASH);
+} map1 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 8);
+} map2 SEC(".maps");
+
+/* this definition will lose, but it has to exactly match the winner */
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 16);
+} map_weak __weak SEC(".maps");
+
+int output_first2;
+int output_second2;
+int output_weak2;
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handler_enter2)
+{
+ /* update values with key = 2 */
+ int key = 2, val = 2;
+ key_type key_struct = { .x = 2 };
+ value_type val_struct = { .x = 2000 };
+
+ bpf_map_update_elem(&map1, &key_struct, &val_struct, 0);
+ bpf_map_update_elem(&map2, &key, &val, 0);
+ bpf_map_update_elem(&map_weak, &key, &val, 0);
+
+ return 0;
+}
+
+SEC("raw_tp/sys_exit")
+int BPF_PROG(handler_exit2)
+{
+ /* lookup values with key = 1, set in another file */
+ int key = 1, *val;
+ key_type key_struct = { .x = 1 };
+ value_type *value_struct;
+
+ value_struct = bpf_map_lookup_elem(&map1, &key_struct);
+ if (value_struct)
+ output_first2 = value_struct->x;
+
+ val = bpf_map_lookup_elem(&map2, &key);
+ if (val)
+ output_second2 = *val;
+
+ val = bpf_map_lookup_elem(&map_weak, &key);
+ if (val)
+ output_weak2 = *val;
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_vars1.c b/tools/testing/selftests/bpf/progs/linked_vars1.c
new file mode 100644
index 000000000000..ef9e9d0bb0ca
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_vars1.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+extern int LINUX_KERNEL_VERSION __kconfig;
+/* this weak extern will be strict due to the other file's strong extern */
+extern bool CONFIG_BPF_SYSCALL __kconfig __weak;
+extern const void bpf_link_fops __ksym __weak;
+
+int input_bss1;
+int input_data1 = 1;
+const volatile int input_rodata1 = 11;
+
+int input_bss_weak __weak;
+/* these two definitions should win */
+int input_data_weak __weak = 10;
+const volatile int input_rodata_weak __weak = 100;
+
+extern int input_bss2;
+extern int input_data2;
+extern const int input_rodata2;
+
+int output_bss1;
+int output_data1;
+int output_rodata1;
+
+long output_sink1;
+
+static __noinline int get_bss_res(void)
+{
+ /* just make sure all the relocations work against .text as well */
+ return input_bss1 + input_bss2 + input_bss_weak;
+}
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handler1)
+{
+ output_bss1 = get_bss_res();
+ output_data1 = input_data1 + input_data2 + input_data_weak;
+ output_rodata1 = input_rodata1 + input_rodata2 + input_rodata_weak;
+
+ /* make sure we actually use the special externs above, otherwise the
+ * compiler will optimize them out
+ */
+ output_sink1 = LINUX_KERNEL_VERSION
+ + CONFIG_BPF_SYSCALL
+ + (long)&bpf_link_fops;
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
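linked_vars2.c below supplies the strong CONFIG_BPF_SYSCALL extern and the losing weak definitions. A sketch of how the resolved values could be checked from userspace, again assuming a generated linked_vars.skel.h (skeleton names are assumptions):

	#include <assert.h>
	#include <unistd.h>
	#include "linked_vars.skel.h"	/* assumed generated skeleton */

	static void check_linked_vars(void)
	{
		struct linked_vars *skel = linked_vars__open_and_load();

		assert(skel);
		skel->bss->input_bss1 = 100;	/* .bss is mmap'd and writable */
		skel->bss->input_bss2 = 200;
		assert(!linked_vars__attach(skel));

		getpgid(0);	/* fires raw_tp/sys_enter */

		assert(skel->bss->output_bss1 == 300);	  /* 100 + 200 + 0 */
		assert(skel->bss->output_data1 == 13);	  /* 1 + 2 + 10, weak 10 wins */
		assert(skel->bss->output_rodata1 == 133); /* 11 + 22 + 100 */
		linked_vars__destroy(skel);
	}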
diff --git a/tools/testing/selftests/bpf/progs/linked_vars2.c b/tools/testing/selftests/bpf/progs/linked_vars2.c
new file mode 100644
index 000000000000..e4f5bd388a3c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_vars2.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+extern int LINUX_KERNEL_VERSION __kconfig;
+/* when an extern is defined as both strong and weak, the resulting symbol will be strong */
+extern bool CONFIG_BPF_SYSCALL __kconfig;
+extern const void __start_BTF __ksym;
+
+int input_bss2;
+int input_data2 = 2;
+const volatile int input_rodata2 = 22;
+
+int input_bss_weak __weak;
+/* these two weak variables should lose */
+int input_data_weak __weak = 20;
+const volatile int input_rodata_weak __weak = 200;
+
+extern int input_bss1;
+extern int input_data1;
+extern const int input_rodata1;
+
+int output_bss2;
+int output_data2;
+int output_rodata2;
+
+int output_sink2;
+
+static __noinline int get_data_res(void)
+{
+ /* just make sure all the relocations work against .text as well */
+ return input_data1 + input_data2 + input_data_weak;
+}
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handler2)
+{
+ output_bss2 = input_bss1 + input_bss2 + input_bss_weak;
+ output_data2 = get_data_res();
+ output_rodata2 = input_rodata1 + input_rodata2 + input_rodata_weak;
+
+ /* make sure we actually use above special externs, otherwise compiler
+ * will optimize them out
+ */
+ output_sink2 = LINUX_KERNEL_VERSION
+ + CONFIG_BPF_SYSCALL
+ + (long)&__start_BTF;
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/livepatch_trampoline.c b/tools/testing/selftests/bpf/progs/livepatch_trampoline.c
new file mode 100644
index 000000000000..15579d5bcd91
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/livepatch_trampoline.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int fentry_hit;
+int fexit_hit;
+int my_pid;
+
+SEC("fentry/cmdline_proc_show")
+int BPF_PROG(fentry_cmdline)
+{
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ fentry_hit = 1;
+ return 0;
+}
+
+SEC("fexit/cmdline_proc_show")
+int BPF_PROG(fexit_cmdline)
+{
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ fexit_hit = 1;
+ return 0;
+}
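cmdline_proc_show() backs /proc/cmdline and is the function patched by the in-tree livepatch sample, so this pair checks that fentry/fexit trampolines coexist with a livepatch on the same function. A userspace sketch, assuming a generated livepatch_trampoline.skel.h (skeleton names are assumptions):

	#include <fcntl.h>
	#include <unistd.h>
	#include "livepatch_trampoline.skel.h"	/* assumed generated skeleton */

	static int check_trampoline(void)
	{
		struct livepatch_trampoline *skel;
		char buf[128];
		int fd, ok = 0;

		skel = livepatch_trampoline__open_and_load();
		if (!skel || livepatch_trampoline__attach(skel))
			goto out;
		skel->bss->my_pid = getpid();

		/* reading /proc/cmdline invokes cmdline_proc_show() */
		fd = open("/proc/cmdline", O_RDONLY);
		read(fd, buf, sizeof(buf));
		close(fd);

		ok = skel->bss->fentry_hit && skel->bss->fexit_hit;
	out:
		livepatch_trampoline__destroy(skel);
		return ok ? 0 : -1;
	}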
diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
new file mode 100644
index 000000000000..d736506a4c80
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "../bpf_experimental.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+struct plain_local;
+
+struct node_data {
+ long key;
+ long data;
+ struct plain_local __kptr * stashed_in_local_kptr;
+ struct bpf_rb_node node;
+};
+
+struct refcounted_node {
+ long data;
+ struct bpf_rb_node rb_node;
+ struct bpf_refcount refcount;
+};
+
+struct stash {
+ struct bpf_spin_lock l;
+ struct refcounted_node __kptr *stashed;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct stash);
+ __uint(max_entries, 10);
+} refcounted_node_stash SEC(".maps");
+
+struct plain_local {
+ long key;
+ long data;
+};
+
+struct local_with_root {
+ long key;
+ struct bpf_spin_lock l;
+ struct bpf_rb_root r __contains(node_data, node);
+};
+
+struct map_value {
+ struct prog_test_ref_kfunc *not_kptr;
+ struct prog_test_ref_kfunc __kptr *val;
+ struct node_data __kptr *node;
+ struct plain_local __kptr *plain;
+ struct local_with_root __kptr *local_root;
+};
+
+/* This is necessary so that LLVM generates BTF for the node_data struct.
+ * If it's not included, a fwd reference for node_data will be generated but
+ * no struct. Example BTF of "node" field in map_value when not included:
+ *
+ * [10] PTR '(anon)' type_id=35
+ * [34] FWD 'node_data' fwd_kind=struct
+ * [35] TYPE_TAG 'kptr_ref' type_id=34
+ *
+ * (with no node_data struct defined)
+ * Had to do the same with bpf_kfunc_call_test_release below.
+ */
+struct node_data *just_here_because_btf_bug;
+struct refcounted_node *just_here_because_btf_bug2;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 2);
+} some_nodes SEC(".maps");
+
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+
+ return node_a->key < node_b->key;
+}
+
+static int create_and_stash(int idx, int val)
+{
+ struct plain_local *inner_local_kptr;
+ struct map_value *mapval;
+ struct node_data *res;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ inner_local_kptr = bpf_obj_new(typeof(*inner_local_kptr));
+ if (!inner_local_kptr)
+ return 2;
+
+ res = bpf_obj_new(typeof(*res));
+ if (!res) {
+ bpf_obj_drop(inner_local_kptr);
+ return 3;
+ }
+ res->key = val;
+
+ inner_local_kptr = bpf_kptr_xchg(&res->stashed_in_local_kptr, inner_local_kptr);
+ if (inner_local_kptr) {
+ /* Should never happen, we just obj_new'd res */
+ bpf_obj_drop(inner_local_kptr);
+ bpf_obj_drop(res);
+ return 4;
+ }
+
+ res = bpf_kptr_xchg(&mapval->node, res);
+ if (res)
+ bpf_obj_drop(res);
+ return 0;
+}
+
+SEC("tc")
+long stash_rb_nodes(void *ctx)
+{
+ return create_and_stash(0, 41) ?: create_and_stash(1, 42);
+}
+
+SEC("tc")
+long stash_plain(void *ctx)
+{
+ struct map_value *mapval;
+ struct plain_local *res;
+ int idx = 0;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ res = bpf_obj_new(typeof(*res));
+ if (!res)
+ return 1;
+ res->key = 41;
+
+ res = bpf_kptr_xchg(&mapval->plain, res);
+ if (res)
+ bpf_obj_drop(res);
+ return 0;
+}
+
+SEC("tc")
+long stash_local_with_root(void *ctx)
+{
+ struct local_with_root *res;
+ struct map_value *mapval;
+ struct node_data *n;
+ int idx = 0;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ res = bpf_obj_new(typeof(*res));
+ if (!res)
+ return 2;
+ res->key = 41;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n) {
+ bpf_obj_drop(res);
+ return 3;
+ }
+
+ bpf_spin_lock(&res->l);
+ bpf_rbtree_add(&res->r, &n->node, less);
+ bpf_spin_unlock(&res->l);
+
+ res = bpf_kptr_xchg(&mapval->local_root, res);
+ if (res) {
+ bpf_obj_drop(res);
+ return 4;
+ }
+ return 0;
+}
+
+SEC("tc")
+long unstash_rb_node(void *ctx)
+{
+ struct plain_local *inner_local_kptr = NULL;
+ struct map_value *mapval;
+ struct node_data *res;
+ long retval;
+ int key = 1;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &key);
+ if (!mapval)
+ return 1;
+
+ res = bpf_kptr_xchg(&mapval->node, NULL);
+ if (res) {
+ inner_local_kptr = bpf_kptr_xchg(&res->stashed_in_local_kptr, inner_local_kptr);
+ if (!inner_local_kptr) {
+ bpf_obj_drop(res);
+ return 1;
+ }
+ bpf_obj_drop(inner_local_kptr);
+
+ retval = res->key;
+ bpf_obj_drop(res);
+ return retval;
+ }
+ return 1;
+}
+
+SEC("tc")
+long stash_test_ref_kfunc(void *ctx)
+{
+ struct prog_test_ref_kfunc *res;
+ struct map_value *mapval;
+ int key = 0;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &key);
+ if (!mapval)
+ return 1;
+
+ res = bpf_kptr_xchg(&mapval->val, NULL);
+ if (res)
+ bpf_kfunc_call_test_release(res);
+ return 0;
+}
+
+SEC("tc")
+long refcount_acquire_without_unstash(void *ctx)
+{
+ struct refcounted_node *p;
+ struct stash *s;
+ int ret = 0;
+
+ s = bpf_map_lookup_elem(&refcounted_node_stash, &ret);
+ if (!s)
+ return 1;
+
+ if (!s->stashed)
+ /* refcount_acquire failure is expected when no refcounted_node
+ * has been stashed before this program executes
+ */
+ return 2;
+
+ p = bpf_refcount_acquire(s->stashed);
+ if (!p)
+ return 3;
+
+ ret = s->stashed ? s->stashed->data : -1;
+ bpf_obj_drop(p);
+ return ret;
+}
+
+/* Helper for refcount_acquire_without_unstash test */
+SEC("tc")
+long stash_refcounted_node(void *ctx)
+{
+ struct refcounted_node *p;
+ struct stash *s;
+ int key = 0;
+
+ s = bpf_map_lookup_elem(&refcounted_node_stash, &key);
+ if (!s)
+ return 1;
+
+ p = bpf_obj_new(typeof(*p));
+ if (!p)
+ return 2;
+ p->data = 42;
+
+ p = bpf_kptr_xchg(&s->stashed, p);
+ if (p) {
+ bpf_obj_drop(p);
+ return 3;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
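These tc programs are meant to be run directly rather than attached to an interface; a sketch using the BPF_PROG_TEST_RUN machinery (the pkt_v4 packet stub mirrors the selftests' network_helpers and is otherwise an assumption):

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,		/* minimal Ethernet/IPv4 stub */
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	/* stash nodes at idx 0 (key 41) and idx 1 (key 42)... */
	bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_rb_nodes), &topts);
	/* ...then unstash idx 1; the prog returns the stashed key */
	bpf_prog_test_run_opts(bpf_program__fd(skel->progs.unstash_rb_node), &topts);
	/* topts.retval == 42 expected here */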
diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c b/tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c
new file mode 100644
index 000000000000..fcf7a7567da2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "../bpf_experimental.h"
+#include "bpf_misc.h"
+
+struct node_data {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+};
+
+struct map_value {
+ struct node_data __kptr *node;
+};
+
+struct node_data2 {
+ long key[4];
+};
+
+/* This is necessary so that LLVM generates BTF for the node_data struct.
+ * If it's not included, a fwd reference for node_data will be generated but
+ * no struct. Example BTF of "node" field in map_value when not included:
+ *
+ * [10] PTR '(anon)' type_id=35
+ * [34] FWD 'node_data' fwd_kind=struct
+ * [35] TYPE_TAG 'kptr_ref' type_id=34
+ */
+struct node_data *just_here_because_btf_bug;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 2);
+} some_nodes SEC(".maps");
+
+SEC("tc")
+__failure __msg("invalid kptr access, R2 type=ptr_node_data2 expected=ptr_node_data")
+long stash_rb_nodes(void *ctx)
+{
+ struct map_value *mapval;
+ struct node_data2 *res;
+ int idx = 0;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ res = bpf_obj_new(typeof(*res));
+ if (!res)
+ return 1;
+ res->key[0] = 40;
+
+ res = bpf_kptr_xchg(&mapval->node, res);
+ if (res)
+ bpf_obj_drop(res);
+ return 0;
+}
+
+SEC("tc")
+__failure __msg("R1 must have zero offset when passed to release func")
+long drop_rb_node_off(void *ctx)
+{
+ struct map_value *mapval;
+ struct node_data *res;
+ int idx = 0;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ res = bpf_obj_new(typeof(*res));
+ if (!res)
+ return 1;
+ /* Try releasing with graph node offset */
+ bpf_obj_drop(&res->node);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
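The __failure/__msg annotations above are consumed by the selftests' test_loader harness, which loads each program expecting a verifier rejection and greps the log for the given message. Wiring the file up is typically just (a sketch, assuming the usual generated skeleton):

	#include "local_kptr_stash_fail.skel.h"	/* assumed generated skeleton */

	void test_local_kptr_stash_fail(void)
	{
		RUN_TESTS(local_kptr_stash_fail);
	}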
diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c
index 3e3de130f28f..637e75df2e14 100644
--- a/tools/testing/selftests/bpf/progs/local_storage.c
+++ b/tools/testing/selftests/bpf/progs/local_storage.c
@@ -13,14 +13,14 @@ char _license[] SEC("license") = "GPL";
#define DUMMY_STORAGE_VALUE 0xdeadbeef
-int monitored_pid = 0;
+__u32 monitored_pid = 0;
int inode_storage_result = -1;
int sk_storage_result = -1;
+int task_storage_result = -1;
struct local_storage {
struct inode *exec_inode;
__u32 value;
- struct bpf_spin_lock lock;
};
struct {
@@ -38,138 +38,191 @@ struct {
} sk_storage_map SEC(".maps");
struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
+ __type(key, int);
+ __type(value, struct local_storage);
+} sk_storage_map2 SEC(".maps");
+
+struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct local_storage);
} task_storage_map SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct local_storage);
+} task_storage_map2 SEC(".maps");
+
SEC("lsm/inode_unlink")
int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
+ struct bpf_local_storage *local_storage;
struct local_storage *storage;
+ struct task_struct *task;
bool is_self_unlink;
- int err;
if (pid != monitored_pid)
return 0;
- storage = bpf_task_storage_get(&task_storage_map,
- bpf_get_current_task_btf(), 0, 0);
- if (storage) {
- /* Don't let an executable delete itself */
- bpf_spin_lock(&storage->lock);
- is_self_unlink = storage->exec_inode == victim->d_inode;
- bpf_spin_unlock(&storage->lock);
- if (is_self_unlink)
- return -EPERM;
- }
-
- storage = bpf_inode_storage_get(&inode_storage_map, victim->d_inode, 0,
- BPF_LOCAL_STORAGE_GET_F_CREATE);
+ task = bpf_get_current_task_btf();
+ if (!task)
+ return 0;
+
+ task_storage_result = -1;
+
+ storage = bpf_task_storage_get(&task_storage_map, task, 0, 0);
if (!storage)
return 0;
- bpf_spin_lock(&storage->lock);
- if (storage->value != DUMMY_STORAGE_VALUE)
- inode_storage_result = -1;
- bpf_spin_unlock(&storage->lock);
+ /* Don't let an executable delete itself */
+ is_self_unlink = storage->exec_inode == victim->d_inode;
- err = bpf_inode_storage_delete(&inode_storage_map, victim->d_inode);
- if (!err)
- inode_storage_result = err;
+ storage = bpf_task_storage_get(&task_storage_map2, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!storage || storage->value)
+ return 0;
- return 0;
+ if (bpf_task_storage_delete(&task_storage_map, task))
+ return 0;
+
+ /* Ensure that the task_storage_map is disconnected from the storage.
+ * The storage memory should not be freed back to the
+ * bpf_mem_alloc.
+ */
+ local_storage = task->bpf_storage;
+ if (!local_storage || local_storage->smap)
+ return 0;
+
+ task_storage_result = 0;
+
+ return is_self_unlink ? -EPERM : 0;
}
-SEC("lsm/socket_bind")
-int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
- int addrlen)
+SEC("lsm.s/inode_rename")
+int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
{
- __u32 pid = bpf_get_current_pid_tgid() >> 32;
struct local_storage *storage;
int err;
- if (pid != monitored_pid)
- return 0;
+ /* new_dentry->d_inode can be NULL when renaming to a target that did
+ * not exist before. The helper should be able to handle this NULL
+ * pointer.
+ */
+ bpf_inode_storage_get(&inode_storage_map, new_dentry->d_inode, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
- storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0,
- BPF_LOCAL_STORAGE_GET_F_CREATE);
+ storage = bpf_inode_storage_get(&inode_storage_map, old_dentry->d_inode,
+ 0, 0);
if (!storage)
return 0;
- bpf_spin_lock(&storage->lock);
if (storage->value != DUMMY_STORAGE_VALUE)
- sk_storage_result = -1;
- bpf_spin_unlock(&storage->lock);
+ inode_storage_result = -1;
- err = bpf_sk_storage_delete(&sk_storage_map, sock->sk);
+ err = bpf_inode_storage_delete(&inode_storage_map, old_dentry->d_inode);
if (!err)
- sk_storage_result = err;
+ inode_storage_result = err;
return 0;
}
-SEC("lsm/socket_post_create")
-int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
- int protocol, int kern)
+SEC("lsm.s/socket_bind")
+int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
+ int addrlen)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
struct local_storage *storage;
+ struct sock *sk = sock->sk;
- if (pid != monitored_pid)
+ if (pid != monitored_pid || !sk)
+ return 0;
+
+ storage = bpf_sk_storage_get(&sk_storage_map, sk, 0, 0);
+ if (!storage)
return 0;
- storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0,
+ sk_storage_result = -1;
+ if (storage->value != DUMMY_STORAGE_VALUE)
+ return 0;
+
+ /* This tests that we can associate multiple elements
+ * with the local storage.
+ */
+ storage = bpf_sk_storage_get(&sk_storage_map2, sk, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!storage)
return 0;
- bpf_spin_lock(&storage->lock);
- storage->value = DUMMY_STORAGE_VALUE;
- bpf_spin_unlock(&storage->lock);
+ if (bpf_sk_storage_delete(&sk_storage_map2, sk))
+ return 0;
+
+ storage = bpf_sk_storage_get(&sk_storage_map2, sk, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 0;
+ if (bpf_sk_storage_delete(&sk_storage_map, sk))
+ return 0;
+
+ /* Ensure that the sk_storage_map is disconnected from the storage. */
+ if (!sk->sk_bpf_storage || sk->sk_bpf_storage->smap)
+ return 0;
+
+ sk_storage_result = 0;
return 0;
}
-SEC("lsm/file_open")
-int BPF_PROG(file_open, struct file *file)
+SEC("lsm.s/socket_post_create")
+int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
+ int protocol, int kern)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
struct local_storage *storage;
+ struct sock *sk = sock->sk;
- if (pid != monitored_pid)
- return 0;
-
- if (!file->f_inode)
+ if (pid != monitored_pid || !sk)
return 0;
- storage = bpf_inode_storage_get(&inode_storage_map, file->f_inode, 0,
- BPF_LOCAL_STORAGE_GET_F_CREATE);
+ storage = bpf_sk_storage_get(&sk_storage_map, sk, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!storage)
return 0;
- bpf_spin_lock(&storage->lock);
storage->value = DUMMY_STORAGE_VALUE;
- bpf_spin_unlock(&storage->lock);
+
return 0;
}
/* This uses the local storage to remember the inode of the binary that a
* process was originally executing.
*/
-SEC("lsm/bprm_committed_creds")
+SEC("lsm.s/bprm_committed_creds")
void BPF_PROG(exec, struct linux_binprm *bprm)
{
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
struct local_storage *storage;
+ if (pid != monitored_pid)
+ return;
+
storage = bpf_task_storage_get(&task_storage_map,
bpf_get_current_task_btf(), 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
- if (storage) {
- bpf_spin_lock(&storage->lock);
+ if (storage)
storage->exec_inode = bprm->file->f_inode;
- bpf_spin_unlock(&storage->lock);
- }
+
+ storage = bpf_inode_storage_get(&inode_storage_map, bprm->file->f_inode,
+ 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return;
+
+ storage->value = DUMMY_STORAGE_VALUE;
}
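A sketch of the socket leg of this test from userspace: socket() drives lsm.s/socket_post_create, which seeds the sk storage, and bind() drives lsm.s/socket_bind, which verifies it and clears sk_storage_result on success (the skeleton name is an assumption):

	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	int fd;

	skel->bss->monitored_pid = getpid();
	fd = socket(AF_INET, SOCK_STREAM, 0);
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	close(fd);
	/* skel->bss->sk_storage_result == 0 expected here */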
diff --git a/tools/testing/selftests/bpf/progs/local_storage_bench.c b/tools/testing/selftests/bpf/progs/local_storage_bench.c
new file mode 100644
index 000000000000..2c3234c5b73a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/local_storage_bench.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define HASHMAP_SZ 4194304
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1000);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+ });
+} array_of_local_storage_maps SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1000);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, HASHMAP_SZ);
+ __type(key, int);
+ __type(value, int);
+ });
+} array_of_hash_maps SEC(".maps");
+
+long important_hits;
+long hits;
+
+/* set from user-space */
+const volatile unsigned int use_hashmap;
+const volatile unsigned int hashmap_num_keys;
+const volatile unsigned int num_maps;
+const volatile unsigned int interleave;
+
+struct loop_ctx {
+ struct task_struct *task;
+ long loop_hits;
+ long loop_important_hits;
+};
+
+static int do_lookup(unsigned int elem, struct loop_ctx *lctx)
+{
+ void *map, *inner_map;
+ int idx = 0;
+
+ if (use_hashmap)
+ map = &array_of_hash_maps;
+ else
+ map = &array_of_local_storage_maps;
+
+ inner_map = bpf_map_lookup_elem(map, &elem);
+ if (!inner_map)
+ return -1;
+
+ if (use_hashmap) {
+ idx = bpf_get_prandom_u32() % hashmap_num_keys;
+ bpf_map_lookup_elem(inner_map, &idx);
+ } else {
+ bpf_task_storage_get(inner_map, lctx->task, &idx,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ }
+
+ lctx->loop_hits++;
+ if (!elem)
+ lctx->loop_important_hits++;
+ return 0;
+}
+
+static long loop(u32 index, void *ctx)
+{
+ struct loop_ctx *lctx = (struct loop_ctx *)ctx;
+ unsigned int map_idx = index % num_maps;
+
+ do_lookup(map_idx, lctx);
+ if (interleave && map_idx % 3 == 0)
+ do_lookup(0, lctx);
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int get_local(void *ctx)
+{
+ struct loop_ctx lctx;
+
+ lctx.task = bpf_get_current_task_btf();
+ lctx.loop_hits = 0;
+ lctx.loop_important_hits = 0;
+ bpf_loop(10000, &loop, &lctx, 0);
+ __sync_add_and_fetch(&hits, lctx.loop_hits);
+ __sync_add_and_fetch(&important_hits, lctx.loop_important_hits);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
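hits and important_hits are cumulative across bpf_loop() batches and updated with atomics, so the bench side samples them and reports per-interval deltas, roughly as follows (prev_hits, prev_imp and interval_sec are the sampler's own state, assumed here):

	long h = __atomic_load_n(&skel->bss->hits, __ATOMIC_RELAXED);
	long imp = __atomic_load_n(&skel->bss->important_hits, __ATOMIC_RELAXED);

	printf("%.1fk ops/s (%.1fk important)\n",
	       (h - prev_hits) / 1000.0 / interval_sec,
	       (imp - prev_imp) / 1000.0 / interval_sec);
	prev_hits = h;
	prev_imp = imp;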
diff --git a/tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c b/tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c
new file mode 100644
index 000000000000..03bf69f49075
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} task_storage SEC(".maps");
+
+long hits;
+long gp_hits;
+long gp_times;
+long current_gp_start;
+long unexpected;
+bool postgp_seen;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int get_local(void *ctx)
+{
+ struct task_struct *task;
+ int idx;
+ int *s;
+
+ idx = 0;
+ task = bpf_get_current_task_btf();
+ s = bpf_task_storage_get(&task_storage, task, &idx,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!s)
+ return 0;
+
+ *s = 3;
+ bpf_task_storage_delete(&task_storage, task);
+ __sync_add_and_fetch(&hits, 1);
+ return 0;
+}
+
+SEC("fentry/rcu_tasks_trace_pregp_step")
+int pregp_step(struct pt_regs *ctx)
+{
+ current_gp_start = bpf_ktime_get_ns();
+ return 0;
+}
+
+SEC("fentry/rcu_tasks_trace_postgp")
+int postgp(struct pt_regs *ctx)
+{
+ if (!current_gp_start && postgp_seen) {
+ /* Will only happen if the prog tracing rcu_tasks_trace_pregp_step
+ * didn't execute before this prog
+ */
+ __sync_add_and_fetch(&unexpected, 1);
+ return 0;
+ }
+
+ __sync_add_and_fetch(&gp_times, bpf_ktime_get_ns() - current_gp_start);
+ __sync_add_and_fetch(&gp_hits, 1);
+ current_gp_start = 0;
+ postgp_seen = true;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
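gp_times accumulates nanoseconds across completed grace periods and gp_hits counts them, so the mean tasks-trace grace-period latency falls out directly (sketch):

	/* mean grace-period latency in milliseconds */
	double avg_gp_ms = gp_hits ? (double)gp_times / gp_hits / 1e6 : 0;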
diff --git a/tools/testing/selftests/bpf/progs/loop1.c b/tools/testing/selftests/bpf/progs/loop1.c
index 50e66772c046..b0fa26fb4760 100644
--- a/tools/testing/selftests/bpf/progs/loop1.c
+++ b/tools/testing/selftests/bpf/progs/loop1.c
@@ -1,11 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <stdint.h>
-#include <stddef.h>
-#include <stdbool.h>
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/loop2.c b/tools/testing/selftests/bpf/progs/loop2.c
index 947bb7e988c2..0227409d4b0e 100644
--- a/tools/testing/selftests/bpf/progs/loop2.c
+++ b/tools/testing/selftests/bpf/progs/loop2.c
@@ -1,11 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <stdint.h>
-#include <stddef.h>
-#include <stdbool.h>
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/loop3.c b/tools/testing/selftests/bpf/progs/loop3.c
index 76e93b31c14b..5d1c9a775e6b 100644
--- a/tools/testing/selftests/bpf/progs/loop3.c
+++ b/tools/testing/selftests/bpf/progs/loop3.c
@@ -1,20 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <stdint.h>
-#include <stddef.h>
-#include <stdbool.h>
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
SEC("raw_tracepoint/consume_skb")
-int while_true(volatile struct pt_regs* ctx)
+int while_true(struct pt_regs *ctx)
{
- __u64 i = 0, sum = 0;
+ volatile __u64 i = 0, sum = 0;
do {
i++;
sum += PT_REGS_RC(ctx);
diff --git a/tools/testing/selftests/bpf/progs/loop4.c b/tools/testing/selftests/bpf/progs/loop4.c
index b35337926d66..0de0357f57cc 100644
--- a/tools/testing/selftests/bpf/progs/loop4.c
+++ b/tools/testing/selftests/bpf/progs/loop4.c
@@ -3,6 +3,8 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
char _license[] SEC("license") = "GPL";
SEC("socket")
@@ -10,7 +12,7 @@ int combinations(volatile struct __sk_buff* skb)
{
int ret = 0, i;
-#pragma nounroll
+ __pragma_loop_no_unroll
for (i = 0; i < 20; i++)
if (skb->len)
ret |= 1 << i;
diff --git a/tools/testing/selftests/bpf/progs/loop5.c b/tools/testing/selftests/bpf/progs/loop5.c
index 913791923fa3..1b13f37f85ec 100644
--- a/tools/testing/selftests/bpf/progs/loop5.c
+++ b/tools/testing/selftests/bpf/progs/loop5.c
@@ -2,7 +2,6 @@
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#define barrier() __asm__ __volatile__("": : :"memory")
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/loop6.c b/tools/testing/selftests/bpf/progs/loop6.c
new file mode 100644
index 000000000000..dd36aff4fba3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/loop6.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* typically virtio-scsi has at most 6 SGs */
+#define VIRTIO_MAX_SGS 6
+
+/* The verifier will fail with SG_MAX = 128. The failure can be
+ * worked around with a smaller SG_MAX, e.g. 10.
+ */
+#define WORKAROUND
+#ifdef WORKAROUND
+#define SG_MAX 10
+#else
+/* typically virtio-blk has at most 128 segments */
+#define SG_MAX 128
+#endif
+
+#define SG_CHAIN 0x01UL
+#define SG_END 0x02UL
+
+#define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN)
+#define sg_is_last(sg) ((sg)->page_link & SG_END)
+#define sg_chain_ptr(sg) \
+ ((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END)))
+
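+/* __sg_next() below mirrors the kernel's sg_next() walk, but every
+ * dereference goes through bpf_probe_read_kernel(), since a kprobe
+ * only hands the program raw kernel pointers.
+ */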
+static inline struct scatterlist *__sg_next(struct scatterlist *sgp)
+{
+ struct scatterlist sg;
+
+ bpf_probe_read_kernel(&sg, sizeof(sg), sgp);
+ if (sg_is_last(&sg))
+ return NULL;
+
+ sgp++;
+
+ bpf_probe_read_kernel(&sg, sizeof(sg), sgp);
+ if (sg_is_chain(&sg))
+ sgp = sg_chain_ptr(&sg);
+
+ return sgp;
+}
+
+static inline struct scatterlist *get_sgp(struct scatterlist **sgs, int i)
+{
+ struct scatterlist *sgp;
+
+ bpf_probe_read_kernel(&sgp, sizeof(sgp), sgs + i);
+ return sgp;
+}
+
+int run_once = 0;
+int result = 0;
+
+SEC("kprobe/virtqueue_add_sgs")
+int BPF_KPROBE(trace_virtqueue_add_sgs, void *unused, struct scatterlist **sgs,
+ unsigned int out_sgs, unsigned int in_sgs)
+{
+ struct scatterlist *sgp = NULL;
+ __u64 length1 = 0, length2 = 0;
+ unsigned int i, n, len;
+
+ if (run_once != 0)
+ return 0;
+
+ for (i = 0; (i < VIRTIO_MAX_SGS) && (i < out_sgs); i++) {
+ __sink(out_sgs);
+ for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX);
+ sgp = __sg_next(sgp)) {
+ len = BPF_CORE_READ(sgp, length);
+ length1 += len;
+ n++;
+ }
+ }
+
+ for (i = 0; (i < VIRTIO_MAX_SGS) && (i < in_sgs); i++) {
+ __sink(in_sgs);
+ for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX);
+ sgp = __sg_next(sgp)) {
+ len = BPF_CORE_READ(sgp, length);
+ length2 += len;
+ n++;
+ }
+ }
+
+ run_once = 1;
+ result = length2 - length1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/lpm_trie.h b/tools/testing/selftests/bpf/progs/lpm_trie.h
new file mode 100644
index 000000000000..76aa5821807f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lpm_trie.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __PROGS_LPM_TRIE_H
+#define __PROGS_LPM_TRIE_H
+
+struct trie_key {
+ __u32 prefixlen;
+ __u32 data;
+};
+
+/* Benchmark operations */
+enum {
+ LPM_OP_NOOP = 0,
+ LPM_OP_BASELINE,
+ LPM_OP_LOOKUP,
+ LPM_OP_INSERT,
+ LPM_OP_UPDATE,
+ LPM_OP_DELETE,
+ LPM_OP_FREE
+};
+
+/*
+ * Return values from run_bench.
+ *
+ * Negative values are also allowed and represent kernel error codes.
+ */
+#define LPM_BENCH_SUCCESS 0
+#define LPM_BENCH_REINIT_MAP 1 /* Reset trie to initial state for current op */
+
+#endif
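The return-value contract exists because one run of the benchmark program can stop early: LPM_BENCH_REINIT_MAP asks userspace to restore the trie before the next run, and negative values are kernel error codes. A hypothetical consumer loop (trigger_run_bench() is a made-up stand-in for however the prog gets run; fill_map() is the refill helper referenced from lpm_trie_bench.c):

	for (;;) {
		int ret = trigger_run_bench();	/* hypothetical trigger */

		if (ret == LPM_BENCH_REINIT_MAP)
			fill_map();		/* restore the trie, go again */
		else if (ret < 0)
			errx(1, "bench op failed: %d", ret);	/* <err.h> */
		/* LPM_BENCH_SUCCESS: keep looping */
	}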
diff --git a/tools/testing/selftests/bpf/progs/lpm_trie_bench.c b/tools/testing/selftests/bpf/progs/lpm_trie_bench.c
new file mode 100644
index 000000000000..a0e6ebd5507a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lpm_trie_bench.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Cloudflare */
+
+#include <vmlinux.h>
+#include <errno.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+#include "bpf_atomic.h"
+#include "progs/lpm_trie.h"
+
+#define BPF_OBJ_NAME_LEN 16U
+#define MAX_ENTRIES 100000000
+#define NR_LOOPS 10000
+
+char _license[] SEC("license") = "GPL";
+
+/* Filled by userspace. See fill_map() in bench_lpm_trie_map.c */
+struct {
+ __uint(type, BPF_MAP_TYPE_LPM_TRIE);
+ __type(key, struct trie_key);
+ __type(value, __u32);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __uint(max_entries, MAX_ENTRIES);
+} trie_map SEC(".maps");
+
+long hits;
+long duration_ns;
+
+/* Configured from userspace */
+__u32 nr_entries;
+__u32 prefixlen;
+bool random;
+__u8 op;
+
+static __u64 latency_free_start;
+
+SEC("fentry/bpf_map_free_deferred")
+int BPF_PROG(trie_free_entry, struct work_struct *work)
+{
+ struct bpf_map *map = container_of(work, struct bpf_map, work);
+ char name[BPF_OBJ_NAME_LEN];
+ u32 map_type;
+
+ map_type = BPF_CORE_READ(map, map_type);
+ if (map_type != BPF_MAP_TYPE_LPM_TRIE)
+ return 0;
+
+ /*
+ * Ideally we'd have access to the map ID but that's already
+ * freed before we enter trie_free().
+ */
+ BPF_CORE_READ_STR_INTO(&name, map, name);
+ if (bpf_strncmp(name, BPF_OBJ_NAME_LEN, "trie_free_map"))
+ return 0;
+
+ latency_free_start = bpf_ktime_get_ns();
+
+ return 0;
+}
+
+SEC("fexit/bpf_map_free_deferred")
+int BPF_PROG(trie_free_exit, struct work_struct *work)
+{
+ __u64 val;
+
+ if (!latency_free_start)
+ return 0;
+
+ val = bpf_ktime_get_ns() - latency_free_start;
+ latency_free_start = 0;
+
+ __sync_add_and_fetch(&duration_ns, val);
+ __sync_add_and_fetch(&hits, 1);
+
+ return 0;
+}
+
+static __u32 cur_key;
+
+static __always_inline void generate_key(struct trie_key *key)
+{
+ key->prefixlen = prefixlen;
+
+ if (random)
+ key->data = bpf_get_prandom_u32() % nr_entries;
+ else
+ key->data = cur_key++ % nr_entries;
+}
+
+static int noop(__u32 index, __u32 *unused)
+{
+ return 0;
+}
+
+static int baseline(__u32 index, __u32 *unused)
+{
+ struct trie_key key;
+ __u32 blackbox = 0;
+
+ generate_key(&key);
+ /* Avoid compiler optimizing out the modulo */
+ barrier_var(blackbox);
+ blackbox = READ_ONCE(key.data);
+
+ return 0;
+}
+
+static int lookup(__u32 index, int *retval)
+{
+ struct trie_key key;
+
+ generate_key(&key);
+ if (!bpf_map_lookup_elem(&trie_map, &key)) {
+ *retval = -ENOENT;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int insert(__u32 index, int *retval)
+{
+ struct trie_key key;
+ u32 val = 1;
+ int err;
+
+ generate_key(&key);
+ err = bpf_map_update_elem(&trie_map, &key, &val, BPF_NOEXIST);
+ if (err) {
+ *retval = err;
+ return 1;
+ }
+
+ /* Is this the last entry? */
+ if (key.data == nr_entries - 1) {
+ /* For atomicity concerns, see the comment in delete() */
+ *retval = LPM_BENCH_REINIT_MAP;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int update(__u32 index, int *retval)
+{
+ struct trie_key key;
+ u32 val = 1;
+ int err;
+
+ generate_key(&key);
+ err = bpf_map_update_elem(&trie_map, &key, &val, BPF_EXIST);
+ if (err) {
+ *retval = err;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int delete(__u32 index, int *retval)
+{
+ struct trie_key key;
+ int err;
+
+ generate_key(&key);
+ err = bpf_map_delete_elem(&trie_map, &key);
+ if (err) {
+ *retval = err;
+ return 1;
+ }
+
+ /* Do we need to refill the map? */
+ if (key.data == nr_entries - 1) {
+ /*
+ * Atomicity isn't required because DELETE only supports
+ * one producer running concurrently. What we need is a
+ * way to track how many entries have been deleted from
+ * the trie between consecutive invocations of the BPF
+ * prog because a single bpf_loop() call might not
+ * delete all entries, e.g. when NR_LOOPS < nr_entries.
+ */
+ *retval = LPM_BENCH_REINIT_MAP;
+ return 1;
+ }
+
+ return 0;
+}
+
+SEC("xdp")
+int BPF_PROG(run_bench)
+{
+ int err = LPM_BENCH_SUCCESS;
+ u64 start, delta;
+ int loops;
+
+ start = bpf_ktime_get_ns();
+
+ switch (op) {
+ case LPM_OP_NOOP:
+ loops = bpf_loop(NR_LOOPS, noop, NULL, 0);
+ break;
+ case LPM_OP_BASELINE:
+ loops = bpf_loop(NR_LOOPS, baseline, NULL, 0);
+ break;
+ case LPM_OP_LOOKUP:
+ loops = bpf_loop(NR_LOOPS, lookup, &err, 0);
+ break;
+ case LPM_OP_INSERT:
+ loops = bpf_loop(NR_LOOPS, insert, &err, 0);
+ break;
+ case LPM_OP_UPDATE:
+ loops = bpf_loop(NR_LOOPS, update, &err, 0);
+ break;
+ case LPM_OP_DELETE:
+ loops = bpf_loop(NR_LOOPS, delete, &err, 0);
+ break;
+ default:
+ bpf_printk("invalid benchmark operation\n");
+ return -1;
+ }
+
+ delta = bpf_ktime_get_ns() - start;
+
+ __sync_add_and_fetch(&duration_ns, delta);
+ __sync_add_and_fetch(&hits, loops);
+
+ return err;
+}
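hits and duration_ns are cumulative across runs, so the derived numbers are plain ratios (sketch):

	/* average per-op latency and throughput from the two counters */
	double avg_ns = hits ? (double)duration_ns / hits : 0;
	double ops_per_sec = duration_ns ? hits * 1e9 / (double)duration_ns : 0;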
diff --git a/tools/testing/selftests/bpf/progs/lpm_trie_map.c b/tools/testing/selftests/bpf/progs/lpm_trie_map.c
new file mode 100644
index 000000000000..6e60d686b664
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lpm_trie_map.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define MAX_ENTRIES 100000000
+
+struct trie_key {
+ __u32 prefixlen;
+ __u32 data;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LPM_TRIE);
+ __type(key, struct trie_key);
+ __type(value, __u32);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __uint(max_entries, MAX_ENTRIES);
+} trie_free_map SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/lru_bug.c b/tools/testing/selftests/bpf/progs/lru_bug.c
new file mode 100644
index 000000000000..ad73029cb1e3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lru_bug.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct map_value {
+ struct task_struct __kptr_untrusted *ptr;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct map_value);
+} lru_map SEC(".maps");
+
+int pid = 0;
+int result = 1;
+
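+/* Ordering under test: nanosleep() below inserts, looks up and deletes
+ * the single LRU element, stores a task kptr through the now-stale
+ * lookup pointer, then calls bpf_ktime_get_ns(). That fentry prog
+ * re-inserts the key, which may reuse the element just freed to the
+ * LRU free list; result ends up 1 only if element reuse re-initialized
+ * the kptr field to NULL.
+ */
+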
+SEC("fentry/bpf_ktime_get_ns")
+int printk(void *ctx)
+{
+ struct map_value v = {};
+
+ if (pid == bpf_get_current_task_btf()->pid)
+ bpf_map_update_elem(&lru_map, &(int){0}, &v, 0);
+ return 0;
+}
+
+SEC("fentry/do_nanosleep")
+int nanosleep(void *ctx)
+{
+ struct map_value val = {}, *v;
+ struct task_struct *current;
+
+ bpf_map_update_elem(&lru_map, &(int){0}, &val, 0);
+ v = bpf_map_lookup_elem(&lru_map, &(int){0});
+ if (!v)
+ return 0;
+ bpf_map_delete_elem(&lru_map, &(int){0});
+ current = bpf_get_current_task_btf();
+ v->ptr = current;
+ pid = current->pid;
+ bpf_ktime_get_ns();
+ result = !v->ptr;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/lsm.c b/tools/testing/selftests/bpf/progs/lsm.c
index ff4d343b94b5..7de173daf27b 100644
--- a/tools/testing/selftests/bpf/progs/lsm.c
+++ b/tools/testing/selftests/bpf/progs/lsm.c
@@ -5,9 +5,11 @@
*/
#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include <errno.h>
+#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
@@ -30,6 +32,53 @@ struct {
__type(value, __u64);
} lru_hash SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} percpu_array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} percpu_hash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} lru_percpu_hash SEC(".maps");
+
+struct inner_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, __u64);
+} inner_map SEC(".maps");
+
+struct outer_arr {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __array(values, struct inner_map);
+} outer_arr SEC(".maps") = {
+ .values = { [0] = &inner_map },
+};
+
+struct outer_hash {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(int));
+ __array(values, struct inner_map);
+} outer_hash SEC(".maps") = {
+ .values = { [0] = &inner_map },
+};
+
char _license[] SEC("license") = "GPL";
int monitored_pid = 0;
@@ -40,14 +89,16 @@ SEC("lsm/file_mprotect")
int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
unsigned long reqprot, unsigned long prot, int ret)
{
- if (ret != 0)
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (ret != 0 || !mm)
return ret;
- __u32 pid = bpf_get_current_pid_tgid() >> 32;
+ __s32 pid = bpf_get_current_pid_tgid() >> 32;
int is_stack = 0;
- is_stack = (vma->vm_start <= vma->vm_mm->start_stack &&
- vma->vm_end >= vma->vm_mm->start_stack);
+ is_stack = (vma->vm_start <= mm->start_stack &&
+ vma->vm_end >= mm->start_stack);
if (is_stack && monitored_pid == pid) {
mprotect_count++;
@@ -61,6 +112,7 @@ SEC("lsm.s/bprm_committed_creds")
int BPF_PROG(test_void_hook, struct linux_binprm *bprm)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
+ struct inner_map *inner_map;
char args[64];
__u32 key = 0;
__u64 *value;
@@ -80,6 +132,27 @@ int BPF_PROG(test_void_hook, struct linux_binprm *bprm)
value = bpf_map_lookup_elem(&lru_hash, &key);
if (value)
*value = 0;
+ value = bpf_map_lookup_elem(&percpu_array, &key);
+ if (value)
+ *value = 0;
+ value = bpf_map_lookup_elem(&percpu_hash, &key);
+ if (value)
+ *value = 0;
+ value = bpf_map_lookup_elem(&lru_percpu_hash, &key);
+ if (value)
+ *value = 0;
+ inner_map = bpf_map_lookup_elem(&outer_arr, &key);
+ if (inner_map) {
+ value = bpf_map_lookup_elem(inner_map, &key);
+ if (value)
+ *value = 0;
+ }
+ inner_map = bpf_map_lookup_elem(&outer_hash, &key);
+ if (inner_map) {
+ value = bpf_map_lookup_elem(inner_map, &key);
+ if (value)
+ *value = 0;
+ }
return 0;
}
@@ -91,11 +164,11 @@ int BPF_PROG(test_task_free, struct task_struct *task)
int copy_test = 0;
-SEC("fentry.s/__x64_sys_setdomainname")
+SEC("fentry.s/" SYS_PREFIX "sys_setdomainname")
int BPF_PROG(test_sys_setdomainname, struct pt_regs *regs)
{
- void *ptr = (void *)PT_REGS_PARM1(regs);
- int len = PT_REGS_PARM2(regs);
+ void *ptr = (void *)PT_REGS_PARM1_SYSCALL(regs);
+ int len = PT_REGS_PARM2_SYSCALL(regs);
int buf = 0;
long ret;
diff --git a/tools/testing/selftests/bpf/progs/lsm_cgroup.c b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
new file mode 100644
index 000000000000..d7598538aa2d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+extern bool CONFIG_SECURITY_SELINUX __kconfig __weak;
+extern bool CONFIG_SECURITY_SMACK __kconfig __weak;
+extern bool CONFIG_SECURITY_APPARMOR __kconfig __weak;
+
+#ifndef AF_PACKET
+#define AF_PACKET 17
+#endif
+
+#ifndef AF_UNIX
+#define AF_UNIX 1
+#endif
+
+#ifndef EPERM
+#define EPERM 1
+#endif
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
+ __type(key, __u64);
+ __type(value, __u64);
+} cgroup_storage SEC(".maps");
+
+int called_socket_post_create;
+int called_socket_post_create2;
+int called_socket_bind;
+int called_socket_bind2;
+int called_socket_alloc;
+int called_socket_clone;
+
+static __always_inline int test_local_storage(void)
+{
+ __u64 *val;
+
+ val = bpf_get_local_storage(&cgroup_storage, 0);
+ if (!val)
+ return 0;
+ *val += 1;
+
+ return 1;
+}
+
+static __always_inline int real_create(struct socket *sock, int family,
+ int protocol)
+{
+ struct sock *sk;
+ int prio = 123;
+
+ /* Reject non-tx-only AF_PACKET. */
+ if (family == AF_PACKET && protocol != 0)
+ return 0; /* EPERM */
+
+ sk = sock->sk;
+ if (!sk)
+ return 1;
+
+ /* The rest of the sockets get default policy. */
+ if (bpf_setsockopt(sk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
+ return 0; /* EPERM */
+
+ /* Make sure bpf_getsockopt is allowed and works. */
+ prio = 0;
+ if (bpf_getsockopt(sk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
+ return 0; /* EPERM */
+ if (prio != 123)
+ return 0; /* EPERM */
+
+ /* Can access cgroup local storage. */
+ if (!test_local_storage())
+ return 0; /* EPERM */
+
+ return 1;
+}
+
+/* __cgroup_bpf_run_lsm_socket */
+SEC("lsm_cgroup/socket_post_create")
+int BPF_PROG(socket_post_create, struct socket *sock, int family,
+ int type, int protocol, int kern)
+{
+ called_socket_post_create++;
+ return real_create(sock, family, protocol);
+}
+
+/* __cgroup_bpf_run_lsm_socket */
+SEC("lsm_cgroup/socket_post_create")
+int BPF_PROG(socket_post_create2, struct socket *sock, int family,
+ int type, int protocol, int kern)
+{
+ called_socket_post_create2++;
+ return real_create(sock, family, protocol);
+}
+
+static __always_inline int real_bind(struct socket *sock,
+ struct sockaddr *address,
+ int addrlen)
+{
+ struct sockaddr_ll sa = {};
+ struct sock *sk = sock->sk;
+
+ if (!sk)
+ return 1;
+
+ if (sk->__sk_common.skc_family != AF_PACKET)
+ return 1;
+
+ if (sk->sk_kern_sock)
+ return 1;
+
+ bpf_probe_read_kernel(&sa, sizeof(sa), address);
+ if (sa.sll_protocol)
+ return 0; /* EPERM */
+
+ /* Can access cgroup local storage. */
+ if (!test_local_storage())
+ return 0; /* EPERM */
+
+ return 1;
+}
+
+/* __cgroup_bpf_run_lsm_socket */
+SEC("lsm_cgroup/socket_bind")
+int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
+ int addrlen)
+{
+ called_socket_bind++;
+ return real_bind(sock, address, addrlen);
+}
+
+/* __cgroup_bpf_run_lsm_socket */
+SEC("lsm_cgroup/socket_bind")
+int BPF_PROG(socket_bind2, struct socket *sock, struct sockaddr *address,
+ int addrlen)
+{
+ called_socket_bind2++;
+ return real_bind(sock, address, addrlen);
+}
+
+/* __cgroup_bpf_run_lsm_current (via bpf_lsm_current_hooks) */
+SEC("lsm_cgroup/sk_alloc_security")
+int BPF_PROG(socket_alloc, struct sock *sk, int family, gfp_t priority)
+{
+ called_socket_alloc++;
+ /* If non-BPF LSMs are already installed, returning EPERM here would leak memory they allocated. */
+ if (CONFIG_SECURITY_SELINUX || CONFIG_SECURITY_SMACK || CONFIG_SECURITY_APPARMOR)
+ return 1;
+
+ if (family == AF_UNIX)
+ return 0; /* EPERM */
+
+ /* Can access cgroup local storage. */
+ if (!test_local_storage())
+ return 0; /* EPERM */
+
+ return 1;
+}
+
+/* __cgroup_bpf_run_lsm_sock */
+SEC("lsm_cgroup/inet_csk_clone")
+int BPF_PROG(socket_clone, struct sock *newsk, const struct request_sock *req)
+{
+ int prio = 234;
+
+ if (!newsk)
+ return 1;
+
+ /* Accepted request sockets get a different priority. */
+ if (bpf_setsockopt(newsk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
+ return 1;
+
+ /* Make sure bpf_getsockopt is allowed and works. */
+ prio = 0;
+ if (bpf_getsockopt(newsk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
+ return 1;
+ if (prio != 234)
+ return 1;
+
+ /* Can access cgroup local storage. */
+ if (!test_local_storage())
+ return 1;
+
+ called_socket_clone++;
+
+ return 1;
+}
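Unlike plain lsm/ programs, lsm_cgroup/ programs are scoped to a cgroup, and two programs may share one hook, which is exactly what the duplicated create/bind handlers exercise. An attachment sketch (the cgroup path is an assumption):

	int cgroup_fd = open("/sys/fs/cgroup/lsm_test", O_RDONLY);
	struct bpf_link *l1, *l2;

	l1 = bpf_program__attach_cgroup(skel->progs.socket_post_create, cgroup_fd);
	l2 = bpf_program__attach_cgroup(skel->progs.socket_post_create2, cgroup_fd);

	/* every socket() from a task in the cgroup now bumps both counters */
	socket(AF_INET, SOCK_STREAM, 0);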
diff --git a/tools/testing/selftests/bpf/progs/lsm_cgroup_nonvoid.c b/tools/testing/selftests/bpf/progs/lsm_cgroup_nonvoid.c
new file mode 100644
index 000000000000..6cb0f161f417
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lsm_cgroup_nonvoid.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("lsm_cgroup/inet_csk_clone")
+int BPF_PROG(nonvoid_socket_clone, struct sock *newsk, const struct request_sock *req)
+{
+ /* Cannot return any errors from void LSM hooks. */
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/lsm_tailcall.c b/tools/testing/selftests/bpf/progs/lsm_tailcall.c
new file mode 100644
index 000000000000..6e7e58051e64
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lsm_tailcall.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Huawei Technologies Co., Ltd */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+SEC("lsm/file_permission")
+int lsm_file_permission_prog(void *ctx)
+{
+ return 0;
+}
+
+SEC("lsm/kernfs_init_security")
+int lsm_kernfs_init_security_prog(void *ctx)
+{
+ return 0;
+}
+
+SEC("lsm/kernfs_init_security")
+int lsm_kernfs_init_security_entry(void *ctx)
+{
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/map_excl.c b/tools/testing/selftests/bpf/progs/map_excl.c
new file mode 100644
index 000000000000..d461684728e4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_excl.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Google LLC. */
+#include <linux/bpf.h>
+#include <time.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1);
+} excl_map SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int should_have_access(void *ctx)
+{
+ int key = 0, value = 0xdeadbeef;
+
+ bpf_map_update_elem(&excl_map, &key, &value, 0);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int should_not_have_access(void *ctx)
+{
+ int key = 0, value = 0xdeadbeef;
+
+ bpf_map_update_elem(&excl_map, &key, &value, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/map_in_map_btf.c b/tools/testing/selftests/bpf/progs/map_in_map_btf.c
new file mode 100644
index 000000000000..7a1336d7b16a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_in_map_btf.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct node_data {
+ __u64 data;
+ struct bpf_list_node node;
+};
+
+struct map_value {
+ struct bpf_list_head head __contains(node_data, node);
+ struct bpf_spin_lock lock;
+};
+
+struct inner_array_type {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} inner_array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(key_size, 4);
+ __uint(value_size, 4);
+ __uint(max_entries, 1);
+ __array(values, struct inner_array_type);
+} outer_array SEC(".maps") = {
+ .values = {
+ [0] = &inner_array,
+ },
+};
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+bool done = false;
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int add_to_list_in_inner_array(void *ctx)
+{
+ struct map_value *value;
+ struct node_data *new;
+ struct bpf_map *map;
+ int zero = 0;
+
+ if (done || (u32)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+ map = bpf_map_lookup_elem(&outer_array, &zero);
+ if (!map)
+ return 0;
+
+ value = bpf_map_lookup_elem(map, &zero);
+ if (!value)
+ return 0;
+
+ new = bpf_obj_new(typeof(*new));
+ if (!new)
+ return 0;
+
+ bpf_spin_lock(&value->lock);
+ bpf_list_push_back(&value->head, &new->node);
+ bpf_spin_unlock(&value->lock);
+ done = true;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c
new file mode 100644
index 000000000000..edaba481db9d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_kptr.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+struct map_value {
+ struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr;
+ struct prog_test_ref_kfunc __kptr *ref_ptr;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} array_map SEC(".maps");
+
+struct pcpu_array_map {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} pcpu_array_map SEC(".maps");
+
+struct hash_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} hash_map SEC(".maps");
+
+struct pcpu_hash_map {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} pcpu_hash_map SEC(".maps");
+
+struct hash_malloc_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} hash_malloc_map SEC(".maps");
+
+struct pcpu_hash_malloc_map {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} pcpu_hash_malloc_map SEC(".maps");
+
+struct lru_hash_map {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} lru_hash_map SEC(".maps");
+
+struct lru_pcpu_hash_map {
+ __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} lru_pcpu_hash_map SEC(".maps");
+
+struct cgrp_ls_map {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct map_value);
+} cgrp_ls_map SEC(".maps");
+
+struct task_ls_map {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct map_value);
+} task_ls_map SEC(".maps");
+
+struct inode_ls_map {
+ __uint(type, BPF_MAP_TYPE_INODE_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct map_value);
+} inode_ls_map SEC(".maps");
+
+struct sk_ls_map {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct map_value);
+} sk_ls_map SEC(".maps");
+
+#define DEFINE_MAP_OF_MAP(map_type, inner_map_type, name) \
+ struct { \
+ __uint(type, map_type); \
+ __uint(max_entries, 1); \
+ __uint(key_size, sizeof(int)); \
+ __uint(value_size, sizeof(int)); \
+ __array(values, struct inner_map_type); \
+ } name SEC(".maps") = { \
+ .values = { [0] = &inner_map_type }, \
+ }
+
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_map, array_of_array_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_map, array_of_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_malloc_map, array_of_hash_malloc_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, lru_hash_map, array_of_lru_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, pcpu_array_map, array_of_pcpu_array_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, pcpu_hash_map, array_of_pcpu_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, array_map, hash_of_array_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_map, hash_of_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_malloc_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, pcpu_array_map, hash_of_pcpu_array_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, pcpu_hash_map, hash_of_pcpu_hash_maps);
+
+#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
+
+static void test_kptr_unref(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p;
+
+ p = v->unref_ptr;
+ /* store untrusted_ptr_or_null_ */
+ WRITE_ONCE(v->unref_ptr, p);
+ if (!p)
+ return;
+ if (p->a + p->b > 100)
+ return;
+ /* store untrusted_ptr_ */
+ WRITE_ONCE(v->unref_ptr, p);
+ /* store NULL */
+ WRITE_ONCE(v->unref_ptr, NULL);
+}
+
+static void test_kptr_ref(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p;
+
+ p = v->ref_ptr;
+ /* store ptr_or_null_ */
+ WRITE_ONCE(v->unref_ptr, p);
+ if (!p)
+ return;
+ /*
+ * p is rcu_ptr_prog_test_ref_kfunc,
+ * because bpf prog is non-sleepable and runs in RCU CS.
+ * p can be passed to kfunc that requires KF_RCU.
+ */
+ bpf_kfunc_call_test_ref(p);
+ if (p->a + p->b > 100)
+ return;
+ /* store NULL */
+ p = bpf_kptr_xchg(&v->ref_ptr, NULL);
+ if (!p)
+ return;
+ /*
+ * p is trusted_ptr_prog_test_ref_kfunc.
+ * p can be passed to kfunc that requires KF_RCU.
+ */
+ bpf_kfunc_call_test_ref(p);
+ if (p->a + p->b > 100) {
+ bpf_kfunc_call_test_release(p);
+ return;
+ }
+ /* store ptr_ */
+ WRITE_ONCE(v->unref_ptr, p);
+ bpf_kfunc_call_test_release(p);
+
+ p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+ if (!p)
+ return;
+ /* store ptr_ */
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (!p)
+ return;
+ if (p->a + p->b > 100) {
+ bpf_kfunc_call_test_release(p);
+ return;
+ }
+ bpf_kfunc_call_test_release(p);
+}
+
+static void test_kptr(struct map_value *v)
+{
+ test_kptr_unref(v);
+ test_kptr_ref(v);
+}
+
+SEC("tc")
+int test_map_kptr(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+#define TEST(map) \
+ v = bpf_map_lookup_elem(&map, &key); \
+ if (!v) \
+ return 0; \
+ test_kptr(v)
+
+ TEST(array_map);
+ TEST(hash_map);
+ TEST(hash_malloc_map);
+ TEST(lru_hash_map);
+ TEST(pcpu_array_map);
+ TEST(pcpu_hash_map);
+
+#undef TEST
+ return 0;
+}
+
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_map_kptr, struct cgroup *cgrp, const char *path)
+{
+ struct map_value *v;
+
+ v = bpf_cgrp_storage_get(&cgrp_ls_map, cgrp, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (v)
+ test_kptr(v);
+ return 0;
+}
+
+SEC("lsm/inode_unlink")
+int BPF_PROG(test_task_map_kptr, struct inode *inode, struct dentry *victim)
+{
+ struct task_struct *task;
+ struct map_value *v;
+
+ task = bpf_get_current_task_btf();
+ if (!task)
+ return 0;
+ v = bpf_task_storage_get(&task_ls_map, task, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (v)
+ test_kptr(v);
+ return 0;
+}
+
+SEC("lsm/inode_unlink")
+int BPF_PROG(test_inode_map_kptr, struct inode *inode, struct dentry *victim)
+{
+ struct map_value *v;
+
+ v = bpf_inode_storage_get(&inode_ls_map, inode, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (v)
+ test_kptr(v);
+ return 0;
+}
+
+SEC("tc")
+int test_sk_map_kptr(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ struct bpf_sock *sk;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 0;
+ v = bpf_sk_storage_get(&sk_ls_map, sk, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (v)
+ test_kptr(v);
+ return 0;
+}
+
+SEC("tc")
+int test_map_in_map_kptr(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+ void *map;
+
+#define TEST(map_in_map) \
+ map = bpf_map_lookup_elem(&map_in_map, &key); \
+ if (!map) \
+ return 0; \
+ v = bpf_map_lookup_elem(map, &key); \
+ if (!v) \
+ return 0; \
+ test_kptr(v)
+
+ TEST(array_of_array_maps);
+ TEST(array_of_hash_maps);
+ TEST(array_of_hash_malloc_maps);
+ TEST(array_of_lru_hash_maps);
+ TEST(array_of_pcpu_array_maps);
+ TEST(array_of_pcpu_hash_maps);
+ TEST(hash_of_array_maps);
+ TEST(hash_of_hash_maps);
+ TEST(hash_of_hash_malloc_maps);
+ TEST(hash_of_lru_hash_maps);
+ TEST(hash_of_pcpu_array_maps);
+ TEST(hash_of_pcpu_hash_maps);
+
+#undef TEST
+ return 0;
+}
+
+int ref = 1;
+
+static __always_inline
+int test_map_kptr_ref_pre(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p, *p_st;
+ unsigned long arg = 0;
+ int ret;
+
+ p = bpf_kfunc_call_test_acquire(&arg);
+ if (!p)
+ return 1;
+ ref++;
+
+ p_st = p->next;
+ if (p_st->cnt.refs.counter != ref) {
+ ret = 2;
+ goto end;
+ }
+
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (p) {
+ ret = 3;
+ goto end;
+ }
+ if (p_st->cnt.refs.counter != ref)
+ return 4;
+
+ p = bpf_kptr_xchg(&v->ref_ptr, NULL);
+ if (!p)
+ return 5;
+ bpf_kfunc_call_test_release(p);
+ ref--;
+ if (p_st->cnt.refs.counter != ref)
+ return 6;
+
+ p = bpf_kfunc_call_test_acquire(&arg);
+ if (!p)
+ return 7;
+ ref++;
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (p) {
+ ret = 8;
+ goto end;
+ }
+ if (p_st->cnt.refs.counter != ref)
+ return 9;
+ /* Leave in map */
+
+ return 0;
+end:
+ ref--;
+ bpf_kfunc_call_test_release(p);
+ return ret;
+}
+
+static __always_inline
+int test_map_kptr_ref_post(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p, *p_st;
+
+ p_st = v->ref_ptr;
+ if (!p_st || p_st->cnt.refs.counter != ref)
+ return 1;
+
+ p = bpf_kptr_xchg(&v->ref_ptr, NULL);
+ if (!p)
+ return 2;
+ if (p_st->cnt.refs.counter != ref) {
+ bpf_kfunc_call_test_release(p);
+ return 3;
+ }
+
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (p) {
+ bpf_kfunc_call_test_release(p);
+ return 4;
+ }
+ if (p_st->cnt.refs.counter != ref)
+ return 5;
+
+ return 0;
+}
+
+#define TEST(map) \
+ v = bpf_map_lookup_elem(&map, &key); \
+ if (!v) \
+ return -1; \
+ ret = test_map_kptr_ref_pre(v); \
+ if (ret) \
+ return ret;
+
+#define TEST_PCPU(map) \
+ v = bpf_map_lookup_percpu_elem(&map, &key, 0); \
+ if (!v) \
+ return -1; \
+ ret = test_map_kptr_ref_pre(v); \
+ if (ret) \
+ return ret;
+
+SEC("tc")
+int test_map_kptr_ref1(struct __sk_buff *ctx)
+{
+ struct map_value *v, val = {};
+ int key = 0, ret;
+
+ bpf_map_update_elem(&hash_map, &key, &val, 0);
+ bpf_map_update_elem(&hash_malloc_map, &key, &val, 0);
+ bpf_map_update_elem(&lru_hash_map, &key, &val, 0);
+
+ bpf_map_update_elem(&pcpu_hash_map, &key, &val, 0);
+ bpf_map_update_elem(&pcpu_hash_malloc_map, &key, &val, 0);
+ bpf_map_update_elem(&lru_pcpu_hash_map, &key, &val, 0);
+
+ TEST(array_map);
+ TEST(hash_map);
+ TEST(hash_malloc_map);
+ TEST(lru_hash_map);
+
+ TEST_PCPU(pcpu_array_map);
+ TEST_PCPU(pcpu_hash_map);
+ TEST_PCPU(pcpu_hash_malloc_map);
+ TEST_PCPU(lru_pcpu_hash_map);
+
+ return 0;
+}
+
+#undef TEST
+#undef TEST_PCPU
+
+#define TEST(map) \
+ v = bpf_map_lookup_elem(&map, &key); \
+ if (!v) \
+ return -1; \
+ ret = test_map_kptr_ref_post(v); \
+ if (ret) \
+ return ret;
+
+#define TEST_PCPU(map) \
+ v = bpf_map_lookup_percpu_elem(&map, &key, 0); \
+ if (!v) \
+ return -1; \
+ ret = test_map_kptr_ref_post(v); \
+ if (ret) \
+ return ret;
+
+SEC("tc")
+int test_map_kptr_ref2(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0, ret;
+
+ TEST(array_map);
+ TEST(hash_map);
+ TEST(hash_malloc_map);
+ TEST(lru_hash_map);
+
+ TEST_PCPU(pcpu_array_map);
+ TEST_PCPU(pcpu_hash_map);
+ TEST_PCPU(pcpu_hash_malloc_map);
+ TEST_PCPU(lru_pcpu_hash_map);
+
+ return 0;
+}
+
+#undef TEST
+#undef TEST_PCPU
+
+SEC("tc")
+int test_map_kptr_ref3(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ unsigned long sp = 0;
+
+ p = bpf_kfunc_call_test_acquire(&sp);
+ if (!p)
+ return 1;
+ ref++;
+ if (p->cnt.refs.counter != ref) {
+ bpf_kfunc_call_test_release(p);
+ return 2;
+ }
+ bpf_kfunc_call_test_release(p);
+ ref--;
+ return 0;
+}
+
+SEC("syscall")
+int test_ls_map_kptr_ref1(void *ctx)
+{
+ struct task_struct *current;
+ struct map_value *v;
+
+ current = bpf_get_current_task_btf();
+ if (!current)
+ return 100;
+ v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
+ if (v)
+ return 150;
+ v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!v)
+ return 200;
+ return test_map_kptr_ref_pre(v);
+}
+
+SEC("syscall")
+int test_ls_map_kptr_ref2(void *ctx)
+{
+ struct task_struct *current;
+ struct map_value *v;
+
+ current = bpf_get_current_task_btf();
+ if (!current)
+ return 100;
+ v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
+ if (!v)
+ return 200;
+ return test_map_kptr_ref_post(v);
+}
+
+SEC("syscall")
+int test_ls_map_kptr_ref_del(void *ctx)
+{
+ struct task_struct *current;
+ struct map_value *v;
+
+ current = bpf_get_current_task_btf();
+ if (!current)
+ return 100;
+ v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
+ if (!v)
+ return 200;
+ if (!v->ref_ptr)
+ return 300;
+ return bpf_task_storage_delete(&task_ls_map, current);
+}
+
+char _license[] SEC("license") = "GPL";
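For context, a minimal userspace sketch of driving one of the "tc" programs above through BPF_PROG_TEST_RUN; the skeleton header name and the dummy frame are assumptions, not part of the patch:

#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include "map_kptr.skel.h"	/* assumed bpftool-generated skeleton */

static int run_tc_prog(struct bpf_program *prog)
{
	char frame[64] = {};	/* dummy ethernet frame for the test run */
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = frame,
		.data_size_in = sizeof(frame),
		.repeat = 1,
	);

	if (bpf_prog_test_run_opts(bpf_program__fd(prog), &opts))
		return -1;
	return opts.retval;	/* these programs return 0 on success */
}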
diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
new file mode 100644
index 000000000000..4c0ff01f1a96
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+struct map_value {
+ char buf[8];
+ struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr;
+ struct prog_test_ref_kfunc __kptr *ref_ptr;
+ struct prog_test_member __kptr *ref_memb_ptr;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} array_map SEC(".maps");
+
+SEC("?tc")
+__failure __msg("kptr access size must be BPF_DW")
+int size_not_bpf_dw(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ *(u32 *)&v->unref_ptr = 0;
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("kptr access cannot have variable offset")
+int non_const_var_off(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0, id;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ id = ctx->protocol;
+ if (id < 4 || id > 12)
+ return 0;
+ *(u64 *)((void *)v + id) = 0;
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("R1 doesn't have constant offset. kptr has to be")
+int non_const_var_off_kptr_xchg(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0, id;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ id = ctx->protocol;
+ if (id < 4 || id > 12)
+ return 0;
+ bpf_kptr_xchg((void *)v + id, NULL);
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("kptr access misaligned expected=8 off=7")
+int misaligned_access_write(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ *(void **)((void *)v + 7) = NULL;
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("kptr access misaligned expected=8 off=1")
+int misaligned_access_read(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ return *(u64 *)((void *)v + 1);
+}
+
+SEC("?tc")
+__failure __msg("variable untrusted_ptr_ access var_off=(0x0; 0x1e0)")
+int reject_var_off_store(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *unref_ptr;
+ struct map_value *v;
+ int key = 0, id;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ unref_ptr = v->unref_ptr;
+ if (!unref_ptr)
+ return 0;
+ id = ctx->protocol;
+ if (id < 4 || id > 12)
+ return 0;
+ unref_ptr += id;
+ v->unref_ptr = unref_ptr;
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc")
+int reject_bad_type_match(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *unref_ptr;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ unref_ptr = v->unref_ptr;
+ if (!unref_ptr)
+ return 0;
+ unref_ptr = (void *)unref_ptr + 4;
+ v->unref_ptr = unref_ptr;
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_")
+int marked_as_untrusted_or_null(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_this_cpu_ptr(v->unref_ptr);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("access beyond struct prog_test_ref_kfunc at off 32 size 4")
+int correct_btf_id_check_size(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ p = v->unref_ptr;
+ if (!p)
+ return 0;
+ return *(int *)((void *)p + bpf_core_type_size(struct prog_test_ref_kfunc));
+}
+
+SEC("?tc")
+__failure __msg("R1 type=untrusted_ptr_ expected=percpu_ptr_")
+int inherit_untrusted_on_walk(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *unref_ptr;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ unref_ptr = v->unref_ptr;
+ if (!unref_ptr)
+ return 0;
+ unref_ptr = unref_ptr->next;
+ bpf_this_cpu_ptr(unref_ptr);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("off=8 kptr isn't referenced kptr")
+int reject_kptr_xchg_on_unref(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_kptr_xchg(&v->unref_ptr, NULL);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("R1 type=rcu_ptr_or_null_ expected=percpu_ptr_")
+int mark_ref_as_untrusted_or_null(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_this_cpu_ptr(v->ref_ptr);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("store to referenced kptr disallowed")
+int reject_untrusted_store_to_ref(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ p = v->ref_ptr;
+ if (!p)
+ return 0;
+ /* Checkmate, clang */
+ *(struct prog_test_ref_kfunc * volatile *)&v->ref_ptr = p;
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("R2 must be referenced")
+int reject_untrusted_xchg(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ p = v->ref_ptr;
+ if (!p)
+ return 0;
+ bpf_kptr_xchg(&v->ref_ptr, p);
+ return 0;
+}
+
+SEC("?tc")
+__failure
+__msg("invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member")
+int reject_bad_type_xchg(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *ref_ptr;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ ref_ptr = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+ if (!ref_ptr)
+ return 0;
+ bpf_kptr_xchg(&v->ref_memb_ptr, ref_ptr);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("invalid kptr access, R2 type=ptr_prog_test_ref_kfunc")
+int reject_member_of_ref_xchg(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *ref_ptr;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ ref_ptr = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+ if (!ref_ptr)
+ return 0;
+ bpf_kptr_xchg(&v->ref_memb_ptr, &ref_ptr->memb);
+ return 0;
+}
+
+SEC("?syscall")
+__failure __msg("kptr cannot be accessed indirectly by helper")
+int reject_indirect_helper_access(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_get_current_comm(v, sizeof(v->buf) + 1);
+ return 0;
+}
+
+__noinline
+int write_func(int *p)
+{
+ return p ? *p = 42 : 0;
+}
+
+SEC("?tc")
+__failure __msg("kptr cannot be accessed indirectly by helper")
+int reject_indirect_global_func_access(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ return write_func((void *)v + 5);
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference id=4 alloc_insn=")
+int kptr_xchg_ref_state(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+ if (!p)
+ return 0;
+ bpf_kptr_xchg(&v->ref_ptr, p);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Possibly NULL pointer passed to helper arg2")
+int kptr_xchg_possibly_null(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+
+ /* PTR_TO_BTF_ID | PTR_MAYBE_NULL passed to bpf_kptr_xchg() */
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (p)
+ bpf_kfunc_call_test_release(p);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
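The __failure/__msg annotations above are more than comments: they are BTF declaration tags that the selftest loader matches against the verifier log after the load attempt. A rough sketch of how bpf_misc.h encodes them (the exact tag strings here are an assumption):

#define __msg(msg)	__attribute__((btf_decl_tag("comment:test_expect_msg=" msg)))
#define __failure	__attribute__((btf_decl_tag("comment:test_expect_failure")))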
diff --git a/tools/testing/selftests/bpf/progs/map_percpu_stats.c b/tools/testing/selftests/bpf/progs/map_percpu_stats.c
new file mode 100644
index 000000000000..63245785eb69
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_percpu_stats.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+__u32 target_id;
+
+__s64 bpf_map_sum_elem_count(const struct bpf_map *map) __ksym;
+
+SEC("iter/bpf_map")
+int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct bpf_map *map = ctx->map;
+
+ if (map && map->id == target_id)
+ BPF_SEQ_PRINTF(seq, "%lld", bpf_map_sum_elem_count(map));
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
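A sketch of the matching userspace side, assuming a generated skeleton: the iterator program is attached, instantiated with bpf_iter_create(), and its output read like a file:

struct bpf_link *link;
char buf[64] = {};
int iter_fd;

skel->bss->target_id = map_id;		/* id of the map to sum */
link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
iter_fd = bpf_iter_create(bpf_link__fd(link));
read(iter_fd, buf, sizeof(buf));	/* buf now holds the decimal element count */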
diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
index d8850bc6a9f1..efaf622c28dd 100644
--- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c
+++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
@@ -12,6 +12,7 @@ _Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");
enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;
+int page_size = 0; /* userspace should set it */
#define VERIFY_TYPE(type, func) ({ \
g_map_type = type; \
@@ -102,6 +103,8 @@ struct {
__type(value, __u32);
} m_hash SEC(".maps");
+__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym;
+
static inline int check_hash(void)
{
struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
@@ -114,6 +117,8 @@ static inline int check_hash(void)
VERIFY(hash->elem_size == 64);
VERIFY(hash->count.counter == 0);
+ VERIFY(bpf_map_sum_elem_count(map) == 0);
+
for (i = 0; i < HALF_ENTRIES; ++i) {
const __u32 key = i;
const __u32 val = 1;
@@ -122,6 +127,7 @@ static inline int check_hash(void)
return 0;
}
VERIFY(hash->count.counter == HALF_ENTRIES);
+ VERIFY(bpf_map_sum_elem_count(map) == HALF_ENTRIES);
return 1;
}
@@ -310,7 +316,7 @@ struct lpm_trie {
} __attribute__((preserve_access_index));
struct lpm_key {
- struct bpf_lpm_trie_key trie_key;
+ struct bpf_lpm_trie_key_hdr trie_key;
__u32 data;
};
@@ -333,9 +339,11 @@ static inline int check_lpm_trie(void)
return 1;
}
+#define INNER_MAX_ENTRIES 1234
+
struct inner_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 1);
+ __uint(max_entries, INNER_MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} inner_map SEC(".maps");
@@ -347,7 +355,7 @@ struct {
__type(value, __u32);
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 1);
+ __uint(max_entries, INNER_MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
});
@@ -359,8 +367,13 @@ static inline int check_array_of_maps(void)
{
struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;
+ struct bpf_array *inner_map;
+ int key = 0;
VERIFY(check_default(&array_of_maps->map, map));
+ inner_map = bpf_map_lookup_elem(array_of_maps, &key);
+ VERIFY(inner_map != NULL);
+ VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);
return 1;
}
@@ -381,8 +394,13 @@ static inline int check_hash_of_maps(void)
{
struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;
+ struct bpf_htab *inner_map;
+ int key = 2;
VERIFY(check_default(&hash_of_maps->map, map));
+ inner_map = bpf_map_lookup_elem(hash_of_maps, &key);
+ VERIFY(inner_map != NULL);
+ VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);
return 1;
}
@@ -635,7 +653,6 @@ struct bpf_ringbuf_map {
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
- __uint(max_entries, 1 << 12);
} m_ringbuf SEC(".maps");
static inline int check_ringbuf(void)
@@ -643,7 +660,7 @@ static inline int check_ringbuf(void)
struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
struct bpf_map *map = (struct bpf_map *)&m_ringbuf;
- VERIFY(check(&ringbuf->map, map, 0, 0, 1 << 12));
+ VERIFY(check(&ringbuf->map, map, 0, 0, page_size));
return 1;
}
@@ -683,5 +700,4 @@ int cg_skb(void *ctx)
return 1;
}
-__u32 _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
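With the hard-coded max_entries gone from m_ringbuf, the runner is expected to size the ring buffer to one page and mirror that value in the new page_size global before load; a sketch under that assumption:

skel->bss->page_size = getpagesize();
bpf_map__set_max_entries(skel->maps.m_ringbuf, getpagesize());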
diff --git a/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c b/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c
new file mode 100644
index 000000000000..3b984b6ae7c0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+SEC("tp_btf/sys_enter")
+__success
+__log_level(2)
+__msg("r8 = *(u64 *)(r7 +0) ; R7=ptr_nameidata(off={{[0-9]+}}) R8=rdonly_untrusted_mem(sz=0)")
+__msg("r9 = *(u8 *)(r8 +0) ; R8=rdonly_untrusted_mem(sz=0) R9=scalar")
+int btf_id_to_ptr_mem(void *ctx)
+{
+ struct task_struct *task;
+ struct nameidata *idata;
+ u64 ret, off;
+
+ task = bpf_get_current_task_btf();
+ idata = task->nameidata;
+ off = bpf_core_field_offset(struct nameidata, pathname);
+ /*
+	 * use an asm block to get a reliable match target for __msg; equivalent of:
+ * ret = task->nameidata->pathname[0];
+ */
+ asm volatile (
+ "r7 = %[idata];"
+ "r7 += %[off];"
+ "r8 = *(u64 *)(r7 + 0);"
+ "r9 = *(u8 *)(r8 + 0);"
+ "%[ret] = r9;"
+ : [ret]"=r"(ret)
+ : [idata]"r"(idata),
+ [off]"r"(off)
+ : "r7", "r8", "r9");
+ return ret;
+}
+
+SEC("socket")
+__success
+__retval(0)
+int ldx_is_ok_bad_addr(void *ctx)
+{
+ char *p;
+
+ if (!bpf_core_enum_value_exists(enum bpf_features, BPF_FEAT_RDONLY_CAST_TO_VOID))
+ return 42;
+
+ p = bpf_rdonly_cast(0, 0);
+ return p[0x7fff];
+}
+
+SEC("socket")
+__success
+__retval(1)
+int ldx_is_ok_good_addr(void *ctx)
+{
+ int v, *p;
+
+ v = 1;
+ p = bpf_rdonly_cast(&v, 0);
+ return *p;
+}
+
+SEC("socket")
+__success
+int offset_not_tracked(void *ctx)
+{
+ int *p, i, s;
+
+ p = bpf_rdonly_cast(0, 0);
+ s = 0;
+ bpf_for(i, 0, 1000 * 1000 * 1000) {
+ p++;
+ s += *p;
+ }
+ return s;
+}
+
+SEC("socket")
+__failure
+__msg("cannot write into rdonly_untrusted_mem")
+int stx_not_ok(void *ctx)
+{
+ int v, *p;
+
+ v = 1;
+ p = bpf_rdonly_cast(&v, 0);
+ *p = 1;
+ return 0;
+}
+
+SEC("socket")
+__failure
+__msg("cannot write into rdonly_untrusted_mem")
+int atomic_not_ok(void *ctx)
+{
+ int v, *p;
+
+ v = 1;
+ p = bpf_rdonly_cast(&v, 0);
+ __sync_fetch_and_add(p, 1);
+ return 0;
+}
+
+SEC("socket")
+__failure
+__msg("cannot write into rdonly_untrusted_mem")
+int atomic_rmw_not_ok(void *ctx)
+{
+ long v, *p;
+
+ v = 1;
+ p = bpf_rdonly_cast(&v, 0);
+ return __sync_val_compare_and_swap(p, 0, 42);
+}
+
+SEC("socket")
+__failure
+__msg("invalid access to memory, mem_size=0 off=0 size=4")
+__msg("R1 min value is outside of the allowed memory range")
+int kfunc_param_not_ok(void *ctx)
+{
+ int *p;
+
+ p = bpf_rdonly_cast(0, 0);
+ bpf_kfunc_trusted_num_test(p);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure
+__msg("R1 type=rdonly_untrusted_mem expected=")
+int helper_param_not_ok(void *ctx)
+{
+ char *p;
+
+ p = bpf_rdonly_cast(0, 0);
+ /*
+	 * Any helper with the ARG_CONST_SIZE_OR_ZERO constraint will do;
+	 * it is the most permissive constraint.
+ */
+ bpf_copy_from_user(p, 0, (void *)42);
+ return 0;
+}
+
+static __noinline u64 *get_some_addr(void)
+{
+ if (bpf_get_prandom_u32())
+ return bpf_rdonly_cast(0, bpf_core_type_id_kernel(struct sock));
+ else
+ return bpf_rdonly_cast(0, 0);
+}
+
+SEC("socket")
+__success
+__retval(0)
+int mixed_mem_type(void *ctx)
+{
+ u64 *p;
+
+	/* Use a __noinline function so the compiler cannot hoist the load into the if branches. */
+ p = get_some_addr();
+ return *p;
+}
+
+__attribute__((__aligned__(8)))
+u8 global[] = {
+ 0x11, 0x22, 0x33, 0x44,
+ 0x55, 0x66, 0x77, 0x88,
+ 0x99
+};
+
+__always_inline
+static u64 combine(void *p)
+{
+ u64 acc;
+
+ acc = 0;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ acc |= (*(u64 *)p >> 56) << 24;
+ acc |= (*(u32 *)p >> 24) << 16;
+ acc |= (*(u16 *)p >> 8) << 8;
+ acc |= *(u8 *)p;
+#else
+ acc |= (*(u64 *)p & 0xff) << 24;
+ acc |= (*(u32 *)p & 0xff) << 16;
+ acc |= (*(u16 *)p & 0xff) << 8;
+ acc |= *(u8 *)p;
+#endif
+ return acc;
+}
+
+SEC("socket")
+__retval(0x88442211)
+int diff_size_access(void *ctx)
+{
+ return combine(bpf_rdonly_cast(&global, 0));
+}
+
+SEC("socket")
+__retval(0x99553322)
+int misaligned_access(void *ctx)
+{
+ return combine(bpf_rdonly_cast(&global, 0) + 1);
+}
+
+__weak int return_one(void)
+{
+ return 1;
+}
+
+SEC("socket")
+__success
+__retval(1)
+int null_check(void *ctx)
+{
+ int *p;
+
+ p = bpf_rdonly_cast(0, 0);
+ if (p == 0)
+		/* make this a function call so the compiler cannot
+		 * move the r0 assignment before the NULL check.
+ */
+ return return_one();
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
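Worked check of the two __retval values above, assuming little-endian: with p at global, *(u64 *)p = 0x8877665544332211 so (v >> 56) << 24 = 0x88000000; *(u32 *)p = 0x44332211 so (v >> 24) << 16 = 0x00440000; *(u16 *)p = 0x2211 so (v >> 8) << 8 = 0x00002200; and *(u8 *)p = 0x11. OR-ing the four gives 0x88442211 for diff_size_access, and starting one byte in the same steps give 0x99000000 | 0x00550000 | 0x00003300 | 0x22 = 0x99553322 for misaligned_access.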
diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe.c b/tools/testing/selftests/bpf/progs/missed_kprobe.c
new file mode 100644
index 000000000000..51a4fe64c917
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/missed_kprobe.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+/*
+ * No tests in here; this program only exists to trigger
+ * 'bpf_fentry_test*' through tracing test_run.
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe/bpf_fentry_test1")
+int test1(struct pt_regs *ctx)
+{
+ bpf_kfunc_common_test();
+ return 0;
+}
+
+SEC("kprobe/bpf_kfunc_common_test")
+int test2(struct pt_regs *ctx)
+{
+ return 0;
+}
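test2 attaches to bpf_kfunc_common_test(), which test1 calls from inside its own kprobe context, so test2 is expected to be counted as missed rather than run. A sketch of how a runner could read that counter (assuming current libbpf; older code would use bpf_obj_get_info_by_fd()):

static __u64 prog_missed(int prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);

	if (bpf_prog_get_info_by_fd(prog_fd, &info, &len))
		return 0;
	return info.recursion_misses;	/* bumped once per missed run */
}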
diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c
new file mode 100644
index 000000000000..29c18d869ec1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+/*
+ * No tests in here; this program only exists to trigger
+ * 'bpf_fentry_test*' through tracing test_run.
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe.multi/bpf_fentry_test1")
+int test1(struct pt_regs *ctx)
+{
+ bpf_kfunc_common_test();
+ return 0;
+}
+
+SEC("kprobe/bpf_kfunc_common_test")
+int test2(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe/bpf_kfunc_common_test")
+int test3(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe/bpf_kfunc_common_test")
+int test4(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe.multi/bpf_kfunc_common_test")
+int test5(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe.session/bpf_kfunc_common_test")
+int test6(struct pt_regs *ctx)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/missed_tp_recursion.c b/tools/testing/selftests/bpf/progs/missed_tp_recursion.c
new file mode 100644
index 000000000000..762385f827c5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/missed_tp_recursion.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+/*
+ * No tests in here; this program only exists to trigger
+ * 'bpf_fentry_test*' through tracing test_run.
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe/bpf_fentry_test1")
+int test1(struct pt_regs *ctx)
+{
+ bpf_printk("test");
+ return 0;
+}
+
+SEC("tp/bpf_trace/bpf_trace_printk")
+int test2(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("tp/bpf_trace/bpf_trace_printk")
+int test3(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("tp/bpf_trace/bpf_trace_printk")
+int test4(struct pt_regs *ctx)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/mmap_inner_array.c b/tools/testing/selftests/bpf/progs/mmap_inner_array.c
new file mode 100644
index 000000000000..90aacbc2938a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mmap_inner_array.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct inner_array_type {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __type(key, __u32);
+ __type(value, __u64);
+ __uint(max_entries, 1);
+} inner_array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(key_size, 4);
+ __uint(value_size, 4);
+ __uint(max_entries, 1);
+ __array(values, struct inner_array_type);
+} outer_map SEC(".maps");
+
+int pid = 0;
+__u64 match_value = 0x13572468;
+bool done = false;
+bool pid_match = false;
+bool outer_map_match = false;
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int add_to_list_in_inner_array(void *ctx)
+{
+ __u32 curr_pid, zero = 0;
+ struct bpf_map *map;
+ __u64 *value;
+
+ curr_pid = (u32)bpf_get_current_pid_tgid();
+ if (done || curr_pid != pid)
+ return 0;
+
+ pid_match = true;
+ map = bpf_map_lookup_elem(&outer_map, &curr_pid);
+ if (!map)
+ return 0;
+
+ outer_map_match = true;
+ value = bpf_map_lookup_elem(map, &zero);
+ if (!value)
+ return 0;
+
+ *value = match_value;
+ done = true;
+ return 0;
+}
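A sketch of the intended userspace flow, with the names and page size as assumptions: publish the inner array fd in outer_map under the test pid, mmap the inner array, and fire the fentry with a short sleep:

int inner_fd = bpf_map__fd(skel->maps.inner_array);
int outer_fd = bpf_map__fd(skel->maps.outer_map);
__u32 key = getpid();
__u64 *val;

skel->bss->pid = key;
bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
val = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE, MAP_SHARED,
	   inner_fd, 0);
usleep(1000);	/* usleep() ends in sys_nanosleep, firing the fentry */
/* afterwards *val should equal match_value (0x13572468) */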
diff --git a/tools/testing/selftests/bpf/progs/modify_return.c b/tools/testing/selftests/bpf/progs/modify_return.c
index 8b7466a15c6b..3376d4849f58 100644
--- a/tools/testing/selftests/bpf/progs/modify_return.c
+++ b/tools/testing/selftests/bpf/progs/modify_return.c
@@ -47,3 +47,43 @@ int BPF_PROG(fexit_test, int a, __u64 b, int ret)
return 0;
}
+
+static int sequence2;
+
+__u64 fentry_result2 = 0;
+SEC("fentry/bpf_modify_return_test2")
+int BPF_PROG(fentry_test2, int a, int *b, short c, int d, void *e, char f,
+ int g)
+{
+ sequence2++;
+ fentry_result2 = (sequence2 == 1);
+ return 0;
+}
+
+__u64 fmod_ret_result2 = 0;
+SEC("fmod_ret/bpf_modify_return_test2")
+int BPF_PROG(fmod_ret_test2, int a, int *b, short c, int d, void *e, char f,
+ int g, int ret)
+{
+ sequence2++;
+	/* This is the first fmod_ret program; the ret passed in should be 0 */
+ fmod_ret_result2 = (sequence2 == 2 && ret == 0);
+ return input_retval;
+}
+
+__u64 fexit_result2 = 0;
+SEC("fexit/bpf_modify_return_test2")
+int BPF_PROG(fexit_test2, int a, int *b, short c, int d, void *e, char f,
+ int g, int ret)
+{
+ sequence2++;
+	/* If input_retval is non-zero, a successful modification should have
+ * occurred.
+ */
+ if (input_retval)
+ fexit_result2 = (sequence2 == 3 && ret == input_retval);
+ else
+ fexit_result2 = (sequence2 == 3 && ret == 29);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf.h b/tools/testing/selftests/bpf/progs/mptcp_bpf.h
new file mode 100644
index 000000000000..3b188ccdcc40
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __MPTCP_BPF_H__
+#define __MPTCP_BPF_H__
+
+#include "bpf_experimental.h"
+
+/* list helpers from include/linux/list.h */
+static inline int list_is_head(const struct list_head *list,
+ const struct list_head *head)
+{
+ return list == head;
+}
+
+#define list_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+#define list_first_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+#define list_next_entry(pos, member) \
+ list_entry((pos)->member.next, typeof(*(pos)), member)
+
+#define list_entry_is_head(pos, head, member) \
+ list_is_head(&pos->member, (head))
+
+/* small difference: 'can_loop' has been added to the loop condition */
+#define list_for_each_entry(pos, head, member) \
+ for (pos = list_first_entry(head, typeof(*pos), member); \
+ !list_entry_is_head(pos, head, member) && can_loop; \
+ pos = list_next_entry(pos, member))
+
+/* mptcp helpers from protocol.h */
+#define mptcp_for_each_subflow(__msk, __subflow) \
+ list_for_each_entry(__subflow, &((__msk)->conn_list), node)
+
+static __always_inline struct sock *
+mptcp_subflow_tcp_sock(const struct mptcp_subflow_context *subflow)
+{
+ return subflow->tcp_sock;
+}
+
+#endif
diff --git a/tools/testing/selftests/bpf/progs/mptcp_sock.c b/tools/testing/selftests/bpf/progs/mptcp_sock.c
new file mode 100644
index 000000000000..f3acb90588c7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_sock.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Tessares SA. */
+/* Copyright (c) 2022, SUSE. */
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+__u32 token = 0;
+
+struct mptcp_storage {
+ __u32 invoked;
+ __u32 is_mptcp;
+ struct sock *sk;
+ __u32 token;
+ struct sock *first;
+ char ca_name[TCP_CA_NAME_MAX];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct mptcp_storage);
+} socket_storage_map SEC(".maps");
+
+SEC("sockops")
+int _sockops(struct bpf_sock_ops *ctx)
+{
+ struct mptcp_storage *storage;
+ struct mptcp_sock *msk;
+ int op = (int)ctx->op;
+ struct tcp_sock *tsk;
+ struct bpf_sock *sk;
+ bool is_mptcp;
+
+ if (op != BPF_SOCK_OPS_TCP_CONNECT_CB)
+ return 1;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 1;
+
+ tsk = bpf_skc_to_tcp_sock(sk);
+ if (!tsk)
+ return 1;
+
+ is_mptcp = bpf_core_field_exists(tsk->is_mptcp) ? tsk->is_mptcp : 0;
+ if (!is_mptcp) {
+ storage = bpf_sk_storage_get(&socket_storage_map, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 1;
+
+ storage->token = 0;
+ __builtin_memset(storage->ca_name, 0, TCP_CA_NAME_MAX);
+ storage->first = NULL;
+ } else {
+ msk = bpf_skc_to_mptcp_sock(sk);
+ if (!msk)
+ return 1;
+
+ storage = bpf_sk_storage_get(&socket_storage_map, msk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 1;
+
+ storage->token = msk->token;
+ __builtin_memcpy(storage->ca_name, msk->ca_name, TCP_CA_NAME_MAX);
+ storage->first = msk->first;
+ }
+ storage->invoked++;
+ storage->is_mptcp = is_mptcp;
+ storage->sk = (struct sock *)sk;
+
+ return 1;
+}
+
+SEC("fentry/mptcp_pm_new_connection")
+int BPF_PROG(trace_mptcp_pm_new_connection, struct mptcp_sock *msk,
+ const struct sock *ssk, int server_side)
+{
+ if (!server_side)
+ token = msk->token;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/mptcp_sockmap.c b/tools/testing/selftests/bpf/progs/mptcp_sockmap.c
new file mode 100644
index 000000000000..d4eef0cbadb9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_sockmap.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bpf_tracing_net.h"
+
+char _license[] SEC("license") = "GPL";
+
+int sk_index;
+int redirect_idx;
+int trace_port;
+int helper_ret;
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+ __uint(max_entries, 100);
+} sock_map SEC(".maps");
+
+SEC("sockops")
+int mptcp_sockmap_inject(struct bpf_sock_ops *skops)
+{
+ struct bpf_sock *sk;
+
+ /* only accept specified connection */
+ if (skops->local_port != trace_port ||
+ skops->op != BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
+ return 1;
+
+ sk = skops->sk;
+ if (!sk)
+ return 1;
+
+	/* store sk in the sockmap at sk_index */
+ helper_ret = bpf_sock_map_update(skops, &sock_map, &sk_index, BPF_NOEXIST);
+
+ return 1;
+}
+
+SEC("sk_skb/stream_verdict")
+int mptcp_sockmap_redirect(struct __sk_buff *skb)
+{
+ /* redirect skb to the sk under sock_map[redirect_idx] */
+ return bpf_sk_redirect_map(skb, &sock_map, redirect_idx, 0);
+}
diff --git a/tools/testing/selftests/bpf/progs/mptcp_subflow.c b/tools/testing/selftests/bpf/progs/mptcp_subflow.c
new file mode 100644
index 000000000000..41389e579578
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_subflow.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Tessares SA. */
+/* Copyright (c) 2024, Kylin Software */
+
+/* vmlinux.h, bpf_helpers.h and other defines */
+#include "bpf_tracing_net.h"
+#include "mptcp_bpf.h"
+
+char _license[] SEC("license") = "GPL";
+
+char cc[TCP_CA_NAME_MAX] = "reno";
+int pid;
+
+/* Associate a subflow counter to each token */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+ __uint(max_entries, 100);
+} mptcp_sf SEC(".maps");
+
+SEC("sockops")
+int mptcp_subflow(struct bpf_sock_ops *skops)
+{
+ __u32 init = 1, key, mark, *cnt;
+ struct mptcp_sock *msk;
+ struct bpf_sock *sk;
+ int err;
+
+ if (skops->op != BPF_SOCK_OPS_TCP_CONNECT_CB)
+ return 1;
+
+ sk = skops->sk;
+ if (!sk)
+ return 1;
+
+ msk = bpf_skc_to_mptcp_sock(sk);
+ if (!msk)
+ return 1;
+
+ key = msk->token;
+ cnt = bpf_map_lookup_elem(&mptcp_sf, &key);
+ if (cnt) {
+ /* A new subflow is added to an existing MPTCP connection */
+ __sync_fetch_and_add(cnt, 1);
+ mark = *cnt;
+ } else {
+ /* A new MPTCP connection is just initiated and this is its primary subflow */
+ bpf_map_update_elem(&mptcp_sf, &key, &init, BPF_ANY);
+ mark = init;
+ }
+
+ /* Set the mark of the subflow's socket based on appearance order */
+ err = bpf_setsockopt(skops, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
+ if (err < 0)
+ return 1;
+ if (mark == 2)
+ err = bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION, cc, TCP_CA_NAME_MAX);
+
+ return 1;
+}
+
+static int _check_getsockopt_subflow_mark(struct mptcp_sock *msk, struct bpf_sockopt *ctx)
+{
+ struct mptcp_subflow_context *subflow;
+ int i = 0;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk;
+
+ ssk = mptcp_subflow_tcp_sock(bpf_core_cast(subflow,
+ struct mptcp_subflow_context));
+
+ if (ssk->sk_mark != ++i) {
+ ctx->retval = -2;
+ break;
+ }
+ }
+
+ return 1;
+}
+
+static int _check_getsockopt_subflow_cc(struct mptcp_sock *msk, struct bpf_sockopt *ctx)
+{
+ struct mptcp_subflow_context *subflow;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct inet_connection_sock *icsk;
+ struct sock *ssk;
+
+ ssk = mptcp_subflow_tcp_sock(bpf_core_cast(subflow,
+ struct mptcp_subflow_context));
+ icsk = bpf_core_cast(ssk, struct inet_connection_sock);
+
+ if (ssk->sk_mark == 2 &&
+ __builtin_memcmp(icsk->icsk_ca_ops->name, cc, TCP_CA_NAME_MAX)) {
+ ctx->retval = -2;
+ break;
+ }
+ }
+
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int _getsockopt_subflow(struct bpf_sockopt *ctx)
+{
+ struct bpf_sock *sk = ctx->sk;
+ struct mptcp_sock *msk;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 1;
+
+ if (!sk || sk->protocol != IPPROTO_MPTCP ||
+ (!(ctx->level == SOL_SOCKET && ctx->optname == SO_MARK) &&
+ !(ctx->level == SOL_TCP && ctx->optname == TCP_CONGESTION)))
+ return 1;
+
+ msk = bpf_core_cast(sk, struct mptcp_sock);
+ if (msk->pm.extra_subflows != 1) {
+ ctx->retval = -1;
+ return 1;
+ }
+
+ if (ctx->optname == SO_MARK)
+ return _check_getsockopt_subflow_mark(msk, ctx);
+ return _check_getsockopt_subflow_cc(msk, ctx);
+}
diff --git a/tools/testing/selftests/bpf/progs/mptcpify.c b/tools/testing/selftests/bpf/progs/mptcpify.c
new file mode 100644
index 000000000000..cbdc730c3a47
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcpify.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023, SUSE. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include "bpf_tracing_net.h"
+
+char _license[] SEC("license") = "GPL";
+int pid;
+
+SEC("fmod_ret/update_socket_protocol")
+int BPF_PROG(mptcpify, int family, int type, int protocol)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return protocol;
+
+ if ((family == AF_INET || family == AF_INET6) &&
+ type == SOCK_STREAM &&
+ (!protocol || protocol == IPPROTO_TCP)) {
+ return IPPROTO_MPTCP;
+ }
+
+ return protocol;
+}
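Once this fmod_ret program is attached and pid is set, a plain TCP socket() in the monitored process transparently becomes MPTCP. One way to observe that from userspace, sketched here as an assumption about the companion test:

int fd = socket(AF_INET, SOCK_STREAM, 0);
int proto = 0;
socklen_t len = sizeof(proto);

getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &proto, &len);
/* with mptcpify active for this pid, proto == IPPROTO_MPTCP (262) */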
diff --git a/tools/testing/selftests/bpf/progs/nested_acquire.c b/tools/testing/selftests/bpf/progs/nested_acquire.c
new file mode 100644
index 000000000000..49ad7b9adf56
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/nested_acquire.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("tp_btf/tcp_probe")
+__success
+int BPF_PROG(test_nested_acquire_nonzero, struct sock *sk, struct sk_buff *skb)
+{
+ struct sk_buff *ptr;
+
+ ptr = bpf_kfunc_nested_acquire_nonzero_offset_test(&sk->sk_write_queue);
+
+ bpf_kfunc_nested_release_test(ptr);
+ return 0;
+}
+
+SEC("tp_btf/tcp_probe")
+__success
+int BPF_PROG(test_nested_acquire_zero, struct sock *sk, struct sk_buff *skb)
+{
+ struct sk_buff *ptr;
+
+ ptr = bpf_kfunc_nested_acquire_zero_offset_test(&sk->__sk_common);
+
+ bpf_kfunc_nested_release_test(ptr);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/nested_trust_common.h b/tools/testing/selftests/bpf/progs/nested_trust_common.h
new file mode 100644
index 000000000000..1784b496be2e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/nested_trust_common.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _NESTED_TRUST_COMMON_H
+#define _NESTED_TRUST_COMMON_H
+
+#include <stdbool.h>
+
+bool bpf_cpumask_test_cpu(unsigned int cpu, const struct cpumask *cpumask) __ksym;
+__u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
+
+#endif /* _NESTED_TRUST_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/nested_trust_failure.c b/tools/testing/selftests/bpf/progs/nested_trust_failure.c
new file mode 100644
index 000000000000..3568ec450100
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/nested_trust_failure.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#include "nested_trust_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, u64);
+} sk_storage_map SEC(".maps");
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(task_newtask,
+ * TP_PROTO(struct task_struct *p, u64 clone_flags)
+ */
+
+SEC("tp_btf/task_newtask")
+__failure __msg("R2 must be")
+int BPF_PROG(test_invalid_nested_user_cpus, struct task_struct *task, u64 clone_flags)
+{
+ bpf_cpumask_test_cpu(0, task->user_cpus_ptr);
+ return 0;
+}
+
+/* Although R2 is of type sk_buff while sock_common is expected, the untrusted ptr check is hit first. */
+SEC("tp_btf/tcp_probe")
+__failure __msg("R2 type=untrusted_ptr_ expected=ptr_, trusted_ptr_, rcu_ptr_")
+int BPF_PROG(test_invalid_skb_field, struct sock *sk, struct sk_buff *skb)
+{
+ bpf_sk_storage_get(&sk_storage_map, skb->next, 0, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/nested_trust_success.c b/tools/testing/selftests/bpf/progs/nested_trust_success.c
new file mode 100644
index 000000000000..2b66953ca82e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/nested_trust_success.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#include "nested_trust_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, u64);
+} sk_storage_map SEC(".maps");
+
+SEC("tp_btf/task_newtask")
+__success
+int BPF_PROG(test_read_cpumask, struct task_struct *task, u64 clone_flags)
+{
+ bpf_cpumask_test_cpu(0, task->cpus_ptr);
+ return 0;
+}
+
+SEC("tp_btf/tcp_probe")
+__success
+int BPF_PROG(test_skb_field, struct sock *sk, struct sk_buff *skb)
+{
+ bpf_sk_storage_get(&sk_storage_map, skb->sk, 0, 0);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__success
+int BPF_PROG(test_nested_offset, struct task_struct *task, u64 clone_flags)
+{
+ bpf_cpumask_first_zero(&task->cpus_mask);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/net_timestamping.c b/tools/testing/selftests/bpf/progs/net_timestamping.c
new file mode 100644
index 000000000000..b4c2f0f2be11
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/net_timestamping.c
@@ -0,0 +1,248 @@
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+#include <errno.h>
+
+__u32 monitored_pid = 0;
+
+int nr_active;
+int nr_snd;
+int nr_passive;
+int nr_sched;
+int nr_txsw;
+int nr_ack;
+
+struct sk_stg {
+ __u64 sendmsg_ns; /* record ts when sendmsg is called */
+};
+
+struct sk_tskey {
+ u64 cookie;
+ u32 tskey;
+};
+
+struct delay_info {
+ u64 sendmsg_ns; /* record ts when sendmsg is called */
+ u32 sched_delay; /* SCHED_CB - sendmsg_ns */
+ u32 snd_sw_delay; /* SND_SW_CB - SCHED_CB */
+ u32 ack_delay; /* ACK_CB - SND_SW_CB */
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct sk_stg);
+} sk_stg_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct sk_tskey);
+ __type(value, struct delay_info);
+ __uint(max_entries, 1024);
+} time_map SEC(".maps");
+
+static u64 delay_tolerance_nsec = 10000000000; /* 10 seconds as an example */
+
+extern int bpf_sock_ops_enable_tx_tstamp(struct bpf_sock_ops_kern *skops, u64 flags) __ksym;
+
+static int bpf_test_sockopt(void *ctx, const struct sock *sk, int expected)
+{
+ int tmp, new = SK_BPF_CB_TX_TIMESTAMPING;
+ int opt = SK_BPF_CB_FLAGS;
+ int level = SOL_SOCKET;
+
+ if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new)) != expected)
+ return 1;
+
+ if (bpf_getsockopt(ctx, level, opt, &tmp, sizeof(tmp)) != expected ||
+ (!expected && tmp != new))
+ return 1;
+
+ return 0;
+}
+
+static bool bpf_test_access_sockopt(void *ctx, const struct sock *sk)
+{
+ if (bpf_test_sockopt(ctx, sk, -EOPNOTSUPP))
+ return true;
+ return false;
+}
+
+static bool bpf_test_access_load_hdr_opt(struct bpf_sock_ops *skops)
+{
+ u8 opt[3] = {0};
+ int load_flags = 0;
+ int ret;
+
+ ret = bpf_load_hdr_opt(skops, opt, sizeof(opt), load_flags);
+ if (ret != -EOPNOTSUPP)
+ return true;
+
+ return false;
+}
+
+static bool bpf_test_access_cb_flags_set(struct bpf_sock_ops *skops)
+{
+ int ret;
+
+ ret = bpf_sock_ops_cb_flags_set(skops, 0);
+ if (ret != -EOPNOTSUPP)
+ return true;
+
+ return false;
+}
+
+/* In the timestamping callbacks, the following BPF calls are disallowed
+ * for safety reasons; each access test below returns false when the call
+ * fails with -EOPNOTSUPP as expected.
+ */
+static bool bpf_test_access_bpf_calls(struct bpf_sock_ops *skops,
+ const struct sock *sk)
+{
+ if (bpf_test_access_sockopt(skops, sk))
+ return true;
+
+ if (bpf_test_access_load_hdr_opt(skops))
+ return true;
+
+ if (bpf_test_access_cb_flags_set(skops))
+ return true;
+
+ return false;
+}
+
+static bool bpf_test_delay(struct bpf_sock_ops *skops, const struct sock *sk)
+{
+ struct bpf_sock_ops_kern *skops_kern;
+ u64 timestamp = bpf_ktime_get_ns();
+ struct skb_shared_info *shinfo;
+ struct delay_info dinfo = {0};
+ struct sk_tskey key = {0};
+ struct delay_info *val;
+ struct sk_buff *skb;
+ struct sk_stg *stg;
+ u64 prior_ts, delay;
+
+ if (bpf_test_access_bpf_calls(skops, sk))
+ return false;
+
+ skops_kern = bpf_cast_to_kern_ctx(skops);
+ skb = skops_kern->skb;
+ shinfo = bpf_core_cast(skb->head + skb->end, struct skb_shared_info);
+
+ key.cookie = bpf_get_socket_cookie(skops);
+ if (!key.cookie)
+ return false;
+
+ if (skops->op == BPF_SOCK_OPS_TSTAMP_SENDMSG_CB) {
+ stg = bpf_sk_storage_get(&sk_stg_map, (void *)sk, 0, 0);
+ if (!stg)
+ return false;
+ dinfo.sendmsg_ns = stg->sendmsg_ns;
+ bpf_sock_ops_enable_tx_tstamp(skops_kern, 0);
+ key.tskey = shinfo->tskey;
+ if (!key.tskey)
+ return false;
+ bpf_map_update_elem(&time_map, &key, &dinfo, BPF_ANY);
+ return true;
+ }
+
+ key.tskey = shinfo->tskey;
+ if (!key.tskey)
+ return false;
+
+ val = bpf_map_lookup_elem(&time_map, &key);
+ if (!val)
+ return false;
+
+ switch (skops->op) {
+ case BPF_SOCK_OPS_TSTAMP_SCHED_CB:
+ val->sched_delay = timestamp - val->sendmsg_ns;
+ delay = val->sched_delay;
+ break;
+ case BPF_SOCK_OPS_TSTAMP_SND_SW_CB:
+ prior_ts = val->sched_delay + val->sendmsg_ns;
+ val->snd_sw_delay = timestamp - prior_ts;
+ delay = val->snd_sw_delay;
+ break;
+ case BPF_SOCK_OPS_TSTAMP_ACK_CB:
+ prior_ts = val->snd_sw_delay + val->sched_delay + val->sendmsg_ns;
+ val->ack_delay = timestamp - prior_ts;
+ delay = val->ack_delay;
+ break;
+ }
+
+ if (delay >= delay_tolerance_nsec)
+ return false;
+
+	/* Since this is the last callback, remove the entry from the map after the latency check */
+ if (skops->op == BPF_SOCK_OPS_TSTAMP_ACK_CB)
+ bpf_map_delete_elem(&time_map, &key);
+
+ return true;
+}
+
+SEC("fentry/tcp_sendmsg_locked")
+int BPF_PROG(trace_tcp_sendmsg_locked, struct sock *sk, struct msghdr *msg,
+ size_t size)
+{
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
+ u64 timestamp = bpf_ktime_get_ns();
+ u32 flag = sk->sk_bpf_cb_flags;
+ struct sk_stg *stg;
+
+ if (pid != monitored_pid || !flag)
+ return 0;
+
+ stg = bpf_sk_storage_get(&sk_stg_map, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!stg)
+ return 0;
+
+ stg->sendmsg_ns = timestamp;
+ nr_snd += 1;
+ return 0;
+}
+
+SEC("sockops")
+int skops_sockopt(struct bpf_sock_ops *skops)
+{
+ struct bpf_sock *bpf_sk = skops->sk;
+ const struct sock *sk;
+
+ if (!bpf_sk)
+ return 1;
+
+ sk = (struct sock *)bpf_skc_to_tcp_sock(bpf_sk);
+ if (!sk)
+ return 1;
+
+ switch (skops->op) {
+ case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
+ nr_active += !bpf_test_sockopt(skops, sk, 0);
+ break;
+ case BPF_SOCK_OPS_TSTAMP_SENDMSG_CB:
+ if (bpf_test_delay(skops, sk))
+ nr_snd += 1;
+ break;
+ case BPF_SOCK_OPS_TSTAMP_SCHED_CB:
+ if (bpf_test_delay(skops, sk))
+ nr_sched += 1;
+ break;
+ case BPF_SOCK_OPS_TSTAMP_SND_SW_CB:
+ if (bpf_test_delay(skops, sk))
+ nr_txsw += 1;
+ break;
+ case BPF_SOCK_OPS_TSTAMP_ACK_CB:
+ if (bpf_test_delay(skops, sk))
+ nr_ack += 1;
+ break;
+ }
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
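To spell out the bookkeeping in bpf_test_delay(): sendmsg_ns is captured at sendmsg time; at SCHED_CB, sched_delay = t_sched - sendmsg_ns; at SND_SW_CB, prior_ts = sched_delay + sendmsg_ns, which is exactly t_sched, so snd_sw_delay = t_snd - t_sched; at ACK_CB, prior_ts likewise collapses to t_snd, so ack_delay = t_ack - t_snd. Each stage therefore checks only its own segment of the timeline against the 10 s tolerance, and the map entry is dropped after the final ACK callback.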
diff --git a/tools/testing/selftests/bpf/progs/netcnt_prog.c b/tools/testing/selftests/bpf/progs/netcnt_prog.c
index d071adf178bd..f9ef8aee56f1 100644
--- a/tools/testing/selftests/bpf/progs/netcnt_prog.c
+++ b/tools/testing/selftests/bpf/progs/netcnt_prog.c
@@ -13,21 +13,20 @@
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
- __type(value, struct percpu_net_cnt);
+ __type(value, union percpu_net_cnt);
} percpu_netcnt SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
- __type(value, struct net_cnt);
+ __type(value, union net_cnt);
} netcnt SEC(".maps");
SEC("cgroup/skb")
int bpf_nextcnt(struct __sk_buff *skb)
{
- struct percpu_net_cnt *percpu_cnt;
- char fmt[] = "%d %llu %llu\n";
- struct net_cnt *cnt;
+ union percpu_net_cnt *percpu_cnt;
+ union net_cnt *cnt;
__u64 ts, dt;
int ret;
@@ -68,4 +67,3 @@ int bpf_nextcnt(struct __sk_buff *skb)
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/tools/testing/selftests/bpf/progs/netif_receive_skb.c b/tools/testing/selftests/bpf/progs/netif_receive_skb.c
index 6b670039ea67..9e067dcbf607 100644
--- a/tools/testing/selftests/bpf/progs/netif_receive_skb.c
+++ b/tools/testing/selftests/bpf/progs/netif_receive_skb.c
@@ -5,6 +5,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
#include <errno.h>
@@ -16,8 +17,11 @@ bool skip = false;
#define STRSIZE 2048
#define EXPECTED_STRSIZE 256
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#if defined(bpf_target_s390)
+/* NULL points to a readable struct lowcore on s390, so take the last page */
+#define BADPTR ((void *)0xFFFFFFFFFFFFF000ULL)
+#else
+#define BADPTR 0
#endif
struct {
@@ -46,7 +50,6 @@ static int __strncmp(const void *m1, const void *m2, size_t len)
do { \
static const char _expectedval[EXPECTED_STRSIZE] = \
_expected; \
- static const char _ptrtype[64] = #_type; \
__u64 _hflags = _flags | BTF_F_COMPACT; \
static _type _ptrdata = __VA_ARGS__; \
static struct btf_ptr _ptr = { }; \
@@ -113,11 +116,11 @@ int BPF_PROG(trace_netif_receive_skb, struct sk_buff *skb)
}
/* Check invalid ptr value */
- p.ptr = 0;
+ p.ptr = BADPTR;
__ret = bpf_snprintf_btf(str, STRSIZE, &p, sizeof(p), 0);
if (__ret >= 0) {
- bpf_printk("printing NULL should generate error, got (%d)",
- __ret);
+ bpf_printk("printing %llx should generate error, got (%d)",
+ (unsigned long long)BADPTR, __ret);
ret = -ERANGE;
}
diff --git a/tools/testing/selftests/bpf/progs/netns_cookie_prog.c b/tools/testing/selftests/bpf/progs/netns_cookie_prog.c
new file mode 100644
index 000000000000..94040714af18
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/netns_cookie_prog.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+
+#define AF_INET6 10
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} sockops_netns_cookies SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} sk_msg_netns_cookies SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u64);
+} sock_map SEC(".maps");
+
+int tcx_init_netns_cookie, tcx_netns_cookie;
+int cgroup_skb_init_netns_cookie, cgroup_skb_netns_cookie;
+
+SEC("sockops")
+int get_netns_cookie_sockops(struct bpf_sock_ops *ctx)
+{
+ struct bpf_sock *sk = ctx->sk;
+ int *cookie;
+ __u32 key = 0;
+
+ if (ctx->family != AF_INET6)
+ return 1;
+
+ if (!sk)
+ return 1;
+
+ switch (ctx->op) {
+ case BPF_SOCK_OPS_TCP_CONNECT_CB:
+ cookie = bpf_sk_storage_get(&sockops_netns_cookies, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!cookie)
+ return 1;
+
+ *cookie = bpf_get_netns_cookie(ctx);
+ break;
+ case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
+ bpf_sock_map_update(ctx, &sock_map, &key, BPF_NOEXIST);
+ break;
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+SEC("sk_msg")
+int get_netns_cookie_sk_msg(struct sk_msg_md *msg)
+{
+ struct bpf_sock *sk = msg->sk;
+ int *cookie;
+
+ if (msg->family != AF_INET6)
+ return 1;
+
+ if (!sk)
+ return 1;
+
+ cookie = bpf_sk_storage_get(&sk_msg_netns_cookies, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!cookie)
+ return 1;
+
+ *cookie = bpf_get_netns_cookie(msg);
+
+ return 1;
+}
+
+SEC("tcx/ingress")
+int get_netns_cookie_tcx(struct __sk_buff *skb)
+{
+ tcx_init_netns_cookie = bpf_get_netns_cookie(NULL);
+ tcx_netns_cookie = bpf_get_netns_cookie(skb);
+ return TCX_PASS;
+}
+
+SEC("cgroup_skb/ingress")
+int get_netns_cookie_cgroup_skb(struct __sk_buff *skb)
+{
+ cgroup_skb_init_netns_cookie = bpf_get_netns_cookie(NULL);
+ cgroup_skb_netns_cookie = bpf_get_netns_cookie(skb);
+ return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
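The stored cookies can presumably be cross-checked from userspace with the SO_NETNS_COOKIE sockopt; a sketch, with the global names taken from the program above:

__u64 cookie = 0;
socklen_t len = sizeof(cookie);
int fd = socket(AF_INET6, SOCK_STREAM, 0);

getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, &cookie, &len);
/* expect cookie == skel->bss->tcx_netns_cookie once traffic has
 * crossed the tcx/ingress hook in the same namespace
 */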
diff --git a/tools/testing/selftests/bpf/progs/normal_map_btf.c b/tools/testing/selftests/bpf/progs/normal_map_btf.c
new file mode 100644
index 000000000000..a45c9299552c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/normal_map_btf.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct node_data {
+ __u64 data;
+ struct bpf_list_node node;
+};
+
+struct map_value {
+ struct bpf_list_head head __contains(node_data, node);
+ struct bpf_spin_lock lock;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} array SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+bool done = false;
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int add_to_list_in_array(void *ctx)
+{
+ struct map_value *value;
+ struct node_data *new;
+ int zero = 0;
+
+ if (done || (int)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+ value = bpf_map_lookup_elem(&array, &zero);
+ if (!value)
+ return 0;
+
+ new = bpf_obj_new(typeof(*new));
+ if (!new)
+ return 0;
+
+ bpf_spin_lock(&value->lock);
+ bpf_list_push_back(&value->head, &new->node);
+ bpf_spin_unlock(&value->lock);
+ done = true;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_array.c b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
new file mode 100644
index 000000000000..37c2d2608ec0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
@@ -0,0 +1,190 @@
+#include "bpf_experimental.h"
+
+struct val_t {
+ long b, c, d;
+};
+
+struct elem {
+ long sum;
+ struct val_t __percpu_kptr *pc;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} array SEC(".maps");
+
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+const volatile int nr_cpus;
+
+/* Initialize the percpu object */
+SEC("?fentry/bpf_fentry_test1")
+int BPF_PROG(test_array_map_1)
+{
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ return 0;
+}
+
+/* Update percpu data */
+SEC("?fentry/bpf_fentry_test2")
+int BPF_PROG(test_array_map_2)
+{
+ struct val_t __percpu_kptr *p;
+ struct val_t *v;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ v = bpf_per_cpu_ptr(p, 0);
+ if (!v)
+ return 0;
+ v->c = 1;
+ v->d = 2;
+
+ return 0;
+}
+
+int cpu0_field_d, sum_field_c;
+int my_pid;
+
+/* Summarize percpu data */
+SEC("?fentry/bpf_fentry_test3")
+int BPF_PROG(test_array_map_3)
+{
+ struct val_t __percpu_kptr *p;
+ int i, index = 0;
+ struct val_t *v;
+ struct elem *e;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
+ return 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ bpf_for(i, 0, nr_cpus) {
+ v = bpf_per_cpu_ptr(p, i);
+ if (v) {
+ if (i == 0)
+ cpu0_field_d = v->d;
+ sum_field_c += v->c;
+ }
+ }
+
+ return 0;
+}
+
+/* Explicitly free allocated percpu data */
+SEC("?fentry/bpf_fentry_test4")
+int BPF_PROG(test_array_map_4)
+{
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ /* delete */
+ p = bpf_kptr_xchg(&e->pc, NULL);
+ if (p) {
+ bpf_percpu_obj_drop(p);
+ }
+
+ return 0;
+}
+
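+/* Combined test: allocate, update, summarize and free the percpu object
+ * in a single sleepable program. Sleepable progs get no implicit RCU
+ * protection, so the explicit bpf_rcu_read_lock()/unlock() pair is needed
+ * around accesses to the RCU-protected kptr e->pc.
+ */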
+SEC("?fentry.s/bpf_fentry_test1")
+int BPF_PROG(test_array_map_10)
+{
+ struct val_t __percpu_kptr *p, *p1;
+ int i, index = 0;
+ struct val_t *v;
+ struct elem *e;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
+ return 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ bpf_rcu_read_lock();
+ p = e->pc;
+ if (!p) {
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ goto out;
+
+ p1 = bpf_kptr_xchg(&e->pc, p);
+ if (p1) {
+			/* raced with another prog: drop the displaced kptr */
+ bpf_percpu_obj_drop(p1);
+ }
+ }
+
+ v = bpf_this_cpu_ptr(p);
+ v->c = 3;
+ v = bpf_this_cpu_ptr(p);
+ v->c = 0;
+
+ v = bpf_per_cpu_ptr(p, 0);
+ if (!v)
+ goto out;
+ v->c = 1;
+ v->d = 2;
+
+ /* delete */
+ p1 = bpf_kptr_xchg(&e->pc, NULL);
+ if (!p1)
+ goto out;
+
+ bpf_for(i, 0, nr_cpus) {
+ v = bpf_per_cpu_ptr(p, i);
+ if (v) {
+ if (i == 0)
+ cpu0_field_d = v->d;
+ sum_field_c += v->c;
+ }
+ }
+
+ /* finally release p */
+ bpf_percpu_obj_drop(p1);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c b/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
new file mode 100644
index 000000000000..a2acf9aa6c24
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
@@ -0,0 +1,109 @@
+#include "bpf_experimental.h"
+
+struct val_t {
+ long b, c, d;
+};
+
+struct elem {
+ long sum;
+ struct val_t __percpu_kptr *pc;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct elem);
+} cgrp SEC(".maps");
+
+const volatile int nr_cpus;
+
+/* Initialize the percpu object */
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test_cgrp_local_storage_1)
+{
+ struct task_struct *task;
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+
+ task = bpf_get_current_task_btf();
+ e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ return 0;
+}
+
+/* Percpu data collection */
+SEC("fentry/bpf_fentry_test2")
+int BPF_PROG(test_cgrp_local_storage_2)
+{
+ struct task_struct *task;
+ struct val_t __percpu_kptr *p;
+ struct val_t *v;
+ struct elem *e;
+
+ task = bpf_get_current_task_btf();
+ e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ v = bpf_per_cpu_ptr(p, 0);
+ if (!v)
+ return 0;
+ v->c = 1;
+ v->d = 2;
+ return 0;
+}
+
+int cpu0_field_d, sum_field_c;
+int my_pid;
+
+/* Summarize percpu data collection */
+SEC("fentry/bpf_fentry_test3")
+int BPF_PROG(test_cgrp_local_storage_3)
+{
+ struct task_struct *task;
+ struct val_t __percpu_kptr *p;
+ struct val_t *v;
+ struct elem *e;
+ int i;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
+ return 0;
+
+ task = bpf_get_current_task_btf();
+ e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
+ if (!e)
+ return 0;
+
+ p = e->pc;
+ if (!p)
+ return 0;
+
+ bpf_for(i, 0, nr_cpus) {
+ v = bpf_per_cpu_ptr(p, i);
+ if (v) {
+ if (i == 0)
+ cpu0_field_d = v->d;
+ sum_field_c += v->c;
+ }
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
new file mode 100644
index 000000000000..f2b8eb2ff76f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c
@@ -0,0 +1,182 @@
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+struct val_t {
+ long b, c, d;
+};
+
+struct val2_t {
+ long b;
+};
+
+struct val_with_ptr_t {
+ char *p;
+};
+
+struct val_with_rb_root_t {
+ struct bpf_spin_lock lock;
+};
+
+struct val_600b_t {
+ char b[600];
+};
+
+struct elem {
+ long sum;
+ struct val_t __percpu_kptr *pc;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} array SEC(".maps");
+
+long ret;
+
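+/* Every prog below is expected to be rejected at load time; the __msg()
+ * strings are matched against the verifier log.
+ */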
+SEC("?fentry/bpf_fentry_test1")
+__failure __msg("store to referenced kptr disallowed")
+int BPF_PROG(test_array_map_1)
+{
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ e->pc = (struct val_t __percpu_kptr *)ret;
+ return 0;
+}
+
+SEC("?fentry/bpf_fentry_test1")
+__failure __msg("invalid kptr access, R2 type=percpu_ptr_val2_t expected=ptr_val_t")
+int BPF_PROG(test_array_map_2)
+{
+ struct val2_t __percpu_kptr *p2;
+ struct val_t __percpu_kptr *p;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p2 = bpf_percpu_obj_new(struct val2_t);
+ if (!p2)
+ return 0;
+
+ p = bpf_kptr_xchg(&e->pc, p2);
+ if (p)
+ bpf_percpu_obj_drop(p);
+
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("R1 type=scalar expected=percpu_ptr_, percpu_rcu_ptr_, percpu_trusted_ptr_")
+int BPF_PROG(test_array_map_3)
+{
+ struct val_t __percpu_kptr *p, *p1;
+ struct val_t *v;
+ struct elem *e;
+ int index = 0;
+
+ e = bpf_map_lookup_elem(&array, &index);
+ if (!e)
+ return 0;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ p1 = bpf_kptr_xchg(&e->pc, p);
+ if (p1)
+ bpf_percpu_obj_drop(p1);
+
+ v = bpf_this_cpu_ptr(p);
+ ret = v->b;
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("arg#0 expected for bpf_percpu_obj_drop_impl()")
+int BPF_PROG(test_array_map_4)
+{
+ struct val_t __percpu_kptr *p;
+
+ p = bpf_percpu_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ bpf_obj_drop(p);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("arg#0 expected for bpf_obj_drop_impl()")
+int BPF_PROG(test_array_map_5)
+{
+ struct val_t *p;
+
+ p = bpf_obj_new(struct val_t);
+ if (!p)
+ return 0;
+
+ bpf_percpu_obj_drop(p);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("bpf_percpu_obj_new type ID argument must be of a struct of scalars")
+int BPF_PROG(test_array_map_6)
+{
+ struct val_with_ptr_t __percpu_kptr *p;
+
+ p = bpf_percpu_obj_new(struct val_with_ptr_t);
+ if (!p)
+ return 0;
+
+ bpf_percpu_obj_drop(p);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("bpf_percpu_obj_new type ID argument must not contain special fields")
+int BPF_PROG(test_array_map_7)
+{
+ struct val_with_rb_root_t __percpu_kptr *p;
+
+ p = bpf_percpu_obj_new(struct val_with_rb_root_t);
+ if (!p)
+ return 0;
+
+ bpf_percpu_obj_drop(p);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_fentry_test1")
+__failure __msg("bpf_percpu_obj_new type size (600) is greater than 512")
+int BPF_PROG(test_array_map_8)
+{
+ struct val_600b_t __percpu_kptr *p;
+
+ p = bpf_percpu_obj_new(struct val_600b_t);
+ if (!p)
+ return 0;
+
+ bpf_percpu_obj_drop(p);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
index 25467d13c356..f793280a3238 100644
--- a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
+++ b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
@@ -11,8 +11,8 @@ typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, 16384);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(stack_trace_t));
+ __type(key, __u32);
+ __type(value, stack_trace_t);
} stackmap SEC(".maps");
struct {
@@ -35,10 +35,10 @@ int oncpu(void *ctx)
long val;
val = bpf_get_stackid(ctx, &stackmap, 0);
- if (val > 0)
+ if (val >= 0)
stackid_kernel = 2;
val = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);
- if (val > 0)
+ if (val >= 0)
stackid_user = 2;
trace = bpf_map_lookup_elem(&stackdata_map, &key);
diff --git a/tools/testing/selftests/bpf/progs/perfbuf_bench.c b/tools/testing/selftests/bpf/progs/perfbuf_bench.c
index e5ab4836a641..29c1639fc78a 100644
--- a/tools/testing/selftests/bpf/progs/perfbuf_bench.c
+++ b/tools/testing/selftests/bpf/progs/perfbuf_bench.c
@@ -4,6 +4,7 @@
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
@@ -18,10 +19,9 @@ const volatile int batch_cnt = 0;
long sample_val = 42;
long dropped __attribute__((aligned(128))) = 0;
-SEC("fentry/__x64_sys_getpgid")
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
int bench_perfbuf(void *ctx)
{
- __u64 *sample;
int i;
for (i = 0; i < batch_cnt; i++) {
diff --git a/tools/testing/selftests/bpf/progs/preempt_lock.c b/tools/testing/selftests/bpf/progs/preempt_lock.c
new file mode 100644
index 000000000000..7d04254e61f1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/preempt_lock.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+extern int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void *unsafe_ptr__ign, u64 flags) __weak __ksym;
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+int preempt_lock_missing_1(struct __sk_buff *ctx)
+{
+ bpf_preempt_disable();
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+int preempt_lock_missing_2(struct __sk_buff *ctx)
+{
+ bpf_preempt_disable();
+ bpf_preempt_disable();
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+int preempt_lock_missing_3(struct __sk_buff *ctx)
+{
+ bpf_preempt_disable();
+ bpf_preempt_disable();
+ bpf_preempt_disable();
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+int preempt_lock_missing_3_minus_2(struct __sk_buff *ctx)
+{
+ bpf_preempt_disable();
+ bpf_preempt_disable();
+ bpf_preempt_disable();
+ bpf_preempt_enable();
+ bpf_preempt_enable();
+ return 0;
+}
+
+static __noinline void preempt_disable(void)
+{
+ bpf_preempt_disable();
+}
+
+static __noinline void preempt_enable(void)
+{
+ bpf_preempt_enable();
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+int preempt_lock_missing_1_subprog(struct __sk_buff *ctx)
+{
+ preempt_disable();
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+int preempt_lock_missing_2_subprog(struct __sk_buff *ctx)
+{
+ preempt_disable();
+ preempt_disable();
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+int preempt_lock_missing_2_minus_1_subprog(struct __sk_buff *ctx)
+{
+ preempt_disable();
+ preempt_disable();
+ preempt_enable();
+ return 0;
+}
+
+static __noinline void preempt_balance_subprog(void)
+{
+ preempt_disable();
+ preempt_enable();
+}
+
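+/* bpf_guard_preempt() (bpf_experimental.h) disables preemption and
+ * re-enables it automatically at scope exit via a cleanup attribute,
+ * giving a balanced region with no explicit enable call.
+ */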
+SEC("?tc")
+__success int preempt_balance(struct __sk_buff *ctx)
+{
+ bpf_guard_preempt();
+ return 0;
+}
+
+SEC("?tc")
+__success int preempt_balance_subprog_test(struct __sk_buff *ctx)
+{
+ preempt_balance_subprog();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("sleepable helper bpf_copy_from_user#")
+int preempt_sleepable_helper(void *ctx)
+{
+ u32 data;
+
+ bpf_preempt_disable();
+ bpf_copy_from_user(&data, sizeof(data), NULL);
+ bpf_preempt_enable();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("kernel func bpf_copy_from_user_str is sleepable within non-preemptible region")
+int preempt_sleepable_kfunc(void *ctx)
+{
+ u32 data;
+
+ bpf_preempt_disable();
+ bpf_copy_from_user_str(&data, sizeof(data), NULL, 0);
+ bpf_preempt_enable();
+ return 0;
+}
+
+int __noinline preempt_global_subprog(void)
+{
+ preempt_balance_subprog();
+ return 0;
+}
+
+SEC("?tc")
+__success
+int preempt_global_subprog_test(struct __sk_buff *ctx)
+{
+ preempt_disable();
+ preempt_global_subprog();
+ preempt_enable();
+ return 0;
+}
+
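+/* Global subprogs are verified independently, so a sleepable helper or
+ * kfunc reached through one (even indirectly) must still be rejected
+ * when the caller sits inside a preempt-disabled region.
+ */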
+int __noinline
+global_subprog(int i)
+{
+ if (i)
+ bpf_printk("%p", &i);
+ return i;
+}
+
+int __noinline
+global_sleepable_helper_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user(&i, sizeof(i), NULL);
+ return i;
+}
+
+int __noinline
+global_sleepable_kfunc_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user_str(&i, sizeof(i), NULL, 0);
+ global_subprog(i);
+ return i;
+}
+
+int __noinline
+global_subprog_calling_sleepable_global(int i)
+{
+ if (!i)
+ global_sleepable_kfunc_subprog(i);
+ return i;
+}
+
+SEC("?syscall")
+__failure __msg("global functions that may sleep are not allowed in non-sleepable context")
+int preempt_global_sleepable_helper_subprog(struct __sk_buff *ctx)
+{
+ preempt_disable();
+ if (ctx->mark)
+ global_sleepable_helper_subprog(ctx->mark);
+ preempt_enable();
+ return 0;
+}
+
+SEC("?syscall")
+__failure __msg("global functions that may sleep are not allowed in non-sleepable context")
+int preempt_global_sleepable_kfunc_subprog(struct __sk_buff *ctx)
+{
+ preempt_disable();
+ if (ctx->mark)
+ global_sleepable_kfunc_subprog(ctx->mark);
+ preempt_enable();
+ return 0;
+}
+
+SEC("?syscall")
+__failure __msg("global functions that may sleep are not allowed in non-sleepable context")
+int preempt_global_sleepable_subprog_indirect(struct __sk_buff *ctx)
+{
+ preempt_disable();
+ if (ctx->mark)
+ global_subprog_calling_sleepable_global(ctx->mark);
+ preempt_enable();
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c b/tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c
new file mode 100644
index 000000000000..55907ef961bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_experimental.h"
+
+struct bin_data {
+ char data[256];
+ struct bpf_spin_lock lock;
+};
+
+struct map_value {
+	struct bin_data __kptr *data;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 2048);
+} array SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
+bool nomem_err = false;
+
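+/* The four fentry progs below each churn a distinct 512-slot slice of the
+ * map, dropping and reallocating kptrs so that the bpf memory allocator is
+ * exercised from contexts that may be preempted; nomem_err records any
+ * allocation failure for the user-space side to check.
+ */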
+static int del_array(unsigned int i, int *from)
+{
+ struct map_value *value;
+ struct bin_data *old;
+
+ value = bpf_map_lookup_elem(&array, from);
+ if (!value)
+ return 1;
+
+ old = bpf_kptr_xchg(&value->data, NULL);
+ if (old)
+ bpf_obj_drop(old);
+
+ (*from)++;
+ return 0;
+}
+
+static int add_array(unsigned int i, int *from)
+{
+ struct bin_data *old, *new;
+ struct map_value *value;
+
+ value = bpf_map_lookup_elem(&array, from);
+ if (!value)
+ return 1;
+
+ new = bpf_obj_new(typeof(*new));
+ if (!new) {
+ nomem_err = true;
+ return 1;
+ }
+
+ old = bpf_kptr_xchg(&value->data, new);
+ if (old)
+ bpf_obj_drop(old);
+
+ (*from)++;
+ return 0;
+}
+
+static void del_then_add_array(int from)
+{
+ int i;
+
+ i = from;
+ bpf_loop(512, del_array, &i, 0);
+
+ i = from;
+ bpf_loop(512, add_array, &i, 0);
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG2(test0, int, a)
+{
+ del_then_add_array(0);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test2")
+int BPF_PROG2(test1, int, a, u64, b)
+{
+ del_then_add_array(512);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test3")
+int BPF_PROG2(test2, char, a, int, b, u64, c)
+{
+ del_then_add_array(1024);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test4")
+int BPF_PROG2(test3, void *, a, char, b, int, c, u64, d)
+{
+ del_then_add_array(1536);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/prepare.c b/tools/testing/selftests/bpf/progs/prepare.c
new file mode 100644
index 000000000000..cfc1f48e0d28
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/prepare.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+int err;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096);
+} ringbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} array_map SEC(".maps");
+
+SEC("cgroup_skb/egress")
+int program(struct __sk_buff *skb)
+{
+ err = 0;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/priv_freplace_prog.c b/tools/testing/selftests/bpf/progs/priv_freplace_prog.c
new file mode 100644
index 000000000000..ccf1b04010ba
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/priv_freplace_prog.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("freplace/xdp_prog1")
+int new_xdp_prog2(struct xdp_md *xd)
+{
+ return XDP_DROP;
+}
diff --git a/tools/testing/selftests/bpf/progs/priv_map.c b/tools/testing/selftests/bpf/progs/priv_map.c
new file mode 100644
index 000000000000..9085be50f03b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/priv_map.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_QUEUE);
+ __uint(max_entries, 1);
+ __type(value, __u32);
+} priv_map SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/priv_prog.c b/tools/testing/selftests/bpf/progs/priv_prog.c
new file mode 100644
index 000000000000..725e29595079
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/priv_prog.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("xdp")
+int xdp_prog1(struct xdp_md *xdp)
+{
+ return XDP_DROP;
+}
diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue.c b/tools/testing/selftests/bpf/progs/pro_epilogue.c
new file mode 100644
index 000000000000..d97d6e07ef5c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pro_epilogue.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+void __kfunc_btf_root(void)
+{
+ bpf_kfunc_st_ops_inc10(NULL);
+}
+
+static __noinline __used int subprog(struct st_ops_args *args)
+{
+ args->a += 1;
+ return args->a;
+}
+
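+/* The __xlated() annotations below assert on the verifier's translated
+ * instruction stream: the test module's struct_ops injects the prologue
+ * and/or epilogue seen around each naked main prog.
+ */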
+__success
+/* prologue */
+__xlated("0: r6 = *(u64 *)(r1 +0)")
+__xlated("1: r7 = *(u64 *)(r6 +0)")
+__xlated("2: r7 += 1000")
+__xlated("3: *(u64 *)(r6 +0) = r7")
+/* main prog */
+__xlated("4: r1 = *(u64 *)(r1 +0)")
+__xlated("5: r6 = r1")
+__xlated("6: call kernel-function")
+__xlated("7: r1 = r6")
+__xlated("8: call pc+1")
+__xlated("9: exit")
+SEC("struct_ops/test_prologue")
+__naked int test_prologue(void)
+{
+ asm volatile (
+ "r1 = *(u64 *)(r1 +0);"
+ "r6 = r1;"
+ "call %[bpf_kfunc_st_ops_inc10];"
+ "r1 = r6;"
+ "call subprog;"
+ "exit;"
+ :
+ : __imm(bpf_kfunc_st_ops_inc10)
+ : __clobber_all);
+}
+
+__success
+/* save __u64 *ctx to stack */
+__xlated("0: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("1: r1 = *(u64 *)(r1 +0)")
+__xlated("2: r6 = r1")
+__xlated("3: call kernel-function")
+__xlated("4: r1 = r6")
+__xlated("5: call pc+")
+/* epilogue */
+__xlated("6: r1 = *(u64 *)(r10 -8)")
+__xlated("7: r1 = *(u64 *)(r1 +0)")
+__xlated("8: r6 = *(u64 *)(r1 +0)")
+__xlated("9: r6 += 10000")
+__xlated("10: *(u64 *)(r1 +0) = r6")
+__xlated("11: r0 = r6")
+__xlated("12: r0 *= 2")
+__xlated("13: exit")
+SEC("struct_ops/test_epilogue")
+__naked int test_epilogue(void)
+{
+ asm volatile (
+ "r1 = *(u64 *)(r1 +0);"
+ "r6 = r1;"
+ "call %[bpf_kfunc_st_ops_inc10];"
+ "r1 = r6;"
+ "call subprog;"
+ "exit;"
+ :
+ : __imm(bpf_kfunc_st_ops_inc10)
+ : __clobber_all);
+}
+
+__success
+/* prologue */
+__xlated("0: r6 = *(u64 *)(r1 +0)")
+__xlated("1: r7 = *(u64 *)(r6 +0)")
+__xlated("2: r7 += 1000")
+__xlated("3: *(u64 *)(r6 +0) = r7")
+/* save __u64 *ctx to stack */
+__xlated("4: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("5: r1 = *(u64 *)(r1 +0)")
+__xlated("6: r6 = r1")
+__xlated("7: call kernel-function")
+__xlated("8: r1 = r6")
+__xlated("9: call pc+")
+/* epilogue */
+__xlated("10: r1 = *(u64 *)(r10 -8)")
+__xlated("11: r1 = *(u64 *)(r1 +0)")
+__xlated("12: r6 = *(u64 *)(r1 +0)")
+__xlated("13: r6 += 10000")
+__xlated("14: *(u64 *)(r1 +0) = r6")
+__xlated("15: r0 = r6")
+__xlated("16: r0 *= 2")
+__xlated("17: exit")
+SEC("struct_ops/test_pro_epilogue")
+__naked int test_pro_epilogue(void)
+{
+ asm volatile (
+ "r1 = *(u64 *)(r1 +0);"
+ "r6 = r1;"
+ "call %[bpf_kfunc_st_ops_inc10];"
+ "r1 = r6;"
+ "call subprog;"
+ "exit;"
+ :
+ : __imm(bpf_kfunc_st_ops_inc10)
+ : __clobber_all);
+}
+
+SEC("syscall")
+__retval(1011) /* PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] */
+int syscall_prologue(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_prologue(&args);
+}
+
+SEC("syscall")
+__retval(20022) /* (KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
+int syscall_epilogue(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_epilogue(&args);
+}
+
+SEC("syscall")
+__retval(22022) /* (PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
+int syscall_pro_epilogue(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_pro_epilogue(&args);
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops pro_epilogue = {
+ .test_prologue = (void *)test_prologue,
+ .test_epilogue = (void *)test_epilogue,
+ .test_pro_epilogue = (void *)test_pro_epilogue,
+};
diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c
new file mode 100644
index 000000000000..6048d79be48b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
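+/* The main progs below branch back to their first instruction; the
+ * __xlated() checks verify that branch offsets stay correct after a
+ * prologue and/or ctx spill is prepended.
+ */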
+__success
+/* prologue */
+__xlated("0: r6 = *(u64 *)(r1 +0)")
+__xlated("1: r7 = *(u64 *)(r6 +0)")
+__xlated("2: r7 += 1000")
+__xlated("3: *(u64 *)(r6 +0) = r7")
+/* main prog */
+__xlated("4: if r1 == 0x0 goto pc+5")
+__xlated("5: if r1 == 0x1 goto pc+2")
+__xlated("6: r1 = 1")
+__xlated("7: goto pc-3")
+__xlated("8: r1 = 0")
+__xlated("9: goto pc-6")
+__xlated("10: r0 = 0")
+__xlated("11: exit")
+SEC("struct_ops/test_prologue_goto_start")
+__naked int test_prologue_goto_start(void)
+{
+ asm volatile (
+ "if r1 == 0 goto +5;"
+ "if r1 == 1 goto +2;"
+ "r1 = 1;"
+ "goto -3;"
+ "r1 = 0;"
+ "goto -6;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+__success
+/* save __u64 *ctx to stack */
+__xlated("0: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("1: if r1 == 0x0 goto pc+5")
+__xlated("2: if r1 == 0x1 goto pc+2")
+__xlated("3: r1 = 1")
+__xlated("4: goto pc-3")
+__xlated("5: r1 = 0")
+__xlated("6: goto pc-6")
+__xlated("7: r0 = 0")
+/* epilogue */
+__xlated("8: r1 = *(u64 *)(r10 -8)")
+__xlated("9: r1 = *(u64 *)(r1 +0)")
+__xlated("10: r6 = *(u64 *)(r1 +0)")
+__xlated("11: r6 += 10000")
+__xlated("12: *(u64 *)(r1 +0) = r6")
+__xlated("13: r0 = r6")
+__xlated("14: r0 *= 2")
+__xlated("15: exit")
+SEC("struct_ops/test_epilogue_goto_start")
+__naked int test_epilogue_goto_start(void)
+{
+ asm volatile (
+ "if r1 == 0 goto +5;"
+ "if r1 == 1 goto +2;"
+ "r1 = 1;"
+ "goto -3;"
+ "r1 = 0;"
+ "goto -6;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+__success
+/* prologue */
+__xlated("0: r6 = *(u64 *)(r1 +0)")
+__xlated("1: r7 = *(u64 *)(r6 +0)")
+__xlated("2: r7 += 1000")
+__xlated("3: *(u64 *)(r6 +0) = r7")
+/* save __u64 *ctx to stack */
+__xlated("4: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("5: if r1 == 0x0 goto pc+5")
+__xlated("6: if r1 == 0x1 goto pc+2")
+__xlated("7: r1 = 1")
+__xlated("8: goto pc-3")
+__xlated("9: r1 = 0")
+__xlated("10: goto pc-6")
+__xlated("11: r0 = 0")
+/* epilogue */
+__xlated("12: r1 = *(u64 *)(r10 -8)")
+__xlated("13: r1 = *(u64 *)(r1 +0)")
+__xlated("14: r6 = *(u64 *)(r1 +0)")
+__xlated("15: r6 += 10000")
+__xlated("16: *(u64 *)(r1 +0) = r6")
+__xlated("17: r0 = r6")
+__xlated("18: r0 *= 2")
+__xlated("19: exit")
+SEC("struct_ops/test_pro_epilogue_goto_start")
+__naked int test_pro_epilogue_goto_start(void)
+{
+ asm volatile (
+ "if r1 == 0 goto +5;"
+ "if r1 == 1 goto +2;"
+ "r1 = 1;"
+ "goto -3;"
+ "r1 = 0;"
+ "goto -6;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops epilogue_goto_start = {
+ .test_prologue = (void *)test_prologue_goto_start,
+ .test_epilogue = (void *)test_epilogue_goto_start,
+ .test_pro_epilogue = (void *)test_pro_epilogue_goto_start,
+};
+
+SEC("syscall")
+__retval(0)
+int syscall_prologue_goto_start(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_prologue(&args);
+}
+
+SEC("syscall")
+__retval(20000) /* (EPILOGUE_A [10000]) * 2 */
+int syscall_epilogue_goto_start(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_epilogue(&args);
+}
+
+SEC("syscall")
+__retval(22000) /* (PROLOGUE_A [1000] + EPILOGUE_A [10000]) * 2 */
+int syscall_pro_epilogue_goto_start(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_pro_epilogue(&args);
+}
diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue_with_kfunc.c b/tools/testing/selftests/bpf/progs/pro_epilogue_with_kfunc.c
new file mode 100644
index 000000000000..a5a8f08ac8fb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pro_epilogue_with_kfunc.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+void __kfunc_btf_root(void)
+{
+ bpf_kfunc_st_ops_inc10(NULL);
+}
+
+static __noinline __used int subprog(struct st_ops_args *args)
+{
+ args->a += 1;
+ return args->a;
+}
+
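+/* Unlike pro_epilogue.c, the generated prologue and epilogue here call a
+ * kfunc themselves, visible as the extra "call kernel-function" insns in
+ * the __xlated() expectations below.
+ */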
+__success
+/* prologue */
+__xlated("0: r8 = r1")
+__xlated("1: r1 = 0")
+__xlated("2: call kernel-function")
+__xlated("3: if r0 != 0x0 goto pc+5")
+__xlated("4: r6 = *(u64 *)(r8 +0)")
+__xlated("5: r7 = *(u64 *)(r6 +0)")
+__xlated("6: r7 += 1000")
+__xlated("7: *(u64 *)(r6 +0) = r7")
+__xlated("8: goto pc+2")
+__xlated("9: r1 = r0")
+__xlated("10: call kernel-function")
+__xlated("11: r1 = r8")
+/* save __u64 *ctx to stack */
+__xlated("12: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("13: r1 = *(u64 *)(r1 +0)")
+__xlated("14: r6 = r1")
+__xlated("15: call kernel-function")
+__xlated("16: r1 = r6")
+__xlated("17: call pc+")
+/* epilogue */
+__xlated("18: r1 = 0")
+__xlated("19: r6 = 0")
+__xlated("20: call kernel-function")
+__xlated("21: if r0 != 0x0 goto pc+6")
+__xlated("22: r1 = *(u64 *)(r10 -8)")
+__xlated("23: r1 = *(u64 *)(r1 +0)")
+__xlated("24: r6 = *(u64 *)(r1 +0)")
+__xlated("25: r6 += 10000")
+__xlated("26: *(u64 *)(r1 +0) = r6")
+__xlated("27: goto pc+2")
+__xlated("28: r1 = r0")
+__xlated("29: call kernel-function")
+__xlated("30: r0 = r6")
+__xlated("31: r0 *= 2")
+__xlated("32: exit")
+SEC("struct_ops/test_pro_epilogue")
+__naked int test_kfunc_pro_epilogue(void)
+{
+ asm volatile (
+ "r1 = *(u64 *)(r1 +0);"
+ "r6 = r1;"
+ "call %[bpf_kfunc_st_ops_inc10];"
+ "r1 = r6;"
+ "call subprog;"
+ "exit;"
+ :
+ : __imm(bpf_kfunc_st_ops_inc10)
+ : __clobber_all);
+}
+
+SEC("syscall")
+__retval(22022) /* (PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
+int syscall_pro_epilogue(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_pro_epilogue(&args);
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops pro_epilogue_with_kfunc = {
+ .test_pro_epilogue = (void *)test_kfunc_pro_epilogue,
+};
diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h
index 4896fdf816f7..813143b4985d 100644
--- a/tools/testing/selftests/bpf/progs/profiler.inc.h
+++ b/tools/testing/selftests/bpf/progs/profiler.inc.h
@@ -6,6 +6,10 @@
#include <bpf/bpf_tracing.h>
#include "profiler.h"
+#include "err.h"
+#include "bpf_experimental.h"
+#include "bpf_compiler.h"
+#include "bpf_misc.h"
#ifndef NULL
#define NULL 0
@@ -16,7 +20,6 @@
#define O_DIRECTORY 00200000
#define __O_TMPFILE 020000000
#define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
-#define MAX_ERRNO 4095
#define S_IFMT 00170000
#define S_IFSOCK 0140000
#define S_IFLNK 0120000
@@ -34,7 +37,6 @@
#define S_ISBLK(m) (((m)&S_IFMT) == S_IFBLK)
#define S_ISFIFO(m) (((m)&S_IFMT) == S_IFIFO)
#define S_ISSOCK(m) (((m)&S_IFMT) == S_IFSOCK)
-#define IS_ERR_VALUE(x) (unsigned long)(void*)(x) >= (unsigned long)-MAX_ERRNO
#define KILL_DATA_ARRAY_SIZE 8
@@ -132,10 +134,6 @@ struct {
__uint(max_entries, 16);
} disallowed_exec_inodes SEC(".maps");
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
-#endif
-
static INLINE bool IS_ERR(const void* ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
@@ -156,10 +154,10 @@ probe_read_lim(void* dst, void* src, unsigned long len, unsigned long max)
{
len = len < max ? len : max;
if (len > 1) {
- if (bpf_probe_read(dst, len, src))
+ if (bpf_probe_read_kernel(dst, len, src))
return 0;
} else if (len == 1) {
- if (bpf_probe_read(dst, 1, src))
+ if (bpf_probe_read_kernel(dst, 1, src))
return 0;
}
return len;
@@ -169,7 +167,7 @@ static INLINE int get_var_spid_index(struct var_kill_data_arr_t* arr_struct,
int spid)
{
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
if (arr_struct->array[i].meta.pid == spid)
@@ -185,7 +183,7 @@ static INLINE void populate_ancestors(struct task_struct* task,
ancestors_data->num_ancestors = 0;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (num_ancestors = 0; num_ancestors < MAX_ANCESTORS; num_ancestors++) {
parent = BPF_CORE_READ(parent, real_parent);
@@ -212,20 +210,20 @@ static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node,
size_t filepart_length;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < MAX_CGROUPS_PATH_DEPTH; i++) {
filepart_length =
- bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(cgroup_node, name));
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(cgroup_node, name));
if (!cgroup_node)
return payload;
if (cgroup_node == cgroup_root_node)
*root_pos = payload - payload_start;
- if (filepart_length <= MAX_PATH) {
- barrier_var(filepart_length);
+ if (bpf_cmp_likely(filepart_length, <=, MAX_PATH)) {
payload += filepart_length;
}
- cgroup_node = BPF_CORE_READ(cgroup_node, parent);
+ cgroup_node = BPF_CORE_READ(cgroup_node, __parent);
}
return payload;
}
@@ -261,7 +259,7 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
int cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id___local,
pids_cgrp_id___local);
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
struct cgroup_subsys_state* subsys =
@@ -303,19 +301,17 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
cgroup_data->cgroup_full_length = 0;
size_t cgroup_root_length =
- bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(root_kernfs, name));
- barrier_var(cgroup_root_length);
- if (cgroup_root_length <= MAX_PATH) {
- barrier_var(cgroup_root_length);
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(root_kernfs, name));
+ if (bpf_cmp_likely(cgroup_root_length, <=, MAX_PATH)) {
cgroup_data->cgroup_root_length = cgroup_root_length;
payload += cgroup_root_length;
}
size_t cgroup_proc_length =
- bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(proc_kernfs, name));
- barrier_var(cgroup_proc_length);
- if (cgroup_proc_length <= MAX_PATH) {
- barrier_var(cgroup_proc_length);
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(proc_kernfs, name));
+ if (bpf_cmp_likely(cgroup_proc_length, <=, MAX_PATH)) {
cgroup_data->cgroup_proc_length = cgroup_proc_length;
payload += cgroup_proc_length;
}
@@ -345,9 +341,7 @@ static INLINE void* populate_var_metadata(struct var_metadata_t* metadata,
metadata->comm_length = 0;
size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
- barrier_var(comm_length);
- if (comm_length <= TASK_COMM_LEN) {
- barrier_var(comm_length);
+ if (bpf_cmp_likely(comm_length, <=, TASK_COMM_LEN)) {
metadata->comm_length = comm_length;
payload += comm_length;
}
@@ -395,7 +389,8 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
arr_struct = bpf_map_lookup_elem(&data_heap, &zero);
if (arr_struct == NULL)
return 0;
- bpf_probe_read(&arr_struct->array[0], sizeof(arr_struct->array[0]), kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[0],
+ sizeof(arr_struct->array[0]), kill_data);
} else {
int index = get_var_spid_index(arr_struct, spid);
@@ -405,12 +400,13 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
if (kill_data == NULL)
return 0;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
if (arr_struct->array[i].meta.pid == 0) {
- bpf_probe_read(&arr_struct->array[i],
- sizeof(arr_struct->array[i]), kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[i],
+ sizeof(arr_struct->array[i]),
+ kill_data);
bpf_map_update_elem(&var_tpid_to_data, &tpid,
arr_struct, 0);
@@ -427,17 +423,17 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
if (delta_sec < STALE_INFO) {
kill_data->kill_count++;
kill_data->last_kill_time = bpf_ktime_get_ns();
- bpf_probe_read(&arr_struct->array[index],
- sizeof(arr_struct->array[index]),
- kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[index],
+ sizeof(arr_struct->array[index]),
+ kill_data);
} else {
struct var_kill_data_t* kill_data =
get_var_kill_data(ctx, spid, tpid, sig);
if (kill_data == NULL)
return 0;
- bpf_probe_read(&arr_struct->array[index],
- sizeof(arr_struct->array[index]),
- kill_data);
+ bpf_probe_read_kernel(&arr_struct->array[index],
+ sizeof(arr_struct->array[index]),
+ kill_data);
}
}
bpf_map_update_elem(&var_tpid_to_data, &tpid, arr_struct, 0);
@@ -484,15 +480,15 @@ read_absolute_file_path_from_dentry(struct dentry* filp_dentry, void* payload)
struct dentry* parent_dentry;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < MAX_PATH_DEPTH; i++) {
- filepart_length = bpf_probe_read_str(payload, MAX_PATH,
- BPF_CORE_READ(filp_dentry, d_name.name));
- barrier_var(filepart_length);
- if (filepart_length > MAX_PATH)
+ filepart_length =
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(filp_dentry, d_name.name));
+ bpf_nop_mov(filepart_length);
+ if (bpf_cmp_unlikely(filepart_length, >, MAX_PATH))
break;
- barrier_var(filepart_length);
payload += filepart_length;
length += filepart_length;
@@ -510,7 +506,7 @@ is_ancestor_in_allowed_inodes(struct dentry* filp_dentry)
{
struct dentry* parent_dentry;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < MAX_PATH_DEPTH; i++) {
u64 dir_ino = BPF_CORE_READ(filp_dentry, d_inode, i_ino);
@@ -572,19 +568,18 @@ ssize_t BPF_KPROBE(kprobe__proc_sys_write,
sysctl_data->sysctl_val_length = 0;
sysctl_data->sysctl_path_length = 0;
- size_t sysctl_val_length = bpf_probe_read_str(payload, CTL_MAXNAME, buf);
- barrier_var(sysctl_val_length);
- if (sysctl_val_length <= CTL_MAXNAME) {
- barrier_var(sysctl_val_length);
+ size_t sysctl_val_length = bpf_probe_read_kernel_str(payload,
+ CTL_MAXNAME, buf);
+ if (bpf_cmp_likely(sysctl_val_length, <=, CTL_MAXNAME)) {
sysctl_data->sysctl_val_length = sysctl_val_length;
payload += sysctl_val_length;
}
- size_t sysctl_path_length = bpf_probe_read_str(payload, MAX_PATH,
- BPF_CORE_READ(filp, f_path.dentry, d_name.name));
- barrier_var(sysctl_path_length);
- if (sysctl_path_length <= MAX_PATH) {
- barrier_var(sysctl_path_length);
+ size_t sysctl_path_length =
+ bpf_probe_read_kernel_str(payload, MAX_PATH,
+ BPF_CORE_READ(filp, f_path.dentry,
+ d_name.name));
+ if (bpf_cmp_likely(sysctl_path_length, <=, MAX_PATH)) {
sysctl_data->sysctl_path_length = sysctl_path_length;
payload += sysctl_path_length;
}
@@ -601,7 +596,7 @@ out:
}
SEC("tracepoint/syscalls/sys_enter_kill")
-int tracepoint__syscalls__sys_enter_kill(struct trace_event_raw_sys_enter* ctx)
+int tracepoint__syscalls__sys_enter_kill(struct syscall_trace_enter* ctx)
{
struct bpf_func_stats_ctx stats_ctx;
@@ -632,13 +627,14 @@ int raw_tracepoint__sched_process_exit(void* ctx)
struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) {
struct var_kill_data_t* past_kill_data = &arr_struct->array[i];
- if (past_kill_data != NULL && past_kill_data->kill_target_pid == tpid) {
- bpf_probe_read(kill_data, sizeof(*past_kill_data), past_kill_data);
+ if (past_kill_data != NULL && past_kill_data->kill_target_pid == (pid_t)tpid) {
+ bpf_probe_read_kernel(kill_data, sizeof(*past_kill_data),
+ past_kill_data);
void* payload = kill_data->payload;
size_t offset = kill_data->payload_length;
if (offset >= MAX_METADATA_PAYLOAD_LEN + MAX_CGROUP_PAYLOAD_LEN)
@@ -649,18 +645,16 @@ int raw_tracepoint__sched_process_exit(void* ctx)
kill_data->kill_target_cgroup_proc_length = 0;
size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
- barrier_var(comm_length);
- if (comm_length <= TASK_COMM_LEN) {
- barrier_var(comm_length);
+ if (bpf_cmp_likely(comm_length, <=, TASK_COMM_LEN)) {
kill_data->kill_target_name_length = comm_length;
payload += comm_length;
}
- size_t cgroup_proc_length = bpf_probe_read_str(payload, KILL_TARGET_LEN,
- BPF_CORE_READ(proc_kernfs, name));
- barrier_var(cgroup_proc_length);
- if (cgroup_proc_length <= KILL_TARGET_LEN) {
- barrier_var(cgroup_proc_length);
+ size_t cgroup_proc_length =
+ bpf_probe_read_kernel_str(payload,
+ KILL_TARGET_LEN,
+ BPF_CORE_READ(proc_kernfs, name));
+ if (bpf_cmp_likely(cgroup_proc_length, <=, KILL_TARGET_LEN)) {
kill_data->kill_target_cgroup_proc_length = cgroup_proc_length;
payload += cgroup_proc_length;
}
@@ -718,10 +712,9 @@ int raw_tracepoint__sched_process_exec(struct bpf_raw_tracepoint_args* ctx)
proc_exec_data->parent_start_time = BPF_CORE_READ(parent_task, start_time);
const char* filename = BPF_CORE_READ(bprm, filename);
- size_t bin_path_length = bpf_probe_read_str(payload, MAX_FILENAME_LEN, filename);
- barrier_var(bin_path_length);
- if (bin_path_length <= MAX_FILENAME_LEN) {
- barrier_var(bin_path_length);
+ size_t bin_path_length =
+ bpf_probe_read_kernel_str(payload, MAX_FILENAME_LEN, filename);
+ if (bpf_cmp_likely(bin_path_length, <=, MAX_FILENAME_LEN)) {
proc_exec_data->bin_path_length = bin_path_length;
payload += bin_path_length;
}
@@ -731,8 +724,7 @@ int raw_tracepoint__sched_process_exec(struct bpf_raw_tracepoint_args* ctx)
unsigned int cmdline_length = probe_read_lim(payload, arg_start,
arg_end - arg_start, MAX_ARGS_LEN);
- if (cmdline_length <= MAX_ARGS_LEN) {
- barrier_var(cmdline_length);
+ if (bpf_cmp_likely(cmdline_length, <=, MAX_ARGS_LEN)) {
proc_exec_data->cmdline_length = cmdline_length;
payload += cmdline_length;
}
@@ -809,9 +801,7 @@ int kprobe_ret__do_filp_open(struct pt_regs* ctx)
payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
size_t len = read_absolute_file_path_from_dentry(filp_dentry, payload);
- barrier_var(len);
- if (len <= MAX_FILEPATH_LENGTH) {
- barrier_var(len);
+ if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
payload += len;
filemod_data->dst_filepath_length = len;
}
@@ -826,8 +816,9 @@ out:
SEC("kprobe/vfs_link")
int BPF_KPROBE(kprobe__vfs_link,
- struct dentry* old_dentry, struct inode* dir,
- struct dentry* new_dentry, struct inode** delegated_inode)
+ struct dentry* old_dentry, struct mnt_idmap *idmap,
+ struct inode* dir, struct dentry* new_dentry,
+ struct inode** delegated_inode)
{
struct bpf_func_stats_ctx stats_ctx;
bpf_stats_enter(&stats_ctx, profiler_bpf_vfs_link);
@@ -863,17 +854,13 @@ int BPF_KPROBE(kprobe__vfs_link,
payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
size_t len = read_absolute_file_path_from_dentry(old_dentry, payload);
- barrier_var(len);
- if (len <= MAX_FILEPATH_LENGTH) {
- barrier_var(len);
+ if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
payload += len;
filemod_data->src_filepath_length = len;
}
len = read_absolute_file_path_from_dentry(new_dentry, payload);
- barrier_var(len);
- if (len <= MAX_FILEPATH_LENGTH) {
- barrier_var(len);
+ if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
payload += len;
filemod_data->dst_filepath_length = len;
}
@@ -921,17 +908,14 @@ int BPF_KPROBE(kprobe__vfs_symlink, struct inode* dir, struct dentry* dentry,
filemod_data->payload);
payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
- size_t len = bpf_probe_read_str(payload, MAX_FILEPATH_LENGTH, oldname);
- barrier_var(len);
- if (len <= MAX_FILEPATH_LENGTH) {
- barrier_var(len);
+ size_t len = bpf_probe_read_kernel_str(payload, MAX_FILEPATH_LENGTH,
+ oldname);
+ if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
payload += len;
filemod_data->src_filepath_length = len;
}
len = read_absolute_file_path_from_dentry(dentry, payload);
- barrier_var(len);
- if (len <= MAX_FILEPATH_LENGTH) {
- barrier_var(len);
+ if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
payload += len;
filemod_data->dst_filepath_length = len;
}
diff --git a/tools/testing/selftests/bpf/progs/profiler1.c b/tools/testing/selftests/bpf/progs/profiler1.c
index 4df9088bfc00..fb6b13522949 100644
--- a/tools/testing/selftests/bpf/progs/profiler1.c
+++ b/tools/testing/selftests/bpf/progs/profiler1.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))
#define UNROLL
#define INLINE __always_inline
#include "profiler.inc.h"
diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h
index 2fb7adafb6b6..86484f07e1d1 100644
--- a/tools/testing/selftests/bpf/progs/pyperf.h
+++ b/tools/testing/selftests/bpf/progs/pyperf.h
@@ -7,6 +7,8 @@
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_compiler.h"
#define FUNCTION_NAME_LEN 64
#define FILE_NAME_LEN 128
@@ -159,6 +161,57 @@ struct {
__uint(value_size, sizeof(long long) * 127);
} stackmap SEC(".maps");
+#ifdef USE_BPF_LOOP
+struct process_frame_ctx {
+ int cur_cpu;
+ int32_t *symbol_counter;
+ void *frame_ptr;
+ FrameData *frame;
+ PidData *pidData;
+ Symbol *sym;
+ Event *event;
+ bool done;
+};
+
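+/* bpf_loop() callback: unwinds one python frame per invocation; returning
+ * 1 stops the loop early (map update failure or stack depth limit hit).
+ */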
+static int process_frame_callback(__u32 i, struct process_frame_ctx *ctx)
+{
+ int zero = 0;
+ void *frame_ptr = ctx->frame_ptr;
+ PidData *pidData = ctx->pidData;
+ FrameData *frame = ctx->frame;
+ int32_t *symbol_counter = ctx->symbol_counter;
+ int cur_cpu = ctx->cur_cpu;
+ Event *event = ctx->event;
+ Symbol *sym = ctx->sym;
+
+ if (frame_ptr && get_frame_data(frame_ptr, pidData, frame, sym)) {
+ int32_t new_symbol_id = *symbol_counter * 64 + cur_cpu;
+ int32_t *symbol_id = bpf_map_lookup_elem(&symbolmap, sym);
+
+ if (!symbol_id) {
+ bpf_map_update_elem(&symbolmap, sym, &zero, 0);
+ symbol_id = bpf_map_lookup_elem(&symbolmap, sym);
+ if (!symbol_id) {
+ ctx->done = true;
+ return 1;
+ }
+ }
+ if (*symbol_id == new_symbol_id)
+ (*symbol_counter)++;
+
+ barrier_var(i);
+ if (i >= STACK_MAX_LEN)
+ return 1;
+
+ event->stack[i] = *symbol_id;
+
+ event->stack_len = i + 1;
+		ctx->frame_ptr = frame->f_back; /* advance for the next callback invocation */
+ }
+ return 0;
+}
+#endif /* USE_BPF_LOOP */
+
#ifdef GLOBAL_FUNC
__noinline
#elif defined(SUBPROGS)
@@ -228,13 +281,37 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx)
int32_t* symbol_counter = bpf_map_lookup_elem(&symbolmap, &sym);
if (symbol_counter == NULL)
return 0;
-#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+#ifdef USE_BPF_LOOP
+ struct process_frame_ctx ctx = {
+ .cur_cpu = cur_cpu,
+ .symbol_counter = symbol_counter,
+ .frame_ptr = frame_ptr,
+ .frame = &frame,
+ .pidData = pidData,
+ .sym = &sym,
+ .event = event,
+ };
+
+ bpf_loop(STACK_MAX_LEN, process_frame_callback, &ctx, 0);
+ if (ctx.done)
+ return 0;
#else
-#pragma clang loop unroll(full)
-#endif
+#if defined(USE_ITER)
+/* no for loop, no unrolling */
+#elif defined(NO_UNROLL)
+ __pragma_loop_no_unroll
+#elif defined(UNROLL_COUNT)
+ __pragma_loop_unroll_count(UNROLL_COUNT)
+#else
+ __pragma_loop_unroll_full
+#endif /* NO_UNROLL */
/* Unwind python stack */
+#ifdef USE_ITER
+ int i;
+ bpf_for(i, 0, STACK_MAX_LEN) {
+#else /* !USE_ITER */
for (int i = 0; i < STACK_MAX_LEN; ++i) {
+#endif
if (frame_ptr && get_frame_data(frame_ptr, pidData, &frame, &sym)) {
int32_t new_symbol_id = *symbol_counter * 64 + cur_cpu;
int32_t *symbol_id = bpf_map_lookup_elem(&symbolmap, &sym);
@@ -251,6 +328,7 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx)
frame_ptr = frame.f_back;
}
}
+#endif /* USE_BPF_LOOP */
event->stack_complete = frame_ptr == NULL;
} else {
event->stack_complete = 1;
@@ -268,7 +346,7 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx)
SEC("raw_tracepoint/kfree_skb")
int on_event(struct bpf_raw_tracepoint_args* ctx)
{
- int i, ret = 0;
+ int ret = 0;
ret |= __on_event(ctx);
ret |= __on_event(ctx);
ret |= __on_event(ctx);
diff --git a/tools/testing/selftests/bpf/progs/pyperf180.c b/tools/testing/selftests/bpf/progs/pyperf180.c
index c39f559d3100..42c4a8b62e36 100644
--- a/tools/testing/selftests/bpf/progs/pyperf180.c
+++ b/tools/testing/selftests/bpf/progs/pyperf180.c
@@ -1,4 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 180
+
+/* An llvm upstream commit in clang18
+ * https://github.com/llvm/llvm-project/commit/1a2e77cf9e11dbf56b5720c607313a566eebb16e
+ * changed inlining behavior and caused a compilation failure: some branch
+ * target distances exceeded the 16-bit representation, which is the maximum
+ * for cpu v1/v2/v3. The macro __BPF_CPU_VERSION__ was later implemented in
+ * clang18 to report which cpu version is used for compilation, so a smaller
+ * unroll count can be set if __BPF_CPU_VERSION__ is less than 4, which
+ * reduces some branch target distances and resolves the compilation failure.
+ *
+ * To cover the case where a developer/CI uses clang18 but the compiler
+ * checkout does not yet have __BPF_CPU_VERSION__, a smaller unroll count is
+ * set there as well to prevent potential compilation failures.
+ */
+#ifdef __BPF_CPU_VERSION__
+#if __BPF_CPU_VERSION__ < 4
+#define UNROLL_COUNT 90
+#endif
+#elif __clang_major__ == 18
+#define UNROLL_COUNT 90
+#endif
+
#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/pyperf600.c b/tools/testing/selftests/bpf/progs/pyperf600.c
index cb49b89e37cd..ce1aa5189cc4 100644
--- a/tools/testing/selftests/bpf/progs/pyperf600.c
+++ b/tools/testing/selftests/bpf/progs/pyperf600.c
@@ -1,9 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 600
-/* clang will not unroll the loop 600 times.
- * Instead it will unroll it to the amount it deemed
- * appropriate, but the loop will still execute 600 times.
- * Total program size is around 90k insns
+/* A full unroll of 600 iterations brings the total
+ * program size close to 298k insns, which may push
+ * some BPF_JMP offsets out of the 16-bit integer range.
+ * So limit the unroll count to 150: the total program
+ * size stays around 80k insns while the loop still
+ * executes 600 times.
*/
+#define UNROLL_COUNT 150
#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c b/tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c
new file mode 100644
index 000000000000..5c2059dc01af
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#define STACK_MAX_LEN 600
+#define USE_BPF_LOOP
+#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/pyperf600_iter.c b/tools/testing/selftests/bpf/progs/pyperf600_iter.c
new file mode 100644
index 000000000000..d62e1b200c30
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pyperf600_iter.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+#define STACK_MAX_LEN 600
+#define SUBPROGS
+#define NO_UNROLL
+#define USE_ITER
+#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c b/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c
index 6beff7502f4d..520b58c4f8db 100644
--- a/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c
+++ b/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c
@@ -2,7 +2,4 @@
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 600
#define NO_UNROLL
-/* clang will not unroll at all.
- * Total program size is around 2k insns
- */
#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/raw_tp_null.c b/tools/testing/selftests/bpf/progs/raw_tp_null.c
new file mode 100644
index 000000000000..efa416f53968
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/raw_tp_null.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+int tid;
+int i;
+
+SEC("tp_btf/bpf_testmod_test_raw_tp_null_tp")
+int BPF_PROG(test_raw_tp_null, struct sk_buff *skb)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+
+ if (task->pid != tid)
+ return 0;
+
+ /* If dead code elimination kicks in, the "+= 2" increment below will
+ * be removed. For raw_tp programs attaching to tracepoints in kernel
+ * modules, we mark input arguments as PTR_MAYBE_NULL, so the verifier's
+ * branch prediction should never kick in.
+ */
+ asm volatile ("%[i] += 1; if %[ctx] != 0 goto +1; %[i] += 2;"
+ : [i]"+r"(i)
+ : [ctx]"r"(skb)
+ : "memory");
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/raw_tp_null_fail.c b/tools/testing/selftests/bpf/progs/raw_tp_null_fail.c
new file mode 100644
index 000000000000..0d58114a4955
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/raw_tp_null_fail.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Ensure a raw_tp argument from a kernel-module tracepoint is marked PTR_MAYBE_NULL */
+SEC("tp_btf/bpf_testmod_test_raw_tp_null_tp")
+__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'")
+int test_raw_tp_null_bpf_testmod_test_raw_tp_null_arg_1(void *ctx) {
+ asm volatile("r1 = *(u64 *)(r1 +0); r1 = *(u64 *)(r1 +0);" ::: __clobber_all);
+ return 0;
+}
+
+/* Ensure the second argument of the sched_pi_setprio tracepoint gets the same NULL marking */
+SEC("tp_btf/sched_pi_setprio")
+__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'")
+int test_raw_tp_null_sched_pi_setprio_arg_2(void *ctx) {
+ asm volatile("r1 = *(u64 *)(r1 +8); r1 = *(u64 *)(r1 +0);" ::: __clobber_all);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c
new file mode 100644
index 000000000000..49fe93d7e059
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+struct node_data {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+};
+
+struct root_nested_inner {
+ struct bpf_spin_lock glock;
+ struct bpf_rb_root root __contains(node_data, node);
+};
+
+struct root_nested {
+ struct root_nested_inner inner;
+};
+
+long less_callback_ran = -1;
+long removed_key = -1;
+long first_data[2] = {-1, -1};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+private(A) struct bpf_rb_root groot_array[2] __contains(node_data, node);
+private(A) struct bpf_rb_root groot_array_one[1] __contains(node_data, node);
+private(B) struct root_nested groot_nested;
+
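+/* Comparator for bpf_rbtree_add(): return true when a should sort before
+ * b. less_callback_ran lets user space confirm the callback actually ran.
+ */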
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+ less_callback_ran = 1;
+
+ return node_a->key < node_b->key;
+}
+
+static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock)
+{
+ struct node_data *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+ n->key = 5;
+
+ m = bpf_obj_new(typeof(*m));
+ if (!m) {
+ bpf_obj_drop(n);
+ return 2;
+ }
+ m->key = 1;
+
+ bpf_spin_lock(lock);
+ bpf_rbtree_add(root, &n->node, less);
+ bpf_rbtree_add(root, &m->node, less);
+ bpf_spin_unlock(lock);
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 3;
+ n->key = 3;
+
+ bpf_spin_lock(lock);
+ bpf_rbtree_add(root, &n->node, less);
+ bpf_spin_unlock(lock);
+ return 0;
+}
+
+SEC("tc")
+long rbtree_add_nodes(void *ctx)
+{
+ return __add_three(&groot, &glock);
+}
+
+SEC("tc")
+long rbtree_add_nodes_nested(void *ctx)
+{
+ return __add_three(&groot_nested.inner.root, &groot_nested.inner.glock);
+}
+
+SEC("tc")
+long rbtree_add_and_remove(void *ctx)
+{
+ struct bpf_rb_node *res = NULL;
+ struct node_data *n, *m = NULL;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ goto err_out;
+ n->key = 5;
+
+ m = bpf_obj_new(typeof(*m));
+ if (!m)
+ goto err_out;
+ m->key = 3;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_rbtree_add(&groot, &m->node, less);
+ res = bpf_rbtree_remove(&groot, &n->node);
+ bpf_spin_unlock(&glock);
+
+ if (!res)
+ return 1;
+
+ n = container_of(res, struct node_data, node);
+ removed_key = n->key;
+ bpf_obj_drop(n);
+
+ return 0;
+err_out:
+ if (n)
+ bpf_obj_drop(n);
+ if (m)
+ bpf_obj_drop(m);
+ return 1;
+}
+
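+/* Same add/remove flow, but against global arrays of rb_roots, checking
+ * that each array element is tracked as a distinct tree.
+ */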
+SEC("tc")
+long rbtree_add_and_remove_array(void *ctx)
+{
+ struct bpf_rb_node *res1 = NULL, *res2 = NULL, *res3 = NULL;
+ struct node_data *nodes[3][2] = {{NULL, NULL}, {NULL, NULL}, {NULL, NULL}};
+ struct node_data *n;
+ long k1 = -1, k2 = -1, k3 = -1;
+ int i, j;
+
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < 2; j++) {
+ nodes[i][j] = bpf_obj_new(typeof(*nodes[i][j]));
+ if (!nodes[i][j])
+ goto err_out;
+ nodes[i][j]->key = i * 2 + j;
+ }
+ }
+
+ bpf_spin_lock(&glock);
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 2; j++)
+ bpf_rbtree_add(&groot_array[i], &nodes[i][j]->node, less);
+ for (j = 0; j < 2; j++)
+ bpf_rbtree_add(&groot_array_one[0], &nodes[2][j]->node, less);
+ res1 = bpf_rbtree_remove(&groot_array[0], &nodes[0][0]->node);
+ res2 = bpf_rbtree_remove(&groot_array[1], &nodes[1][0]->node);
+ res3 = bpf_rbtree_remove(&groot_array_one[0], &nodes[2][0]->node);
+ bpf_spin_unlock(&glock);
+
+ if (res1) {
+ n = container_of(res1, struct node_data, node);
+ k1 = n->key;
+ bpf_obj_drop(n);
+ }
+ if (res2) {
+ n = container_of(res2, struct node_data, node);
+ k2 = n->key;
+ bpf_obj_drop(n);
+ }
+ if (res3) {
+ n = container_of(res3, struct node_data, node);
+ k3 = n->key;
+ bpf_obj_drop(n);
+ }
+ if (k1 != 0 || k2 != 2 || k3 != 4)
+ return 2;
+
+ return 0;
+
+err_out:
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < 2; j++) {
+ if (nodes[i][j])
+ bpf_obj_drop(nodes[i][j]);
+ }
+ }
+ return 1;
+}
+
+SEC("tc")
+long rbtree_first_and_remove(void *ctx)
+{
+ struct bpf_rb_node *res = NULL;
+ struct node_data *n, *m, *o;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+ n->key = 3;
+ n->data = 4;
+
+ m = bpf_obj_new(typeof(*m));
+ if (!m)
+ goto err_out;
+ m->key = 5;
+ m->data = 6;
+
+ o = bpf_obj_new(typeof(*o));
+ if (!o)
+ goto err_out;
+ o->key = 1;
+ o->data = 2;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_rbtree_add(&groot, &m->node, less);
+ bpf_rbtree_add(&groot, &o->node, less);
+
+ res = bpf_rbtree_first(&groot);
+ if (!res) {
+ bpf_spin_unlock(&glock);
+ return 2;
+ }
+
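+	/* The smallest key is 1 (node o, data 2), so first_data[0] should be 2 */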
+ o = container_of(res, struct node_data, node);
+ first_data[0] = o->data;
+
+ res = bpf_rbtree_remove(&groot, &o->node);
+ bpf_spin_unlock(&glock);
+
+ if (!res)
+ return 5;
+
+ o = container_of(res, struct node_data, node);
+ removed_key = o->key;
+ bpf_obj_drop(o);
+
+ bpf_spin_lock(&glock);
+ res = bpf_rbtree_first(&groot);
+ if (!res) {
+ bpf_spin_unlock(&glock);
+ return 3;
+ }
+
+ o = container_of(res, struct node_data, node);
+ first_data[1] = o->data;
+ bpf_spin_unlock(&glock);
+
+ return 0;
+err_out:
+ if (n)
+ bpf_obj_drop(n);
+ if (m)
+ bpf_obj_drop(m);
+ return 1;
+}
+
+SEC("tc")
+long rbtree_api_release_aliasing(void *ctx)
+{
+ struct node_data *n, *m, *o;
+ struct bpf_rb_node *res, *res2;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+ n->key = 41;
+ n->data = 42;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_spin_unlock(&glock);
+
+ bpf_spin_lock(&glock);
+
+	/* m and o will point to the same node, but the
+	 * verifier doesn't know this
+	 */
+ res = bpf_rbtree_first(&groot);
+ if (!res)
+ goto err_out;
+ o = container_of(res, struct node_data, node);
+
+ res = bpf_rbtree_first(&groot);
+ if (!res)
+ goto err_out;
+ m = container_of(res, struct node_data, node);
+
+ res = bpf_rbtree_remove(&groot, &m->node);
+	/* The previous remove returns an owning reference to m's node,
+	 * which is the same node that the non-owning ref o points at.
+	 * We can safely try to remove via o: the second rbtree_remove
+	 * will return NULL, since the node is no longer in a tree.
+	 *
+	 * Previously we relied on the verifier's type system plus
+	 * rbtree_remove invalidating non-owning refs to guarantee that
+	 * rbtree_remove couldn't fail; now rbtree_remove does runtime
+	 * checking, so non-owning refs are no longer invalidated after
+	 * a remove.
+	 */
+ res2 = bpf_rbtree_remove(&groot, &o->node);
+
+ bpf_spin_unlock(&glock);
+
+ if (res) {
+ o = container_of(res, struct node_data, node);
+ first_data[0] = o->data;
+ bpf_obj_drop(o);
+ }
+ if (res2) {
+ /* The second remove fails, so res2 is null and this doesn't
+ * execute
+ */
+ m = container_of(res2, struct node_data, node);
+ first_data[1] = m->data;
+ bpf_obj_drop(m);
+ }
+ return 0;
+
+err_out:
+ bpf_spin_unlock(&glock);
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c
new file mode 100644
index 000000000000..60079b202c07
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+struct node_data {
+ int key;
+ int data;
+ struct bpf_rb_node node;
+};
+
+struct node_data2 {
+ int key;
+ struct bpf_rb_node node;
+ int data;
+};
+
+static bool less2(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data2 *node_a;
+ struct node_data2 *node_b;
+
+ node_a = container_of(a, struct node_data2, node);
+ node_b = container_of(b, struct node_data2, node);
+
+ return node_a->key < node_b->key;
+}
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+
+SEC("tc")
+long rbtree_api_add__add_wrong_type(void *ctx)
+{
+ struct node_data2 *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less2);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c
new file mode 100644
index 000000000000..7651843f5a80
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+/* BTF load should fail: the bpf_rb_root's __contains tag points at this
+ * type's 'node' field, but 'node' is not a bpf_rb_node
+ */
+struct node_data {
+ int key;
+ int data;
+ struct bpf_list_node node;
+};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+
+SEC("tc")
+long rbtree_api_add__wrong_node_type(void *ctx)
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_first(&groot);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c
new file mode 100644
index 000000000000..4acb6af2dfe3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+struct node_data {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+private(A) struct bpf_rb_root groot2 __contains(node_data, node);
+
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+
+ return node_a->key < node_b->key;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
+long rbtree_api_nolock_add(void *ctx)
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_rbtree_add(&groot, &n->node, less);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
+long rbtree_api_nolock_remove(void *ctx)
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_spin_unlock(&glock);
+
+ bpf_rbtree_remove(&groot, &n->node);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
+long rbtree_api_nolock_first(void *ctx)
+{
+ bpf_rbtree_first(&groot);
+ return 0;
+}
+
+SEC("?tc")
+__retval(0)
+long rbtree_api_remove_unadded_node(void *ctx)
+{
+ struct node_data *n, *m;
+ struct bpf_rb_node *res_n, *res_m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ m = bpf_obj_new(typeof(*m));
+ if (!m) {
+ bpf_obj_drop(n);
+ return 1;
+ }
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+
+ res_n = bpf_rbtree_remove(&groot, &n->node);
+
+ res_m = bpf_rbtree_remove(&groot, &m->node);
+ bpf_spin_unlock(&glock);
+
+ bpf_obj_drop(m);
+ if (res_n)
+ bpf_obj_drop(container_of(res_n, struct node_data, node));
+ if (res_m) {
+ bpf_obj_drop(container_of(res_m, struct node_data, node));
+		/* m was never added to the rbtree, so res_m should be NULL */
+ return 2;
+ }
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference id=3 alloc_insn={{[0-9]+}}")
+long rbtree_api_remove_no_drop(void *ctx)
+{
+ struct bpf_rb_node *res;
+ struct node_data *n;
+
+ bpf_spin_lock(&glock);
+ res = bpf_rbtree_first(&groot);
+ if (!res)
+ goto unlock_err;
+
+ res = bpf_rbtree_remove(&groot, res);
+
+ if (res) {
+ n = container_of(res, struct node_data, node);
+ __sink(n);
+ }
+ bpf_spin_unlock(&glock);
+
+ /* if (res) { bpf_obj_drop(n); } is missing here */
+ return 0;
+
+unlock_err:
+ bpf_spin_unlock(&glock);
+ return 1;
+}
+
+SEC("?tc")
+__failure __msg("arg#1 expected pointer to allocated object")
+long rbtree_api_add_to_multiple_trees(void *ctx)
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+
+	/* This add should fail since n is already in groot's tree */
+ bpf_rbtree_add(&groot2, &n->node, less);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("dereference of modified ptr_or_null_ ptr R2 off=16 disallowed")
+long rbtree_api_use_unchecked_remove_retval(void *ctx)
+{
+ struct bpf_rb_node *res;
+
+ bpf_spin_lock(&glock);
+
+ res = bpf_rbtree_first(&groot);
+ if (!res)
+ goto err_out;
+ res = bpf_rbtree_remove(&groot, res);
+
+ bpf_spin_unlock(&glock);
+
+ bpf_spin_lock(&glock);
+	/* Must check res for NULL before using it in rbtree_add below */
+ bpf_rbtree_add(&groot, res, less);
+ bpf_spin_unlock(&glock);
+ return 0;
+
+err_out:
+ bpf_spin_unlock(&glock);
+ return 1;
+}
+
+SEC("?tc")
+__failure __msg("bpf_rbtree_remove can only take non-owning or refcounted bpf_rb_node pointer")
+long rbtree_api_add_release_unlock_escape(void *ctx)
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_spin_unlock(&glock);
+
+ bpf_spin_lock(&glock);
+	/* After the add() in the previous critical section, n should be
+	 * release_on_unlock and released after the previous spin_unlock,
+	 * so it should not be possible to use it here
+	 */
+ bpf_rbtree_remove(&groot, &n->node);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("bpf_rbtree_remove can only take non-owning or refcounted bpf_rb_node pointer")
+long rbtree_api_first_release_unlock_escape(void *ctx)
+{
+ struct bpf_rb_node *res;
+ struct node_data *n;
+
+ bpf_spin_lock(&glock);
+ res = bpf_rbtree_first(&groot);
+ if (!res) {
+ bpf_spin_unlock(&glock);
+ return 1;
+ }
+ n = container_of(res, struct node_data, node);
+ bpf_spin_unlock(&glock);
+
+ bpf_spin_lock(&glock);
+	/* After the first() in the previous critical section, n should be
+	 * release_on_unlock and released after the previous spin_unlock,
+	 * so it should not be possible to use it here
+	 */
+ bpf_rbtree_remove(&groot, &n->node);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+static bool less__bad_fn_call_add(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+ bpf_rbtree_add(&groot, &node_a->node, less);
+
+ return node_a->key < node_b->key;
+}
+
+static bool less__bad_fn_call_remove(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+ bpf_rbtree_remove(&groot, &node_a->node);
+
+ return node_a->key < node_b->key;
+}
+
+static bool less__bad_fn_call_first_unlock_after(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+ bpf_rbtree_first(&groot);
+ bpf_spin_unlock(&glock);
+
+ return node_a->key < node_b->key;
+}
+
+static __always_inline
+long add_with_cb(bool (cb)(struct bpf_rb_node *a, const struct bpf_rb_node *b))
+{
+ struct node_data *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, cb);
+ bpf_spin_unlock(&glock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("arg#1 expected pointer to allocated object")
+long rbtree_api_add_bad_cb_bad_fn_call_add(void *ctx)
+{
+ return add_with_cb(less__bad_fn_call_add);
+}
+
+SEC("?tc")
+__failure __msg("rbtree_remove not allowed in rbtree cb")
+long rbtree_api_add_bad_cb_bad_fn_call_remove(void *ctx)
+{
+ return add_with_cb(less__bad_fn_call_remove);
+}
+
+SEC("?tc")
+__failure __msg("can't spin_{lock,unlock} in rbtree cb")
+long rbtree_api_add_bad_cb_bad_fn_call_first_unlock_after(void *ctx)
+{
+ return add_with_cb(less__bad_fn_call_first_unlock_after);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rbtree_search.c b/tools/testing/selftests/bpf/progs/rbtree_search.c
new file mode 100644
index 000000000000..b05565d1db0d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rbtree_search.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct node_data {
+ struct bpf_refcount ref;
+ struct bpf_rb_node r0;
+ struct bpf_rb_node r1;
+ int key0;
+ int key1;
+};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock0;
+private(A) struct bpf_rb_root groot0 __contains(node_data, r0);
+
+private(B) struct bpf_spin_lock glock1;
+private(B) struct bpf_rb_root groot1 __contains(node_data, r1);
+
+#define rb_entry(ptr, type, member) container_of(ptr, type, member)
+#define NR_NODES 16
+
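+/* read at runtime so the loop's start value is not a known constant to the verifier */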
+int zero = 0;
+
+static bool less0(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = rb_entry(a, struct node_data, r0);
+ node_b = rb_entry(b, struct node_data, r0);
+
+ return node_a->key0 < node_b->key0;
+}
+
+static bool less1(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = rb_entry(a, struct node_data, r1);
+ node_b = rb_entry(b, struct node_data, r1);
+
+ return node_a->key1 < node_b->key1;
+}
+
+SEC("syscall")
+__retval(0)
+long rbtree_search(void *ctx)
+{
+ struct bpf_rb_node *rb_n, *rb_m, *gc_ns[NR_NODES];
+ long lookup_key = NR_NODES / 2;
+ struct node_data *n, *m;
+ int i, nr_gc = 0;
+
+ for (i = zero; i < NR_NODES && can_loop; i++) {
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return __LINE__;
+
+ m = bpf_refcount_acquire(n);
+
+ n->key0 = i;
+ m->key1 = i;
+
+ bpf_spin_lock(&glock0);
+ bpf_rbtree_add(&groot0, &n->r0, less0);
+ bpf_spin_unlock(&glock0);
+
+ bpf_spin_lock(&glock1);
+ bpf_rbtree_add(&groot1, &m->r1, less1);
+ bpf_spin_unlock(&glock1);
+ }
+
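+	/* Standard BST descent from the root; remember the nodes visited on
+	 * the way down in gc_ns so they can be removed and freed afterwards.
+	 */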
+ n = NULL;
+ bpf_spin_lock(&glock0);
+ rb_n = bpf_rbtree_root(&groot0);
+ while (can_loop) {
+ if (!rb_n) {
+ bpf_spin_unlock(&glock0);
+ return __LINE__;
+ }
+
+ n = rb_entry(rb_n, struct node_data, r0);
+ if (lookup_key == n->key0)
+ break;
+ if (nr_gc < NR_NODES)
+ gc_ns[nr_gc++] = rb_n;
+ if (lookup_key < n->key0)
+ rb_n = bpf_rbtree_left(&groot0, rb_n);
+ else
+ rb_n = bpf_rbtree_right(&groot0, rb_n);
+ }
+
+ if (!n || lookup_key != n->key0) {
+ bpf_spin_unlock(&glock0);
+ return __LINE__;
+ }
+
+ for (i = 0; i < nr_gc; i++) {
+ rb_n = gc_ns[i];
+ gc_ns[i] = bpf_rbtree_remove(&groot0, rb_n);
+ }
+
+ m = bpf_refcount_acquire(n);
+ bpf_spin_unlock(&glock0);
+
+ for (i = 0; i < nr_gc; i++) {
+ rb_n = gc_ns[i];
+ if (rb_n) {
+ n = rb_entry(rb_n, struct node_data, r0);
+ bpf_obj_drop(n);
+ }
+ }
+
+ if (!m)
+ return __LINE__;
+
+ bpf_spin_lock(&glock1);
+ rb_m = bpf_rbtree_remove(&groot1, &m->r1);
+ bpf_spin_unlock(&glock1);
+ bpf_obj_drop(m);
+ if (!rb_m)
+ return __LINE__;
+ bpf_obj_drop(rb_entry(rb_m, struct node_data, r1));
+
+ return 0;
+}
+
+#define TEST_ROOT(dolock) \
+SEC("syscall") \
+__failure __msg(MSG) \
+long test_root_spinlock_##dolock(void *ctx) \
+{ \
+ struct bpf_rb_node *rb_n; \
+ __u64 jiffies = 0; \
+ \
+ if (dolock) \
+ bpf_spin_lock(&glock0); \
+ rb_n = bpf_rbtree_root(&groot0); \
+ if (rb_n) \
+ jiffies = bpf_jiffies64(); \
+ if (dolock) \
+ bpf_spin_unlock(&glock0); \
+ \
+ return !!jiffies; \
+}
+
+#define TEST_LR(op, dolock) \
+SEC("syscall") \
+__failure __msg(MSG) \
+long test_##op##_spinlock_##dolock(void *ctx) \
+{ \
+ struct bpf_rb_node *rb_n; \
+ struct node_data *n; \
+ __u64 jiffies = 0; \
+ \
+ bpf_spin_lock(&glock0); \
+ rb_n = bpf_rbtree_root(&groot0); \
+ if (!rb_n) { \
+ bpf_spin_unlock(&glock0); \
+ return 1; \
+ } \
+ n = rb_entry(rb_n, struct node_data, r0); \
+ n = bpf_refcount_acquire(n); \
+ bpf_spin_unlock(&glock0); \
+ if (!n) \
+ return 1; \
+ \
+ if (dolock) \
+ bpf_spin_lock(&glock0); \
+ rb_n = bpf_rbtree_##op(&groot0, &n->r0); \
+ if (rb_n) \
+ jiffies = bpf_jiffies64(); \
+ if (dolock) \
+ bpf_spin_unlock(&glock0); \
+ \
+ return !!jiffies; \
+}
+
+/*
+ * Use a separate MSG macro instead of passing MSG to TEST_XXX(..., MSG)
+ * to ensure the message itself does not end up in the bpf prog's line
+ * info, which the verifier includes in its log.
+ * Otherwise the test_loader would incorrectly match the prog line info
+ * instead of the log generated by the verifier.
+ */
+#define MSG "call bpf_rbtree_root{{.+}}; R0{{(_w)?}}=rcu_ptr_or_null_node_data(id={{[0-9]+}},non_own_ref"
+TEST_ROOT(true)
+#undef MSG
+#define MSG "call bpf_rbtree_{{(left|right).+}}; R0{{(_w)?}}=rcu_ptr_or_null_node_data(id={{[0-9]+}},non_own_ref"
+TEST_LR(left, true)
+TEST_LR(right, true)
+#undef MSG
+
+#define MSG "bpf_spin_lock at off=0 must be held for bpf_rb_root"
+TEST_ROOT(false)
+TEST_LR(left, false)
+TEST_LR(right, false)
+#undef MSG
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c
new file mode 100644
index 000000000000..d70c28824bbe
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_tracing_net.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+__u32 user_data, target_pid;
+__s32 key_serial;
+__u64 flags, task_storage_val, cgroup_id;
+
+struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym;
+void bpf_key_put(struct bpf_key *key) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int get_cgroup_id(void *ctx)
+{
+ struct task_struct *task;
+ struct css_set *cgroups;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ /* simulate bpf_get_current_cgroup_id() helper */
+ bpf_rcu_read_lock();
+ cgroups = task->cgroups;
+ if (!cgroups)
+ goto unlock;
+ cgroup_id = cgroups->dfl_cgrp->kn->id;
+unlock:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int task_succ(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+ long init_val = 2;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ bpf_rcu_read_lock();
+	/* rcu read lock region, including helper calls that use the rcu ptr real_parent */
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ ptr = bpf_task_storage_get(&map_a, real_parent, &init_val,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ goto out;
+ ptr = bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ if (!ptr)
+ goto out;
+ task_storage_val = *ptr;
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int no_lock(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+	/* old-style ptr_to_btf_id access is not allowed in sleepable progs */
+ task = bpf_get_current_task_btf();
+ real_parent = task->real_parent;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int two_regions(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ /* two regions */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int non_sleepable_1(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int non_sleepable_2(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ bpf_rcu_read_lock();
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_unlock();
+
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int task_acquire(void *ctx)
+{
+ struct task_struct *task, *real_parent, *gparent;
+
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+
+ /* rcu_ptr->rcu_field */
+ gparent = real_parent->real_parent;
+ if (!gparent)
+ goto out;
+
+	/* acquire a reference which can be used outside the rcu read lock region */
+ gparent = bpf_task_acquire(gparent);
+ if (!gparent)
+ goto out;
+
+ (void)bpf_task_storage_get(&map_a, gparent, 0, 0);
+ bpf_task_release(gparent);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int miss_lock(void *ctx)
+{
+ struct task_struct *task;
+
+	/* the second bpf_rcu_read_unlock() below has no matching bpf_rcu_read_lock() */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ (void)bpf_task_storage_get(&map_a, task, 0, 0);
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int miss_unlock(void *ctx)
+{
+ struct task_struct *task;
+
+ /* missing bpf_rcu_read_unlock() */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ (void)bpf_task_storage_get(&map_a, task, 0, 0);
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int non_sleepable_rcu_mismatch(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ task = bpf_get_current_task_btf();
+ /* non-sleepable: missing bpf_rcu_read_unlock() in one path */
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ if (real_parent)
+ bpf_rcu_read_unlock();
+out:
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int inproper_sleepable_helper(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+ struct pt_regs *regs;
+ __u32 value = 0;
+ void *ptr;
+
+ task = bpf_get_current_task_btf();
+ /* sleepable helper in rcu read lock region */
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ regs = (struct pt_regs *)bpf_task_pt_regs(real_parent);
+ if (!regs)
+ goto out;
+
+ ptr = (void *)PT_REGS_IP(regs);
+ (void)bpf_copy_from_user_task(&value, sizeof(uint32_t), ptr, task, 0);
+ user_data = value;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?lsm.s/bpf")
+int BPF_PROG(inproper_sleepable_kfunc, int cmd, union bpf_attr *attr, unsigned int size,
+ bool kernel)
+{
+ struct bpf_key *bkey;
+
+ /* sleepable kfunc in rcu read lock region */
+ bpf_rcu_read_lock();
+ bkey = bpf_lookup_user_key(key_serial, flags);
+ bpf_rcu_read_unlock();
+ if (!bkey)
+ return -1;
+ bpf_key_put(bkey);
+
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int nested_rcu_region(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ /* nested rcu read lock regions */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+out:
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int nested_rcu_region_unbalanced_1(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+	/* nested rcu read lock regions, unbalanced: one more unlock than lock */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+out:
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int nested_rcu_region_unbalanced_2(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+	/* nested rcu read lock regions, unbalanced: one more lock than unlock */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ bpf_rcu_read_lock();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+out:
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int task_trusted_non_rcuptr(void *ctx)
+{
+ struct task_struct *task, *group_leader;
+
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ /* the pointer group_leader is explicitly marked as trusted */
+ group_leader = task->real_parent->group_leader;
+ (void)bpf_task_storage_get(&map_a, group_leader, 0, 0);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int task_untrusted_rcuptr(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ bpf_rcu_read_unlock();
+ /* helper use of rcu ptr outside the rcu read lock region */
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int cross_rcu_region(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+	/* rcu ptr defined in one rcu read lock region, used in another */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_lock();
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+__noinline
+static int static_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ if (bpf_get_prandom_u32())
+ return ret + 42;
+ return ret + bpf_get_prandom_u32();
+}
+
+__noinline
+int global_subprog(u64 a)
+{
+ volatile int ret = a;
+
+ return ret + static_subprog(NULL);
+}
+
+__noinline
+static int static_subprog_lock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ if (bpf_get_prandom_u32())
+ return ret + 42;
+ return ret + bpf_get_prandom_u32();
+}
+
+__noinline
+int global_subprog_lock(u64 a)
+{
+ volatile int ret = a;
+
+ return ret + static_subprog_lock(NULL);
+}
+
+__noinline
+static int static_subprog_unlock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_unlock();
+ if (bpf_get_prandom_u32())
+ return ret + 42;
+ return ret + bpf_get_prandom_u32();
+}
+
+__noinline
+int global_subprog_unlock(u64 a)
+{
+ volatile int ret = a;
+
+ return ret + static_subprog_unlock(NULL);
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ if (bpf_get_prandom_u32())
+ ret += static_subprog(ctx);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_global_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ if (bpf_get_prandom_u32())
+ ret += global_subprog(ret);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_subprog_lock(void *ctx)
+{
+ volatile int ret = 0;
+
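+	/* the matching bpf_rcu_read_lock() is taken inside static_subprog_lock() */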
+ ret += static_subprog_lock(ctx);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_global_subprog_lock(void *ctx)
+{
+ volatile int ret = 0;
+
+ ret += global_subprog_lock(ret);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_subprog_unlock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ ret += static_subprog_unlock(ctx);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_global_subprog_unlock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ ret += global_subprog_unlock(ret);
+ return 0;
+}
+
+int __noinline
+global_sleepable_helper_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user(&i, sizeof(i), NULL);
+ return i;
+}
+
+int __noinline
+global_sleepable_kfunc_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user_str(&i, sizeof(i), NULL, 0);
+ global_subprog(i);
+ return i;
+}
+
+int __noinline
+global_subprog_calling_sleepable_global(int i)
+{
+ if (!i)
+ global_sleepable_kfunc_subprog(i);
+ return i;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_sleepable_helper_global_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ ret += global_sleepable_helper_subprog(ret);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_sleepable_kfunc_global_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ ret += global_sleepable_kfunc_subprog(ret);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_sleepable_global_subprog_indirect(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ ret += global_subprog_calling_sleepable_global(ret);
+ bpf_rcu_read_unlock();
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c b/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c
new file mode 100644
index 000000000000..df4873558634
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct task_ls_map {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} task_ls_map SEC(".maps");
+
+long gp_seq;
+
+SEC("syscall")
+int do_call_rcu_tasks_trace(void *ctx)
+{
+ struct task_struct *current;
+ int *v;
+
+ current = bpf_get_current_task_btf();
+ v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!v)
+ return 1;
+	/* bpf_task_storage_delete() frees the element via call_rcu_tasks_trace() */
+ return bpf_task_storage_delete(&task_ls_map, current);
+}
+
+SEC("kprobe/rcu_tasks_trace_postgp")
+int rcu_tasks_trace_postgp(void *ctx)
+{
+ __sync_add_and_fetch(&gp_seq, 1);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c b/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c
new file mode 100644
index 000000000000..69da05bb6c63
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022. Huawei Technologies Co., Ltd */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+extern bool CONFIG_PREEMPTION __kconfig __weak;
+extern const int bpf_task_storage_busy __ksym;
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+int busy = 0;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} task SEC(".maps");
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(read_bpf_task_storage_busy)
+{
+ int *value;
+
+ if (!CONFIG_PREEMPTION)
+ return 0;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
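+	/* per-CPU counter used internally by task storage maps to detect recursion */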
+ value = bpf_this_cpu_ptr(&bpf_task_storage_busy);
+ if (value)
+ busy = *value;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c b/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c
new file mode 100644
index 000000000000..405adbe5e8b0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t target_pid = 0;
+
+char xattr_value[64];
+static const char expected_value_a[] = "bpf_selftest_value_a";
+static const char expected_value_b[] = "bpf_selftest_value_b";
+bool found_value_a;
+bool found_value_b;
+
+SEC("lsm.s/file_open")
+int BPF_PROG(test_file_open)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup_subsys_state *css, *tmp;
+ struct bpf_dynptr value_ptr;
+ struct cgroup *cgrp;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
+ return 0;
+
+ bpf_rcu_read_lock();
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp) {
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ css = &cgrp->self;
+ bpf_dynptr_from_mem(xattr_value, sizeof(xattr_value), 0, &value_ptr);
+ bpf_for_each(css, tmp, css, BPF_CGROUP_ITER_ANCESTORS_UP) {
+ int ret;
+
+ ret = bpf_cgroup_read_xattr(tmp->cgroup, "user.bpf_test",
+ &value_ptr);
+ if (ret < 0)
+ continue;
+
+ if (ret == sizeof(expected_value_a) &&
+ !bpf_strncmp(xattr_value, sizeof(expected_value_a), expected_value_a))
+ found_value_a = true;
+ if (ret == sizeof(expected_value_b) &&
+ !bpf_strncmp(xattr_value, sizeof(expected_value_b), expected_value_b))
+ found_value_b = true;
+ }
+
+ bpf_rcu_read_unlock();
+ bpf_cgroup_release(cgrp);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/read_vsyscall.c b/tools/testing/selftests/bpf/progs/read_vsyscall.c
new file mode 100644
index 000000000000..395591374d4f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/read_vsyscall.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024. Huawei Technologies Co., Ltd */
+#include "vmlinux.h"
+#include <linux/types.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+int target_pid = 0;
+void *user_ptr = 0;
+int read_ret[10];
+
+char _license[] SEC("license") = "GPL";
+
+/*
+ * These two are kfuncs; the other read/copy functions used below are helpers
+ */
+int bpf_copy_from_user_str(void *dst, u32, const void *, u64) __weak __ksym;
+int bpf_copy_from_user_task_str(void *dst, u32, const void *,
+ struct task_struct *, u64) __weak __ksym;
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int do_probe_read(void *ctx)
+{
+ char buf[8];
+
+ if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
+ return 0;
+
+ read_ret[0] = bpf_probe_read_kernel(buf, sizeof(buf), user_ptr);
+ read_ret[1] = bpf_probe_read_kernel_str(buf, sizeof(buf), user_ptr);
+ read_ret[2] = bpf_probe_read(buf, sizeof(buf), user_ptr);
+ read_ret[3] = bpf_probe_read_str(buf, sizeof(buf), user_ptr);
+ read_ret[4] = bpf_probe_read_user(buf, sizeof(buf), user_ptr);
+ read_ret[5] = bpf_probe_read_user_str(buf, sizeof(buf), user_ptr);
+
+ return 0;
+}
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int do_copy_from_user(void *ctx)
+{
+ char buf[8];
+
+ if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
+ return 0;
+
+ read_ret[6] = bpf_copy_from_user(buf, sizeof(buf), user_ptr);
+ read_ret[7] = bpf_copy_from_user_task(buf, sizeof(buf), user_ptr,
+ bpf_get_current_task_btf(), 0);
+ read_ret[8] = bpf_copy_from_user_str((char *)buf, sizeof(buf), user_ptr, 0);
+ read_ret[9] = bpf_copy_from_user_task_str((char *)buf,
+ sizeof(buf),
+ user_ptr,
+ bpf_get_current_task_btf(),
+ 0);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/recursion.c b/tools/testing/selftests/bpf/progs/recursion.c
new file mode 100644
index 000000000000..3c2423bb19e2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/recursion.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, long);
+} hash1 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, long);
+} hash2 SEC(".maps");
+
+int pass1 = 0;
+int pass2 = 0;
+
+SEC("fentry/htab_map_delete_elem")
+int BPF_PROG(on_delete, struct bpf_map *map)
+{
+ int key = 0;
+
+ if (map == (void *)&hash1) {
+ pass1++;
+ return 0;
+ }
+ if (map == (void *)&hash2) {
+ pass2++;
+ bpf_map_delete_elem(&hash2, &key);
+ return 0;
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/recvmsg4_prog.c b/tools/testing/selftests/bpf/progs/recvmsg4_prog.c
new file mode 100644
index 000000000000..59748c95471a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/recvmsg4_prog.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <sys/socket.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include <bpf_sockopt_helpers.h>
+
+#define SERV4_IP 0xc0a801feU /* 192.168.1.254 */
+#define SERV4_PORT 4040
+
+SEC("cgroup/recvmsg4")
+int recvmsg4_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock *sk;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 1;
+
+ if (sk->family != AF_INET)
+ return 1;
+
+ if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
+ return 1;
+
+ if (!get_set_sk_priority(ctx))
+ return 1;
+
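+	/* rewrite the peer address returned to userspace with the server's */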
+ ctx->user_ip4 = bpf_htonl(SERV4_IP);
+ ctx->user_port = bpf_htons(SERV4_PORT);
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/recvmsg6_prog.c b/tools/testing/selftests/bpf/progs/recvmsg6_prog.c
new file mode 100644
index 000000000000..d9a4016596d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/recvmsg6_prog.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+#include <linux/in6.h>
+#include <sys/socket.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include <bpf_sockopt_helpers.h>
+
+#define SERV6_IP_0 0xfaceb00c /* face:b00c:1234:5678::abcd */
+#define SERV6_IP_1 0x12345678
+#define SERV6_IP_2 0x00000000
+#define SERV6_IP_3 0x0000abcd
+#define SERV6_PORT 6060
+
+SEC("cgroup/recvmsg6")
+int recvmsg6_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock *sk;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 1;
+
+ if (sk->family != AF_INET6)
+ return 1;
+
+ if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
+ return 1;
+
+ if (!get_set_sk_priority(ctx))
+ return 1;
+
+ ctx->user_ip6[0] = bpf_htonl(SERV6_IP_0);
+ ctx->user_ip6[1] = bpf_htonl(SERV6_IP_1);
+ ctx->user_ip6[2] = bpf_htonl(SERV6_IP_2);
+ ctx->user_ip6[3] = bpf_htonl(SERV6_IP_3);
+ ctx->user_port = bpf_htons(SERV6_PORT);
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c
new file mode 100644
index 000000000000..1c7ab44bccfa
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+__u8 SERVUN_ADDRESS[] = "\0bpf_cgroup_unix_test";
+
+SEC("cgroup/recvmsg_unix")
+int recvmsg_unix_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);
+ struct sockaddr_un *sa_kern_unaddr;
+ __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) +
+ sizeof(SERVUN_ADDRESS) - 1;
+ int ret;
+
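+	/* rewrite the unix address; the prog always returns 1 to let recvmsg proceed */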
+ ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_ADDRESS,
+ sizeof(SERVUN_ADDRESS) - 1);
+ if (ret)
+ return 1;
+
+ if (sa_kern->uaddrlen != unaddrlen)
+ return 1;
+
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
+ if (memcmp(sa_kern_unaddr->sun_path, SERVUN_ADDRESS,
+ sizeof(SERVUN_ADDRESS) - 1) != 0)
+ return 1;
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
new file mode 100644
index 000000000000..1aca85d86aeb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -0,0 +1,631 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+extern void bpf_rcu_read_lock(void) __ksym;
+extern void bpf_rcu_read_unlock(void) __ksym;
+
+struct node_data {
+ long key;
+ long list_data;
+ struct bpf_rb_node r;
+ struct bpf_list_node l;
+ struct bpf_refcount ref;
+};
+
+struct map_value {
+ struct node_data __kptr *node;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 2);
+} stashed_nodes SEC(".maps");
+
+struct node_acquire {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+ struct bpf_refcount refcount;
+};
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock lock;
+private(A) struct bpf_rb_root root __contains(node_data, r);
+private(A) struct bpf_list_head head __contains(node_data, l);
+
+private(B) struct bpf_spin_lock alock;
+private(B) struct bpf_rb_root aroot __contains(node_acquire, node);
+
+private(C) struct bpf_spin_lock block;
+private(C) struct bpf_rb_root broot __contains(node_data, r);
+
+static bool less(struct bpf_rb_node *node_a, const struct bpf_rb_node *node_b)
+{
+ struct node_data *a;
+ struct node_data *b;
+
+ a = container_of(node_a, struct node_data, r);
+ b = container_of(node_b, struct node_data, r);
+
+ return a->key < b->key;
+}
+
+static bool less_a(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_acquire *node_a;
+ struct node_acquire *node_b;
+
+ node_a = container_of(a, struct node_acquire, node);
+ node_b = container_of(b, struct node_acquire, node);
+
+ return node_a->key < node_b->key;
+}
+
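+/* Insert one refcounted node into both the rbtree and the list: the owning
+ * ref n is consumed by rbtree_add, and the extra ref m, taken via
+ * bpf_refcount_acquire, is consumed by list_push_front.
+ */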
+static long __insert_in_tree_and_list(struct bpf_list_head *head,
+ struct bpf_rb_root *root,
+ struct bpf_spin_lock *lock)
+{
+ struct node_data *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return -1;
+
+ m = bpf_refcount_acquire(n);
+ m->key = 123;
+ m->list_data = 456;
+
+ bpf_spin_lock(lock);
+ if (bpf_rbtree_add(root, &n->r, less)) {
+ /* Failure to insert - unexpected */
+ bpf_spin_unlock(lock);
+ bpf_obj_drop(m);
+ return -2;
+ }
+ bpf_spin_unlock(lock);
+
+ bpf_spin_lock(lock);
+ if (bpf_list_push_front(head, &m->l)) {
+ /* Failure to insert - unexpected */
+ bpf_spin_unlock(lock);
+ return -3;
+ }
+ bpf_spin_unlock(lock);
+ return 0;
+}
+
+static long __stash_map_insert_tree(int idx, int val, struct bpf_rb_root *root,
+ struct bpf_spin_lock *lock)
+{
+ struct map_value *mapval;
+ struct node_data *n, *m;
+
+ mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
+ if (!mapval)
+ return -1;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return -2;
+
+ n->key = val;
+ m = bpf_refcount_acquire(n);
+
+ n = bpf_kptr_xchg(&mapval->node, n);
+ if (n) {
+ bpf_obj_drop(n);
+ bpf_obj_drop(m);
+ return -3;
+ }
+
+ bpf_spin_lock(lock);
+ if (bpf_rbtree_add(root, &m->r, less)) {
+ /* Failure to insert - unexpected */
+ bpf_spin_unlock(lock);
+ return -4;
+ }
+ bpf_spin_unlock(lock);
+ return 0;
+}
+
+static long __read_from_tree(struct bpf_rb_root *root,
+ struct bpf_spin_lock *lock,
+ bool remove_from_tree)
+{
+ struct bpf_rb_node *rb;
+ struct node_data *n;
+ long res = -99;
+
+ bpf_spin_lock(lock);
+
+ rb = bpf_rbtree_first(root);
+ if (!rb) {
+ bpf_spin_unlock(lock);
+ return -1;
+ }
+
+ n = container_of(rb, struct node_data, r);
+ res = n->key;
+
+ if (!remove_from_tree) {
+ bpf_spin_unlock(lock);
+ return res;
+ }
+
+ rb = bpf_rbtree_remove(root, rb);
+ bpf_spin_unlock(lock);
+ if (!rb)
+ return -2;
+ n = container_of(rb, struct node_data, r);
+ bpf_obj_drop(n);
+ return res;
+}
+
+static long __read_from_list(struct bpf_list_head *head,
+ struct bpf_spin_lock *lock,
+ bool remove_from_list)
+{
+ struct bpf_list_node *l;
+ struct node_data *n;
+ long res = -99;
+
+ bpf_spin_lock(lock);
+
+ l = bpf_list_pop_front(head);
+ if (!l) {
+ bpf_spin_unlock(lock);
+ return -1;
+ }
+
+ n = container_of(l, struct node_data, l);
+ res = n->list_data;
+
+ if (!remove_from_list) {
+ if (bpf_list_push_back(head, &n->l)) {
+ bpf_spin_unlock(lock);
+ return -2;
+ }
+ }
+
+ bpf_spin_unlock(lock);
+
+ if (remove_from_list)
+ bpf_obj_drop(n);
+ return res;
+}
+
+static long __read_from_unstash(int idx)
+{
+ struct node_data *n = NULL;
+ struct map_value *mapval;
+ long val = -99;
+
+ mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
+ if (!mapval)
+ return -1;
+
+ n = bpf_kptr_xchg(&mapval->node, n);
+ if (!n)
+ return -2;
+
+ val = n->key;
+ bpf_obj_drop(n);
+ return val;
+}
+
+#define INSERT_READ_BOTH(rem_tree, rem_list, desc) \
+SEC("tc") \
+__description(desc) \
+__success __retval(579) \
+long insert_and_remove_tree_##rem_tree##_list_##rem_list(void *ctx) \
+{ \
+ long err, tree_data, list_data; \
+ \
+ err = __insert_in_tree_and_list(&head, &root, &lock); \
+ if (err) \
+ return err; \
+ \
+ err = __read_from_tree(&root, &lock, rem_tree); \
+ if (err < 0) \
+ return err; \
+ else \
+ tree_data = err; \
+ \
+ err = __read_from_list(&head, &lock, rem_list); \
+ if (err < 0) \
+ return err; \
+ else \
+ list_data = err; \
+ \
+ return tree_data + list_data; \
+}
+
+/* After successful insert of struct node_data into both collections:
+ * - it should have refcount = 2
+ * - removing / not removing the node_data from a collection after
+ * reading should have no effect on ability to read / remove from
+ * the other collection
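+ * - the expected retval 579 is the tree key (123) plus the list_data (456)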
+ */
+INSERT_READ_BOTH(true, true, "insert_read_both: remove from tree + list");
+INSERT_READ_BOTH(false, false, "insert_read_both: remove from neither");
+INSERT_READ_BOTH(true, false, "insert_read_both: remove from tree");
+INSERT_READ_BOTH(false, true, "insert_read_both: remove from list");
+
+#undef INSERT_READ_BOTH
+#define INSERT_READ_BOTH(rem_tree, rem_list, desc) \
+SEC("tc") \
+__description(desc) \
+__success __retval(579) \
+long insert_and_remove_lf_tree_##rem_tree##_list_##rem_list(void *ctx) \
+{ \
+ long err, tree_data, list_data; \
+ \
+ err = __insert_in_tree_and_list(&head, &root, &lock); \
+ if (err) \
+ return err; \
+ \
+ err = __read_from_list(&head, &lock, rem_list); \
+ if (err < 0) \
+ return err; \
+ else \
+ list_data = err; \
+ \
+ err = __read_from_tree(&root, &lock, rem_tree); \
+ if (err < 0) \
+ return err; \
+ else \
+ tree_data = err; \
+ \
+ return tree_data + list_data; \
+}
+
+/* Similar to insert_read_both, but list data is read and possibly removed
+ * first
+ *
+ * Results should be no different from reading and possibly removing the
+ * rbtree node first
+ */
+INSERT_READ_BOTH(true, true, "insert_read_both_list_first: remove from tree + list");
+INSERT_READ_BOTH(false, false, "insert_read_both_list_first: remove from neither");
+INSERT_READ_BOTH(true, false, "insert_read_both_list_first: remove from tree");
+INSERT_READ_BOTH(false, true, "insert_read_both_list_first: remove from list");
+
+#define INSERT_DOUBLE_READ_AND_DEL(read_fn, read_root, desc) \
+SEC("tc") \
+__description(desc) \
+__success __retval(-1) \
+long insert_double_##read_fn##_and_del_##read_root(void *ctx) \
+{ \
+ long err, list_data; \
+ \
+ err = __insert_in_tree_and_list(&head, &root, &lock); \
+ if (err) \
+ return err; \
+ \
+ err = read_fn(&read_root, &lock, true); \
+ if (err < 0) \
+ return err; \
+ else \
+ list_data = err; \
+ \
+ err = read_fn(&read_root, &lock, true); \
+ if (err < 0) \
+ return err; \
+ \
+ return err + list_data; \
+}
+
+/* Insert into both tree and list, then try reading-and-removing from either twice
+ *
+ * The second read-and-remove should fail on the read step since the node
+ * has already been removed
+ */
+INSERT_DOUBLE_READ_AND_DEL(__read_from_tree, root, "insert_double_del: 2x read-and-del from tree");
+INSERT_DOUBLE_READ_AND_DEL(__read_from_list, head, "insert_double_del: 2x read-and-del from list");
+
+#define INSERT_STASH_READ(rem_tree, desc) \
+SEC("tc") \
+__description(desc) \
+__success __retval(84) \
+long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx) \
+{ \
+ long err, tree_data, map_data; \
+ \
+ err = __stash_map_insert_tree(0, 42, &root, &lock); \
+ if (err) \
+ return err; \
+ \
+ err = __read_from_tree(&root, &lock, rem_tree); \
+ if (err < 0) \
+ return err; \
+ else \
+ tree_data = err; \
+ \
+ err = __read_from_unstash(0); \
+ if (err < 0) \
+ return err; \
+ else \
+ map_data = err; \
+ \
+ return tree_data + map_data; \
+}
+
+/* Stash a refcounted node in map_val, insert the same node into the tree,
+ * then read data from the tree and then from the unstashed map_val,
+ * possibly removing the node from the tree first
+ *
+ * Removing from the tree should have no effect on the map_val kptr's validity
+ */
+INSERT_STASH_READ(true, "insert_stash_read: remove from tree");
+INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree");
+
+SEC("tc")
+__success
+long rbtree_refcounted_node_ref_escapes(void *ctx)
+{
+ struct node_acquire *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&alock);
+ bpf_rbtree_add(&aroot, &n->node, less_a);
+ m = bpf_refcount_acquire(n);
+ bpf_spin_unlock(&alock);
+ if (!m)
+ return 2;
+
+ m->key = 2;
+ bpf_obj_drop(m);
+ return 0;
+}
+
+SEC("tc")
+__success
+long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
+{
+ struct node_acquire *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ m = bpf_refcount_acquire(n);
+ m->key = 2;
+
+ bpf_spin_lock(&alock);
+ bpf_rbtree_add(&aroot, &n->node, less_a);
+ bpf_spin_unlock(&alock);
+
+ bpf_obj_drop(m);
+
+ return 0;
+}
+
+static long __stash_map_empty_xchg(struct node_data *n, int idx)
+{
+ struct map_value *mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
+
+ if (!mapval) {
+ bpf_obj_drop(n);
+ return 1;
+ }
+ n = bpf_kptr_xchg(&mapval->node, n);
+ if (n) {
+ bpf_obj_drop(n);
+ return 2;
+ }
+ return 0;
+}
+
+SEC("tc")
+long rbtree_wrong_owner_remove_fail_a1(void *ctx)
+{
+ struct node_data *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+ m = bpf_refcount_acquire(n);
+
+ if (__stash_map_empty_xchg(n, 0)) {
+ bpf_obj_drop(m);
+ return 2;
+ }
+
+ if (__stash_map_empty_xchg(m, 1))
+ return 3;
+
+ return 0;
+}
+
+SEC("tc")
+long rbtree_wrong_owner_remove_fail_b(void *ctx)
+{
+ struct map_value *mapval;
+ struct node_data *n;
+ int idx = 0;
+
+ mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ n = bpf_kptr_xchg(&mapval->node, NULL);
+ if (!n)
+ return 2;
+
+ bpf_spin_lock(&block);
+
+ bpf_rbtree_add(&broot, &n->r, less);
+
+ bpf_spin_unlock(&block);
+ return 0;
+}
+
+SEC("tc")
+long rbtree_wrong_owner_remove_fail_a2(void *ctx)
+{
+ struct map_value *mapval;
+ struct bpf_rb_node *res;
+ struct node_data *m;
+ int idx = 1;
+
+ mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ m = bpf_kptr_xchg(&mapval->node, NULL);
+ if (!m)
+ return 2;
+ bpf_spin_lock(&lock);
+
+	/* make m a non-owning ref */
+ bpf_list_push_back(&head, &m->l);
+ res = bpf_rbtree_remove(&root, &m->r);
+
+ bpf_spin_unlock(&lock);
+ if (res) {
+ bpf_obj_drop(container_of(res, struct node_data, r));
+ return 3;
+ }
+ return 0;
+}
+
+SEC("?fentry.s/bpf_testmod_test_read")
+__success
+int BPF_PROG(rbtree_sleepable_rcu,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ struct bpf_rb_node *rb;
+ struct node_data *n, *m = NULL;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 0;
+
+ bpf_rcu_read_lock();
+ bpf_spin_lock(&lock);
+ bpf_rbtree_add(&root, &n->r, less);
+ rb = bpf_rbtree_first(&root);
+ if (!rb)
+ goto err_out;
+
+ rb = bpf_rbtree_remove(&root, rb);
+ if (!rb)
+ goto err_out;
+
+ m = container_of(rb, struct node_data, r);
+
+err_out:
+ bpf_spin_unlock(&lock);
+ bpf_rcu_read_unlock();
+ if (m)
+ bpf_obj_drop(m);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_testmod_test_read")
+__success
+int BPF_PROG(rbtree_sleepable_rcu_no_explicit_rcu_lock,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ struct bpf_rb_node *rb;
+ struct node_data *n, *m = NULL;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 0;
+
+ /* No explicit bpf_rcu_read_lock */
+ bpf_spin_lock(&lock);
+ bpf_rbtree_add(&root, &n->r, less);
+ rb = bpf_rbtree_first(&root);
+ if (!rb)
+ goto err_out;
+
+ rb = bpf_rbtree_remove(&root, rb);
+ if (!rb)
+ goto err_out;
+
+ m = container_of(rb, struct node_data, r);
+
+err_out:
+ bpf_spin_unlock(&lock);
+ /* No explicit bpf_rcu_read_unlock */
+ if (m)
+ bpf_obj_drop(m);
+ return 0;
+}
+
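+/* Address of a node's bpf_refcount field; probe_read_refcount() reads it
+ * back to observe the live refcount value.
+ */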
+private(kptr_ref) u64 ref;
+
+static int probe_read_refcount(void)
+{
+ u32 refcount;
+
+ bpf_probe_read_kernel(&refcount, sizeof(refcount), (void *) ref);
+ return refcount;
+}
+
+static int __insert_in_list(struct bpf_list_head *head, struct bpf_spin_lock *lock,
+ struct node_data __kptr **node)
+{
+ struct node_data *node_new, *node_ref, *node_old;
+
+ node_new = bpf_obj_new(typeof(*node_new));
+ if (!node_new)
+ return -1;
+
+ node_ref = bpf_refcount_acquire(node_new);
+ node_old = bpf_kptr_xchg(node, node_new);
+ if (node_old) {
+ bpf_obj_drop(node_old);
+ bpf_obj_drop(node_ref);
+ return -2;
+ }
+
+ bpf_spin_lock(lock);
+ bpf_list_push_front(head, &node_ref->l);
+ ref = (u64)(void *) &node_ref->ref;
+ bpf_spin_unlock(lock);
+ return probe_read_refcount();
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} percpu_hash SEC(".maps");
+
+SEC("tc")
+int percpu_hash_refcount_leak(void *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&percpu_hash, &key);
+ if (!v)
+ return 0;
+
+ return __insert_in_list(&head, &lock, &v->node);
+}
+
+SEC("tc")
+int check_percpu_hash_refcount(void *ctx)
+{
+ return probe_read_refcount();
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
new file mode 100644
index 000000000000..836c8ab7b908
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+struct node_acquire {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+ struct bpf_refcount refcount;
+};
+
+extern void bpf_rcu_read_lock(void) __ksym;
+extern void bpf_rcu_read_unlock(void) __ksym;
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_acquire, node);
+
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct node_acquire *node_a;
+ struct node_acquire *node_b;
+
+ node_a = container_of(a, struct node_acquire, node);
+ node_b = container_of(b, struct node_acquire, node);
+
+ return node_a->key < node_b->key;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference id=4 alloc_insn={{[0-9]+}}")
+long rbtree_refcounted_node_ref_escapes(void *ctx)
+{
+ struct node_acquire *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ /* m becomes an owning ref but is never drop'd or added to a tree */
+ m = bpf_refcount_acquire(n);
+ bpf_spin_unlock(&glock);
+ if (!m)
+ return 2;
+
+ m->key = 2;
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+long refcount_acquire_maybe_null(void *ctx)
+{
+ struct node_acquire *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+	/* Intentionally not testing !n: the pointer is still PTR_MAYBE_NULL
+	 * when passed to bpf_refcount_acquire, which must be rejected.
+	 */
+ m = bpf_refcount_acquire(n);
+ if (m)
+ bpf_obj_drop(m);
+ if (n)
+ bpf_obj_drop(n);
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("Unreleased reference id=3 alloc_insn={{[0-9]+}}")
+long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
+{
+ struct node_acquire *n, *m;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 1;
+
+	/* m becomes an owning ref but is never dropped or added to a tree */
+ m = bpf_refcount_acquire(n);
+ m->key = 2;
+
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_spin_unlock(&glock);
+
+ return 0;
+}
+
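+/* Sleepable programs must keep a bpf_spin_lock critical section inside one
+ * RCU read-side critical section; below, bpf_rcu_read_unlock() is called
+ * while the lock is still held, so the program must fail to load.
+ */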
+SEC("?fentry.s/bpf_testmod_test_read")
+__failure __msg("function calls are not allowed while holding a lock")
+int BPF_PROG(rbtree_fail_sleepable_lock_across_rcu,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ struct node_acquire *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 0;
+
+	/* spin_{lock,unlock} are in different RCU read-side critical sections */
+ bpf_rcu_read_lock();
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_rcu_read_unlock();
+
+ bpf_rcu_read_lock();
+ bpf_spin_unlock(&glock);
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/res_spin_lock.c b/tools/testing/selftests/bpf/progs/res_spin_lock.c
new file mode 100644
index 000000000000..22c4fb8b9266
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/res_spin_lock.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024-2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define EDEADLK 35
+#define ETIMEDOUT 110
+
+struct arr_elem {
+ struct bpf_res_spin_lock lock;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 64);
+ __type(key, int);
+ __type(value, struct arr_elem);
+} arrmap SEC(".maps");
+
+struct bpf_res_spin_lock lockA __hidden SEC(".data.A");
+struct bpf_res_spin_lock lockB __hidden SEC(".data.B");
+
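+/* The AB and BA programs below take these two locks in opposite orders;
+ * when they race on different CPUs, the losing side must get -EDEADLK
+ * instead of deadlocking.
+ */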
+SEC("tc")
+int res_spin_lock_test(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem1, *elem2;
+ int r;
+
+ elem1 = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem1)
+ return -1;
+ elem2 = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem2)
+ return -1;
+
+ r = bpf_res_spin_lock(&elem1->lock);
+ if (r)
+ return r;
+ r = bpf_res_spin_lock(&elem2->lock);
+ if (!r) {
+ bpf_res_spin_unlock(&elem2->lock);
+ bpf_res_spin_unlock(&elem1->lock);
+ return -1;
+ }
+ bpf_res_spin_unlock(&elem1->lock);
+ return r != -EDEADLK;
+}
+
+SEC("tc")
+int res_spin_lock_test_AB(struct __sk_buff *ctx)
+{
+ int r;
+
+ r = bpf_res_spin_lock(&lockA);
+ if (r)
+ return !r;
+ /* Only unlock if we took the lock. */
+ if (!bpf_res_spin_lock(&lockB))
+ bpf_res_spin_unlock(&lockB);
+ bpf_res_spin_unlock(&lockA);
+ return 0;
+}
+
+int err;
+
+SEC("tc")
+int res_spin_lock_test_BA(struct __sk_buff *ctx)
+{
+ int r;
+
+ r = bpf_res_spin_lock(&lockB);
+ if (r)
+ return !r;
+ if (!bpf_res_spin_lock(&lockA))
+ bpf_res_spin_unlock(&lockA);
+ else
+ err = -EDEADLK;
+ bpf_res_spin_unlock(&lockB);
+ return err ?: 0;
+}
+
+SEC("tc")
+int res_spin_lock_test_held_lock_max(struct __sk_buff *ctx)
+{
+ struct bpf_res_spin_lock *locks[48] = {};
+ struct arr_elem *e;
+ u64 time_beg, time;
+ int ret = 0, i;
+
+ _Static_assert(ARRAY_SIZE(((struct rqspinlock_held){}).locks) == 31,
+ "RES_NR_HELD assumed to be 31");
+
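+	/* Build 48 lock pointers: keys 0..33 give 34 distinct locks, while
+	 * entries 34..47 alias earlier ones via key i - 2. Taking the first
+	 * 34 overflows the 31-entry held-lock table, so locking locks[34]
+	 * (the same lock as locks[32]) is an AA deadlock the detector cannot
+	 * see and only the timeout can break.
+	 */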
+ for (i = 0; i < 34; i++) {
+ int key = i;
+
+		/* We cannot pass in i directly, as the compiler will spill/fill
+		 * it and it loses its bounds in verifier state.
+		 */
+ e = bpf_map_lookup_elem(&arrmap, &key);
+ if (!e)
+ return 1;
+ locks[i] = &e->lock;
+ }
+
+ for (; i < 48; i++) {
+ int key = i - 2;
+
+		/* We cannot pass in i directly, as the compiler will spill/fill
+		 * it and it loses its bounds in verifier state.
+		 */
+ e = bpf_map_lookup_elem(&arrmap, &key);
+ if (!e)
+ return 1;
+ locks[i] = &e->lock;
+ }
+
+ time_beg = bpf_ktime_get_ns();
+ for (i = 0; i < 34; i++) {
+ if (bpf_res_spin_lock(locks[i]))
+ goto end;
+ }
+
+ /* Trigger AA, after exhausting entries in the held lock table. This
+ * time, only the timeout can save us, as AA detection won't succeed.
+ */
+ ret = bpf_res_spin_lock(locks[34]);
+ if (!ret) {
+ bpf_res_spin_unlock(locks[34]);
+ ret = 1;
+ goto end;
+ }
+
+ ret = ret != -ETIMEDOUT ? 2 : 0;
+
+end:
+ for (i = i - 1; i >= 0; i--)
+ bpf_res_spin_unlock(locks[i]);
+ time = bpf_ktime_get_ns() - time_beg;
+	/* Time spent should be easily above our limit (1/4 s), since AA
+	 * detection cannot be expedited for lack of a held-lock entry.
+	 */
+ return ret ?: (time > 1000000000 / 4 ? 0 : 1);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/res_spin_lock_fail.c b/tools/testing/selftests/bpf/progs/res_spin_lock_fail.c
new file mode 100644
index 000000000000..330682a88c16
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/res_spin_lock_fail.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024-2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct arr_elem {
+ struct bpf_res_spin_lock lock;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct arr_elem);
+} arrmap SEC(".maps");
+
+long value;
+
+struct bpf_spin_lock lock __hidden SEC(".data.A");
+struct bpf_res_spin_lock res_lock __hidden SEC(".data.B");
+
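+/* Negative tests: each "?tc" program is loaded on its own and, unless
+ * marked __success, must be rejected with the verifier message in __msg().
+ */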
+SEC("?tc")
+__failure __msg("point to map value or allocated object")
+int res_spin_lock_arg(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ bpf_res_spin_lock((struct bpf_res_spin_lock *)bpf_core_cast(&elem->lock, struct __sk_buff));
+ bpf_res_spin_lock(&elem->lock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("AA deadlock detected")
+int res_spin_lock_AA(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ bpf_res_spin_lock(&elem->lock);
+ bpf_res_spin_lock(&elem->lock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("AA deadlock detected")
+int res_spin_lock_cond_AA(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ if (bpf_res_spin_lock(&elem->lock))
+ return 0;
+ bpf_res_spin_lock(&elem->lock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("unlock of different lock")
+int res_spin_lock_mismatch_1(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ if (bpf_res_spin_lock(&elem->lock))
+ return 0;
+ bpf_res_spin_unlock(&res_lock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("unlock of different lock")
+int res_spin_lock_mismatch_2(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ if (bpf_res_spin_lock(&res_lock))
+ return 0;
+ bpf_res_spin_unlock(&elem->lock);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("unlock of different lock")
+int res_spin_lock_irq_mismatch_1(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+ unsigned long f1;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ bpf_local_irq_save(&f1);
+ if (bpf_res_spin_lock(&res_lock))
+ return 0;
+ bpf_res_spin_unlock_irqrestore(&res_lock, &f1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("unlock of different lock")
+int res_spin_lock_irq_mismatch_2(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+ unsigned long f1;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ if (bpf_res_spin_lock_irqsave(&res_lock, &f1))
+ return 0;
+ bpf_res_spin_unlock(&res_lock);
+ return 0;
+}
+
+SEC("?tc")
+__success
+int res_spin_lock_ooo(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ if (bpf_res_spin_lock(&res_lock))
+ return 0;
+ if (bpf_res_spin_lock(&elem->lock)) {
+ bpf_res_spin_unlock(&res_lock);
+ return 0;
+ }
+ bpf_res_spin_unlock(&elem->lock);
+ bpf_res_spin_unlock(&res_lock);
+ return 0;
+}
+
+SEC("?tc")
+__success
+int res_spin_lock_ooo_irq(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+ unsigned long f1, f2;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ if (bpf_res_spin_lock_irqsave(&res_lock, &f1))
+ return 0;
+ if (bpf_res_spin_lock_irqsave(&elem->lock, &f2)) {
+ bpf_res_spin_unlock_irqrestore(&res_lock, &f1);
+		/* We won't have an unreleased IRQ flag error here. */
+ return 0;
+ }
+ bpf_res_spin_unlock_irqrestore(&elem->lock, &f2);
+ bpf_res_spin_unlock_irqrestore(&res_lock, &f1);
+ return 0;
+}
+
+struct bpf_res_spin_lock lock1 __hidden SEC(".data.OO1");
+struct bpf_res_spin_lock lock2 __hidden SEC(".data.OO2");
+
+SEC("?tc")
+__failure __msg("bpf_res_spin_unlock cannot be out of order")
+int res_spin_lock_ooo_unlock(struct __sk_buff *ctx)
+{
+ if (bpf_res_spin_lock(&lock1))
+ return 0;
+ if (bpf_res_spin_lock(&lock2)) {
+ bpf_res_spin_unlock(&lock1);
+ return 0;
+ }
+ bpf_res_spin_unlock(&lock1);
+ bpf_res_spin_unlock(&lock2);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("off 1 doesn't point to 'struct bpf_res_spin_lock' that is at 0")
+int res_spin_lock_bad_off(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem)
+ return 0;
+ bpf_res_spin_lock((void *)&elem->lock + 1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("R1 doesn't have constant offset. bpf_res_spin_lock has to be at the constant offset")
+int res_spin_lock_var_off(struct __sk_buff *ctx)
+{
+ struct arr_elem *elem;
+ u64 val = value;
+
+ elem = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!elem) {
+		// FIXME: referencing bpf_throw only from inline assembly in
+		// the assert macro does not emit its BTF definition.
+ bpf_throw(0);
+ return 0;
+ }
+ bpf_assert_range(val, 0, 40);
+ bpf_res_spin_lock((void *)&value + val);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("map 'res_spin.bss' has no valid bpf_res_spin_lock")
+int res_spin_lock_no_lock_map(struct __sk_buff *ctx)
+{
+ bpf_res_spin_lock((void *)&value + 1);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("local 'kptr' has no valid bpf_res_spin_lock")
+int res_spin_lock_no_lock_kptr(struct __sk_buff *ctx)
+{
+ struct { int i; } *p = bpf_obj_new(typeof(*p));
+
+ if (!p)
+ return 0;
+ bpf_res_spin_lock((void *)p);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/ringbuf_bench.c b/tools/testing/selftests/bpf/progs/ringbuf_bench.c
index 123607d314d6..d96c7d1e8fc2 100644
--- a/tools/testing/selftests/bpf/progs/ringbuf_bench.c
+++ b/tools/testing/selftests/bpf/progs/ringbuf_bench.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
+#include <stdbool.h>
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
@@ -13,9 +15,11 @@ struct {
const volatile int batch_cnt = 0;
const volatile long use_output = 0;
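+/* Producer-throughput mode: skip all ringbuf wakeups and count successful
+ * submissions in 'hits'.
+ */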
+const volatile bool bench_producer = false;
long sample_val = 42;
long dropped __attribute__((aligned(128))) = 0;
+long hits __attribute__((aligned(128))) = 0;
const volatile long wakeup_data_size = 0;
@@ -23,6 +27,9 @@ static __always_inline long get_flags()
{
long sz;
+ if (bench_producer)
+ return BPF_RB_NO_WAKEUP;
+
if (!wakeup_data_size)
return 0;
@@ -30,7 +37,7 @@ static __always_inline long get_flags()
return sz >= wakeup_data_size ? BPF_RB_FORCE_WAKEUP : BPF_RB_NO_WAKEUP;
}
-SEC("fentry/__x64_sys_getpgid")
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
int bench_ringbuf(void *ctx)
{
long *sample, flags;
@@ -46,6 +53,8 @@ int bench_ringbuf(void *ctx)
*sample = sample_val;
flags = get_flags();
bpf_ringbuf_submit(sample, flags);
+ if (bench_producer)
+ __sync_add_and_fetch(&hits, 1);
}
}
} else {
@@ -54,6 +63,9 @@ int bench_ringbuf(void *ctx)
if (bpf_ringbuf_output(&ringbuf, &sample_val,
sizeof(sample_val), flags))
__sync_add_and_fetch(&dropped, 1);
+ else if (bench_producer)
+ __sync_add_and_fetch(&hits, 1);
+
}
}
return 0;
diff --git a/tools/testing/selftests/bpf/progs/security_bpf_map.c b/tools/testing/selftests/bpf/progs/security_bpf_map.c
new file mode 100644
index 000000000000..7216b3450e96
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/security_bpf_map.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define EPERM 1 /* Operation not permitted */
+
+/* From include/linux/fs.h. */
+#define FMODE_WRITE 0x2
+
+struct map;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1);
+} prot_status_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 3);
+} prot_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 3);
+} not_prot_map SEC(".maps");
+
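+/* fmod_ret on security_bpf_map(): once userspace sets the flag in
+ * prot_status_map, write-mode access to prot_map is denied with -EPERM
+ * while not_prot_map stays fully accessible.
+ */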
+SEC("fmod_ret/security_bpf_map")
+int BPF_PROG(fmod_bpf_map, struct bpf_map *map, int fmode)
+{
+ __u32 key = 0;
+ __u32 *status_ptr = bpf_map_lookup_elem(&prot_status_map, &key);
+
+ if (!status_ptr || !*status_ptr)
+ return 0;
+
+ if (map == &prot_map) {
+ /* Allow read-only access */
+ if (fmode & FMODE_WRITE)
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+/*
+ * This program keeps references to both maps so the compiler cannot
+ * optimize them out.
+ */
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(fentry_dummy1, int a)
+{
+ __u32 key = 0;
+ __u32 val1 = a;
+ __u32 val2 = a + 1;
+
+ bpf_map_update_elem(&prot_map, &key, &val1, BPF_ANY);
+ bpf_map_update_elem(&not_prot_map, &key, &val2, BPF_ANY);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/sendmsg4_prog.c b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c
index 092d9da536f3..edc159598a0e 100644
--- a/tools/testing/selftests/bpf/progs/sendmsg4_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c
@@ -8,6 +8,8 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include <bpf_sockopt_helpers.h>
+
#define SRC1_IP4 0xAC100001U /* 172.16.0.1 */
#define SRC2_IP4 0x00000000U
#define SRC_REWRITE_IP4 0x7f000004U
@@ -16,14 +18,15 @@
#define DST_PORT 4040
#define DST_REWRITE_PORT4 4444
-int _version SEC("version") = 1;
-
SEC("cgroup/sendmsg4")
int sendmsg_v4_prog(struct bpf_sock_addr *ctx)
{
if (ctx->type != SOCK_DGRAM)
return 0;
+ if (!get_set_sk_priority(ctx))
+ return 0;
+
/* Rewrite source. */
if (ctx->msg_src_ip4 == bpf_htonl(SRC1_IP4) ||
ctx->msg_src_ip4 == bpf_htonl(SRC2_IP4)) {
@@ -46,4 +49,10 @@ int sendmsg_v4_prog(struct bpf_sock_addr *ctx)
return 1;
}
+SEC("cgroup/sendmsg4")
+int sendmsg_v4_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
index 255a432bc163..36a7f960799f 100644
--- a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
@@ -8,6 +8,8 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include <bpf_sockopt_helpers.h>
+
#define SRC_REWRITE_IP6_0 0
#define SRC_REWRITE_IP6_1 0
#define SRC_REWRITE_IP6_2 0
@@ -18,9 +20,12 @@
#define DST_REWRITE_IP6_2 0
#define DST_REWRITE_IP6_3 1
-#define DST_REWRITE_PORT6 6666
+#define DST_REWRITE_IP6_V4_MAPPED_0 0
+#define DST_REWRITE_IP6_V4_MAPPED_1 0
+#define DST_REWRITE_IP6_V4_MAPPED_2 0x0000FFFF
+#define DST_REWRITE_IP6_V4_MAPPED_3 0xc0a80004 // 192.168.0.4
-int _version SEC("version") = 1;
+#define DST_REWRITE_PORT6 6666
SEC("cgroup/sendmsg6")
int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
@@ -28,6 +33,9 @@ int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
if (ctx->type != SOCK_DGRAM)
return 0;
+ if (!get_set_sk_priority(ctx))
+ return 0;
+
/* Rewrite source. */
if (ctx->msg_src_ip6[3] == bpf_htonl(1) ||
ctx->msg_src_ip6[3] == bpf_htonl(0)) {
@@ -56,4 +64,56 @@ int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
return 1;
}
+SEC("cgroup/sendmsg6")
+int sendmsg_v6_v4mapped_prog(struct bpf_sock_addr *ctx)
+{
+ /* Rewrite source. */
+ ctx->msg_src_ip6[0] = bpf_htonl(SRC_REWRITE_IP6_0);
+ ctx->msg_src_ip6[1] = bpf_htonl(SRC_REWRITE_IP6_1);
+ ctx->msg_src_ip6[2] = bpf_htonl(SRC_REWRITE_IP6_2);
+ ctx->msg_src_ip6[3] = bpf_htonl(SRC_REWRITE_IP6_3);
+
+ /* Rewrite destination. */
+ ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_V4_MAPPED_0);
+ ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_V4_MAPPED_1);
+ ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_V4_MAPPED_2);
+ ctx->user_ip6[3] = bpf_htonl(DST_REWRITE_IP6_V4_MAPPED_3);
+
+ ctx->user_port = bpf_htons(DST_REWRITE_PORT6);
+
+ return 1;
+}
+
+SEC("cgroup/sendmsg6")
+int sendmsg_v6_wildcard_prog(struct bpf_sock_addr *ctx)
+{
+ /* Rewrite source. */
+ ctx->msg_src_ip6[0] = bpf_htonl(SRC_REWRITE_IP6_0);
+ ctx->msg_src_ip6[1] = bpf_htonl(SRC_REWRITE_IP6_1);
+ ctx->msg_src_ip6[2] = bpf_htonl(SRC_REWRITE_IP6_2);
+ ctx->msg_src_ip6[3] = bpf_htonl(SRC_REWRITE_IP6_3);
+
+ /* Rewrite destination. */
+ ctx->user_ip6[0] = bpf_htonl(0);
+ ctx->user_ip6[1] = bpf_htonl(0);
+ ctx->user_ip6[2] = bpf_htonl(0);
+ ctx->user_ip6[3] = bpf_htonl(0);
+
+ ctx->user_port = bpf_htons(DST_REWRITE_PORT6);
+
+ return 1;
+}
+
+SEC("cgroup/sendmsg6")
+int sendmsg_v6_preserve_dst_prog(struct bpf_sock_addr *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/sendmsg6")
+int sendmsg_v6_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c
new file mode 100644
index 000000000000..332d0eb1116f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
+
+__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite";
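+/* The leading NUL byte makes this an abstract-namespace unix socket address. */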
+
+SEC("cgroup/sendmsg_unix")
+int sendmsg_unix_prog(struct bpf_sock_addr *ctx)
+{
+ struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx);
+ struct sockaddr_un *sa_kern_unaddr;
+ __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) +
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1;
+ int ret;
+
+ /* Rewrite destination. */
+ ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1);
+ if (ret)
+ return 0;
+
+ if (sa_kern->uaddrlen != unaddrlen)
+ return 0;
+
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
+ if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
+ sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
+ return 0;
+
+ return 1;
+}
+
+SEC("cgroup/sendmsg_unix")
+int sendmsg_unix_deny_prog(struct bpf_sock_addr *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/set_global_vars.c b/tools/testing/selftests/bpf/progs/set_global_vars.c
new file mode 100644
index 000000000000..ebaef28b2cb3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/set_global_vars.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include "bpf_experimental.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include <stdbool.h>
+
+char _license[] SEC("license") = "GPL";
+
+typedef __s32 s32;
+typedef s32 i32;
+typedef __u8 u8;
+
+enum Enum { EA1 = 0, EA2 = 11, EA3 = 10 };
+enum Enumu64 { EB1 = 0llu, EB2 = 12llu };
+enum Enums64 { EC1 = 0ll, EC2 = 13ll };
+
+const volatile __s64 var_s64 = -1;
+const volatile __u64 var_u64 = 0;
+const volatile i32 var_s32 = -1;
+const volatile __u32 var_u32 = 0;
+const volatile __s16 var_s16 = -1;
+const volatile __u16 var_u16 = 0;
+const volatile __s8 var_s8 = -1;
+const volatile u8 var_u8 = 0;
+const volatile enum Enum var_ea = EA1;
+const volatile enum Enumu64 var_eb = EB1;
+const volatile enum Enums64 var_ec = EC1;
+const volatile bool var_b = false;
+const volatile i32 arr[32];
+const volatile enum Enum enum_arr[32];
+const volatile i32 three_d[47][19][17];
+const volatile i32 *ptr_arr[32];
+
+struct Struct {
+ int:16;
+ __u16 filler;
+ struct {
+ const __u16 filler2;
+ };
+ struct Struct2 {
+ __u16 filler;
+ volatile struct {
+ const int:1;
+ union {
+ const volatile u8 var_u8[3];
+ const volatile __s16 filler3;
+ const int:1;
+ s32 mat[7][5];
+ } u;
+ };
+ } struct2[2][4];
+};
+
+const volatile __u32 stru = 0; /* name shares a prefix with struct1 below */
+const volatile struct Struct struct1[3];
+const volatile struct Struct struct11[11][7];
+
+struct Struct3 {
+ struct {
+ u8 var_u8_l;
+ };
+ struct {
+ struct {
+ u8 var_u8_h;
+ };
+ };
+};
+
+typedef struct Struct3 Struct3_t;
+
+union Union {
+ __u16 var_u16;
+ Struct3_t struct3;
+};
+
+const volatile union Union union1 = {.var_u16 = -1};
+
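+/* Reference every global once so the compiler and libbpf keep them; the
+ * test side is expected to override these const volatile values before
+ * load and verify the program observes them.
+ */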
+SEC("socket")
+int test_set_globals(void *ctx)
+{
+ volatile __s8 a;
+
+ a = var_s64;
+ a = var_u64;
+ a = var_s32;
+ a = var_u32;
+ a = var_s16;
+ a = var_u16;
+ a = var_s8;
+ a = var_u8;
+ a = var_ea;
+ a = var_eb;
+ a = var_ec;
+ a = var_b;
+ a = struct1[2].struct2[1][2].u.var_u8[2];
+ a = union1.var_u16;
+ a = arr[3];
+ a = arr[EA2];
+ a = enum_arr[EC2];
+ a = three_d[31][7][EA2];
+ a = struct1[2].struct2[1][2].u.mat[5][3];
+ a = struct11[7][5].struct2[0][1].u.mat[3][0];
+
+ return a;
+}
diff --git a/tools/testing/selftests/bpf/progs/setget_sockopt.c b/tools/testing/selftests/bpf/progs/setget_sockopt.c
new file mode 100644
index 000000000000..d330b1511979
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/setget_sockopt.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+extern unsigned long CONFIG_HZ __kconfig;
+
+const volatile char veth[IFNAMSIZ];
+const volatile int veth_ifindex;
+
+int nr_listen;
+int nr_passive;
+int nr_active;
+int nr_connect;
+int nr_binddev;
+int nr_socket_post_create;
+int nr_fin_wait1;
+
+struct sockopt_test {
+ int opt;
+ int new;
+ int restore;
+ int expected;
+ int tcp_expected;
+ unsigned int flip:1;
+};
+
+static const char not_exist_cc[] = "not_exist";
+static const char cubic_cc[] = "cubic";
+static const char reno_cc[] = "reno";
+
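+/* Each entry either flips a boolean option or sets .new and expects to read
+ * .expected back (.tcp_expected overrides it on TCP sockets); the original
+ * value, or .restore when set, is written back afterwards.
+ */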
+static const struct sockopt_test sol_socket_tests[] = {
+ { .opt = SO_REUSEADDR, .flip = 1, },
+ { .opt = SO_SNDBUF, .new = 8123, .expected = 8123 * 2, },
+ { .opt = SO_RCVBUF, .new = 8123, .expected = 8123 * 2, },
+ { .opt = SO_KEEPALIVE, .flip = 1, },
+ { .opt = SO_PRIORITY, .new = 0xeb9f, .expected = 0xeb9f, },
+ { .opt = SO_REUSEPORT, .flip = 1, },
+ { .opt = SO_RCVLOWAT, .new = 8123, .expected = 8123, },
+ { .opt = SO_MARK, .new = 0xeb9f, .expected = 0xeb9f, },
+ { .opt = SO_MAX_PACING_RATE, .new = 0xeb9f, .expected = 0xeb9f, },
+ { .opt = SO_TXREHASH, .flip = 1, },
+ { .opt = 0, },
+};
+
+static const struct sockopt_test sol_tcp_tests[] = {
+ { .opt = TCP_NODELAY, .flip = 1, },
+ { .opt = TCP_KEEPIDLE, .new = 123, .expected = 123, .restore = 321, },
+ { .opt = TCP_KEEPINTVL, .new = 123, .expected = 123, .restore = 321, },
+ { .opt = TCP_KEEPCNT, .new = 123, .expected = 123, .restore = 124, },
+ { .opt = TCP_SYNCNT, .new = 123, .expected = 123, .restore = 124, },
+ { .opt = TCP_WINDOW_CLAMP, .new = 8123, .expected = 8123, .restore = 8124, },
+ { .opt = TCP_CONGESTION, },
+ { .opt = TCP_THIN_LINEAR_TIMEOUTS, .flip = 1, },
+ { .opt = TCP_USER_TIMEOUT, .new = 123400, .expected = 123400, },
+ { .opt = TCP_NOTSENT_LOWAT, .new = 1314, .expected = 1314, },
+ { .opt = TCP_BPF_SOCK_OPS_CB_FLAGS, .new = BPF_SOCK_OPS_ALL_CB_FLAGS,
+ .expected = BPF_SOCK_OPS_ALL_CB_FLAGS, },
+ { .opt = TCP_BPF_DELACK_MAX, .new = 30000, .expected = 30000, },
+ { .opt = TCP_BPF_RTO_MIN, .new = 30000, .expected = 30000, },
+ { .opt = TCP_RTO_MAX_MS, .new = 2000, .expected = 2000, },
+ { .opt = 0, },
+};
+
+static const struct sockopt_test sol_ip_tests[] = {
+ { .opt = IP_TOS, .new = 0xe1, .expected = 0xe1, .tcp_expected = 0xe0, },
+ { .opt = 0, },
+};
+
+static const struct sockopt_test sol_ipv6_tests[] = {
+ { .opt = IPV6_TCLASS, .new = 0xe1, .expected = 0xe1, .tcp_expected = 0xe0, },
+ { .opt = IPV6_AUTOFLOWLABEL, .flip = 1, },
+ { .opt = 0, },
+};
+
+struct loop_ctx {
+ void *ctx;
+ struct sock *sk;
+};
+
+static bool sk_is_tcp(struct sock *sk)
+{
+ return (sk->__sk_common.skc_family == AF_INET ||
+ sk->__sk_common.skc_family == AF_INET6) &&
+ sk->sk_type == SOCK_STREAM &&
+ sk->sk_protocol == IPPROTO_TCP;
+}
+
+static int bpf_test_sockopt_flip(void *ctx, struct sock *sk,
+ const struct sockopt_test *t,
+ int level)
+{
+ int old, tmp, new, opt = t->opt;
+
+ if (opt == SO_TXREHASH && !sk_is_tcp(sk))
+ return 0;
+
+ if (bpf_getsockopt(ctx, level, opt, &old, sizeof(old)))
+ return 1;
+	/* the kernel initializes txrehash to SOCK_TXREHASH_DEFAULT (255) */
+ if (level == SOL_SOCKET && opt == SO_TXREHASH && old != 0 && old != 1)
+ old = 1;
+
+ new = !old;
+ if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new)))
+ return 1;
+ if (bpf_getsockopt(ctx, level, opt, &tmp, sizeof(tmp)) ||
+ tmp != new)
+ return 1;
+
+ if (bpf_setsockopt(ctx, level, opt, &old, sizeof(old)))
+ return 1;
+
+ return 0;
+}
+
+static int bpf_test_sockopt_int(void *ctx, struct sock *sk,
+ const struct sockopt_test *t,
+ int level)
+{
+ int old, tmp, new, expected, opt;
+
+ opt = t->opt;
+ new = t->new;
+ if (sk->sk_type == SOCK_STREAM && t->tcp_expected)
+ expected = t->tcp_expected;
+ else
+ expected = t->expected;
+
+ if (bpf_getsockopt(ctx, level, opt, &old, sizeof(old)) ||
+ old == new)
+ return 1;
+
+ if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new)))
+ return 1;
+ if (bpf_getsockopt(ctx, level, opt, &tmp, sizeof(tmp)) ||
+ tmp != expected)
+ return 1;
+
+ if (t->restore)
+ old = t->restore;
+ if (bpf_setsockopt(ctx, level, opt, &old, sizeof(old)))
+ return 1;
+
+ return 0;
+}
+
+static int bpf_test_socket_sockopt(__u32 i, struct loop_ctx *lc)
+{
+ const struct sockopt_test *t;
+
+ if (i >= ARRAY_SIZE(sol_socket_tests))
+ return 1;
+
+ t = &sol_socket_tests[i];
+ if (!t->opt)
+ return 1;
+
+ if (t->flip)
+ return bpf_test_sockopt_flip(lc->ctx, lc->sk, t, SOL_SOCKET);
+
+ return bpf_test_sockopt_int(lc->ctx, lc->sk, t, SOL_SOCKET);
+}
+
+static int bpf_test_ip_sockopt(__u32 i, struct loop_ctx *lc)
+{
+ const struct sockopt_test *t;
+
+ if (i >= ARRAY_SIZE(sol_ip_tests))
+ return 1;
+
+ t = &sol_ip_tests[i];
+ if (!t->opt)
+ return 1;
+
+ if (t->flip)
+ return bpf_test_sockopt_flip(lc->ctx, lc->sk, t, IPPROTO_IP);
+
+ return bpf_test_sockopt_int(lc->ctx, lc->sk, t, IPPROTO_IP);
+}
+
+static int bpf_test_ipv6_sockopt(__u32 i, struct loop_ctx *lc)
+{
+ const struct sockopt_test *t;
+
+ if (i >= ARRAY_SIZE(sol_ipv6_tests))
+ return 1;
+
+ t = &sol_ipv6_tests[i];
+ if (!t->opt)
+ return 1;
+
+ if (t->flip)
+ return bpf_test_sockopt_flip(lc->ctx, lc->sk, t, IPPROTO_IPV6);
+
+ return bpf_test_sockopt_int(lc->ctx, lc->sk, t, IPPROTO_IPV6);
+}
+
+static int bpf_test_tcp_sockopt(__u32 i, struct loop_ctx *lc)
+{
+ const struct sockopt_test *t;
+ struct sock *sk;
+ void *ctx;
+
+ if (i >= ARRAY_SIZE(sol_tcp_tests))
+ return 1;
+
+ t = &sol_tcp_tests[i];
+ if (!t->opt)
+ return 1;
+
+ ctx = lc->ctx;
+ sk = lc->sk;
+
+ if (t->opt == TCP_CONGESTION) {
+ char old_cc[16], tmp_cc[16];
+ const char *new_cc;
+ int new_cc_len;
+
+ if (!bpf_setsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION,
+ (void *)not_exist_cc, sizeof(not_exist_cc)))
+ return 1;
+ if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, old_cc, sizeof(old_cc)))
+ return 1;
+ if (!bpf_strncmp(old_cc, sizeof(old_cc), cubic_cc)) {
+ new_cc = reno_cc;
+ new_cc_len = sizeof(reno_cc);
+ } else {
+ new_cc = cubic_cc;
+ new_cc_len = sizeof(cubic_cc);
+ }
+ if (bpf_setsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, (void *)new_cc,
+ new_cc_len))
+ return 1;
+ if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, tmp_cc, sizeof(tmp_cc)))
+ return 1;
+ if (bpf_strncmp(tmp_cc, sizeof(tmp_cc), new_cc))
+ return 1;
+ if (bpf_setsockopt(ctx, IPPROTO_TCP, TCP_CONGESTION, old_cc, sizeof(old_cc)))
+ return 1;
+ return 0;
+ }
+
+ if (t->flip)
+ return bpf_test_sockopt_flip(ctx, sk, t, IPPROTO_TCP);
+
+ return bpf_test_sockopt_int(ctx, sk, t, IPPROTO_TCP);
+}
+
+static int bpf_test_sockopt(void *ctx, struct sock *sk)
+{
+ struct loop_ctx lc = { .ctx = ctx, .sk = sk, };
+ __u16 family, proto;
+ int n;
+
+ family = sk->sk_family;
+ proto = sk->sk_protocol;
+
+ n = bpf_loop(ARRAY_SIZE(sol_socket_tests), bpf_test_socket_sockopt, &lc, 0);
+ if (n != ARRAY_SIZE(sol_socket_tests))
+ return -1;
+
+ if (proto == IPPROTO_TCP) {
+ n = bpf_loop(ARRAY_SIZE(sol_tcp_tests), bpf_test_tcp_sockopt, &lc, 0);
+ if (n != ARRAY_SIZE(sol_tcp_tests))
+ return -1;
+ }
+
+ if (family == AF_INET) {
+ n = bpf_loop(ARRAY_SIZE(sol_ip_tests), bpf_test_ip_sockopt, &lc, 0);
+ if (n != ARRAY_SIZE(sol_ip_tests))
+ return -1;
+ } else {
+ n = bpf_loop(ARRAY_SIZE(sol_ipv6_tests), bpf_test_ipv6_sockopt, &lc, 0);
+ if (n != ARRAY_SIZE(sol_ipv6_tests))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int binddev_test(void *ctx)
+{
+ const char empty_ifname[] = "";
+ int ifindex, zero = 0;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
+ (void *)veth, sizeof(veth)))
+ return -1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &ifindex, sizeof(int)) ||
+ ifindex != veth_ifindex)
+ return -1;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTODEVICE,
+ (void *)empty_ifname, sizeof(empty_ifname)))
+ return -1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &ifindex, sizeof(int)) ||
+ ifindex != 0)
+ return -1;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ (void *)&veth_ifindex, sizeof(int)))
+ return -1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &ifindex, sizeof(int)) ||
+ ifindex != veth_ifindex)
+ return -1;
+
+ if (bpf_setsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &zero, sizeof(int)))
+ return -1;
+ if (bpf_getsockopt(ctx, SOL_SOCKET, SO_BINDTOIFINDEX,
+ &ifindex, sizeof(int)) ||
+ ifindex != 0)
+ return -1;
+
+ return 0;
+}
+
+static int test_tcp_maxseg(void *ctx, struct sock *sk)
+{
+ int val = 1314, tmp;
+
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return bpf_setsockopt(ctx, IPPROTO_TCP, TCP_MAXSEG,
+ &val, sizeof(val));
+
+ if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_MAXSEG, &tmp, sizeof(tmp)) ||
+ tmp > val)
+ return -1;
+
+ return 0;
+}
+
+static int test_tcp_saved_syn(void *ctx, struct sock *sk)
+{
+ __u8 saved_syn[20];
+ int one = 1;
+
+ if (sk->sk_state == TCP_LISTEN)
+ return bpf_setsockopt(ctx, IPPROTO_TCP, TCP_SAVE_SYN,
+ &one, sizeof(one));
+
+ return bpf_getsockopt(ctx, IPPROTO_TCP, TCP_SAVED_SYN,
+ saved_syn, sizeof(saved_syn));
+}
+
+SEC("lsm_cgroup/socket_post_create")
+int BPF_PROG(socket_post_create, struct socket *sock, int family,
+ int type, int protocol, int kern)
+{
+ struct sock *sk = sock->sk;
+
+ if (!sk)
+ return 1;
+
+ nr_socket_post_create += !bpf_test_sockopt(sk, sk);
+ nr_binddev += !binddev_test(sk);
+
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int _getsockopt(struct bpf_sockopt *ctx)
+{
+ struct bpf_sock *sk = ctx->sk;
+ int *optval = ctx->optval;
+ struct tcp_sock *tp;
+
+ if (!sk || ctx->level != SOL_TCP || ctx->optname != TCP_BPF_SOCK_OPS_CB_FLAGS)
+ return 1;
+
+ tp = bpf_core_cast(sk, struct tcp_sock);
+ if (ctx->optval + sizeof(int) <= ctx->optval_end) {
+ *optval = tp->bpf_sock_ops_cb_flags;
+ ctx->retval = 0;
+ }
+ return 1;
+}
+
+SEC("sockops")
+int skops_sockopt(struct bpf_sock_ops *skops)
+{
+ struct bpf_sock *bpf_sk = skops->sk;
+ struct sock *sk;
+ int flags;
+
+ if (!bpf_sk)
+ return 1;
+
+ sk = (struct sock *)bpf_skc_to_tcp_sock(bpf_sk);
+ if (!sk)
+ return 1;
+
+ switch (skops->op) {
+ case BPF_SOCK_OPS_TCP_LISTEN_CB:
+ nr_listen += !(bpf_test_sockopt(skops, sk) ||
+ test_tcp_maxseg(skops, sk) ||
+ test_tcp_saved_syn(skops, sk));
+ break;
+ case BPF_SOCK_OPS_TCP_CONNECT_CB:
+ nr_connect += !(bpf_test_sockopt(skops, sk) ||
+ test_tcp_maxseg(skops, sk));
+ break;
+ case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
+ nr_active += !(bpf_test_sockopt(skops, sk) ||
+ test_tcp_maxseg(skops, sk));
+ break;
+ case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
+ nr_passive += !(bpf_test_sockopt(skops, sk) ||
+ test_tcp_maxseg(skops, sk) ||
+ test_tcp_saved_syn(skops, sk));
+ flags = skops->bpf_sock_ops_cb_flags | BPF_SOCK_OPS_STATE_CB_FLAG;
+ bpf_setsockopt(skops, SOL_TCP, TCP_BPF_SOCK_OPS_CB_FLAGS, &flags, sizeof(flags));
+ break;
+ case BPF_SOCK_OPS_STATE_CB:
+ if (skops->args[1] == BPF_TCP_CLOSE_WAIT)
+ nr_fin_wait1 += !bpf_test_sockopt(skops, sk);
+ break;
+ }
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sk_bypass_prot_mem.c b/tools/testing/selftests/bpf/progs/sk_bypass_prot_mem.c
new file mode 100644
index 000000000000..09a00d11ffcc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sk_bypass_prot_mem.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2025 Google LLC */
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <errno.h>
+
+extern int tcp_memory_per_cpu_fw_alloc __ksym;
+extern int udp_memory_per_cpu_fw_alloc __ksym;
+
+int nr_cpus;
+bool tcp_activated, udp_activated;
+long tcp_memory_allocated, udp_memory_allocated;
+
+struct sk_prot {
+ long *memory_allocated;
+ int *memory_per_cpu_fw_alloc;
+};
+
+static int drain_memory_per_cpu_fw_alloc(__u32 i, struct sk_prot *sk_prot_ctx)
+{
+ int *memory_per_cpu_fw_alloc;
+
+ memory_per_cpu_fw_alloc = bpf_per_cpu_ptr(sk_prot_ctx->memory_per_cpu_fw_alloc, i);
+ if (memory_per_cpu_fw_alloc)
+ *sk_prot_ctx->memory_allocated += *memory_per_cpu_fw_alloc;
+
+ return 0;
+}
+
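+/* Compute the effective memory_allocated value: start from the protocol's
+ * global counter and add back every CPU's forward-alloc reserve.
+ */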
+static long get_memory_allocated(struct sock *_sk, int *memory_per_cpu_fw_alloc)
+{
+ struct sock *sk = bpf_core_cast(_sk, struct sock);
+ struct sk_prot sk_prot_ctx;
+ long memory_allocated;
+
+	/* net_aligned_data.{tcp,udp}_memory_allocated is not available, so
+	 * read the counter through skc_prot instead.
+	 */
+ memory_allocated = sk->__sk_common.skc_prot->memory_allocated->counter;
+
+ sk_prot_ctx.memory_allocated = &memory_allocated;
+ sk_prot_ctx.memory_per_cpu_fw_alloc = memory_per_cpu_fw_alloc;
+
+ bpf_loop(nr_cpus, drain_memory_per_cpu_fw_alloc, &sk_prot_ctx, 0);
+
+ return memory_allocated;
+}
+
+static void fentry_init_sock(struct sock *sk, bool *activated,
+ long *memory_allocated, int *memory_per_cpu_fw_alloc)
+{
+ if (!*activated)
+ return;
+
+ *memory_allocated = get_memory_allocated(sk, memory_per_cpu_fw_alloc);
+ *activated = false;
+}
+
+SEC("fentry/tcp_init_sock")
+int BPF_PROG(fentry_tcp_init_sock, struct sock *sk)
+{
+ fentry_init_sock(sk, &tcp_activated,
+ &tcp_memory_allocated, &tcp_memory_per_cpu_fw_alloc);
+ return 0;
+}
+
+SEC("fentry/udp_init_sock")
+int BPF_PROG(fentry_udp_init_sock, struct sock *sk)
+{
+ fentry_init_sock(sk, &udp_activated,
+ &udp_memory_allocated, &udp_memory_per_cpu_fw_alloc);
+ return 0;
+}
+
+SEC("cgroup/sock_create")
+int sock_create(struct bpf_sock *ctx)
+{
+ int err, val = 1;
+
+ err = bpf_setsockopt(ctx, SOL_SOCKET, SK_BPF_BYPASS_PROT_MEM,
+ &val, sizeof(val));
+ if (err)
+ goto err;
+
+ val = 0;
+
+ err = bpf_getsockopt(ctx, SOL_SOCKET, SK_BPF_BYPASS_PROT_MEM,
+ &val, sizeof(val));
+ if (err)
+ goto err;
+
+ if (val != 1) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ return 1;
+
+err:
+ bpf_set_retval(err);
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c b/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c
new file mode 100644
index 000000000000..46d6eb2a3b17
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Facebook */
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+void *local_storage_ptr = NULL;
+void *sk_ptr = NULL;
+int cookie_found = 0;
+__u64 cookie = 0;
+__u32 omem = 0;
+
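+/* inet6_sock_destruct() records the socket and its local storage below; the
+ * fexit on bpf_local_storage_destroy() then samples sk_omem_alloc, which
+ * should be zero once the storage charge has been uncharged.
+ */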
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} sk_storage SEC(".maps");
+
+SEC("fexit/bpf_local_storage_destroy")
+int BPF_PROG(bpf_local_storage_destroy, struct bpf_local_storage *local_storage)
+{
+ struct sock *sk;
+
+ if (local_storage_ptr != local_storage)
+ return 0;
+
+ sk = bpf_core_cast(sk_ptr, struct sock);
+ if (sk->sk_cookie.counter != cookie)
+ return 0;
+
+ cookie_found++;
+ omem = sk->sk_omem_alloc.counter;
+ local_storage_ptr = NULL;
+
+ return 0;
+}
+
+SEC("fentry/inet6_sock_destruct")
+int BPF_PROG(inet6_sock_destruct, struct sock *sk)
+{
+ int *value;
+
+ if (!cookie || sk->sk_cookie.counter != cookie)
+ return 0;
+
+ value = bpf_sk_storage_get(&sk_storage, sk, 0, 0);
+ if (value && *value == 0xdeadbeef) {
+ cookie_found++;
+ sk_ptr = sk;
+ local_storage_ptr = sk->sk_bpf_storage;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/skb_load_bytes.c b/tools/testing/selftests/bpf/progs/skb_load_bytes.c
new file mode 100644
index 000000000000..e4252fd973be
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/skb_load_bytes.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 load_offset = 0;
+int test_result = 0;
+
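+/* Record bpf_skb_load_bytes()'s return code so userspace can assert that
+ * in-range load_offset values succeed and out-of-range ones fail.
+ */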
+SEC("tc")
+int skb_process(struct __sk_buff *skb)
+{
+ char buf[16];
+
+ test_result = bpf_skb_load_bytes(skb, load_offset, buf, 10);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/skb_pkt_end.c b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
index cf6823f42e80..3bb4451524a1 100644
--- a/tools/testing/selftests/bpf/progs/skb_pkt_end.c
+++ b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
+#ifndef BPF_NO_PRESERVE_ACCESS_INDEX
#define BPF_NO_PRESERVE_ACCESS_INDEX
+#endif
#include <vmlinux.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
-#define NULL 0
#define INLINE __always_inline
#define skb_shorter(skb, len) ((void *)(long)(skb)->data + (len) > (void *)(long)skb->data_end)
@@ -26,12 +27,14 @@ out:
return ip;
}
-SEC("classifier/cls")
+SEC("tc")
int main_prog(struct __sk_buff *skb)
{
struct iphdr *ip = NULL;
struct tcphdr *tcp;
__u8 proto = 0;
+ int urg_ptr;
+ u32 offset;
if (!(ip = get_iphdr(skb)))
goto out;
@@ -47,7 +50,14 @@ int main_prog(struct __sk_buff *skb)
if (!tcp)
goto out;
- return tcp->urg_ptr;
+ urg_ptr = tcp->urg_ptr;
+
+	/* Checksum part: bump the IP protocol byte and write it back with
+	 * BPF_F_RECOMPUTE_CSUM so the store updates the checksum.
+	 */
+ proto++;
+ offset = sizeof(struct ethhdr) + offsetof(struct iphdr, protocol);
+ bpf_skb_store_bytes(skb, offset, &proto, sizeof(proto), BPF_F_RECOMPUTE_CSUM);
+
+ return urg_ptr;
out:
return -1;
}
diff --git a/tools/testing/selftests/bpf/progs/sock_addr_kern.c b/tools/testing/selftests/bpf/progs/sock_addr_kern.c
new file mode 100644
index 000000000000..84ad515eafd6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sock_addr_kern.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
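+/* Thin "syscall" program wrappers: each just forwards to a bpf_testmod
+ * kfunc so the sock_addr tests can drive kernel-side socket calls from
+ * userspace.
+ */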
+SEC("syscall")
+int init_sock(struct init_sock_args *args)
+{
+ bpf_kfunc_init_sock(args);
+
+ return 0;
+}
+
+SEC("syscall")
+int close_sock(void *ctx)
+{
+ bpf_kfunc_close_sock();
+
+ return 0;
+}
+
+SEC("syscall")
+int kernel_connect(struct addr_args *args)
+{
+ return bpf_kfunc_call_kernel_connect(args);
+}
+
+SEC("syscall")
+int kernel_bind(struct addr_args *args)
+{
+ return bpf_kfunc_call_kernel_bind(args);
+}
+
+SEC("syscall")
+int kernel_listen(struct addr_args *args)
+{
+ return bpf_kfunc_call_kernel_listen();
+}
+
+SEC("syscall")
+int kernel_sendmsg(struct sendmsg_args *args)
+{
+ return bpf_kfunc_call_kernel_sendmsg(args);
+}
+
+SEC("syscall")
+int sock_sendmsg(struct sendmsg_args *args)
+{
+ return bpf_kfunc_call_sock_sendmsg(args);
+}
+
+SEC("syscall")
+int kernel_getsockname(struct addr_args *args)
+{
+ return bpf_kfunc_call_kernel_getsockname(args);
+}
+
+SEC("syscall")
+int kernel_getpeername(struct addr_args *args)
+{
+ return bpf_kfunc_call_kernel_getpeername(args);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sock_destroy_prog.c b/tools/testing/selftests/bpf/progs/sock_destroy_prog.c
new file mode 100644
index 000000000000..9e0bf7a54cec
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sock_destroy_prog.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include "bpf_tracing_net.h"
+
+__be16 serv_port = 0;
+
+int bpf_sock_destroy(struct sock_common *sk) __ksym;
+
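+/* bpf_sock_destroy() may only be called from iterator contexts that hold
+ * the socket lock; sock_destroy_prog_fail.c checks that other attach
+ * types are rejected.
+ */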
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} tcp_conn_sockets SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} udp_conn_sockets SEC(".maps");
+
+SEC("cgroup/connect6")
+int sock_connect(struct bpf_sock_addr *ctx)
+{
+ __u64 sock_cookie = 0;
+ int key = 0;
+ __u32 keyc = 0;
+
+ if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6)
+ return 1;
+
+ sock_cookie = bpf_get_socket_cookie(ctx);
+ if (ctx->protocol == IPPROTO_TCP)
+ bpf_map_update_elem(&tcp_conn_sockets, &key, &sock_cookie, 0);
+ else if (ctx->protocol == IPPROTO_UDP)
+ bpf_map_update_elem(&udp_conn_sockets, &keyc, &sock_cookie, 0);
+ else
+ return 1;
+
+ return 1;
+}
+
+SEC("iter/tcp")
+int iter_tcp6_client(struct bpf_iter__tcp *ctx)
+{
+ struct sock_common *sk_common = ctx->sk_common;
+ __u64 sock_cookie = 0;
+ __u64 *val;
+ int key = 0;
+
+ if (!sk_common)
+ return 0;
+
+ if (sk_common->skc_family != AF_INET6)
+ return 0;
+
+ sock_cookie = bpf_get_socket_cookie(sk_common);
+ val = bpf_map_lookup_elem(&tcp_conn_sockets, &key);
+ if (!val)
+ return 0;
+ /* Destroy connected client sockets. */
+ if (sock_cookie == *val)
+ bpf_sock_destroy(sk_common);
+
+ return 0;
+}
+
+SEC("iter/tcp")
+int iter_tcp6_server(struct bpf_iter__tcp *ctx)
+{
+ struct sock_common *sk_common = ctx->sk_common;
+ const struct inet_connection_sock *icsk;
+ const struct inet_sock *inet;
+ struct tcp6_sock *tcp_sk;
+ __be16 srcp;
+
+ if (!sk_common)
+ return 0;
+
+ if (sk_common->skc_family != AF_INET6)
+ return 0;
+
+ tcp_sk = bpf_skc_to_tcp6_sock(sk_common);
+ if (!tcp_sk)
+ return 0;
+
+ icsk = &tcp_sk->tcp.inet_conn;
+ inet = &icsk->icsk_inet;
+ srcp = inet->inet_sport;
+
+ /* Destroy server sockets. */
+ if (srcp == serv_port)
+ bpf_sock_destroy(sk_common);
+
+ return 0;
+}
+
+SEC("iter/udp")
+int iter_udp6_client(struct bpf_iter__udp *ctx)
+{
+ struct udp_sock *udp_sk = ctx->udp_sk;
+ struct sock *sk = (struct sock *) udp_sk;
+ __u64 sock_cookie = 0, *val;
+ int key = 0;
+
+ if (!sk)
+ return 0;
+
+ sock_cookie = bpf_get_socket_cookie(sk);
+ val = bpf_map_lookup_elem(&udp_conn_sockets, &key);
+ if (!val)
+ return 0;
+ /* Destroy connected client sockets. */
+ if (sock_cookie == *val)
+ bpf_sock_destroy((struct sock_common *)sk);
+
+ return 0;
+}
+
+SEC("iter/udp")
+int iter_udp6_server(struct bpf_iter__udp *ctx)
+{
+ struct udp_sock *udp_sk = ctx->udp_sk;
+ struct sock *sk = (struct sock *) udp_sk;
+ struct inet_sock *inet;
+ __be16 srcp;
+
+ if (!sk)
+ return 0;
+
+ inet = &udp_sk->inet;
+ srcp = inet->inet_sport;
+ if (srcp == serv_port)
+ bpf_sock_destroy((struct sock_common *)sk);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c b/tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c
new file mode 100644
index 000000000000..dd6850b58e25
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+int bpf_sock_destroy(struct sock_common *sk) __ksym;
+
+SEC("tp_btf/tcp_destroy_sock")
+__failure __msg("calling kernel function bpf_sock_destroy is not allowed")
+int BPF_PROG(trace_tcp_destroy_sock, struct sock *sk)
+{
+ /* should not load */
+ bpf_sock_destroy((struct sock_common *)sk);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/sock_iter_batch.c b/tools/testing/selftests/bpf/progs/sock_iter_batch.c
new file mode 100644
index 000000000000..77966ded5467
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sock_iter_batch.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Meta
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_tracing_net.h"
+#include "bpf_kfuncs.h"
+
+#define ATTR __always_inline
+#include "test_jhash.h"
+
+static bool ipv6_addr_loopback(const struct in6_addr *a)
+{
+ return (a->s6_addr32[0] | a->s6_addr32[1] |
+ a->s6_addr32[2] | (a->s6_addr32[3] ^ bpf_htonl(1))) == 0;
+}
+
+static bool ipv4_addr_loopback(__be32 a)
+{
+ return a == bpf_ntohl(0x7f000001);
+}
+
+volatile const unsigned int sf;
+volatile const unsigned int ss;
+volatile const __u16 ports[2];
+unsigned int bucket[2];
+
+SEC("iter/tcp")
+int iter_tcp_soreuse(struct bpf_iter__tcp *ctx)
+{
+ struct sock *sk = (struct sock *)ctx->sk_common;
+ struct inet_hashinfo *hinfo;
+ unsigned int hash;
+ __u64 sock_cookie;
+ struct net *net;
+ int idx;
+
+ if (!sk)
+ return 0;
+
+ sock_cookie = bpf_get_socket_cookie(sk);
+ sk = bpf_core_cast(sk, struct sock);
+ if (sk->sk_family != sf ||
+ (ss && sk->sk_state != ss) ||
+ (sk->sk_family == AF_INET6 ?
+ !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) :
+ !ipv4_addr_loopback(sk->sk_rcv_saddr)))
+ return 0;
+
+ if (sk->sk_num == ports[0])
+ idx = 0;
+ else if (sk->sk_num == ports[1])
+ idx = 1;
+ else if (!ports[0] && !ports[1])
+ idx = 0;
+ else
+ return 0;
+
+ /* bucket selection as in inet_lhash2_bucket_sk() */
+ net = sk->sk_net.net;
+ hash = jhash2(sk->sk_v6_rcv_saddr.s6_addr32, 4, net->hash_mix);
+ hash ^= sk->sk_num;
+ hinfo = net->ipv4.tcp_death_row.hashinfo;
+ bucket[idx] = hash & hinfo->lhash2_mask;
+ bpf_seq_write(ctx->meta->seq, &idx, sizeof(idx));
+ bpf_seq_write(ctx->meta->seq, &sock_cookie, sizeof(sock_cookie));
+
+ return 0;
+}
+
+volatile const __u64 destroy_cookie;
+
+SEC("iter/tcp")
+int iter_tcp_destroy(struct bpf_iter__tcp *ctx)
+{
+ struct sock_common *sk_common = (struct sock_common *)ctx->sk_common;
+ __u64 sock_cookie;
+
+ if (!sk_common)
+ return 0;
+
+ sock_cookie = bpf_get_socket_cookie(sk_common);
+ if (sock_cookie != destroy_cookie)
+ return 0;
+
+ bpf_sock_destroy(sk_common);
+ bpf_seq_write(ctx->meta->seq, &sock_cookie, sizeof(sock_cookie));
+
+ return 0;
+}
+
+#define udp_sk(ptr) container_of(ptr, struct udp_sock, inet.sk)
+
+SEC("iter/udp")
+int iter_udp_soreuse(struct bpf_iter__udp *ctx)
+{
+ struct sock *sk = (struct sock *)ctx->udp_sk;
+ struct udp_table *udptable;
+ __u64 sock_cookie;
+ int idx;
+
+ if (!sk)
+ return 0;
+
+ sock_cookie = bpf_get_socket_cookie(sk);
+ sk = bpf_core_cast(sk, struct sock);
+ if (sk->sk_family != sf ||
+ (sk->sk_family == AF_INET6 ?
+ !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) :
+ !ipv4_addr_loopback(sk->sk_rcv_saddr)))
+ return 0;
+
+ if (sk->sk_num == ports[0])
+ idx = 0;
+ else if (sk->sk_num == ports[1])
+ idx = 1;
+ else if (!ports[0] && !ports[1])
+ idx = 0;
+ else
+ return 0;
+
+ /* bucket selection as in udp_hashslot2() */
+ udptable = sk->sk_net.net->ipv4.udp_table;
+ bucket[idx] = udp_sk(sk)->udp_portaddr_hash & udptable->mask;
+ bpf_seq_write(ctx->meta->seq, &idx, sizeof(idx));
+ bpf_seq_write(ctx->meta->seq, &sock_cookie, sizeof(sock_cookie));
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/socket_cookie_prog.c b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
index 0cb5656a22b0..35630a5aaf5f 100644
--- a/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
+++ b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
-#include <linux/bpf.h>
-#include <sys/socket.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include <bpf/bpf_tracing.h>
+
+#define AF_INET6 10
struct socket_cookie {
__u64 cookie_key;
@@ -19,6 +21,14 @@ struct {
__type(value, struct socket_cookie);
} socket_cookies SEC(".maps");
+/*
+ * These three programs get executed in a row on connect() syscalls. The
+ * userspace side of the test creates a client socket, issues a connect() on it
+ * and then checks that the local storage associated with this socket has:
+ * cookie_value == local_port << 8 | 0xFF
+ * The different parts of this cookie_value are appended by those hooks if they
+ * all agree on the output of bpf_get_socket_cookie().
+ */
SEC("cgroup/connect6")
int set_cookie(struct bpf_sock_addr *ctx)
{
@@ -32,16 +42,16 @@ int set_cookie(struct bpf_sock_addr *ctx)
if (!p)
return 1;
- p->cookie_value = 0xFF;
+ p->cookie_value = 0xF;
p->cookie_key = bpf_get_socket_cookie(ctx);
return 1;
}
SEC("sockops")
-int update_cookie(struct bpf_sock_ops *ctx)
+int update_cookie_sockops(struct bpf_sock_ops *ctx)
{
- struct bpf_sock *sk;
+ struct bpf_sock *sk = ctx->sk;
struct socket_cookie *p;
if (ctx->family != AF_INET6)
@@ -50,21 +60,40 @@ int update_cookie(struct bpf_sock_ops *ctx)
if (ctx->op != BPF_SOCK_OPS_TCP_CONNECT_CB)
return 1;
- if (!ctx->sk)
+ if (!sk)
return 1;
- p = bpf_sk_storage_get(&socket_cookies, ctx->sk, 0, 0);
+ p = bpf_sk_storage_get(&socket_cookies, sk, 0, 0);
if (!p)
return 1;
if (p->cookie_key != bpf_get_socket_cookie(ctx))
return 1;
- p->cookie_value = (ctx->local_port << 8) | p->cookie_value;
+ p->cookie_value |= (ctx->local_port << 8);
return 1;
}
-int _version SEC("version") = 1;
+SEC("fexit/inet_stream_connect")
+int BPF_PROG(update_cookie_tracing, struct socket *sock,
+ struct sockaddr *uaddr, int addr_len, int flags)
+{
+ struct socket_cookie *p;
+
+ if (uaddr->sa_family != AF_INET6)
+ return 0;
+
+ p = bpf_sk_storage_get(&socket_cookies, sock->sk, 0, 0);
+ if (!p)
+ return 0;
+
+ if (p->cookie_key != bpf_get_socket_cookie(sock->sk))
+ return 0;
+
+ p->cookie_value |= 0xF0;
+
+ return 0;
+}
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
index ca283af80d4e..c9abfe3a11af 100644
--- a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
@@ -2,15 +2,11 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-int _version SEC("version") = 1;
-
SEC("sk_skb1")
int bpf_prog1(struct __sk_buff *skb)
{
void *data_end = (void *)(long) skb->data_end;
void *data = (void *)(long) skb->data;
- __u32 lport = skb->local_port;
- __u32 rport = skb->remote_port;
__u8 *d = data;
int err;
diff --git a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
index fdb4bf4408fa..80632954c5a1 100644
--- a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
@@ -3,23 +3,9 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-int _version SEC("version") = 1;
-
SEC("sk_msg1")
int bpf_prog1(struct sk_msg_md *msg)
{
- void *data_end = (void *)(long) msg->data_end;
- void *data = (void *)(long) msg->data;
-
- char *d;
-
- if (data + 8 > data_end)
- return SK_DROP;
-
- bpf_printk("data length %i\n", (__u64)msg->data_end - (__u64)msg->data);
- d = (char *)data;
- bpf_printk("hello sendmsg hook %i %i\n", d[0], d[1]);
-
return SK_PASS;
}
diff --git a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
index 4797dc985064..0660f29dca95 100644
--- a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
@@ -1,28 +1,27 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-
-int _version SEC("version") = 1;
+#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} sock_map_rx SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} sock_map_tx SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 20);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} sock_map_msg SEC(".maps");
struct {
@@ -42,6 +41,9 @@ int bpf_prog2(struct __sk_buff *skb)
__u8 *d = data;
__u8 sk, map;
+ __sink(lport);
+ __sink(rport);
+
if (data + 8 > data_end)
return SK_DROP;
diff --git a/tools/testing/selftests/bpf/progs/sockopt_inherit.c b/tools/testing/selftests/bpf/progs/sockopt_inherit.c
index c6d428a8d785..a3434b840928 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_inherit.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_inherit.c
@@ -3,13 +3,14 @@
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1;
#define SOL_CUSTOM 0xdeadbeef
#define CUSTOM_INHERIT1 0
#define CUSTOM_INHERIT2 1
#define CUSTOM_LISTENER 2
+__s32 page_size = 0;
+
struct sockopt_inherit {
__u8 val;
};
@@ -56,7 +57,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
__u8 *optval = ctx->optval;
if (ctx->level != SOL_CUSTOM)
- return 1; /* only interested in SOL_CUSTOM */
+ goto out; /* only interested in SOL_CUSTOM */
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -71,6 +72,12 @@ int _getsockopt(struct bpf_sockopt *ctx)
ctx->optlen = 1;
return 1;
+
+out:
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
SEC("cgroup/setsockopt")
@@ -81,7 +88,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
__u8 *optval = ctx->optval;
if (ctx->level != SOL_CUSTOM)
- return 1; /* only interested in SOL_CUSTOM */
+ goto out; /* only interested in SOL_CUSTOM */
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -94,4 +101,10 @@ int _setsockopt(struct bpf_sockopt *ctx)
ctx->optlen = -1;
return 1;
+
+out:
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/sockopt_multi.c b/tools/testing/selftests/bpf/progs/sockopt_multi.c
index 9d8c212dde9f..db67278e12d4 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_multi.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_multi.c
@@ -4,16 +4,17 @@
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1;
-SEC("cgroup/getsockopt/child")
+__s32 page_size = 0;
+
+SEC("cgroup/getsockopt")
int _getsockopt_child(struct bpf_sockopt *ctx)
{
__u8 *optval_end = ctx->optval_end;
__u8 *optval = ctx->optval;
if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
- return 1;
+ goto out;
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -27,16 +28,22 @@ int _getsockopt_child(struct bpf_sockopt *ctx)
ctx->optlen = 1;
return 1;
+
+out:
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
-SEC("cgroup/getsockopt/parent")
+SEC("cgroup/getsockopt")
int _getsockopt_parent(struct bpf_sockopt *ctx)
{
__u8 *optval_end = ctx->optval_end;
__u8 *optval = ctx->optval;
if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
- return 1;
+ goto out;
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -50,6 +57,12 @@ int _getsockopt_parent(struct bpf_sockopt *ctx)
ctx->optlen = 1;
return 1;
+
+out:
+	/* optval larger than PAGE_SIZE uses the kernel's buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
SEC("cgroup/setsockopt")
@@ -59,7 +72,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
__u8 *optval = ctx->optval;
if (ctx->level != SOL_IP || ctx->optname != IP_TOS)
- return 1;
+ goto out;
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
@@ -68,4 +81,10 @@ int _setsockopt(struct bpf_sockopt *ctx)
ctx->optlen = 1;
return 1;
+
+out:
+	/* If optval is larger than PAGE_SIZE, let the kernel use its own buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c b/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c
new file mode 100644
index 000000000000..5c3614333b01
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "bpf_tracing_net.h"
+
+char _license[] SEC("license") = "GPL";
+
+__s32 page_size = 0;
+
+const char cc_reno[TCP_CA_NAME_MAX] = "reno";
+const char cc_cubic[TCP_CA_NAME_MAX] = "cubic";
+
+SEC("cgroup/setsockopt")
+int sockopt_qos_to_cc(struct bpf_sockopt *ctx)
+{
+ void *optval_end = ctx->optval_end;
+ int *optval = ctx->optval;
+ char buf[TCP_CA_NAME_MAX];
+
+ if (ctx->level != SOL_IPV6 || ctx->optname != IPV6_TCLASS)
+ goto out;
+
+ if (optval + 1 > optval_end)
+ return 0; /* EPERM, bounds check */
+
+ if (bpf_getsockopt(ctx->sk, SOL_TCP, TCP_CONGESTION, &buf, sizeof(buf)))
+ return 0;
+
+ if (bpf_strncmp(buf, sizeof(buf), cc_cubic))
+ return 0;
+
+ if (*optval == 0x2d) {
+ if (bpf_setsockopt(ctx->sk, SOL_TCP, TCP_CONGESTION, (void *)&cc_reno,
+ sizeof(cc_reno)))
+ return 0;
+ }
+ return 1;
+
+out:
+	/* If optval is larger than PAGE_SIZE, let the kernel use its own buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c
index 712df7b49cb1..cb990a7d3d45 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_sk.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c
@@ -1,15 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
+#include <linux/tcp.h>
#include <linux/bpf.h>
+#include <netinet/in.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1;
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
+int page_size = 0; /* userspace should set it */
+
+#ifndef SOL_TCP
+#define SOL_TCP IPPROTO_TCP
#endif
#define SOL_CUSTOM 0xdeadbeef
@@ -31,14 +32,27 @@ int _getsockopt(struct bpf_sockopt *ctx)
__u8 *optval_end = ctx->optval_end;
__u8 *optval = ctx->optval;
struct sockopt_sk *storage;
+ struct bpf_sock *sk;
+
+ /* Bypass AF_NETLINK. */
+ sk = ctx->sk;
+ if (sk && sk->family == AF_NETLINK)
+ goto out;
+
+	/* Make sure bpf_get_netns_cookie is callable. */
+ if (bpf_get_netns_cookie(NULL) == 0)
+ return 0;
+
+ if (bpf_get_netns_cookie(ctx) == 0)
+ return 0;
if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
/* Not interested in SOL_IP:IP_TOS;
* let next BPF program in the cgroup chain or kernel
* handle it.
*/
- ctx->optlen = 0; /* bypass optval>PAGE_SIZE */
- return 1;
+ goto out;
}
if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) {
@@ -46,7 +60,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
* let next BPF program in the cgroup chain or kernel
* handle it.
*/
- return 1;
+ goto out;
}
if (ctx->level == SOL_TCP && ctx->optname == TCP_CONGESTION) {
@@ -54,12 +68,28 @@ int _getsockopt(struct bpf_sockopt *ctx)
* let next BPF program in the cgroup chain or kernel
* handle it.
*/
- return 1;
+ goto out;
+ }
+
+ if (ctx->level == SOL_TCP && ctx->optname == TCP_ZEROCOPY_RECEIVE) {
+ /* Verify that TCP_ZEROCOPY_RECEIVE triggers.
+ * It has a custom implementation for performance
+ * reasons.
+ */
+
+ /* Check that optval contains address (__u64) */
+ if (optval + sizeof(__u64) > optval_end)
+ return 0; /* bounds check */
+
+ if (((struct tcp_zerocopy_receive *)optval)->address != 0)
+ return 0; /* unexpected data */
+
+ goto out;
}
if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
if (optval + 1 > optval_end)
- return 0; /* EPERM, bounds check */
+ return 0; /* bounds check */
ctx->retval = 0; /* Reset system call return value to zero */
@@ -71,25 +101,25 @@ int _getsockopt(struct bpf_sockopt *ctx)
* program can only see the first PAGE_SIZE
* bytes of data.
*/
- if (optval_end - optval != PAGE_SIZE)
- return 0; /* EPERM, unexpected data size */
+ if (optval_end - optval != page_size)
+ return 0; /* unexpected data size */
return 1;
}
if (ctx->level != SOL_CUSTOM)
- return 0; /* EPERM, deny everything except custom level */
+ return 0; /* deny everything except custom level */
if (optval + 1 > optval_end)
- return 0; /* EPERM, bounds check */
+ return 0; /* bounds check */
storage = bpf_sk_storage_get(&socket_storage_map, ctx->sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!storage)
- return 0; /* EPERM, couldn't get sk storage */
+ return 0; /* couldn't get sk storage */
if (!ctx->retval)
- return 0; /* EPERM, kernel should not have handled
+ return 0; /* kernel should not have handled
* SOL_CUSTOM, something is wrong!
*/
ctx->retval = 0; /* Reset system call return value to zero */
@@ -98,6 +128,12 @@ int _getsockopt(struct bpf_sockopt *ctx)
ctx->optlen = 1;
return 1;
+
+out:
+	/* If optval is larger than PAGE_SIZE, let the kernel use its own buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
SEC("cgroup/setsockopt")
@@ -106,6 +142,20 @@ int _setsockopt(struct bpf_sockopt *ctx)
__u8 *optval_end = ctx->optval_end;
__u8 *optval = ctx->optval;
struct sockopt_sk *storage;
+ struct bpf_sock *sk;
+
+ /* Bypass AF_NETLINK. */
+ sk = ctx->sk;
+ if (sk && sk->family == AF_NETLINK)
+ goto out;
+
+	/* Make sure bpf_get_netns_cookie is callable. */
+ if (bpf_get_netns_cookie(NULL) == 0)
+ return 0;
+
+ if (bpf_get_netns_cookie(ctx) == 0)
+ return 0;
if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
/* Not interested in SOL_IP:IP_TOS;
@@ -120,7 +170,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
/* Overwrite SO_SNDBUF value */
if (optval + sizeof(__u32) > optval_end)
- return 0; /* EPERM, bounds check */
+ return 0; /* bounds check */
*(__u32 *)optval = 0x55AA;
ctx->optlen = 4;
@@ -132,7 +182,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
/* Always use cubic */
if (optval + 5 > optval_end)
- return 0; /* EPERM, bounds check */
+ return 0; /* bounds check */
memcpy(optval, "cubic", 5);
ctx->optlen = 5;
@@ -142,11 +192,11 @@ int _setsockopt(struct bpf_sockopt *ctx)
if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
/* Original optlen is larger than PAGE_SIZE. */
- if (ctx->optlen != PAGE_SIZE * 2)
- return 0; /* EPERM, unexpected data size */
+ if (ctx->optlen != page_size * 2)
+ return 0; /* unexpected data size */
if (optval + 1 > optval_end)
- return 0; /* EPERM, bounds check */
+ return 0; /* bounds check */
/* Make sure we can trim the buffer. */
optval[0] = 0;
@@ -156,22 +206,22 @@ int _setsockopt(struct bpf_sockopt *ctx)
* program can only see the first PAGE_SIZE
* bytes of data.
*/
- if (optval_end - optval != PAGE_SIZE)
- return 0; /* EPERM, unexpected data size */
+ if (optval_end - optval != page_size)
+ return 0; /* unexpected data size */
return 1;
}
if (ctx->level != SOL_CUSTOM)
- return 0; /* EPERM, deny everything except custom level */
+ return 0; /* deny everything except custom level */
if (optval + 1 > optval_end)
- return 0; /* EPERM, bounds check */
+ return 0; /* bounds check */
storage = bpf_sk_storage_get(&socket_storage_map, ctx->sk, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!storage)
- return 0; /* EPERM, couldn't get sk storage */
+ return 0; /* couldn't get sk storage */
storage->val = optval[0];
ctx->optlen = -1; /* BPF has consumed this option, don't call kernel
@@ -179,4 +229,10 @@ int _setsockopt(struct bpf_sockopt *ctx)
*/
return 1;
+
+out:
+	/* If optval is larger than PAGE_SIZE, let the kernel use its own buffer. */
+ if (ctx->optlen > page_size)
+ ctx->optlen = 0;
+ return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/stacktrace_ips.c b/tools/testing/selftests/bpf/progs/stacktrace_ips.c
new file mode 100644
index 000000000000..a96c8150d7f5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/stacktrace_ips.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#ifndef PERF_MAX_STACK_DEPTH
+#define PERF_MAX_STACK_DEPTH 127
+#endif
+
+typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(max_entries, 16384);
+ __type(key, __u32);
+ __type(value, stack_trace_t);
+} stackmap SEC(".maps");
+
+extern bool CONFIG_UNWINDER_ORC __kconfig __weak;
+
+/*
+ * This function is here to have CONFIG_UNWINDER_ORC
+ * used and added to object BTF.
+ */
+int unused(void)
+{
+ return CONFIG_UNWINDER_ORC ? 0 : 1;
+}
+
+__u32 stack_key;
+
+SEC("kprobe.multi")
+int kprobe_multi_test(struct pt_regs *ctx)
+{
+ stack_key = bpf_get_stackid(ctx, &stackmap, 0);
+ return 0;
+}
+
+SEC("raw_tp/bpf_testmod_test_read")
+int rawtp_test(void *ctx)
+{
+ /* Skip ebpf program entry in the stack. */
+ stack_key = bpf_get_stackid(ctx, &stackmap, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/stacktrace_map.c
index 00ed48672620..0c77df05be7f 100644
--- a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
+++ b/tools/testing/selftests/bpf/progs/stacktrace_map.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
-#include <linux/bpf.h>
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#ifndef PERF_MAX_STACK_DEPTH
@@ -27,8 +27,8 @@ typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, 16384);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(stack_trace_t));
+ __type(key, __u32);
+ __type(value, stack_trace_t);
} stackmap SEC(".maps");
struct {
@@ -38,18 +38,19 @@ struct {
__type(value, stack_trace_t);
} stack_amap SEC(".maps");
-/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+/* taken from /sys/kernel/tracing/events/sched/sched_switch/format */
struct sched_switch_args {
unsigned long long pad;
- char prev_comm[16];
+ char prev_comm[TASK_COMM_LEN];
int prev_pid;
int prev_prio;
long long prev_state;
- char next_comm[16];
+ char next_comm[TASK_COMM_LEN];
int next_pid;
int next_prio;
};
+__u32 stack_id;
SEC("tracepoint/sched/sched_switch")
int oncpu(struct sched_switch_args *ctx)
{
@@ -64,6 +65,7 @@ int oncpu(struct sched_switch_args *ctx)
/* The size of stackmap and stackid_hmap should be the same */
key = bpf_get_stackid(ctx, &stackmap, 0);
if ((int)key >= 0) {
+ stack_id = key;
bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
stack_p = bpf_map_lookup_elem(&stack_amap, &key);
if (stack_p)
diff --git a/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c b/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c
new file mode 100644
index 000000000000..2eb297df3dd6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+#define TEST_STACK_DEPTH 2
+#define TEST_MAX_ENTRIES 16384
+
+typedef __u64 stack_trace_t[TEST_STACK_DEPTH];
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(max_entries, TEST_MAX_ENTRIES);
+ __type(key, __u32);
+ __type(value, stack_trace_t);
+} stackmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, TEST_MAX_ENTRIES);
+ __type(key, __u32);
+ __type(value, __u32);
+} stackid_hmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, TEST_MAX_ENTRIES);
+ __type(key, __u32);
+ __type(value, stack_trace_t);
+} stack_amap SEC(".maps");
+
+int pid = 0;
+int control = 0;
+int failed = 0;
+
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct trace_event_raw_sched_switch *ctx)
+{
+ __u32 max_len = TEST_STACK_DEPTH * sizeof(__u64);
+ __u32 key = 0, val = 0;
+ __u64 *stack_p;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ if (control)
+ return 0;
+
+	/* it should allow skipping a full buffer's worth of entries */
+ key = bpf_get_stackid(ctx, &stackmap, TEST_STACK_DEPTH);
+ if ((int)key >= 0) {
+ /* The size of stackmap and stack_amap should be the same */
+ bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
+ stack_p = bpf_map_lookup_elem(&stack_amap, &key);
+ if (stack_p) {
+ bpf_get_stack(ctx, stack_p, max_len, TEST_STACK_DEPTH);
+			/* it wrongly skipped all the entries and filled in zeros */
+ if (stack_p[0] == 0)
+ failed = 1;
+ }
+ } else {
+ /* old kernel doesn't support skipping that many entries */
+ failed = 2;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/stream.c b/tools/testing/selftests/bpf/progs/stream.c
new file mode 100644
index 000000000000..4a5bd852f10c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/stream.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+#include "bpf_arena_common.h"
+
+struct arr_elem {
+ struct bpf_res_spin_lock lock;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct arr_elem);
+} arrmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 1); /* number of pages */
+} arena SEC(".maps");
+
+struct elem {
+ struct bpf_timer timer;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} array SEC(".maps");
+
+#define ENOSPC 28
+#define _STR "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+
+int size;
+u64 fault_addr;
+void *arena_ptr;
+
+SEC("syscall")
+__success __retval(0)
+int stream_exhaust(void *ctx)
+{
+ /* Use global variable for loop convergence. */
+ size = 0;
+ bpf_repeat(BPF_MAX_LOOPS) {
+ if (bpf_stream_printk(BPF_STDOUT, _STR) == -ENOSPC && size == 99954)
+ return 0;
+ size += sizeof(_STR) - 1;
+ }
+ return 1;
+}
+
+SEC("syscall")
+__arch_x86_64
+__arch_arm64
+__arch_s390x
+__success __retval(0)
+__stderr("ERROR: Timeout detected for may_goto instruction")
+__stderr("CPU: {{[0-9]+}} UID: 0 PID: {{[0-9]+}} Comm: {{.*}}")
+__stderr("Call trace:\n"
+"{{([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n"
+"|[ \t]+[^\n]+\n)*}}")
+int stream_cond_break(void *ctx)
+{
+ while (can_loop)
+ ;
+ return 0;
+}
+
+SEC("syscall")
+__success __retval(0)
+__stderr("ERROR: AA or ABBA deadlock detected for bpf_res_spin_lock")
+__stderr("{{Attempted lock = (0x[0-9a-fA-F]+)\n"
+"Total held locks = 1\n"
+"Held lock\\[ 0\\] = \\1}}")
+__stderr("...")
+__stderr("CPU: {{[0-9]+}} UID: 0 PID: {{[0-9]+}} Comm: {{.*}}")
+__stderr("Call trace:\n"
+"{{([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n"
+"|[ \t]+[^\n]+\n)*}}")
+int stream_deadlock(void *ctx)
+{
+ struct bpf_res_spin_lock *lock, *nlock;
+
+ lock = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!lock)
+ return 1;
+ nlock = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!nlock)
+ return 1;
+ if (bpf_res_spin_lock(lock))
+ return 1;
+ if (bpf_res_spin_lock(nlock)) {
+ bpf_res_spin_unlock(lock);
+ return 0;
+ }
+ bpf_res_spin_unlock(nlock);
+ bpf_res_spin_unlock(lock);
+ return 1;
+}
+
+SEC("syscall")
+__success __retval(0)
+int stream_syscall(void *ctx)
+{
+ bpf_stream_printk(BPF_STDOUT, "foo");
+ return 0;
+}
+
+SEC("syscall")
+__arch_x86_64
+__arch_arm64
+__success __retval(0)
+__stderr("ERROR: Arena WRITE access at unmapped address 0x{{.*}}")
+__stderr("CPU: {{[0-9]+}} UID: 0 PID: {{[0-9]+}} Comm: {{.*}}")
+__stderr("Call trace:\n"
+"{{([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n"
+"|[ \t]+[^\n]+\n)*}}")
+int stream_arena_write_fault(void *ctx)
+{
+ struct bpf_arena *ptr = (void *)&arena;
+ u64 user_vm_start;
+
+ /* Prevent GCC bounds warning: casting &arena to struct bpf_arena *
+ * triggers bounds checking since the map definition is smaller than struct
+ * bpf_arena. barrier_var() makes the pointer opaque to GCC, preventing the
+	 * bounds analysis.
+ */
+ barrier_var(ptr);
+ user_vm_start = ptr->user_vm_start;
+ fault_addr = user_vm_start + 0x7fff;
+ bpf_addr_space_cast(user_vm_start, 0, 1);
+ asm volatile (
+ "r1 = %0;"
+ "r2 = 1;"
+ "*(u32 *)(r1 + 0x7fff) = r2;"
+ :
+ : "r" (user_vm_start)
+ : "r1", "r2"
+ );
+ return 0;
+}
+
+SEC("syscall")
+__arch_x86_64
+__arch_arm64
+__success __retval(0)
+__stderr("ERROR: Arena READ access at unmapped address 0x{{.*}}")
+__stderr("CPU: {{[0-9]+}} UID: 0 PID: {{[0-9]+}} Comm: {{.*}}")
+__stderr("Call trace:\n"
+"{{([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n"
+"|[ \t]+[^\n]+\n)*}}")
+int stream_arena_read_fault(void *ctx)
+{
+ struct bpf_arena *ptr = (void *)&arena;
+ u64 user_vm_start;
+
+ /* Prevent GCC bounds warning: casting &arena to struct bpf_arena *
+ * triggers bounds checking since the map definition is smaller than struct
+ * bpf_arena. barrier_var() makes the pointer opaque to GCC, preventing the
+	 * bounds analysis.
+ */
+ barrier_var(ptr);
+ user_vm_start = ptr->user_vm_start;
+ fault_addr = user_vm_start + 0x7fff;
+ bpf_addr_space_cast(user_vm_start, 0, 1);
+ asm volatile (
+ "r1 = %0;"
+ "r1 = *(u32 *)(r1 + 0x7fff);"
+ :
+ : "r" (user_vm_start)
+ : "r1"
+ );
+ return 0;
+}
+
+static __noinline void subprog(void)
+{
+ int __arena *addr = (int __arena *)0xdeadbeef;
+
+ arena_ptr = &arena;
+ *addr = 1;
+}
+
+SEC("syscall")
+__arch_x86_64
+__arch_arm64
+__success __retval(0)
+__stderr("ERROR: Arena WRITE access at unmapped address 0x{{.*}}")
+__stderr("CPU: {{[0-9]+}} UID: 0 PID: {{[0-9]+}} Comm: {{.*}}")
+__stderr("Call trace:\n"
+"{{([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n"
+"|[ \t]+[^\n]+\n)*}}")
+int stream_arena_subprog_fault(void *ctx)
+{
+ subprog();
+ return 0;
+}
+
+static __noinline int timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+ int __arena *addr = (int __arena *)0xdeadbeef;
+
+ arena_ptr = &arena;
+ *addr = 1;
+ return 0;
+}
+
+SEC("syscall")
+__arch_x86_64
+__arch_arm64
+__success __retval(0)
+__stderr("ERROR: Arena WRITE access at unmapped address 0x{{.*}}")
+__stderr("CPU: {{[0-9]+}} UID: 0 PID: {{[0-9]+}} Comm: {{.*}}")
+__stderr("Call trace:\n"
+"{{([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n"
+"|[ \t]+[^\n]+\n)*}}")
+int stream_arena_callback_fault(void *ctx)
+{
+ struct bpf_timer *arr_timer;
+
+ arr_timer = bpf_map_lookup_elem(&array, &(int){0});
+ if (!arr_timer)
+ return 0;
+ bpf_timer_init(arr_timer, &array, 1);
+ bpf_timer_set_callback(arr_timer, timer_cb);
+ bpf_timer_start(arr_timer, 0, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/stream_fail.c b/tools/testing/selftests/bpf/progs/stream_fail.c
new file mode 100644
index 000000000000..3662515f0107
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/stream_fail.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+SEC("syscall")
+__failure __msg("Possibly NULL pointer passed")
+int stream_vprintk_null_arg(void *ctx)
+{
+ bpf_stream_vprintk_impl(BPF_STDOUT, "", NULL, 0, NULL);
+ return 0;
+}
+
+SEC("syscall")
+__failure __msg("R3 type=scalar expected=")
+int stream_vprintk_scalar_arg(void *ctx)
+{
+ bpf_stream_vprintk_impl(BPF_STDOUT, "", (void *)46, 0, NULL);
+ return 0;
+}
+
+SEC("syscall")
+__failure __msg("arg#1 doesn't point to a const string")
+int stream_vprintk_string_arg(void *ctx)
+{
+ bpf_stream_vprintk_impl(BPF_STDOUT, ctx, NULL, 0, NULL);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c b/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c
new file mode 100644
index 000000000000..826e6b6aff7e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Red Hat, Inc.*/
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <linux/limits.h>
+#include "bpf_misc.h"
+#include "errno.h"
+
+char *user_ptr = (char *)1;
+char *invalid_kern_ptr = (char *)-1;
+
+/*
+ * When passing userspace pointers, the error code differs based on arch:
+ * -ERANGE on arches with non-overlapping address spaces
+ * -EFAULT on other arches
+ */
+#if defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_loongarch) || \
+ defined(__TARGET_ARCH_powerpc) || defined(__TARGET_ARCH_x86)
+#define USER_PTR_ERR -ERANGE
+#else
+#define USER_PTR_ERR -EFAULT
+#endif
+
+/*
+ * On s390, __get_kernel_nofault (used in string kfuncs) returns 0 for NULL and
+ * user_ptr (instead of causing an exception) so the below two groups of tests
+ * are not applicable.
+ */
+#ifndef __TARGET_ARCH_s390
+
+/* Passing NULL to string kfuncs (treated as a userspace ptr) */
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_null1(void *ctx) { return bpf_strcmp(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strcmp_null2(void *ctx) { return bpf_strcmp("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcasecmp_null1(void *ctx) { return bpf_strcasecmp(NULL, "HELLO"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strcasecmp_null2(void *ctx) { return bpf_strcasecmp("HELLO", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strchr_null(void *ctx) { return bpf_strchr(NULL, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strchrnul_null(void *ctx) { return bpf_strchrnul(NULL, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strnchr_null(void *ctx) { return bpf_strnchr(NULL, 1, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strrchr_null(void *ctx) { return bpf_strrchr(NULL, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strlen_null(void *ctx) { return bpf_strlen(NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strnlen_null(void *ctx) { return bpf_strnlen(NULL, 1); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strspn_null1(void *ctx) { return bpf_strspn(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strspn_null2(void *ctx) { return bpf_strspn("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strcspn_null1(void *ctx) { return bpf_strcspn(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strcspn_null2(void *ctx) { return bpf_strcspn("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strstr_null1(void *ctx) { return bpf_strstr(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strstr_null2(void *ctx) { return bpf_strstr("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strcasestr_null1(void *ctx) { return bpf_strcasestr(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strcasestr_null2(void *ctx) { return bpf_strcasestr("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strnstr_null1(void *ctx) { return bpf_strnstr(NULL, "hello", 1); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strnstr_null2(void *ctx) { return bpf_strnstr("hello", NULL, 1); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strncasestr_null1(void *ctx) { return bpf_strncasestr(NULL, "hello", 1); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strncasestr_null2(void *ctx) { return bpf_strncasestr("hello", NULL, 1); }
+
+/* Passing userspace ptr to string kfuncs */
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_user_ptr1(void *ctx) { return bpf_strcmp(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_user_ptr2(void *ctx) { return bpf_strcmp("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcasecmp_user_ptr1(void *ctx) { return bpf_strcasecmp(user_ptr, "HELLO"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcasecmp_user_ptr2(void *ctx) { return bpf_strcasecmp("HELLO", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strchr_user_ptr(void *ctx) { return bpf_strchr(user_ptr, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strchrnul_user_ptr(void *ctx) { return bpf_strchrnul(user_ptr, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnchr_user_ptr(void *ctx) { return bpf_strnchr(user_ptr, 1, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strrchr_user_ptr(void *ctx) { return bpf_strrchr(user_ptr, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strlen_user_ptr(void *ctx) { return bpf_strlen(user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnlen_user_ptr(void *ctx) { return bpf_strnlen(user_ptr, 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_user_ptr1(void *ctx) { return bpf_strspn(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_user_ptr2(void *ctx) { return bpf_strspn("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_user_ptr1(void *ctx) { return bpf_strcspn(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_user_ptr2(void *ctx) { return bpf_strcspn("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_user_ptr1(void *ctx) { return bpf_strstr(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_user_ptr2(void *ctx) { return bpf_strstr("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcasestr_user_ptr1(void *ctx) { return bpf_strcasestr(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcasestr_user_ptr2(void *ctx) { return bpf_strcasestr("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnstr_user_ptr1(void *ctx) { return bpf_strnstr(user_ptr, "hello", 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnstr_user_ptr2(void *ctx) { return bpf_strnstr("hello", user_ptr, 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strncasestr_user_ptr1(void *ctx) { return bpf_strncasestr(user_ptr, "hello", 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strncasestr_user_ptr2(void *ctx) { return bpf_strncasestr("hello", user_ptr, 1); }
+
+#endif /* __TARGET_ARCH_s390 */
+
+/* Passing invalid kernel ptr to string kfuncs should always return -EFAULT */
+SEC("syscall") __retval(-EFAULT) int test_strcmp_pagefault1(void *ctx) { return bpf_strcmp(invalid_kern_ptr, "hello"); }
+SEC("syscall") __retval(-EFAULT) int test_strcmp_pagefault2(void *ctx) { return bpf_strcmp("hello", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strcasecmp_pagefault1(void *ctx) { return bpf_strcasecmp(invalid_kern_ptr, "HELLO"); }
+SEC("syscall") __retval(-EFAULT) int test_strcasecmp_pagefault2(void *ctx) { return bpf_strcasecmp("HELLO", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strchr_pagefault(void *ctx) { return bpf_strchr(invalid_kern_ptr, 'a'); }
+SEC("syscall") __retval(-EFAULT) int test_strchrnul_pagefault(void *ctx) { return bpf_strchrnul(invalid_kern_ptr, 'a'); }
+SEC("syscall") __retval(-EFAULT) int test_strnchr_pagefault(void *ctx) { return bpf_strnchr(invalid_kern_ptr, 1, 'a'); }
+SEC("syscall") __retval(-EFAULT) int test_strrchr_pagefault(void *ctx) { return bpf_strrchr(invalid_kern_ptr, 'a'); }
+SEC("syscall") __retval(-EFAULT) int test_strlen_pagefault(void *ctx) { return bpf_strlen(invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strnlen_pagefault(void *ctx) { return bpf_strnlen(invalid_kern_ptr, 1); }
+SEC("syscall") __retval(-EFAULT) int test_strspn_pagefault1(void *ctx) { return bpf_strspn(invalid_kern_ptr, "hello"); }
+SEC("syscall") __retval(-EFAULT) int test_strspn_pagefault2(void *ctx) { return bpf_strspn("hello", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strcspn_pagefault1(void *ctx) { return bpf_strcspn(invalid_kern_ptr, "hello"); }
+SEC("syscall") __retval(-EFAULT) int test_strcspn_pagefault2(void *ctx) { return bpf_strcspn("hello", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strstr_pagefault1(void *ctx) { return bpf_strstr(invalid_kern_ptr, "hello"); }
+SEC("syscall") __retval(-EFAULT) int test_strstr_pagefault2(void *ctx) { return bpf_strstr("hello", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strcasestr_pagefault1(void *ctx) { return bpf_strcasestr(invalid_kern_ptr, "hello"); }
+SEC("syscall") __retval(-EFAULT) int test_strcasestr_pagefault2(void *ctx) { return bpf_strcasestr("hello", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strnstr_pagefault1(void *ctx) { return bpf_strnstr(invalid_kern_ptr, "hello", 1); }
+SEC("syscall") __retval(-EFAULT) int test_strnstr_pagefault2(void *ctx) { return bpf_strnstr("hello", invalid_kern_ptr, 1); }
+SEC("syscall") __retval(-EFAULT) int test_strncasestr_pagefault1(void *ctx) { return bpf_strncasestr(invalid_kern_ptr, "hello", 1); }
+SEC("syscall") __retval(-EFAULT) int test_strncasestr_pagefault2(void *ctx) { return bpf_strncasestr("hello", invalid_kern_ptr, 1); }
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c b/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c
new file mode 100644
index 000000000000..05e1da1f250f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Red Hat, Inc.*/
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <linux/limits.h>
+
+char long_str[XATTR_SIZE_MAX + 1];
+
+SEC("syscall") int test_strcmp_too_long(void *ctx) { return bpf_strcmp(long_str, long_str); }
+SEC("syscall") int test_strcasecmp_too_long(void *ctx) { return bpf_strcasecmp(long_str, long_str); }
+SEC("syscall") int test_strchr_too_long(void *ctx) { return bpf_strchr(long_str, 'b'); }
+SEC("syscall") int test_strchrnul_too_long(void *ctx) { return bpf_strchrnul(long_str, 'b'); }
+SEC("syscall") int test_strnchr_too_long(void *ctx) { return bpf_strnchr(long_str, sizeof(long_str), 'b'); }
+SEC("syscall") int test_strrchr_too_long(void *ctx) { return bpf_strrchr(long_str, 'b'); }
+SEC("syscall") int test_strlen_too_long(void *ctx) { return bpf_strlen(long_str); }
+SEC("syscall") int test_strnlen_too_long(void *ctx) { return bpf_strnlen(long_str, sizeof(long_str)); }
+SEC("syscall") int test_strspn_str_too_long(void *ctx) { return bpf_strspn(long_str, "a"); }
+SEC("syscall") int test_strspn_accept_too_long(void *ctx) { return bpf_strspn("b", long_str); }
+SEC("syscall") int test_strcspn_str_too_long(void *ctx) { return bpf_strcspn(long_str, "b"); }
+SEC("syscall") int test_strcspn_reject_too_long(void *ctx) { return bpf_strcspn("b", long_str); }
+SEC("syscall") int test_strstr_too_long(void *ctx) { return bpf_strstr(long_str, "hello"); }
+SEC("syscall") int test_strcasestr_too_long(void *ctx) { return bpf_strcasestr(long_str, "hello"); }
+SEC("syscall") int test_strnstr_too_long(void *ctx) { return bpf_strnstr(long_str, "hello", sizeof(long_str)); }
+SEC("syscall") int test_strncasestr_too_long(void *ctx) { return bpf_strncasestr(long_str, "hello", sizeof(long_str)); }
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_success.c b/tools/testing/selftests/bpf/progs/string_kfuncs_success.c
new file mode 100644
index 000000000000..a8513964516b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/string_kfuncs_success.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Red Hat, Inc.*/
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "errno.h"
+
+char str[] = "hello world";
+
+#define __test(retval) SEC("syscall") __success __retval(retval)
+
+/* Functional tests */
+__test(0) int test_strcmp_eq(void *ctx) { return bpf_strcmp(str, "hello world"); }
+__test(1) int test_strcmp_neq(void *ctx) { return bpf_strcmp(str, "hello"); }
+__test(0) int test_strcasecmp_eq1(void *ctx) { return bpf_strcasecmp(str, "hello world"); }
+__test(0) int test_strcasecmp_eq2(void *ctx) { return bpf_strcasecmp(str, "HELLO WORLD"); }
+__test(0) int test_strcasecmp_eq3(void *ctx) { return bpf_strcasecmp(str, "HELLO world"); }
+__test(1) int test_strcasecmp_neq1(void *ctx) { return bpf_strcasecmp(str, "hello"); }
+__test(1) int test_strcasecmp_neq2(void *ctx) { return bpf_strcasecmp(str, "HELLO"); }
+__test(1) int test_strchr_found(void *ctx) { return bpf_strchr(str, 'e'); }
+__test(11) int test_strchr_null(void *ctx) { return bpf_strchr(str, '\0'); }
+__test(-ENOENT) int test_strchr_notfound(void *ctx) { return bpf_strchr(str, 'x'); }
+__test(1) int test_strchrnul_found(void *ctx) { return bpf_strchrnul(str, 'e'); }
+__test(11) int test_strchrnul_notfound(void *ctx) { return bpf_strchrnul(str, 'x'); }
+__test(1) int test_strnchr_found(void *ctx) { return bpf_strnchr(str, 5, 'e'); }
+__test(11) int test_strnchr_null(void *ctx) { return bpf_strnchr(str, 12, '\0'); }
+__test(-ENOENT) int test_strnchr_notfound(void *ctx) { return bpf_strnchr(str, 5, 'w'); }
+__test(9) int test_strrchr_found(void *ctx) { return bpf_strrchr(str, 'l'); }
+__test(11) int test_strrchr_null(void *ctx) { return bpf_strrchr(str, '\0'); }
+__test(-ENOENT) int test_strrchr_notfound(void *ctx) { return bpf_strrchr(str, 'x'); }
+__test(11) int test_strlen(void *ctx) { return bpf_strlen(str); }
+__test(11) int test_strnlen(void *ctx) { return bpf_strnlen(str, 12); }
+__test(5) int test_strspn(void *ctx) { return bpf_strspn(str, "ehlo"); }
+__test(2) int test_strcspn(void *ctx) { return bpf_strcspn(str, "lo"); }
+__test(6) int test_strstr_found(void *ctx) { return bpf_strstr(str, "world"); }
+__test(6) int test_strcasestr_found(void *ctx) { return bpf_strcasestr(str, "woRLD"); }
+__test(-ENOENT) int test_strstr_notfound(void *ctx) { return bpf_strstr(str, "hi"); }
+__test(-ENOENT) int test_strcasestr_notfound(void *ctx) { return bpf_strcasestr(str, "hi"); }
+__test(0) int test_strstr_empty(void *ctx) { return bpf_strstr(str, ""); }
+__test(0) int test_strcasestr_empty(void *ctx) { return bpf_strcasestr(str, ""); }
+__test(0) int test_strnstr_found1(void *ctx) { return bpf_strnstr("", "", 0); }
+__test(0) int test_strnstr_found2(void *ctx) { return bpf_strnstr(str, "hello", 5); }
+__test(0) int test_strnstr_found3(void *ctx) { return bpf_strnstr(str, "hello", 6); }
+__test(-ENOENT) int test_strnstr_notfound1(void *ctx) { return bpf_strnstr(str, "hi", 10); }
+__test(-ENOENT) int test_strnstr_notfound2(void *ctx) { return bpf_strnstr(str, "hello", 4); }
+__test(-ENOENT) int test_strnstr_notfound3(void *ctx) { return bpf_strnstr("", "a", 0); }
+__test(0) int test_strnstr_empty(void *ctx) { return bpf_strnstr(str, "", 1); }
+__test(0) int test_strncasestr_found1(void *ctx) { return bpf_strncasestr("", "", 0); }
+__test(0) int test_strncasestr_found2(void *ctx) { return bpf_strncasestr(str, "heLLO", 5); }
+__test(0) int test_strncasestr_found3(void *ctx) { return bpf_strncasestr(str, "heLLO", 6); }
+__test(-ENOENT) int test_strncasestr_notfound1(void *ctx) { return bpf_strncasestr(str, "hi", 10); }
+__test(-ENOENT) int test_strncasestr_notfound2(void *ctx) { return bpf_strncasestr(str, "hello", 4); }
+__test(-ENOENT) int test_strncasestr_notfound3(void *ctx) { return bpf_strncasestr("", "a", 0); }
+__test(0) int test_strncasestr_empty(void *ctx) { return bpf_strncasestr(str, "", 1); }
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/strncmp_bench.c b/tools/testing/selftests/bpf/progs/strncmp_bench.c
new file mode 100644
index 000000000000..f47bf88f8d2a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/strncmp_bench.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
+#include <linux/types.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define STRNCMP_STR_SZ 4096
+
+/* Will be updated by benchmark before program loading */
+const volatile unsigned int cmp_str_len = 1;
+const char target[STRNCMP_STR_SZ];
+
+long hits = 0;
+char str[STRNCMP_STR_SZ];
+
+char _license[] SEC("license") = "GPL";
+
+static __always_inline int local_strncmp(const char *s1, unsigned int sz,
+ const char *s2)
+{
+ int ret = 0;
+ unsigned int i;
+
+ for (i = 0; i < sz; i++) {
+ /* E.g. 0xff > 0x31 */
+ ret = (unsigned char)s1[i] - (unsigned char)s2[i];
+ if (ret || !s1[i])
+ break;
+ }
+
+ return ret;
+}
+
+SEC("tp/syscalls/sys_enter_getpgid")
+int strncmp_no_helper(void *ctx)
+{
+ const char *target_str = target;
+
+ barrier_var(target_str);
+ if (local_strncmp(str, cmp_str_len + 1, target_str) < 0)
+ __sync_add_and_fetch(&hits, 1);
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getpgid")
+int strncmp_helper(void *ctx)
+{
+ if (bpf_strncmp(str, cmp_str_len + 1, target) < 0)
+ __sync_add_and_fetch(&hits, 1);
+ return 0;
+}
+
diff --git a/tools/testing/selftests/bpf/progs/strncmp_test.c b/tools/testing/selftests/bpf/progs/strncmp_test.c
new file mode 100644
index 000000000000..769668feed48
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/strncmp_test.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
+#include <stdbool.h>
+#include <linux/types.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define STRNCMP_STR_SZ 8
+
+const char target[STRNCMP_STR_SZ] = "EEEEEEE";
+char str[STRNCMP_STR_SZ];
+int cmp_ret = 0;
+int target_pid = 0;
+
+const char no_str_target[STRNCMP_STR_SZ] = "12345678";
+char writable_target[STRNCMP_STR_SZ];
+unsigned int no_const_str_size = STRNCMP_STR_SZ;
+
+char _license[] SEC("license") = "GPL";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int do_strncmp(void *ctx)
+{
+ if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
+ return 0;
+
+ cmp_ret = bpf_strncmp(str, STRNCMP_STR_SZ, target);
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int strncmp_bad_not_const_str_size(void *ctx)
+{
+	/* The string size is not a constant, so the load will fail */
+ cmp_ret = bpf_strncmp(str, no_const_str_size, target);
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int strncmp_bad_writable_target(void *ctx)
+{
+	/* The compared target is not read-only, so the load will fail */
+ cmp_ret = bpf_strncmp(str, STRNCMP_STR_SZ, writable_target);
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int strncmp_bad_not_null_term_target(void *ctx)
+{
+	/* The compared target is not null-terminated, so the load will fail */
+ cmp_ret = bpf_strncmp(str, STRNCMP_STR_SZ, no_str_target);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h
index 7de534f38c3f..6e1918deaf26 100644
--- a/tools/testing/selftests/bpf/progs/strobemeta.h
+++ b/tools/testing/selftests/bpf/progs/strobemeta.h
@@ -10,6 +10,8 @@
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
typedef uint32_t pid_t;
struct task_struct {};
@@ -24,9 +26,11 @@ struct task_struct {};
#define STACK_TABLE_EPOCH_SHIFT 20
#define STROBE_MAX_STR_LEN 1
#define STROBE_MAX_CFGS 32
+#define READ_MAP_VAR_PAYLOAD_CAP \
+ ((1 + STROBE_MAX_MAP_ENTRIES * 2) * STROBE_MAX_STR_LEN)
#define STROBE_MAX_PAYLOAD \
(STROBE_MAX_STRS * STROBE_MAX_STR_LEN + \
- STROBE_MAX_MAPS * (1 + STROBE_MAX_MAP_ENTRIES * 2) * STROBE_MAX_STR_LEN)
+ STROBE_MAX_MAPS * READ_MAP_VAR_PAYLOAD_CAP)
struct strobe_value_header {
/*
@@ -135,7 +139,7 @@ struct strobe_value_loc {
* tpidr_el0 for aarch64).
* TLS_IMM_EXEC: absolute address of GOT entry containing offset
* from thread pointer;
- * TLS_GENERAL_DYN: absolute addres of double GOT entry
+ * TLS_GENERAL_DYN: absolute address of double GOT entry
* containing tls_index_t struct;
*/
int64_t offset;
@@ -326,9 +330,9 @@ static void *calc_location(struct strobe_value_loc *loc, void *tls_base)
}
bpf_probe_read_user(&tls_ptr, sizeof(void *), dtv);
/* if pointer has (void *)-1 value, then TLS wasn't initialized yet */
- return tls_ptr && tls_ptr != (void *)-1
- ? tls_ptr + tls_index.offset
- : NULL;
+ if (!tls_ptr || tls_ptr == (void *)-1)
+ return NULL;
+ return tls_ptr + tls_index.offset;
}
#ifdef SUBPROGS
@@ -355,10 +359,10 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
size_t idx, void *tls_base,
struct strobe_value_generic *value,
struct strobemeta_payload *data,
- void *payload)
+ size_t off)
{
void *location;
- uint32_t len;
+ uint64_t len;
data->str_lens[idx] = 0;
location = calc_location(&cfg->str_locs[idx], tls_base);
@@ -366,10 +370,10 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
return 0;
bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, value->ptr);
+ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, value->ptr);
/*
* if bpf_probe_read_user_str returns error (<0), due to casting to
- * unsinged int, it will become big number, so next check is
+	 * unsigned int, it will become a big number, so the next check is
* sufficient to check for errors AND prove to BPF verifier, that
* bpf_probe_read_user_str won't return anything bigger than
* STROBE_MAX_STR_LEN
@@ -378,31 +382,30 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
return 0;
data->str_lens[idx] = len;
- return len;
+ return off + len;
}
-static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
- size_t idx, void *tls_base,
- struct strobe_value_generic *value,
- struct strobemeta_payload *data,
- void *payload)
+static __always_inline uint64_t read_map_var(struct strobemeta_cfg *cfg,
+ size_t idx, void *tls_base,
+ struct strobe_value_generic *value,
+ struct strobemeta_payload *data,
+ size_t off)
{
struct strobe_map_descr* descr = &data->map_descrs[idx];
struct strobe_map_raw map;
void *location;
- uint32_t len;
- int i;
+ uint64_t len;
descr->tag_len = 0; /* presume no tag is set */
descr->cnt = -1; /* presume no value is set */
location = calc_location(&cfg->map_locs[idx], tls_base);
if (!location)
- return payload;
+ return off;
bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
if (bpf_probe_read_user(&map, sizeof(struct strobe_map_raw), value->ptr))
- return payload;
+ return off;
descr->id = map.id;
descr->cnt = map.cnt;
@@ -411,40 +414,92 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
data->req_meta_valid = 1;
}
- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.tag);
+ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, map.tag);
if (len <= STROBE_MAX_STR_LEN) {
descr->tag_len = len;
- payload += len;
+ off += len;
}
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < STROBE_MAX_MAP_ENTRIES; ++i) {
if (i >= map.cnt)
break;
descr->key_lens[i] = 0;
- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN,
+ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN,
map.entries[i].key);
if (len <= STROBE_MAX_STR_LEN) {
descr->key_lens[i] = len;
- payload += len;
+ off += len;
}
descr->val_lens[i] = 0;
- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN,
+ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN,
map.entries[i].val);
if (len <= STROBE_MAX_STR_LEN) {
descr->val_lens[i] = len;
- payload += len;
+ off += len;
}
}
- return payload;
+ return off;
}
+#ifdef USE_BPF_LOOP
+enum read_type {
+ READ_INT_VAR,
+ READ_MAP_VAR,
+ READ_STR_VAR,
+};
+
+struct read_var_ctx {
+ struct strobemeta_payload *data;
+ void *tls_base;
+ struct strobemeta_cfg *cfg;
+ size_t payload_off;
+ /* value gets mutated */
+ struct strobe_value_generic *value;
+ enum read_type type;
+};
+
+static int read_var_callback(__u64 index, struct read_var_ctx *ctx)
+{
+	/* Lose precision info for ctx->payload_off; the verifier won't track the
+	 * double xor, so barrier_var() is needed to force clang to keep both xors.
+ */
+ ctx->payload_off ^= index;
+ barrier_var(ctx->payload_off);
+ ctx->payload_off ^= index;
+ switch (ctx->type) {
+ case READ_INT_VAR:
+ if (index >= STROBE_MAX_INTS)
+ return 1;
+ read_int_var(ctx->cfg, index, ctx->tls_base, ctx->value, ctx->data);
+ break;
+ case READ_MAP_VAR:
+ if (index >= STROBE_MAX_MAPS)
+ return 1;
+ if (ctx->payload_off > sizeof(ctx->data->payload) - READ_MAP_VAR_PAYLOAD_CAP)
+ return 1;
+ ctx->payload_off = read_map_var(ctx->cfg, index, ctx->tls_base,
+ ctx->value, ctx->data, ctx->payload_off);
+ break;
+ case READ_STR_VAR:
+ if (index >= STROBE_MAX_STRS)
+ return 1;
+ if (ctx->payload_off > sizeof(ctx->data->payload) - STROBE_MAX_STR_LEN)
+ return 1;
+ ctx->payload_off = read_str_var(ctx->cfg, index, ctx->tls_base,
+ ctx->value, ctx->data, ctx->payload_off);
+ break;
+ }
+ return 0;
+}
+#endif /* USE_BPF_LOOP */
+
/*
* read_strobe_meta returns NULL, if no metadata was read; otherwise returns
* pointer to *right after* payload ends
@@ -460,7 +515,8 @@ static void *read_strobe_meta(struct task_struct *task,
pid_t pid = bpf_get_current_pid_tgid() >> 32;
struct strobe_value_generic value = {0};
struct strobemeta_cfg *cfg;
- void *tls_base, *payload;
+ size_t payload_off;
+ void *tls_base;
cfg = bpf_map_lookup_elem(&strobemeta_cfgs, &pid);
if (!cfg)
@@ -468,42 +524,74 @@ static void *read_strobe_meta(struct task_struct *task,
data->int_vals_set_mask = 0;
data->req_meta_valid = 0;
- payload = data->payload;
+ payload_off = 0;
/*
* we don't have struct task_struct definition, it should be:
* tls_base = (void *)task->thread.fsbase;
*/
tls_base = (void *)task;
+#ifdef USE_BPF_LOOP
+ struct read_var_ctx ctx = {
+ .cfg = cfg,
+ .tls_base = tls_base,
+ .value = &value,
+ .data = data,
+ .payload_off = 0,
+ };
+ int err;
+
+ ctx.type = READ_INT_VAR;
+ err = bpf_loop(STROBE_MAX_INTS, read_var_callback, &ctx, 0);
+ if (err != STROBE_MAX_INTS)
+ return NULL;
+
+ ctx.type = READ_STR_VAR;
+ err = bpf_loop(STROBE_MAX_STRS, read_var_callback, &ctx, 0);
+ if (err != STROBE_MAX_STRS)
+ return NULL;
+
+ ctx.type = READ_MAP_VAR;
+ err = bpf_loop(STROBE_MAX_MAPS, read_var_callback, &ctx, 0);
+ if (err != STROBE_MAX_MAPS)
+ return NULL;
+
+ payload_off = ctx.payload_off;
+ /* this should not really happen, here only to satisfy verifier */
+ if (payload_off > sizeof(data->payload))
+ payload_off = sizeof(data->payload);
+#else
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
-#endif
+ __pragma_loop_unroll
+#endif /* NO_UNROLL */
for (int i = 0; i < STROBE_MAX_INTS; ++i) {
read_int_var(cfg, i, tls_base, &value, data);
}
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
-#endif
+ __pragma_loop_unroll
+#endif /* NO_UNROLL */
for (int i = 0; i < STROBE_MAX_STRS; ++i) {
- payload += read_str_var(cfg, i, tls_base, &value, data, payload);
+ payload_off = read_str_var(cfg, i, tls_base, &value, data, payload_off);
}
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
-#endif
+ __pragma_loop_unroll
+#endif /* NO_UNROLL */
for (int i = 0; i < STROBE_MAX_MAPS; ++i) {
- payload = read_map_var(cfg, i, tls_base, &value, data, payload);
+ payload_off = read_map_var(cfg, i, tls_base, &value, data, payload_off);
}
+#endif /* USE_BPF_LOOP */
+
/*
* return pointer right after end of payload, so it's possible to
* calculate exact amount of useful data that needs to be sent
*/
- return payload;
+ return &data->payload[payload_off];
}
SEC("raw_tracepoint/kfree_skb")
diff --git a/tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c b/tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c
new file mode 100644
index 000000000000..d18b992f0165
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2021 Facebook */
+
+#define STROBE_MAX_INTS 2
+#define STROBE_MAX_STRS 25
+#define STROBE_MAX_MAPS 100
+#define STROBE_MAX_MAP_ENTRIES 20
+#define USE_BPF_LOOP
+#include "strobemeta.h"
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_autocreate.c b/tools/testing/selftests/bpf/progs/struct_ops_autocreate.c
new file mode 100644
index 000000000000..ba10c3896213
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_autocreate.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int test_1_result = 0;
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1)
+{
+ test_1_result = 42;
+ return 0;
+}
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_2)
+{
+ return 0;
+}
+
+struct bpf_testmod_ops___v1 {
+ int (*test_1)(void);
+};
+
+struct bpf_testmod_ops___v2 {
+ int (*test_1)(void);
+ int (*does_not_exist)(void);
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___v1 testmod_1 = {
+ .test_1 = (void *)test_1
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___v2 testmod_2 = {
+ .test_1 = (void *)test_1,
+ .does_not_exist = (void *)test_2
+};
+
+SEC("?.struct_ops")
+struct bpf_testmod_ops___v1 optional_map = {
+ .test_1 = (void *)test_1,
+};
+
+SEC("?.struct_ops.link")
+struct bpf_testmod_ops___v1 optional_map2 = {
+ .test_1 = (void *)test_1,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c b/tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c
new file mode 100644
index 000000000000..6049d9c902d3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int test_1_result = 0;
+
+SEC("?struct_ops/test_1")
+int BPF_PROG(foo)
+{
+ test_1_result = 42;
+ return 0;
+}
+
+SEC("?struct_ops/test_1")
+int BPF_PROG(bar)
+{
+ test_1_result = 24;
+ return 0;
+}
+
+struct bpf_testmod_ops {
+ int (*test_1)(void);
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+ .test_1 = (void *)bar
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_detach.c b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
new file mode 100644
index 000000000000..284a5b008e0c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+/*
+ * This subprogram validates that libbpf handles the situation in which a BPF
+ * object has subprograms in the .text section but no entry BPF programs.
+ * At some point that was causing issues due to legacy logic of treating such
+ * subprogram as entry program (with unknown program type, which would fail).
+ */
+int dangling_subprog(void)
+{
+ /* do nothing, just be here */
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_do_detach;
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c b/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c
new file mode 100644
index 000000000000..d8cc99f5c2e2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1_forgotten)
+{
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops ops = {
+ /* we forgot to reference test_1_forgotten above, oops */
+};
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping1.c b/tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping1.c
new file mode 100644
index 000000000000..ad8bb546c9bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping1.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define bpf_kfunc_multi_st_ops_test_1(args) bpf_kfunc_multi_st_ops_test_1(args, st_ops_id)
+int st_ops_id;
+
+int test_pid;
+int test_err;
+
+#define MAP1_MAGIC 1234
+
+SEC("struct_ops")
+int BPF_PROG(test_1, struct st_ops_args *args)
+{
+ return MAP1_MAGIC;
+}
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(sys_enter, struct pt_regs *regs, long id)
+{
+ struct st_ops_args args = {};
+ struct task_struct *task;
+ int ret;
+
+ task = bpf_get_current_task_btf();
+ if (!test_pid || task->pid != test_pid)
+ return 0;
+
+ ret = bpf_kfunc_multi_st_ops_test_1(&args);
+ if (ret != MAP1_MAGIC)
+ test_err++;
+
+ return 0;
+}
+
+SEC("syscall")
+int syscall_prog(void *ctx)
+{
+ struct st_ops_args args = {};
+ int ret;
+
+ ret = bpf_kfunc_multi_st_ops_test_1(&args);
+ if (ret != MAP1_MAGIC)
+ test_err++;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_multi_st_ops st_ops_map = {
+ .test_1 = (void *)test_1,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping2.c b/tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping2.c
new file mode 100644
index 000000000000..cea1a2f4b62f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping2.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define bpf_kfunc_multi_st_ops_test_1(args) bpf_kfunc_multi_st_ops_test_1(args, st_ops_id)
+int st_ops_id;
+
+int test_pid;
+int test_err;
+
+#define MAP2_MAGIC 4567
+
+SEC("struct_ops")
+int BPF_PROG(test_1, struct st_ops_args *args)
+{
+ return MAP2_MAGIC;
+}
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(sys_enter, struct pt_regs *regs, long id)
+{
+ struct st_ops_args args = {};
+ struct task_struct *task;
+ int ret;
+
+ task = bpf_get_current_task_btf();
+ if (!test_pid || task->pid != test_pid)
+ return 0;
+
+ ret = bpf_kfunc_multi_st_ops_test_1(&args);
+ if (ret != MAP2_MAGIC)
+ test_err++;
+
+ return 0;
+}
+
+SEC("syscall")
+int syscall_prog(void *ctx)
+{
+ struct st_ops_args args = {};
+ int ret;
+
+ ret = bpf_kfunc_multi_st_ops_test_1(&args);
+ if (ret != MAP2_MAGIC)
+ test_err++;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_multi_st_ops st_ops_map = {
+ .test_1 = (void *)test_1,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c
new file mode 100644
index 000000000000..2b98b7710816
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c
@@ -0,0 +1,30 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
+ * allow a referenced kptr or a NULL pointer to be returned. A referenced kptr to task
+ * here is acquired automatically as the task argument is tagged with "__ref".
+ */
+SEC("struct_ops/test_return_ref_kptr")
+struct task_struct *BPF_PROG(kptr_return, int dummy,
+ struct task_struct *task, struct cgroup *cgrp)
+{
+ if (dummy % 2) {
+ bpf_task_release(task);
+ return NULL;
+ }
+ return task;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_kptr_return = {
+ .test_return_ref_kptr = (void *)kptr_return,
+};
+
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__invalid_scalar.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__invalid_scalar.c
new file mode 100644
index 000000000000..caeea158ef69
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__invalid_scalar.c
@@ -0,0 +1,26 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This tests struct_ops BPF programs returning a referenced kptr. The verifier should
+ * reject programs returning a non-zero scalar value.
+ */
+SEC("struct_ops/test_return_ref_kptr")
+__failure __msg("At program exit the register R0 has smin=1 smax=1 should have been in [0, 0]")
+struct task_struct *BPF_PROG(kptr_return_fail__invalid_scalar, int dummy,
+ struct task_struct *task, struct cgroup *cgrp)
+{
+ bpf_task_release(task);
+ return (struct task_struct *)1;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_kptr_return = {
+ .test_return_ref_kptr = (void *)kptr_return_fail__invalid_scalar,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__local_kptr.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__local_kptr.c
new file mode 100644
index 000000000000..b8b4f05c3d7f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__local_kptr.c
@@ -0,0 +1,34 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This tests a struct_ops BPF program returning a referenced kptr. The verifier
+ * should reject a program returning a local kptr.
+ */
+SEC("struct_ops/test_return_ref_kptr")
+__failure __msg("At program exit the register R0 is not a known value (ptr_or_null_)")
+struct task_struct *BPF_PROG(kptr_return_fail__local_kptr, int dummy,
+ struct task_struct *task, struct cgroup *cgrp)
+{
+ struct task_struct *t;
+
+ bpf_task_release(task);
+
+ t = bpf_obj_new(typeof(*task));
+ if (!t)
+ return NULL;
+
+ return t;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_kptr_return = {
+ .test_return_ref_kptr = (void *)kptr_return_fail__local_kptr,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__nonzero_offset.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__nonzero_offset.c
new file mode 100644
index 000000000000..7ddeb28c2329
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__nonzero_offset.c
@@ -0,0 +1,25 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This tests a struct_ops BPF program returning a referenced kptr. The verifier
+ * should reject a program returning a modified referenced kptr.
+ */
+SEC("struct_ops/test_return_ref_kptr")
+__failure __msg("dereference of modified trusted_ptr_ ptr R0 off={{[0-9]+}} disallowed")
+struct task_struct *BPF_PROG(kptr_return_fail__nonzero_offset, int dummy,
+ struct task_struct *task, struct cgroup *cgrp)
+{
+ return (struct task_struct *)&task->jobctl;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_kptr_return = {
+ .test_return_ref_kptr = (void *)kptr_return_fail__nonzero_offset,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__wrong_type.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__wrong_type.c
new file mode 100644
index 000000000000..6a2dd5367802
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return_fail__wrong_type.c
@@ -0,0 +1,30 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This tests a struct_ops BPF program returning a referenced kptr. The verifier
+ * should reject a program returning a referenced kptr of the wrong type.
+ */
+SEC("struct_ops/test_return_ref_kptr")
+__failure __msg("At program exit the register R0 is not a known value (ptr_or_null_)")
+struct task_struct *BPF_PROG(kptr_return_fail__wrong_type, int dummy,
+ struct task_struct *task, struct cgroup *cgrp)
+{
+ struct task_struct *ret;
+
+ ret = (struct task_struct *)bpf_cgroup_acquire(cgrp);
+ bpf_task_release(task);
+
+ return ret;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_kptr_return = {
+ .test_return_ref_kptr = (void *)kptr_return_fail__wrong_type,
+};
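
Note: all of the kptr_return_fail__* variants carry their expected verdicts in
the __failure/__msg annotations, so the prog_tests side can stay generic.
Assuming the usual test_loader harness, the driver reduces to something like:

    #include "struct_ops_kptr_return.skel.h"
    #include "struct_ops_kptr_return_fail__wrong_type.skel.h"
    /* ...plus the other fail__ skeletons */

    void test_struct_ops_kptr_return(void)
    {
            RUN_TESTS(struct_ops_kptr_return);
            RUN_TESTS(struct_ops_kptr_return_fail__wrong_type);
            /* ...and likewise for the remaining fail variants */
    }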
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
new file mode 100644
index 000000000000..ccab3935aa42
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t tgid = 0;
+
+/* This is a test BPF program that uses struct_ops to access an argument
+ * that may be NULL. It checks that the verifier tracks PTR_MAYBE_NULL
+ * correctly and strips it only after an explicit NULL check.
+ */
+SEC("struct_ops/test_maybe_null")
+int BPF_PROG(test_maybe_null, int dummy,
+ struct task_struct *task)
+{
+ if (task)
+ tgid = task->tgid;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+ .test_maybe_null = (void *)test_maybe_null,
+};
+
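
Note: since the map lives in ".struct_ops.link", user space registers it
through a link rather than a plain map update; a minimal sketch (skeleton name
assumed):

    struct bpf_link *link = bpf_map__attach_struct_ops(skel->maps.testmod_1);

    if (!link)
            return -errno;  /* attach failed */
    /* ... have the testmod invoke test_maybe_null, check skel->bss->tgid ... */
    bpf_link__destroy(link);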
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
new file mode 100644
index 000000000000..8b5515f4f724
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t tgid = 0;
+
+SEC("struct_ops/test_maybe_null_struct_ptr")
+int BPF_PROG(test_maybe_null_struct_ptr, int dummy,
+ struct task_struct *task)
+{
+ tgid = task->tgid;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_struct_ptr = {
+ .test_maybe_null = (void *)test_maybe_null_struct_ptr,
+};
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_module.c b/tools/testing/selftests/bpf/progs/struct_ops_module.c
new file mode 100644
index 000000000000..71c420c3a5a6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_module.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+int test_1_result = 0;
+int test_2_result = 0;
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1)
+{
+ test_1_result = 0xdeadbeef;
+ return 0;
+}
+
+SEC("struct_ops/test_2")
+void BPF_PROG(test_2, int a, int b)
+{
+ test_2_result = a + b;
+}
+
+SEC("?struct_ops/test_3")
+int BPF_PROG(test_3, int a, int b)
+{
+ test_2_result = a + b + 3;
+ return a + b + 3;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2,
+ .data = 0x1,
+};
+
+SEC("struct_ops/test_2")
+void BPF_PROG(test_2_v2, int a, int b)
+{
+ test_2_result = a * b;
+}
+
+struct bpf_testmod_ops___v2 {
+ int (*test_1)(void);
+ void (*test_2)(int a, int b);
+ int (*test_maybe_null)(int dummy, struct task_struct *task);
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___v2 testmod_2 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2_v2,
+};
+
+struct bpf_testmod_ops___zeroed {
+ int (*test_1)(void);
+ void (*test_2)(int a, int b);
+ int (*test_maybe_null)(int dummy, struct task_struct *task);
+ void (*zeroed_op)(int a, int b);
+ int zeroed;
+};
+
+SEC("struct_ops/test_3")
+int BPF_PROG(zeroed_op)
+{
+ return 1;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___zeroed testmod_zeroed = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2_v2,
+ .zeroed_op = (void *)zeroed_op,
+};
+
+struct bpf_testmod_ops___incompatible {
+ int (*test_1)(void);
+ void (*test_2)(int *a);
+ int data;
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___incompatible testmod_incompatible = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2,
+ .data = 3,
+};
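
Note: the ___v2/___zeroed/___incompatible definitions rely on libbpf's
type-flavor convention: everything from the first "___" in a local type name is
ignored when resolving the kernel BTF type, so every variant here binds to the
kernel's bpf_testmod_ops. The zeroed flavor exercises members absent from the
kernel-side type, while testmod_incompatible is expected to fail to load, since
test_2's signature no longer matches. Schematically:

    bpf_testmod_ops___v2           -> resolves to bpf_testmod_ops
    bpf_testmod_ops___zeroed       -> same kernel type, extra members under test
    bpf_testmod_ops___incompatible -> same kernel type, mismatched layout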
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c
new file mode 100644
index 000000000000..5b23ea817f1f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define TRAMP(x) \
+ SEC("struct_ops/tramp_" #x) \
+ int BPF_PROG(tramp_ ## x, int a) \
+ { \
+ return a; \
+ }
+
+TRAMP(1)
+TRAMP(2)
+TRAMP(3)
+TRAMP(4)
+TRAMP(5)
+TRAMP(6)
+TRAMP(7)
+TRAMP(8)
+TRAMP(9)
+TRAMP(10)
+TRAMP(11)
+TRAMP(12)
+TRAMP(13)
+TRAMP(14)
+TRAMP(15)
+TRAMP(16)
+TRAMP(17)
+TRAMP(18)
+TRAMP(19)
+TRAMP(20)
+TRAMP(21)
+TRAMP(22)
+TRAMP(23)
+TRAMP(24)
+TRAMP(25)
+TRAMP(26)
+TRAMP(27)
+TRAMP(28)
+TRAMP(29)
+TRAMP(30)
+TRAMP(31)
+TRAMP(32)
+TRAMP(33)
+TRAMP(34)
+TRAMP(35)
+TRAMP(36)
+TRAMP(37)
+TRAMP(38)
+TRAMP(39)
+TRAMP(40)
+
+#define F_TRAMP(x) .tramp_ ## x = (void *)tramp_ ## x
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops multi_pages = {
+ F_TRAMP(1),
+ F_TRAMP(2),
+ F_TRAMP(3),
+ F_TRAMP(4),
+ F_TRAMP(5),
+ F_TRAMP(6),
+ F_TRAMP(7),
+ F_TRAMP(8),
+ F_TRAMP(9),
+ F_TRAMP(10),
+ F_TRAMP(11),
+ F_TRAMP(12),
+ F_TRAMP(13),
+ F_TRAMP(14),
+ F_TRAMP(15),
+ F_TRAMP(16),
+ F_TRAMP(17),
+ F_TRAMP(18),
+ F_TRAMP(19),
+ F_TRAMP(20),
+ F_TRAMP(21),
+ F_TRAMP(22),
+ F_TRAMP(23),
+ F_TRAMP(24),
+ F_TRAMP(25),
+ F_TRAMP(26),
+ F_TRAMP(27),
+ F_TRAMP(28),
+ F_TRAMP(29),
+ F_TRAMP(30),
+ F_TRAMP(31),
+ F_TRAMP(32),
+ F_TRAMP(33),
+ F_TRAMP(34),
+ F_TRAMP(35),
+ F_TRAMP(36),
+ F_TRAMP(37),
+ F_TRAMP(38),
+ F_TRAMP(39),
+ F_TRAMP(40),
+};
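
Note: each TRAMP(x)/F_TRAMP(x) pair just stamps out one identical op; for
reference, TRAMP(1) expands to:

    SEC("struct_ops/tramp_1")
    int BPF_PROG(tramp_1, int a)
    {
            return a;
    }

Forty of these give the struct_ops trampoline image enough entries to spill
past a single page, which is the case this test exists to cover.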
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c b/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c
new file mode 100644
index 000000000000..5d0937fa07be
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+int rand;
+int arr[1];
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1_turn_off)
+{
+ return arr[rand]; /* potentially way out of range access */
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops ops = {
+ .test_1 = (void *)test_1_turn_off,
+};
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c
new file mode 100644
index 000000000000..dbe646013811
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
+bool skip __attribute((__section__(".data"))) = false;
+#else
+bool skip = true;
+#endif
+
+void bpf_testmod_ops3_call_test_2(void) __ksym;
+
+int val_i, val_j;
+
+__noinline static int subprog2(int *a, int *b)
+{
+ return val_i + a[10] + b[20];
+}
+
+__noinline static int subprog1(int *a)
+{
+ /* stack size 200 bytes */
+ int b[50] = {};
+
+ b[20] = 2;
+ return subprog2(a, b);
+}
+
+
+SEC("struct_ops")
+int BPF_PROG(test_1)
+{
+ /* stack size 400 bytes */
+ int a[100] = {};
+
+ a[10] = 1;
+ val_i = subprog1(a);
+ bpf_testmod_ops3_call_test_2();
+ return 0;
+}
+
+SEC("struct_ops")
+int BPF_PROG(test_2)
+{
+ /* stack size 200 bytes */
+ int a[50] = {};
+
+ a[10] = 3;
+ val_j = subprog1(a);
+ return 0;
+}
+
+SEC(".struct_ops")
+struct bpf_testmod_ops3 testmod_1 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2,
+};
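
Note: the stack sizes are chosen so the call chain overflows the normal
512-byte BPF stack, forcing the private-stack machinery (x86/arm64 only, hence
the skip guard):

    test_1:     a[100] -> 400 bytes
    + subprog1: b[50]  -> 200 bytes
    + subprog2:           (params only)
    -----------------------------------
    ~600 bytes > 512-byte BPF stack limit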
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c
new file mode 100644
index 000000000000..3d89ad7cbe2a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
+bool skip __attribute((__section__(".data"))) = false;
+#else
+bool skip = true;
+#endif
+
+void bpf_testmod_ops3_call_test_2(void) __ksym;
+
+int val_i, val_j;
+
+__noinline static int subprog2(int *a, int *b)
+{
+ return val_i + a[10] + b[20];
+}
+
+__noinline static int subprog1(int *a)
+{
+ /* stack size 200 bytes */
+ int b[50] = {};
+
+ b[20] = 2;
+ return subprog2(a, b);
+}
+
+
+SEC("struct_ops")
+int BPF_PROG(test_1)
+{
+ /* stack size 100 bytes */
+ int a[25] = {};
+
+ a[10] = 1;
+ val_i = subprog1(a);
+ bpf_testmod_ops3_call_test_2();
+ return 0;
+}
+
+SEC("struct_ops")
+int BPF_PROG(test_2)
+{
+ /* stack size 400 bytes */
+ int a[100] = {};
+
+ a[10] = 3;
+ val_j = subprog1(a);
+ return 0;
+}
+
+SEC(".struct_ops")
+struct bpf_testmod_ops3 testmod_1 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
new file mode 100644
index 000000000000..b1f6d7e5a8e5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
+bool skip __attribute((__section__(".data"))) = false;
+#else
+bool skip = true;
+#endif
+
+void bpf_testmod_ops3_call_test_1(void) __ksym;
+
+int val_i, val_j;
+
+__noinline static int subprog2(int *a, int *b)
+{
+ return val_i + a[1] + b[20];
+}
+
+__noinline static int subprog1(int *a)
+{
+ /* stack size 400 bytes */
+ int b[100] = {};
+
+ b[20] = 2;
+ return subprog2(a, b);
+}
+
+
+SEC("struct_ops")
+int BPF_PROG(test_1)
+{
+ /* stack size 20 bytes */
+ int a[5] = {};
+
+ a[1] = 1;
+ val_j += subprog1(a);
+ bpf_testmod_ops3_call_test_1();
+ return 0;
+}
+
+SEC(".struct_ops")
+struct bpf_testmod_ops3 testmod_1 = {
+ .test_1 = (void *)test_1,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c b/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c
new file mode 100644
index 000000000000..9c0a65466356
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c
@@ -0,0 +1,31 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__attribute__((nomerge)) extern void bpf_task_release(struct task_struct *p) __ksym;
+
+/* This is a test BPF program that uses struct_ops to access a referenced
+ * kptr argument. It checks that the verifier
+ * 1) recognizes the task as a referenced object (i.e., ref_obj_id > 0), and
+ * 2) allows the reference to be released on multiple control-flow paths,
+ * as long as it is released exactly once.
+ */
+SEC("struct_ops/test_refcounted")
+int BPF_PROG(refcounted, int dummy, struct task_struct *task)
+{
+ if (dummy == 1)
+ bpf_task_release(task);
+ else
+ bpf_task_release(task);
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_refcounted = {
+ .test_refcounted = (void *)refcounted,
+};
+
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__global_subprog.c b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__global_subprog.c
new file mode 100644
index 000000000000..ae074aa62852
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__global_subprog.c
@@ -0,0 +1,39 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+extern void bpf_task_release(struct task_struct *p) __ksym;
+
+__noinline int subprog_release(__u64 *ctx __arg_ctx)
+{
+ struct task_struct *task = (struct task_struct *)ctx[1];
+ int dummy = (int)ctx[0];
+
+ bpf_task_release(task);
+
+ return dummy + 1;
+}
+
+/* Test that the verifier rejects a program that contains a global
+ * subprogram with a referenced kptr argument.
+ */
+SEC("struct_ops/test_refcounted")
+__failure __log_level(2)
+__msg("Validating subprog_release() func#1...")
+__msg("invalid bpf_context access off=8. Reference may already be released")
+int refcounted_fail__global_subprog(unsigned long long *ctx)
+{
+ struct task_struct *task = (struct task_struct *)ctx[1];
+
+ bpf_task_release(task);
+
+ return subprog_release(ctx);
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_ref_acquire = {
+ .test_refcounted = (void *)refcounted_fail__global_subprog,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__ref_leak.c b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__ref_leak.c
new file mode 100644
index 000000000000..e945b1a04294
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__ref_leak.c
@@ -0,0 +1,22 @@
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Test that the verifier rejects a program that acquires a referenced
+ * kptr through context without releasing the reference.
+ */
+SEC("struct_ops/test_refcounted")
+__failure __msg("Unreleased reference id=1 alloc_insn=0")
+int BPF_PROG(refcounted_fail__ref_leak, int dummy,
+ struct task_struct *task)
+{
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_ref_acquire = {
+ .test_refcounted = (void *)refcounted_fail__ref_leak,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__tail_call.c b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__tail_call.c
new file mode 100644
index 000000000000..3b125025a1f2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_refcounted_fail__tail_call.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} prog_array SEC(".maps");
+
+/* Test that the verifier rejects tail calls in a program that takes a
+ * referenced kptr ("__ref") argument.
+ */
+SEC("struct_ops/test_refcounted")
+__failure __msg("program with __ref argument cannot tail call")
+int refcounted_fail__tail_call(unsigned long long *ctx)
+{
+ struct task_struct *task = (struct task_struct *)ctx[1];
+
+ bpf_task_release(task);
+ bpf_tail_call(ctx, &prog_array, 0);
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_ref_acquire = {
+ .test_refcounted = (void *)refcounted_fail__tail_call,
+};
+
diff --git a/tools/testing/selftests/bpf/progs/summarization.c b/tools/testing/selftests/bpf/progs/summarization.c
new file mode 100644
index 000000000000..f89effe82c9e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/summarization.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+__noinline
+long changes_pkt_data(struct __sk_buff *sk)
+{
+ return bpf_skb_pull_data(sk, 0);
+}
+
+__noinline __weak
+long does_not_change_pkt_data(struct __sk_buff *sk)
+{
+ return 0;
+}
+
+SEC("?tc")
+int main_changes_with_subprogs(struct __sk_buff *sk)
+{
+ changes_pkt_data(sk);
+ does_not_change_pkt_data(sk);
+ return 0;
+}
+
+SEC("?tc")
+int main_changes(struct __sk_buff *sk)
+{
+ bpf_skb_pull_data(sk, 0);
+ return 0;
+}
+
+SEC("?tc")
+int main_does_not_change(struct __sk_buff *sk)
+{
+ return 0;
+}
+
+__noinline
+long might_sleep(struct pt_regs *ctx __arg_ctx)
+{
+ int i;
+
+ bpf_copy_from_user(&i, sizeof(i), NULL);
+ return i;
+}
+
+__noinline __weak
+long does_not_sleep(struct pt_regs *ctx __arg_ctx)
+{
+ return 0;
+}
+
+SEC("?uprobe.s")
+int main_might_sleep_with_subprogs(struct pt_regs *ctx)
+{
+ might_sleep(ctx);
+ does_not_sleep(ctx);
+ return 0;
+}
+
+SEC("?uprobe.s")
+int main_might_sleep(struct pt_regs *ctx)
+{
+ int i;
+
+ bpf_copy_from_user(&i, sizeof(i), NULL);
+ return i;
+}
+
+SEC("?uprobe.s")
+int main_does_not_sleep(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/summarization_freplace.c b/tools/testing/selftests/bpf/progs/summarization_freplace.c
new file mode 100644
index 000000000000..935f00e0e9ea
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/summarization_freplace.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("?freplace")
+long changes_pkt_data(struct __sk_buff *sk)
+{
+ return bpf_skb_pull_data(sk, 0);
+}
+
+SEC("?freplace")
+long does_not_change_pkt_data(struct __sk_buff *sk)
+{
+ return 0;
+}
+
+SEC("?freplace")
+long might_sleep(struct pt_regs *ctx)
+{
+ int i;
+
+ bpf_copy_from_user(&i, sizeof(i), NULL);
+ return i;
+}
+
+SEC("?freplace")
+long does_not_sleep(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/syscall.c b/tools/testing/selftests/bpf/progs/syscall.c
new file mode 100644
index 000000000000..b698cc62a371
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/syscall.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <../../../tools/include/linux/filter.h>
+#include <linux/btf.h>
+#include <string.h>
+#include <errno.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct bpf_map {
+ int id;
+} __attribute__((preserve_access_index));
+
+struct args {
+ __u64 log_buf;
+ __u32 log_size;
+ int max_entries;
+ int map_fd;
+ int prog_fd;
+ int btf_fd;
+};
+
+#define BTF_INFO_ENC(kind, kind_flag, vlen) \
+ ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
+#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
+#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
+ ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
+#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
+ BTF_INT_ENC(encoding, bits_offset, bits)
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, union bpf_attr);
+ __uint(max_entries, 1);
+} bpf_attr_array SEC(".maps");
+
+struct inner_map_type {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, 4);
+ __uint(value_size, 4);
+ __uint(max_entries, 1);
+} inner_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+ __array(values, struct inner_map_type);
+} outer_array_map SEC(".maps") = {
+ .values = {
+ [0] = &inner_map,
+ },
+};
+
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64) (unsigned long) ptr;
+}
+
+static int btf_load(void)
+{
+ struct btf_blob {
+ struct btf_header btf_hdr;
+ __u32 types[8];
+ __u32 str;
+ } raw_btf = {
+ .btf_hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_len = sizeof(raw_btf.types),
+ .str_off = offsetof(struct btf_blob, str) - offsetof(struct btf_blob, types),
+ .str_len = sizeof(raw_btf.str),
+ },
+ .types = {
+ /* long */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [1] */
+ /* unsigned long */
+ BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
+ },
+ };
+ static union bpf_attr btf_load_attr = {
+ .btf_size = sizeof(raw_btf),
+ };
+
+ btf_load_attr.btf = (long)&raw_btf;
+ return bpf_sys_bpf(BPF_BTF_LOAD, &btf_load_attr, sizeof(btf_load_attr));
+}
+
+SEC("syscall")
+int load_prog(struct args *ctx)
+{
+ static char license[] = "GPL";
+ static struct bpf_insn insns[] = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ static union bpf_attr map_create_attr = {
+ .map_type = BPF_MAP_TYPE_HASH,
+ .key_size = 8,
+ .value_size = 8,
+ .btf_key_type_id = 1,
+ .btf_value_type_id = 2,
+ };
+ static union bpf_attr map_update_attr = { .map_fd = 1, };
+ static __u64 key = 12;
+ static __u64 value = 34;
+ static union bpf_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .insn_cnt = ARRAY_SIZE(insns),
+ };
+ int ret;
+
+ ret = btf_load();
+ if (ret <= 0)
+ return ret;
+
+ ctx->btf_fd = ret;
+ map_create_attr.max_entries = ctx->max_entries;
+ map_create_attr.btf_fd = ret;
+
+ prog_load_attr.license = ptr_to_u64(license);
+ prog_load_attr.insns = ptr_to_u64(insns);
+ prog_load_attr.log_buf = ctx->log_buf;
+ prog_load_attr.log_size = ctx->log_size;
+ prog_load_attr.log_level = 1;
+
+ ret = bpf_sys_bpf(BPF_MAP_CREATE, &map_create_attr, sizeof(map_create_attr));
+ if (ret <= 0)
+ return ret;
+ ctx->map_fd = ret;
+ insns[3].imm = ret;
+
+ map_update_attr.map_fd = ret;
+ map_update_attr.key = ptr_to_u64(&key);
+ map_update_attr.value = ptr_to_u64(&value);
+ ret = bpf_sys_bpf(BPF_MAP_UPDATE_ELEM, &map_update_attr, sizeof(map_update_attr));
+ if (ret < 0)
+ return ret;
+
+ ret = bpf_sys_bpf(BPF_PROG_LOAD, &prog_load_attr, sizeof(prog_load_attr));
+ if (ret <= 0)
+ return ret;
+ ctx->prog_fd = ret;
+ return 1;
+}
+
+SEC("syscall")
+int update_outer_map(void *ctx)
+{
+ int zero = 0, ret = 0, outer_fd = -1, inner_fd = -1, err;
+ const int attr_sz = sizeof(union bpf_attr);
+ union bpf_attr *attr;
+
+ attr = bpf_map_lookup_elem((struct bpf_map *)&bpf_attr_array, &zero);
+ if (!attr)
+ goto out;
+
+ memset(attr, 0, attr_sz);
+ attr->map_id = ((struct bpf_map *)&outer_array_map)->id;
+ outer_fd = bpf_sys_bpf(BPF_MAP_GET_FD_BY_ID, attr, attr_sz);
+ if (outer_fd < 0)
+ goto out;
+
+ memset(attr, 0, attr_sz);
+ attr->map_type = BPF_MAP_TYPE_ARRAY;
+ attr->key_size = 4;
+ attr->value_size = 4;
+ attr->max_entries = 1;
+ inner_fd = bpf_sys_bpf(BPF_MAP_CREATE, attr, attr_sz);
+ if (inner_fd < 0)
+ goto out;
+
+ memset(attr, 0, attr_sz);
+ attr->map_fd = outer_fd;
+ attr->key = ptr_to_u64(&zero);
+ attr->value = ptr_to_u64(&inner_fd);
+ err = bpf_sys_bpf(BPF_MAP_UPDATE_ELEM, attr, attr_sz);
+ if (err)
+ goto out;
+
+ memset(attr, 0, attr_sz);
+ attr->map_fd = outer_fd;
+ attr->key = ptr_to_u64(&zero);
+ err = bpf_sys_bpf(BPF_MAP_DELETE_ELEM, attr, attr_sz);
+ if (err)
+ goto out;
+ ret = 1;
+out:
+ if (inner_fd >= 0)
+ bpf_sys_close(inner_fd);
+ if (outer_fd >= 0)
+ bpf_sys_close(outer_fd);
+ return ret;
+}
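
Note: syscall programs run synchronously via BPF_PROG_TEST_RUN, with struct
args passed through ctx_in; a minimal driver sketch (buffer and skeleton names
assumed):

    char log[4096];
    struct args ctx = {
            .log_buf  = (__u64)(unsigned long)log,
            .log_size = sizeof(log),
            .max_entries = 1,
    };
    LIBBPF_OPTS(bpf_test_run_opts, opts,
            .ctx_in = &ctx,
            .ctx_size_in = sizeof(ctx),
    );
    int err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.load_prog),
                                     &opts);
    /* on success: err == 0, opts.retval == 1, ctx.{btf,map,prog}_fd are set */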
diff --git a/tools/testing/selftests/bpf/progs/tailcall1.c b/tools/testing/selftests/bpf/progs/tailcall1.c
index 7115bcefbe8a..8159a0b4a69a 100644
--- a/tools/testing/selftests/bpf/progs/tailcall1.c
+++ b/tools/testing/selftests/bpf/progs/tailcall1.c
@@ -11,8 +11,8 @@ struct {
} jmp_table SEC(".maps");
#define TAIL_FUNC(x) \
- SEC("classifier/" #x) \
- int bpf_func_##x(struct __sk_buff *skb) \
+ SEC("tc") \
+ int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
@@ -20,7 +20,7 @@ TAIL_FUNC(0)
TAIL_FUNC(1)
TAIL_FUNC(2)
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
/* Multiple locations to make sure we patch
@@ -45,4 +45,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
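
Note: the classifier/N -> tc renames across these tailcall progs follow libbpf
1.0's strict section-name handling: a suffix after the section type no longer
identifies a program, so the prog_tests side is expected to look programs up by
their C function name instead, along the lines of:

    struct bpf_program *prog;
    char name[32];

    snprintf(name, sizeof(name), "classifier_%d", i);
    prog = bpf_object__find_program_by_name(obj, name);

Dropping "int _version SEC(\"version\")" is part of the same cleanup; the
legacy version section is no longer needed.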
diff --git a/tools/testing/selftests/bpf/progs/tailcall2.c b/tools/testing/selftests/bpf/progs/tailcall2.c
index 0431e4fe7efd..a5ff53e61702 100644
--- a/tools/testing/selftests/bpf/progs/tailcall2.c
+++ b/tools/testing/selftests/bpf/progs/tailcall2.c
@@ -10,41 +10,41 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 1);
return 0;
}
-SEC("classifier/1")
-int bpf_func_1(struct __sk_buff *skb)
+SEC("tc")
+int classifier_1(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 2);
return 1;
}
-SEC("classifier/2")
-int bpf_func_2(struct __sk_buff *skb)
+SEC("tc")
+int classifier_2(struct __sk_buff *skb)
{
return 2;
}
-SEC("classifier/3")
-int bpf_func_3(struct __sk_buff *skb)
+SEC("tc")
+int classifier_3(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 4);
return 3;
}
-SEC("classifier/4")
-int bpf_func_4(struct __sk_buff *skb)
+SEC("tc")
+int classifier_4(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 3);
return 4;
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 0);
@@ -56,4 +56,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall3.c b/tools/testing/selftests/bpf/progs/tailcall3.c
index 739dc2a51e74..f60bcd7b8d4b 100644
--- a/tools/testing/selftests/bpf/progs/tailcall3.c
+++ b/tools/testing/selftests/bpf/progs/tailcall3.c
@@ -10,17 +10,17 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int count;
+int count = 0;
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
count++;
bpf_tail_call_static(skb, &jmp_table, 0);
return 1;
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 0);
@@ -28,4 +28,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall4.c b/tools/testing/selftests/bpf/progs/tailcall4.c
index f82075b47d7d..a56bbc2313ca 100644
--- a/tools/testing/selftests/bpf/progs/tailcall4.c
+++ b/tools/testing/selftests/bpf/progs/tailcall4.c
@@ -10,11 +10,11 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int selector;
+int selector = 0;
#define TAIL_FUNC(x) \
- SEC("classifier/" #x) \
- int bpf_func_##x(struct __sk_buff *skb) \
+ SEC("tc") \
+ int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
@@ -22,7 +22,7 @@ TAIL_FUNC(0)
TAIL_FUNC(1)
TAIL_FUNC(2)
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call(skb, &jmp_table, selector);
@@ -30,4 +30,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall5.c b/tools/testing/selftests/bpf/progs/tailcall5.c
index ce5450744fd4..8d03496eb6ca 100644
--- a/tools/testing/selftests/bpf/progs/tailcall5.c
+++ b/tools/testing/selftests/bpf/progs/tailcall5.c
@@ -10,11 +10,11 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int selector;
+int selector = 0;
#define TAIL_FUNC(x) \
- SEC("classifier/" #x) \
- int bpf_func_##x(struct __sk_buff *skb) \
+ SEC("tc") \
+ int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
@@ -22,7 +22,7 @@ TAIL_FUNC(0)
TAIL_FUNC(1)
TAIL_FUNC(2)
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
int idx = 0;
@@ -37,4 +37,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall6.c b/tools/testing/selftests/bpf/progs/tailcall6.c
new file mode 100644
index 000000000000..d77b8abd62f3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall6.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int count, which;
+
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
+{
+ count++;
+ if (__builtin_constant_p(which))
+ __bpf_unreachable();
+ bpf_tail_call(skb, &jmp_table, which);
+ return 1;
+}
+
+SEC("tc")
+int entry(struct __sk_buff *skb)
+{
+ if (__builtin_constant_p(which))
+ __bpf_unreachable();
+ bpf_tail_call(skb, &jmp_table, which);
+ return 0;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
index 0103f3dd9f02..8c91428deb90 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
@@ -10,8 +10,8 @@ struct {
} jmp_table SEC(".maps");
#define TAIL_FUNC(x) \
- SEC("classifier/" #x) \
- int bpf_func_##x(struct __sk_buff *skb) \
+ SEC("tc") \
+ int classifier_##x(struct __sk_buff *skb) \
{ \
return x; \
}
@@ -26,7 +26,7 @@ int subprog_tail(struct __sk_buff *skb)
return skb->len * 2;
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 1);
@@ -35,4 +35,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
index 7b1c04183824..ce97d141daee 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
@@ -20,16 +20,16 @@ int subprog_tail(struct __sk_buff *skb)
return 1;
}
-static volatile int count;
+int count = 0;
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
count++;
return subprog_tail(skb);
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
bpf_tail_call_static(skb, &jmp_table, 0);
@@ -38,4 +38,3 @@ int entry(struct __sk_buff *skb)
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
index 0d5482bea6c9..99c8d1d8a187 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
@@ -2,6 +2,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"
+#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
@@ -20,6 +21,8 @@ int subprog_tail2(struct __sk_buff *skb)
else
bpf_tail_call_static(skb, &jmp_table, 1);
+ __sink(arr[sizeof(arr) - 1]);
+
return skb->len;
}
@@ -30,32 +33,39 @@ int subprog_tail(struct __sk_buff *skb)
bpf_tail_call_static(skb, &jmp_table, 0);
+ __sink(arr[sizeof(arr) - 1]);
+
return skb->len * 2;
}
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
volatile char arr[128] = {};
+ __sink(arr[sizeof(arr) - 1]);
+
return subprog_tail2(skb);
}
-SEC("classifier/1")
-int bpf_func_1(struct __sk_buff *skb)
+SEC("tc")
+int classifier_1(struct __sk_buff *skb)
{
volatile char arr[128] = {};
+ __sink(arr[sizeof(arr) - 1]);
+
return skb->len * 3;
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
volatile char arr[128] = {};
+ __sink(arr[sizeof(arr) - 1]);
+
return subprog_tail(skb);
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
index 9a1b166b7fbe..a017d6b2f1dd 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
@@ -3,17 +3,35 @@
#include <bpf/bpf_helpers.h>
struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} nop_table SEC(".maps");
+
+struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 3);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
-static volatile int count;
+int count = 0;
+int noise = 0;
+
+static __always_inline int subprog_noise(void)
+{
+ __u32 key = 0;
+
+ bpf_map_lookup_elem(&nop_table, &key);
+ return 0;
+}
__noinline
int subprog_tail_2(struct __sk_buff *skb)
{
+ if (noise)
+ subprog_noise();
bpf_tail_call_static(skb, &jmp_table, 2);
return skb->len * 3;
}
@@ -32,30 +50,29 @@ int subprog_tail(struct __sk_buff *skb)
return skb->len;
}
-SEC("classifier/1")
-int bpf_func_1(struct __sk_buff *skb)
+SEC("tc")
+int classifier_1(struct __sk_buff *skb)
{
return subprog_tail_2(skb);
}
-SEC("classifier/2")
-int bpf_func_2(struct __sk_buff *skb)
+SEC("tc")
+int classifier_2(struct __sk_buff *skb)
{
count++;
return subprog_tail_2(skb);
}
-SEC("classifier/0")
-int bpf_func_0(struct __sk_buff *skb)
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
{
return subprog_tail_1(skb);
}
-SEC("classifier")
+SEC("tc")
int entry(struct __sk_buff *skb)
{
return subprog_tail(skb);
}
char __license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c
new file mode 100644
index 000000000000..4a9f63bea66c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define __unused __attribute__((unused))
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int done = 0;
+
+SEC("tc")
+int classifier_0(struct __sk_buff *skb __unused)
+{
+ done = 1;
+ return 0;
+}
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+ /* Don't propagate the constant to the caller */
+ volatile int ret = 1;
+
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ return ret;
+}
+
+SEC("tc")
+int entry(struct __sk_buff *skb)
+{
+ /* Have data on the stack whose size is not a multiple of 8 */
+ volatile char arr[1] = {};
+
+ __sink(arr[0]);
+
+ return subprog_tail(skb);
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c
new file mode 100644
index 000000000000..8436c6729167
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Leon Hwang */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int count = 0;
+
+SEC("fentry/subprog_tail")
+int BPF_PROG(fentry, struct sk_buff *skb)
+{
+ count++;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c
new file mode 100644
index 000000000000..fe16412c6e6e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Leon Hwang */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int count = 0;
+
+SEC("fexit/subprog_tail")
+int BPF_PROG(fexit, struct sk_buff *skb)
+{
+ count++;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c
new file mode 100644
index 000000000000..d556b19413d7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_legacy.h"
+#include "bpf_test_utils.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int count = 0;
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ return 0;
+}
+
+SEC("tc")
+int entry(struct __sk_buff *skb)
+{
+ int ret = 1;
+
+ clobber_regs_stack();
+
+ count++;
+ subprog_tail(skb);
+ subprog_tail(skb);
+
+ return ret;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c
new file mode 100644
index 000000000000..ae94c9c70ab7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_test_utils.h"
+
+int classifier_0(struct __sk_buff *skb);
+int classifier_1(struct __sk_buff *skb);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 2);
+ __uint(key_size, sizeof(__u32));
+ __array(values, void (void));
+} jmp_table SEC(".maps") = {
+ .values = {
+ [0] = (void *) &classifier_0,
+ [1] = (void *) &classifier_1,
+ },
+};
+
+int count0 = 0;
+int count1 = 0;
+
+static __noinline
+int subprog_tail0(struct __sk_buff *skb)
+{
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ return 0;
+}
+
+__auxiliary
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
+{
+ count0++;
+ subprog_tail0(skb);
+ return 0;
+}
+
+static __noinline
+int subprog_tail1(struct __sk_buff *skb)
+{
+ bpf_tail_call_static(skb, &jmp_table, 1);
+ return 0;
+}
+
+__auxiliary
+SEC("tc")
+int classifier_1(struct __sk_buff *skb)
+{
+ count1++;
+ subprog_tail1(skb);
+ return 0;
+}
+
+__success
+__retval(33)
+SEC("tc")
+int tailcall_bpf2bpf_hierarchy_2(struct __sk_buff *skb)
+{
+ int ret = 0;
+
+ clobber_regs_stack();
+
+ subprog_tail0(skb);
+ subprog_tail1(skb);
+
+ __sink(ret);
+ return (count1 << 16) | count0;
+}
+
+char __license[] SEC("license") = "GPL";
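
Note: the expected __retval(33) is worth decoding. The return value packs both
counters as (count1 << 16) | count0, so 33 means count0 == 33 and count1 == 0:
classifier_0 keeps re-entering itself through jmp_table[0] until the shared
tail-call budget (MAX_TAIL_CALL_CNT, 33 on current kernels) is exhausted, so by
the time subprog_tail1() could reach classifier_1 no budget remains and count1
stays 0:

    retval = (count1 << 16) | count0
           = (0 << 16) | 33
           = 33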
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c
new file mode 100644
index 000000000000..56b6b0099840
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_test_utils.h"
+
+int classifier_0(struct __sk_buff *skb);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __array(values, void (void));
+} jmp_table0 SEC(".maps") = {
+ .values = {
+ [0] = (void *) &classifier_0,
+ },
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __array(values, void (void));
+} jmp_table1 SEC(".maps") = {
+ .values = {
+ [0] = (void *) &classifier_0,
+ },
+};
+
+int count = 0;
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb, void *jmp_table)
+{
+ bpf_tail_call_static(skb, jmp_table, 0);
+ return 0;
+}
+
+__auxiliary
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
+{
+ count++;
+ subprog_tail(skb, &jmp_table0);
+ subprog_tail(skb, &jmp_table1);
+ return count;
+}
+
+__success
+__retval(33)
+SEC("tc")
+int tailcall_bpf2bpf_hierarchy_3(struct __sk_buff *skb)
+{
+ int ret = 0;
+
+ clobber_regs_stack();
+
+ bpf_tail_call_static(skb, &jmp_table0, 0);
+
+ __sink(ret);
+ return ret;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c
new file mode 100644
index 000000000000..5261395713cd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Leon Hwang */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_test_utils.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int count = 0;
+
+static __noinline
+int subprog_tail(void *ctx)
+{
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
+
+SEC("fentry/dummy")
+int BPF_PROG(fentry, struct sk_buff *skb)
+{
+ clobber_regs_stack();
+
+ count++;
+ subprog_tail(ctx);
+ subprog_tail(ctx);
+
+ return 0;
+}
+
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_fail.c b/tools/testing/selftests/bpf/progs/tailcall_fail.c
new file mode 100644
index 000000000000..bc77921d2bb0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_fail.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+extern void bpf_rcu_read_lock(void) __ksym;
+extern void bpf_rcu_read_unlock(void) __ksym;
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+
+private(A) struct bpf_spin_lock lock;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 3);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_tail_call_spin_lock(struct __sk_buff *ctx)
+{
+ bpf_spin_lock(&lock);
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("tail_call cannot be used inside bpf_rcu_read_lock-ed region")
+int reject_tail_call_rcu_lock(struct __sk_buff *ctx)
+{
+ bpf_rcu_read_lock();
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("tail_call cannot be used inside bpf_preempt_disable-ed region")
+int reject_tail_call_preempt_lock(struct __sk_buff *ctx)
+{
+ bpf_guard_preempt();
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("tail_call would lead to reference leak")
+int reject_tail_call_ref(struct __sk_buff *ctx)
+{
+ struct foo { int i; } *p;
+
+ p = bpf_obj_new(typeof(*p));
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_freplace.c b/tools/testing/selftests/bpf/progs/tailcall_freplace.c
new file mode 100644
index 000000000000..6713b809df44
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_freplace.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int count = 0;
+
+SEC("freplace")
+int entry_freplace(struct __sk_buff *skb)
+{
+ count++;
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ return count;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_poke.c b/tools/testing/selftests/bpf/progs/tailcall_poke.c
new file mode 100644
index 000000000000..c78b94b75e83
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_poke.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+SEC("?fentry/bpf_fentry_test1")
+int BPF_PROG(test, int a)
+{
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(call1, int a)
+{
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(call2, int a)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_common.h b/tools/testing/selftests/bpf/progs/task_kfunc_common.h
new file mode 100644
index 000000000000..e9c4fea7a4bb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_common.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _TASK_KFUNC_COMMON_H
+#define _TASK_KFUNC_COMMON_H
+
+#include <errno.h>
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct __tasks_kfunc_map_value {
+ struct task_struct __kptr * task;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct __tasks_kfunc_map_value);
+ __uint(max_entries, 1);
+} __tasks_kfunc_map SEC(".maps");
+
+struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
+struct task_struct *bpf_task_from_vpid(s32 vpid) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+static inline struct __tasks_kfunc_map_value *tasks_kfunc_map_value_lookup(struct task_struct *p)
+{
+ s32 pid;
+ long status;
+
+ status = bpf_probe_read_kernel(&pid, sizeof(pid), &p->pid);
+ if (status)
+ return NULL;
+
+ return bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
+}
+
+static inline int tasks_kfunc_map_insert(struct task_struct *p)
+{
+ struct __tasks_kfunc_map_value local, *v;
+ long status;
+ struct task_struct *acquired, *old;
+ s32 pid;
+
+ status = bpf_probe_read_kernel(&pid, sizeof(pid), &p->pid);
+ if (status)
+ return status;
+
+ local.task = NULL;
+ status = bpf_map_update_elem(&__tasks_kfunc_map, &pid, &local, BPF_NOEXIST);
+ if (status)
+ return status;
+
+ v = bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
+ if (!v) {
+ bpf_map_delete_elem(&__tasks_kfunc_map, &pid);
+ return -ENOENT;
+ }
+
+ acquired = bpf_task_acquire(p);
+ if (!acquired)
+ return -ENOENT;
+
+ old = bpf_kptr_xchg(&v->task, acquired);
+ if (old) {
+ bpf_task_release(old);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+#endif /* _TASK_KFUNC_COMMON_H */
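
Note: tasks_kfunc_map_insert() packages the canonical acquire/kptr_xchg/release
dance for storing a task kptr in a map; a well-formed caller (this is the
pattern the success and failure progs below build on):

    SEC("tp_btf/task_newtask")
    int BPF_PROG(store_task, struct task_struct *task, u64 clone_flags)
    {
            /* On success the map now owns a reference to @task;
             * any displaced old kptr has already been released. */
            return tasks_kfunc_map_insert(task);
    }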
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
new file mode 100644
index 000000000000..4c07ea193f72
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+#include "task_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(task_newtask,
+ * TP_PROTO(struct task_struct *p, u64 clone_flags)
+ */
+
+static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *task)
+{
+ int status;
+
+ status = tasks_kfunc_map_insert(task);
+ if (status)
+ return NULL;
+
+ return tasks_kfunc_map_value_lookup(task);
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+ struct __tasks_kfunc_map_value *v;
+
+ v = insert_lookup_task(task);
+ if (!v)
+ return 0;
+
+ /* Can't invoke bpf_task_acquire() on an untrusted pointer. */
+ acquired = bpf_task_acquire(v->task);
+ if (!acquired)
+ return 0;
+
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("arg#0 pointer type STRUCT task_struct must point")
+int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired, *stack_task = (struct task_struct *)&clone_flags;
+
+ /* Can't invoke bpf_task_acquire() on a random frame pointer. */
+ acquired = bpf_task_acquire((struct task_struct *)&stack_task);
+ if (!acquired)
+ return 0;
+
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("kretprobe/free_task")
+__failure __msg("calling kernel function bpf_task_acquire is not allowed")
+int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ /* Can't call bpf_task_acquire() or bpf_task_release() in an untrusted prog. */
+ acquired = bpf_task_acquire(task);
+ if (!acquired)
+ return 0;
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("kretprobe/free_task")
+__failure __msg("calling kernel function bpf_task_acquire is not allowed")
+int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe_rcu, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ bpf_rcu_read_lock();
+ if (!task) {
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+ /* Can't call bpf_task_acquire() or bpf_task_release() in an untrusted prog. */
+ acquired = bpf_task_acquire(task);
+ if (acquired)
+ bpf_task_release(acquired);
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ /* Can't invoke bpf_task_acquire() on a NULL pointer. */
+ acquired = bpf_task_acquire(NULL);
+ if (!acquired)
+ return 0;
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Unreleased reference")
+int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_acquire(task);
+
+ /* Acquired task is never released. */
+ __sink(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Unreleased reference")
+int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *kptr;
+ struct __tasks_kfunc_map_value *v;
+
+ v = insert_lookup_task(task);
+ if (!v)
+ return 0;
+
+ kptr = bpf_kptr_xchg(&v->task, NULL);
+ if (!kptr)
+ return 0;
+
+ /* Kptr retrieved from map is never released. */
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(task_kfunc_acquire_release_no_null_check, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_acquire(task);
+ /* Can't invoke bpf_task_release() on an acquired task without a NULL check. */
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags)
+{
+ struct __tasks_kfunc_map_value *v;
+
+ v = insert_lookup_task(task);
+ if (!v)
+ return 0;
+
+ /* Can't invoke bpf_task_release() on an untrusted pointer. */
+ bpf_task_release(v->task);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("arg#0 pointer type STRUCT task_struct must point")
+int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired = (struct task_struct *)&clone_flags;
+
+ /* Cannot release random frame pointer. */
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags)
+{
+ struct __tasks_kfunc_map_value local, *v;
+ long status;
+ struct task_struct *acquired, *old;
+ s32 pid;
+
+ status = bpf_probe_read_kernel(&pid, sizeof(pid), &task->pid);
+ if (status)
+ return 0;
+
+ local.task = NULL;
+ status = bpf_map_update_elem(&__tasks_kfunc_map, &pid, &local, BPF_NOEXIST);
+ if (status)
+ return status;
+
+ v = bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
+ if (!v)
+ return -ENOENT;
+
+ acquired = bpf_task_acquire(task);
+ if (!acquired)
+ return -EEXIST;
+
+ old = bpf_kptr_xchg(&v->task, acquired);
+
+ /* old cannot be passed to bpf_task_release() without a NULL check. */
+ bpf_task_release(old);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("release kernel function bpf_task_release expects")
+int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_flags)
+{
+ /* Cannot release trusted task pointer which was not acquired. */
+ bpf_task_release(task);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_from_pid(task->pid);
+
+ /* Releasing bpf_task_from_pid() lookup without a NULL check. */
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(task_kfunc_from_vpid_no_null_check, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_from_vpid(task->pid);
+
+ /* Releasing bpf_task_from_vpid() lookup without a NULL check. */
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("lsm/task_free")
+__failure __msg("R1 must be a rcu pointer")
+int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task)
+{
+ struct task_struct *acquired;
+
+ /* The argument of the LSM task_free hook is untrusted. */
+ acquired = bpf_task_acquire(task);
+ if (!acquired)
+ return 0;
+
+ bpf_task_release(acquired);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("access beyond the end of member comm")
+int BPF_PROG(task_access_comm1, struct task_struct *task, u64 clone_flags)
+{
+ bpf_strncmp(task->comm, 17, "foo");
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("access beyond the end of member comm")
+int BPF_PROG(task_access_comm2, struct task_struct *task, u64 clone_flags)
+{
+ bpf_strncmp(task->comm + 1, 16, "foo");
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("write into memory")
+int BPF_PROG(task_access_comm3, struct task_struct *task, u64 clone_flags)
+{
+ bpf_probe_read_kernel(task->comm, 16, task->comm);
+ return 0;
+}
+
+SEC("fentry/__set_task_comm")
+__failure __msg("R1 type=ptr_ expected")
+int BPF_PROG(task_access_comm4, struct task_struct *task, const char *buf, bool exec)
+{
+ /*
+ * task->comm is a legacy ptr_to_btf_id. The verifier cannot guarantee
+ * its safety. Hence it cannot be accessed with normal load insns.
+ */
+ bpf_strncmp(task->comm, 16, "foo");
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+__failure __msg("R1 must be referenced or trusted")
+int BPF_PROG(task_kfunc_release_in_map, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *local;
+ struct __tasks_kfunc_map_value *v;
+
+ if (tasks_kfunc_map_insert(task))
+ return 0;
+
+ v = tasks_kfunc_map_value_lookup(task);
+ if (!v)
+ return 0;
+
+ bpf_rcu_read_lock();
+ local = v->task;
+ if (!local) {
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+ /* Can't release a kptr that's still stored in a map. */
+ bpf_task_release(local);
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
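
The __failure/__msg annotations above are consumed by the selftest loader (test_loader.c in this tree): each program is loaded, expected to be rejected by the verifier, and the verifier log is matched against the given fragment. A minimal user-space runner, sketched here assuming the usual skeleton naming conventions, would be:

    #include "task_kfunc_failure.skel.h" /* generated by bpftool gen skeleton */

    void test_task_kfuncs(void)
    {
            /* RUN_TESTS() walks every program in the skeleton and checks
             * the expected load verdict and verifier message for each.
             */
            RUN_TESTS(task_kfunc_failure);
    }
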
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c
new file mode 100644
index 000000000000..5fb4fc19d26a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "../bpf_experimental.h"
+#include "task_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+int err, pid;
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(task_newtask,
+ * TP_PROTO(struct task_struct *p, u64 clone_flags)
+ */
+
+struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
+
+struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak;
+/* The two-param bpf_task_acquire doesn't exist */
+struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak;
+/* Incorrect type for first param */
+struct task_struct *bpf_task_acquire___three(void *ctx) __ksym __weak;
+
+void invalid_kfunc(void) __ksym __weak;
+void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;
+
+static bool is_test_kfunc_task(void)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+
+ return pid == cur_pid;
+}
+
+static int test_acquire_release(struct task_struct *task)
+{
+ struct task_struct *acquired = NULL;
+
+ if (!bpf_ksym_exists(bpf_task_acquire)) {
+ err = 3;
+ return 0;
+ }
+ if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) {
+ err = 4;
+ return 0;
+ }
+ if (bpf_ksym_exists(invalid_kfunc)) {
+ /* the verifier's dead code elimination should remove this */
+ err = 5;
+ asm volatile ("goto -1"); /* for (;;); */
+ }
+
+ acquired = bpf_task_acquire(task);
+ if (acquired)
+ bpf_task_release(acquired);
+ else
+ err = 6;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_kfunc_flavor_relo, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired = NULL;
+ int fake_ctx = 42;
+
+ if (bpf_ksym_exists(bpf_task_acquire___one)) {
+ acquired = bpf_task_acquire___one(task);
+ } else if (bpf_ksym_exists(bpf_task_acquire___two)) {
+ /* Here, bpf_object__resolve_ksym_func_btf_id's find_ksym_btf_id
+ * call will find vmlinux's bpf_task_acquire, but subsequent
+ * bpf_core_types_are_compat will fail
+ */
+ acquired = bpf_task_acquire___two(task, &fake_ctx);
+ err = 3;
+ return 0;
+ } else if (bpf_ksym_exists(bpf_task_acquire___three)) {
+ /* bpf_core_types_are_compat will fail similarly to above case */
+ acquired = bpf_task_acquire___three(&fake_ctx);
+ err = 4;
+ return 0;
+ }
+
+ if (acquired)
+ bpf_task_release(acquired);
+ else
+ err = 5;
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_kfunc_flavor_relo_not_found, struct task_struct *task, u64 clone_flags)
+{
+ /* Neither symbol should successfully resolve.
+ * Success or failure of one ___flavor should not affect others
+ */
+ if (bpf_ksym_exists(bpf_task_acquire___two))
+ err = 1;
+ else if (bpf_ksym_exists(bpf_task_acquire___three))
+ err = 2;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)
+{
+ if (!is_test_kfunc_task())
+ return 0;
+
+ return test_acquire_release(task);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags)
+{
+ if (!is_test_kfunc_task())
+ return 0;
+
+ return test_acquire_release(bpf_get_current_task_btf());
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone_flags)
+{
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = tasks_kfunc_map_insert(task);
+ if (status)
+ err = 1;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *kptr, *acquired;
+ struct __tasks_kfunc_map_value *v, *local;
+ int refcnt, refcnt_after_drop;
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = tasks_kfunc_map_insert(task);
+ if (status) {
+ err = 1;
+ return 0;
+ }
+
+ v = tasks_kfunc_map_value_lookup(task);
+ if (!v) {
+ err = 2;
+ return 0;
+ }
+
+ kptr = bpf_kptr_xchg(&v->task, NULL);
+ if (!kptr) {
+ err = 3;
+ return 0;
+ }
+
+ local = bpf_obj_new(typeof(*local));
+ if (!local) {
+ err = 4;
+ bpf_task_release(kptr);
+ return 0;
+ }
+
+ kptr = bpf_kptr_xchg(&local->task, kptr);
+ if (kptr) {
+ err = 5;
+ bpf_obj_drop(local);
+ bpf_task_release(kptr);
+ return 0;
+ }
+
+ kptr = bpf_kptr_xchg(&local->task, NULL);
+ if (!kptr) {
+ err = 6;
+ bpf_obj_drop(local);
+ return 0;
+ }
+
+ /* Stash a copy into local kptr and check if it is released recursively */
+ acquired = bpf_task_acquire(kptr);
+ if (!acquired) {
+ err = 7;
+ bpf_obj_drop(local);
+ bpf_task_release(kptr);
+ return 0;
+ }
+ bpf_probe_read_kernel(&refcnt, sizeof(refcnt), &acquired->rcu_users);
+
+ acquired = bpf_kptr_xchg(&local->task, acquired);
+ if (acquired) {
+ err = 8;
+ bpf_obj_drop(local);
+ bpf_task_release(kptr);
+ bpf_task_release(acquired);
+ return 0;
+ }
+
+ bpf_obj_drop(local);
+
+ bpf_probe_read_kernel(&refcnt_after_drop, sizeof(refcnt_after_drop), &kptr->rcu_users);
+ if (refcnt != refcnt_after_drop + 1) {
+ err = 9;
+ bpf_task_release(kptr);
+ return 0;
+ }
+
+ bpf_task_release(kptr);
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_map_acquire_release, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *kptr;
+ struct __tasks_kfunc_map_value *v;
+ long status;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ status = tasks_kfunc_map_insert(task);
+ if (status) {
+ err = 1;
+ return 0;
+ }
+
+ v = tasks_kfunc_map_value_lookup(task);
+ if (!v) {
+ err = 2;
+ return 0;
+ }
+
+ bpf_rcu_read_lock();
+ kptr = v->task;
+ if (!kptr) {
+ err = 3;
+ } else {
+ kptr = bpf_task_acquire(kptr);
+ if (!kptr)
+ err = 4;
+ else
+ bpf_task_release(kptr);
+ }
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *current, *acquired;
+
+ if (!is_test_kfunc_task())
+ return 0;
+
+ current = bpf_get_current_task_btf();
+ acquired = bpf_task_acquire(current);
+ if (acquired)
+ bpf_task_release(acquired);
+ else
+ err = 1;
+
+ return 0;
+}
+
+static void lookup_compare_pid(const struct task_struct *p)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_from_pid(p->pid);
+ if (!acquired) {
+ err = 1;
+ return;
+ }
+
+ if (acquired->pid != p->pid)
+ err = 2;
+ bpf_task_release(acquired);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
+{
+ if (!is_test_kfunc_task())
+ return 0;
+
+ lookup_compare_pid(task);
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags)
+{
+ if (!is_test_kfunc_task())
+ return 0;
+
+ lookup_compare_pid(bpf_get_current_task_btf());
+ return 0;
+}
+
+static int is_pid_lookup_valid(s32 pid)
+{
+ struct task_struct *acquired;
+
+ acquired = bpf_task_from_pid(pid);
+ if (acquired) {
+ bpf_task_release(acquired);
+ return 1;
+ }
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags)
+{
+ if (!is_test_kfunc_task())
+ return 0;
+
+ bpf_strncmp(task->comm, 12, "foo");
+ bpf_strncmp(task->comm, 16, "foo");
+ bpf_strncmp(&task->comm[8], 4, "foo");
+
+ if (is_pid_lookup_valid(-1)) {
+ err = 1;
+ return 0;
+ }
+
+ if (is_pid_lookup_valid(0xcafef00d)) {
+ err = 2;
+ return 0;
+ }
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired;
+
+ /* task->group_leader is listed as a trusted, non-NULL field of task struct. */
+ acquired = bpf_task_acquire(task->group_leader);
+ if (acquired)
+ bpf_task_release(acquired);
+ else
+ err = 1;
+
+ return 0;
+}
+
+SEC("syscall")
+int test_task_from_vpid_current(const void *ctx)
+{
+ struct task_struct *current, *v_task;
+
+ v_task = bpf_task_from_vpid(1);
+ if (!v_task) {
+ err = 1;
+ return 0;
+ }
+
+ current = bpf_get_current_task_btf();
+
+ /* The current process should be the init process (pid 1) in the new pid namespace. */
+ if (current != v_task)
+ err = 2;
+
+ bpf_task_release(v_task);
+ return 0;
+}
+
+SEC("syscall")
+int test_task_from_vpid_invalid(const void *ctx)
+{
+ struct task_struct *v_task;
+
+ v_task = bpf_task_from_vpid(-1);
+ if (v_task) {
+ err = 1;
+ goto err;
+ }
+
+ /* There should be only one process (current process) in the new pid namespace. */
+ v_task = bpf_task_from_vpid(2);
+ if (v_task) {
+ err = 2;
+ goto err;
+ }
+
+ v_task = bpf_task_from_vpid(9999);
+ if (v_task) {
+ err = 3;
+ goto err;
+ }
+
+ return 0;
+err:
+ bpf_task_release(v_task);
+ return 0;
+}
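
As the comments in the two programs above note, the *_vpid tests assume the triggering process sits alone in a freshly created PID namespace. A plausible user-space harness for these SEC("syscall") programs (a sketch only, assuming they are driven via bpf_prog_test_run_opts()) looks like:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <unistd.h>
    #include <sys/wait.h>
    #include <bpf/bpf.h>

    static void run_prog_in_new_pidns(int prog_fd)
    {
            LIBBPF_OPTS(bpf_test_run_opts, opts);

            if (unshare(CLONE_NEWPID)) /* needs CAP_SYS_ADMIN */
                    return;
            if (fork() == 0) {
                    /* the child is pid 1 in the new namespace, so
                     * bpf_task_from_vpid(1) resolves to it
                     */
                    bpf_prog_test_run_opts(prog_fd, &opts);
                    _exit(0);
            }
            wait(NULL);
    }
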
diff --git a/tools/testing/selftests/bpf/progs/task_local_data.bpf.h b/tools/testing/selftests/bpf/progs/task_local_data.bpf.h
new file mode 100644
index 000000000000..432fff2af844
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_local_data.bpf.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __TASK_LOCAL_DATA_BPF_H
+#define __TASK_LOCAL_DATA_BPF_H
+
+/*
+ * Task local data is a library that facilitates sharing per-task data
+ * between user space and bpf programs.
+ *
+ *
+ * USAGE
+ *
+ * A TLD, an entry of data in task local data, first needs to be created by the
+ * user space. This is done by calling user space API, TLD_DEFINE_KEY() or
+ * tld_create_key(), with the name of the TLD and the size.
+ *
+ * TLD_DEFINE_KEY(prio, "priority", sizeof(int));
+ *
+ * or
+ *
+ * void func_call(...) {
+ * tld_key_t prio, in_cs;
+ *
+ * prio = tld_create_key("priority", sizeof(int));
+ * in_cs = tld_create_key("in_critical_section", sizeof(bool));
+ * ...
+ *
+ * A key associated with the TLD, which has an opaque type tld_key_t, will be
+ * initialized or returned. It can be used to get a pointer to the TLD in the
+ * user space by calling tld_get_data().
+ *
+ * In a bpf program, tld_object_init() first needs to be called to initialize a
+ * tld_object on the stack. Then, TLDs can be accessed by calling tld_get_data().
+ * The API will try to fetch the key by the name and use it to locate the data.
+ * A pointer to the TLD will be returned. It also caches the key in a task local
+ * storage map, tld_key_map, whose value type, struct tld_keys, must be defined
+ * by the developer.
+ *
+ * struct tld_keys {
+ * tld_key_t prio;
+ * tld_key_t in_cs;
+ * };
+ *
+ * SEC("struct_ops")
+ * void prog(struct task_struct *task, ...)
+ * {
+ * struct tld_object tld_obj;
+ * int err, *p;
+ *
+ * err = tld_object_init(task, &tld_obj);
+ * if (err)
+ * return;
+ *
+ * p = tld_get_data(&tld_obj, prio, "priority", sizeof(int));
+ * if (p)
+ * // do something depending on *p
+ */
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+
+#define TLD_ROUND_MASK(x, y) ((__typeof__(x))((y) - 1))
+#define TLD_ROUND_UP(x, y) ((((x) - 1) | TLD_ROUND_MASK(x, y)) + 1)
+
+#define TLD_MAX_DATA_CNT (__PAGE_SIZE / sizeof(struct tld_metadata) - 1)
+
+#ifndef TLD_NAME_LEN
+#define TLD_NAME_LEN 62
+#endif
+
+#ifndef TLD_KEY_MAP_CREATE_RETRY
+#define TLD_KEY_MAP_CREATE_RETRY 10
+#endif
+
+typedef struct {
+ __s16 off;
+} tld_key_t;
+
+struct tld_metadata {
+ char name[TLD_NAME_LEN];
+ __u16 size;
+};
+
+struct tld_meta_u {
+ __u8 cnt;
+ __u16 size;
+ struct tld_metadata metadata[TLD_MAX_DATA_CNT];
+};
+
+struct tld_data_u {
+ __u64 start; /* offset of tld_data_u->data in a page */
+ char data[__PAGE_SIZE - sizeof(__u64)];
+};
+
+struct tld_map_value {
+ struct tld_data_u __uptr *data;
+ struct tld_meta_u __uptr *meta;
+};
+
+typedef struct tld_uptr_dummy {
+ struct tld_data_u data[0];
+ struct tld_meta_u meta[0];
+} *tld_uptr_dummy_t;
+
+struct tld_object {
+ struct tld_map_value *data_map;
+ struct tld_keys *key_map;
+ /*
+ * Force the compiler to generate the actual definition of tld_meta_u
+ * and tld_data_u in BTF. Without it, tld_meta_u and tld_data_u will
+ * be BTF_KIND_FWD.
+ */
+ tld_uptr_dummy_t dummy[0];
+};
+
+/*
+ * Map value of tld_key_map for caching keys. Must be defined by the developer.
+ * Members should be tld_key_t and passed to the 3rd argument of tld_fetch_key().
+ */
+struct tld_keys;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct tld_map_value);
+} tld_data_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct tld_keys);
+} tld_key_map SEC(".maps");
+
+/**
+ * tld_object_init() - Initialize a tld_object.
+ *
+ * @task: The task_struct of the target task
+ * @tld_obj: A pointer to a tld_object to be initialized
+ *
+ * Return 0 on success; -ENODATA if the user space did not initialize task local data
+ * for the current task through tld_get_data(); -ENOMEM if the creation of tld_key_map
+ * fails
+ */
+__attribute__((unused))
+static int tld_object_init(struct task_struct *task, struct tld_object *tld_obj)
+{
+ int i;
+
+ tld_obj->data_map = bpf_task_storage_get(&tld_data_map, task, 0, 0);
+ if (!tld_obj->data_map)
+ return -ENODATA;
+
+ bpf_for(i, 0, TLD_KEY_MAP_CREATE_RETRY) {
+ tld_obj->key_map = bpf_task_storage_get(&tld_key_map, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (likely(tld_obj->key_map))
+ break;
+ }
+ if (!tld_obj->key_map)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * Return the offset of the TLD if @name is found. Otherwise, return the current
+ * TLD count as a nonpositive value, so that the next tld_get_data() can either
+ * skip fetching the key (if no new TLD has been added) or resume comparing names
+ * from the first newly added TLD. For example, with three TLDs and no match,
+ * -3 is cached; a later lookup starts the comparison at index 3.
+ */
+__attribute__((unused))
+static int __tld_fetch_key(struct tld_object *tld_obj, const char *name, int i_start)
+{
+ struct tld_metadata *metadata;
+ int i, cnt, start, off = 0;
+
+ if (!tld_obj->data_map || !tld_obj->data_map->data || !tld_obj->data_map->meta)
+ return 0;
+
+ start = tld_obj->data_map->data->start;
+ cnt = tld_obj->data_map->meta->cnt;
+ metadata = tld_obj->data_map->meta->metadata;
+
+ bpf_for(i, 0, cnt) {
+ if (i >= TLD_MAX_DATA_CNT)
+ break;
+
+ if (i >= i_start && !bpf_strncmp(metadata[i].name, TLD_NAME_LEN, name))
+ return start + off;
+
+ off += TLD_ROUND_UP(metadata[i].size, 8);
+ }
+
+ return -cnt;
+}
+
+/**
+ * tld_get_data() - Retrieve a pointer to the TLD associated with the name.
+ *
+ * @tld_obj: A pointer to a valid tld_object initialized by tld_object_init()
+ * @key: The cached key of the TLD in tld_key_map
+ * @name: The name of the key associated with a TLD
+ * @size: The size of the TLD. Must be a known constant value
+ *
+ * Return a pointer to the TLD associated with @name; NULL if not found or @size is too
+ * big. @key caches the key once the TLD is found, to speed up subsequent calls.
+ * It should be defined by the developer as a member of struct tld_keys, of type tld_key_t.
+ */
+#define tld_get_data(tld_obj, key, name, size) \
+ ({ \
+ void *data = NULL, *_data = (tld_obj)->data_map->data; \
+ long off = (tld_obj)->key_map->key.off; \
+ int cnt; \
+ \
+ if (likely(_data)) { \
+ if (likely(off > 0)) { \
+ barrier_var(off); \
+ if (likely(off < __PAGE_SIZE - size)) \
+ data = _data + off; \
+ } else { \
+ cnt = -(off); \
+ if (likely((tld_obj)->data_map->meta) && \
+ cnt < (tld_obj)->data_map->meta->cnt) { \
+ off = __tld_fetch_key(tld_obj, name, cnt); \
+ (tld_obj)->key_map->key.off = off; \
+ \
+ if (likely(off < __PAGE_SIZE - size)) { \
+ barrier_var(off); \
+ if (off > 0) \
+ data = _data + off; \
+ } \
+ } \
+ } \
+ } \
+ data; \
+ })
+
+#endif
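
For reference, the user-space half described in the USAGE comment would look roughly like the sketch below. The header name task_local_data.h and the exact user-space tld_get_data() signature are assumptions; only TLD_DEFINE_KEY(), tld_create_key() and the existence of a user-space tld_get_data() are stated in the comment above.

    #include "task_local_data.h" /* assumed user-space counterpart */

    TLD_DEFINE_KEY(prio, "priority", sizeof(int));

    static int set_my_priority(int value)
    {
            /* assumed accessor returning a pointer into this task's TLD page */
            int *p = tld_get_data(prio);

            if (!p)
                    return -1;
            *p = value; /* the bpf side sees this via its own tld_get_data() */
            return 0;
    }
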
diff --git a/tools/testing/selftests/bpf/progs/task_local_storage.c b/tools/testing/selftests/bpf/progs/task_local_storage.c
new file mode 100644
index 000000000000..80a0a20db88d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_local_storage.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} enter_id SEC(".maps");
+
+#define MAGIC_VALUE 0xabcd1234
+
+pid_t target_pid = 0;
+int mismatch_cnt = 0;
+int enter_cnt = 0;
+int exit_cnt = 0;
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ ptr = bpf_task_storage_get(&enter_id, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return 0;
+
+ __sync_fetch_and_add(&enter_cnt, 1);
+ *ptr = MAGIC_VALUE + enter_cnt;
+
+ return 0;
+}
+
+SEC("tp_btf/sys_exit")
+int BPF_PROG(on_exit, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ ptr = bpf_task_storage_get(&enter_id, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return 0;
+
+ __sync_fetch_and_add(&exit_cnt, 1);
+ if (*ptr != MAGIC_VALUE + exit_cnt)
+ __sync_fetch_and_add(&mismatch_cnt, 1);
+ return 0;
+}
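
A typical user-space driver for this object (a sketch, assuming the standard libbpf skeleton flow) pins target_pid to the test thread, issues a syscall or two, and then checks the counters:

    #include <unistd.h>
    #include <sys/syscall.h>
    #include "task_local_storage.skel.h" /* assumed generated skeleton */

    int main(void)
    {
            struct task_local_storage *skel;

            skel = task_local_storage__open_and_load();
            if (!skel)
                    return 1;
            skel->bss->target_pid = syscall(SYS_gettid);
            if (!task_local_storage__attach(skel))
                    syscall(SYS_getpgid); /* any syscall fires sys_enter/sys_exit */

            /* expect: enter_cnt == exit_cnt and mismatch_cnt == 0 */
            task_local_storage__destroy(skel);
            return 0;
    }
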
diff --git a/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c b/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c
new file mode 100644
index 000000000000..41d88ed222ff
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, __u64);
+} task_storage SEC(".maps");
+
+int run_count = 0;
+int valid_ptr_count = 0;
+int null_ptr_count = 0;
+
+SEC("fentry/exit_creds")
+int BPF_PROG(trace_exit_creds, struct task_struct *task)
+{
+ __u64 *ptr;
+
+ ptr = bpf_task_storage_get(&task_storage, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ __sync_fetch_and_add(&valid_ptr_count, 1);
+ else
+ __sync_fetch_and_add(&null_ptr_count, 1);
+
+ __sync_fetch_and_add(&run_count, 1);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_ls_recursion.c b/tools/testing/selftests/bpf/progs/task_ls_recursion.c
new file mode 100644
index 000000000000..f1853c38aada
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_ls_recursion.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#ifndef EBUSY
+#define EBUSY 16
+#endif
+
+char _license[] SEC("license") = "GPL";
+int nr_del_errs = 0;
+int test_pid = 0;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_a SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, long);
+} map_b SEC(".maps");
+
+SEC("fentry/bpf_local_storage_update")
+int BPF_PROG(on_update)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ long *ptr;
+
+ if (!test_pid || task->pid != test_pid)
+ return 0;
+
+ ptr = bpf_task_storage_get(&map_a, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ /* ptr will not be NULL when this program runs as a result of
+ * the bpf_task_storage_get(&map_b, ...F_CREATE) call in
+ * BPF_PROG(on_enter) below: the value already exists in map_a,
+ * so the kernel can return it without acquiring any spin_lock.
+ */
+ if (ptr) {
+ int err;
+
+ *ptr += 1;
+ err = bpf_task_storage_delete(&map_a, task);
+ if (err == -EBUSY)
+ nr_del_errs++;
+ }
+
+ /* This call will still fail: map_b is empty, and this
+ * BPF_PROG(on_update) cannot acquire the percpu busy
+ * lock, so a potential deadlock is detected and the
+ * kernel refuses to create new storage.
+ */
+ ptr = bpf_task_storage_get(&map_b, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr)
+ *ptr += 1;
+
+ return 0;
+}
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+ struct task_struct *task;
+ long *ptr;
+
+ task = bpf_get_current_task_btf();
+ if (!test_pid || task->pid != test_pid)
+ return 0;
+
+ ptr = bpf_task_storage_get(&map_a, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr && !*ptr)
+ *ptr = 200;
+
+ ptr = bpf_task_storage_get(&map_b, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (ptr && !*ptr)
+ *ptr = 100;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_ls_uptr.c b/tools/testing/selftests/bpf/progs/task_ls_uptr.c
new file mode 100644
index 000000000000..ddbe11b46eef
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_ls_uptr.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "uptr_test_common.h"
+
+struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct value_type);
+} datamap SEC(".maps");
+
+pid_t target_pid = 0;
+pid_t parent_pid = 0;
+
+SEC("tp_btf/sys_enter")
+int on_enter(__u64 *ctx)
+{
+ struct task_struct *task, *data_task;
+ struct value_type *ptr;
+ struct user_data *udata;
+ struct cgroup *cgrp;
+
+ task = bpf_get_current_task_btf();
+ if (task->pid != target_pid)
+ return 0;
+
+ data_task = bpf_task_from_pid(parent_pid);
+ if (!data_task)
+ return 0;
+
+ ptr = bpf_task_storage_get(&datamap, data_task, 0, 0);
+ bpf_task_release(data_task);
+ if (!ptr)
+ return 0;
+
+ cgrp = bpf_kptr_xchg(&ptr->cgrp, NULL);
+ if (cgrp) {
+ int lvl = cgrp->level;
+
+ bpf_cgroup_release(cgrp);
+ return lvl;
+ }
+
+ udata = ptr->udata;
+ if (!udata || udata->result)
+ return 0;
+ udata->result = MAGIC_VALUE + udata->a + udata->b;
+
+ udata = ptr->nested.udata;
+ if (udata && !udata->nested_result)
+ udata->nested_result = udata->result;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
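
The __uptr fields of struct value_type (from uptr_test_common.h) reference user memory that the kernel pins when the map value is installed, so the user-space side would populate the task storage roughly as below. This is a hedged sketch: the field names come from the program above, everything else is illustrative.

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <bpf/bpf.h>

    struct user_data ud = { .a = 1, .b = 2 };   /* stays in user memory */
    struct value_type value = { .udata = &ud }; /* __uptr, pinned on update */

    /* task storage maps are keyed by a pidfd from user space;
     * datamap_fd (the fd of the datamap above) is illustrative
     */
    int task_fd = syscall(SYS_pidfd_open, getpid(), 0);

    bpf_map_update_elem(datamap_fd, &task_fd, &value, BPF_ANY);
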
diff --git a/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c b/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c
new file mode 100644
index 000000000000..986829aaf73a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#ifndef EBUSY
+#define EBUSY 16
+#endif
+
+extern bool CONFIG_PREEMPTION __kconfig __weak;
+int nr_get_errs = 0;
+int nr_del_errs = 0;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} task_storage SEC(".maps");
+
+SEC("lsm.s/socket_post_create")
+int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
+ int protocol, int kern)
+{
+ struct task_struct *task;
+ int ret, zero = 0;
+ int *value;
+
+ if (!CONFIG_PREEMPTION)
+ return 0;
+
+ task = bpf_get_current_task_btf();
+ value = bpf_task_storage_get(&task_storage, task, &zero,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!value)
+ __sync_fetch_and_add(&nr_get_errs, 1);
+
+ ret = bpf_task_storage_delete(&task_storage,
+ bpf_get_current_task_btf());
+ if (ret == -EBUSY)
+ __sync_fetch_and_add(&nr_del_errs, 1);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_work.c b/tools/testing/selftests/bpf/progs/task_work.c
new file mode 100644
index 000000000000..663a80990f8f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_work.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <string.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "errno.h"
+
+char _license[] SEC("license") = "GPL";
+
+const void *user_ptr = NULL;
+
+struct elem {
+ char data[128];
+ struct bpf_task_work tw;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} hmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} arrmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} lrumap SEC(".maps");
+
+static int process_work(struct bpf_map *map, void *key, void *value)
+{
+ struct elem *work = value;
+
+ bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0);
+ return 0;
+}
+
+int key = 0;
+
+SEC("perf_event")
+int oncpu_hash_map(struct pt_regs *args)
+{
+ struct elem empty_work = { .data = { 0 } };
+ struct elem *work;
+ struct task_struct *task;
+ int err;
+
+ task = bpf_get_current_task_btf();
+ err = bpf_map_update_elem(&hmap, &key, &empty_work, BPF_NOEXIST);
+ if (err)
+ return 0;
+ work = bpf_map_lookup_elem(&hmap, &key);
+ if (!work)
+ return 0;
+
+ bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);
+ return 0;
+}
+
+SEC("perf_event")
+int oncpu_array_map(struct pt_regs *args)
+{
+ struct elem *work;
+ struct task_struct *task;
+
+ task = bpf_get_current_task_btf();
+ work = bpf_map_lookup_elem(&arrmap, &key);
+ if (!work)
+ return 0;
+ bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, process_work, NULL);
+ return 0;
+}
+
+SEC("perf_event")
+int oncpu_lru_map(struct pt_regs *args)
+{
+ struct elem empty_work = { .data = { 0 } };
+ struct elem *work;
+ struct task_struct *task;
+ int err;
+
+ task = bpf_get_current_task_btf();
+ work = bpf_map_lookup_elem(&lrumap, &key);
+ if (work)
+ return 0;
+ err = bpf_map_update_elem(&lrumap, &key, &empty_work, BPF_NOEXIST);
+ if (err)
+ return 0;
+ work = bpf_map_lookup_elem(&lrumap, &key);
+ if (!work || work->data[0])
+ return 0;
+ bpf_task_work_schedule_resume_impl(task, &work->tw, &lrumap, process_work, NULL);
+ return 0;
+}
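
These SEC("perf_event") programs are presumably driven by opening a sampling software event and attaching to it; bpf_program__attach_perf_event() is the stock libbpf entry point for that. A sketch with assumed sampling parameters:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <bpf/libbpf.h>

    struct perf_event_attr attr = {
            .type = PERF_TYPE_SOFTWARE,
            .config = PERF_COUNT_SW_CPU_CLOCK,
            .size = sizeof(attr),
            .freq = 1,
            .sample_freq = 1000, /* assumed sampling rate */
    };
    int pefd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
    struct bpf_link *link =
            bpf_program__attach_perf_event(skel->progs.oncpu_hash_map, pefd);
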
diff --git a/tools/testing/selftests/bpf/progs/task_work_fail.c b/tools/testing/selftests/bpf/progs/task_work_fail.c
new file mode 100644
index 000000000000..1270953fd092
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_work_fail.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <string.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+const void *user_ptr = NULL;
+
+struct elem {
+ char data[128];
+ struct bpf_task_work tw;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} hmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} arrmap SEC(".maps");
+
+static int process_work(struct bpf_map *map, void *key, void *value)
+{
+ struct elem *work = value;
+
+ bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0);
+ return 0;
+}
+
+int key = 0;
+
+SEC("perf_event")
+__failure __msg("doesn't match map pointer in R3")
+int mismatch_map(struct pt_regs *args)
+{
+ struct elem *work;
+ struct task_struct *task;
+
+ task = bpf_get_current_task_btf();
+ work = bpf_map_lookup_elem(&arrmap, &key);
+ if (!work)
+ return 0;
+ bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);
+ return 0;
+}
+
+SEC("perf_event")
+__failure __msg("arg#1 doesn't point to a map value")
+int no_map_task_work(struct pt_regs *args)
+{
+ struct task_struct *task;
+ struct bpf_task_work tw;
+
+ task = bpf_get_current_task_btf();
+ bpf_task_work_schedule_resume_impl(task, &tw, &hmap, process_work, NULL);
+ return 0;
+}
+
+SEC("perf_event")
+__failure __msg("Possibly NULL pointer passed to trusted arg1")
+int task_work_null(struct pt_regs *args)
+{
+ struct task_struct *task;
+
+ task = bpf_get_current_task_btf();
+ bpf_task_work_schedule_resume_impl(task, NULL, &hmap, process_work, NULL);
+ return 0;
+}
+
+SEC("perf_event")
+__failure __msg("Possibly NULL pointer passed to trusted arg2")
+int map_null(struct pt_regs *args)
+{
+ struct elem *work;
+ struct task_struct *task;
+
+ task = bpf_get_current_task_btf();
+ work = bpf_map_lookup_elem(&arrmap, &key);
+ if (!work)
+ return 0;
+ bpf_task_work_schedule_resume_impl(task, &work->tw, NULL, process_work, NULL);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_work_stress.c b/tools/testing/selftests/bpf/progs/task_work_stress.c
new file mode 100644
index 000000000000..55e555f7f41b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_work_stress.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <string.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+#define ENTRIES 128
+
+char _license[] SEC("license") = "GPL";
+
+__u64 callback_scheduled = 0;
+__u64 callback_success = 0;
+__u64 schedule_error = 0;
+__u64 delete_success = 0;
+
+struct elem {
+ __u32 count;
+ struct bpf_task_work tw;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __uint(max_entries, ENTRIES);
+ __type(key, int);
+ __type(value, struct elem);
+} hmap SEC(".maps");
+
+static int process_work(struct bpf_map *map, void *key, void *value)
+{
+ __sync_fetch_and_add(&callback_success, 1);
+ return 0;
+}
+
+SEC("syscall")
+int schedule_task_work(void *ctx)
+{
+ struct elem empty_work = {.count = 0};
+ struct elem *work;
+ int key = 0, err;
+
+ key = bpf_ktime_get_ns() % ENTRIES;
+ work = bpf_map_lookup_elem(&hmap, &key);
+ if (!work) {
+ bpf_map_update_elem(&hmap, &key, &empty_work, BPF_NOEXIST);
+ work = bpf_map_lookup_elem(&hmap, &key);
+ if (!work)
+ return 0;
+ }
+ err = bpf_task_work_schedule_signal_impl(bpf_get_current_task_btf(), &work->tw, &hmap,
+ process_work, NULL);
+ if (err)
+ __sync_fetch_and_add(&schedule_error, 1);
+ else
+ __sync_fetch_and_add(&callback_scheduled, 1);
+ return 0;
+}
+
+SEC("syscall")
+int delete_task_work(void *ctx)
+{
+ int key = 0, err;
+
+ key = bpf_get_prandom_u32() % ENTRIES;
+ err = bpf_map_delete_elem(&hmap, &key);
+ if (!err)
+ __sync_fetch_and_add(&delete_success, 1);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
new file mode 100644
index 000000000000..fe6249d99b31
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+__noinline
+int subprog_tc(struct __sk_buff *skb)
+{
+ int ret = 1;
+
+ __sink(skb);
+ __sink(ret);
+ /* let the verifier know that 'subprog_tc' can change pointers to skb->data */
+ bpf_skb_change_proto(skb, 0, 0);
+ return ret;
+}
+
+SEC("tc")
+int entry_tc(struct __sk_buff *skb)
+{
+ return subprog_tc(skb);
+}
+
+char __license[] SEC("license") = "GPL";
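
The comment in subprog_tc() is the point of this test: since the subprogram calls a helper that can reallocate packet data, the verifier must assume any packet pointers the caller derived before the call are stale. A caller that touches packet bytes therefore has to re-derive and re-check them after the call; an illustrative variant (not part of the test) would be:

    SEC("tc")
    int entry_tc_recheck(struct __sk_buff *skb)
    {
            void *data = (void *)(long)skb->data;
            void *data_end = (void *)(long)skb->data_end;
            __u8 first;

            if (data + 1 > data_end)
                    return 1;

            subprog_tc(skb);

            /* the old data/data_end are invalidated by the call above */
            data = (void *)(long)skb->data;
            data_end = (void *)(long)skb->data_end;
            if (data + 1 > data_end)
                    return 1;

            first = *(__u8 *)data;
            __sink(first);
            return 1;
    }
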
diff --git a/tools/testing/selftests/bpf/progs/tc_dummy.c b/tools/testing/selftests/bpf/progs/tc_dummy.c
new file mode 100644
index 000000000000..69a3d0dc8787
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tc_dummy.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_legacy.h"
+
+SEC("tc")
+int entry(struct __sk_buff *skb)
+{
+ return 1;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c b/tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c
new file mode 100644
index 000000000000..0016c90e9c13
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops")
+__u32 BPF_PROG(incompl_cong_ops_ssthresh, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_ssthresh;
+}
+
+SEC("struct_ops")
+__u32 BPF_PROG(incompl_cong_ops_undo_cwnd, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_cwnd;
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops incompl_cong_ops = {
+ /* Intentionally leaving out any of the required cong_avoid() and
+ * cong_control() here.
+ */
+ .ssthresh = (void *)incompl_cong_ops_ssthresh,
+ .undo_cwnd = (void *)incompl_cong_ops_undo_cwnd,
+ .name = "bpf_incompl_ops",
+};
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_kfunc.c b/tools/testing/selftests/bpf/progs/tcp_ca_kfunc.c
new file mode 100644
index 000000000000..f95862f570b7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_kfunc.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+
+extern void bbr_init(struct sock *sk) __ksym;
+extern void bbr_main(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs) __ksym;
+extern u32 bbr_sndbuf_expand(struct sock *sk) __ksym;
+extern u32 bbr_undo_cwnd(struct sock *sk) __ksym;
+extern void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event) __ksym;
+extern u32 bbr_ssthresh(struct sock *sk) __ksym;
+extern u32 bbr_min_tso_segs(struct sock *sk) __ksym;
+extern void bbr_set_state(struct sock *sk, u8 new_state) __ksym;
+
+extern void dctcp_init(struct sock *sk) __ksym;
+extern void dctcp_update_alpha(struct sock *sk, u32 flags) __ksym;
+extern void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) __ksym;
+extern u32 dctcp_ssthresh(struct sock *sk) __ksym;
+extern u32 dctcp_cwnd_undo(struct sock *sk) __ksym;
+extern void dctcp_state(struct sock *sk, u8 new_state) __ksym;
+
+extern void cubictcp_init(struct sock *sk) __ksym;
+extern u32 cubictcp_recalc_ssthresh(struct sock *sk) __ksym;
+extern void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) __ksym;
+extern void cubictcp_state(struct sock *sk, u8 new_state) __ksym;
+extern void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) __ksym;
+extern void cubictcp_acked(struct sock *sk, const struct ack_sample *sample) __ksym;
+
+SEC("struct_ops")
+void BPF_PROG(init, struct sock *sk)
+{
+ bbr_init(sk);
+ dctcp_init(sk);
+ cubictcp_init(sk);
+}
+
+SEC("struct_ops")
+void BPF_PROG(in_ack_event, struct sock *sk, u32 flags)
+{
+ dctcp_update_alpha(sk, flags);
+}
+
+SEC("struct_ops")
+void BPF_PROG(cong_control, struct sock *sk, u32 ack, int flag, const struct rate_sample *rs)
+{
+ bbr_main(sk, ack, flag, rs);
+}
+
+SEC("struct_ops")
+void BPF_PROG(cong_avoid, struct sock *sk, u32 ack, u32 acked)
+{
+ cubictcp_cong_avoid(sk, ack, acked);
+}
+
+SEC("struct_ops")
+u32 BPF_PROG(sndbuf_expand, struct sock *sk)
+{
+ return bbr_sndbuf_expand(sk);
+}
+
+SEC("struct_ops")
+u32 BPF_PROG(undo_cwnd, struct sock *sk)
+{
+ bbr_undo_cwnd(sk);
+ return dctcp_cwnd_undo(sk);
+}
+
+SEC("struct_ops")
+void BPF_PROG(cwnd_event, struct sock *sk, enum tcp_ca_event event)
+{
+ bbr_cwnd_event(sk, event);
+ dctcp_cwnd_event(sk, event);
+ cubictcp_cwnd_event(sk, event);
+}
+
+SEC("struct_ops")
+u32 BPF_PROG(ssthresh, struct sock *sk)
+{
+ bbr_ssthresh(sk);
+ dctcp_ssthresh(sk);
+ return cubictcp_recalc_ssthresh(sk);
+}
+
+SEC("struct_ops")
+u32 BPF_PROG(min_tso_segs, struct sock *sk)
+{
+ return bbr_min_tso_segs(sk);
+}
+
+SEC("struct_ops")
+void BPF_PROG(set_state, struct sock *sk, u8 new_state)
+{
+ bbr_set_state(sk, new_state);
+ dctcp_state(sk, new_state);
+ cubictcp_state(sk, new_state);
+}
+
+SEC("struct_ops")
+void BPF_PROG(pkts_acked, struct sock *sk, const struct ack_sample *sample)
+{
+ cubictcp_acked(sk, sample);
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops tcp_ca_kfunc = {
+ .init = (void *)init,
+ .in_ack_event = (void *)in_ack_event,
+ .cong_control = (void *)cong_control,
+ .cong_avoid = (void *)cong_avoid,
+ .sndbuf_expand = (void *)sndbuf_expand,
+ .undo_cwnd = (void *)undo_cwnd,
+ .cwnd_event = (void *)cwnd_event,
+ .ssthresh = (void *)ssthresh,
+ .min_tso_segs = (void *)min_tso_segs,
+ .set_state = (void *)set_state,
+ .pkts_acked = (void *)pkts_acked,
+ .name = "tcp_ca_kfunc",
+};
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_unsupp_cong_op.c b/tools/testing/selftests/bpf/progs/tcp_ca_unsupp_cong_op.c
new file mode 100644
index 000000000000..54f916a931c6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_unsupp_cong_op.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops")
+size_t BPF_PROG(unsupp_cong_op_get_info, struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info)
+{
+ return 0;
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops unsupp_cong_op = {
+ .get_info = (void *)unsupp_cong_op_get_info,
+ .name = "bpf_unsupp_op",
+};
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_update.c b/tools/testing/selftests/bpf/progs/tcp_ca_update.c
new file mode 100644
index 000000000000..e4bd82bc0d01
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_update.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int ca1_cnt = 0;
+int ca2_cnt = 0;
+
+SEC("struct_ops")
+void BPF_PROG(ca_update_1_init, struct sock *sk)
+{
+ ca1_cnt++;
+}
+
+SEC("struct_ops")
+void BPF_PROG(ca_update_2_init, struct sock *sk)
+{
+ ca2_cnt++;
+}
+
+SEC("struct_ops")
+void BPF_PROG(ca_update_cong_control, struct sock *sk,
+ const struct rate_sample *rs)
+{
+}
+
+SEC("struct_ops")
+__u32 BPF_PROG(ca_update_ssthresh, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_ssthresh;
+}
+
+SEC("struct_ops")
+__u32 BPF_PROG(ca_update_undo_cwnd, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_cwnd;
+}
+
+SEC(".struct_ops.link")
+struct tcp_congestion_ops ca_update_1 = {
+ .init = (void *)ca_update_1_init,
+ .cong_control = (void *)ca_update_cong_control,
+ .ssthresh = (void *)ca_update_ssthresh,
+ .undo_cwnd = (void *)ca_update_undo_cwnd,
+ .name = "tcp_ca_update",
+};
+
+SEC(".struct_ops.link")
+struct tcp_congestion_ops ca_update_2 = {
+ .init = (void *)ca_update_2_init,
+ .cong_control = (void *)ca_update_cong_control,
+ .ssthresh = (void *)ca_update_ssthresh,
+ .undo_cwnd = (void *)ca_update_undo_cwnd,
+ .name = "tcp_ca_update",
+};
+
+SEC(".struct_ops.link")
+struct tcp_congestion_ops ca_wrong = {
+ .cong_control = (void *)ca_update_cong_control,
+ .ssthresh = (void *)ca_update_ssthresh,
+ .undo_cwnd = (void *)ca_update_undo_cwnd,
+ .name = "tcp_ca_wrong",
+};
+
+SEC(".struct_ops")
+struct tcp_congestion_ops ca_no_link = {
+ .cong_control = (void *)ca_update_cong_control,
+ .ssthresh = (void *)ca_update_ssthresh,
+ .undo_cwnd = (void *)ca_update_undo_cwnd,
+ .name = "tcp_ca_no_link",
+};
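
A plausible user-space flow for these maps (a sketch assuming the usual skeleton names): attach ca_update_1 through a struct_ops link, then swap the same link over to ca_update_2, which registers under the same "tcp_ca_update" name:

    struct bpf_link *link;
    int err;

    link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
    if (!link)
            return;
    /* connections set up now bump ca1_cnt via ca_update_1_init() */

    err = bpf_link__update_map(link, skel->maps.ca_update_2);
    /* after a successful update, new connections bump ca2_cnt instead */

    bpf_link__destroy(link);
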
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c b/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c
new file mode 100644
index 000000000000..022291f21dfb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define USEC_PER_SEC 1000000UL
+
+static unsigned int tcp_left_out(const struct tcp_sock *tp)
+{
+ return tp->sacked_out + tp->lost_out;
+}
+
+static unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
+{
+ return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
+}
+
+SEC("struct_ops")
+void BPF_PROG(write_sk_pacing_init, struct sock *sk)
+{
+#ifdef ENABLE_ATOMICS_TESTS
+ __sync_bool_compare_and_swap(&sk->sk_pacing_status, SK_PACING_NONE,
+ SK_PACING_NEEDED);
+#else
+ sk->sk_pacing_status = SK_PACING_NEEDED;
+#endif
+}
+
+SEC("struct_ops")
+void BPF_PROG(write_sk_pacing_cong_control, struct sock *sk,
+ const struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
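+ /* tp->srtt_us holds the smoothed RTT left-shifted by 3, so the
+ * numerator is scaled by << 3 to match; the result is bytes per
+ * second, and '?: 1U << 3' stands in for 1 usec before the first
+ * RTT sample arrives.
+ */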
+ unsigned long rate =
+ ((tp->snd_cwnd * tp->mss_cache * USEC_PER_SEC) << 3) /
+ (tp->srtt_us ?: 1U << 3);
+ sk->sk_pacing_rate = min(rate, sk->sk_max_pacing_rate);
+ tp->app_limited = (tp->delivered + tcp_packets_in_flight(tp)) ?: 1;
+}
+
+SEC("struct_ops")
+__u32 BPF_PROG(write_sk_pacing_ssthresh, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_ssthresh;
+}
+
+SEC("struct_ops")
+__u32 BPF_PROG(write_sk_pacing_undo_cwnd, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_cwnd;
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops write_sk_pacing = {
+ .init = (void *)write_sk_pacing_init,
+ .cong_control = (void *)write_sk_pacing_cong_control,
+ .ssthresh = (void *)write_sk_pacing_ssthresh,
+ .undo_cwnd = (void *)write_sk_pacing_undo_cwnd,
+ .name = "bpf_w_sk_pacing",
+};
diff --git a/tools/testing/selftests/bpf/progs/tcp_rtt.c b/tools/testing/selftests/bpf/progs/tcp_rtt.c
index 0cb3204ddb18..42c729f85524 100644
--- a/tools/testing/selftests/bpf/progs/tcp_rtt.c
+++ b/tools/testing/selftests/bpf/progs/tcp_rtt.c
@@ -3,7 +3,6 @@
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1;
struct tcp_rtt_storage {
__u32 invoked;
@@ -11,6 +10,9 @@ struct tcp_rtt_storage {
__u32 delivered;
__u32 delivered_ce;
__u32 icsk_retransmits;
+
+ __u32 mrtt_us; /* args[0] */
+ __u32 srtt; /* args[1] */
};
struct {
@@ -56,5 +58,8 @@ int _sockops(struct bpf_sock_ops *ctx)
storage->delivered_ce = tcp_sk->delivered_ce;
storage->icsk_retransmits = tcp_sk->icsk_retransmits;
+ storage->mrtt_us = ctx->args[0];
+ storage->srtt = ctx->args[1];
+
return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/test_access_variable_array.c b/tools/testing/selftests/bpf/progs/test_access_variable_array.c
new file mode 100644
index 000000000000..326b7d1f496a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_access_variable_array.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Bytedance */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+unsigned long span = 0;
+
+SEC("fentry/sched_balance_rq")
+int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq,
+ struct sched_domain *sd)
+{
+ span = sd->span[0];
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_assign_reuse.c b/tools/testing/selftests/bpf/progs/test_assign_reuse.c
new file mode 100644
index 000000000000..4f2e2321ea06
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_assign_reuse.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/pkt_cls.h>
+
+char LICENSE[] SEC("license") = "GPL";
+
+__u64 sk_cookie_seen;
+__u64 reuseport_executed;
+union {
+ struct tcphdr tcp;
+ struct udphdr udp;
+} headers;
+
+const volatile __u16 dest_port;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} sk_map SEC(".maps");
+
+SEC("sk_reuseport")
+int reuse_accept(struct sk_reuseport_md *ctx)
+{
+ reuseport_executed++;
+
+ if (ctx->ip_protocol == IPPROTO_TCP) {
+ if (ctx->data + sizeof(headers.tcp) > ctx->data_end)
+ return SK_DROP;
+
+ if (__builtin_memcmp(&headers.tcp, ctx->data, sizeof(headers.tcp)) != 0)
+ return SK_DROP;
+ } else if (ctx->ip_protocol == IPPROTO_UDP) {
+ if (ctx->data + sizeof(headers.udp) > ctx->data_end)
+ return SK_DROP;
+
+ if (__builtin_memcmp(&headers.udp, ctx->data, sizeof(headers.udp)) != 0)
+ return SK_DROP;
+ } else {
+ return SK_DROP;
+ }
+
+ sk_cookie_seen = bpf_get_socket_cookie(ctx->sk);
+ return SK_PASS;
+}
+
+SEC("sk_reuseport")
+int reuse_drop(struct sk_reuseport_md *ctx)
+{
+ reuseport_executed++;
+ sk_cookie_seen = 0;
+ return SK_DROP;
+}
+
+static int
+assign_sk(struct __sk_buff *skb)
+{
+ int zero = 0, ret = 0;
+ struct bpf_sock *sk;
+
+ sk = bpf_map_lookup_elem(&sk_map, &zero);
+ if (!sk)
+ return TC_ACT_SHOT;
+ ret = bpf_sk_assign(skb, sk, 0);
+ bpf_sk_release(sk);
+ return ret ? TC_ACT_SHOT : TC_ACT_OK;
+}
+
+static int
+maybe_assign_tcp(struct __sk_buff *skb, struct tcphdr *th)
+{
+ if (th + 1 > (void *)(long)(skb->data_end))
+ return TC_ACT_SHOT;
+
+ if (!th->syn || th->ack || th->dest != bpf_htons(dest_port))
+ return TC_ACT_OK;
+
+ __builtin_memcpy(&headers.tcp, th, sizeof(headers.tcp));
+ return assign_sk(skb);
+}
+
+static int
+maybe_assign_udp(struct __sk_buff *skb, struct udphdr *uh)
+{
+ if (uh + 1 > (void *)(long)(skb->data_end))
+ return TC_ACT_SHOT;
+
+ if (uh->dest != bpf_htons(dest_port))
+ return TC_ACT_OK;
+
+ __builtin_memcpy(&headers.udp, uh, sizeof(headers.udp));
+ return assign_sk(skb);
+}
+
+SEC("tc")
+int tc_main(struct __sk_buff *skb)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+ struct ethhdr *eth;
+
+ eth = (struct ethhdr *)(data);
+ if (eth + 1 > data_end)
+ return TC_ACT_SHOT;
+
+ if (eth->h_proto == bpf_htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)(data + sizeof(*eth));
+
+ if (iph + 1 > data_end)
+ return TC_ACT_SHOT;
+
+ if (iph->protocol == IPPROTO_TCP)
+ return maybe_assign_tcp(skb, (struct tcphdr *)(iph + 1));
+ else if (iph->protocol == IPPROTO_UDP)
+ return maybe_assign_udp(skb, (struct udphdr *)(iph + 1));
+ else
+ return TC_ACT_SHOT;
+ } else {
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + sizeof(*eth));
+
+ if (ip6h + 1 > data_end)
+ return TC_ACT_SHOT;
+
+ if (ip6h->nexthdr == IPPROTO_TCP)
+ return maybe_assign_tcp(skb, (struct tcphdr *)(ip6h + 1));
+ else if (ip6h->nexthdr == IPPROTO_UDP)
+ return maybe_assign_udp(skb, (struct udphdr *)(ip6h + 1));
+ else
+ return TC_ACT_SHOT;
+ }
+}
diff --git a/tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c b/tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c
new file mode 100644
index 000000000000..f548b7446218
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+int kprobe_res = 0;
+
+/* This program will be manually made sleepable on the userspace side
+ * and should thus be unattachable.
+ */
+SEC("kprobe/" SYS_PREFIX "sys_nanosleep")
+int handle_kprobe_sleepable(struct pt_regs *ctx)
+{
+ kprobe_res = 1;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c
index 8056a4c6d918..fb79e6cab932 100644
--- a/tools/testing/selftests/bpf/progs/test_attach_probe.c
+++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c
@@ -1,42 +1,183 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
-#include <linux/ptrace.h>
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include <errno.h>
+#include "bpf_misc.h"
-int kprobe_res = 0;
-int kretprobe_res = 0;
-int uprobe_res = 0;
-int uretprobe_res = 0;
+u32 dynamic_sz = 1;
+int kprobe2_res = 0;
+int kretprobe2_res = 0;
+int uprobe_byname_res = 0;
+int uretprobe_byname_res = 0;
+int uprobe_byname2_res = 0;
+int uretprobe_byname2_res = 0;
+int uprobe_byname3_sleepable_res = 0;
+int uprobe_byname3_str_sleepable_res = 0;
+int uprobe_byname3_res = 0;
+int uretprobe_byname3_sleepable_res = 0;
+int uretprobe_byname3_str_sleepable_res = 0;
+int uretprobe_byname3_res = 0;
+void *user_ptr = 0;
-SEC("kprobe/sys_nanosleep")
-int handle_kprobe(struct pt_regs *ctx)
+int bpf_copy_from_user_str(void *dst, u32, const void *, u64) __weak __ksym;
+
+SEC("ksyscall/nanosleep")
+int BPF_KSYSCALL(handle_kprobe_auto, struct __kernel_timespec *req, struct __kernel_timespec *rem)
+{
+ kprobe2_res = 11;
+ return 0;
+}
+
+SEC("kretsyscall/nanosleep")
+int BPF_KRETPROBE(handle_kretprobe_auto, int ret)
+{
+ kretprobe2_res = 22;
+ return ret;
+}
+
+SEC("uprobe")
+int handle_uprobe_ref_ctr(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("uretprobe")
+int handle_uretprobe_ref_ctr(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("uprobe")
+int handle_uprobe_byname(struct pt_regs *ctx)
+{
+ uprobe_byname_res = 5;
+ return 0;
+}
+
+/* use auto-attach format for section definition. */
+SEC("uretprobe//proc/self/exe:trigger_func2")
+int handle_uretprobe_byname(struct pt_regs *ctx)
+{
+ uretprobe_byname_res = 6;
+ return 0;
+}
+
+SEC("uprobe")
+int BPF_UPROBE(handle_uprobe_byname2, const char *pathname, const char *mode)
+{
+ char mode_buf[2] = {};
+
+ /* verify fopen mode */
+ bpf_probe_read_user(mode_buf, sizeof(mode_buf), mode);
+ if (mode_buf[0] == 'r' && mode_buf[1] == 0)
+ uprobe_byname2_res = 7;
+ return 0;
+}
+
+SEC("uretprobe")
+int BPF_URETPROBE(handle_uretprobe_byname2, void *ret)
+{
+ uretprobe_byname2_res = 8;
+ return 0;
+}
+
+static __always_inline bool verify_sleepable_user_copy(void)
+{
+ char data[9];
+
+ bpf_copy_from_user(data, sizeof(data), user_ptr);
+ return bpf_strncmp(data, sizeof(data), "test_data") == 0;
+}
+
+static __always_inline bool verify_sleepable_user_copy_str(void)
+{
+ int ret;
+ char data_long[20];
+ char data_long_pad[20];
+ char data_long_err[20];
+ char data_short[4];
+ char data_short_pad[4];
+
+ ret = bpf_copy_from_user_str(data_short, sizeof(data_short), user_ptr, 0);
+
+ if (bpf_strncmp(data_short, 4, "tes\0") != 0 || ret != 4)
+ return false;
+
+ ret = bpf_copy_from_user_str(data_short_pad, sizeof(data_short_pad), user_ptr, BPF_F_PAD_ZEROS);
+
+ if (bpf_strncmp(data_short_pad, 4, "tes\0") != 0 || ret != 4)
+ return false;
+
+ /* Make sure this passes the verifier */
+ ret = bpf_copy_from_user_str(data_long, dynamic_sz & sizeof(data_long), user_ptr, 0);
+
+ if (ret != 0)
+ return false;
+
+ ret = bpf_copy_from_user_str(data_long, sizeof(data_long), user_ptr, 0);
+
+ if (bpf_strncmp(data_long, 10, "test_data\0") != 0 || ret != 10)
+ return false;
+
+ ret = bpf_copy_from_user_str(data_long_pad, sizeof(data_long_pad), user_ptr, BPF_F_PAD_ZEROS);
+
+ if (bpf_strncmp(data_long_pad, 10, "test_data\0") != 0 || ret != 10 || data_long_pad[19] != '\0')
+ return false;
+
+ ret = bpf_copy_from_user_str(data_long_err, sizeof(data_long_err), (void *)data_long, BPF_F_PAD_ZEROS);
+
+ if (ret > 0 || data_long_err[19] != '\0')
+ return false;
+
+ ret = bpf_copy_from_user_str(data_long, sizeof(data_long), user_ptr, 2);
+
+ if (ret != -EINVAL)
+ return false;
+
+ return true;
+}
+
+SEC("uprobe.s//proc/self/exe:trigger_func3")
+int handle_uprobe_byname3_sleepable(struct pt_regs *ctx)
{
- kprobe_res = 1;
+ if (verify_sleepable_user_copy())
+ uprobe_byname3_sleepable_res = 9;
+ if (verify_sleepable_user_copy_str())
+ uprobe_byname3_str_sleepable_res = 10;
return 0;
}
-SEC("kretprobe/sys_nanosleep")
-int BPF_KRETPROBE(handle_kretprobe)
+/* Same target as the uprobe.s program above, to force sleepable and
+ * non-sleepable programs into the same bpf_prog_array.
+ */
+SEC("uprobe//proc/self/exe:trigger_func3")
+int handle_uprobe_byname3(struct pt_regs *ctx)
{
- kretprobe_res = 2;
+ uprobe_byname3_res = 11;
return 0;
}
-SEC("uprobe/trigger_func")
-int handle_uprobe(struct pt_regs *ctx)
+SEC("uretprobe.s//proc/self/exe:trigger_func3")
+int handle_uretprobe_byname3_sleepable(struct pt_regs *ctx)
{
- uprobe_res = 3;
+ if (verify_sleepable_user_copy())
+ uretprobe_byname3_sleepable_res = 12;
+ if (verify_sleepable_user_copy_str())
+ uretprobe_byname3_str_sleepable_res = 13;
return 0;
}
-SEC("uretprobe/trigger_func")
-int handle_uretprobe(struct pt_regs *ctx)
+SEC("uretprobe//proc/self/exe:trigger_func3")
+int handle_uretprobe_byname3(struct pt_regs *ctx)
{
- uretprobe_res = 4;
+ uretprobe_byname3_res = 14;
return 0;
}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe_manual.c b/tools/testing/selftests/bpf/progs/test_attach_probe_manual.c
new file mode 100644
index 000000000000..7f08bce94596
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_attach_probe_manual.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+int kprobe_res = 0;
+int kretprobe_res = 0;
+int uprobe_res = 0;
+int uretprobe_res = 0;
+int uprobe_byname_res = 0;
+void *user_ptr = 0;
+
+SEC("kprobe")
+int handle_kprobe(struct pt_regs *ctx)
+{
+ kprobe_res = 1;
+ return 0;
+}
+
+SEC("kretprobe")
+int handle_kretprobe(struct pt_regs *ctx)
+{
+ kretprobe_res = 2;
+ return 0;
+}
+
+SEC("uprobe")
+int handle_uprobe(struct pt_regs *ctx)
+{
+ uprobe_res = 3;
+ return 0;
+}
+
+SEC("uretprobe")
+int handle_uretprobe(struct pt_regs *ctx)
+{
+ uretprobe_res = 4;
+ return 0;
+}
+
+SEC("uprobe")
+int handle_uprobe_byname(struct pt_regs *ctx)
+{
+ uprobe_byname_res = 5;
+ return 0;
+}
+
+
+char _license[] SEC("license") = "GPL";
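
Because the SEC() annotations above carry no attach target, libbpf will not auto-attach these programs; the user-space side must attach each one explicitly. A minimal sketch of that manual path (assuming the generated skeleton; the kprobe symbol and uprobe offset are illustrative):

LIBBPF_OPTS(bpf_kprobe_opts, kopts);
LIBBPF_OPTS(bpf_uprobe_opts, uopts);
struct bpf_link *klink, *ulink;

/* attach the SEC("kprobe") program to an explicit kernel symbol */
klink = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
					"sys_nanosleep", &kopts);

/* attach the SEC("uprobe") program to an explicit binary:offset;
 * pid -1 means "all processes"
 */
ulink = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
					-1, "/proc/self/exe",
					uprobe_func_offset, &uopts);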
diff --git a/tools/testing/selftests/bpf/progs/test_autoattach.c b/tools/testing/selftests/bpf/progs/test_autoattach.c
new file mode 100644
index 000000000000..11a44493ebce
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_autoattach.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Google */
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+
+bool prog1_called = false;
+bool prog2_called = false;
+
+SEC("raw_tp/sys_enter")
+int prog1(const void *ctx)
+{
+ prog1_called = true;
+ return 0;
+}
+
+SEC("raw_tp/sys_exit")
+int prog2(const void *ctx)
+{
+ prog2_called = true;
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_cookie.c b/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
new file mode 100644
index 000000000000..c83142b55f47
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <errno.h>
+
+int my_tid;
+
+__u64 kprobe_res;
+__u64 kprobe_multi_res;
+__u64 kretprobe_res;
+__u64 uprobe_res;
+__u64 uretprobe_res;
+__u64 tp_res;
+__u64 pe_res;
+__u64 raw_tp_res;
+__u64 tp_btf_res;
+__u64 fentry_res;
+__u64 fexit_res;
+__u64 fmod_ret_res;
+__u64 lsm_res;
+
+static void update(void *ctx, __u64 *res)
+{
+ if (my_tid != (u32)bpf_get_current_pid_tgid())
+ return;
+
+ *res |= bpf_get_attach_cookie(ctx);
+}
+
+SEC("kprobe")
+int handle_kprobe(struct pt_regs *ctx)
+{
+ update(ctx, &kprobe_res);
+ return 0;
+}
+
+SEC("kretprobe")
+int handle_kretprobe(struct pt_regs *ctx)
+{
+ update(ctx, &kretprobe_res);
+ return 0;
+}
+
+SEC("uprobe")
+int handle_uprobe(struct pt_regs *ctx)
+{
+ update(ctx, &uprobe_res);
+ return 0;
+}
+
+SEC("uretprobe")
+int handle_uretprobe(struct pt_regs *ctx)
+{
+ update(ctx, &uretprobe_res);
+ return 0;
+}
+
+/* bpf_prog_array, used by the kernel internally to keep track of BPF
+ * programs attached to a given BPF hook (e.g., for tracepoints), doesn't
+ * allow the same BPF program to be attached multiple times. So have three
+ * identical copies ready to attach to the same tracepoint.
+ */
+SEC("tp/syscalls/sys_enter_nanosleep")
+int handle_tp1(struct pt_regs *ctx)
+{
+ update(ctx, &tp_res);
+ return 0;
+}
+SEC("tp/syscalls/sys_enter_nanosleep")
+int handle_tp2(struct pt_regs *ctx)
+{
+ update(ctx, &tp_res);
+ return 0;
+}
+SEC("tp/syscalls/sys_enter_nanosleep")
+int handle_tp3(void *ctx)
+{
+ update(ctx, &tp_res);
+ return 1;
+}
+
+SEC("perf_event")
+int handle_pe(struct pt_regs *ctx)
+{
+ update(ctx, &pe_res);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_raw_tp(void *ctx)
+{
+ update(ctx, &raw_tp_res);
+ return 0;
+}
+
+SEC("tp_btf/sys_enter")
+int handle_tp_btf(void *ctx)
+{
+ update(ctx, &tp_btf_res);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(fentry_test1, int a)
+{
+ update(ctx, &fentry_res);
+ return 0;
+}
+
+SEC("fexit/bpf_fentry_test1")
+int BPF_PROG(fexit_test1, int a, int ret)
+{
+ update(ctx, &fexit_res);
+ return 0;
+}
+
+SEC("fmod_ret/bpf_modify_return_test")
+int BPF_PROG(fmod_ret_test, int _a, int *_b, int _ret)
+{
+ update(ctx, &fmod_ret_res);
+ return 1234;
+}
+
+SEC("lsm/file_mprotect")
+int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
+ unsigned long reqprot, unsigned long prot, int ret)
+{
+ if (my_tid != (u32)bpf_get_current_pid_tgid())
+ return ret;
+ update(ctx, &lsm_res);
+ return -EPERM;
+}
+
+char _license[] SEC("license") = "GPL";
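
The value each update() call ORs into its result is the BPF cookie supplied at attach time, so one program reports different values per attachment. A sketch of the attach side (assuming the generated skeleton; the symbol and cookie value are illustrative):

LIBBPF_OPTS(bpf_kprobe_opts, opts, .bpf_cookie = 0x1);
struct bpf_link *link;

link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
				       "sys_nanosleep", &opts);
/* bpf_get_attach_cookie(ctx) in the program now returns 0x1 for this
 * link; re-attaching with a different .bpf_cookie yields a different
 * value without reloading the program
 */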
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_ma.c b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
new file mode 100644
index 000000000000..4a4e0b8d9b72
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+struct generic_map_value {
+ void *data;
+};
+
+char _license[] SEC("license") = "GPL";
+
+const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
+const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};
+
+const unsigned int percpu_data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512};
+const volatile unsigned int percpu_data_btf_ids[ARRAY_SIZE(percpu_data_sizes)] = {};
+
+int err = 0;
+u32 pid = 0;
+
+#define DEFINE_ARRAY_WITH_KPTR(_size) \
+ struct bin_data_##_size { \
+ char data[_size - sizeof(void *)]; \
+ }; \
+ /* See commit 5d8d6634ccc: force BTF generation for type bin_data_##_size */ \
+ struct bin_data_##_size *__bin_data_##_size; \
+ struct map_value_##_size { \
+ struct bin_data_##_size __kptr * data; \
+ }; \
+ struct { \
+ __uint(type, BPF_MAP_TYPE_ARRAY); \
+ __type(key, int); \
+ __type(value, struct map_value_##_size); \
+ __uint(max_entries, 128); \
+ } array_##_size SEC(".maps")
+
+#define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
+ struct percpu_bin_data_##_size { \
+ char data[_size]; \
+ }; \
+ struct percpu_bin_data_##_size *__percpu_bin_data_##_size; \
+ struct map_value_percpu_##_size { \
+ struct percpu_bin_data_##_size __percpu_kptr * data; \
+ }; \
+ struct { \
+ __uint(type, BPF_MAP_TYPE_ARRAY); \
+ __type(key, int); \
+ __type(value, struct map_value_percpu_##_size); \
+ __uint(max_entries, 128); \
+ } array_percpu_##_size SEC(".maps")
+
+static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)
+{
+ struct generic_map_value *value;
+ unsigned int i, key;
+ void *old, *new;
+
+ for (i = 0; i < batch; i++) {
+ key = i;
+ value = bpf_map_lookup_elem(map, &key);
+ if (!value) {
+ err = 1;
+ return;
+ }
+ new = bpf_obj_new_impl(data_btf_ids[idx], NULL);
+ if (!new) {
+ err = 2;
+ return;
+ }
+ old = bpf_kptr_xchg(&value->data, new);
+ if (old) {
+ bpf_obj_drop(old);
+ err = 3;
+ return;
+ }
+ }
+}
+
+static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)
+{
+ struct generic_map_value *value;
+ unsigned int i, key;
+ void *old;
+
+ for (i = 0; i < batch; i++) {
+ key = i;
+ value = bpf_map_lookup_elem(map, &key);
+ if (!value) {
+ err = 4;
+ return;
+ }
+ old = bpf_kptr_xchg(&value->data, NULL);
+ if (!old) {
+ err = 5;
+ return;
+ }
+ bpf_obj_drop(old);
+ }
+}
+
+static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,
+ unsigned int idx)
+{
+ struct generic_map_value *value;
+ unsigned int i, key;
+ void *old, *new;
+
+ for (i = 0; i < batch; i++) {
+ key = i;
+ value = bpf_map_lookup_elem(map, &key);
+ if (!value) {
+ err = 1;
+ return;
+ }
+ /* per-cpu allocator may not be able to refill in time */
+ new = bpf_percpu_obj_new_impl(percpu_data_btf_ids[idx], NULL);
+ if (!new)
+ continue;
+
+ old = bpf_kptr_xchg(&value->data, new);
+ if (old) {
+ bpf_percpu_obj_drop(old);
+ err = 2;
+ return;
+ }
+ }
+}
+
+static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,
+ unsigned int idx)
+{
+ struct generic_map_value *value;
+ unsigned int i, key;
+ void *old;
+
+ for (i = 0; i < batch; i++) {
+ key = i;
+ value = bpf_map_lookup_elem(map, &key);
+ if (!value) {
+ err = 3;
+ return;
+ }
+ old = bpf_kptr_xchg(&value->data, NULL);
+ if (!old)
+ continue;
+ bpf_percpu_obj_drop(old);
+ }
+}
+
+#define CALL_BATCH_ALLOC(size, batch, idx) \
+ batch_alloc((struct bpf_map *)(&array_##size), batch, idx)
+
+#define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
+ do { \
+ batch_alloc((struct bpf_map *)(&array_##size), batch, idx); \
+ batch_free((struct bpf_map *)(&array_##size), batch, idx); \
+ } while (0)
+
+#define CALL_BATCH_PERCPU_ALLOC(size, batch, idx) \
+ batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx)
+
+#define CALL_BATCH_PERCPU_ALLOC_FREE(size, batch, idx) \
+ do { \
+ batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx); \
+ batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \
+ } while (0)
+
+/* kptr doesn't support bin_data_8, whose data[] member would be zero-sized */
+DEFINE_ARRAY_WITH_KPTR(16);
+DEFINE_ARRAY_WITH_KPTR(32);
+DEFINE_ARRAY_WITH_KPTR(64);
+DEFINE_ARRAY_WITH_KPTR(96);
+DEFINE_ARRAY_WITH_KPTR(128);
+DEFINE_ARRAY_WITH_KPTR(192);
+DEFINE_ARRAY_WITH_KPTR(256);
+DEFINE_ARRAY_WITH_KPTR(512);
+DEFINE_ARRAY_WITH_KPTR(1024);
+DEFINE_ARRAY_WITH_KPTR(2048);
+DEFINE_ARRAY_WITH_KPTR(4096);
+
+DEFINE_ARRAY_WITH_PERCPU_KPTR(8);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(16);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(32);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(64);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(96);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(512);
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_batch_alloc_free(void *ctx)
+{
+ if ((u32)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+ /* Alloc 128 16-byte objects in batch to trigger refilling,
+ * then free 128 16-byte objects in batch to trigger freeing.
+ */
+ CALL_BATCH_ALLOC_FREE(16, 128, 0);
+ CALL_BATCH_ALLOC_FREE(32, 128, 1);
+ CALL_BATCH_ALLOC_FREE(64, 128, 2);
+ CALL_BATCH_ALLOC_FREE(96, 128, 3);
+ CALL_BATCH_ALLOC_FREE(128, 128, 4);
+ CALL_BATCH_ALLOC_FREE(192, 128, 5);
+ CALL_BATCH_ALLOC_FREE(256, 128, 6);
+ CALL_BATCH_ALLOC_FREE(512, 64, 7);
+ CALL_BATCH_ALLOC_FREE(1024, 32, 8);
+ CALL_BATCH_ALLOC_FREE(2048, 16, 9);
+ CALL_BATCH_ALLOC_FREE(4096, 8, 10);
+
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_free_through_map_free(void *ctx)
+{
+ if ((u32)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+ /* Alloc 128 16-byte objects in batch to trigger refilling,
+ * then free these objects through map free.
+ */
+ CALL_BATCH_ALLOC(16, 128, 0);
+ CALL_BATCH_ALLOC(32, 128, 1);
+ CALL_BATCH_ALLOC(64, 128, 2);
+ CALL_BATCH_ALLOC(96, 128, 3);
+ CALL_BATCH_ALLOC(128, 128, 4);
+ CALL_BATCH_ALLOC(192, 128, 5);
+ CALL_BATCH_ALLOC(256, 128, 6);
+ CALL_BATCH_ALLOC(512, 64, 7);
+ CALL_BATCH_ALLOC(1024, 32, 8);
+ CALL_BATCH_ALLOC(2048, 16, 9);
+ CALL_BATCH_ALLOC(4096, 8, 10);
+
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_batch_percpu_alloc_free(void *ctx)
+{
+ if ((u32)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+ /* Alloc 128 8-byte per-cpu objects in batch to trigger refilling,
+ * then free 128 8-byte per-cpu objects in batch to trigger freeing.
+ */
+ CALL_BATCH_PERCPU_ALLOC_FREE(8, 128, 0);
+ CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
+ CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
+ CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
+ CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
+ CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
+ CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
+ CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
+ CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);
+
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_percpu_free_through_map_free(void *ctx)
+{
+ if ((u32)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+ /* Alloc 128 8-byte per-cpu objects in batch to trigger refilling,
+ * then free these objects through map free.
+ */
+ CALL_BATCH_PERCPU_ALLOC(8, 128, 0);
+ CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
+ CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
+ CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
+ CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
+ CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
+ CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
+ CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
+ CALL_BATCH_PERCPU_ALLOC(512, 64, 8);
+
+ return 0;
+}
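
For reference, this is what DEFINE_ARRAY_WITH_KPTR(16) above expands to; every per-size array shares the same shape (a mechanical expansion, shown only for illustration):

struct bin_data_16 {
	char data[16 - sizeof(void *)];
};
/* dummy pointer forcing BTF generation for bin_data_16 */
struct bin_data_16 *__bin_data_16;
struct map_value_16 {
	struct bin_data_16 __kptr *data;
};
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, struct map_value_16);
	__uint(max_entries, 128);
} array_16 SEC(".maps");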
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
new file mode 100644
index 000000000000..f7b330ddd007
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define EAFNOSUPPORT 97
+#define EPROTO 71
+#define ENONET 64
+#define EINVAL 22
+#define ENOENT 2
+
+#define NF_CT_ZONE_DIR_ORIG (1 << IP_CT_DIR_ORIGINAL)
+#define NF_CT_ZONE_DIR_REPL (1 << IP_CT_DIR_REPLY)
+
+extern unsigned long CONFIG_HZ __kconfig;
+
+int test_einval_bpf_tuple = 0;
+int test_einval_reserved = 0;
+int test_einval_reserved_new = 0;
+int test_einval_netns_id = 0;
+int test_einval_len_opts = 0;
+int test_eproto_l4proto = 0;
+int test_enonet_netns_id = 0;
+int test_enoent_lookup = 0;
+int test_eafnosupport = 0;
+int test_alloc_entry = -EINVAL;
+int test_insert_entry = -EAFNOSUPPORT;
+int test_succ_lookup = -ENOENT;
+int test_ct_zone_id_alloc_entry = -EINVAL;
+int test_ct_zone_id_insert_entry = -EAFNOSUPPORT;
+int test_ct_zone_id_succ_lookup = -ENOENT;
+int test_ct_zone_dir_enoent_lookup = 0;
+int test_ct_zone_id_enoent_lookup = 0;
+u32 test_delta_timeout = 0;
+u32 test_status = 0;
+u32 test_insert_lookup_mark = 0;
+int test_snat_addr = -EINVAL;
+int test_dnat_addr = -EINVAL;
+__be32 saddr = 0;
+__be16 sport = 0;
+__be32 daddr = 0;
+__be16 dport = 0;
+int test_exist_lookup = -ENOENT;
+u32 test_exist_lookup_mark = 0;
+
+enum nf_nat_manip_type___local {
+ NF_NAT_MANIP_SRC___local,
+ NF_NAT_MANIP_DST___local
+};
+
+struct nf_conn;
+
+struct bpf_ct_opts___local {
+ s32 netns_id;
+ s32 error;
+ u8 l4proto;
+ u8 dir;
+ u8 reserved[2];
+};
+
+struct bpf_ct_opts___new {
+ s32 netns_id;
+ s32 error;
+ u8 l4proto;
+ u8 dir;
+ u16 ct_zone_id;
+ u8 ct_zone_dir;
+ u8 reserved[3];
+} __attribute__((preserve_access_index));
+
+struct nf_conn *bpf_xdp_ct_alloc(struct xdp_md *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_skb_ct_alloc(struct __sk_buff *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_ct_insert_entry(struct nf_conn *) __ksym;
+void bpf_ct_release(struct nf_conn *) __ksym;
+void bpf_ct_set_timeout(struct nf_conn *, u32) __ksym;
+int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym;
+int bpf_ct_set_status(struct nf_conn *, u32) __ksym;
+int bpf_ct_change_status(struct nf_conn *, u32) __ksym;
+int bpf_ct_set_nat_info(struct nf_conn *, union nf_inet_addr *,
+ int port, enum nf_nat_manip_type___local) __ksym;
+
+static __always_inline void
+nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32),
+ struct nf_conn *(*alloc_fn)(void *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32),
+ void *ctx)
+{
+ struct bpf_ct_opts___local opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 };
+ struct bpf_sock_tuple bpf_tuple;
+ struct nf_conn *ct;
+
+ __builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4));
+
+ ct = lookup_fn(ctx, NULL, 0, &opts_def, sizeof(opts_def));
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_einval_bpf_tuple = opts_def.error;
+
+ opts_def.reserved[0] = 1;
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ opts_def.reserved[0] = 0;
+ opts_def.l4proto = IPPROTO_TCP;
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_einval_reserved = opts_def.error;
+
+ opts_def.netns_id = -2;
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ opts_def.netns_id = -1;
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_einval_netns_id = opts_def.error;
+
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def) - 1);
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_einval_len_opts = opts_def.error;
+
+ opts_def.l4proto = IPPROTO_ICMP;
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ opts_def.l4proto = IPPROTO_TCP;
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_eproto_l4proto = opts_def.error;
+
+ opts_def.netns_id = 0xf00f;
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ opts_def.netns_id = -1;
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_enonet_netns_id = opts_def.error;
+
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_enoent_lookup = opts_def.error;
+
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4) - 1, &opts_def,
+ sizeof(opts_def));
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_eafnosupport = opts_def.error;
+
+ bpf_tuple.ipv4.saddr = bpf_get_prandom_u32(); /* src IP */
+ bpf_tuple.ipv4.daddr = bpf_get_prandom_u32(); /* dst IP */
+ bpf_tuple.ipv4.sport = bpf_get_prandom_u32(); /* src port */
+ bpf_tuple.ipv4.dport = bpf_get_prandom_u32(); /* dst port */
+
+ ct = alloc_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ if (ct) {
+ __u16 sport = bpf_get_prandom_u32();
+ __u16 dport = bpf_get_prandom_u32();
+ union nf_inet_addr saddr = {};
+ union nf_inet_addr daddr = {};
+ struct nf_conn *ct_ins;
+
+ bpf_ct_set_timeout(ct, 10000);
+ ct->mark = 77;
+
+ /* snat */
+ saddr.ip = bpf_get_prandom_u32();
+ bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC___local);
+ /* dnat */
+ daddr.ip = bpf_get_prandom_u32();
+ bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST___local);
+
+ ct_ins = bpf_ct_insert_entry(ct);
+ if (ct_ins) {
+ struct nf_conn *ct_lk;
+
+ ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+ &opts_def, sizeof(opts_def));
+ if (ct_lk) {
+ struct nf_conntrack_tuple *tuple;
+
+ /* check snat and dnat addresses */
+ tuple = &ct_lk->tuplehash[IP_CT_DIR_REPLY].tuple;
+ if (tuple->dst.u3.ip == saddr.ip &&
+ tuple->dst.u.all == bpf_htons(sport))
+ test_snat_addr = 0;
+ if (tuple->src.u3.ip == daddr.ip &&
+ tuple->src.u.all == bpf_htons(dport))
+ test_dnat_addr = 0;
+
+ /* update ct entry timeout */
+ bpf_ct_change_timeout(ct_lk, 10000);
+ test_delta_timeout = ct_lk->timeout - bpf_jiffies64();
+ test_delta_timeout /= CONFIG_HZ;
+ test_insert_lookup_mark = ct_lk->mark;
+ bpf_ct_change_status(ct_lk,
+ IPS_CONFIRMED | IPS_SEEN_REPLY);
+ test_status = ct_lk->status;
+
+ bpf_ct_release(ct_lk);
+ test_succ_lookup = 0;
+ }
+ bpf_ct_release(ct_ins);
+ test_insert_entry = 0;
+ }
+ test_alloc_entry = 0;
+ }
+
+ bpf_tuple.ipv4.saddr = saddr;
+ bpf_tuple.ipv4.daddr = daddr;
+ bpf_tuple.ipv4.sport = sport;
+ bpf_tuple.ipv4.dport = dport;
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ if (ct) {
+ test_exist_lookup = 0;
+ if (ct->mark == 42) {
+ ct->mark++;
+ test_exist_lookup_mark = ct->mark;
+ }
+ bpf_ct_release(ct);
+ } else {
+ test_exist_lookup = opts_def.error;
+ }
+}
+
+static __always_inline void
+nf_ct_opts_new_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___new *, u32),
+ struct nf_conn *(*alloc_fn)(void *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___new *, u32),
+ void *ctx)
+{
+ struct bpf_ct_opts___new opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 };
+ struct bpf_sock_tuple bpf_tuple;
+ struct nf_conn *ct;
+
+ __builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4));
+
+ opts_def.reserved[0] = 1;
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ opts_def.reserved[0] = 0;
+ if (ct)
+ bpf_ct_release(ct);
+ else
+ test_einval_reserved_new = opts_def.error;
+
+ bpf_tuple.ipv4.saddr = bpf_get_prandom_u32(); /* src IP */
+ bpf_tuple.ipv4.daddr = bpf_get_prandom_u32(); /* dst IP */
+ bpf_tuple.ipv4.sport = bpf_get_prandom_u32(); /* src port */
+ bpf_tuple.ipv4.dport = bpf_get_prandom_u32(); /* dst port */
+
+ /* use non-default ct zone */
+ opts_def.ct_zone_id = 10;
+ opts_def.ct_zone_dir = NF_CT_ZONE_DIR_ORIG;
+ ct = alloc_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ if (ct) {
+ __u16 sport = bpf_get_prandom_u32();
+ __u16 dport = bpf_get_prandom_u32();
+ union nf_inet_addr saddr = {};
+ union nf_inet_addr daddr = {};
+ struct nf_conn *ct_ins;
+
+ bpf_ct_set_timeout(ct, 10000);
+
+ /* snat */
+ saddr.ip = bpf_get_prandom_u32();
+ bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC___local);
+ /* dnat */
+ daddr.ip = bpf_get_prandom_u32();
+ bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST___local);
+
+ ct_ins = bpf_ct_insert_entry(ct);
+ if (ct_ins) {
+ struct nf_conn *ct_lk;
+
+ /* entry should exist in same ct zone we inserted it */
+ ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+ &opts_def, sizeof(opts_def));
+ if (ct_lk) {
+ bpf_ct_release(ct_lk);
+ test_ct_zone_id_succ_lookup = 0;
+ }
+
+ /* entry should not exist with wrong direction */
+ opts_def.ct_zone_dir = NF_CT_ZONE_DIR_REPL;
+ ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+ &opts_def, sizeof(opts_def));
+ opts_def.ct_zone_dir = NF_CT_ZONE_DIR_ORIG;
+ if (ct_lk)
+ bpf_ct_release(ct_lk);
+ else
+ test_ct_zone_dir_enoent_lookup = opts_def.error;
+
+ /* entry should not exist in default ct zone */
+ opts_def.ct_zone_id = 0;
+ ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+ &opts_def, sizeof(opts_def));
+ if (ct_lk)
+ bpf_ct_release(ct_lk);
+ else
+ test_ct_zone_id_enoent_lookup = opts_def.error;
+
+ bpf_ct_release(ct_ins);
+ test_ct_zone_id_insert_entry = 0;
+ }
+ test_ct_zone_id_alloc_entry = 0;
+ }
+}
+
+SEC("xdp")
+int nf_xdp_ct_test(struct xdp_md *ctx)
+{
+ nf_ct_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx);
+ nf_ct_opts_new_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx);
+ return 0;
+}
+
+SEC("tc")
+int nf_skb_ct_test(struct __sk_buff *ctx)
+{
+ nf_ct_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx);
+ nf_ct_opts_new_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
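
The ___local and ___new suffixes follow libbpf's type-flavor convention: everything from the triple underscore onward is stripped when the type is matched against kernel BTF, so both structs relocate against the kernel's struct bpf_ct_opts while letting a single object describe two layouts. A hedged sketch of how a program could probe for the newer layout with the standard CO-RE macro:

#include <bpf/bpf_core_read.h>

static __always_inline bool kernel_has_ct_zones(void)
{
	/* true only if the running kernel's struct bpf_ct_opts really
	 * has a ct_zone_id member
	 */
	return bpf_core_field_exists(struct bpf_ct_opts___new, ct_zone_id);
}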
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
new file mode 100644
index 000000000000..a586f087ffeb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+struct nf_conn;
+
+struct bpf_ct_opts___local {
+ s32 netns_id;
+ s32 error;
+ u8 l4proto;
+ u8 reserved[3];
+} __attribute__((preserve_access_index));
+
+struct nf_conn *bpf_skb_ct_alloc(struct __sk_buff *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_ct_insert_entry(struct nf_conn *) __ksym;
+void bpf_ct_release(struct nf_conn *) __ksym;
+void bpf_ct_set_timeout(struct nf_conn *, u32) __ksym;
+int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym;
+int bpf_ct_set_status(struct nf_conn *, u32) __ksym;
+int bpf_ct_change_status(struct nf_conn *, u32) __ksym;
+
+SEC("?tc")
+int alloc_release(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ bpf_ct_release(ct);
+ return 0;
+}
+
+SEC("?tc")
+int insert_insert(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ ct = bpf_ct_insert_entry(ct);
+ if (!ct)
+ return 0;
+ ct = bpf_ct_insert_entry(ct);
+ return 0;
+}
+
+SEC("?tc")
+int lookup_insert(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ bpf_ct_insert_entry(ct);
+ return 0;
+}
+
+SEC("?tc")
+int write_not_allowlisted_field(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ ct->status = 0xF00;
+ return 0;
+}
+
+SEC("?tc")
+int set_timeout_after_insert(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ ct = bpf_ct_insert_entry(ct);
+ if (!ct)
+ return 0;
+ bpf_ct_set_timeout(ct, 0);
+ return 0;
+}
+
+SEC("?tc")
+int set_status_after_insert(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ ct = bpf_ct_insert_entry(ct);
+ if (!ct)
+ return 0;
+ bpf_ct_set_status(ct, 0);
+ return 0;
+}
+
+SEC("?tc")
+int change_timeout_after_alloc(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ bpf_ct_change_timeout(ct, 0);
+ return 0;
+}
+
+SEC("?tc")
+int change_status_after_alloc(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ bpf_ct_change_status(ct, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
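
Every program above uses the "?tc" section prefix, which marks it as not auto-loaded; the user-space test enables one program at a time and expects the verifier to reject it (lookup_insert, for instance, must fail because bpf_ct_insert_entry() requires the reference type returned by the alloc kfuncs, not by a lookup). A sketch of that driver loop (assuming the generated skeleton):

struct test_bpf_nf_fail *skel = test_bpf_nf_fail__open();

/* "?tc" programs default to autoload=false; enable exactly one */
bpf_program__set_autoload(skel->progs.lookup_insert, true);
if (!test_bpf_nf_fail__load(skel))
	/* unexpectedly passed the verifier: test failure */;
test_bpf_nf_fail__destroy(skel);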
diff --git a/tools/testing/selftests/bpf/progs/test_btf_decl_tag.c b/tools/testing/selftests/bpf/progs/test_btf_decl_tag.c
new file mode 100644
index 000000000000..c88ccc53529a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_btf_decl_tag.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#if __has_attribute(btf_decl_tag)
+#define __tag1 __attribute__((btf_decl_tag("tag1")))
+#define __tag2 __attribute__((btf_decl_tag("tag2")))
+volatile const bool skip_tests __tag1 __tag2 = false;
+#else
+#define __tag1
+#define __tag2
+volatile const bool skip_tests = true;
+#endif
+
+struct key_t {
+ int a;
+ int b __tag1 __tag2;
+ int c;
+} __tag1 __tag2;
+
+typedef struct {
+ int a;
+ int b;
+} value_t __tag1 __tag2;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 3);
+ __type(key, struct key_t);
+ __type(value, value_t);
+} hashmap1 SEC(".maps");
+
+
+static __noinline int foo(int x __tag1 __tag2) __tag1 __tag2
+{
+ struct key_t key;
+ value_t val = {};
+
+ key.a = key.b = key.c = x;
+ bpf_map_update_elem(&hashmap1, &key, &val, 0);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(sub, int x)
+{
+ return foo(x);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_btf_ext.c b/tools/testing/selftests/bpf/progs/test_btf_ext.c
new file mode 100644
index 000000000000..cdf20331db04
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_btf_ext.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Meta Platforms Inc. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__noinline static void f0(void)
+{
+ __u64 a = 1;
+
+ __sink(a);
+}
+
+SEC("xdp")
+__u64 global_func(struct xdp_md *xdp)
+{
+ f0();
+ return XDP_DROP;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_btf_haskv.c b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
deleted file mode 100644
index 31538c9ed193..000000000000
--- a/tools/testing/selftests/bpf/progs/test_btf_haskv.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018 Facebook */
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-#include "bpf_legacy.h"
-
-int _version SEC("version") = 1;
-
-struct ipv_counts {
- unsigned int v4;
- unsigned int v6;
-};
-
-struct bpf_map_def SEC("maps") btf_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(struct ipv_counts),
- .max_entries = 4,
-};
-
-BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts);
-
-__attribute__((noinline))
-int test_long_fname_2(void)
-{
- struct ipv_counts *counts;
- int key = 0;
-
- counts = bpf_map_lookup_elem(&btf_map, &key);
- if (!counts)
- return 0;
-
- counts->v6++;
-
- return 0;
-}
-
-__attribute__((noinline))
-int test_long_fname_1(void)
-{
- return test_long_fname_2();
-}
-
-SEC("dummy_tracepoint")
-int _dummy_tracepoint(void *arg)
-{
- return test_long_fname_1();
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c b/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c
index c1e0c8c7c55f..c218cf8989a9 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c
@@ -21,8 +21,8 @@ struct inner_map_sz2 {
struct outer_arr {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 3);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
/* it's possible to use anonymous struct as inner map definition here */
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
@@ -61,8 +61,8 @@ struct inner_map_sz4 {
struct outer_arr_dyn {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 3);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(map_flags, BPF_F_INNER_MAP);
@@ -81,7 +81,7 @@ struct outer_arr_dyn {
struct outer_hash {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, 5);
- __uint(key_size, sizeof(int));
+ __type(key, int);
/* Here everything works flawlessly due to reuse of struct inner_map
* and compiler will complain at the attempt to use non-inner_map
* references below. This is great experience.
@@ -111,8 +111,8 @@ struct sockarr_sz2 {
struct outer_sockarr_sz1 {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
__array(values, struct sockarr_sz1);
} outer_sockarr SEC(".maps") = {
.values = { (void *)&sockarr_sz1 },
diff --git a/tools/testing/selftests/bpf/progs/test_btf_newkv.c b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
index 6c5560162746..251854a041b5 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_newkv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
@@ -4,23 +4,11 @@
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"
-int _version SEC("version") = 1;
-
struct ipv_counts {
unsigned int v4;
unsigned int v6;
};
-/* just to validate we can handle maps in multiple sections */
-struct bpf_map_def SEC("maps") btf_map_legacy = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(long long),
- .max_entries = 4,
-};
-
-BPF_ANNOTATE_KV_PAIR(btf_map_legacy, int, struct ipv_counts);
-
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 4);
@@ -40,11 +28,6 @@ int test_long_fname_2(void)
counts->v6++;
- /* just verify we can reference both maps */
- counts = bpf_map_lookup_elem(&btf_map_legacy, &key);
- if (!counts)
- return 0;
-
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_btf_nokv.c b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
index 506da7fd2da2..1dabb88f8cb4 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_nokv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
@@ -3,19 +3,17 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-int _version SEC("version") = 1;
-
struct ipv_counts {
unsigned int v4;
unsigned int v6;
};
-struct bpf_map_def SEC("maps") btf_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(struct ipv_counts),
- .max_entries = 4,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(struct ipv_counts));
+ __uint(max_entries, 4);
+} btf_map SEC(".maps");
__attribute__((noinline))
int test_long_fname_2(void)
diff --git a/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c b/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c
index 9a6b85dd52d2..1cd1a1b72cb5 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c
@@ -1,31 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include <string.h>
-#include <errno.h>
-#include <netinet/in.h>
-#include <linux/stddef.h>
-#include <linux/bpf.h>
-#include <linux/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/if_ether.h>
-#include <linux/pkt_cls.h>
-
+#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-#include "bpf_tcp_helpers.h"
+
+#ifndef ENOENT
+#define ENOENT 2
+#endif
struct sockaddr_in6 srv_sa6 = {};
+struct sockaddr_in srv_sa4 = {};
__u16 listen_tp_sport = 0;
__u16 req_sk_sport = 0;
__u32 recv_cookie = 0;
__u32 gen_cookie = 0;
+__u32 mss = 0;
__u32 linum = 0;
#define LOG() ({ if (!linum) linum = __LINE__; })
-static void test_syncookie_helper(struct ipv6hdr *ip6h, struct tcphdr *th,
- struct tcp_sock *tp,
+static void test_syncookie_helper(void *iphdr, int iphdr_size,
+ struct tcphdr *th, struct tcp_sock *tp,
struct __sk_buff *skb)
{
if (th->syn) {
@@ -44,17 +40,18 @@ static void test_syncookie_helper(struct ipv6hdr *ip6h, struct tcphdr *th,
return;
}
- mss_cookie = bpf_tcp_gen_syncookie(tp, ip6h, sizeof(*ip6h),
+ mss_cookie = bpf_tcp_gen_syncookie(tp, iphdr, iphdr_size,
th, 40);
if (mss_cookie < 0) {
if (mss_cookie != -ENOENT)
LOG();
} else {
gen_cookie = (__u32)mss_cookie;
+ mss = mss_cookie >> 32;
}
} else if (gen_cookie) {
/* It was in cookie mode */
- int ret = bpf_tcp_check_syncookie(tp, ip6h, sizeof(*ip6h),
+ int ret = bpf_tcp_check_syncookie(tp, iphdr, iphdr_size,
th, sizeof(*th));
if (ret < 0) {
@@ -66,26 +63,58 @@ static void test_syncookie_helper(struct ipv6hdr *ip6h, struct tcphdr *th,
}
}
-static int handle_ip6_tcp(struct ipv6hdr *ip6h, struct __sk_buff *skb)
+static int handle_ip_tcp(struct ethhdr *eth, struct __sk_buff *skb)
{
- struct bpf_sock_tuple *tuple;
+ struct bpf_sock_tuple *tuple = NULL;
+ unsigned int tuple_len = 0;
struct bpf_sock *bpf_skc;
- unsigned int tuple_len;
+ void *data_end, *iphdr;
+ struct ipv6hdr *ip6h;
+ struct iphdr *ip4h;
struct tcphdr *th;
- void *data_end;
+ int iphdr_size;
data_end = (void *)(long)(skb->data_end);
- th = (struct tcphdr *)(ip6h + 1);
- if (th + 1 > data_end)
- return TC_ACT_OK;
-
- /* Is it the testing traffic? */
- if (th->dest != srv_sa6.sin6_port)
+ switch (eth->h_proto) {
+ case bpf_htons(ETH_P_IP):
+ ip4h = (struct iphdr *)(eth + 1);
+ if (ip4h + 1 > data_end)
+ return TC_ACT_OK;
+ if (ip4h->protocol != IPPROTO_TCP)
+ return TC_ACT_OK;
+ th = (struct tcphdr *)(ip4h + 1);
+ if (th + 1 > data_end)
+ return TC_ACT_OK;
+ /* Is it the testing traffic? */
+ if (th->dest != srv_sa4.sin_port)
+ return TC_ACT_OK;
+ tuple_len = sizeof(tuple->ipv4);
+ tuple = (struct bpf_sock_tuple *)&ip4h->saddr;
+ iphdr = ip4h;
+ iphdr_size = sizeof(*ip4h);
+ break;
+ case bpf_htons(ETH_P_IPV6):
+ ip6h = (struct ipv6hdr *)(eth + 1);
+ if (ip6h + 1 > data_end)
+ return TC_ACT_OK;
+ if (ip6h->nexthdr != IPPROTO_TCP)
+ return TC_ACT_OK;
+ th = (struct tcphdr *)(ip6h + 1);
+ if (th + 1 > data_end)
+ return TC_ACT_OK;
+ /* Is it the testing traffic? */
+ if (th->dest != srv_sa6.sin6_port)
+ return TC_ACT_OK;
+ tuple_len = sizeof(tuple->ipv6);
+ tuple = (struct bpf_sock_tuple *)&ip6h->saddr;
+ iphdr = ip6h;
+ iphdr_size = sizeof(*ip6h);
+ break;
+ default:
return TC_ACT_OK;
+ }
- tuple_len = sizeof(tuple->ipv6);
- tuple = (struct bpf_sock_tuple *)&ip6h->saddr;
if ((void *)tuple + tuple_len > data_end) {
LOG();
return TC_ACT_OK;
@@ -132,7 +161,7 @@ static int handle_ip6_tcp(struct ipv6hdr *ip6h, struct __sk_buff *skb)
listen_tp_sport = tp->inet_conn.icsk_inet.sk.__sk_common.skc_num;
- test_syncookie_helper(ip6h, th, tp, skb);
+ test_syncookie_helper(iphdr, iphdr_size, th, tp, skb);
bpf_sk_release(tp);
return TC_ACT_OK;
}
@@ -145,10 +174,9 @@ release:
return TC_ACT_OK;
}
-SEC("classifier/ingress")
+SEC("tc")
int cls_ingress(struct __sk_buff *skb)
{
- struct ipv6hdr *ip6h;
struct ethhdr *eth;
void *data_end;
@@ -158,17 +186,11 @@ int cls_ingress(struct __sk_buff *skb)
if (eth + 1 > data_end)
return TC_ACT_OK;
- if (eth->h_proto != bpf_htons(ETH_P_IPV6))
- return TC_ACT_OK;
-
- ip6h = (struct ipv6hdr *)(eth + 1);
- if (ip6h + 1 > data_end)
+ if (eth->h_proto != bpf_htons(ETH_P_IP) &&
+ eth->h_proto != bpf_htons(ETH_P_IPV6))
return TC_ACT_OK;
- if (ip6h->nexthdr == IPPROTO_TCP)
- return handle_ip6_tcp(ip6h, skb);
-
- return TC_ACT_OK;
+ return handle_ip_tcp(eth, skb);
}
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_build_id.c b/tools/testing/selftests/bpf/progs/test_build_id.c
new file mode 100644
index 000000000000..32ce59f9aa27
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_build_id.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+struct bpf_stack_build_id stack_sleepable[128];
+int res_sleepable;
+
+struct bpf_stack_build_id stack_nofault[128];
+int res_nofault;
+
+SEC("uprobe.multi/./uprobe_multi:uprobe")
+int uprobe_nofault(struct pt_regs *ctx)
+{
+ res_nofault = bpf_get_stack(ctx, stack_nofault, sizeof(stack_nofault),
+ BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
+
+ return 0;
+}
+
+SEC("uprobe.multi.s/./uprobe_multi:uprobe")
+int uprobe_sleepable(struct pt_regs *ctx)
+{
+ res_sleepable = bpf_get_stack(ctx, stack_sleepable, sizeof(stack_sleepable),
+ BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
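
The two flavors exist because resolving a build-id may require faulting in the pages that back the mapping: the plain uprobe can only succeed when those pages are already resident, while the sleepable (.s) variant may page them in. User space would check the result roughly like this (a sketch, assuming the generated skeleton; BPF_STACK_BUILD_ID_VALID is the uapi status value):

struct bpf_stack_build_id *e = &skel->bss->stack_sleepable[0];

/* bpf_get_stack() returns the number of bytes written on success */
if (skel->bss->res_sleepable > 0 && e->status == BPF_STACK_BUILD_ID_VALID)
	/* e->build_id now holds the GNU build-id of the mapped object */;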
diff --git a/tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c b/tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c
new file mode 100644
index 000000000000..4fee0fdc7607
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_cgroup1_hierarchy.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+__u32 target_ancestor_level;
+__u64 target_ancestor_cgid;
+int target_pid, target_hid;
+
+struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
+struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+
+static int bpf_link_create_verify(int cmd)
+{
+ struct cgroup *cgrp, *ancestor;
+ struct task_struct *task;
+ int ret = 0;
+
+ if (cmd != BPF_LINK_CREATE)
+ return 0;
+
+ task = bpf_get_current_task_btf();
+
+ /* Then it can run in parallel with others */
+ if (task->pid != target_pid)
+ return 0;
+
+ cgrp = bpf_task_get_cgroup1(task, target_hid);
+ if (!cgrp)
+ return 0;
+
+ /* Refuse it if its cgid or its ancestor's cgid is the target cgid */
+ if (cgrp->kn->id == target_ancestor_cgid)
+ ret = -1;
+
+ ancestor = bpf_cgroup_ancestor(cgrp, target_ancestor_level);
+ if (!ancestor)
+ goto out;
+
+ if (ancestor->kn->id == target_ancestor_cgid)
+ ret = -1;
+ bpf_cgroup_release(ancestor);
+
+out:
+ bpf_cgroup_release(cgrp);
+ return ret;
+}
+
+SEC("lsm/bpf")
+int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ return bpf_link_create_verify(cmd);
+}
+
+SEC("lsm.s/bpf")
+int BPF_PROG(lsm_s_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ return bpf_link_create_verify(cmd);
+}
+
+SEC("fentry")
+int BPF_PROG(fentry_run)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_cgroup_link.c b/tools/testing/selftests/bpf/progs/test_cgroup_link.c
index 77e47b9e4446..4faba88e45a5 100644
--- a/tools/testing/selftests/bpf/progs/test_cgroup_link.c
+++ b/tools/testing/selftests/bpf/progs/test_cgroup_link.c
@@ -6,14 +6,14 @@
int calls = 0;
int alt_calls = 0;
-SEC("cgroup_skb/egress1")
+SEC("cgroup_skb/egress")
int egress(struct __sk_buff *skb)
{
__sync_fetch_and_add(&calls, 1);
return 1;
}
-SEC("cgroup_skb/egress2")
+SEC("cgroup_skb/egress")
int egress_alt(struct __sk_buff *skb)
{
__sync_fetch_and_add(&alt_calls, 1);
diff --git a/tools/testing/selftests/bpf/progs/test_check_mtu.c b/tools/testing/selftests/bpf/progs/test_check_mtu.c
new file mode 100644
index 000000000000..7b6b2b342c1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_check_mtu.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Jesper Dangaard Brouer */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/if_ether.h>
+
+#include <stddef.h>
+#include <stdint.h>
+#include <errno.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* Userspace will update with MTU it can see on device */
+volatile const int GLOBAL_USER_MTU;
+volatile const __u32 GLOBAL_USER_IFINDEX;
+
+/* BPF-prog will update these with MTU values it can see */
+__u32 global_bpf_mtu_xdp = 0;
+__u32 global_bpf_mtu_tc = 0;
+
+SEC("xdp")
+int xdp_use_helper_basic(struct xdp_md *ctx)
+{
+ __u32 mtu_len = 0;
+
+ if (bpf_check_mtu(ctx, 0, &mtu_len, 0, 0))
+ return XDP_ABORTED;
+
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int xdp_use_helper(struct xdp_md *ctx)
+{
+ int retval = XDP_PASS; /* Expected retval on successful test */
+ __u32 mtu_len = 0;
+ __u32 ifindex = 0;
+ int delta = 0;
+
+ /* When ifindex is zero, save net_device lookup and use ctx netdev */
+ if (GLOBAL_USER_IFINDEX > 0)
+ ifindex = GLOBAL_USER_IFINDEX;
+
+ if (bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0)) {
+ /* mtu_len is also valid when the check fails */
+ retval = XDP_ABORTED;
+ goto out;
+ }
+
+ if (mtu_len != GLOBAL_USER_MTU)
+ retval = XDP_DROP;
+
+out:
+ global_bpf_mtu_xdp = mtu_len;
+ return retval;
+}
+
+SEC("xdp")
+int xdp_exceed_mtu(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ __u32 data_len = data_end - data;
+ int retval = XDP_ABORTED; /* Fail */
+ __u32 mtu_len = 0;
+ int delta;
+ int err;
+
+ /* Exceed MTU with 1 via delta adjust */
+ delta = GLOBAL_USER_MTU - (data_len - ETH_HLEN) + 1;
+
+ err = bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0);
+ if (err) {
+ retval = XDP_PASS; /* Success in exceeding MTU check */
+ if (err != BPF_MTU_CHK_RET_FRAG_NEEDED)
+ retval = XDP_DROP;
+ }
+
+ global_bpf_mtu_xdp = mtu_len;
+ return retval;
+}
+
+SEC("xdp")
+int xdp_minus_delta(struct xdp_md *ctx)
+{
+ int retval = XDP_PASS; /* Expected retval on successful test */
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ __u32 data_len = data_end - data;
+ __u32 mtu_len = 0;
+ int delta;
+
+ /* Borderline test case: a minus delta exceeding the packet length is allowed */
+ delta = -((data_len - ETH_HLEN) + 1);
+
+ /* A minus length (adjusted via delta) still passes the MTU check; other
+ * helpers are responsible for catching this when doing the actual size adjust.
+ */
+ if (bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0))
+ retval = XDP_ABORTED;
+
+ global_bpf_mtu_xdp = mtu_len;
+ return retval;
+}
+
+SEC("xdp")
+int xdp_input_len(struct xdp_md *ctx)
+{
+ int retval = XDP_PASS; /* Expected retval on successful test */
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ __u32 data_len = data_end - data;
+
+ /* The API allows the user to pass a length to check as input via the
+ * mtu_len param; the resulting MTU value is still output in mtu_len after the call.
+ *
+ * Input len is L3, like MTU and iph->tot_len.
+ * Remember that XDP data_len is L2.
+ */
+ __u32 mtu_len = data_len - ETH_HLEN;
+
+ if (bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0))
+ retval = XDP_ABORTED;
+
+ global_bpf_mtu_xdp = mtu_len;
+ return retval;
+}
+
+SEC("xdp")
+int xdp_input_len_exceed(struct xdp_md *ctx)
+{
+ int retval = XDP_ABORTED; /* Fail */
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ int err;
+
+ /* The API allows the user to pass a length to check as input via the
+ * mtu_len param; the resulting MTU value is still output in mtu_len after the call.
+ *
+ * The input length value is an L3 size, like MTU.
+ */
+ __u32 mtu_len = GLOBAL_USER_MTU;
+
+ mtu_len += 1; /* Exceed with 1 */
+
+ err = bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0);
+ if (err == BPF_MTU_CHK_RET_FRAG_NEEDED)
+ retval = XDP_PASS; /* Success in exceeding MTU check */
+
+ global_bpf_mtu_xdp = mtu_len;
+ return retval;
+}
+
+SEC("tc")
+int tc_use_helper(struct __sk_buff *ctx)
+{
+ int retval = BPF_OK; /* Expected retval on successful test */
+ __u32 mtu_len = 0;
+ int delta = 0;
+
+ if (bpf_check_mtu(ctx, 0, &mtu_len, delta, 0)) {
+ retval = BPF_DROP;
+ goto out;
+ }
+
+ if (mtu_len != GLOBAL_USER_MTU)
+ retval = BPF_REDIRECT;
+out:
+ global_bpf_mtu_tc = mtu_len;
+ return retval;
+}
+
+SEC("tc")
+int tc_exceed_mtu(struct __sk_buff *ctx)
+{
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ int retval = BPF_DROP; /* Fail */
+ __u32 skb_len = ctx->len;
+ __u32 mtu_len = 0;
+ int delta;
+ int err;
+
+ /* Exceed MTU with 1 via delta adjust */
+ delta = GLOBAL_USER_MTU - (skb_len - ETH_HLEN) + 1;
+
+ err = bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0);
+ if (err) {
+ retval = BPF_OK; /* Success in exceeding MTU check */
+ if (err != BPF_MTU_CHK_RET_FRAG_NEEDED)
+ retval = BPF_DROP;
+ }
+
+ global_bpf_mtu_tc = mtu_len;
+ return retval;
+}
+
+SEC("tc")
+int tc_exceed_mtu_da(struct __sk_buff *ctx)
+{
+ /* SKB Direct-Access variant */
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ __u32 data_len = data_end - data;
+ int retval = BPF_DROP; /* Fail */
+ __u32 mtu_len = 0;
+ int delta;
+ int err;
+
+ /* Exceed MTU with 1 via delta adjust */
+ delta = GLOBAL_USER_MTU - (data_len - ETH_HLEN) + 1;
+
+ err = bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0);
+ if (err) {
+ retval = BPF_OK; /* Success in exceeding MTU check */
+ if (err != BPF_MTU_CHK_RET_FRAG_NEEDED)
+ retval = BPF_DROP;
+ }
+
+ global_bpf_mtu_tc = mtu_len;
+ return retval;
+}
+
+SEC("tc")
+int tc_minus_delta(struct __sk_buff *ctx)
+{
+ int retval = BPF_OK; /* Expected retval on successful test */
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ __u32 skb_len = ctx->len;
+ __u32 mtu_len = 0;
+ int delta;
+
+ /* Borderline test case: a minus delta exceeding the packet length is allowed */
+ delta = -((skb_len - ETH_HLEN) + 1);
+
+ /* A minus length (adjusted via delta) still passes the MTU check; other
+ * helpers are responsible for catching this when doing the actual size adjust.
+ */
+ if (bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0))
+ retval = BPF_DROP;
+
+ global_bpf_mtu_tc = mtu_len;
+ return retval;
+}
+
+SEC("tc")
+int tc_input_len(struct __sk_buff *ctx)
+{
+ int retval = BPF_OK; /* Expected retval on successful test */
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+
+ /* The API allows the user to pass a length to check as input via the
+ * mtu_len param; the resulting MTU value is still output in mtu_len after the call.
+ *
+ * The input length value is an L3 size.
+ */
+ __u32 mtu_len = GLOBAL_USER_MTU;
+
+ if (bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0))
+ retval = BPF_DROP;
+
+ global_bpf_mtu_tc = mtu_len;
+ return retval;
+}
+
+SEC("tc")
+int tc_input_len_exceed(struct __sk_buff *ctx)
+{
+ int retval = BPF_DROP; /* Fail */
+ __u32 ifindex = GLOBAL_USER_IFINDEX;
+ int err;
+
+ /* The API allows the user to pass a length to check as input via the
+ * mtu_len param; the resulting MTU value is still output in mtu_len after the call.
+ *
+ * The input length value is an L3 size, like MTU.
+ */
+ __u32 mtu_len = GLOBAL_USER_MTU;
+
+ mtu_len += 1; /* Exceed with 1 */
+
+ err = bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0);
+ if (err == BPF_MTU_CHK_RET_FRAG_NEEDED)
+ retval = BPF_OK; /* Success in exceeding MTU check */
+
+ global_bpf_mtu_tc = mtu_len;
+ return retval;
+}
+
+SEC("tc")
+int tc_chk_segs_flag(struct __sk_buff *ctx)
+{
+ __u32 mtu_len = 0;
+ int err;
+
+ err = bpf_check_mtu(ctx, GLOBAL_USER_IFINDEX, &mtu_len, 0, BPF_MTU_CHK_SEGS);
+
+ return err == -EINVAL ? BPF_OK : BPF_DROP;
+}
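
The volatile const globals above end up in the skeleton's read-only data, which is frozen at load time, so user space must fill them in between open and load (a sketch, assuming the generated skeleton):

struct test_check_mtu *skel = test_check_mtu__open();

/* .rodata values must be set before __load() freezes them */
skel->rodata->GLOBAL_USER_MTU = mtu;
skel->rodata->GLOBAL_USER_IFINDEX = ifindex;
if (test_check_mtu__load(skel))
	/* handle error */;
/* after running a program, compare skel->bss->global_bpf_mtu_xdp or
 * skel->bss->global_bpf_mtu_tc against the expected MTU
 */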
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
index c9f8464996ea..26a53e54b8fa 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
@@ -15,12 +15,16 @@
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
-#include <linux/udp.h>
+#include <netinet/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
#include "test_cls_redirect.h"
+#include "bpf_misc.h"
+
+#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
#ifdef SUBPROGS
#define INLINING __noinline
@@ -28,9 +32,6 @@
#define INLINING __always_inline
#endif
-#define offsetofend(TYPE, MEMBER) \
- (offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
-
#define IP_OFFSET_MASK (0x1FFF)
#define IP_MF (0x2000)
@@ -39,8 +40,8 @@ char _license[] SEC("license") = "Dual BSD/GPL";
/**
* Destination port and IP used for UDP encapsulation.
*/
-static volatile const __be16 ENCAPSULATION_PORT;
-static volatile const __be32 ENCAPSULATION_IP;
+volatile const __be16 ENCAPSULATION_PORT;
+volatile const __be32 ENCAPSULATION_IP;
typedef struct {
uint64_t processed_packets_total;
@@ -70,6 +71,7 @@ typedef struct {
uint64_t errors_total_encap_adjust_failed;
uint64_t errors_total_encap_buffer_too_small;
uint64_t errors_total_redirect_loop;
+ uint64_t errors_total_encap_mtu_violate;
} metrics_t;
typedef enum {
@@ -125,7 +127,7 @@ typedef uint8_t *net_ptr __attribute__((align_value(8)));
typedef struct buf {
struct __sk_buff *skb;
net_ptr head;
- /* NB: tail musn't have alignment other than 1, otherwise
+ /* NB: tail mustn't have alignment other than 1, otherwise
* LLVM will go and eliminate code, e.g. when checking packet lengths.
*/
uint8_t *const tail;
@@ -266,7 +268,7 @@ static INLINING void pkt_ipv4_checksum(struct iphdr *iph)
uint32_t acc = 0;
uint16_t *ipw = (uint16_t *)iph;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (size_t i = 0; i < sizeof(struct iphdr) / 2; i++) {
acc += ipw[i];
}
@@ -293,7 +295,7 @@ bool pkt_skip_ipv6_extension_headers(buf_t *pkt,
};
*is_fragment = false;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < 6; i++) {
switch (exthdr.next) {
case IPPROTO_FRAGMENT:
@@ -407,6 +409,7 @@ static INLINING ret_t forward_with_gre(struct __sk_buff *skb, encap_headers_t *e
payload_off - sizeof(struct ethhdr) - sizeof(struct iphdr);
int32_t delta = sizeof(struct gre_base_hdr) - encap_overhead;
uint16_t proto = ETH_P_IP;
+ uint32_t mtu_len = 0;
/* Loop protection: the inner packet's TTL is decremented as a safeguard
* against any forwarding loop. As the only interesting field is the TTL
@@ -479,6 +482,11 @@ static INLINING ret_t forward_with_gre(struct __sk_buff *skb, encap_headers_t *e
}
}
+ if (bpf_check_mtu(skb, skb->ifindex, &mtu_len, delta, 0)) {
+ metrics->errors_total_encap_mtu_violate++;
+ return TC_ACT_SHOT;
+ }
+
if (bpf_skb_adjust_room(skb, delta, BPF_ADJ_ROOM_NET,
BPF_F_ADJ_ROOM_FIXED_GSO |
BPF_F_ADJ_ROOM_NO_CSUM_RESET) ||
@@ -593,7 +601,7 @@ static INLINING ret_t get_next_hop(buf_t *pkt, encap_headers_t *encap,
return TC_ACT_SHOT;
}
- /* Skip the remainig next hops (may be zero). */
+ /* Skip the remaining next hops (may be zero). */
return skip_next_hops(pkt, encap->unigue.hop_count -
encap->unigue.next_hop - 1);
}
@@ -603,8 +611,8 @@ static INLINING ret_t get_next_hop(buf_t *pkt, encap_headers_t *encap,
*
* fill_tuple(&t, foo, sizeof(struct iphdr), 123, 321)
*
- * clang will substitue a costant for sizeof, which allows the verifier
- * to track it's value. Based on this, it can figure out the constant
+ * clang will substitute a constant for sizeof, which allows the verifier
+ * to track its value. Based on this, it can figure out the constant
* return value, and calling code works while still being "generic" to
* IPv4 and IPv6.
*/
@@ -921,7 +929,7 @@ static INLINING verdict_t process_ipv6(buf_t *pkt, metrics_t *metrics)
}
}
-SEC("classifier/cls_redirect")
+SEC("tc")
int cls_redirect(struct __sk_buff *skb)
{
metrics_t *metrics = get_global_metrics();
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.h b/tools/testing/selftests/bpf/progs/test_cls_redirect.h
index 76eab0aacba0..eb55cb8a3dbd 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.h
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.h
@@ -10,7 +10,16 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/udp.h>
+#include <netinet/udp.h>
+
+/* offsetof() is used in static asserts, and the libbpf-redefined CO-RE
+ * friendly version breaks compilation for older clang versions <= 15
+ * when invoked in a static assert. Restore original here.
+ */
+#ifdef offsetof
+#undef offsetof
+#define offsetof(type, member) __builtin_offsetof(type, member)
+#endif
struct gre_base_hdr {
uint16_t flags;
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
new file mode 100644
index 000000000000..dfd4a2710391
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+// Copyright (c) 2019, 2020 Cloudflare
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <linux/bpf.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/pkt_cls.h>
+#include <linux/tcp.h>
+#include <netinet/udp.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include "test_cls_redirect.h"
+#include "bpf_kfuncs.h"
+
+#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
+
+#define offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
+
+#define IP_OFFSET_MASK (0x1FFF)
+#define IP_MF (0x2000)
+
+char _license[] SEC("license") = "Dual BSD/GPL";
+
+/**
+ * Destination port and IP used for UDP encapsulation.
+ */
+volatile const __be16 ENCAPSULATION_PORT;
+volatile const __be32 ENCAPSULATION_IP;
+
+typedef struct {
+ uint64_t processed_packets_total;
+ uint64_t l3_protocol_packets_total_ipv4;
+ uint64_t l3_protocol_packets_total_ipv6;
+ uint64_t l4_protocol_packets_total_tcp;
+ uint64_t l4_protocol_packets_total_udp;
+ uint64_t accepted_packets_total_syn;
+ uint64_t accepted_packets_total_syn_cookies;
+ uint64_t accepted_packets_total_last_hop;
+ uint64_t accepted_packets_total_icmp_echo_request;
+ uint64_t accepted_packets_total_established;
+ uint64_t forwarded_packets_total_gue;
+ uint64_t forwarded_packets_total_gre;
+
+ uint64_t errors_total_unknown_l3_proto;
+ uint64_t errors_total_unknown_l4_proto;
+ uint64_t errors_total_malformed_ip;
+ uint64_t errors_total_fragmented_ip;
+ uint64_t errors_total_malformed_icmp;
+ uint64_t errors_total_unwanted_icmp;
+ uint64_t errors_total_malformed_icmp_pkt_too_big;
+ uint64_t errors_total_malformed_tcp;
+ uint64_t errors_total_malformed_udp;
+ uint64_t errors_total_icmp_echo_replies;
+ uint64_t errors_total_malformed_encapsulation;
+ uint64_t errors_total_encap_adjust_failed;
+ uint64_t errors_total_encap_buffer_too_small;
+ uint64_t errors_total_redirect_loop;
+ uint64_t errors_total_encap_mtu_violate;
+} metrics_t;
+
+typedef enum {
+ INVALID = 0,
+ UNKNOWN,
+ ECHO_REQUEST,
+ SYN,
+ SYN_COOKIE,
+ ESTABLISHED,
+} verdict_t;
+
+typedef struct {
+ uint16_t src, dst;
+} flow_ports_t;
+
+_Static_assert(
+ sizeof(flow_ports_t) ==
+ offsetofend(struct bpf_sock_tuple, ipv4.dport) -
+ offsetof(struct bpf_sock_tuple, ipv4.sport),
+ "flow_ports_t must match sport and dport in struct bpf_sock_tuple");
+_Static_assert(
+ sizeof(flow_ports_t) ==
+ offsetofend(struct bpf_sock_tuple, ipv6.dport) -
+ offsetof(struct bpf_sock_tuple, ipv6.sport),
+ "flow_ports_t must match sport and dport in struct bpf_sock_tuple");
+
+struct iphdr_info {
+ void *hdr;
+ __u64 len;
+};
+
+typedef int ret_t;
+
+/* This is a bit of a hack. We need a return value which allows us to
+ * indicate that the regular flow of the program should continue,
+ * while allowing functions to use XDP_PASS and XDP_DROP, etc.
+ */
+static const ret_t CONTINUE_PROCESSING = -1;
+
+/* Convenience macro to call functions which return ret_t.
+ */
+#define MAYBE_RETURN(x) \
+ do { \
+ ret_t __ret = x; \
+ if (__ret != CONTINUE_PROCESSING) \
+ return __ret; \
+ } while (0)
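
An illustrative sketch of this convention (the helper name and flag check below are hypothetical, not part of the test): a function returns either a final TC verdict or the CONTINUE_PROCESSING sentinel, and MAYBE_RETURN() propagates only real verdicts.

	static ret_t check_reserved_bits(uint8_t flags)
	{
		if (flags & 0xF0)
			return TC_ACT_SHOT;	/* final verdict: stop here */
		return CONTINUE_PROCESSING;	/* sentinel: caller keeps going */
	}

	/* at a call site: */
	MAYBE_RETURN(check_reserved_bits(flags));
	/* reached only if check_reserved_bits() returned CONTINUE_PROCESSING */
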
+
+static bool ipv4_is_fragment(const struct iphdr *ip)
+{
+ uint16_t frag_off = ip->frag_off & bpf_htons(IP_OFFSET_MASK);
+ return (ip->frag_off & bpf_htons(IP_MF)) != 0 || frag_off > 0;
+}
+
+static int pkt_parse_ipv4(struct bpf_dynptr *dynptr, __u64 *offset, struct iphdr *iphdr)
+{
+ if (bpf_dynptr_read(iphdr, sizeof(*iphdr), dynptr, *offset, 0))
+ return -1;
+
+ *offset += sizeof(*iphdr);
+
+ if (iphdr->ihl < 5)
+ return -1;
+
+ /* skip ipv4 options */
+ *offset += (iphdr->ihl - 5) * 4;
+
+ return 0;
+}
+
+/* Parse the L4 ports from a packet, assuming a layout like TCP or UDP. */
+static bool pkt_parse_icmp_l4_ports(struct bpf_dynptr *dynptr, __u64 *offset, flow_ports_t *ports)
+{
+ if (bpf_dynptr_read(ports, sizeof(*ports), dynptr, *offset, 0))
+ return false;
+
+ *offset += sizeof(*ports);
+
+ /* Ports in the L4 headers are reversed, since we are parsing an ICMP
+ * payload which is going towards the eyeball.
+ */
+ uint16_t dst = ports->src;
+ ports->src = ports->dst;
+ ports->dst = dst;
+ return true;
+}
+
+static uint16_t pkt_checksum_fold(uint32_t csum)
+{
+ /* The highest reasonable value for an IPv4 header
+ * checksum requires two folds, so we just do that always.
+ */
+ csum = (csum & 0xffff) + (csum >> 16);
+ csum = (csum & 0xffff) + (csum >> 16);
+ return (uint16_t)~csum;
+}
+
+static void pkt_ipv4_checksum(struct iphdr *iph)
+{
+ iph->check = 0;
+
+ /* An IP header without options is 20 bytes. Two of those
+ * are the checksum, which we always set to zero. Hence,
+ * the maximum accumulated value is 18 / 2 * 0xffff = 0x8fff7,
+ * which fits in 32 bit.
+ */
+ _Static_assert(sizeof(struct iphdr) == 20, "iphdr must be 20 bytes");
+ uint32_t acc = 0;
+ uint16_t *ipw = (uint16_t *)iph;
+
+ for (size_t i = 0; i < sizeof(struct iphdr) / 2; i++)
+ acc += ipw[i];
+
+ iph->check = pkt_checksum_fold(acc);
+}
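
A quick host-side sketch (an illustration, not part of the test) of why a single fold is not always enough: the first fold can itself carry out of 16 bits. This standalone check mirrors pkt_checksum_fold():

	#include <assert.h>
	#include <stdint.h>

	static uint16_t fold(uint32_t csum)
	{
		csum = (csum & 0xffff) + (csum >> 16);	/* may still carry */
		csum = (csum & 0xffff) + (csum >> 16);	/* now fits in 16 bits */
		return (uint16_t)~csum;
	}

	int main(void)
	{
		/* 0xfffe + 0x2 = 0x10000: the first fold overflows 16 bits,
		 * the second fold brings the carry back in: 0x0 + 0x1 = 0x1.
		 */
		assert(fold(0x2fffe) == 0xfffe);
		return 0;
	}
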
+
+static bool pkt_skip_ipv6_extension_headers(struct bpf_dynptr *dynptr, __u64 *offset,
+ const struct ipv6hdr *ipv6, uint8_t *upper_proto,
+ bool *is_fragment)
+{
+ /* We understand five extension headers.
+ * https://tools.ietf.org/html/rfc8200#section-4.1 states that all
+ * headers should occur once, except Destination Options, which may
+ * occur twice. Hence we give up after 6 headers.
+ */
+ struct {
+ uint8_t next;
+ uint8_t len;
+ } exthdr = {
+ .next = ipv6->nexthdr,
+ };
+ *is_fragment = false;
+
+ for (int i = 0; i < 6; i++) {
+ switch (exthdr.next) {
+ case IPPROTO_FRAGMENT:
+ *is_fragment = true;
+ /* NB: We don't check that hdrlen == 0 as per spec. */
+ /* fallthrough; */
+
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_ROUTING:
+ case IPPROTO_DSTOPTS:
+ case IPPROTO_MH:
+ if (bpf_dynptr_read(&exthdr, sizeof(exthdr), dynptr, *offset, 0))
+ return false;
+
+ /* hdrlen is in 8-octet units, and excludes the first 8 octets. */
+ *offset += (exthdr.len + 1) * 8;
+
+ /* Decode next header */
+ break;
+
+ default:
+ /* The next header is not one of the known extension
+ * headers, treat it as the upper layer header.
+ *
+ * This handles IPPROTO_NONE.
+ *
+ * Encapsulating Security Payload (50) and Authentication
+ * Header (51) also end up here (and will trigger an
+ * unknown proto error later). They have a custom header
+ * format and seem too esoteric to care about.
+ */
+ *upper_proto = exthdr.next;
+ return true;
+ }
+ }
+
+ /* We never found an upper layer header. */
+ return false;
+}
+
+static int pkt_parse_ipv6(struct bpf_dynptr *dynptr, __u64 *offset, struct ipv6hdr *ipv6,
+ uint8_t *proto, bool *is_fragment)
+{
+ if (bpf_dynptr_read(ipv6, sizeof(*ipv6), dynptr, *offset, 0))
+ return -1;
+
+ *offset += sizeof(*ipv6);
+
+ if (!pkt_skip_ipv6_extension_headers(dynptr, offset, ipv6, proto, is_fragment))
+ return -1;
+
+ return 0;
+}
+
+/* Global metrics, per CPU
+ */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, unsigned int);
+ __type(value, metrics_t);
+} metrics_map SEC(".maps");
+
+static metrics_t *get_global_metrics(void)
+{
+ uint64_t key = 0;
+ return bpf_map_lookup_elem(&metrics_map, &key);
+}
+
+static ret_t accept_locally(struct __sk_buff *skb, encap_headers_t *encap)
+{
+ const int payload_off =
+ sizeof(*encap) +
+ sizeof(struct in_addr) * encap->unigue.hop_count;
+ int32_t encap_overhead = payload_off - sizeof(struct ethhdr);
+
+ /* Changing the ethertype if the encapsulated packet is ipv6 */
+ if (encap->gue.proto_ctype == IPPROTO_IPV6)
+ encap->eth.h_proto = bpf_htons(ETH_P_IPV6);
+
+ if (bpf_skb_adjust_room(skb, -encap_overhead, BPF_ADJ_ROOM_MAC,
+ BPF_F_ADJ_ROOM_FIXED_GSO |
+ BPF_F_ADJ_ROOM_NO_CSUM_RESET) ||
+ bpf_csum_level(skb, BPF_CSUM_LEVEL_DEC))
+ return TC_ACT_SHOT;
+
+ return bpf_redirect(skb->ifindex, BPF_F_INGRESS);
+}
+
+static ret_t forward_with_gre(struct __sk_buff *skb, struct bpf_dynptr *dynptr,
+ encap_headers_t *encap, struct in_addr *next_hop,
+ metrics_t *metrics)
+{
+ const int payload_off =
+ sizeof(*encap) +
+ sizeof(struct in_addr) * encap->unigue.hop_count;
+ int32_t encap_overhead =
+ payload_off - sizeof(struct ethhdr) - sizeof(struct iphdr);
+ int32_t delta = sizeof(struct gre_base_hdr) - encap_overhead;
+ __u8 encap_buffer[sizeof(encap_gre_t)] = {};
+ uint16_t proto = ETH_P_IP;
+ uint32_t mtu_len = 0;
+ encap_gre_t *encap_gre;
+
+ metrics->forwarded_packets_total_gre++;
+
+ /* Loop protection: the inner packet's TTL is decremented as a safeguard
+	 * against any forwarding loop. As the only interesting field is the TTL
+	 * (hop limit for IPv6), it is easier to use bpf_skb_load_bytes/bpf_skb_store_bytes
+ * as they handle the split packets if needed (no need for the data to be
+ * in the linear section).
+ */
+ if (encap->gue.proto_ctype == IPPROTO_IPV6) {
+ proto = ETH_P_IPV6;
+ uint8_t ttl;
+ int rc;
+
+ rc = bpf_skb_load_bytes(
+ skb, payload_off + offsetof(struct ipv6hdr, hop_limit),
+ &ttl, 1);
+ if (rc != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ if (ttl == 0) {
+ metrics->errors_total_redirect_loop++;
+ return TC_ACT_SHOT;
+ }
+
+ ttl--;
+ rc = bpf_skb_store_bytes(
+ skb, payload_off + offsetof(struct ipv6hdr, hop_limit),
+ &ttl, 1, 0);
+ if (rc != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+ } else {
+ uint8_t ttl;
+ int rc;
+
+ rc = bpf_skb_load_bytes(
+ skb, payload_off + offsetof(struct iphdr, ttl), &ttl,
+ 1);
+ if (rc != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ if (ttl == 0) {
+ metrics->errors_total_redirect_loop++;
+ return TC_ACT_SHOT;
+ }
+
+ /* IPv4 also has a checksum to patch. While the TTL is only one byte,
+		 * bpf_l3_csum_replace() only works on 2- and 4-byte arguments (the
+		 * result is the same).
+ */
+ rc = bpf_l3_csum_replace(
+ skb, payload_off + offsetof(struct iphdr, check), ttl,
+ ttl - 1, 2);
+ if (rc != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ ttl--;
+ rc = bpf_skb_store_bytes(
+ skb, payload_off + offsetof(struct iphdr, ttl), &ttl, 1,
+ 0);
+ if (rc != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+ }
+
+ if (bpf_check_mtu(skb, skb->ifindex, &mtu_len, delta, 0)) {
+ metrics->errors_total_encap_mtu_violate++;
+ return TC_ACT_SHOT;
+ }
+
+ if (bpf_skb_adjust_room(skb, delta, BPF_ADJ_ROOM_NET,
+ BPF_F_ADJ_ROOM_FIXED_GSO |
+ BPF_F_ADJ_ROOM_NO_CSUM_RESET) ||
+ bpf_csum_level(skb, BPF_CSUM_LEVEL_INC)) {
+ metrics->errors_total_encap_adjust_failed++;
+ return TC_ACT_SHOT;
+ }
+
+ if (bpf_skb_pull_data(skb, sizeof(encap_gre_t))) {
+ metrics->errors_total_encap_buffer_too_small++;
+ return TC_ACT_SHOT;
+ }
+
+ encap_gre = bpf_dynptr_slice_rdwr(dynptr, 0, encap_buffer, sizeof(encap_buffer));
+ if (!encap_gre) {
+ metrics->errors_total_encap_buffer_too_small++;
+ return TC_ACT_SHOT;
+ }
+
+ encap_gre->ip.protocol = IPPROTO_GRE;
+ encap_gre->ip.daddr = next_hop->s_addr;
+ encap_gre->ip.saddr = ENCAPSULATION_IP;
+ encap_gre->ip.tot_len =
+ bpf_htons(bpf_ntohs(encap_gre->ip.tot_len) + delta);
+ encap_gre->gre.flags = 0;
+ encap_gre->gre.protocol = bpf_htons(proto);
+ pkt_ipv4_checksum((void *)&encap_gre->ip);
+
+ if (encap_gre == encap_buffer)
+ bpf_dynptr_write(dynptr, 0, encap_buffer, sizeof(encap_buffer), 0);
+
+ return bpf_redirect(skb->ifindex, 0);
+}
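
The 2-byte bpf_l3_csum_replace() call above relies on the RFC 1624 incremental update HC' = ~(~HC + ~m + m'), applied to the 16-bit word containing the TTL. A standalone host-side sketch (illustrative values, not from the test) checking that the incremental update matches a full recompute:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	static uint16_t csum(const uint16_t *w, size_t n)
	{
		uint32_t acc = 0;

		while (n--)
			acc += *w++;
		acc = (acc & 0xffff) + (acc >> 16);
		acc = (acc & 0xffff) + (acc >> 16);
		return (uint16_t)~acc;
	}

	/* RFC 1624: update check when 16-bit word m becomes m_new */
	static uint16_t csum_update(uint16_t check, uint16_t m, uint16_t m_new)
	{
		uint32_t acc = (uint16_t)~check;

		acc += (uint16_t)~m + m_new;
		acc = (acc & 0xffff) + (acc >> 16);
		acc = (acc & 0xffff) + (acc >> 16);
		return (uint16_t)~acc;
	}

	int main(void)
	{
		/* 20-byte IPv4 header as ten 16-bit words; word 4 holds
		 * TTL/protocol, word 5 is the checksum field.
		 */
		uint16_t hdr[10] = { 0x4500, 0x0054, 0x1c46, 0x4000,
				     0x4006, 0x0000, 0xac10, 0x0a63,
				     0xac10, 0x0a0c };
		uint16_t patched;

		hdr[5] = csum(hdr, 10);
		patched = csum_update(hdr[5], 0x4006, 0x3f06); /* TTL 0x40 -> 0x3f */
		hdr[4] = 0x3f06;
		hdr[5] = 0;
		assert(patched == csum(hdr, 10));
		return 0;
	}
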
+
+static ret_t forward_to_next_hop(struct __sk_buff *skb, struct bpf_dynptr *dynptr,
+ encap_headers_t *encap, struct in_addr *next_hop,
+ metrics_t *metrics)
+{
+ /* swap L2 addresses */
+ /* This assumes that packets are received from a router.
+ * So just swapping the MAC addresses here will make the packet go back to
+ * the router, which will send it to the appropriate machine.
+ */
+ unsigned char temp[ETH_ALEN];
+ memcpy(temp, encap->eth.h_dest, sizeof(temp));
+ memcpy(encap->eth.h_dest, encap->eth.h_source,
+ sizeof(encap->eth.h_dest));
+ memcpy(encap->eth.h_source, temp, sizeof(encap->eth.h_source));
+
+ if (encap->unigue.next_hop == encap->unigue.hop_count - 1 &&
+ encap->unigue.last_hop_gre) {
+ return forward_with_gre(skb, dynptr, encap, next_hop, metrics);
+ }
+
+ metrics->forwarded_packets_total_gue++;
+ uint32_t old_saddr = encap->ip.saddr;
+ encap->ip.saddr = encap->ip.daddr;
+ encap->ip.daddr = next_hop->s_addr;
+ if (encap->unigue.next_hop < encap->unigue.hop_count) {
+ encap->unigue.next_hop++;
+ }
+
+ /* Remove ip->saddr, add next_hop->s_addr */
+ const uint64_t off = offsetof(typeof(*encap), ip.check);
+ int ret = bpf_l3_csum_replace(skb, off, old_saddr, next_hop->s_addr, 4);
+ if (ret < 0) {
+ return TC_ACT_SHOT;
+ }
+
+ return bpf_redirect(skb->ifindex, 0);
+}
+
+static ret_t skip_next_hops(__u64 *offset, int n)
+{
+ switch (n) {
+ case 1:
+ *offset += sizeof(struct in_addr);
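+		/* fallthrough; */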
+ case 0:
+ return CONTINUE_PROCESSING;
+
+ default:
+ return TC_ACT_SHOT;
+ }
+}
+
+/* Get the next hop from the GLB header.
+ *
+ * Sets next_hop->s_addr to 0 if there are no more hops left.
+ * *offset is positioned just after the variable length GLB header
+ * iff the call is successful.
+ */
+static ret_t get_next_hop(struct bpf_dynptr *dynptr, __u64 *offset, encap_headers_t *encap,
+ struct in_addr *next_hop)
+{
+ if (encap->unigue.next_hop > encap->unigue.hop_count)
+ return TC_ACT_SHOT;
+
+ /* Skip "used" next hops. */
+ MAYBE_RETURN(skip_next_hops(offset, encap->unigue.next_hop));
+
+ if (encap->unigue.next_hop == encap->unigue.hop_count) {
+ /* No more next hops, we are at the end of the GLB header. */
+ next_hop->s_addr = 0;
+ return CONTINUE_PROCESSING;
+ }
+
+ if (bpf_dynptr_read(next_hop, sizeof(*next_hop), dynptr, *offset, 0))
+ return TC_ACT_SHOT;
+
+ *offset += sizeof(*next_hop);
+
+ /* Skip the remaining next hops (may be zero). */
+ return skip_next_hops(offset, encap->unigue.hop_count - encap->unigue.next_hop - 1);
+}
+
+/* Fill a bpf_sock_tuple to be used with the socket lookup functions.
+ * This is a kludge that lets us work around verifier limitations:
+ *
+ * fill_tuple(&t, foo, sizeof(struct iphdr), 123, 321)
+ *
+ * clang will substitute a constant for sizeof, which allows the verifier
+ * to track its value. Based on this, it can figure out the constant
+ * return value, and calling code works while still being "generic" to
+ * IPv4 and IPv6.
+ */
+static uint64_t fill_tuple(struct bpf_sock_tuple *tuple, void *iph,
+ uint64_t iphlen, uint16_t sport, uint16_t dport)
+{
+ switch (iphlen) {
+ case sizeof(struct iphdr): {
+ struct iphdr *ipv4 = (struct iphdr *)iph;
+ tuple->ipv4.daddr = ipv4->daddr;
+ tuple->ipv4.saddr = ipv4->saddr;
+ tuple->ipv4.sport = sport;
+ tuple->ipv4.dport = dport;
+ return sizeof(tuple->ipv4);
+ }
+
+ case sizeof(struct ipv6hdr): {
+ struct ipv6hdr *ipv6 = (struct ipv6hdr *)iph;
+ memcpy(&tuple->ipv6.daddr, &ipv6->daddr,
+ sizeof(tuple->ipv6.daddr));
+ memcpy(&tuple->ipv6.saddr, &ipv6->saddr,
+ sizeof(tuple->ipv6.saddr));
+ tuple->ipv6.sport = sport;
+ tuple->ipv6.dport = dport;
+ return sizeof(tuple->ipv6);
+ }
+
+ default:
+ return 0;
+ }
+}
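
Because iphlen is always passed as a sizeof() constant, clang resolves the switch at compile time and the verifier sees a constant tuple length flowing into the socket lookup helpers. A sketch of the two call shapes this enables (local names here are placeholders):

	struct bpf_sock_tuple tuple;
	uint64_t tuplen;

	/* IPv4 caller: verifier tracks tuplen == sizeof(tuple.ipv4) */
	tuplen = fill_tuple(&tuple, &ipv4, sizeof(struct iphdr), sport, dport);

	/* IPv6 caller: verifier tracks tuplen == sizeof(tuple.ipv6) */
	tuplen = fill_tuple(&tuple, &ipv6, sizeof(struct ipv6hdr), sport, dport);
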
+
+static verdict_t classify_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple,
+ uint64_t tuplen, void *iph, struct tcphdr *tcp)
+{
+ struct bpf_sock *sk =
+ bpf_skc_lookup_tcp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0);
+
+ if (sk == NULL)
+ return UNKNOWN;
+
+ if (sk->state != BPF_TCP_LISTEN) {
+ bpf_sk_release(sk);
+ return ESTABLISHED;
+ }
+
+ if (iph != NULL && tcp != NULL) {
+ /* Kludge: we've run out of arguments, but need the length of the ip header. */
+ uint64_t iphlen = sizeof(struct iphdr);
+
+ if (tuplen == sizeof(tuple->ipv6))
+ iphlen = sizeof(struct ipv6hdr);
+
+ if (bpf_tcp_check_syncookie(sk, iph, iphlen, tcp,
+ sizeof(*tcp)) == 0) {
+ bpf_sk_release(sk);
+ return SYN_COOKIE;
+ }
+ }
+
+ bpf_sk_release(sk);
+ return UNKNOWN;
+}
+
+static verdict_t classify_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, uint64_t tuplen)
+{
+ struct bpf_sock *sk =
+ bpf_sk_lookup_udp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0);
+
+ if (sk == NULL)
+ return UNKNOWN;
+
+ if (sk->state == BPF_TCP_ESTABLISHED) {
+ bpf_sk_release(sk);
+ return ESTABLISHED;
+ }
+
+ bpf_sk_release(sk);
+ return UNKNOWN;
+}
+
+static verdict_t classify_icmp(struct __sk_buff *skb, uint8_t proto, struct bpf_sock_tuple *tuple,
+ uint64_t tuplen, metrics_t *metrics)
+{
+ switch (proto) {
+ case IPPROTO_TCP:
+ return classify_tcp(skb, tuple, tuplen, NULL, NULL);
+
+ case IPPROTO_UDP:
+ return classify_udp(skb, tuple, tuplen);
+
+ default:
+ metrics->errors_total_malformed_icmp++;
+ return INVALID;
+ }
+}
+
+static verdict_t process_icmpv4(struct __sk_buff *skb, struct bpf_dynptr *dynptr, __u64 *offset,
+ metrics_t *metrics)
+{
+ struct icmphdr icmp;
+ struct iphdr ipv4;
+
+ if (bpf_dynptr_read(&icmp, sizeof(icmp), dynptr, *offset, 0)) {
+ metrics->errors_total_malformed_icmp++;
+ return INVALID;
+ }
+
+ *offset += sizeof(icmp);
+
+ /* We should never receive encapsulated echo replies. */
+ if (icmp.type == ICMP_ECHOREPLY) {
+ metrics->errors_total_icmp_echo_replies++;
+ return INVALID;
+ }
+
+ if (icmp.type == ICMP_ECHO)
+ return ECHO_REQUEST;
+
+ if (icmp.type != ICMP_DEST_UNREACH || icmp.code != ICMP_FRAG_NEEDED) {
+ metrics->errors_total_unwanted_icmp++;
+ return INVALID;
+ }
+
+ if (pkt_parse_ipv4(dynptr, offset, &ipv4)) {
+ metrics->errors_total_malformed_icmp_pkt_too_big++;
+ return INVALID;
+ }
+
+ /* The source address in the outer IP header is from the entity that
+ * originated the ICMP message. Use the original IP header to restore
+ * the correct flow tuple.
+ */
+ struct bpf_sock_tuple tuple;
+ tuple.ipv4.saddr = ipv4.daddr;
+ tuple.ipv4.daddr = ipv4.saddr;
+
+ if (!pkt_parse_icmp_l4_ports(dynptr, offset, (flow_ports_t *)&tuple.ipv4.sport)) {
+ metrics->errors_total_malformed_icmp_pkt_too_big++;
+ return INVALID;
+ }
+
+ return classify_icmp(skb, ipv4.protocol, &tuple,
+ sizeof(tuple.ipv4), metrics);
+}
+
+static verdict_t process_icmpv6(struct bpf_dynptr *dynptr, __u64 *offset, struct __sk_buff *skb,
+ metrics_t *metrics)
+{
+ struct bpf_sock_tuple tuple;
+ struct ipv6hdr ipv6;
+ struct icmp6hdr icmp6;
+ bool is_fragment;
+ uint8_t l4_proto;
+
+ if (bpf_dynptr_read(&icmp6, sizeof(icmp6), dynptr, *offset, 0)) {
+ metrics->errors_total_malformed_icmp++;
+ return INVALID;
+ }
+
+ /* We should never receive encapsulated echo replies. */
+ if (icmp6.icmp6_type == ICMPV6_ECHO_REPLY) {
+ metrics->errors_total_icmp_echo_replies++;
+ return INVALID;
+ }
+
+ if (icmp6.icmp6_type == ICMPV6_ECHO_REQUEST) {
+ return ECHO_REQUEST;
+ }
+
+ if (icmp6.icmp6_type != ICMPV6_PKT_TOOBIG) {
+ metrics->errors_total_unwanted_icmp++;
+ return INVALID;
+ }
+
+ if (pkt_parse_ipv6(dynptr, offset, &ipv6, &l4_proto, &is_fragment)) {
+ metrics->errors_total_malformed_icmp_pkt_too_big++;
+ return INVALID;
+ }
+
+ if (is_fragment) {
+ metrics->errors_total_fragmented_ip++;
+ return INVALID;
+ }
+
+ /* Swap source and dest addresses. */
+ memcpy(&tuple.ipv6.saddr, &ipv6.daddr, sizeof(tuple.ipv6.saddr));
+ memcpy(&tuple.ipv6.daddr, &ipv6.saddr, sizeof(tuple.ipv6.daddr));
+
+ if (!pkt_parse_icmp_l4_ports(dynptr, offset, (flow_ports_t *)&tuple.ipv6.sport)) {
+ metrics->errors_total_malformed_icmp_pkt_too_big++;
+ return INVALID;
+ }
+
+ return classify_icmp(skb, l4_proto, &tuple, sizeof(tuple.ipv6),
+ metrics);
+}
+
+static verdict_t process_tcp(struct bpf_dynptr *dynptr, __u64 *offset, struct __sk_buff *skb,
+ struct iphdr_info *info, metrics_t *metrics)
+{
+ struct bpf_sock_tuple tuple;
+ struct tcphdr tcp;
+ uint64_t tuplen;
+
+ metrics->l4_protocol_packets_total_tcp++;
+
+ if (bpf_dynptr_read(&tcp, sizeof(tcp), dynptr, *offset, 0)) {
+ metrics->errors_total_malformed_tcp++;
+ return INVALID;
+ }
+
+ *offset += sizeof(tcp);
+
+ if (tcp.syn)
+ return SYN;
+
+ tuplen = fill_tuple(&tuple, info->hdr, info->len, tcp.source, tcp.dest);
+ return classify_tcp(skb, &tuple, tuplen, info->hdr, &tcp);
+}
+
+static verdict_t process_udp(struct bpf_dynptr *dynptr, __u64 *offset, struct __sk_buff *skb,
+ struct iphdr_info *info, metrics_t *metrics)
+{
+ struct bpf_sock_tuple tuple;
+ struct udphdr udph;
+ uint64_t tuplen;
+
+ metrics->l4_protocol_packets_total_udp++;
+
+ if (bpf_dynptr_read(&udph, sizeof(udph), dynptr, *offset, 0)) {
+ metrics->errors_total_malformed_udp++;
+ return INVALID;
+ }
+ *offset += sizeof(udph);
+
+ tuplen = fill_tuple(&tuple, info->hdr, info->len, udph.source, udph.dest);
+ return classify_udp(skb, &tuple, tuplen);
+}
+
+static verdict_t process_ipv4(struct __sk_buff *skb, struct bpf_dynptr *dynptr,
+ __u64 *offset, metrics_t *metrics)
+{
+ struct iphdr ipv4;
+ struct iphdr_info info = {
+ .hdr = &ipv4,
+ .len = sizeof(ipv4),
+ };
+
+ metrics->l3_protocol_packets_total_ipv4++;
+
+ if (pkt_parse_ipv4(dynptr, offset, &ipv4)) {
+ metrics->errors_total_malformed_ip++;
+ return INVALID;
+ }
+
+ if (ipv4.version != 4) {
+ metrics->errors_total_malformed_ip++;
+ return INVALID;
+ }
+
+ if (ipv4_is_fragment(&ipv4)) {
+ metrics->errors_total_fragmented_ip++;
+ return INVALID;
+ }
+
+ switch (ipv4.protocol) {
+ case IPPROTO_ICMP:
+ return process_icmpv4(skb, dynptr, offset, metrics);
+
+ case IPPROTO_TCP:
+ return process_tcp(dynptr, offset, skb, &info, metrics);
+
+ case IPPROTO_UDP:
+ return process_udp(dynptr, offset, skb, &info, metrics);
+
+ default:
+ metrics->errors_total_unknown_l4_proto++;
+ return INVALID;
+ }
+}
+
+static verdict_t process_ipv6(struct __sk_buff *skb, struct bpf_dynptr *dynptr,
+ __u64 *offset, metrics_t *metrics)
+{
+ struct ipv6hdr ipv6;
+ struct iphdr_info info = {
+ .hdr = &ipv6,
+ .len = sizeof(ipv6),
+ };
+ uint8_t l4_proto;
+ bool is_fragment;
+
+ metrics->l3_protocol_packets_total_ipv6++;
+
+ if (pkt_parse_ipv6(dynptr, offset, &ipv6, &l4_proto, &is_fragment)) {
+ metrics->errors_total_malformed_ip++;
+ return INVALID;
+ }
+
+ if (ipv6.version != 6) {
+ metrics->errors_total_malformed_ip++;
+ return INVALID;
+ }
+
+ if (is_fragment) {
+ metrics->errors_total_fragmented_ip++;
+ return INVALID;
+ }
+
+ switch (l4_proto) {
+ case IPPROTO_ICMPV6:
+ return process_icmpv6(dynptr, offset, skb, metrics);
+
+ case IPPROTO_TCP:
+ return process_tcp(dynptr, offset, skb, &info, metrics);
+
+ case IPPROTO_UDP:
+ return process_udp(dynptr, offset, skb, &info, metrics);
+
+ default:
+ metrics->errors_total_unknown_l4_proto++;
+ return INVALID;
+ }
+}
+
+SEC("tc")
+int cls_redirect(struct __sk_buff *skb)
+{
+ __u8 encap_buffer[sizeof(encap_headers_t)] = {};
+ struct bpf_dynptr dynptr;
+ struct in_addr next_hop;
+ /* Tracks offset of the dynptr. This will be unnecessary once
+ * bpf_dynptr_advance() is available.
+ */
+ __u64 off = 0;
+ ret_t ret;
+
+ bpf_dynptr_from_skb(skb, 0, &dynptr);
+
+ metrics_t *metrics = get_global_metrics();
+ if (metrics == NULL)
+ return TC_ACT_SHOT;
+
+ metrics->processed_packets_total++;
+
+ /* Pass bogus packets as long as we're not sure they're
+ * destined for us.
+ */
+ if (skb->protocol != bpf_htons(ETH_P_IP))
+ return TC_ACT_OK;
+
+ encap_headers_t *encap;
+
+ /* Make sure that all encapsulation headers are available in
+ * the linear portion of the skb. This makes it easy to manipulate them.
+ */
+ if (bpf_skb_pull_data(skb, sizeof(*encap)))
+ return TC_ACT_OK;
+
+ encap = bpf_dynptr_slice_rdwr(&dynptr, 0, encap_buffer, sizeof(encap_buffer));
+ if (!encap)
+ return TC_ACT_OK;
+
+ off += sizeof(*encap);
+
+ if (encap->ip.ihl != 5)
+ /* We never have any options. */
+ return TC_ACT_OK;
+
+ if (encap->ip.daddr != ENCAPSULATION_IP ||
+ encap->ip.protocol != IPPROTO_UDP)
+ return TC_ACT_OK;
+
+ /* TODO Check UDP length? */
+ if (encap->udp.dest != ENCAPSULATION_PORT)
+ return TC_ACT_OK;
+
+	/* We now know that the packet is destined for us, so we can
+	 * drop bogus ones.
+ */
+ if (ipv4_is_fragment((void *)&encap->ip)) {
+ metrics->errors_total_fragmented_ip++;
+ return TC_ACT_SHOT;
+ }
+
+ if (encap->gue.variant != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ if (encap->gue.control != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ if (encap->gue.flags != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ if (encap->gue.hlen !=
+ sizeof(encap->unigue) / 4 + encap->unigue.hop_count) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ if (encap->unigue.version != 0) {
+ metrics->errors_total_malformed_encapsulation++;
+ return TC_ACT_SHOT;
+ }
+
+ if (encap->unigue.reserved != 0)
+ return TC_ACT_SHOT;
+
+ MAYBE_RETURN(get_next_hop(&dynptr, &off, encap, &next_hop));
+
+ if (next_hop.s_addr == 0) {
+ metrics->accepted_packets_total_last_hop++;
+ return accept_locally(skb, encap);
+ }
+
+ verdict_t verdict;
+ switch (encap->gue.proto_ctype) {
+ case IPPROTO_IPIP:
+ verdict = process_ipv4(skb, &dynptr, &off, metrics);
+ break;
+
+ case IPPROTO_IPV6:
+ verdict = process_ipv6(skb, &dynptr, &off, metrics);
+ break;
+
+ default:
+ metrics->errors_total_unknown_l3_proto++;
+ return TC_ACT_SHOT;
+ }
+
+ switch (verdict) {
+ case INVALID:
+ /* metrics have already been bumped */
+ return TC_ACT_SHOT;
+
+ case UNKNOWN:
+ return forward_to_next_hop(skb, &dynptr, encap, &next_hop, metrics);
+
+ case ECHO_REQUEST:
+ metrics->accepted_packets_total_icmp_echo_request++;
+ break;
+
+ case SYN:
+ if (encap->unigue.forward_syn) {
+ return forward_to_next_hop(skb, &dynptr, encap, &next_hop,
+ metrics);
+ }
+
+ metrics->accepted_packets_total_syn++;
+ break;
+
+ case SYN_COOKIE:
+ metrics->accepted_packets_total_syn_cookies++;
+ break;
+
+ case ESTABLISHED:
+ metrics->accepted_packets_total_established++;
+ break;
+ }
+
+ ret = accept_locally(skb, encap);
+
+ if (encap == encap_buffer)
+ bpf_dynptr_write(&dynptr, 0, encap_buffer, sizeof(encap_buffer), 0);
+
+ return ret;
+}
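
One subtlety used twice above (for encap and for the GRE header) is the slice-or-copy contract of bpf_dynptr_slice_rdwr(): the returned pointer either aliases the packet directly, or points into the caller-supplied buffer, in which case writes must be flushed back. A minimal sketch of the pattern, under the same kfunc semantics as bpf_kfuncs.h:

	__u8 buf[sizeof(struct iphdr)] = {};
	struct iphdr *ip;

	ip = bpf_dynptr_slice_rdwr(&dynptr, 0, buf, sizeof(buf));
	if (!ip)
		return TC_ACT_SHOT;

	ip->ttl--;				/* mutate through the slice */

	if (ip == (struct iphdr *)buf)		/* got a copy: write it back */
		bpf_dynptr_write(&dynptr, 0, buf, sizeof(buf), 0);
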
diff --git a/tools/testing/selftests/bpf/progs/test_core_autosize.c b/tools/testing/selftests/bpf/progs/test_core_autosize.c
index 44f5aa2e8956..9a7829c5e4a7 100644
--- a/tools/testing/selftests/bpf/progs/test_core_autosize.c
+++ b/tools/testing/selftests/bpf/progs/test_core_autosize.c
@@ -125,6 +125,16 @@ int handle_downsize(void *ctx)
return 0;
}
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define bpf_core_read_int bpf_core_read
+#else
+#define bpf_core_read_int(dst, sz, src) ({ \
+ /* Prevent "subtraction from stack pointer prohibited" */ \
+ volatile long __off = sizeof(*dst) - (sz); \
+ bpf_core_read((char *)(dst) + __off, sz, src); \
+})
+#endif
+
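
A byte-level illustration of what the big-endian branch compensates for, assuming a 4-byte kernel field read into an 8-byte tmp:

	/* little endian: the value occupies the low-addressed bytes of tmp,
	 * so reading into (char *)&tmp + 0 is correct:
	 *	tmp: [b0 b1 b2 b3 00 00 00 00]
	 *
	 * big endian: the low-order bytes of tmp live at the high-addressed
	 * end, so the read must land at offset sizeof(tmp) - sz = 4, which
	 * is exactly the __off that bpf_core_read_int() computes:
	 *	tmp: [00 00 00 00 b0 b1 b2 b3]
	 */
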
SEC("raw_tp/sys_enter")
int handle_probed(void *ctx)
{
@@ -132,23 +142,23 @@ int handle_probed(void *ctx)
__u64 tmp;
tmp = 0;
- bpf_core_read(&tmp, bpf_core_field_size(in->ptr), &in->ptr);
+ bpf_core_read_int(&tmp, bpf_core_field_size(in->ptr), &in->ptr);
ptr_probed = tmp;
tmp = 0;
- bpf_core_read(&tmp, bpf_core_field_size(in->val1), &in->val1);
+ bpf_core_read_int(&tmp, bpf_core_field_size(in->val1), &in->val1);
val1_probed = tmp;
tmp = 0;
- bpf_core_read(&tmp, bpf_core_field_size(in->val2), &in->val2);
+ bpf_core_read_int(&tmp, bpf_core_field_size(in->val2), &in->val2);
val2_probed = tmp;
tmp = 0;
- bpf_core_read(&tmp, bpf_core_field_size(in->val3), &in->val3);
+ bpf_core_read_int(&tmp, bpf_core_field_size(in->val3), &in->val3);
val3_probed = tmp;
tmp = 0;
- bpf_core_read(&tmp, bpf_core_field_size(in->val4), &in->val4);
+ bpf_core_read_int(&tmp, bpf_core_field_size(in->val4), &in->val4);
val4_probed = tmp;
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_core_extern.c b/tools/testing/selftests/bpf/progs/test_core_extern.c
index 3ac3603ad53d..a3c7c1042f35 100644
--- a/tools/testing/selftests/bpf/progs/test_core_extern.c
+++ b/tools/testing/selftests/bpf/progs/test_core_extern.c
@@ -11,6 +11,7 @@
static int (*bpf_missing_helper)(const void *arg1, int arg2) = (void *) 999;
extern int LINUX_KERNEL_VERSION __kconfig;
+extern int LINUX_UNKNOWN_VIRTUAL_EXTERN __kconfig __weak;
extern bool CONFIG_BPF_SYSCALL __kconfig; /* strong */
extern enum libbpf_tristate CONFIG_TRISTATE __kconfig __weak;
extern bool CONFIG_BOOL __kconfig __weak;
@@ -22,6 +23,7 @@ extern const char CONFIG_STR[8] __kconfig __weak;
extern uint64_t CONFIG_MISSING __kconfig __weak;
uint64_t kern_ver = -1;
+uint64_t unkn_virt_val = -1;
uint64_t bpf_syscall = -1;
uint64_t tristate_val = -1;
uint64_t bool_val = -1;
@@ -38,6 +40,7 @@ int handle_sys_enter(struct pt_regs *ctx)
int i;
kern_ver = LINUX_KERNEL_VERSION;
+ unkn_virt_val = LINUX_UNKNOWN_VIRTUAL_EXTERN;
bpf_syscall = CONFIG_BPF_SYSCALL;
tristate_val = CONFIG_TRISTATE;
bool_val = CONFIG_BOOL;
diff --git a/tools/testing/selftests/bpf/progs/test_core_read_macros.c b/tools/testing/selftests/bpf/progs/test_core_read_macros.c
new file mode 100644
index 000000000000..873d85a4739b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_read_macros.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* shuffled layout for relocatable (CO-RE) reads */
+struct callback_head___shuffled {
+ void (*func)(struct callback_head___shuffled *head);
+ struct callback_head___shuffled *next;
+};
+
+struct callback_head k_probe_in = {};
+struct callback_head___shuffled k_core_in = {};
+
+struct callback_head *u_probe_in = 0;
+struct callback_head___shuffled *u_core_in = 0;
+
+long k_probe_out = 0;
+long u_probe_out = 0;
+
+long k_core_out = 0;
+long u_core_out = 0;
+
+int my_pid = 0;
+
+SEC("raw_tracepoint/sys_enter")
+int handler(void *ctx)
+{
+ int pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (my_pid != pid)
+ return 0;
+
+ /* next pointers for kernel address space have to be initialized from
+ * BPF side, user-space mmaped addresses are still user-space addresses
+ */
+ k_probe_in.next = &k_probe_in;
+ __builtin_preserve_access_index(({k_core_in.next = &k_core_in;}));
+
+ k_probe_out = (long)BPF_PROBE_READ(&k_probe_in, next, next, func);
+ k_core_out = (long)BPF_CORE_READ(&k_core_in, next, next, func);
+ u_probe_out = (long)BPF_PROBE_READ_USER(u_probe_in, next, next, func);
+ u_core_out = (long)BPF_CORE_READ_USER(u_core_in, next, next, func);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c b/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
index 51b3f79df523..448403634eea 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c
@@ -15,6 +15,7 @@ struct {
struct core_reloc_arrays_output {
int a2;
+ int a3;
char b123;
int c1c;
int d00d;
@@ -41,6 +42,7 @@ int test_core_arrays(void *ctx)
{
struct core_reloc_arrays *in = (void *)&data.in;
struct core_reloc_arrays_output *out = (void *)&data.out;
+ int *a;
if (CORE_READ(&out->a2, &in->a[2]))
return 1;
@@ -53,6 +55,9 @@ int test_core_arrays(void *ctx)
if (CORE_READ(&out->f01c, &in->f[0][1].c))
return 1;
+ a = __builtin_preserve_access_index(({ in->a; }));
+ out->a3 = a[0] + a[1] + a[2] + a[3];
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
index ab1e647aeb31..b86fdda2a6ea 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
@@ -42,7 +42,6 @@ int test_core_bitfields(void *ctx)
{
struct core_reloc_bitfields *in = (void *)&data.in;
struct core_reloc_bitfields_output *out = (void *)&data.out;
- uint64_t res;
out->ub1 = BPF_CORE_READ_BITFIELD_PROBED(in, ub1);
out->ub2 = BPF_CORE_READ_BITFIELD_PROBED(in, ub2);
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_enum64val.c b/tools/testing/selftests/bpf/progs/test_core_reloc_enum64val.c
new file mode 100644
index 000000000000..63147fbfae6e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_enum64val.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ char in[256];
+ char out[256];
+ bool skip;
+} data = {};
+
+enum named_unsigned_enum64 {
+ UNSIGNED_ENUM64_VAL1 = 0x1ffffffffULL,
+ UNSIGNED_ENUM64_VAL2 = 0x2ffffffffULL,
+ UNSIGNED_ENUM64_VAL3 = 0x3ffffffffULL,
+};
+
+enum named_signed_enum64 {
+ SIGNED_ENUM64_VAL1 = 0x1ffffffffLL,
+ SIGNED_ENUM64_VAL2 = -2,
+ SIGNED_ENUM64_VAL3 = 0x3ffffffffLL,
+};
+
+struct core_reloc_enum64val_output {
+ bool unsigned_val1_exists;
+ bool unsigned_val2_exists;
+ bool unsigned_val3_exists;
+ bool signed_val1_exists;
+ bool signed_val2_exists;
+ bool signed_val3_exists;
+
+ long unsigned_val1;
+ long unsigned_val2;
+ long signed_val1;
+ long signed_val2;
+};
+
+SEC("raw_tracepoint/sys_enter")
+int test_core_enum64val(void *ctx)
+{
+#if __clang_major__ >= 15
+ struct core_reloc_enum64val_output *out = (void *)&data.out;
+ enum named_unsigned_enum64 named_unsigned = 0;
+ enum named_signed_enum64 named_signed = 0;
+
+ out->unsigned_val1_exists = bpf_core_enum_value_exists(named_unsigned, UNSIGNED_ENUM64_VAL1);
+ out->unsigned_val2_exists = bpf_core_enum_value_exists(enum named_unsigned_enum64, UNSIGNED_ENUM64_VAL2);
+ out->unsigned_val3_exists = bpf_core_enum_value_exists(enum named_unsigned_enum64, UNSIGNED_ENUM64_VAL3);
+ out->signed_val1_exists = bpf_core_enum_value_exists(named_signed, SIGNED_ENUM64_VAL1);
+ out->signed_val2_exists = bpf_core_enum_value_exists(enum named_signed_enum64, SIGNED_ENUM64_VAL2);
+ out->signed_val3_exists = bpf_core_enum_value_exists(enum named_signed_enum64, SIGNED_ENUM64_VAL3);
+
+ out->unsigned_val1 = bpf_core_enum_value(named_unsigned, UNSIGNED_ENUM64_VAL1);
+ out->unsigned_val2 = bpf_core_enum_value(named_unsigned, UNSIGNED_ENUM64_VAL2);
+ out->signed_val1 = bpf_core_enum_value(named_signed, SIGNED_ENUM64_VAL1);
+ out->signed_val2 = bpf_core_enum_value(named_signed, SIGNED_ENUM64_VAL2);
+	/* UNSIGNED/SIGNED_ENUM64_VAL3 values are optional */
+
+#else
+ data.skip = true;
+#endif
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c b/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c
index 7e45e2bdf6cd..5b8a75097ea3 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c
@@ -45,35 +45,34 @@ int test_core_existence(void *ctx)
struct core_reloc_existence_output *out = (void *)&data.out;
out->a_exists = bpf_core_field_exists(in->a);
- if (bpf_core_field_exists(in->a))
+ if (bpf_core_field_exists(struct core_reloc_existence, a))
out->a_value = BPF_CORE_READ(in, a);
else
out->a_value = 0xff000001u;
out->b_exists = bpf_core_field_exists(in->b);
- if (bpf_core_field_exists(in->b))
+ if (bpf_core_field_exists(struct core_reloc_existence, b))
out->b_value = BPF_CORE_READ(in, b);
else
out->b_value = 0xff000002u;
out->c_exists = bpf_core_field_exists(in->c);
- if (bpf_core_field_exists(in->c))
+ if (bpf_core_field_exists(struct core_reloc_existence, c))
out->c_value = BPF_CORE_READ(in, c);
else
out->c_value = 0xff000003u;
out->arr_exists = bpf_core_field_exists(in->arr);
- if (bpf_core_field_exists(in->arr))
+ if (bpf_core_field_exists(struct core_reloc_existence, arr))
out->arr_value = BPF_CORE_READ(in, arr[0]);
else
out->arr_value = 0xff000004u;
out->s_exists = bpf_core_field_exists(in->s);
- if (bpf_core_field_exists(in->s))
+ if (bpf_core_field_exists(struct core_reloc_existence, s))
out->s_value = BPF_CORE_READ(in, s.x);
else
out->s_value = 0xff000005u;
return 0;
}
-
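
Both spellings exercised in this test are valid: bpf_core_field_exists() from bpf_core_read.h is variadic, accepting either a field access expression or a type/member pair, and both compile down to the same existence relocation. A side-by-side sketch:

	/* one-argument form: takes an actual field reference */
	out->a_exists = bpf_core_field_exists(in->a);

	/* two-argument form: names type and member, no object required */
	if (bpf_core_field_exists(struct core_reloc_existence, a))
		out->a_value = BPF_CORE_READ(in, a);
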
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
index 145028b52ad8..ee4a601dcb06 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
@@ -21,6 +21,7 @@ struct core_reloc_kernel_output {
/* we have test_progs[-flavor], so cut flavor part */
char comm[sizeof("test_progs")];
int comm_len;
+ bool local_task_struct_matches;
};
struct task_struct {
@@ -30,15 +31,29 @@ struct task_struct {
struct task_struct *group_leader;
};
+struct mm_struct___wrong {
+ int abc_whatever_should_not_exist;
+};
+
+struct task_struct___local {
+ int pid;
+ struct mm_struct___wrong *mm;
+};
+
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_kernel(void *ctx)
{
+ /* Support for the BPF_TYPE_MATCHES argument to the
+ * __builtin_preserve_type_info builtin was added at some point during
+ * development of clang 15 and it's what we require for this test.
+ */
+#if __has_builtin(__builtin_preserve_type_info) && __clang_major__ >= 15
struct task_struct *task = (void *)bpf_get_current_task();
struct core_reloc_kernel_output *out = (void *)&data.out;
uint64_t pid_tgid = bpf_get_current_pid_tgid();
- uint32_t real_tgid = (uint32_t)pid_tgid;
+ int32_t real_tgid = (int32_t)pid_tgid;
int pid, tgid;
if (data.my_pid_tgid != pid_tgid)
@@ -93,6 +108,10 @@ int test_core_kernel(void *ctx)
group_leader, group_leader, group_leader, group_leader,
comm);
+ out->local_task_struct_matches = bpf_core_type_matches(struct task_struct___local);
+#else
+ data.skip = true;
+#endif
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c b/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c
index 8b533db4a7a5..b2ded497572a 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_mods.c
@@ -42,7 +42,16 @@ struct core_reloc_mods {
core_reloc_mods_substruct_t h;
};
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
+#else
+#define CORE_READ(dst, src) ({ \
+ int __sz = sizeof(*(dst)) < sizeof(*(src)) ? sizeof(*(dst)) : \
+ sizeof(*(src)); \
+ bpf_core_read((char *)(dst) + sizeof(*(dst)) - __sz, __sz, \
+ (const char *)(src) + sizeof(*(src)) - __sz); \
+})
+#endif
SEC("raw_tracepoint/sys_enter")
int test_core_mods(void *ctx)
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_module.c b/tools/testing/selftests/bpf/progs/test_core_reloc_module.c
index f59f175c7baf..bcb31ff92dcc 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_module.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_module.c
@@ -43,8 +43,8 @@ int BPF_PROG(test_core_module_probed,
#if __has_builtin(__builtin_preserve_enum_value)
struct core_reloc_module_output *out = (void *)&data.out;
__u64 pid_tgid = bpf_get_current_pid_tgid();
- __u32 real_tgid = (__u32)(pid_tgid >> 32);
- __u32 real_pid = (__u32)pid_tgid;
+ __s32 real_tgid = (__s32)(pid_tgid >> 32);
+ __s32 real_pid = (__s32)pid_tgid;
if (data.my_pid_tgid != pid_tgid)
return 0;
@@ -77,8 +77,8 @@ int BPF_PROG(test_core_module_direct,
#if __has_builtin(__builtin_preserve_enum_value)
struct core_reloc_module_output *out = (void *)&data.out;
__u64 pid_tgid = bpf_get_current_pid_tgid();
- __u32 real_tgid = (__u32)(pid_tgid >> 32);
- __u32 real_pid = (__u32)pid_tgid;
+ __s32 real_tgid = (__s32)(pid_tgid >> 32);
+ __s32 real_pid = (__s32)pid_tgid;
if (data.my_pid_tgid != pid_tgid)
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_size.c b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
index d7fb6cfc7891..5b686053ce42 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
@@ -15,12 +15,21 @@ struct {
struct core_reloc_size_output {
int int_sz;
+ int int_off;
int struct_sz;
+ int struct_off;
int union_sz;
+ int union_off;
int arr_sz;
+ int arr_off;
int arr_elem_sz;
+ int arr_elem_off;
int ptr_sz;
+ int ptr_off;
int enum_sz;
+ int enum_off;
+ int float_sz;
+ int float_off;
};
struct core_reloc_size {
@@ -30,6 +39,7 @@ struct core_reloc_size {
int arr_field[4];
void *ptr_field;
enum { VALUE = 123 } enum_field;
+ float float_field;
};
SEC("raw_tracepoint/sys_enter")
@@ -39,12 +49,28 @@ int test_core_size(void *ctx)
struct core_reloc_size_output *out = (void *)&data.out;
out->int_sz = bpf_core_field_size(in->int_field);
+ out->int_off = bpf_core_field_offset(in->int_field);
+
out->struct_sz = bpf_core_field_size(in->struct_field);
+ out->struct_off = bpf_core_field_offset(in->struct_field);
+
out->union_sz = bpf_core_field_size(in->union_field);
+ out->union_off = bpf_core_field_offset(in->union_field);
+
out->arr_sz = bpf_core_field_size(in->arr_field);
- out->arr_elem_sz = bpf_core_field_size(in->arr_field[0]);
- out->ptr_sz = bpf_core_field_size(in->ptr_field);
- out->enum_sz = bpf_core_field_size(in->enum_field);
+ out->arr_off = bpf_core_field_offset(in->arr_field);
+
+ out->arr_elem_sz = bpf_core_field_size(struct core_reloc_size, arr_field[1]);
+ out->arr_elem_off = bpf_core_field_offset(struct core_reloc_size, arr_field[1]);
+
+ out->ptr_sz = bpf_core_field_size(struct core_reloc_size, ptr_field);
+ out->ptr_off = bpf_core_field_offset(struct core_reloc_size, ptr_field);
+
+ out->enum_sz = bpf_core_field_size(struct core_reloc_size, enum_field);
+ out->enum_off = bpf_core_field_offset(struct core_reloc_size, enum_field);
+
+ out->float_sz = bpf_core_field_size(struct core_reloc_size, float_field);
+ out->float_off = bpf_core_field_offset(struct core_reloc_size, float_field);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c b/tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c
index fb60f8195c53..2edb4df35e6e 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c
@@ -19,6 +19,14 @@ struct a_struct {
int x;
};
+struct a_complex_struct {
+ union {
+ struct a_struct *a;
+ void *b;
+ } x;
+ volatile long y;
+};
+
union a_union {
int y;
int z;
@@ -43,6 +51,7 @@ typedef int int_typedef;
typedef enum { TYPEDEF_ENUM_VAL1, TYPEDEF_ENUM_VAL2 } enum_typedef;
typedef void *void_ptr_typedef;
+typedef int *restrict restrict_ptr_typedef;
typedef int (*func_proto_typedef)(long);
@@ -50,6 +59,7 @@ typedef char arr_typedef[20];
struct core_reloc_type_based_output {
bool struct_exists;
+ bool complex_struct_exists;
bool union_exists;
bool enum_exists;
bool typedef_named_struct_exists;
@@ -58,9 +68,24 @@ struct core_reloc_type_based_output {
bool typedef_int_exists;
bool typedef_enum_exists;
bool typedef_void_ptr_exists;
+ bool typedef_restrict_ptr_exists;
bool typedef_func_proto_exists;
bool typedef_arr_exists;
+ bool struct_matches;
+ bool complex_struct_matches;
+ bool union_matches;
+ bool enum_matches;
+ bool typedef_named_struct_matches;
+ bool typedef_anon_struct_matches;
+ bool typedef_struct_ptr_matches;
+ bool typedef_int_matches;
+ bool typedef_enum_matches;
+ bool typedef_void_ptr_matches;
+ bool typedef_restrict_ptr_matches;
+ bool typedef_func_proto_matches;
+ bool typedef_arr_matches;
+
int struct_sz;
int union_sz;
int enum_sz;
@@ -77,10 +102,17 @@ struct core_reloc_type_based_output {
SEC("raw_tracepoint/sys_enter")
int test_core_type_based(void *ctx)
{
-#if __has_builtin(__builtin_preserve_type_info)
+ /* Support for the BPF_TYPE_MATCHES argument to the
+ * __builtin_preserve_type_info builtin was added at some point during
+ * development of clang 15 and it's what we require for this test. Part of it
+ * could run with merely __builtin_preserve_type_info (which could be checked
+ * separately), but we have to find an upper bound.
+ */
+#if __has_builtin(__builtin_preserve_type_info) && __clang_major__ >= 15
struct core_reloc_type_based_output *out = (void *)&data.out;
out->struct_exists = bpf_core_type_exists(struct a_struct);
+ out->complex_struct_exists = bpf_core_type_exists(struct a_complex_struct);
out->union_exists = bpf_core_type_exists(union a_union);
out->enum_exists = bpf_core_type_exists(enum an_enum);
out->typedef_named_struct_exists = bpf_core_type_exists(named_struct_typedef);
@@ -89,9 +121,24 @@ int test_core_type_based(void *ctx)
out->typedef_int_exists = bpf_core_type_exists(int_typedef);
out->typedef_enum_exists = bpf_core_type_exists(enum_typedef);
out->typedef_void_ptr_exists = bpf_core_type_exists(void_ptr_typedef);
+ out->typedef_restrict_ptr_exists = bpf_core_type_exists(restrict_ptr_typedef);
out->typedef_func_proto_exists = bpf_core_type_exists(func_proto_typedef);
out->typedef_arr_exists = bpf_core_type_exists(arr_typedef);
+ out->struct_matches = bpf_core_type_matches(struct a_struct);
+ out->complex_struct_matches = bpf_core_type_matches(struct a_complex_struct);
+ out->union_matches = bpf_core_type_matches(union a_union);
+ out->enum_matches = bpf_core_type_matches(enum an_enum);
+ out->typedef_named_struct_matches = bpf_core_type_matches(named_struct_typedef);
+ out->typedef_anon_struct_matches = bpf_core_type_matches(anon_struct_typedef);
+ out->typedef_struct_ptr_matches = bpf_core_type_matches(struct_ptr_typedef);
+ out->typedef_int_matches = bpf_core_type_matches(int_typedef);
+ out->typedef_enum_matches = bpf_core_type_matches(enum_typedef);
+ out->typedef_void_ptr_matches = bpf_core_type_matches(void_ptr_typedef);
+ out->typedef_restrict_ptr_matches = bpf_core_type_matches(restrict_ptr_typedef);
+ out->typedef_func_proto_matches = bpf_core_type_matches(func_proto_typedef);
+ out->typedef_arr_matches = bpf_core_type_matches(arr_typedef);
+
out->struct_sz = bpf_core_type_size(struct a_struct);
out->union_sz = bpf_core_type_size(union a_union);
out->enum_sz = bpf_core_type_size(enum an_enum);
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c b/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c
index 22aba3f6e344..6fc8b9d66e34 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c
@@ -80,7 +80,7 @@ int test_core_type_id(void *ctx)
* to detect whether this test has to be executed, however strange
* that might look like.
*
- * [0] https://reviews.llvm.org/D85174
+ * [0] https://github.com/llvm/llvm-project/commit/00602ee7ef0bf6c68d690a2bd729c12b95c95c99
*/
#if __has_builtin(__builtin_preserve_type_info)
struct core_reloc_type_id_output *out = (void *)&data.out;
diff --git a/tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c b/tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c
new file mode 100644
index 000000000000..4061f701ca50
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+const volatile int my_pid;
+
+bool abc1_called;
+bool abc2_called;
+bool custom1_called;
+bool custom2_called;
+bool kprobe1_called;
+bool xyz_called;
+
+SEC("abc")
+int abc1(void *ctx)
+{
+ abc1_called = true;
+ return 0;
+}
+
+SEC("abc/whatever")
+int abc2(void *ctx)
+{
+ abc2_called = true;
+ return 0;
+}
+
+SEC("custom")
+int custom1(void *ctx)
+{
+ custom1_called = true;
+ return 0;
+}
+
+SEC("custom/something")
+int custom2(void *ctx)
+{
+ custom2_called = true;
+ return 0;
+}
+
+SEC("kprobe")
+int kprobe1(void *ctx)
+{
+ kprobe1_called = true;
+ return 0;
+}
+
+SEC("xyz/blah")
+int xyz(void *ctx)
+{
+ int whatever;
+
+ /* use sleepable helper, custom handler should set sleepable flag */
+ bpf_copy_from_user(&whatever, sizeof(whatever), NULL);
+ xyz_called = true;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
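
These sections only load because user space registers custom handlers first. A sketch of the registering side, assuming libbpf's libbpf_register_prog_handler() API and its "xyz+" prefix-matching convention (see prog_tests/custom_sec_handlers.c for the real caller; the program/attach types below are illustrative):

	#include <bpf/libbpf.h>

	/* setup callback: mark "xyz/..." programs sleepable so that
	 * bpf_copy_from_user() above passes verification
	 */
	static int xyz_setup(struct bpf_program *prog, long cookie)
	{
		return bpf_program__set_flags(prog,
					      bpf_program__flags(prog) | BPF_F_SLEEPABLE);
	}

	static int register_xyz_handler(void)
	{
		LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
			    .prog_setup_fn = xyz_setup);

		return libbpf_register_prog_handler("xyz+", BPF_PROG_TYPE_LSM,
						    BPF_LSM_MAC, &opts);
	}
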
diff --git a/tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c b/tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c
new file mode 100644
index 000000000000..27c27cff6a3a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Google */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+extern const int bpf_prog_active __ksym;
+
+SEC("fentry/security_inode_getattr")
+int BPF_PROG(d_path_check_rdonly_mem, struct path *path, struct kstat *stat,
+ __u32 request_mask, unsigned int query_flags)
+{
+ void *active;
+ __u32 cpu;
+
+ cpu = bpf_get_smp_processor_id();
+ active = (void *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
+ if (active) {
+ /* FAIL here! 'active' points to readonly memory. bpf helpers
+ * that update its arguments can not write into it.
+ */
+ bpf_d_path(path, active, sizeof(int));
+ }
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
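
For contrast, a sketch of a valid use of the same ksym: the read-only per-CPU pointer may be dereferenced directly, it just cannot be handed to helpers that write through their arguments:

	const int *active;
	int val = 0;

	active = bpf_per_cpu_ptr(&bpf_prog_active, cpu);
	if (active)
		val = *active;	/* plain read is fine */
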
diff --git a/tools/testing/selftests/bpf/progs/test_d_path_check_types.c b/tools/testing/selftests/bpf/progs/test_d_path_check_types.c
new file mode 100644
index 000000000000..7e02b7361307
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_d_path_check_types.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+extern const int bpf_prog_active __ksym;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 1 << 12);
+} ringbuf SEC(".maps");
+
+SEC("fentry/security_inode_getattr")
+int BPF_PROG(d_path_check_rdonly_mem, struct path *path, struct kstat *stat,
+ __u32 request_mask, unsigned int query_flags)
+{
+ void *active;
+ u32 cpu;
+
+ cpu = bpf_get_smp_processor_id();
+ active = (void *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
+ if (active) {
+ /* FAIL here! 'active' points to 'regular' memory. It
+ * cannot be submitted to ring buffer.
+ */
+ bpf_ringbuf_submit(active, 0);
+ }
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_deny_namespace.c b/tools/testing/selftests/bpf/progs/test_deny_namespace.c
new file mode 100644
index 000000000000..e96b901a733c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_deny_namespace.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <errno.h>
+#include <linux/capability.h>
+
+typedef struct { unsigned long long val; } kernel_cap_t;
+
+struct cred {
+ kernel_cap_t cap_effective;
+} __attribute__((preserve_access_index));
+
+char _license[] SEC("license") = "GPL";
+
+SEC("lsm.s/userns_create")
+int BPF_PROG(test_userns_create, const struct cred *cred, int ret)
+{
+ kernel_cap_t caps = cred->cap_effective;
+ __u64 cap_mask = 1ULL << CAP_SYS_ADMIN;
+
+ if (ret)
+ return 0;
+
+ ret = -EPERM;
+ if (caps.val & cap_mask)
+ return 0;
+
+ return -EPERM;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_enable_stats.c b/tools/testing/selftests/bpf/progs/test_enable_stats.c
index 01a002ade529..1705097d01d7 100644
--- a/tools/testing/selftests/bpf/progs/test_enable_stats.c
+++ b/tools/testing/selftests/bpf/progs/test_enable_stats.c
@@ -13,6 +13,6 @@ __u64 count = 0;
SEC("raw_tracepoint/sys_enter")
int test_enable_stats(void *ctx)
{
- count += 1;
+ __sync_fetch_and_add(&count, 1);
return 0;
}
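
The plain increment compiled to a non-atomic load/add/store, so concurrent sys_enter events on different CPUs could lose counts; __sync_fetch_and_add() emits an atomic BPF add instead. For context, a sketch of the user-space side this program exercises, using libbpf's bpf_enable_stats():

	#include <bpf/bpf.h>

	/* run_time_ns/run_cnt accounting stays enabled while fd is open */
	static int enable_run_stats(void)
	{
		return bpf_enable_stats(BPF_STATS_RUN_TIME);
	}
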
diff --git a/tools/testing/selftests/bpf/progs/test_fill_link_info.c b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
new file mode 100644
index 000000000000..fac33a14f200
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+
+extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
+extern bool CONFIG_PPC_FTRACE_OUT_OF_LINE __kconfig __weak;
+extern bool CONFIG_KPROBES_ON_FTRACE __kconfig __weak;
+extern bool CONFIG_PPC64 __kconfig __weak;
+
+/* This function is here to have CONFIG_X86_KERNEL_IBT,
+ * CONFIG_PPC_FTRACE_OUT_OF_LINE, CONFIG_KPROBES_ON_FTRACE,
+ * CONFIG_PPC64 used and added to object BTF.
+ */
+int unused(void)
+{
+ return CONFIG_X86_KERNEL_IBT ||
+ CONFIG_PPC_FTRACE_OUT_OF_LINE ||
+ CONFIG_KPROBES_ON_FTRACE ||
+ CONFIG_PPC64 ? 0 : 1;
+}
+
+SEC("kprobe")
+int BPF_PROG(kprobe_run)
+{
+ return 0;
+}
+
+SEC("uprobe")
+int BPF_PROG(uprobe_run)
+{
+ return 0;
+}
+
+SEC("tracepoint")
+int BPF_PROG(tp_run)
+{
+ return 0;
+}
+
+SEC("perf_event")
+int event_run(void *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe.multi")
+int BPF_PROG(kmulti_run)
+{
+ return 0;
+}
+
+SEC("uprobe.multi")
+int BPF_PROG(umulti_run)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_fsverity.c b/tools/testing/selftests/bpf/progs/test_fsverity.c
new file mode 100644
index 000000000000..9e0f73e8189c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_fsverity.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_kfuncs.h"
+
+char _license[] SEC("license") = "GPL";
+
+#ifndef SHA256_DIGEST_SIZE
+#define SHA256_DIGEST_SIZE 32
+#endif
+
+#define SIZEOF_STRUCT_FSVERITY_DIGEST 4 /* sizeof(struct fsverity_digest) */
+
+char expected_digest[SIZEOF_STRUCT_FSVERITY_DIGEST + SHA256_DIGEST_SIZE];
+char digest[SIZEOF_STRUCT_FSVERITY_DIGEST + SHA256_DIGEST_SIZE];
+__u32 monitored_pid;
+__u32 got_fsverity;
+__u32 digest_matches;
+
+SEC("lsm.s/file_open")
+int BPF_PROG(test_file_open, struct file *f)
+{
+ struct bpf_dynptr digest_ptr;
+ __u32 pid;
+ int ret;
+ int i;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ bpf_dynptr_from_mem(digest, sizeof(digest), 0, &digest_ptr);
+ ret = bpf_get_fsverity_digest(f, &digest_ptr);
+ if (ret < 0)
+ return 0;
+ got_fsverity = 1;
+
+ for (i = 0; i < (int)sizeof(digest); i++) {
+ if (digest[i] != expected_digest[i])
+ return 0;
+ }
+
+ digest_matches = 1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_get_xattr.c b/tools/testing/selftests/bpf/progs/test_get_xattr.c
new file mode 100644
index 000000000000..54305f4c9f2d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_get_xattr.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__u32 monitored_pid;
+__u32 found_xattr_from_file;
+__u32 found_xattr_from_dentry;
+
+static const char expected_value[] = "hello";
+char value1[32];
+char value2[32];
+
+/* Matches caller of test_get_xattr() in prog_tests/fs_kfuncs.c */
+static const char xattr_names[][64] = {
+ /* The following work. */
+ "user.kfuncs",
+ "security.bpf.xxx",
+
+ /* The following do not work. */
+ "security.bpf",
+ "security.selinux"
+};
+
+SEC("lsm.s/file_open")
+int BPF_PROG(test_file_open, struct file *f)
+{
+ struct bpf_dynptr value_ptr;
+ __u32 pid;
+ int ret, i;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ bpf_dynptr_from_mem(value1, sizeof(value1), 0, &value_ptr);
+
+ for (i = 0; i < ARRAY_SIZE(xattr_names); i++) {
+ ret = bpf_get_file_xattr(f, xattr_names[i], &value_ptr);
+ if (ret == sizeof(expected_value))
+ break;
+ }
+ if (ret != sizeof(expected_value))
+ return 0;
+ if (bpf_strncmp(value1, ret, expected_value))
+ return 0;
+ found_xattr_from_file = 1;
+ return 0;
+}
+
+SEC("lsm.s/inode_getxattr")
+int BPF_PROG(test_inode_getxattr, struct dentry *dentry, char *name)
+{
+ struct bpf_dynptr value_ptr;
+ __u32 pid;
+ int ret, i;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ bpf_dynptr_from_mem(value2, sizeof(value2), 0, &value_ptr);
+
+ for (i = 0; i < ARRAY_SIZE(xattr_names); i++) {
+ ret = bpf_get_dentry_xattr(dentry, xattr_names[i], &value_ptr);
+ if (ret == sizeof(expected_value))
+ break;
+ }
+ if (ret != sizeof(expected_value))
+ return 0;
+ if (bpf_strncmp(value2, ret, expected_value))
+ return 0;
+ found_xattr_from_dentry = 1;
+
+ /* return non-zero to fail getxattr from user space */
+ return -EINVAL;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_data.c b/tools/testing/selftests/bpf/progs/test_global_data.c
index 1319be1c54ba..719e314ef3e4 100644
--- a/tools/testing/selftests/bpf/progs/test_global_data.c
+++ b/tools/testing/selftests/bpf/progs/test_global_data.c
@@ -68,7 +68,7 @@ static struct foo struct3 = {
bpf_map_update_elem(&result_##map, &key, var, 0); \
} while (0)
-SEC("classifier/static_data_load")
+SEC("tc")
int load_static_data(struct __sk_buff *skb)
{
static const __u64 bar = ~0;
diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c
index 880260f6d536..fc69ff18880d 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func1.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func1.c
@@ -3,14 +3,15 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
-#ifndef MAX_STACK
-#define MAX_STACK (512 - 3 * 32 + 8)
-#endif
+#define MAX_STACK 260
static __attribute__ ((noinline))
int f0(int var, struct __sk_buff *skb)
{
+ asm volatile ("");
+
return skb->len;
}
@@ -19,6 +20,8 @@ int f1(struct __sk_buff *skb)
{
volatile char buf[MAX_STACK] = {};
+ __sink(buf[MAX_STACK - 1]);
+
return f0(0, skb) + skb->len;
}
@@ -27,6 +30,10 @@ int f3(int, struct __sk_buff *skb, int);
__attribute__ ((noinline))
int f2(int val, struct __sk_buff *skb)
{
+ volatile char buf[MAX_STACK] = {};
+
+ __sink(buf[MAX_STACK - 1]);
+
return f1(skb) + f3(val, skb, 1);
}
@@ -35,11 +42,14 @@ int f3(int val, struct __sk_buff *skb, int var)
{
volatile char buf[MAX_STACK] = {};
+ __sink(buf[MAX_STACK - 1]);
+
return skb->ifindex * val * var;
}
-SEC("classifier/test")
-int test_cls(struct __sk_buff *skb)
+SEC("tc")
+__failure __msg("combined stack size of 3 calls is")
+int global_func1(struct __sk_buff *skb)
{
return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4);
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func10.c b/tools/testing/selftests/bpf/progs/test_global_func10.c
new file mode 100644
index 000000000000..09d027bd3ea8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func10.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#if !defined(__clang__)
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+
+struct Small {
+ long x;
+};
+
+struct Big {
+ long x;
+ long y;
+};
+
+__noinline int foo(const struct Big *big)
+{
+ if (!big)
+ return 0;
+
+ return bpf_get_prandom_u32() < big->y;
+}
+
+SEC("cgroup_skb/ingress")
+__failure __msg("invalid read from stack")
+int global_func10(struct __sk_buff *skb)
+{
+ const struct Small small = {.x = skb->len };
+
+ return foo((struct Big *)&small) ? 1 : 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func11.c b/tools/testing/selftests/bpf/progs/test_global_func11.c
new file mode 100644
index 000000000000..283e036dc401
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func11.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct S {
+ int x;
+};
+
+__noinline int foo(const struct S *s)
+{
+ return s ? bpf_get_prandom_u32() < s->x : 0;
+}
+
+SEC("cgroup_skb/ingress")
+__failure __msg("Caller passes invalid args into func#1")
+int global_func11(struct __sk_buff *skb)
+{
+ return foo((const void *)skb);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func12.c b/tools/testing/selftests/bpf/progs/test_global_func12.c
new file mode 100644
index 000000000000..6e03d42519a6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func12.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct S {
+ int x;
+};
+
+__noinline int foo(const struct S *s)
+{
+ return bpf_get_prandom_u32() < s->x;
+}
+
+SEC("cgroup_skb/ingress")
+__failure __msg("invalid mem access 'mem_or_null'")
+int global_func12(struct __sk_buff *skb)
+{
+ const struct S s = {.x = skb->len };
+
+ foo(&s);
+
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func13.c b/tools/testing/selftests/bpf/progs/test_global_func13.c
new file mode 100644
index 000000000000..02ea80da75b5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func13.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct S {
+ int x;
+};
+
+__noinline int foo(const struct S *s)
+{
+ if (s)
+ return bpf_get_prandom_u32() < s->x;
+
+ return 0;
+}
+
+SEC("cgroup_skb/ingress")
+__failure __msg("Caller passes invalid args into func#1")
+int global_func13(struct __sk_buff *skb)
+{
+ const struct S *s = (const struct S *)(0xbedabeda);
+
+ return foo(s);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func14.c b/tools/testing/selftests/bpf/progs/test_global_func14.c
new file mode 100644
index 000000000000..33b7d5efd7b2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func14.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct S;
+
+__noinline int foo(const struct S *s)
+{
+ if (s)
+ return bpf_get_prandom_u32() < *(const int *) s;
+
+ return 0;
+}
+
+SEC("cgroup_skb/ingress")
+__failure __msg("reference type('FWD S') size cannot be determined")
+int global_func14(struct __sk_buff *skb)
+{
+
+ return foo(NULL);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func15.c b/tools/testing/selftests/bpf/progs/test_global_func15.c
new file mode 100644
index 000000000000..201cc000b3f4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func15.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+__noinline int foo(unsigned int *v)
+{
+ if (v)
+ *v = bpf_get_prandom_u32();
+
+ return 0;
+}
+
+SEC("cgroup_skb/ingress")
+__failure __msg("At program exit the register R0 has ")
+int global_func15(struct __sk_buff *skb)
+{
+ unsigned int v = 1;
+
+ foo(&v);
+
+ return v;
+}
+
+SEC("cgroup_skb/ingress")
+__log_level(2) __flag(BPF_F_TEST_STATE_FREQ)
+__failure
+/* check that fallthrough code path marks r0 as precise */
+__msg("mark_precise: frame0: regs=r0 stack= before 2: (b7) r0 = 1")
+/* check that branch code path marks r0 as precise */
+__msg("mark_precise: frame0: regs=r0 stack= before 0: (85) call bpf_get_prandom_u32#7")
+__msg("At program exit the register R0 has ")
+__naked int global_func15_tricky_pruning(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 s> 1000 goto 1f;"
+ "r0 = 1;"
+ "1:"
+ "goto +0;" /* checkpoint */
+	/* a cgroup_skb/ingress program is expected to return values in
+	 * [0, 1], so the branch above makes sure that in the fallthrough
+	 * case we have a valid 1 stored in the R0 register, while in the
+	 * branch case we assign some random value to R0. So if precision
+	 * tracking for R0 at program exit is broken, we might erroneously
+	 * prune the branch case, because R0 in the fallthrough case is
+	 * imprecise (and thus any value is valid from the POV of the
+	 * verifier's is_state_equal() logic)
+	 */
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_common
+ );
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func16.c b/tools/testing/selftests/bpf/progs/test_global_func16.c
new file mode 100644
index 000000000000..e3e64bc472cd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func16.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+__noinline int foo(int (*arr)[10])
+{
+ if (arr)
+ return (*arr)[9];
+
+ return 0;
+}
+
+SEC("cgroup_skb/ingress")
+__success
+int global_func16(struct __sk_buff *skb)
+{
+ int array[10];
+
+ const int rv = foo(&array);
+
+ return rv ? 1 : 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func17.c b/tools/testing/selftests/bpf/progs/test_global_func17.c
new file mode 100644
index 000000000000..5de44b09e8ec
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func17.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+__noinline int foo(int *p)
+{
+ barrier_var(p);
+ return p ? (*p = 42) : 0;
+}
+
+const volatile int i;
+
+SEC("tc")
+__failure __msg("Caller passes invalid args into func#1")
+int global_func17(struct __sk_buff *skb)
+{
+ return foo((int *)&i);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func2.c b/tools/testing/selftests/bpf/progs/test_global_func2.c
index 2c18d82923a2..2beab9c3b68a 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func2.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func2.c
@@ -1,4 +1,49 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
#define MAX_STACK (512 - 3 * 32)
-#include "test_global_func1.c"
+
+static __attribute__ ((noinline))
+int f0(int var, struct __sk_buff *skb)
+{
+ return skb->len;
+}
+
+__attribute__ ((noinline))
+int f1(struct __sk_buff *skb)
+{
+ volatile char buf[MAX_STACK] = {};
+
+ __sink(buf[MAX_STACK - 1]);
+
+ return f0(0, skb) + skb->len;
+}
+
+int f3(int, struct __sk_buff *skb, int);
+
+__attribute__ ((noinline))
+int f2(int val, struct __sk_buff *skb)
+{
+ return f1(skb) + f3(val, skb, 1);
+}
+
+__attribute__ ((noinline))
+int f3(int val, struct __sk_buff *skb, int var)
+{
+ volatile char buf[MAX_STACK] = {};
+
+ __sink(buf[MAX_STACK - 1]);
+
+ return skb->ifindex * val * var;
+}
+
+SEC("tc")
+__success
+int global_func2(struct __sk_buff *skb)
+{
+ return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func3.c b/tools/testing/selftests/bpf/progs/test_global_func3.c
index 86f0ecb304fc..142b682d3c2f 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func3.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func3.c
@@ -3,6 +3,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__attribute__ ((noinline))
int f1(struct __sk_buff *skb)
@@ -46,20 +47,15 @@ int f7(struct __sk_buff *skb)
return f6(skb);
}
-#ifndef NO_FN8
__attribute__ ((noinline))
int f8(struct __sk_buff *skb)
{
return f7(skb);
}
-#endif
-SEC("classifier/test")
-int test_cls(struct __sk_buff *skb)
+SEC("tc")
+__failure __msg("the call stack of 8 frames")
+int global_func3(struct __sk_buff *skb)
{
-#ifndef NO_FN8
return f8(skb);
-#else
- return f7(skb);
-#endif
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func4.c b/tools/testing/selftests/bpf/progs/test_global_func4.c
index 610f75edf276..1733d87ad3f3 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func4.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func4.c
@@ -1,4 +1,55 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
-#define NO_FN8
-#include "test_global_func3.c"
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+__attribute__ ((noinline))
+int f1(struct __sk_buff *skb)
+{
+ return skb->len;
+}
+
+__attribute__ ((noinline))
+int f2(int val, struct __sk_buff *skb)
+{
+ return f1(skb) + val;
+}
+
+__attribute__ ((noinline))
+int f3(int val, struct __sk_buff *skb, int var)
+{
+ return f2(var, skb) + val;
+}
+
+__attribute__ ((noinline))
+int f4(struct __sk_buff *skb)
+{
+ return f3(1, skb, 2);
+}
+
+__attribute__ ((noinline))
+int f5(struct __sk_buff *skb)
+{
+ return f4(skb);
+}
+
+__attribute__ ((noinline))
+int f6(struct __sk_buff *skb)
+{
+ return f5(skb);
+}
+
+__attribute__ ((noinline))
+int f7(struct __sk_buff *skb)
+{
+ return f6(skb);
+}
+
+SEC("tc")
+__success
+int global_func4(struct __sk_buff *skb)
+{
+ return f7(skb);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func5.c b/tools/testing/selftests/bpf/progs/test_global_func5.c
index 260c25b827ef..257c0569ff98 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func5.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func5.c
@@ -3,6 +3,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__attribute__ ((noinline))
int f1(struct __sk_buff *skb)
@@ -24,8 +25,9 @@ int f3(int val, struct __sk_buff *skb)
return skb->ifindex * val;
}
-SEC("classifier/test")
-int test_cls(struct __sk_buff *skb)
+SEC("tc")
+__failure __msg("expects pointer to ctx")
+int global_func5(struct __sk_buff *skb)
{
return f1(skb) + f2(2, skb) + f3(3, skb);
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func6.c b/tools/testing/selftests/bpf/progs/test_global_func6.c
index 69e19c64e10b..46c38c8f2cf0 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func6.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func6.c
@@ -3,6 +3,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__attribute__ ((noinline))
int f1(struct __sk_buff *skb)
@@ -24,8 +25,9 @@ int f3(int val, struct __sk_buff *skb)
return skb->ifindex * val;
}
-SEC("classifier/test")
-int test_cls(struct __sk_buff *skb)
+SEC("tc")
+__failure __msg("modified ctx ptr R2")
+int global_func6(struct __sk_buff *skb)
{
return f1(skb) + f2(2, skb) + f3(3, skb);
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func7.c b/tools/testing/selftests/bpf/progs/test_global_func7.c
index 309b3f6136bd..f182febfde3c 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func7.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func7.c
@@ -3,6 +3,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__attribute__ ((noinline))
void foo(struct __sk_buff *skb)
@@ -10,8 +11,9 @@ void foo(struct __sk_buff *skb)
skb->tc_index = 0;
}
-SEC("classifier/test")
-int test_cls(struct __sk_buff *skb)
+SEC("tc")
+__failure __msg("foo() doesn't return scalar")
+int global_func7(struct __sk_buff *skb)
{
foo(skb);
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_global_func8.c b/tools/testing/selftests/bpf/progs/test_global_func8.c
index d55a6544b1ab..9b9c57fa2dd3 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func8.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func8.c
@@ -3,6 +3,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
__noinline int foo(struct __sk_buff *skb)
{
@@ -10,7 +11,8 @@ __noinline int foo(struct __sk_buff *skb)
}
SEC("cgroup_skb/ingress")
-int test_cls(struct __sk_buff *skb)
+__success
+int global_func8(struct __sk_buff *skb)
{
if (!foo(skb))
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_global_func9.c b/tools/testing/selftests/bpf/progs/test_global_func9.c
new file mode 100644
index 000000000000..1f2cb0159b8d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func9.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct S {
+ int x;
+};
+
+struct C {
+ int x;
+ int y;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct S);
+} map SEC(".maps");
+
+enum E {
+ E_ITEM
+};
+
+static int global_data_x = 100;
+static int volatile global_data_y = 500;
+
+__noinline int foo(const struct S *s)
+{
+ if (s)
+ return bpf_get_prandom_u32() < s->x;
+
+ return 0;
+}
+
+__noinline int bar(int *x)
+{
+ if (x)
+ *x &= bpf_get_prandom_u32();
+
+ return 0;
+}
+__noinline int baz(volatile int *x)
+{
+ if (x)
+ *x &= bpf_get_prandom_u32();
+
+ return 0;
+}
+
+__noinline int qux(enum E *e)
+{
+ if (e)
+ return *e;
+
+ return 0;
+}
+
+__noinline int quux(int (*arr)[10])
+{
+ if (arr)
+ return (*arr)[9];
+
+ return 0;
+}
+
+__noinline int quuz(int **p)
+{
+ if (p)
+ *p = NULL;
+
+ return 0;
+}
+
+SEC("cgroup_skb/ingress")
+__success
+int global_func9(struct __sk_buff *skb)
+{
+ int result = 0;
+
+ {
+ const struct S s = {.x = skb->len };
+
+ result |= foo(&s);
+ }
+
+ {
+ const __u32 key = 1;
+ const struct S *s = bpf_map_lookup_elem(&map, &key);
+
+ result |= foo(s);
+ }
+
+ {
+ const struct C c = {.x = skb->len, .y = skb->family };
+
+ result |= foo((const struct S *)&c);
+ }
+
+ {
+ result |= foo(NULL);
+ }
+
+ {
+ bar(&result);
+ bar(&global_data_x);
+ }
+
+ {
+ result |= baz(&global_data_y);
+ }
+
+ {
+ enum E e = E_ITEM;
+
+ result |= qux(&e);
+ }
+
+ {
+ int array[10] = {0};
+
+ result |= quux(&array);
+ }
+
+ {
+ int *p;
+
+ result |= quuz(&p);
+ }
+
+ return result ? 1 : 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func_args.c b/tools/testing/selftests/bpf/progs/test_global_func_args.c
new file mode 100644
index 000000000000..e712bf77daae
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func_args.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+
+#include <bpf/bpf_helpers.h>
+
+struct S {
+ int v;
+};
+
+struct S global_variable = {};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 7);
+ __type(key, __u32);
+ __type(value, int);
+} values SEC(".maps");
+
+static void save_value(__u32 index, int value)
+{
+ bpf_map_update_elem(&values, &index, &value, 0);
+}
+
+__noinline int foo(__u32 index, struct S *s)
+{
+ if (s) {
+ save_value(index, s->v);
+ return ++s->v;
+ }
+
+ save_value(index, 0);
+
+ return 1;
+}
+
+__noinline int bar(__u32 index, volatile struct S *s)
+{
+ if (s) {
+ save_value(index, s->v);
+ return ++s->v;
+ }
+
+ save_value(index, 0);
+
+ return 1;
+}
+
+__noinline int baz(struct S **s)
+{
+ if (s)
+ *s = 0;
+
+ return 0;
+}
+
+SEC("cgroup_skb/ingress")
+int test_cls(struct __sk_buff *skb)
+{
+ __u32 index = 0;
+
+ {
+ const int v = foo(index++, 0);
+
+ save_value(index++, v);
+ }
+
+ {
+ struct S s = { .v = 100 };
+
+ foo(index++, &s);
+ save_value(index++, s.v);
+ }
+
+ {
+ global_variable.v = 42;
+ bar(index++, &global_variable);
+ save_value(index++, global_variable.v);
+ }
+
+ {
+ struct S v, *p = &v;
+
+ baz(&p);
+ save_value(index++, !p);
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c
new file mode 100644
index 000000000000..143c8a4852bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+static long stack[256];
+
+/*
+ * KPROBE contexts
+ */
+
+__weak int kprobe_typedef_ctx_subprog(bpf_user_pt_regs_t *ctx)
+{
+ return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?kprobe")
+__success
+int kprobe_typedef_ctx(void *ctx)
+{
+ return kprobe_typedef_ctx_subprog(ctx);
+}
+
+/* s390x defines:
+ *
+ * typedef user_pt_regs bpf_user_pt_regs_t;
+ * typedef struct { ... } user_pt_regs;
+ *
+ * so the "canonical" underlying struct type is anonymous.
+ * Thus on s390x the only valid ways to have a PTR_TO_CTX argument in a
+ * global subprog are:
+ * - bpf_user_pt_regs_t *ctx (typedef);
+ * - struct bpf_user_pt_regs_t *ctx (backwards-compatible struct hack);
+ * - void *ctx __arg_ctx (arg:ctx tag)
+ *
+ * Other architectures also allow using the underlying struct type
+ * (e.g., `struct pt_regs *ctx` on x86-64).
+ */
+#ifndef bpf_target_s390
+
+#define pt_regs_struct_t typeof(*(__PT_REGS_CAST((struct pt_regs *)NULL)))
+
+__weak int kprobe_struct_ctx_subprog(pt_regs_struct_t *ctx)
+{
+ return bpf_get_stack((void *)ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?kprobe")
+__success
+int kprobe_resolved_ctx(void *ctx)
+{
+ return kprobe_struct_ctx_subprog(ctx);
+}
+
+#endif
+
+/* this is the current hack to make this work on old kernels */
+struct bpf_user_pt_regs_t {};
+
+__weak int kprobe_workaround_ctx_subprog(struct bpf_user_pt_regs_t *ctx)
+{
+ return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?kprobe")
+__success
+int kprobe_workaround_ctx(void *ctx)
+{
+ return kprobe_workaround_ctx_subprog(ctx);
+}
+
+/*
+ * RAW_TRACEPOINT contexts
+ */
+
+__weak int raw_tp_ctx_subprog(struct bpf_raw_tracepoint_args *ctx)
+{
+ return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?raw_tp")
+__success
+int raw_tp_ctx(void *ctx)
+{
+ return raw_tp_ctx_subprog(ctx);
+}
+
+/*
+ * RAW_TRACEPOINT_WRITABLE contexts
+ */
+
+__weak int raw_tp_writable_ctx_subprog(struct bpf_raw_tracepoint_args *ctx)
+{
+ return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?raw_tp")
+__success
+int raw_tp_writable_ctx(void *ctx)
+{
+ return raw_tp_writable_ctx_subprog(ctx);
+}
+
+/*
+ * PERF_EVENT contexts
+ */
+
+__weak int perf_event_ctx_subprog(struct bpf_perf_event_data *ctx)
+{
+ return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
+}
+
+SEC("?perf_event")
+__success
+int perf_event_ctx(void *ctx)
+{
+ return perf_event_ctx_subprog(ctx);
+}
+
+/* this global subprog can now be called from many types of entry progs, each
+ * with a different context type
+ */
+__weak int subprog_ctx_tag(void *ctx __arg_ctx)
+{
+ return bpf_get_stack(ctx, stack, sizeof(stack), 0);
+}
+
+struct my_struct { int x; };
+
+__weak int subprog_multi_ctx_tags(void *ctx1 __arg_ctx,
+ struct my_struct *mem,
+ void *ctx2 __arg_ctx)
+{
+ if (!mem)
+ return 0;
+
+ return bpf_get_stack(ctx1, stack, sizeof(stack), 0) +
+ mem->x +
+ bpf_get_stack(ctx2, stack, sizeof(stack), 0);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+int arg_tag_ctx_raw_tp(void *ctx)
+{
+ struct my_struct x = { .x = 123 };
+
+ return subprog_ctx_tag(ctx) + subprog_multi_ctx_tags(ctx, &x, ctx);
+}
+
+SEC("?perf_event")
+__success __log_level(2)
+int arg_tag_ctx_perf(void *ctx)
+{
+ struct my_struct x = { .x = 123 };
+
+ return subprog_ctx_tag(ctx) + subprog_multi_ctx_tags(ctx, &x, ctx);
+}
+
+SEC("?kprobe")
+__success __log_level(2)
+int arg_tag_ctx_kprobe(void *ctx)
+{
+ struct my_struct x = { .x = 123 };
+
+ return subprog_ctx_tag(ctx) + subprog_multi_ctx_tags(ctx, &x, ctx);
+}
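The __arg_ctx tag used throughout this file is what lets a plain void * parameter be verified as PTR_TO_CTX for whatever program type calls the subprog. A minimal sketch of the mechanism, assuming the bpf_misc.h definition:

/* assumed bpf_misc.h definition: a BTF decl tag on the parameter tells
 * the verifier (or libbpf, when it has to rewrite for older kernels)
 * to treat the argument as the program context regardless of its
 * declared C type
 */
#define __arg_ctx __attribute__((btf_decl_tag("arg:ctx")))

__weak int any_ctx_subprog(void *ctx __arg_ctx)
{
	return 0;	/* callable from kprobe, perf_event, raw_tp, ... */
}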
diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
new file mode 100644
index 000000000000..ee65bad0436d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* rodata section */
+const volatile pid_t pid;
+const volatile size_t bss_array_len;
+const volatile size_t data_array_len;
+
+/* bss section */
+int sum = 0;
+int array[1];
+
+/* custom data section */
+int my_array[1] SEC(".data.custom");
+
+/* custom data section which should NOT be resizable,
+ * since it contains a single var which is not an array
+ */
+int my_int SEC(".data.non_array");
+
+/* custom data section which should NOT be resizable,
+ * since its last var is not an array
+ */
+int my_array_first[1] SEC(".data.array_not_last");
+int my_int_last SEC(".data.array_not_last");
+
+int percpu_arr[1] SEC(".data.percpu_arr");
+
+/* at least one extern is included, to ensure that a specific
+ * regression is tested: resizing used to result in a use-after-free
+ * bug after type information was invalidated by the resize operation.
+ *
+ * There isn't a particularly good API to test for this specific condition,
+ * but having externs in the resizing tests covers this path.
+ */
+extern int LINUX_KERNEL_VERSION __kconfig;
+long version_sink;
+
+SEC("tp/syscalls/sys_enter_getpid")
+int bss_array_sum(void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+	/* this will be zero; we just rely on the verifier not rejecting this */
+ sum = percpu_arr[bpf_get_smp_processor_id()];
+
+ for (size_t i = 0; i < bss_array_len; ++i)
+ sum += array[i];
+
+ /* see above; ensure this is not optimized out */
+ version_sink = LINUX_KERNEL_VERSION;
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getuid")
+int data_array_sum(void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+	/* this will be zero; we just rely on the verifier not rejecting this */
+ sum = percpu_arr[bpf_get_smp_processor_id()];
+
+ for (size_t i = 0; i < data_array_len; ++i)
+ sum += my_array[i];
+
+ /* see above; ensure this is not optimized out */
+ version_sink = LINUX_KERNEL_VERSION;
+
+ return 0;
+}
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1)
+{
+ return 0;
+}
+
+struct bpf_testmod_ops {
+ int (*test_1)(void);
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops st_ops_resize = {
+ .test_1 = (void *)test_1
+};
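The arrays in this object are deliberately declared with a single element: user space grows the backing global-data maps between open and load. A hedged sketch of that user-space side; the helper is hypothetical, but bpf_map__set_value_size() is the libbpf API these resize tests are built around:

#include <stddef.h>
#include <bpf/libbpf.h>

/* hypothetical helper: grow a global-data array map (.bss, .data.custom,
 * ...) to hold nr_elems ints. Must run after bpf_object open but before
 * load; libbpf rewrites the map definition and its BTF datasec.
 */
static int resize_global_array(struct bpf_map *map, size_t nr_elems)
{
	return bpf_map__set_value_size(map, nr_elems * sizeof(int));
}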
diff --git a/tools/testing/selftests/bpf/progs/test_hash_large_key.c b/tools/testing/selftests/bpf/progs/test_hash_large_key.c
index 473a22794a62..8b438128f46b 100644
--- a/tools/testing/selftests/bpf/progs/test_hash_large_key.c
+++ b/tools/testing/selftests/bpf/progs/test_hash_large_key.c
@@ -28,7 +28,7 @@ struct bigelement {
SEC("raw_tracepoint/sys_enter")
int bpf_hash_large_key_test(void *ctx)
{
- int zero = 0, err = 1, value = 42;
+ int zero = 0, value = 42;
struct bigelement *key;
key = bpf_map_lookup_elem(&key_map, &zero);
diff --git a/tools/testing/selftests/bpf/progs/test_helper_restricted.c b/tools/testing/selftests/bpf/progs/test_helper_restricted.c
new file mode 100644
index 000000000000..5715c569ec03
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_helper_restricted.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <time.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct timer {
+ struct bpf_timer t;
+};
+
+struct lock {
+ struct bpf_spin_lock l;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct timer);
+} timers SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct lock);
+} locks SEC(".maps");
+
+static int timer_cb(void *map, int *key, struct timer *timer)
+{
+ return 0;
+}
+
+static void timer_work(void)
+{
+ struct timer *timer;
+ const int key = 0;
+
+ timer = bpf_map_lookup_elem(&timers, &key);
+ if (timer) {
+ bpf_timer_init(&timer->t, &timers, CLOCK_MONOTONIC);
+ bpf_timer_set_callback(&timer->t, timer_cb);
+ bpf_timer_start(&timer->t, 10E9, 0);
+ bpf_timer_cancel(&timer->t);
+ }
+}
+
+static void spin_lock_work(void)
+{
+ const int key = 0;
+ struct lock *lock;
+
+ lock = bpf_map_lookup_elem(&locks, &key);
+ if (lock) {
+ bpf_spin_lock(&lock->l);
+ bpf_spin_unlock(&lock->l);
+ }
+}
+
+SEC("?raw_tp/sys_enter")
+int raw_tp_timer(void *ctx)
+{
+ timer_work();
+
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int tp_timer(void *ctx)
+{
+ timer_work();
+
+ return 0;
+}
+
+SEC("?kprobe")
+int kprobe_timer(void *ctx)
+{
+ timer_work();
+
+ return 0;
+}
+
+SEC("?perf_event")
+int perf_event_timer(void *ctx)
+{
+ timer_work();
+
+ return 0;
+}
+
+SEC("?raw_tp/sys_enter")
+int raw_tp_spin_lock(void *ctx)
+{
+ spin_lock_work();
+
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int tp_spin_lock(void *ctx)
+{
+ spin_lock_work();
+
+ return 0;
+}
+
+SEC("?kprobe")
+int kprobe_spin_lock(void *ctx)
+{
+ spin_lock_work();
+
+ return 0;
+}
+
+SEC("?perf_event")
+int perf_event_spin_lock(void *ctx)
+{
+ spin_lock_work();
+
+ return 0;
+}
+
+const char LICENSE[] SEC("license") = "GPL";
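Every program above sits in a SEC("?...") section: the leading '?' marks it as not auto-loaded, which lets the test enable one program at a time and assert that loading fails, since bpf_timer_*() and bpf_spin_lock()/bpf_spin_unlock() are restricted for these tracing program types. A sketch of such a driver loop, under the assumption that this is how the object is exercised:

#include <stdio.h>
#include <string.h>
#include <bpf/libbpf.h>

/* enable exactly one '?'-section program and expect its load to fail */
static void expect_load_failure(const char *obj_path, const char *prog_name)
{
	struct bpf_object *obj = bpf_object__open_file(obj_path, NULL);
	struct bpf_program *prog;

	if (!obj)
		return;
	bpf_object__for_each_program(prog, obj)
		bpf_program__set_autoload(prog,
			strcmp(bpf_program__name(prog), prog_name) == 0);
	if (bpf_object__load(obj) == 0)
		fprintf(stderr, "unexpected load success: %s\n", prog_name);
	bpf_object__close(obj);
}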
diff --git a/tools/testing/selftests/bpf/progs/test_jhash.h b/tools/testing/selftests/bpf/progs/test_jhash.h
index c300734d26f6..ef53559bbbdf 100644
--- a/tools/testing/selftests/bpf/progs/test_jhash.h
+++ b/tools/testing/selftests/bpf/progs/test_jhash.h
@@ -69,3 +69,34 @@ u32 jhash(const void *key, u32 length, u32 initval)
return c;
}
+
+static __always_inline u32 jhash2(const u32 *k, u32 length, u32 initval)
+{
+ u32 a, b, c;
+
+ /* Set up the internal state */
+ a = b = c = JHASH_INITVAL + (length<<2) + initval;
+
+ /* Handle most of the key */
+ while (length > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ __jhash_mix(a, b, c);
+ length -= 3;
+ k += 3;
+ }
+
+ /* Handle the last 3 u32's */
+ switch (length) {
+ case 3: c += k[2];
+ case 2: b += k[1];
+ case 1: a += k[0];
+ __jhash_final(a, b, c);
+ break;
+ case 0: /* Nothing left to add */
+ break;
+ }
+
+ return c;
+}
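jhash2() hashes whole u32 words, so for word-aligned keys it is simpler and cheaper than the byte-wise jhash() above it. A small usage sketch (the caller is hypothetical); note that the length counts u32 words, not bytes:

/* hypothetical caller: hash an IPv6 address as four u32 words */
static __always_inline u32 hash_v6(const u32 addr[4], u32 seed)
{
	return jhash2(addr, 4, seed);	/* 4 words == 16 bytes */
}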
diff --git a/tools/testing/selftests/bpf/progs/test_kernel_flag.c b/tools/testing/selftests/bpf/progs/test_kernel_flag.c
new file mode 100644
index 000000000000..b45fab3be352
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_kernel_flag.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2025 Microsoft Corporation
+ *
+ * Author: Blaise Boscaccy <bboscaccy@linux.microsoft.com>
+ */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 monitored_tid;
+
+SEC("lsm.s/bpf")
+int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ __u32 tid;
+
+ tid = bpf_get_current_pid_tgid() & 0xFFFFFFFF;
+ if (!kernel || tid != monitored_tid)
+ return 0;
+ else
+ return -EINVAL;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
new file mode 100644
index 000000000000..061befb004c2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
+extern void bpf_key_put(struct bpf_key *key) __ksym;
+extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr,
+ struct bpf_dynptr *sig_ptr,
+ struct bpf_key *trusted_keyring) __ksym;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 4096);
+} ringbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} array_map SEC(".maps");
+
+int err, pid;
+
+char _license[] SEC("license") = "GPL";
+
+SEC("?lsm.s/bpf")
+__failure __msg("cannot pass in dynptr at an offset=-8")
+int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ unsigned long val;
+
+ return bpf_verify_pkcs7_signature((struct bpf_dynptr *)&val,
+ (struct bpf_dynptr *)&val, NULL);
+}
+
+SEC("?lsm.s/bpf")
+__failure __msg("arg#0 expected pointer to stack or const struct bpf_dynptr")
+int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ unsigned long val = 0;
+
+ return bpf_verify_pkcs7_signature((struct bpf_dynptr *)val,
+ (struct bpf_dynptr *)val, NULL);
+}
+
+SEC("lsm.s/bpf")
+int BPF_PROG(dynptr_data_null, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ struct bpf_key *trusted_keyring;
+ struct bpf_dynptr ptr;
+ __u32 *value;
+ int ret, zero = 0;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ value = bpf_map_lookup_elem(&array_map, &zero);
+ if (!value)
+ return 0;
+
+ /* Pass invalid flags. */
+ ret = bpf_dynptr_from_mem(value, sizeof(*value), ((__u64)~0ULL), &ptr);
+ if (ret != -EINVAL)
+ return 0;
+
+ trusted_keyring = bpf_lookup_system_key(0);
+ if (!trusted_keyring)
+ return 0;
+
+ err = bpf_verify_pkcs7_signature(&ptr, &ptr, trusted_keyring);
+
+ bpf_key_put(trusted_keyring);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c
new file mode 100644
index 000000000000..0ad1bf1ede8d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_kfuncs.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+SEC("tc")
+int kfunc_dynptr_nullable_test1(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_kfunc_dynptr_test(&data, NULL);
+
+ return 0;
+}
+
+SEC("tc")
+int kfunc_dynptr_nullable_test2(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_kfunc_dynptr_test(&data, &data);
+
+ return 0;
+}
+
+SEC("tc")
+__failure __msg("expected pointer to stack or const struct bpf_dynptr")
+int kfunc_dynptr_nullable_test3(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_kfunc_dynptr_test(NULL, &data);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
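The asymmetry above (NULL is accepted as the second argument but rejected as the first) comes from the kfunc parameter-naming convention: only parameters whose names carry the __nullable suffix may be NULL. A sketch of the testmod kfunc this file calls; the exact signature is an assumption inferred from the calls above:

/* assumed bpf_testmod definition: the "__nullable" suffix on the second
 * parameter's name is what tells the verifier NULL is acceptable there
 */
__bpf_kfunc int bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
				      struct bpf_dynptr *ptr__nullable)
{
	return 0;
}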
diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c
new file mode 100644
index 000000000000..27109b877714
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+
+extern const int bpf_prog_active __ksym; /* int type global var. */
+
+SEC("raw_tp/sys_enter")
+int handler1(const void *ctx)
+{
+ int *active;
+ __u32 cpu;
+
+ cpu = bpf_get_smp_processor_id();
+ active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
+ if (active) {
+		/* Kernel memory obtained from bpf_{per,this}_cpu_ptr
+		 * is read-only; this write should _not_ pass verification.
+		 */
+ /* WRITE_ONCE */
+ *(volatile int *)active = -1;
+ }
+
+ return 0;
+}
+
+__noinline int write_active(int *p)
+{
+ return p ? (*p = 42) : 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handler2(const void *ctx)
+{
+ int *active;
+
+ active = bpf_this_cpu_ptr(&bpf_prog_active);
+ write_active(active);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_module.c b/tools/testing/selftests/bpf/progs/test_ksyms_module.c
new file mode 100644
index 000000000000..0650d918c096
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ksyms_module.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+#define X_0(x)
+#define X_1(x) x X_0(x)
+#define X_2(x) x X_1(x)
+#define X_3(x) x X_2(x)
+#define X_4(x) x X_3(x)
+#define X_5(x) x X_4(x)
+#define X_6(x) x X_5(x)
+#define X_7(x) x X_6(x)
+#define X_8(x) x X_7(x)
+#define X_9(x) x X_8(x)
+#define X_10(x) x X_9(x)
+#define REPEAT_256(Y) X_2(X_10(X_10(Y))) X_5(X_10(Y)) X_6(Y)
+
+extern const int bpf_testmod_ksym_percpu __ksym;
+extern void bpf_testmod_test_mod_kfunc(int i) __ksym;
+extern void bpf_testmod_invalid_mod_kfunc(void) __ksym __weak;
+
+int out_bpf_testmod_ksym = 0;
+const volatile int x = 0;
+
+SEC("tc")
+int load(struct __sk_buff *skb)
+{
+	/* This will be kept by clang, but removed by the verifier. Since it
+	 * is marked as __weak, libbpf and gen_loader don't error out if no
+	 * BTF ID is found for it; instead, its imm and off are set to 0.
+	 */
+ if (x)
+ bpf_testmod_invalid_mod_kfunc();
+ bpf_testmod_test_mod_kfunc(42);
+ out_bpf_testmod_ksym = *(int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu);
+ return 0;
+}
+
+SEC("tc")
+int load_256(struct __sk_buff *skb)
+{
+ /* this will fail if kfunc doesn't reuse its own btf fd index */
+ REPEAT_256(bpf_testmod_test_mod_kfunc(42););
+ bpf_testmod_test_mod_kfunc(42);
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c
new file mode 100644
index 000000000000..d00268c91e19
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test weak ksyms.
+ *
+ * Copyright (c) 2021 Google
+ */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+
+int out__existing_typed = -1;
+__u64 out__existing_typeless = -1;
+
+__u64 out__non_existent_typeless = -1;
+__u64 out__non_existent_typed = -1;
+
+/* existing weak symbols */
+
+/* test that existing weak symbols can be resolved. */
+extern const struct rq runqueues __ksym __weak; /* typed */
+extern const void bpf_prog_active __ksym __weak; /* typeless */
+struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
+void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;
+
+
+/* non-existent weak symbols. */
+
+/* typeless symbols default to zero. */
+extern const void bpf_link_fops1 __ksym __weak;
+
+/* typed symbols default to zero. */
+extern const int bpf_link_fops2 __ksym __weak;
+void invalid_kfunc(void) __ksym __weak;
+
+SEC("raw_tp/sys_enter")
+int pass_handler(const void *ctx)
+{
+ struct rq *rq;
+
+ /* tests existing symbols. */
+ rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
+ if (rq && bpf_ksym_exists(&runqueues))
+ out__existing_typed = rq->cpu;
+ out__existing_typeless = (__u64)&bpf_prog_active;
+
+ /* tests non-existent symbols. */
+ out__non_existent_typeless = (__u64)&bpf_link_fops1;
+
+ /* tests non-existent symbols. */
+ out__non_existent_typed = (__u64)&bpf_link_fops2;
+
+ if (&bpf_link_fops2) /* can't happen */
+ out__non_existent_typed = (__u64)bpf_per_cpu_ptr(&bpf_link_fops2, 0);
+
+ if (!bpf_ksym_exists(bpf_task_acquire))
+ /* dead code won't be seen by the verifier */
+ bpf_task_acquire(0);
+
+ if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc))
+ /* dead code won't be seen by the verifier */
+ bpf_testmod_test_mod_kfunc(0);
+
+ if (bpf_ksym_exists(invalid_kfunc))
+ /* dead code won't be seen by the verifier */
+ invalid_kfunc();
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
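The dead-code elimination this test relies on hinges on bpf_ksym_exists(): an unresolved __weak ksym gets address 0, so the guard folds to false and the verifier never sees the call it protects. A sketch of the macro, assuming the bpf_helpers.h shape:

/* assumed bpf_helpers.h shape: the static assert rejects non-__weak
 * symbols, for which !!sym would be a compile-time constant and the
 * existence check meaningless
 */
#define bpf_ksym_exists(sym) ({					\
	_Static_assert(!__builtin_constant_p(!!sym),		\
		       #sym " should be marked as __weak");	\
	!!sym;							\
})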
diff --git a/tools/testing/selftests/bpf/progs/test_l4lb.c b/tools/testing/selftests/bpf/progs/test_l4lb.c
index 33493911d87a..c26057ec46dc 100644
--- a/tools/testing/selftests/bpf/progs/test_l4lb.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb.c
@@ -21,8 +21,6 @@
#include "test_iptunnel_common.h"
#include <bpf/bpf_endian.h>
-int _version SEC("version") = 1;
-
static inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> ((-shift) & 31));
@@ -450,7 +448,7 @@ static __always_inline int process_packet(void *data, __u64 off, void *data_end,
return bpf_redirect(ifindex, 0);
}
-SEC("l4lb-demo")
+SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
index b9e2753f4f91..c8bc0c6947aa 100644
--- a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
@@ -218,7 +218,7 @@ static __noinline bool get_packet_dst(struct real_definition **real,
if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
- return 0;
+ return false;
real_pos = bpf_map_lookup_elem(&ch_rings, &key);
if (!real_pos)
@@ -447,7 +447,7 @@ static __noinline int process_packet(void *data, __u64 off, void *data_end,
return bpf_redirect(ifindex, 0);
}
-SEC("l4lb-demo")
+SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c
new file mode 100644
index 000000000000..f997f5080748
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/pkt_cls.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <bpf/bpf_helpers.h>
+#include "test_iptunnel_common.h"
+#include <bpf/bpf_endian.h>
+
+#include "bpf_kfuncs.h"
+
+static __always_inline __u32 rol32(__u32 word, unsigned int shift)
+{
+ return (word << shift) | (word >> ((-shift) & 31));
+}
+
+/* copy-paste of jhash from the kernel sources, to make sure LLVM
+ * can compile it into a valid sequence of BPF instructions
+ */
+#define __jhash_mix(a, b, c) \
+{ \
+ a -= c; a ^= rol32(c, 4); c += b; \
+ b -= a; b ^= rol32(a, 6); a += c; \
+ c -= b; c ^= rol32(b, 8); b += a; \
+ a -= c; a ^= rol32(c, 16); c += b; \
+ b -= a; b ^= rol32(a, 19); a += c; \
+ c -= b; c ^= rol32(b, 4); b += a; \
+}
+
+#define __jhash_final(a, b, c) \
+{ \
+ c ^= b; c -= rol32(b, 14); \
+ a ^= c; a -= rol32(c, 11); \
+ b ^= a; b -= rol32(a, 25); \
+ c ^= b; c -= rol32(b, 16); \
+ a ^= c; a -= rol32(c, 4); \
+ b ^= a; b -= rol32(a, 14); \
+ c ^= b; c -= rol32(b, 24); \
+}
+
+#define JHASH_INITVAL 0xdeadbeef
+
+typedef unsigned int u32;
+
+static __noinline u32 jhash(const void *key, u32 length, u32 initval)
+{
+ u32 a, b, c;
+ const unsigned char *k = key;
+
+ a = b = c = JHASH_INITVAL + length + initval;
+
+ while (length > 12) {
+ a += *(u32 *)(k);
+ b += *(u32 *)(k + 4);
+ c += *(u32 *)(k + 8);
+ __jhash_mix(a, b, c);
+ length -= 12;
+ k += 12;
+ }
+ switch (length) {
+ case 12: c += (u32)k[11]<<24;
+ case 11: c += (u32)k[10]<<16;
+ case 10: c += (u32)k[9]<<8;
+ case 9: c += k[8];
+ case 8: b += (u32)k[7]<<24;
+ case 7: b += (u32)k[6]<<16;
+ case 6: b += (u32)k[5]<<8;
+ case 5: b += k[4];
+ case 4: a += (u32)k[3]<<24;
+ case 3: a += (u32)k[2]<<16;
+ case 2: a += (u32)k[1]<<8;
+ case 1: a += k[0];
+ __jhash_final(a, b, c);
+ case 0: /* Nothing left to add */
+ break;
+ }
+
+ return c;
+}
+
+static __noinline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+{
+ a += initval;
+ b += initval;
+ c += initval;
+ __jhash_final(a, b, c);
+ return c;
+}
+
+static __noinline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+ return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
+}
+
+#define PCKT_FRAGMENTED 65343
+#define IPV4_HDR_LEN_NO_OPT 20
+#define IPV4_PLUS_ICMP_HDR 28
+#define IPV6_PLUS_ICMP_HDR 48
+#define RING_SIZE 2
+#define MAX_VIPS 12
+#define MAX_REALS 5
+#define CTL_MAP_SIZE 16
+#define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE)
+#define F_IPV6 (1 << 0)
+#define F_HASH_NO_SRC_PORT (1 << 0)
+#define F_ICMP (1 << 0)
+#define F_SYN_SET (1 << 1)
+
+struct packet_description {
+ union {
+ __be32 src;
+ __be32 srcv6[4];
+ };
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ union {
+ __u32 ports;
+ __u16 port16[2];
+ };
+ __u8 proto;
+ __u8 flags;
+};
+
+struct ctl_value {
+ union {
+ __u64 value;
+ __u32 ifindex;
+ __u8 mac[6];
+ };
+};
+
+struct vip_meta {
+ __u32 flags;
+ __u32 vip_num;
+};
+
+struct real_definition {
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ __u8 flags;
+};
+
+struct vip_stats {
+ __u64 bytes;
+ __u64 pkts;
+};
+
+struct eth_hdr {
+ unsigned char eth_dest[ETH_ALEN];
+ unsigned char eth_source[ETH_ALEN];
+ unsigned short eth_proto;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, MAX_VIPS);
+ __type(key, struct vip);
+ __type(value, struct vip_meta);
+} vip_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, CH_RINGS_SIZE);
+ __type(key, __u32);
+ __type(value, __u32);
+} ch_rings SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, MAX_REALS);
+ __type(key, __u32);
+ __type(value, struct real_definition);
+} reals SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, MAX_VIPS);
+ __type(key, __u32);
+ __type(value, struct vip_stats);
+} stats SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, CTL_MAP_SIZE);
+ __type(key, __u32);
+ __type(value, struct ctl_value);
+} ctl_array SEC(".maps");
+
+static __noinline __u32 get_packet_hash(struct packet_description *pckt, bool ipv6)
+{
+ if (ipv6)
+ return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS),
+ pckt->ports, CH_RINGS_SIZE);
+ else
+ return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE);
+}
+
+static __noinline bool get_packet_dst(struct real_definition **real,
+ struct packet_description *pckt,
+ struct vip_meta *vip_info,
+ bool is_ipv6)
+{
+ __u32 hash = get_packet_hash(pckt, is_ipv6);
+ __u32 key = RING_SIZE * vip_info->vip_num + hash % RING_SIZE;
+ __u32 *real_pos;
+
+ if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
+ hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
+ return false;
+
+ real_pos = bpf_map_lookup_elem(&ch_rings, &key);
+ if (!real_pos)
+ return false;
+ key = *real_pos;
+ *real = bpf_map_lookup_elem(&reals, &key);
+ if (!(*real))
+ return false;
+ return true;
+}
+
+static __noinline int parse_icmpv6(struct bpf_dynptr *skb_ptr, __u64 off,
+ struct packet_description *pckt)
+{
+ __u8 buffer[sizeof(struct ipv6hdr)] = {};
+ struct icmp6hdr *icmp_hdr;
+ struct ipv6hdr *ip6h;
+
+ icmp_hdr = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
+ if (!icmp_hdr)
+ return TC_ACT_SHOT;
+
+ if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG)
+ return TC_ACT_OK;
+ off += sizeof(struct icmp6hdr);
+ ip6h = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
+ if (!ip6h)
+ return TC_ACT_SHOT;
+ pckt->proto = ip6h->nexthdr;
+ pckt->flags |= F_ICMP;
+ memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16);
+ memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16);
+ return TC_ACT_UNSPEC;
+}
+
+static __noinline int parse_icmp(struct bpf_dynptr *skb_ptr, __u64 off,
+ struct packet_description *pckt)
+{
+ __u8 buffer_icmp[sizeof(struct iphdr)] = {};
+ __u8 buffer_ip[sizeof(struct iphdr)] = {};
+ struct icmphdr *icmp_hdr;
+ struct iphdr *iph;
+
+ icmp_hdr = bpf_dynptr_slice(skb_ptr, off, buffer_icmp, sizeof(buffer_icmp));
+ if (!icmp_hdr)
+ return TC_ACT_SHOT;
+ if (icmp_hdr->type != ICMP_DEST_UNREACH ||
+ icmp_hdr->code != ICMP_FRAG_NEEDED)
+ return TC_ACT_OK;
+ off += sizeof(struct icmphdr);
+ iph = bpf_dynptr_slice(skb_ptr, off, buffer_ip, sizeof(buffer_ip));
+ if (!iph || iph->ihl != 5)
+ return TC_ACT_SHOT;
+ pckt->proto = iph->protocol;
+ pckt->flags |= F_ICMP;
+ pckt->src = iph->daddr;
+ pckt->dst = iph->saddr;
+ return TC_ACT_UNSPEC;
+}
+
+static __noinline bool parse_udp(struct bpf_dynptr *skb_ptr, __u64 off,
+ struct packet_description *pckt)
+{
+ __u8 buffer[sizeof(struct udphdr)] = {};
+ struct udphdr *udp;
+
+ udp = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
+ if (!udp)
+ return false;
+
+ if (!(pckt->flags & F_ICMP)) {
+ pckt->port16[0] = udp->source;
+ pckt->port16[1] = udp->dest;
+ } else {
+ pckt->port16[0] = udp->dest;
+ pckt->port16[1] = udp->source;
+ }
+ return true;
+}
+
+static __noinline bool parse_tcp(struct bpf_dynptr *skb_ptr, __u64 off,
+ struct packet_description *pckt)
+{
+ __u8 buffer[sizeof(struct tcphdr)] = {};
+ struct tcphdr *tcp;
+
+ tcp = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
+ if (!tcp)
+ return false;
+
+ if (tcp->syn)
+ pckt->flags |= F_SYN_SET;
+
+ if (!(pckt->flags & F_ICMP)) {
+ pckt->port16[0] = tcp->source;
+ pckt->port16[1] = tcp->dest;
+ } else {
+ pckt->port16[0] = tcp->dest;
+ pckt->port16[1] = tcp->source;
+ }
+ return true;
+}
+
+static __noinline int process_packet(struct bpf_dynptr *skb_ptr,
+ struct eth_hdr *eth, __u64 off,
+ bool is_ipv6, struct __sk_buff *skb)
+{
+ struct packet_description pckt = {};
+ struct bpf_tunnel_key tkey = {};
+ struct vip_stats *data_stats;
+ struct real_definition *dst;
+ struct vip_meta *vip_info;
+ struct ctl_value *cval;
+ __u32 v4_intf_pos = 1;
+ __u32 v6_intf_pos = 2;
+ struct ipv6hdr *ip6h;
+ struct vip vip = {};
+ struct iphdr *iph;
+ int tun_flag = 0;
+ __u16 pkt_bytes;
+ __u64 iph_len;
+ __u32 ifindex;
+ __u8 protocol;
+ __u32 vip_num;
+ int action;
+
+ tkey.tunnel_ttl = 64;
+ if (is_ipv6) {
+ __u8 buffer[sizeof(struct ipv6hdr)] = {};
+
+ ip6h = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
+ if (!ip6h)
+ return TC_ACT_SHOT;
+
+ iph_len = sizeof(struct ipv6hdr);
+ protocol = ip6h->nexthdr;
+ pckt.proto = protocol;
+ pkt_bytes = bpf_ntohs(ip6h->payload_len);
+ off += iph_len;
+ if (protocol == IPPROTO_FRAGMENT) {
+ return TC_ACT_SHOT;
+ } else if (protocol == IPPROTO_ICMPV6) {
+ action = parse_icmpv6(skb_ptr, off, &pckt);
+ if (action >= 0)
+ return action;
+ off += IPV6_PLUS_ICMP_HDR;
+ } else {
+ memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16);
+ memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16);
+ }
+ } else {
+ __u8 buffer[sizeof(struct iphdr)] = {};
+
+ iph = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer));
+ if (!iph || iph->ihl != 5)
+ return TC_ACT_SHOT;
+
+ protocol = iph->protocol;
+ pckt.proto = protocol;
+ pkt_bytes = bpf_ntohs(iph->tot_len);
+ off += IPV4_HDR_LEN_NO_OPT;
+
+ if (iph->frag_off & PCKT_FRAGMENTED)
+ return TC_ACT_SHOT;
+ if (protocol == IPPROTO_ICMP) {
+ action = parse_icmp(skb_ptr, off, &pckt);
+ if (action >= 0)
+ return action;
+ off += IPV4_PLUS_ICMP_HDR;
+ } else {
+ pckt.src = iph->saddr;
+ pckt.dst = iph->daddr;
+ }
+ }
+ protocol = pckt.proto;
+
+ if (protocol == IPPROTO_TCP) {
+ if (!parse_tcp(skb_ptr, off, &pckt))
+ return TC_ACT_SHOT;
+ } else if (protocol == IPPROTO_UDP) {
+ if (!parse_udp(skb_ptr, off, &pckt))
+ return TC_ACT_SHOT;
+ } else {
+ return TC_ACT_SHOT;
+ }
+
+ if (is_ipv6)
+ memcpy(vip.daddr.v6, pckt.dstv6, 16);
+ else
+ vip.daddr.v4 = pckt.dst;
+
+ vip.dport = pckt.port16[1];
+ vip.protocol = pckt.proto;
+ vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+ if (!vip_info) {
+ vip.dport = 0;
+ vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+ if (!vip_info)
+ return TC_ACT_SHOT;
+ pckt.port16[1] = 0;
+ }
+
+ if (vip_info->flags & F_HASH_NO_SRC_PORT)
+ pckt.port16[0] = 0;
+
+ if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6))
+ return TC_ACT_SHOT;
+
+ if (dst->flags & F_IPV6) {
+ cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos);
+ if (!cval)
+ return TC_ACT_SHOT;
+ ifindex = cval->ifindex;
+ memcpy(tkey.remote_ipv6, dst->dstv6, 16);
+ tun_flag = BPF_F_TUNINFO_IPV6;
+ } else {
+ cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos);
+ if (!cval)
+ return TC_ACT_SHOT;
+ ifindex = cval->ifindex;
+ tkey.remote_ipv4 = dst->dst;
+ }
+ vip_num = vip_info->vip_num;
+ data_stats = bpf_map_lookup_elem(&stats, &vip_num);
+ if (!data_stats)
+ return TC_ACT_SHOT;
+ data_stats->pkts++;
+ data_stats->bytes += pkt_bytes;
+ bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag);
+ *(u32 *)eth->eth_dest = tkey.remote_ipv4;
+ return bpf_redirect(ifindex, 0);
+}
+
+SEC("tc")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+ __u8 buffer[sizeof(struct eth_hdr)] = {};
+ struct bpf_dynptr ptr;
+ struct eth_hdr *eth;
+ __u32 eth_proto;
+ __u32 nh_off;
+ int err;
+
+ nh_off = sizeof(struct eth_hdr);
+
+ bpf_dynptr_from_skb(ctx, 0, &ptr);
+ eth = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
+ if (!eth)
+ return TC_ACT_SHOT;
+ eth_proto = eth->eth_proto;
+ if (eth_proto == bpf_htons(ETH_P_IP))
+ err = process_packet(&ptr, eth, nh_off, false, ctx);
+ else if (eth_proto == bpf_htons(ETH_P_IPV6))
+ err = process_packet(&ptr, eth, nh_off, true, ctx);
+ else
+ return TC_ACT_SHOT;
+
+ if (eth == buffer)
+ bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0);
+
+ return err;
+}
+
+char _license[] SEC("license") = "GPL";
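Note the eth == buffer check at the end of balancer_ingress(): bpf_dynptr_slice_rdwr() may return a direct pointer into the packet or a pointer into the caller's bounce buffer (for example, when the data is not linear), and in the latter case modifications must be flushed back with bpf_dynptr_write(). A minimal sketch of that read-modify-write-back pattern, assuming the bpf_kfuncs.h declarations:

/* sketch: rewrite the destination MAC through a dynptr slice, flushing
 * the bounce buffer back only when the slice was served from the copy
 */
static __always_inline int set_dst_mac(struct bpf_dynptr *ptr, const __u8 *mac)
{
	__u8 buffer[sizeof(struct eth_hdr)] = {};
	struct eth_hdr *eth;

	eth = bpf_dynptr_slice_rdwr(ptr, 0, buffer, sizeof(buffer));
	if (!eth)
		return -1;
	__builtin_memcpy(eth->eth_dest, mac, ETH_ALEN);
	if ((void *)eth == (void *)buffer)
		bpf_dynptr_write(ptr, 0, buffer, sizeof(buffer), 0);
	return 0;
}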
diff --git a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
new file mode 100644
index 000000000000..2a2a942737d7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+ defined(__TARGET_ARCH_s390) || defined(__TARGET_ARCH_loongarch)) && \
+ __clang_major__ >= 18
+const volatile int skip = 0;
+#else
+const volatile int skip = 1;
+#endif
+
+volatile const short val1 = -1;
+volatile const int val2 = -1;
+short val3 = -1;
+int val4 = -1;
+int done1, done2, ret1, ret2;
+
+SEC("?raw_tp/sys_enter")
+int rdonly_map_prog(const void *ctx)
+{
+ if (done1)
+ return 0;
+
+ done1 = 1;
+ /* val1/val2 readonly map */
+ if (val1 == val2)
+ ret1 = 1;
+ return 0;
+
+}
+
+SEC("?raw_tp/sys_enter")
+int map_val_prog(const void *ctx)
+{
+ if (done2)
+ return 0;
+
+ done2 = 1;
+ /* val1/val2 regular read/write map */
+ if (val3 == val4)
+ ret2 = 1;
+ return 0;
+
+}
+
+struct bpf_testmod_struct_arg_1 {
+ int a;
+};
+
+long long int_member;
+
+SEC("?fentry/bpf_testmod_test_arg_ptr_to_struct")
+int BPF_PROG2(test_ptr_struct_arg, struct bpf_testmod_struct_arg_1 *, p)
+{
+ /* probed memory access */
+ int_member = p->a;
+ return 0;
+}
+
+long long set_optlen, set_retval;
+
+SEC("?cgroup/getsockopt")
+int _getsockopt(volatile struct bpf_sockopt *ctx)
+{
+ int old_optlen, old_retval;
+
+ old_optlen = ctx->optlen;
+ old_retval = ctx->retval;
+
+ ctx->optlen = -1;
+ ctx->retval = -1;
+
+ /* sign extension for ctx member */
+ set_optlen = ctx->optlen;
+ set_retval = ctx->retval;
+
+ ctx->optlen = old_optlen;
+ ctx->retval = old_retval;
+
+ return 0;
+}
+
+long long set_mark;
+
+SEC("?tc")
+int _tc(volatile struct __sk_buff *skb)
+{
+ long long tmp_mark;
+ int old_mark;
+
+ old_mark = skb->mark;
+
+ skb->mark = 0xf6fe;
+
+ /* narrowed sign extension for ctx member */
+#if __clang_major__ >= 18
+	/* force a narrow one-byte signed load. Otherwise, the compiler may
+	 * generate a 32-bit unsigned load followed by an s8 movsx.
+	 */
+ asm volatile ("r1 = *(s8 *)(%[ctx] + %[off_mark])\n\t"
+ "%[tmp_mark] = r1"
+ : [tmp_mark]"=r"(tmp_mark)
+ : [ctx]"r"(skb),
+ [off_mark]"i"(offsetof(struct __sk_buff, mark)
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ + sizeof(skb->mark) - 1
+#endif
+ )
+ : "r1");
+#else
+ tmp_mark = (char)skb->mark;
+#endif
+ set_mark = tmp_mark;
+
+ skb->mark = old_mark;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_legacy_printk.c b/tools/testing/selftests/bpf/progs/test_legacy_printk.c
new file mode 100644
index 000000000000..42718cd8e6a4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_legacy_printk.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <linux/bpf.h>
+#define BPF_NO_GLOBAL_DATA
+#include <bpf/bpf_helpers.h>
+
+char LICENSE[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+} my_pid_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+} res_map SEC(".maps");
+
+volatile int my_pid_var = 0;
+volatile int res_var = 0;
+
+SEC("tp/raw_syscalls/sys_enter")
+int handle_legacy(void *ctx)
+{
+ int zero = 0, *my_pid, cur_pid, *my_res;
+
+ my_pid = bpf_map_lookup_elem(&my_pid_map, &zero);
+ if (!my_pid)
+ return 1;
+
+ cur_pid = bpf_get_current_pid_tgid() >> 32;
+ if (cur_pid != *my_pid)
+ return 1;
+
+ my_res = bpf_map_lookup_elem(&res_map, &zero);
+ if (!my_res)
+ return 1;
+
+ if (*my_res == 0)
+		/* use bpf_printk() in combination with BPF_NO_GLOBAL_DATA to
+		 * force a .rodata.str1.1 section, which previously caused
+		 * problems on old kernels because libbpf always tried to
+		 * create a global data map for it
+		 */
+ bpf_printk("Legacy-case bpf_printk test, pid %d\n", cur_pid);
+ *my_res = 1;
+
+ return *my_res;
+}
+
+SEC("tp/raw_syscalls/sys_enter")
+int handle_modern(void *ctx)
+{
+ int cur_pid;
+
+ cur_pid = bpf_get_current_pid_tgid() >> 32;
+ if (cur_pid != my_pid_var)
+ return 1;
+
+ if (res_var == 0)
+ /* we need bpf_printk() to validate libbpf logic around unused
+ * global maps and legacy kernels; see comment in handle_legacy()
+ */
+ bpf_printk("Modern-case bpf_printk test, pid %d\n", cur_pid);
+ res_var = 1;
+
+ return res_var;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
new file mode 100644
index 000000000000..568816307f71
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* From include/linux/mm.h. */
+#define FMODE_WRITE 0x2
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} data_input SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
+SEC("lsm/bpf_map")
+int BPF_PROG(check_access, struct bpf_map *map, fmode_t fmode)
+{
+ if (map != (struct bpf_map *)&data_input)
+ return 0;
+
+ if (fmode & FMODE_WRITE)
+ return -EACCES;
+ barrier();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_log_buf.c b/tools/testing/selftests/bpf/progs/test_log_buf.c
new file mode 100644
index 000000000000..199f459bd5ae
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_log_buf.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+int a[4];
+const volatile int off = 4000;
+
+SEC("raw_tp/sys_enter")
+int good_prog(const void *ctx)
+{
+ a[0] = (int)(long)ctx;
+ return a[1];
+}
+
+SEC("raw_tp/sys_enter")
+int bad_prog(const void *ctx)
+{
+ /* out of bounds access */
+ return a[off];
+}
+
+char _license[] SEC("license") = "GPL";
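bad_prog exists purely to generate verifier output; the interesting half of this test is in user space, where the verifier log is routed into caller-provided buffers. A hedged sketch using the libbpf open options (field names as recalled from struct bpf_object_open_opts; treat them as an assumption):

#include <bpf/libbpf.h>

static char log_buf[64 * 1024];

/* open the object so verifier output for its programs lands in log_buf */
static struct bpf_object *open_with_log(const char *path)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.kernel_log_buf = log_buf,
		.kernel_log_size = sizeof(log_buf),
		.kernel_log_level = 1,	/* basic verifier log */
	);

	return bpf_object__open_file(path, &opts);
}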
diff --git a/tools/testing/selftests/bpf/progs/test_log_fixup.c b/tools/testing/selftests/bpf/progs/test_log_fixup.c
new file mode 100644
index 000000000000..1bd48feaaa42
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_log_fixup.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+struct task_struct___bad {
+ int pid;
+ int fake_field;
+ void *fake_field_subprog;
+} __attribute__((preserve_access_index));
+
+SEC("?raw_tp/sys_enter")
+int bad_relo(const void *ctx)
+{
+ static struct task_struct___bad *t;
+
+ return bpf_core_field_size(t->fake_field);
+}
+
+static __noinline int bad_subprog(void)
+{
+ static struct task_struct___bad *t;
+
+ /* ugliness below is a field offset relocation */
+ return (void *)&t->fake_field_subprog - (void *)t;
+}
+
+SEC("?raw_tp/sys_enter")
+int bad_relo_subprog(const void *ctx)
+{
+ static struct task_struct___bad *t;
+
+ return bad_subprog() + bpf_core_field_size(t->pid);
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} existing_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} missing_map SEC(".maps");
+
+SEC("?raw_tp/sys_enter")
+int use_missing_map(const void *ctx)
+{
+ int zero = 0, *value;
+
+ value = bpf_map_lookup_elem(&existing_map, &zero);
+
+ value = bpf_map_lookup_elem(&missing_map, &zero);
+
+ return value != NULL;
+}
+
+extern int bpf_nonexistent_kfunc(void) __ksym __weak;
+
+SEC("?raw_tp/sys_enter")
+int use_missing_kfunc(const void *ctx)
+{
+ bpf_nonexistent_kfunc();
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_lookup_and_delete.c b/tools/testing/selftests/bpf/progs/test_lookup_and_delete.c
new file mode 100644
index 000000000000..3a193f42c7e7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_lookup_and_delete.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+__u32 set_pid = 0;
+__u64 set_key = 0;
+__u64 set_value = 0;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 2);
+ __type(key, __u64);
+ __type(value, __u64);
+} hash_map SEC(".maps");
+
+SEC("tp/syscalls/sys_enter_getpgid")
+int bpf_lookup_and_delete_test(const void *ctx)
+{
+ if (set_pid == bpf_get_current_pid_tgid() >> 32)
+ bpf_map_update_elem(&hash_map, &set_key, &set_value, BPF_NOEXIST);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
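
The program inserts (set_key, set_value) when the monitored pid calls getpgid(); user space then exercises the BPF_MAP_LOOKUP_AND_DELETE_ELEM command against the hash map. A small sketch, assuming the map fd comes from the loaded object:

#include <errno.h>
#include <bpf/bpf.h>

int pop_entry(int hash_map_fd, __u64 key, __u64 *value)
{
	/* atomically fetch the value and remove the element */
	int err = bpf_map_lookup_and_delete_elem(hash_map_fd, &key, value);

	if (err)
		return -errno;

	/* the element is gone now: a repeated call fails with ENOENT */
	return 0;
}
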
diff --git a/tools/testing/selftests/bpf/progs/test_lookup_key.c b/tools/testing/selftests/bpf/progs/test_lookup_key.c
new file mode 100644
index 000000000000..1f7e1e59b073
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_lookup_key.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 monitored_pid;
+__s32 key_serial;
+__u32 key_id;
+__u64 flags;
+
+extern struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym;
+extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
+extern void bpf_key_put(struct bpf_key *key) __ksym;
+
+SEC("lsm.s/bpf")
+int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ struct bpf_key *bkey;
+ __u32 pid;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ if (key_serial)
+ bkey = bpf_lookup_user_key(key_serial, flags);
+ else
+ bkey = bpf_lookup_system_key(key_id);
+
+ if (!bkey)
+ return -ENOENT;
+
+ bpf_key_put(bkey);
+
+ return 0;
+}
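
bpf_lookup_user_key() needs a key serial that user space created beforehand. A sketch, assuming the raw add_key(2) syscall; the key type, description, and payload are arbitrary test values:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

/* create a test key in the session keyring and return its serial,
 * which user space then writes into the program's key_serial variable */
long make_test_key(void)
{
	return syscall(__NR_add_key, "user", "test_bpf_key",
		       "payload", 7, KEY_SPEC_SESSION_KEYRING);
}
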
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_redirect.c b/tools/testing/selftests/bpf/progs/test_lwt_redirect.c
new file mode 100644
index 000000000000..83439b87b766
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_lwt_redirect.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/ip.h>
+#include <linux/if_ether.h>
+
+/* We don't care whether the packet can be received by the network stack.
+ * We only care that the packet is sent to the correct device in the
+ * correct direction and does not panic the kernel.
+ */
+static int prepend_dummy_mac(struct __sk_buff *skb)
+{
+ char mac[] = {0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0xf,
+ 0xe, 0xd, 0xc, 0xb, 0xa, 0x08, 0x00};
+
+ if (bpf_skb_change_head(skb, ETH_HLEN, 0))
+ return -1;
+
+ if (bpf_skb_store_bytes(skb, 0, mac, sizeof(mac), 0))
+ return -1;
+
+ return 0;
+}
+
+/* Use the last byte of the IP address to redirect the packet */
+static int get_redirect_target(struct __sk_buff *skb)
+{
+ struct iphdr *iph = NULL;
+ void *start = (void *)(long)skb->data;
+ void *end = (void *)(long)skb->data_end;
+
+ if (start + sizeof(*iph) > end)
+ return -1;
+
+ iph = (struct iphdr *)start;
+ return bpf_ntohl(iph->daddr) & 0xff;
+}
+
+SEC("redir_ingress")
+int test_lwt_redirect_in(struct __sk_buff *skb)
+{
+ int target = get_redirect_target(skb);
+
+ if (target < 0)
+ return BPF_OK;
+
+ if (prepend_dummy_mac(skb))
+ return BPF_DROP;
+
+ return bpf_redirect(target, BPF_F_INGRESS);
+}
+
+SEC("redir_egress")
+int test_lwt_redirect_out(struct __sk_buff *skb)
+{
+ int target = get_redirect_target(skb);
+
+ if (target < 0)
+ return BPF_OK;
+
+ if (prepend_dummy_mac(skb))
+ return BPF_DROP;
+
+ return bpf_redirect(target, 0);
+}
+
+SEC("redir_egress_nomac")
+int test_lwt_redirect_out_nomac(struct __sk_buff *skb)
+{
+ int target = get_redirect_target(skb);
+
+ if (target < 0)
+ return BPF_OK;
+
+ return bpf_redirect(target, 0);
+}
+
+SEC("redir_ingress_nomac")
+int test_lwt_redirect_in_nomac(struct __sk_buff *skb)
+{
+ int target = get_redirect_target(skb);
+
+ if (target < 0)
+ return BPF_OK;
+
+ return bpf_redirect(target, BPF_F_INGRESS);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_reroute.c b/tools/testing/selftests/bpf/progs/test_lwt_reroute.c
new file mode 100644
index 000000000000..1dc64351929c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_lwt_reroute.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <inttypes.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+
+/* This function extracts the last byte of the daddr, and uses it
+ * as the output device index.
+ */
+SEC("lwt_xmit")
+int test_lwt_reroute(struct __sk_buff *skb)
+{
+ struct iphdr *iph = NULL;
+ void *start = (void *)(long)skb->data;
+ void *end = (void *)(long)skb->data_end;
+
+ /* set mark at most once */
+ if (skb->mark != 0)
+ return BPF_OK;
+
+ if (start + sizeof(*iph) > end)
+ return BPF_DROP;
+
+ iph = (struct iphdr *)start;
+ skb->mark = bpf_ntohl(iph->daddr) & 0xff;
+
+ /* do not reroute x.x.x.0 packets */
+ if (skb->mark == 0)
+ return BPF_OK;
+
+ return BPF_LWT_REROUTE;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
index 48ff2b2ad5e7..fed66f36adb6 100644
--- a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
+++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
@@ -6,6 +6,8 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
+
/* Packet parsing state machine helpers. */
#define cursor_advance(_cursor, _len) \
({ void *_tmp = _cursor; _cursor += _len; _tmp; })
@@ -131,7 +133,7 @@ int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
*pad_off = 0;
// we can only go as far as ~10 TLVs due to the BPF max stack size
- #pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < 10; i++) {
struct sr6_tlv_t tlv;
@@ -302,7 +304,7 @@ int __encap_srh(struct __sk_buff *skb)
seg = (struct ip6_addr_t *)((char *)srh + sizeof(*srh));
- #pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (unsigned long long lo = 0; lo < 4; lo++) {
seg->lo = bpf_cpu_to_be64(4 - lo);
seg->hi = bpf_cpu_to_be64(hi);
diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c
index 1cfeb940cf9f..b295f9b721bf 100644
--- a/tools/testing/selftests/bpf/progs/test_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c
@@ -9,21 +9,45 @@ struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
__uint(map_flags, 0);
- __uint(key_size, sizeof(__u32));
- /* must be sizeof(__u32) for map in map */
- __uint(value_size, sizeof(__u32));
+ __type(key, __u32);
+ __type(value, __u32);
} mim_array SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, 1);
__uint(map_flags, 0);
- __uint(key_size, sizeof(int));
- /* must be sizeof(__u32) for map in map */
- __uint(value_size, sizeof(__u32));
+ __type(key, int);
+ __type(value, __u32);
} mim_hash SEC(".maps");
-SEC("xdp_mimtest")
+/* The following three maps are used to test that a
+ * perf_event_array map can be an inner map of
+ * hash/array_of_maps.
+ */
+struct perf_event_array {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+} inner_map0 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __array(values, struct perf_event_array);
+} mim_array_pe SEC(".maps") = {
+ .values = {&inner_map0}};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __array(values, struct perf_event_array);
+} mim_hash_pe SEC(".maps") = {
+ .values = {&inner_map0}};
+
+SEC("xdp")
int xdp_mimtest0(struct xdp_md *ctx)
{
int value = 123;
@@ -49,5 +73,4 @@ int xdp_mimtest0(struct xdp_md *ctx)
return XDP_PASS;
}
-int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
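
For the *_OF_MAPS types above, an inner map can also be installed at runtime: user space writes the inner map's fd as the outer map's value, and the kernel resolves the fd to the map at update time. A sketch (map fds assumed to come from the loaded object):

#include <bpf/bpf.h>

int set_inner_map(int outer_map_fd, int inner_map_fd)
{
	__u32 key = 0;

	/* the value passed in is the inner map's fd, not a map pointer */
	return bpf_map_update_elem(outer_map_fd, &key, &inner_map_fd, BPF_ANY);
}
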
diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c b/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c
new file mode 100644
index 000000000000..9c7d75cf0bd6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Isovalent, Inc. */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct inner {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, int);
+ __uint(max_entries, 4);
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+	__uint(max_entries, 0); /* This will make map creation fail */
+ __type(key, __u32);
+ __array(values, struct inner);
+} mim SEC(".maps");
+
+SEC("xdp")
+int xdp_noop0(struct xdp_md *ctx)
+{
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_map_lock.c b/tools/testing/selftests/bpf/progs/test_map_lock.c
index b5c07ae7b68f..1c02511b73cd 100644
--- a/tools/testing/selftests/bpf/progs/test_map_lock.c
+++ b/tools/testing/selftests/bpf/progs/test_map_lock.c
@@ -30,10 +30,10 @@ struct {
__type(value, struct array_elem);
} array_map SEC(".maps");
-SEC("map_lock_demo")
+SEC("cgroup/skb")
int bpf_map_lock_test(struct __sk_buff *skb)
{
- struct hmap_elem zero = {}, *val;
+ struct hmap_elem *val;
int rnd = bpf_get_prandom_u32();
int key = 0, err = 1, i;
struct array_elem *q;
diff --git a/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
new file mode 100644
index 000000000000..ca827b1092da
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Bytedance */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+__u64 percpu_array_elem_sum = 0;
+__u64 percpu_hash_elem_sum = 0;
+__u64 percpu_lru_hash_elem_sum = 0;
+const volatile int nr_cpus;
+const volatile int my_pid;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} percpu_array_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u64);
+ __type(value, __u64);
+} percpu_hash_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u64);
+ __type(value, __u64);
+} percpu_lru_hash_map SEC(".maps");
+
+struct read_percpu_elem_ctx {
+ void *map;
+ __u64 sum;
+};
+
+static int read_percpu_elem_callback(__u32 index, struct read_percpu_elem_ctx *ctx)
+{
+ __u64 key = 0;
+ __u64 *value;
+
+ value = bpf_map_lookup_percpu_elem(ctx->map, &key, index);
+ if (value)
+ ctx->sum += *value;
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getuid")
+int sysenter_getuid(const void *ctx)
+{
+ struct read_percpu_elem_ctx map_ctx;
+
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ map_ctx.map = &percpu_array_map;
+ map_ctx.sum = 0;
+ bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+ percpu_array_elem_sum = map_ctx.sum;
+
+ map_ctx.map = &percpu_hash_map;
+ map_ctx.sum = 0;
+ bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+ percpu_hash_elem_sum = map_ctx.sum;
+
+ map_ctx.map = &percpu_lru_hash_map;
+ map_ctx.sum = 0;
+ bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+ percpu_lru_hash_elem_sum = map_ctx.sum;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
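
bpf_map_lookup_percpu_elem() reads a single CPU's slot from inside the program; from user space, one lookup on a per-CPU map returns every possible CPU's value at once. A sketch of the equivalent user-space summation:

#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

long long sum_percpu_elem(int map_fd, __u64 key)
{
	int nr_cpus = libbpf_num_possible_cpus();
	long long sum = 0;
	__u64 *values;

	if (nr_cpus < 0)
		return -1;

	/* the kernel fills one 8-byte-aligned slot per possible CPU */
	values = calloc(nr_cpus, sizeof(*values));
	if (!values)
		return -1;

	if (!bpf_map_lookup_elem(map_fd, &key, values))
		for (int i = 0; i < nr_cpus; i++)
			sum += values[i];

	free(values);
	return sum;
}
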
diff --git a/tools/testing/selftests/bpf/progs/test_map_ops.c b/tools/testing/selftests/bpf/progs/test_map_ops.c
new file mode 100644
index 000000000000..b53b46a090c8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_map_ops.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} hash_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK);
+ __uint(max_entries, 1);
+ __type(value, int);
+} stack_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} array_map SEC(".maps");
+
+const volatile pid_t pid;
+long err = 0;
+
+static u64 callback(u64 map, u64 key, u64 val, u64 ctx, u64 flags)
+{
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getpid")
+int map_update(void *ctx)
+{
+ const int key = 0;
+ const int val = 1;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ err = bpf_map_update_elem(&hash_map, &key, &val, BPF_NOEXIST);
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getppid")
+int map_delete(void *ctx)
+{
+ const int key = 0;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ err = bpf_map_delete_elem(&hash_map, &key);
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getuid")
+int map_push(void *ctx)
+{
+ const int val = 1;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ err = bpf_map_push_elem(&stack_map, &val, 0);
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_geteuid")
+int map_pop(void *ctx)
+{
+ int val;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ err = bpf_map_pop_elem(&stack_map, &val);
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getgid")
+int map_peek(void *ctx)
+{
+ int val;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ err = bpf_map_peek_elem(&stack_map, &val);
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_gettid")
+int map_for_each_pass(void *ctx)
+{
+ const int key = 0;
+ const int val = 1;
+ const u64 flags = 0;
+ int callback_ctx;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ bpf_map_update_elem(&array_map, &key, &val, flags);
+
+ err = bpf_for_each_map_elem(&array_map, callback, &callback_ctx, flags);
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getpgid")
+int map_for_each_fail(void *ctx)
+{
+ const int key = 0;
+ const int val = 1;
+ const u64 flags = BPF_NOEXIST;
+ int callback_ctx;
+
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ bpf_map_update_elem(&array_map, &key, &val, flags);
+
+	/* calling for_each with non-zero flags will return an error */
+ err = bpf_for_each_map_elem(&array_map, callback, &callback_ctx, flags);
+
+ return 0;
+}
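
Each program above hangs off a distinct syscall tracepoint, so the user-space side drives one map operation at a time by issuing the matching syscall. A sketch of the trigger sequence:

#include <unistd.h>
#include <sys/syscall.h>

static void trigger_map_ops(void)
{
	syscall(SYS_getpid);		/* map_update */
	syscall(SYS_getppid);		/* map_delete */
	syscall(SYS_getuid);		/* map_push */
	syscall(SYS_geteuid);		/* map_pop */
	syscall(SYS_getgid);		/* map_peek */
	syscall(SYS_gettid);		/* map_for_each_pass */
	syscall(SYS_getpgid, 0);	/* map_for_each_fail */
}
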
diff --git a/tools/testing/selftests/bpf/progs/test_migrate_reuseport.c b/tools/testing/selftests/bpf/progs/test_migrate_reuseport.c
new file mode 100644
index 000000000000..27df571abf5b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_migrate_reuseport.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Check if we can migrate child sockets.
+ *
+ * 1. If reuse_md->migrating_sk is NULL (SYN packet),
+ * return SK_PASS without selecting a listener.
+ * 2. If reuse_md->migrating_sk is not NULL (socket migration),
+ * select a listener (reuseport_map[migrate_map[cookie]])
+ *
+ * Author: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
+ */
+
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
+ __uint(max_entries, 256);
+ __type(key, int);
+ __type(value, __u64);
+} reuseport_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 256);
+ __type(key, __u64);
+ __type(value, int);
+} migrate_map SEC(".maps");
+
+int migrated_at_close = 0;
+int migrated_at_close_fastopen = 0;
+int migrated_at_send_synack = 0;
+int migrated_at_recv_ack = 0;
+__be16 server_port;
+
+SEC("xdp")
+int drop_ack(struct xdp_md *xdp)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eth = data;
+ struct tcphdr *tcp = NULL;
+
+ if (eth + 1 > data_end)
+ goto pass;
+
+ switch (bpf_ntohs(eth->h_proto)) {
+ case ETH_P_IP: {
+ struct iphdr *ip = (struct iphdr *)(eth + 1);
+
+ if (ip + 1 > data_end)
+ goto pass;
+
+ if (ip->protocol != IPPROTO_TCP)
+ goto pass;
+
+ tcp = (struct tcphdr *)((void *)ip + ip->ihl * 4);
+ break;
+ }
+ case ETH_P_IPV6: {
+ struct ipv6hdr *ipv6 = (struct ipv6hdr *)(eth + 1);
+
+ if (ipv6 + 1 > data_end)
+ goto pass;
+
+ if (ipv6->nexthdr != IPPROTO_TCP)
+ goto pass;
+
+ tcp = (struct tcphdr *)(ipv6 + 1);
+ break;
+ }
+ default:
+ goto pass;
+ }
+
+ if (tcp + 1 > data_end)
+ goto pass;
+
+ if (tcp->dest != server_port)
+ goto pass;
+
+ if (!tcp->syn && tcp->ack)
+ return XDP_DROP;
+
+pass:
+ return XDP_PASS;
+}
+
+SEC("sk_reuseport/migrate")
+int migrate_reuseport(struct sk_reuseport_md *reuse_md)
+{
+ int *key, flags = 0, state, err;
+ __u64 cookie;
+
+ if (!reuse_md->migrating_sk)
+ return SK_PASS;
+
+ state = reuse_md->migrating_sk->state;
+ cookie = bpf_get_socket_cookie(reuse_md->sk);
+
+ key = bpf_map_lookup_elem(&migrate_map, &cookie);
+ if (!key)
+ return SK_DROP;
+
+ err = bpf_sk_select_reuseport(reuse_md, &reuseport_map, key, flags);
+ if (err)
+ return SK_PASS;
+
+ switch (state) {
+ case BPF_TCP_ESTABLISHED:
+ __sync_fetch_and_add(&migrated_at_close, 1);
+ break;
+ case BPF_TCP_SYN_RECV:
+ __sync_fetch_and_add(&migrated_at_close_fastopen, 1);
+ break;
+ case BPF_TCP_NEW_SYN_RECV:
+ if (!reuse_md->len)
+ __sync_fetch_and_add(&migrated_at_send_synack, 1);
+ else
+ __sync_fetch_and_add(&migrated_at_recv_ack, 1);
+ break;
+ }
+
+ return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
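
migrate_reuseport() looks up migrate_map by a socket cookie and uses the stored index to pick the migration target out of reuseport_map. A hedged sketch of how user space could populate both maps; the exact cookie/listener pairing follows the test's conventions, and error handling is trimmed:

#include <sys/socket.h>
#include <bpf/bpf.h>

int register_migration(int reuseport_map_fd, int migrate_map_fd,
		       int from_listener_fd, int to_listener_fd, int index)
{
	__u64 value = to_listener_fd; /* sockarray values are 64-bit */
	socklen_t len;
	__u64 cookie;

	if (bpf_map_update_elem(reuseport_map_fd, &index, &value, BPF_NOEXIST))
		return -1;

	/* map a socket cookie to the index of the migration target */
	len = sizeof(cookie);
	if (getsockopt(from_listener_fd, SOL_SOCKET, SO_COOKIE, &cookie, &len))
		return -1;

	return bpf_map_update_elem(migrate_map_fd, &cookie, &index, BPF_NOEXIST);
}
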
diff --git a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
index 6077a025092c..d487153a839d 100644
--- a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
+++ b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
@@ -27,6 +27,7 @@ unsigned int nr_pure_ack = 0;
unsigned int nr_data = 0;
unsigned int nr_syn = 0;
unsigned int nr_fin = 0;
+unsigned int nr_hwtstamp = 0;
/* Check the header received from the active side */
static int __check_active_hdr_in(struct bpf_sock_ops *skops, bool check_syn)
@@ -146,6 +147,9 @@ static int check_active_hdr_in(struct bpf_sock_ops *skops)
if (th->ack && !th->fin && tcp_hdrlen(th) == skops->skb_len)
nr_pure_ack++;
+ if (skops->skb_hwtstamp)
+ nr_hwtstamp++;
+
return CG_OK;
}
@@ -293,7 +297,7 @@ static int handle_passive_estab(struct bpf_sock_ops *skops)
return check_active_hdr_in(skops);
}
-SEC("sockops/misc_estab")
+SEC("sockops")
int misc_estab(struct bpf_sock_ops *skops)
{
int true_val = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_mmap.c b/tools/testing/selftests/bpf/progs/test_mmap.c
index 4eb42cff5fe9..5a5cc19a15bf 100644
--- a/tools/testing/selftests/bpf/progs/test_mmap.c
+++ b/tools/testing/selftests/bpf/progs/test_mmap.c
@@ -9,7 +9,6 @@ char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 4096);
__uint(map_flags, BPF_F_MMAPABLE | BPF_F_RDONLY_PROG);
__type(key, __u32);
__type(value, char);
@@ -17,7 +16,6 @@ struct {
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 512 * 4); /* at least 4 pages of data */
__uint(map_flags, BPF_F_MMAPABLE);
__type(key, __u32);
__type(value, __u64);
diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c
index efd1e287ac17..03d7f89787a1 100644
--- a/tools/testing/selftests/bpf/progs/test_module_attach.c
+++ b/tools/testing/selftests/bpf/progs/test_module_attach.c
@@ -5,7 +5,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
-#include "../bpf_testmod/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod.h"
__u32 raw_tp_read_sz = 0;
@@ -17,6 +17,30 @@ int BPF_PROG(handle_raw_tp,
return 0;
}
+__u32 raw_tp_bare_write_sz = 0;
+
+SEC("raw_tp/bpf_testmod_test_write_bare_tp")
+int BPF_PROG(handle_raw_tp_bare,
+ struct task_struct *task, struct bpf_testmod_test_write_ctx *write_ctx)
+{
+ raw_tp_bare_write_sz = BPF_CORE_READ(write_ctx, len);
+ return 0;
+}
+
+int raw_tp_writable_bare_in_val = 0;
+int raw_tp_writable_bare_early_ret = 0;
+int raw_tp_writable_bare_out_val = 0;
+
+SEC("raw_tp.w/bpf_testmod_test_writable_bare_tp")
+int BPF_PROG(handle_raw_tp_writable_bare,
+ struct bpf_testmod_test_writable_ctx *writable)
+{
+ raw_tp_writable_bare_in_val = writable->val;
+ writable->early_ret = raw_tp_writable_bare_early_ret;
+ writable->val = raw_tp_writable_bare_out_val;
+ return 0;
+}
+
__u32 tp_btf_read_sz = 0;
SEC("tp_btf/bpf_testmod_test_read")
@@ -40,7 +64,7 @@ int BPF_PROG(handle_fentry,
__u32 fentry_manual_read_sz = 0;
-SEC("fentry/placeholder")
+SEC("fentry")
int BPF_PROG(handle_fentry_manual,
struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
@@ -49,6 +73,29 @@ int BPF_PROG(handle_fentry_manual,
return 0;
}
+__u32 fentry_explicit_read_sz = 0;
+
+SEC("fentry/bpf_testmod:bpf_testmod_test_read")
+int BPF_PROG(handle_fentry_explicit,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ fentry_explicit_read_sz = len;
+ return 0;
+}
+
+
+__u32 fentry_explicit_manual_read_sz = 0;
+
+SEC("fentry")
+int BPF_PROG(handle_fentry_explicit_manual,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ fentry_explicit_manual_read_sz = len;
+ return 0;
+}
+
__u32 fexit_read_sz = 0;
int fexit_ret = 0;
@@ -63,6 +110,18 @@ int BPF_PROG(handle_fexit,
return 0;
}
+SEC("fexit/bpf_testmod_return_ptr")
+int BPF_PROG(handle_fexit_ret, int arg, struct file *ret)
+{
+ long buf = 0;
+
+ bpf_probe_read_kernel(&buf, 8, ret);
+ bpf_probe_read_kernel(&buf, 8, (char *)ret + 256);
+ *(volatile int *)ret;
+ *(volatile int *)&ret->f_mode;
+ return 0;
+}
+
__u32 fmod_ret_read_sz = 0;
SEC("fmod_ret/bpf_testmod_test_read")
@@ -74,4 +133,10 @@ int BPF_PROG(handle_fmod_ret,
return 0; /* don't override the exit code */
}
+SEC("kprobe.multi/bpf_testmod_test_read")
+int BPF_PROG(kprobe_multi)
+{
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
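
SEC("fentry") without a target leaves the attach point unresolved, so the loader must fill it in before load. A sketch using libbpf's API; the function name matches the testmod export exercised above:

#include <bpf/libbpf.h>

int resolve_manual_fentry(struct bpf_program *prog)
{
	/* attach_prog_fd == 0 means "attach by kernel/module function name" */
	return bpf_program__set_attach_target(prog, 0, "bpf_testmod_test_read");
}
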
diff --git a/tools/testing/selftests/bpf/progs/test_netfilter_link_attach.c b/tools/testing/selftests/bpf/progs/test_netfilter_link_attach.c
new file mode 100644
index 000000000000..03a475160abe
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_netfilter_link_attach.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+#define NF_ACCEPT 1
+
+SEC("netfilter")
+int nf_link_attach_test(struct bpf_nf_ctx *ctx)
+{
+ return NF_ACCEPT;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
index 1dca70a6de2f..386315afad65 100644
--- a/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
+++ b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
@@ -5,33 +5,48 @@
#include <stdint.h>
#include <bpf/bpf_helpers.h>
-static volatile struct {
- __u64 dev;
- __u64 ino;
- __u64 pid_tgid;
- __u64 user_pid_tgid;
-} res;
-
-SEC("raw_tracepoint/sys_enter")
-int trace(void *ctx)
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u32);
+} sock_map SEC(".maps");
+
+__u64 user_pid = 0;
+__u64 user_tgid = 0;
+__u64 dev = 0;
+__u64 ino = 0;
+
+static void get_pid_tgid(void)
{
- __u64 ns_pid_tgid, expected_pid;
struct bpf_pidns_info nsdata;
- __u32 key = 0;
- if (bpf_get_ns_current_pid_tgid(res.dev, res.ino, &nsdata,
- sizeof(struct bpf_pidns_info)))
- return 0;
+ if (bpf_get_ns_current_pid_tgid(dev, ino, &nsdata, sizeof(struct bpf_pidns_info)))
+ return;
- ns_pid_tgid = (__u64)nsdata.tgid << 32 | nsdata.pid;
- expected_pid = res.user_pid_tgid;
+ user_pid = nsdata.pid;
+ user_tgid = nsdata.tgid;
+}
- if (expected_pid != ns_pid_tgid)
- return 0;
+SEC("?tracepoint/syscalls/sys_enter_nanosleep")
+int tp_handler(const void *ctx)
+{
+ get_pid_tgid();
+ return 0;
+}
- res.pid_tgid = ns_pid_tgid;
+SEC("?cgroup/bind4")
+int cgroup_bind4(struct bpf_sock_addr *ctx)
+{
+ get_pid_tgid();
+ return 1;
+}
- return 0;
+SEC("?sk_msg")
+int sk_msg(struct sk_msg_md *msg)
+{
+ get_pid_tgid();
+ return SK_PASS;
}
char _license[] SEC("license") = "GPL";
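
bpf_get_ns_current_pid_tgid() only succeeds when the dev/inode pair passed in identifies the task's pid namespace, so user space first stats its own namespace file and writes the result into the program's dev/ino variables. A sketch:

#include <sys/stat.h>

int get_pidns_ids(unsigned long long *dev, unsigned long long *ino)
{
	struct stat st;

	/* a pid namespace is identified by the dev/ino of this file */
	if (stat("/proc/self/ns/pid", &st))
		return -1;

	*dev = st.st_dev;
	*ino = st.st_ino;
	return 0;
}
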
diff --git a/tools/testing/selftests/bpf/progs/test_obj_id.c b/tools/testing/selftests/bpf/progs/test_obj_id.c
index ded71b3ff6b4..2850ae788a91 100644
--- a/tools/testing/selftests/bpf/progs/test_obj_id.c
+++ b/tools/testing/selftests/bpf/progs/test_obj_id.c
@@ -4,6 +4,7 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
@@ -19,6 +20,7 @@ int test_obj_id(void *ctx)
__u64 *value;
value = bpf_map_lookup_elem(&test_map_id, &key);
+ __sink(value);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_overhead.c b/tools/testing/selftests/bpf/progs/test_overhead.c
index abb7344b531f..5edf3cdc213d 100644
--- a/tools/testing/selftests/bpf/progs/test_overhead.c
+++ b/tools/testing/selftests/bpf/progs/test_overhead.c
@@ -1,9 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
-#include <stdbool.h>
-#include <stddef.h>
-#include <linux/bpf.h>
-#include <linux/ptrace.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c
new file mode 100644
index 000000000000..d9b2ba7ac340
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* This parsing logic is taken from the open source library katran, a layer 4
+ * load balancer.
+ *
+ * The same parsing logic using dynptrs can be found in test_parse_tcp_hdr_opt_dynptr.c
+ *
+ * https://github.com/facebookincubator/katran/blob/main/katran/lib/bpf/pckt_parsing.h
+ */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/tcp.h>
+#include <stdbool.h>
+#include <linux/ipv6.h>
+#include <linux/if_ether.h>
+#include "test_tcp_hdr_options.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Kind number used for experiments */
+const __u32 tcp_hdr_opt_kind_tpr = 0xFD;
+/* Length of the tcp header option */
+const __u32 tcp_hdr_opt_len_tpr = 6;
+/* maximum number of header options to check to look up server_id */
+const __u32 tcp_hdr_opt_max_opt_checks = 15;
+
+__u32 server_id;
+
+struct hdr_opt_state {
+ __u32 server_id;
+ __u8 byte_offset;
+ __u8 hdr_bytes_remaining;
+};
+
+static int parse_hdr_opt(const struct xdp_md *xdp, struct hdr_opt_state *state)
+{
+ const void *data = (void *)(long)xdp->data;
+ const void *data_end = (void *)(long)xdp->data_end;
+ __u8 *tcp_opt, kind, hdr_len;
+
+ tcp_opt = (__u8 *)(data + state->byte_offset);
+ if (tcp_opt + 1 > data_end)
+ return -1;
+
+ kind = tcp_opt[0];
+
+ if (kind == TCPOPT_EOL)
+ return -1;
+
+ if (kind == TCPOPT_NOP) {
+ state->hdr_bytes_remaining--;
+ state->byte_offset++;
+ return 0;
+ }
+
+ if (state->hdr_bytes_remaining < 2 ||
+ tcp_opt + sizeof(__u8) + sizeof(__u8) > data_end)
+ return -1;
+
+ hdr_len = tcp_opt[1];
+ if (hdr_len > state->hdr_bytes_remaining)
+ return -1;
+
+ if (kind == tcp_hdr_opt_kind_tpr) {
+ if (hdr_len != tcp_hdr_opt_len_tpr)
+ return -1;
+
+ if (tcp_opt + tcp_hdr_opt_len_tpr > data_end)
+ return -1;
+
+ state->server_id = *(__u32 *)&tcp_opt[2];
+ return 1;
+ }
+
+ state->hdr_bytes_remaining -= hdr_len;
+ state->byte_offset += hdr_len;
+ return 0;
+}
+
+SEC("xdp")
+int xdp_ingress_v6(struct xdp_md *xdp)
+{
+ const void *data = (void *)(long)xdp->data;
+ const void *data_end = (void *)(long)xdp->data_end;
+ struct hdr_opt_state opt_state = {};
+ __u8 tcp_hdr_opt_len = 0;
+ struct tcphdr *tcp_hdr;
+ __u64 tcp_offset = 0;
+ int err;
+
+ tcp_offset = sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
+ tcp_hdr = (struct tcphdr *)(data + tcp_offset);
+ if (tcp_hdr + 1 > data_end)
+ return XDP_DROP;
+
+ tcp_hdr_opt_len = (tcp_hdr->doff * 4) - sizeof(struct tcphdr);
+ if (tcp_hdr_opt_len < tcp_hdr_opt_len_tpr)
+ return XDP_DROP;
+
+ opt_state.hdr_bytes_remaining = tcp_hdr_opt_len;
+ opt_state.byte_offset = sizeof(struct tcphdr) + tcp_offset;
+
+	/* the TCP header can carry at most 40 bytes of options */
+ for (int i = 0; i < tcp_hdr_opt_max_opt_checks; i++) {
+ err = parse_hdr_opt(xdp, &opt_state);
+
+ if (err || !opt_state.hdr_bytes_remaining)
+ break;
+ }
+
+ if (!opt_state.server_id)
+ return XDP_DROP;
+
+ server_id = opt_state.server_id;
+
+ return XDP_PASS;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c
new file mode 100644
index 000000000000..dc6e43bc6a62
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* This logic is lifted from a real-world use case of packet parsing, used in
+ * the open source library katran, a layer 4 load balancer.
+ *
+ * This test demonstrates how to parse packet contents using dynptrs. The
+ * original code (parsing without dynptrs) can be found in test_parse_tcp_hdr_opt.c
+ */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/tcp.h>
+#include <stdbool.h>
+#include <linux/ipv6.h>
+#include <linux/if_ether.h>
+#include "test_tcp_hdr_options.h"
+#include "bpf_kfuncs.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Kind number used for experiments */
+const __u32 tcp_hdr_opt_kind_tpr = 0xFD;
+/* Length of the tcp header option */
+const __u32 tcp_hdr_opt_len_tpr = 6;
+/* maximum number of header options to check to look up server_id */
+const __u32 tcp_hdr_opt_max_opt_checks = 15;
+
+__u32 server_id;
+
+static int parse_hdr_opt(struct bpf_dynptr *ptr, __u32 *off, __u8 *hdr_bytes_remaining,
+ __u32 *server_id)
+{
+ __u8 kind, hdr_len;
+ __u8 buffer[sizeof(kind) + sizeof(hdr_len) + sizeof(*server_id)];
+ __u8 *data;
+
+ __builtin_memset(buffer, 0, sizeof(buffer));
+
+ data = bpf_dynptr_slice(ptr, *off, buffer, sizeof(buffer));
+ if (!data)
+ return -1;
+
+ kind = data[0];
+
+ if (kind == TCPOPT_EOL)
+ return -1;
+
+ if (kind == TCPOPT_NOP) {
+ *off += 1;
+ *hdr_bytes_remaining -= 1;
+ return 0;
+ }
+
+ if (*hdr_bytes_remaining < 2)
+ return -1;
+
+ hdr_len = data[1];
+ if (hdr_len > *hdr_bytes_remaining)
+ return -1;
+
+ if (kind == tcp_hdr_opt_kind_tpr) {
+ if (hdr_len != tcp_hdr_opt_len_tpr)
+ return -1;
+
+ __builtin_memcpy(server_id, (__u32 *)(data + 2), sizeof(*server_id));
+ return 1;
+ }
+
+ *off += hdr_len;
+ *hdr_bytes_remaining -= hdr_len;
+ return 0;
+}
+
+SEC("xdp")
+int xdp_ingress_v6(struct xdp_md *xdp)
+{
+ __u8 buffer[sizeof(struct tcphdr)] = {};
+ __u8 hdr_bytes_remaining;
+ struct tcphdr *tcp_hdr;
+ __u8 tcp_hdr_opt_len;
+ int err = 0;
+ __u32 off;
+
+ struct bpf_dynptr ptr;
+
+ bpf_dynptr_from_xdp(xdp, 0, &ptr);
+
+ off = sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
+
+ tcp_hdr = bpf_dynptr_slice(&ptr, off, buffer, sizeof(buffer));
+ if (!tcp_hdr)
+ return XDP_DROP;
+
+ tcp_hdr_opt_len = (tcp_hdr->doff * 4) - sizeof(struct tcphdr);
+ if (tcp_hdr_opt_len < tcp_hdr_opt_len_tpr)
+ return XDP_DROP;
+
+ hdr_bytes_remaining = tcp_hdr_opt_len;
+
+ off += sizeof(struct tcphdr);
+
+	/* the TCP header can carry at most 40 bytes of options */
+ for (int i = 0; i < tcp_hdr_opt_max_opt_checks; i++) {
+ err = parse_hdr_opt(&ptr, &off, &hdr_bytes_remaining, &server_id);
+
+ if (err || !hdr_bytes_remaining)
+ break;
+ }
+
+ if (!server_id)
+ return XDP_DROP;
+
+ return XDP_PASS;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c b/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c
index fb22de7c365d..1249a945699f 100644
--- a/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c
+++ b/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c
@@ -7,15 +7,15 @@
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(max_entries, 1);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} array_1 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(max_entries, 1);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
__uint(map_flags, BPF_F_PRESERVE_ELEMS);
} array_2 SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/test_perf_branches.c b/tools/testing/selftests/bpf/progs/test_perf_branches.c
index a1ccc831c882..05ac9410cd68 100644
--- a/tools/testing/selftests/bpf/progs/test_perf_branches.c
+++ b/tools/testing/selftests/bpf/progs/test_perf_branches.c
@@ -8,6 +8,7 @@
#include <bpf/bpf_tracing.h>
int valid = 0;
+int run_cnt = 0;
int required_size_out = 0;
int written_stack_out = 0;
int written_global_out = 0;
@@ -24,6 +25,8 @@ int perf_branches(void *ctx)
__u64 entries[4 * 3] = {0};
int required_size, written_stack, written_global;
+ ++run_cnt;
+
/* write to stack */
written_stack = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
/* ignore spurious events */
diff --git a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
index 8207a2dc2f9d..17d5b67744d5 100644
--- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c
+++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
@@ -7,19 +7,35 @@
#include <bpf/bpf_tracing.h>
struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+} my_pid_map SEC(".maps");
+
+struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
+ __type(key, int);
+ __type(value, int);
} perf_buf_map SEC(".maps");
SEC("tp/raw_syscalls/sys_enter")
int handle_sys_enter(void *ctx)
{
+ int zero = 0, *my_pid, cur_pid;
int cpu = bpf_get_smp_processor_id();
+ my_pid = bpf_map_lookup_elem(&my_pid_map, &zero);
+ if (!my_pid)
+ return 1;
+
+ cur_pid = bpf_get_current_pid_tgid() >> 32;
+ if (cur_pid != *my_pid)
+ return 1;
+
bpf_perf_event_output(ctx, &perf_buf_map, BPF_F_CURRENT_CPU,
&cpu, sizeof(cpu));
- return 0;
+ return 1;
}
char _license[] SEC("license") = "GPL";
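
With the pid filter in place, only events from the test process reach perf_buf_map. A sketch of the consuming side using libbpf's perf_buffer API:

#include <stdio.h>
#include <bpf/libbpf.h>

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* the program emitted the producing CPU id as the payload */
	if (size >= sizeof(int))
		printf("event from cpu %d\n", *(int *)data);
}

int consume_events(int perf_buf_map_fd)
{
	struct perf_buffer *pb;
	int err;

	pb = perf_buffer__new(perf_buf_map_fd, 8 /* pages per CPU */,
			      on_sample, NULL /* no lost-event cb */, NULL, NULL);
	if (!pb)
		return -1;

	err = perf_buffer__poll(pb, 100 /* timeout, ms */);
	perf_buffer__free(pb);
	return err < 0 ? err : 0;
}
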
diff --git a/tools/testing/selftests/bpf/progs/test_perf_link.c b/tools/testing/selftests/bpf/progs/test_perf_link.c
new file mode 100644
index 000000000000..c1db9fd98d0c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_perf_link.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int run_cnt = 0;
+
+SEC("perf_event")
+int handler(struct pt_regs *ctx)
+{
+ __sync_fetch_and_add(&run_cnt, 1);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
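
A perf_event program only runs once it is attached to an open perf event. A sketch that pins the handler above to a software CPU-clock event on the calling thread; the sampling frequency is an arbitrary choice:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <bpf/libbpf.h>

struct bpf_link *attach_cpu_clock(struct bpf_program *prog)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.size = sizeof(attr),
		.freq = 1,
		.sample_freq = 1000, /* arbitrary sampling frequency */
	};
	int pfd;

	pfd = syscall(__NR_perf_event_open, &attr, 0 /* this process */,
		      -1 /* any CPU */, -1 /* no group */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0)
		return NULL;

	return bpf_program__attach_perf_event(prog, pfd);
}
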
diff --git a/tools/testing/selftests/bpf/progs/test_perf_skip.c b/tools/testing/selftests/bpf/progs/test_perf_skip.c
new file mode 100644
index 000000000000..7eb8b6de7a57
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_perf_skip.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+uintptr_t ip;
+
+SEC("perf_event")
+int handler(struct bpf_perf_event_data *data)
+{
+ /* Skip events that have the correct ip. */
+ return ip != PT_REGS_IP(&data->regs);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_pinning.c b/tools/testing/selftests/bpf/progs/test_pinning.c
index 4ef2630292b2..0facea6cbbae 100644
--- a/tools/testing/selftests/bpf/progs/test_pinning.c
+++ b/tools/testing/selftests/bpf/progs/test_pinning.c
@@ -3,8 +3,6 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-int _version SEC("version") = 1;
-
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
diff --git a/tools/testing/selftests/bpf/progs/test_pinning_devmap.c b/tools/testing/selftests/bpf/progs/test_pinning_devmap.c
new file mode 100644
index 000000000000..c855f8f87eff
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_pinning_devmap.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(pinning, LIBBPF_PIN_BY_NAME);
+} pinmap1 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(pinning, LIBBPF_PIN_BY_NAME);
+} pinmap2 SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/test_pinning_htab.c b/tools/testing/selftests/bpf/progs/test_pinning_htab.c
new file mode 100644
index 000000000000..ae227930c73c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_pinning_htab.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct timer_val {
+ struct bpf_timer timer;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, struct timer_val);
+ __uint(max_entries, 1);
+} timer_prealloc SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, struct timer_val);
+ __uint(max_entries, 1);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} timer_no_prealloc SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/test_pinning_invalid.c b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c
index 5412e0c732c7..2a56db1094b8 100644
--- a/tools/testing/selftests/bpf/progs/test_pinning_invalid.c
+++ b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c
@@ -3,8 +3,6 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-int _version SEC("version") = 1;
-
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c
index 852051064507..bce7173152c6 100644
--- a/tools/testing/selftests/bpf/progs/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c
@@ -13,9 +13,7 @@
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-
-#define barrier() __asm__ __volatile__("": : :"memory")
-int _version SEC("version") = 1;
+#include "bpf_misc.h"
/* llvm will optimize both subprograms into exactly the same BPF assembly
*
@@ -54,6 +52,8 @@ int get_skb_len(struct __sk_buff *skb)
{
volatile char buf[MAX_STACK] = {};
+ __sink(buf[MAX_STACK - 1]);
+
return skb->len;
}
@@ -76,6 +76,8 @@ int get_skb_ifindex(int val, struct __sk_buff *skb, int var)
{
volatile char buf[MAX_STACK] = {};
+ __sink(buf[MAX_STACK - 1]);
+
return skb->ifindex * val * var;
}
@@ -97,7 +99,7 @@ int test_pkt_write_access_subprog(struct __sk_buff *skb, __u32 off)
return 0;
}
-SEC("classifier/test_pkt_access")
+SEC("tc")
int test_pkt_access(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_pkt_md_access.c b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
index 610c74ea9f64..d1839366f3e1 100644
--- a/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
@@ -7,8 +7,6 @@
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
-int _version SEC("version") = 1;
-
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define TEST_FIELD(TYPE, FIELD, MASK) \
{ \
@@ -27,7 +25,7 @@ int _version SEC("version") = 1;
}
#endif
-SEC("classifier/test_pkt_md_access")
+SEC("tc")
int test_pkt_md_access(struct __sk_buff *skb)
{
TEST_FIELD(__u8, len, 0xFF);
diff --git a/tools/testing/selftests/bpf/progs/test_probe_user.c b/tools/testing/selftests/bpf/progs/test_probe_user.c
index 89b3532ccc75..a8e501af9604 100644
--- a/tools/testing/selftests/bpf/progs/test_probe_user.c
+++ b/tools/testing/selftests/bpf/progs/test_probe_user.c
@@ -1,26 +1,47 @@
// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/ptrace.h>
-#include <linux/bpf.h>
-
-#include <netinet/in.h>
-
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
static struct sockaddr_in old;
-SEC("kprobe/__sys_connect")
-int BPF_KPROBE(handle_sys_connect)
+static int handle_sys_connect_common(struct sockaddr_in *uservaddr)
{
- void *ptr = (void *)PT_REGS_PARM2(ctx);
struct sockaddr_in new;
- bpf_probe_read_user(&old, sizeof(old), ptr);
+ bpf_probe_read_user(&old, sizeof(old), uservaddr);
__builtin_memset(&new, 0xab, sizeof(new));
- bpf_probe_write_user(ptr, &new, sizeof(new));
+ bpf_probe_write_user(uservaddr, &new, sizeof(new));
+
+ return 0;
+}
+
+SEC("ksyscall/connect")
+int BPF_KSYSCALL(handle_sys_connect, int fd, struct sockaddr_in *uservaddr,
+ int addrlen)
+{
+ return handle_sys_connect_common(uservaddr);
+}
+
+#if defined(bpf_target_s390)
+#ifndef SYS_CONNECT
+#define SYS_CONNECT 3
+#endif
+
+SEC("ksyscall/socketcall")
+int BPF_KSYSCALL(handle_sys_socketcall, int call, unsigned long *args)
+{
+ if (call == SYS_CONNECT) {
+ struct sockaddr_in *uservaddr;
+
+ bpf_probe_read_user(&uservaddr, sizeof(uservaddr), &args[1]);
+ return handle_sys_connect_common(uservaddr);
+ }
return 0;
}
+#endif
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_prog_array_init.c b/tools/testing/selftests/bpf/progs/test_prog_array_init.c
new file mode 100644
index 000000000000..2cd138356126
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_prog_array_init.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Hengqi Chen */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+const volatile pid_t my_pid = 0;
+int value = 0;
+
+SEC("raw_tp/sys_enter")
+int tailcall_1(void *ctx)
+{
+ value = 42;
+ return 0;
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 2);
+ __uint(key_size, sizeof(__u32));
+ __array(values, int (void *));
+} prog_array_init SEC(".maps") = {
+ .values = {
+ [1] = (void *)&tailcall_1,
+ },
+};
+
+SEC("raw_tp/sys_enter")
+int entry(void *ctx)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != my_pid)
+ return 0;
+
+ bpf_tail_call(ctx, &prog_array_init, 1);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c b/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c
new file mode 100644
index 000000000000..89b0cd5a3e06
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+
+char tp_name[128];
+
+SEC("lsm.s/bpf")
+int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ switch (cmd) {
+ case BPF_RAW_TRACEPOINT_OPEN:
+ bpf_copy_from_user(tp_name, sizeof(tp_name) - 1,
+ (void *)attr->raw_tracepoint.name);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+SEC("raw_tracepoint")
+int BPF_PROG(raw_tp_run)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_queue_stack_map.h b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h
index 4dd9806ad73b..648e8cab7a23 100644
--- a/tools/testing/selftests/bpf/progs/test_queue_stack_map.h
+++ b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h
@@ -8,8 +8,6 @@
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
-int _version SEC("version") = 1;
-
struct {
__uint(type, MAP_TYPE);
__uint(max_entries, 32);
@@ -26,7 +24,7 @@ struct {
__uint(value_size, sizeof(__u32));
} map_out SEC(".maps");
-SEC("test")
+SEC("tc")
int _test(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
index ecbeea2df259..7035fb4d4165 100644
--- a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
+++ b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
@@ -4,8 +4,9 @@
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
-static volatile const struct {
+const struct {
unsigned a[4];
/*
* if the struct's size is multiple of 16, compiler will put it into
@@ -15,11 +16,11 @@ static volatile const struct {
char _y;
} rdonly_values = { .a = {2, 3, 4, 5} };
-static volatile struct {
+struct {
unsigned did_run;
unsigned iters;
unsigned sum;
-} res;
+} res = {};
SEC("raw_tracepoint/sys_enter:skip_loop")
int skip_loop(struct pt_regs *ctx)
@@ -64,7 +65,7 @@ int full_loop(struct pt_regs *ctx)
{
/* prevent compiler to optimize everything out */
unsigned * volatile p = (void *)&rdonly_values.a;
- int i = sizeof(rdonly_values.a) / sizeof(rdonly_values.a[0]);
+ int i = ARRAY_SIZE(rdonly_values.a);
unsigned iters = 0, sum = 0;
/* validate verifier can allow full loop as well */
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf.c b/tools/testing/selftests/bpf/progs/test_ringbuf.c
index 8ba9959b036b..501cefa97633 100644
--- a/tools/testing/selftests/bpf/progs/test_ringbuf.c
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf.c
@@ -3,6 +3,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
@@ -15,7 +16,6 @@ struct sample {
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
- __uint(max_entries, 1 << 12);
} ringbuf SEC(".maps");
/* inputs */
@@ -36,12 +36,11 @@ long prod_pos = 0;
/* inner state */
long seq = 0;
-SEC("tp/syscalls/sys_enter_getpgid")
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
int test_ringbuf(void *ctx)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
struct sample *sample;
- int zero = 0;
if (cur_pid != pid)
return 0;
@@ -49,7 +48,7 @@ int test_ringbuf(void *ctx)
sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);
if (!sample) {
__sync_fetch_and_add(&dropped, 1);
- return 1;
+ return 0;
}
sample->pid = pid;
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c b/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c
new file mode 100644
index 000000000000..21bb7da90ea5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct sample {
+ int pid;
+ int seq;
+ long value;
+ char comm[16];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+} ringbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1000);
+ __type(key, struct sample);
+ __type(value, int);
+} hash_map SEC(".maps");
+
+/* inputs */
+int pid = 0;
+
+/* inner state */
+long seq = 0;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int test_ringbuf_mem_map_key(void *ctx)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+ struct sample *sample, sample_copy;
+ int *lookup_val;
+
+ if (cur_pid != pid)
+ return 0;
+
+ sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);
+ if (!sample)
+ return 0;
+
+ sample->pid = pid;
+ bpf_get_current_comm(sample->comm, sizeof(sample->comm));
+ sample->seq = ++seq;
+ sample->value = 42;
+
+ /* test using 'sample' (PTR_TO_MEM | MEM_ALLOC) as map key arg
+ */
+ lookup_val = (int *)bpf_map_lookup_elem(&hash_map, sample);
+ __sink(lookup_val);
+
+ /* workaround - memcpy is necessary so that verifier doesn't
+ * complain with:
+ * verifier internal error: more than one arg with ref_obj_id R3
+ * when trying to do bpf_map_update_elem(&hash_map, sample, &sample->seq, BPF_ANY);
+ *
+ * Since bpf_map_lookup_elem above uses 'sample' as key, test using
+ * sample field as value below
+ */
+ __builtin_memcpy(&sample_copy, sample, sizeof(struct sample));
+ bpf_map_update_elem(&hash_map, &sample_copy, &sample->seq, BPF_ANY);
+
+ bpf_ringbuf_submit(sample, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c b/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
index edf3b6953533..9626baa6779c 100644
--- a/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
@@ -15,7 +15,8 @@ struct sample {
struct ringbuf_map {
__uint(type, BPF_MAP_TYPE_RINGBUF);
- __uint(max_entries, 1 << 12);
+ /* libbpf will adjust to valid page size */
+ __uint(max_entries, 1000);
} ringbuf1 SEC(".maps"),
ringbuf2 SEC(".maps");
@@ -31,6 +32,17 @@ struct {
},
};
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __array(values, struct ringbuf_map);
+} ringbuf_hash SEC(".maps") = {
+ .values = {
+ [0] = &ringbuf1,
+ },
+};
+
/* inputs */
int pid = 0;
int target_ring = 0;
@@ -47,7 +59,6 @@ int test_ringbuf(void *ctx)
int cur_pid = bpf_get_current_pid_tgid() >> 32;
struct sample *sample;
void *rb;
- int zero = 0;
if (cur_pid != pid)
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_n.c b/tools/testing/selftests/bpf/progs/test_ringbuf_n.c
new file mode 100644
index 000000000000..8669eb42dbe0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_n.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Andrea Righi <andrea.righi@canonical.com>
+
+#include <linux/bpf.h>
+#include <sched.h>
+#include <unistd.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define TASK_COMM_LEN 16
+
+struct sample {
+ int pid;
+ long value;
+	char comm[TASK_COMM_LEN];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+} ringbuf SEC(".maps");
+
+int pid = 0;
+long value = 0;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int test_ringbuf_n(void *ctx)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+ struct sample *sample;
+
+ if (cur_pid != pid)
+ return 0;
+
+ sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);
+ if (!sample)
+ return 0;
+
+ sample->pid = pid;
+ sample->value = value;
+ bpf_get_current_comm(sample->comm, sizeof(sample->comm));
+
+ bpf_ringbuf_submit(sample, 0);
+
+ return 0;
+}
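
The records submitted above are drained in user space with libbpf's ring_buffer API; the callback fires once per submitted sample. A sketch:

#include <stdio.h>
#include <bpf/libbpf.h>

struct sample {
	int pid;
	long value;
	char comm[16];
};

static int handle_sample(void *ctx, void *data, size_t size)
{
	const struct sample *s = data;

	if (size >= sizeof(*s))
		printf("pid=%d value=%ld comm=%s\n", s->pid, s->value, s->comm);
	return 0;
}

int drain_ringbuf(int ringbuf_map_fd)
{
	struct ring_buffer *rb;
	int err;

	rb = ring_buffer__new(ringbuf_map_fd, handle_sample, NULL, NULL);
	if (!rb)
		return -1;

	err = ring_buffer__poll(rb, 100 /* timeout, ms */);
	ring_buffer__free(rb);
	return err < 0 ? err : 0;
}
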
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_overwrite.c b/tools/testing/selftests/bpf/progs/test_ringbuf_overwrite.c
new file mode 100644
index 000000000000..ff4aa67ddacc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_overwrite.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025. Huawei Technologies Co., Ltd */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(map_flags, BPF_F_RB_OVERWRITE);
+} ringbuf SEC(".maps");
+
+int pid;
+
+const volatile unsigned long LEN1;
+const volatile unsigned long LEN2;
+const volatile unsigned long LEN3;
+const volatile unsigned long LEN4;
+const volatile unsigned long LEN5;
+
+long reserve1_fail = 0;
+long reserve2_fail = 0;
+long reserve3_fail = 0;
+long reserve4_fail = 0;
+long reserve5_fail = 0;
+
+unsigned long avail_data = 0;
+unsigned long ring_size = 0;
+unsigned long cons_pos = 0;
+unsigned long prod_pos = 0;
+unsigned long over_pos = 0;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int test_overwrite_ringbuf(void *ctx)
+{
+ char *rec1, *rec2, *rec3, *rec4, *rec5;
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (cur_pid != pid)
+ return 0;
+
+ rec1 = bpf_ringbuf_reserve(&ringbuf, LEN1, 0);
+ if (!rec1) {
+ reserve1_fail = 1;
+ return 0;
+ }
+
+ rec2 = bpf_ringbuf_reserve(&ringbuf, LEN2, 0);
+ if (!rec2) {
+ bpf_ringbuf_discard(rec1, 0);
+ reserve2_fail = 1;
+ return 0;
+ }
+
+ rec3 = bpf_ringbuf_reserve(&ringbuf, LEN3, 0);
+ /* expect failure */
+ if (!rec3) {
+ reserve3_fail = 1;
+ } else {
+ bpf_ringbuf_discard(rec1, 0);
+ bpf_ringbuf_discard(rec2, 0);
+ bpf_ringbuf_discard(rec3, 0);
+ return 0;
+ }
+
+ rec4 = bpf_ringbuf_reserve(&ringbuf, LEN4, 0);
+ if (!rec4) {
+ reserve4_fail = 1;
+ bpf_ringbuf_discard(rec1, 0);
+ bpf_ringbuf_discard(rec2, 0);
+ return 0;
+ }
+
+ bpf_ringbuf_submit(rec1, 0);
+ bpf_ringbuf_submit(rec2, 0);
+ bpf_ringbuf_submit(rec4, 0);
+
+ rec5 = bpf_ringbuf_reserve(&ringbuf, LEN5, 0);
+ if (!rec5) {
+ reserve5_fail = 1;
+ return 0;
+ }
+
+ for (int i = 0; i < LEN3; i++)
+ rec5[i] = 0xdd;
+
+ bpf_ringbuf_submit(rec5, 0);
+
+ ring_size = bpf_ringbuf_query(&ringbuf, BPF_RB_RING_SIZE);
+ avail_data = bpf_ringbuf_query(&ringbuf, BPF_RB_AVAIL_DATA);
+ cons_pos = bpf_ringbuf_query(&ringbuf, BPF_RB_CONS_POS);
+ prod_pos = bpf_ringbuf_query(&ringbuf, BPF_RB_PROD_POS);
+ over_pos = bpf_ringbuf_query(&ringbuf, BPF_RB_OVERWRITE_POS);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
new file mode 100644
index 000000000000..f063a0013f85
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+} ringbuf SEC(".maps");
+
+/* inputs */
+int pid = 0;
+
+/* outputs */
+long passed = 0;
+long discarded = 0;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int test_ringbuf_write(void *ctx)
+{
+ int *foo, cur_pid = bpf_get_current_pid_tgid() >> 32;
+ void *sample1, *sample2;
+
+ if (cur_pid != pid)
+ return 0;
+
+ sample1 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0);
+ if (!sample1)
+ return 0;
+ /* first one can pass */
+ sample2 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0);
+ if (!sample2) {
+ bpf_ringbuf_discard(sample1, 0);
+ __sync_fetch_and_add(&discarded, 1);
+ return 0;
+ }
+ /* second one must not */
+ __sync_fetch_and_add(&passed, 1);
+ foo = sample2 + 4084;
+ *foo = 256;
+ bpf_ringbuf_discard(sample1, 0);
+ bpf_ringbuf_discard(sample2, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_seg6_loop.c b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
index a7278f064368..5059050f74f6 100644
--- a/tools/testing/selftests/bpf/progs/test_seg6_loop.c
+++ b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
@@ -6,6 +6,8 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
+
/* Packet parsing state machine helpers. */
#define cursor_advance(_cursor, _len) \
({ void *_tmp = _cursor; _cursor += _len; _tmp; })
@@ -134,7 +136,7 @@ static __always_inline int is_valid_tlv_boundary(struct __sk_buff *skb,
// we can only go as far as ~10 TLVs due to the BPF max stack size
// workaround: define induction variable "i" as "long" instead
// of "int" to prevent alu32 sub-register spilling.
- #pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (long i = 0; i < 100; i++) {
struct sr6_tlv_t tlv;
diff --git a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
index 26e77dcc7e91..a5be3267dbb0 100644
--- a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */
-#include <stdlib.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -15,8 +14,6 @@
#include <bpf/bpf_helpers.h>
#include "test_select_reuseport_common.h"
-int _version SEC("version") = 1;
-
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
@@ -24,8 +21,8 @@ int _version SEC("version") = 1;
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(__u32));
+ __type(key, __u32);
+ __type(value, __u32);
} outer_map SEC(".maps");
struct {
@@ -66,7 +63,7 @@ SEC("sk_reuseport")
int _select_by_skb_data(struct sk_reuseport_md *reuse_md)
{
__u32 linum, index = 0, flags = 0, index_zero = 0;
- __u32 *result_cnt, *linum_value;
+ __u32 *result_cnt;
struct data_check data_check = {};
struct cmd *cmd, cmd_copy;
void *data, *data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c
index b4233d3efac2..176a355e3062 100644
--- a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c
@@ -1,27 +1,50 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
-#include <linux/bpf.h>
+#include <vmlinux.h>
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
-__u32 sig = 0, pid = 0, status = 0, signal_thread = 0;
+struct task_struct *bpf_task_from_pid(int pid) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type, u64 value) __ksym;
+
+__u32 sig = 0, pid = 0, status = 0, signal_thread = 0, target_pid = 0;
static __always_inline int bpf_send_signal_test(void *ctx)
{
+ struct task_struct *target_task = NULL;
int ret;
+ u64 value;
- if (status != 0 || sig == 0 || pid == 0)
+ if (status != 0 || pid == 0)
return 0;
if ((bpf_get_current_pid_tgid() >> 32) == pid) {
- if (signal_thread)
- ret = bpf_send_signal_thread(sig);
- else
- ret = bpf_send_signal(sig);
+ if (target_pid) {
+ target_task = bpf_task_from_pid(target_pid);
+ if (!target_task)
+ return 0;
+ value = 8;
+ }
+
+ if (signal_thread) {
+ if (target_pid)
+ ret = bpf_send_signal_task(target_task, sig, PIDTYPE_PID, value);
+ else
+ ret = bpf_send_signal_thread(sig);
+ } else {
+ if (target_pid)
+ ret = bpf_send_signal_task(target_task, sig, PIDTYPE_TGID, value);
+ else
+ ret = bpf_send_signal(sig);
+ }
if (ret == 0)
status = 1;
}
+ if (target_task)
+ bpf_task_release(target_task);
+
return 0;
}
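In this extended test, bpf_send_signal_task() targets an arbitrary task and carries a 64-bit cookie, with the pid_type argument mirroring the older helpers: PIDTYPE_PID delivers to the specific thread (like bpf_send_signal_thread()), PIDTYPE_TGID to the thread group (like bpf_send_signal()). A hypothetical user-space receiver, assuming the cookie is surfaced through siginfo (the field used is an assumption, not verified here):

    static volatile sig_atomic_t got_cookie;

    /* SA_SIGINFO handler; the u64 cookie is assumed to arrive in si_value */
    static void handler(int sig, siginfo_t *info, void *uctx)
    {
            if (info->si_value.sival_int == 8)  /* the value set above */
                    got_cookie = 1;
    }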
diff --git a/tools/testing/selftests/bpf/progs/test_set_remove_xattr.c b/tools/testing/selftests/bpf/progs/test_set_remove_xattr.c
new file mode 100644
index 000000000000..6a612cf168d3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_set_remove_xattr.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__u32 monitored_pid;
+
+const char xattr_foo[] = "security.bpf.foo";
+const char xattr_bar[] = "security.bpf.bar";
+static const char xattr_selinux[] = "security.selinux";
+char value_bar[] = "world";
+char read_value[32];
+
+bool set_security_bpf_bar_success;
+bool remove_security_bpf_bar_success;
+bool set_security_selinux_fail;
+bool remove_security_selinux_fail;
+
+char name_buf[32];
+
+static inline bool name_match_foo(const char *name)
+{
+ bpf_probe_read_kernel(name_buf, sizeof(name_buf), name);
+
+ return !bpf_strncmp(name_buf, sizeof(xattr_foo), xattr_foo);
+}
+
+/* Test bpf_set_dentry_xattr and bpf_remove_dentry_xattr */
+SEC("lsm.s/inode_getxattr")
+int BPF_PROG(test_inode_getxattr, struct dentry *dentry, char *name)
+{
+ struct bpf_dynptr value_ptr;
+ __u32 pid;
+ int ret;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ /* Only do the following for security.bpf.foo */
+ if (!name_match_foo(name))
+ return 0;
+
+ bpf_dynptr_from_mem(read_value, sizeof(read_value), 0, &value_ptr);
+
+ /* read security.bpf.bar */
+ ret = bpf_get_dentry_xattr(dentry, xattr_bar, &value_ptr);
+
+ if (ret < 0) {
+ /* If security.bpf.bar doesn't exist, set it */
+ bpf_dynptr_from_mem(value_bar, sizeof(value_bar), 0, &value_ptr);
+
+ ret = bpf_set_dentry_xattr(dentry, xattr_bar, &value_ptr, 0);
+ if (!ret)
+ set_security_bpf_bar_success = true;
+ ret = bpf_set_dentry_xattr(dentry, xattr_selinux, &value_ptr, 0);
+ if (ret)
+ set_security_selinux_fail = true;
+ } else {
+ /* If security.bpf.bar exists, remove it */
+ ret = bpf_remove_dentry_xattr(dentry, xattr_bar);
+ if (!ret)
+ remove_security_bpf_bar_success = true;
+
+ ret = bpf_remove_dentry_xattr(dentry, xattr_selinux);
+ if (ret)
+ remove_security_selinux_fail = true;
+ }
+
+ return 0;
+}
+
+bool locked_set_security_bpf_bar_success;
+bool locked_remove_security_bpf_bar_success;
+bool locked_set_security_selinux_fail;
+bool locked_remove_security_selinux_fail;
+
+/* Test bpf_set_dentry_xattr_locked and bpf_remove_dentry_xattr_locked.
+ * It is not necessary to differentiate the _locked version from the
+ * non-_locked version in the BPF program; the verifier will fix them up
+ * properly.
+ */
+SEC("lsm.s/inode_setxattr")
+int BPF_PROG(test_inode_setxattr, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct bpf_dynptr value_ptr;
+ __u32 pid;
+ int ret;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ /* Only do the following for security.bpf.foo */
+ if (!name_match_foo(name))
+ return 0;
+
+ bpf_dynptr_from_mem(read_value, sizeof(read_value), 0, &value_ptr);
+
+ /* read security.bpf.bar */
+ ret = bpf_get_dentry_xattr(dentry, xattr_bar, &value_ptr);
+
+ if (ret < 0) {
+ /* If security.bpf.bar doesn't exist, set it */
+ bpf_dynptr_from_mem(value_bar, sizeof(value_bar), 0, &value_ptr);
+
+ ret = bpf_set_dentry_xattr(dentry, xattr_bar, &value_ptr, 0);
+ if (!ret)
+ locked_set_security_bpf_bar_success = true;
+ ret = bpf_set_dentry_xattr(dentry, xattr_selinux, &value_ptr, 0);
+ if (ret)
+ locked_set_security_selinux_fail = true;
+ } else {
+ /* If security.bpf.bar exists, remove it */
+ ret = bpf_remove_dentry_xattr(dentry, xattr_bar);
+ if (!ret)
+ locked_remove_security_bpf_bar_success = true;
+
+ ret = bpf_remove_dentry_xattr(dentry, xattr_selinux);
+ if (ret)
+ locked_remove_security_selinux_fail = true;
+ }
+
+ return 0;
+}
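A hypothetical user-space driver for the two hooks above: set monitored_pid, then issue getxattr(2) on a test file so the lsm.s programs fire (the path, buffer, and skeleton accessor names are illustrative):

    char buf[32];

    skel->bss->monitored_pid = getpid();
    /* fires lsm.s/inode_getxattr; the first call sets security.bpf.bar */
    getxattr("/tmp/testfile", "security.bpf.foo", buf, sizeof(buf));
    ASSERT_TRUE(skel->bss->set_security_bpf_bar_success, "set bar");
    ASSERT_TRUE(skel->bss->set_security_selinux_fail, "selinux rejected");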
diff --git a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c
new file mode 100644
index 000000000000..34b30e2603f0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_kfuncs.h"
+#include "err.h"
+
+char _license[] SEC("license") = "GPL";
+
+#ifndef SHA256_DIGEST_SIZE
+#define SHA256_DIGEST_SIZE 32
+#endif
+
+#define MAX_SIG_SIZE 1024
+
+/* By default, "fsverity sign" signs a file with the fsverity_formatted_digest
+ * of the file. On the kernel side, fsverity_formatted_digest is only used
+ * with CONFIG_FS_VERITY_BUILTIN_SIGNATURES. However, BPF LSM does not
+ * require CONFIG_FS_VERITY_BUILTIN_SIGNATURES, so vmlinux.h may not have
+ * fsverity_formatted_digest. In this test, we intentionally avoid using
+ * fsverity_formatted_digest.
+ *
+ * Luckily, fsverity_formatted_digest is simply an 8-byte magic followed by
+ * a fsverity_digest. We use a char array the size of fsverity_formatted_digest
+ * plus SHA256_DIGEST_SIZE. The magic part is filled by user space,
+ * and the rest is filled by bpf_get_fsverity_digest.
+ *
+ * Note that generating signatures based on fsverity_formatted_digest is
+ * the design choice of this selftest (and "fsverity sign"). With BPF
+ * LSM, we have the flexibility to generate signatures based on other data,
+ * for example, fsverity_digest or only its digest[] member.
+ */
+#define MAGIC_SIZE 8
+#define SIZEOF_STRUCT_FSVERITY_DIGEST 4 /* sizeof(struct fsverity_digest) */
+char digest[MAGIC_SIZE + SIZEOF_STRUCT_FSVERITY_DIGEST + SHA256_DIGEST_SIZE];
+
+__u32 monitored_pid;
+char sig[MAX_SIG_SIZE];
+__u32 sig_size;
+__s32 user_keyring_serial;
+
+SEC("lsm.s/file_open")
+int BPF_PROG(test_file_open, struct file *f)
+{
+ struct bpf_dynptr digest_ptr, sig_ptr;
+ struct bpf_key *trusted_keyring;
+ __u32 pid;
+ int ret;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ /* digest_ptr points to fsverity_digest */
+ bpf_dynptr_from_mem(digest + MAGIC_SIZE, sizeof(digest) - MAGIC_SIZE, 0, &digest_ptr);
+
+ ret = bpf_get_fsverity_digest(f, &digest_ptr);
+ /* No verity, allow access */
+ if (ret < 0)
+ return 0;
+
+ /* Move digest_ptr to fsverity_formatted_digest */
+ bpf_dynptr_from_mem(digest, sizeof(digest), 0, &digest_ptr);
+
+ /* Read signature from xattr */
+ bpf_dynptr_from_mem(sig, sizeof(sig), 0, &sig_ptr);
+ ret = bpf_get_file_xattr(f, "user.sig", &sig_ptr);
+ /* No signature, reject access */
+ if (ret < 0)
+ return -EPERM;
+
+ trusted_keyring = bpf_lookup_user_key(user_keyring_serial, 0);
+ if (!trusted_keyring)
+ return -ENOENT;
+
+ /* Verify signature */
+ ret = bpf_verify_pkcs7_signature(&digest_ptr, &sig_ptr, trusted_keyring);
+
+ bpf_key_put(trusted_keyring);
+
+ set_if_not_errno_or_zero(ret, -EFAULT);
+
+ return ret;
+}
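The MAGIC_SIZE bytes at the front of digest[] are what turn the buffer into a fsverity_formatted_digest: user space is expected to write the 8-byte "FSVerity" magic there before triggering file_open. A sketch (the skeleton accessor is illustrative):

    /* fsverity_formatted_digest = 8-byte magic + struct fsverity_digest */
    memcpy(skel->bss->digest, "FSVerity", 8);
    skel->bss->monitored_pid = getpid();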
diff --git a/tools/testing/selftests/bpf/progs/test_siphash.h b/tools/testing/selftests/bpf/progs/test_siphash.h
new file mode 100644
index 000000000000..5d3a7ec36780
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_siphash.h
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#ifndef _TEST_SIPHASH_H
+#define _TEST_SIPHASH_H
+
+/* include/linux/bitops.h */
+static inline u64 rol64(u64 word, unsigned int shift)
+{
+ return (word << (shift & 63)) | (word >> ((-shift) & 63));
+}
+
+/* include/linux/siphash.h */
+#define SIPHASH_PERMUTATION(a, b, c, d) ( \
+ (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
+ (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
+ (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
+ (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))
+
+#define SIPHASH_CONST_0 0x736f6d6570736575ULL
+#define SIPHASH_CONST_1 0x646f72616e646f6dULL
+#define SIPHASH_CONST_2 0x6c7967656e657261ULL
+#define SIPHASH_CONST_3 0x7465646279746573ULL
+
+/* lib/siphash.c */
+#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
+
+#define PREAMBLE(len) \
+ u64 v0 = SIPHASH_CONST_0; \
+ u64 v1 = SIPHASH_CONST_1; \
+ u64 v2 = SIPHASH_CONST_2; \
+ u64 v3 = SIPHASH_CONST_3; \
+ u64 b = ((u64)(len)) << 56; \
+ v3 ^= key->key[1]; \
+ v2 ^= key->key[0]; \
+ v1 ^= key->key[1]; \
+ v0 ^= key->key[0];
+
+#define POSTAMBLE \
+ v3 ^= b; \
+ SIPROUND; \
+ SIPROUND; \
+ v0 ^= b; \
+ v2 ^= 0xff; \
+ SIPROUND; \
+ SIPROUND; \
+ SIPROUND; \
+ SIPROUND; \
+ return (v0 ^ v1) ^ (v2 ^ v3);
+
+static inline u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key)
+{
+ PREAMBLE(16)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= second;
+ POSTAMBLE
+}
+#endif
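Callers use this exactly like the kernel's siphash_2u64(): 16 bytes of input hashed under a 128-bit key. A minimal sketch, assuming siphash_key_t (a struct holding two u64s, as in include/linux/siphash.h) is available from vmlinux.h or a local definition:

    static const siphash_key_t test_key = {
            .key = { 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL },
    };

    static u64 hash_pair(u64 a, u64 b)
    {
            return siphash_2u64(a, b, &test_key);
    }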
diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c
index 1ecd987005d2..3079244c7f96 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_assign.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c
@@ -15,7 +15,18 @@
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_misc.h"
+#if defined(IPROUTE2_HAVE_LIBBPF)
+/* Use a new-style map definition. */
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __type(key, int);
+ __type(value, __u64);
+ __uint(pinning, LIBBPF_PIN_BY_NAME);
+ __uint(max_entries, 1);
+} server_map SEC(".maps");
+#else
/* Pin map under /sys/fs/bpf/tc/globals/<map name> */
#define PIN_GLOBAL_NS 2
@@ -35,8 +46,8 @@ struct {
.max_elem = 1,
.pinning = PIN_GLOBAL_NS,
};
+#endif
-int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
/* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
@@ -47,7 +58,6 @@ get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp)
void *data = (void *)(long)skb->data;
struct bpf_sock_tuple *result;
struct ethhdr *eth;
- __u64 tuple_len;
__u8 proto = 0;
__u64 ihl_len;
@@ -84,13 +94,13 @@ get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp)
return NULL;
*tcp = (proto == IPPROTO_TCP);
+ __sink(ihl_len);
return result;
}
static inline int
handle_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
- struct bpf_sock_tuple ln = {0};
struct bpf_sock *sk;
const int zero = 0;
size_t tuple_len;
@@ -122,7 +132,6 @@ assign:
static inline int
handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
- struct bpf_sock_tuple ln = {0};
struct bpf_sock *sk;
const int zero = 0;
size_t tuple_len;
@@ -159,13 +168,12 @@ assign:
return ret;
}
-SEC("classifier/sk_assign_test")
+SEC("tc")
int bpf_sk_assign_test(struct __sk_buff *skb)
{
- struct bpf_sock_tuple *tuple, ln = {0};
+ struct bpf_sock_tuple *tuple;
bool ipv4 = false;
bool tcp = false;
- int tuple_len;
int ret = 0;
tuple = get_tuple(skb, &ipv4, &tcp);
diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c
new file mode 100644
index 000000000000..dcf46adfda04
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-2.0
+#define IPROUTE2_HAVE_LIBBPF
+#include "test_sk_assign.c"
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
index 1032b292af5b..71f844b9b902 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
@@ -64,36 +64,48 @@ static const int PROG_DONE = 1;
static const __u32 KEY_SERVER_A = SERVER_A;
static const __u32 KEY_SERVER_B = SERVER_B;
+static const __u16 SRC_PORT = bpf_htons(8008);
+static const __u32 SRC_IP4 = IP4(127, 0, 0, 2);
+static const __u32 SRC_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000002);
+
static const __u16 DST_PORT = 7007; /* Host byte order */
static const __u32 DST_IP4 = IP4(127, 0, 0, 1);
static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001);
-SEC("sk_lookup/lookup_pass")
+SEC("sk_lookup")
int lookup_pass(struct bpf_sk_lookup *ctx)
{
return SK_PASS;
}
-SEC("sk_lookup/lookup_drop")
+SEC("sk_lookup")
int lookup_drop(struct bpf_sk_lookup *ctx)
{
return SK_DROP;
}
-SEC("sk_reuseport/reuse_pass")
+SEC("sk_lookup")
+int check_ifindex(struct bpf_sk_lookup *ctx)
+{
+ if (ctx->ingress_ifindex == 1)
+ return SK_DROP;
+ return SK_PASS;
+}
+
+SEC("sk_reuseport")
int reuseport_pass(struct sk_reuseport_md *ctx)
{
return SK_PASS;
}
-SEC("sk_reuseport/reuse_drop")
+SEC("sk_reuseport")
int reuseport_drop(struct sk_reuseport_md *ctx)
{
return SK_DROP;
}
/* Redirect packets destined for port DST_PORT to socket at redir_map[0]. */
-SEC("sk_lookup/redir_port")
+SEC("sk_lookup")
int redir_port(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -112,7 +124,7 @@ int redir_port(struct bpf_sk_lookup *ctx)
}
/* Redirect packets destined for DST_IP4 address to socket at redir_map[0]. */
-SEC("sk_lookup/redir_ip4")
+SEC("sk_lookup")
int redir_ip4(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -135,7 +147,7 @@ int redir_ip4(struct bpf_sk_lookup *ctx)
}
/* Redirect packets destined for DST_IP6 address to socket at redir_map[0]. */
-SEC("sk_lookup/redir_ip6")
+SEC("sk_lookup")
int redir_ip6(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -160,7 +172,7 @@ int redir_ip6(struct bpf_sk_lookup *ctx)
return err ? SK_DROP : SK_PASS;
}
-SEC("sk_lookup/select_sock_a")
+SEC("sk_lookup")
int select_sock_a(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -175,7 +187,7 @@ int select_sock_a(struct bpf_sk_lookup *ctx)
return err ? SK_DROP : SK_PASS;
}
-SEC("sk_lookup/select_sock_a_no_reuseport")
+SEC("sk_lookup")
int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -190,7 +202,7 @@ int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx)
return err ? SK_DROP : SK_PASS;
}
-SEC("sk_reuseport/select_sock_b")
+SEC("sk_reuseport")
int select_sock_b(struct sk_reuseport_md *ctx)
{
__u32 key = KEY_SERVER_B;
@@ -201,7 +213,7 @@ int select_sock_b(struct sk_reuseport_md *ctx)
}
/* Check that bpf_sk_assign() returns -EEXIST if socket already selected. */
-SEC("sk_lookup/sk_assign_eexist")
+SEC("sk_lookup")
int sk_assign_eexist(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -234,7 +246,7 @@ out:
}
/* Check that bpf_sk_assign(BPF_SK_LOOKUP_F_REPLACE) can override selection. */
-SEC("sk_lookup/sk_assign_replace_flag")
+SEC("sk_lookup")
int sk_assign_replace_flag(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -266,7 +278,7 @@ out:
}
/* Check that bpf_sk_assign(sk=NULL) is accepted. */
-SEC("sk_lookup/sk_assign_null")
+SEC("sk_lookup")
int sk_assign_null(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk = NULL;
@@ -309,7 +321,7 @@ out:
}
/* Check that selected sk is accessible through context. */
-SEC("sk_lookup/access_ctx_sk")
+SEC("sk_lookup")
int access_ctx_sk(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk1 = NULL, *sk2 = NULL;
@@ -375,11 +387,11 @@ out:
* are not covered because they give bogus results, that is the
* verifier ignores the offset.
*/
-SEC("sk_lookup/ctx_narrow_access")
+SEC("sk_lookup")
int ctx_narrow_access(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
- int err, family;
+ __u32 val_u32;
bool v4;
v4 = (ctx->family == AF_INET);
@@ -398,11 +410,22 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
if (LSW(ctx->protocol, 0) != IPPROTO_TCP)
return SK_DROP;
- /* Narrow loads from remote_port field. Expect non-0 value. */
- if (LSB(ctx->remote_port, 0) == 0 && LSB(ctx->remote_port, 1) == 0 &&
- LSB(ctx->remote_port, 2) == 0 && LSB(ctx->remote_port, 3) == 0)
+ /* Narrow loads from remote_port field. Expect SRC_PORT. */
+ if (LSB(ctx->remote_port, 0) != ((SRC_PORT >> 0) & 0xff) ||
+ LSB(ctx->remote_port, 1) != ((SRC_PORT >> 8) & 0xff))
+ return SK_DROP;
+ if (LSW(ctx->remote_port, 0) != SRC_PORT)
return SK_DROP;
- if (LSW(ctx->remote_port, 0) == 0)
+
+ /*
+ * NOTE: 4-byte load from bpf_sk_lookup at remote_port offset
+ * is quirky. It gets rewritten by the access converter to a
+ * 2-byte load for backward compatibility. Treating the load
+ * result as a be16 value makes the code portable across
+ * little- and big-endian platforms.
+ */
+ val_u32 = *(__u32 *)&ctx->remote_port;
+ if (val_u32 != SRC_PORT)
return SK_DROP;
/* Narrow loads from local_port field. Expect DST_PORT. */
@@ -415,11 +438,14 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
/* Narrow loads from IPv4 fields */
if (v4) {
- /* Expect non-0.0.0.0 in remote_ip4 */
- if (LSB(ctx->remote_ip4, 0) == 0 && LSB(ctx->remote_ip4, 1) == 0 &&
- LSB(ctx->remote_ip4, 2) == 0 && LSB(ctx->remote_ip4, 3) == 0)
+ /* Expect SRC_IP4 in remote_ip4 */
+ if (LSB(ctx->remote_ip4, 0) != ((SRC_IP4 >> 0) & 0xff) ||
+ LSB(ctx->remote_ip4, 1) != ((SRC_IP4 >> 8) & 0xff) ||
+ LSB(ctx->remote_ip4, 2) != ((SRC_IP4 >> 16) & 0xff) ||
+ LSB(ctx->remote_ip4, 3) != ((SRC_IP4 >> 24) & 0xff))
return SK_DROP;
- if (LSW(ctx->remote_ip4, 0) == 0 && LSW(ctx->remote_ip4, 1) == 0)
+ if (LSW(ctx->remote_ip4, 0) != ((SRC_IP4 >> 0) & 0xffff) ||
+ LSW(ctx->remote_ip4, 1) != ((SRC_IP4 >> 16) & 0xffff))
return SK_DROP;
/* Expect DST_IP4 in local_ip4 */
@@ -448,20 +474,32 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
/* Narrow loads from IPv6 fields */
if (!v4) {
- /* Expect non-:: IP in remote_ip6 */
- if (LSB(ctx->remote_ip6[0], 0) == 0 && LSB(ctx->remote_ip6[0], 1) == 0 &&
- LSB(ctx->remote_ip6[0], 2) == 0 && LSB(ctx->remote_ip6[0], 3) == 0 &&
- LSB(ctx->remote_ip6[1], 0) == 0 && LSB(ctx->remote_ip6[1], 1) == 0 &&
- LSB(ctx->remote_ip6[1], 2) == 0 && LSB(ctx->remote_ip6[1], 3) == 0 &&
- LSB(ctx->remote_ip6[2], 0) == 0 && LSB(ctx->remote_ip6[2], 1) == 0 &&
- LSB(ctx->remote_ip6[2], 2) == 0 && LSB(ctx->remote_ip6[2], 3) == 0 &&
- LSB(ctx->remote_ip6[3], 0) == 0 && LSB(ctx->remote_ip6[3], 1) == 0 &&
- LSB(ctx->remote_ip6[3], 2) == 0 && LSB(ctx->remote_ip6[3], 3) == 0)
+ /* Expect SRC_IP6 in remote_ip6 */
+ if (LSB(ctx->remote_ip6[0], 0) != ((SRC_IP6[0] >> 0) & 0xff) ||
+ LSB(ctx->remote_ip6[0], 1) != ((SRC_IP6[0] >> 8) & 0xff) ||
+ LSB(ctx->remote_ip6[0], 2) != ((SRC_IP6[0] >> 16) & 0xff) ||
+ LSB(ctx->remote_ip6[0], 3) != ((SRC_IP6[0] >> 24) & 0xff) ||
+ LSB(ctx->remote_ip6[1], 0) != ((SRC_IP6[1] >> 0) & 0xff) ||
+ LSB(ctx->remote_ip6[1], 1) != ((SRC_IP6[1] >> 8) & 0xff) ||
+ LSB(ctx->remote_ip6[1], 2) != ((SRC_IP6[1] >> 16) & 0xff) ||
+ LSB(ctx->remote_ip6[1], 3) != ((SRC_IP6[1] >> 24) & 0xff) ||
+ LSB(ctx->remote_ip6[2], 0) != ((SRC_IP6[2] >> 0) & 0xff) ||
+ LSB(ctx->remote_ip6[2], 1) != ((SRC_IP6[2] >> 8) & 0xff) ||
+ LSB(ctx->remote_ip6[2], 2) != ((SRC_IP6[2] >> 16) & 0xff) ||
+ LSB(ctx->remote_ip6[2], 3) != ((SRC_IP6[2] >> 24) & 0xff) ||
+ LSB(ctx->remote_ip6[3], 0) != ((SRC_IP6[3] >> 0) & 0xff) ||
+ LSB(ctx->remote_ip6[3], 1) != ((SRC_IP6[3] >> 8) & 0xff) ||
+ LSB(ctx->remote_ip6[3], 2) != ((SRC_IP6[3] >> 16) & 0xff) ||
+ LSB(ctx->remote_ip6[3], 3) != ((SRC_IP6[3] >> 24) & 0xff))
return SK_DROP;
- if (LSW(ctx->remote_ip6[0], 0) == 0 && LSW(ctx->remote_ip6[0], 1) == 0 &&
- LSW(ctx->remote_ip6[1], 0) == 0 && LSW(ctx->remote_ip6[1], 1) == 0 &&
- LSW(ctx->remote_ip6[2], 0) == 0 && LSW(ctx->remote_ip6[2], 1) == 0 &&
- LSW(ctx->remote_ip6[3], 0) == 0 && LSW(ctx->remote_ip6[3], 1) == 0)
+ if (LSW(ctx->remote_ip6[0], 0) != ((SRC_IP6[0] >> 0) & 0xffff) ||
+ LSW(ctx->remote_ip6[0], 1) != ((SRC_IP6[0] >> 16) & 0xffff) ||
+ LSW(ctx->remote_ip6[1], 0) != ((SRC_IP6[1] >> 0) & 0xffff) ||
+ LSW(ctx->remote_ip6[1], 1) != ((SRC_IP6[1] >> 16) & 0xffff) ||
+ LSW(ctx->remote_ip6[2], 0) != ((SRC_IP6[2] >> 0) & 0xffff) ||
+ LSW(ctx->remote_ip6[2], 1) != ((SRC_IP6[2] >> 16) & 0xffff) ||
+ LSW(ctx->remote_ip6[3], 0) != ((SRC_IP6[3] >> 0) & 0xffff) ||
+ LSW(ctx->remote_ip6[3], 1) != ((SRC_IP6[3] >> 16) & 0xffff))
return SK_DROP;
/* Expect DST_IP6 in local_ip6 */
if (LSB(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xff) ||
@@ -533,7 +571,7 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
}
/* Check that sk_assign rejects SERVER_A socket with -ESOCKNOSUPPORT */
-SEC("sk_lookup/sk_assign_esocknosupport")
+SEC("sk_lookup")
int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -558,28 +596,28 @@ out:
return ret;
}
-SEC("sk_lookup/multi_prog_pass1")
+SEC("sk_lookup")
int multi_prog_pass1(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
-SEC("sk_lookup/multi_prog_pass2")
+SEC("sk_lookup")
int multi_prog_pass2(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
-SEC("sk_lookup/multi_prog_drop1")
+SEC("sk_lookup")
int multi_prog_drop1(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_DROP;
}
-SEC("sk_lookup/multi_prog_drop2")
+SEC("sk_lookup")
int multi_prog_drop2(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
@@ -603,25 +641,20 @@ static __always_inline int select_server_a(struct bpf_sk_lookup *ctx)
return SK_PASS;
}
-SEC("sk_lookup/multi_prog_redir1")
+SEC("sk_lookup")
int multi_prog_redir1(struct bpf_sk_lookup *ctx)
{
- int ret;
-
- ret = select_server_a(ctx);
+ (void)select_server_a(ctx);
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
-SEC("sk_lookup/multi_prog_redir2")
+SEC("sk_lookup")
int multi_prog_redir2(struct bpf_sk_lookup *ctx)
{
- int ret;
-
- ret = select_server_a(ctx);
+ (void)select_server_a(ctx);
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
char _license[] SEC("license") = "Dual BSD/GPL";
-__u32 _version SEC("version") = 1;
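The LSB()/LSW() helpers used throughout ctx_narrow_access() read a single byte or 16-bit word of a context field, which is what forces the verifier down its narrow-load conversion path. They are defined earlier in this file, roughly as:

    /* n-th byte / n-th 16-bit word of a field */
    #define LSB(value, index) (((__u8 *)&(value))[(index)])
    #define LSW(value, index) (((__u16 *)&(value))[(index)])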
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
index e83d0b48d80c..e9efc3263022 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
@@ -15,7 +15,6 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
/* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
@@ -24,8 +23,8 @@ static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
bool *ipv4)
{
struct bpf_sock_tuple *result;
+ __u64 ihl_len = 0;
__u8 proto = 0;
- __u64 ihl_len;
if (eth_proto == bpf_htons(ETH_P_IP)) {
struct iphdr *iph = (struct iphdr *)(data + nh_off);
@@ -53,8 +52,8 @@ static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
return result;
}
-SEC("classifier/sk_lookup_success")
-int bpf_sk_lookup_test0(struct __sk_buff *skb)
+SEC("?tc")
+int sk_lookup_success(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
@@ -79,8 +78,8 @@ int bpf_sk_lookup_test0(struct __sk_buff *skb)
return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
}
-SEC("classifier/sk_lookup_success_simple")
-int bpf_sk_lookup_test1(struct __sk_buff *skb)
+SEC("?tc")
+int sk_lookup_success_simple(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
@@ -91,8 +90,8 @@ int bpf_sk_lookup_test1(struct __sk_buff *skb)
return 0;
}
-SEC("classifier/fail_use_after_free")
-int bpf_sk_lookup_uaf(struct __sk_buff *skb)
+SEC("?tc")
+int err_use_after_free(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
@@ -106,12 +105,11 @@ int bpf_sk_lookup_uaf(struct __sk_buff *skb)
return family;
}
-SEC("classifier/fail_modify_sk_pointer")
-int bpf_sk_lookup_modptr(struct __sk_buff *skb)
+SEC("?tc")
+int err_modify_sk_pointer(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
- __u32 family;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
if (sk) {
@@ -121,12 +119,11 @@ int bpf_sk_lookup_modptr(struct __sk_buff *skb)
return 0;
}
-SEC("classifier/fail_modify_sk_or_null_pointer")
-int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
+SEC("?tc")
+int err_modify_sk_or_null_pointer(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
- __u32 family;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
sk += 1;
@@ -135,8 +132,8 @@ int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
return 0;
}
-SEC("classifier/fail_no_release")
-int bpf_sk_lookup_test2(struct __sk_buff *skb)
+SEC("?tc")
+int err_no_release(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -144,8 +141,8 @@ int bpf_sk_lookup_test2(struct __sk_buff *skb)
return 0;
}
-SEC("classifier/fail_release_twice")
-int bpf_sk_lookup_test3(struct __sk_buff *skb)
+SEC("?tc")
+int err_release_twice(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
@@ -156,8 +153,8 @@ int bpf_sk_lookup_test3(struct __sk_buff *skb)
return 0;
}
-SEC("classifier/fail_release_unchecked")
-int bpf_sk_lookup_test4(struct __sk_buff *skb)
+SEC("?tc")
+int err_release_unchecked(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
@@ -173,8 +170,8 @@ void lookup_no_release(struct __sk_buff *skb)
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
}
-SEC("classifier/fail_no_release_subcall")
-int bpf_sk_lookup_test5(struct __sk_buff *skb)
+SEC("?tc")
+int err_no_release_subcall(struct __sk_buff *skb)
{
lookup_no_release(skb);
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
index 8e94e5c080aa..40531e56776e 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
@@ -68,7 +68,7 @@ static void set_task_info(struct sock *sk)
}
SEC("fentry/inet_csk_listen_start")
-int BPF_PROG(trace_inet_csk_listen_start, struct sock *sk, int backlog)
+int BPF_PROG(trace_inet_csk_listen_start, struct sock *sk)
{
set_task_info(sk);
@@ -84,7 +84,7 @@ int BPF_PROG(trace_tcp_connect, struct sock *sk)
}
SEC("fexit/inet_csk_accept")
-int BPF_PROG(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern,
+int BPF_PROG(inet_csk_accept, struct sock *sk, struct proto_accept_arg *arg,
struct sock *accepted_sk)
{
set_task_info(accepted_sk);
@@ -92,4 +92,20 @@ int BPF_PROG(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern,
return 0;
}
+SEC("tp_btf/tcp_retransmit_synack")
+int BPF_PROG(tcp_retransmit_synack, struct sock *sk, struct request_sock *req)
+{
+ /* load only test */
+ bpf_sk_storage_get(&sk_stg_map, sk, 0, 0);
+ bpf_sk_storage_get(&sk_stg_map, req->sk, 0, 0);
+ return 0;
+}
+
+SEC("tp_btf/tcp_bad_csum")
+int BPF_PROG(tcp_bad_csum, struct sk_buff *skb)
+{
+ bpf_sk_storage_get(&sk_stg_map, skb->sk, 0, 0);
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
deleted file mode 100644
index 552f2090665c..000000000000
--- a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018 Facebook
-
-#include <linux/bpf.h>
-#include <linux/pkt_cls.h>
-
-#include <string.h>
-
-#include <bpf/bpf_helpers.h>
-
-#define NUM_CGROUP_LEVELS 4
-
-struct bpf_map_def SEC("maps") cgroup_ids = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u64),
- .max_entries = NUM_CGROUP_LEVELS,
-};
-
-static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level)
-{
- __u64 id;
-
- /* [1] &level passed to external function that may change it, it's
- * incompatible with loop unroll.
- */
- id = bpf_skb_ancestor_cgroup_id(skb, level);
- bpf_map_update_elem(&cgroup_ids, &level, &id, 0);
-}
-
-SEC("cgroup_id_logger")
-int log_cgroup_id(struct __sk_buff *skb)
-{
- /* Loop unroll can't be used here due to [1]. Unrolling manually.
- * Number of calls should be in sync with NUM_CGROUP_LEVELS.
- */
- log_nth_level(skb, 0);
- log_nth_level(skb, 1);
- log_nth_level(skb, 2);
- log_nth_level(skb, 3);
-
- return TC_ACT_OK;
-}
-
-int _version SEC("version") = 1;
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
index b02ea589ce7e..a724a70c6700 100644
--- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
@@ -3,13 +3,14 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-int _version SEC("version") = 1;
+#include "bpf_compiler.h"
+
char _license[] SEC("license") = "GPL";
-SEC("skb_ctx")
+SEC("tc")
int process(struct __sk_buff *skb)
{
- #pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < 5; i++) {
if (skb->cb[i] != i + 1)
return 1;
@@ -25,6 +26,12 @@ int process(struct __sk_buff *skb)
return 1;
if (skb->gso_size != 10)
return 1;
+ if (skb->ingress_ifindex != 11)
+ return 1;
+ if (skb->ifindex != 1)
+ return 1;
+ if (skb->hwtstamp != 11)
+ return 1;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_skb_helpers.c b/tools/testing/selftests/bpf/progs/test_skb_helpers.c
index bb3fbf1a29e3..507215791c5b 100644
--- a/tools/testing/selftests/bpf/progs/test_skb_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_helpers.c
@@ -14,7 +14,7 @@ struct {
char _license[] SEC("license") = "GPL";
-SEC("classifier/test_skb_helpers")
+SEC("tc")
int test_skb_helpers(struct __sk_buff *skb)
{
struct task_struct *task;
diff --git a/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c b/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c
new file mode 100644
index 000000000000..4cfa42aa9436
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Hengqi Chen */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_tracing_net.h"
+
+const volatile pid_t my_pid = 0;
+char path[256] = {};
+
+SEC("fentry/unix_listen")
+int BPF_PROG(unix_listen, struct socket *sock, int backlog)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+ struct unix_sock *unix_sk;
+ int i, len;
+
+ if (pid != my_pid)
+ return 0;
+
+ unix_sk = (struct unix_sock *)bpf_skc_to_unix_sock(sock->sk);
+ if (!unix_sk)
+ return 0;
+
+ if (unix_sk->addr->name->sun_path[0])
+ return 0;
+
+ len = unix_sk->addr->len - sizeof(short);
+ path[0] = '@';
+ for (i = 1; i < len; i++) {
+ if (i >= (int)sizeof(struct sockaddr_un))
+ break;
+
+ path[i] = unix_sk->addr->name->sun_path[i];
+ }
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
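For reference, the layout the copy loop above walks: an abstract unix address is marked by a leading NUL in sun_path, and its length excludes only the address family field, hence the sizeof(short) subtraction:

    /* struct sockaddr_un for an abstract socket "\0foo":
     *   sun_family          sa_family_t (an unsigned short)
     *   sun_path[0] == '\0' marks the address as abstract
     *   sun_path[1..]       the name, not NUL-terminated;
     *                       name length = addr->len - sizeof(short)
     * The '@' stored in path[0] is the conventional rendering of the
     * leading NUL, as in /proc/net/unix.
     */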
diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c
index 374ccef704e1..adece9f91f58 100644
--- a/tools/testing/selftests/bpf/progs/test_skeleton.c
+++ b/tools/testing/selftests/bpf/progs/test_skeleton.c
@@ -5,6 +5,8 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#define __read_mostly SEC(".data.read_mostly")
+
struct s {
int a;
long long b;
@@ -38,10 +40,37 @@ extern int LINUX_KERNEL_VERSION __kconfig;
bool bpf_syscall = 0;
int kern_ver = 0;
+struct s out5 = {};
+
+
+const volatile int in_dynarr_sz SEC(".rodata.dyn");
+const volatile int in_dynarr[4] SEC(".rodata.dyn") = { -1, -2, -3, -4 };
+
+int out_dynarr[4] SEC(".data.dyn") = { 1, 2, 3, 4 };
+
+int read_mostly_var __read_mostly;
+int out_mostly_var;
+
+char huge_arr[16 * 1024 * 1024];
+
+/* non-mmapable custom .data section */
+
+struct my_value { int x, y, z; };
+
+__hidden int zero_key SEC(".data.non_mmapable");
+static struct my_value zero_value SEC(".data.non_mmapable");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct my_value);
+ __uint(max_entries, 1);
+} my_map SEC(".maps");
+
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
- static volatile struct s out5;
+ int i;
out1 = in1;
out2 = in2;
@@ -53,6 +82,16 @@ int handler(const void *ctx)
bpf_syscall = CONFIG_BPF_SYSCALL;
kern_ver = LINUX_KERNEL_VERSION;
+ for (i = 0; i < in_dynarr_sz; i++)
+ out_dynarr[i] = in_dynarr[i];
+
+ out_mostly_var = read_mostly_var;
+
+ huge_arr[sizeof(huge_arr) - 1] = 123;
+
+ /* make sure zero_key and zero_value are not optimized out */
+ bpf_map_update_elem(&my_map, &zero_key, &zero_value, BPF_ANY);
+
return 0;
}
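On the user-space side, each custom datasec above becomes its own member of the generated skeleton (section-name dots become underscores), and read-only sections must be filled in between open and load. Roughly, assuming standard bpftool-generated accessor names:

    struct test_skeleton *skel = test_skeleton__open();

    /* .rodata.dyn freezes at load time, so seed it while still open */
    skel->rodata_dyn->in_dynarr_sz = 4;
    test_skeleton__load(skel);
    test_skeleton__attach(skel);
    /* trigger raw_tp/sys_enter, then compare skel->data_dyn->out_dynarr */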
diff --git a/tools/testing/selftests/bpf/progs/test_skmsg_load_helpers.c b/tools/testing/selftests/bpf/progs/test_skmsg_load_helpers.c
index 45e8fc75a739..996b177324ba 100644
--- a/tools/testing/selftests/bpf/progs/test_skmsg_load_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_skmsg_load_helpers.c
@@ -24,8 +24,7 @@ struct {
__type(value, __u64);
} socket_storage SEC(".maps");
-SEC("sk_msg")
-int prog_msg_verdict(struct sk_msg_md *msg)
+static int prog_msg_verdict_common(struct sk_msg_md *msg)
{
struct task_struct *task = (struct task_struct *)bpf_get_current_task();
int verdict = SK_PASS;
@@ -44,4 +43,28 @@ int prog_msg_verdict(struct sk_msg_md *msg)
return verdict;
}
+SEC("sk_msg")
+int prog_msg_verdict(struct sk_msg_md *msg)
+{
+ return prog_msg_verdict_common(msg);
+}
+
+SEC("sk_msg")
+int prog_msg_verdict_clone(struct sk_msg_md *msg)
+{
+ return prog_msg_verdict_common(msg);
+}
+
+SEC("sk_msg")
+int prog_msg_verdict_clone2(struct sk_msg_md *msg)
+{
+ return prog_msg_verdict_common(msg);
+}
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ return SK_PASS;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_snprintf.c b/tools/testing/selftests/bpf/progs/test_snprintf.c
new file mode 100644
index 000000000000..8fda07544023
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_snprintf.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google LLC. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+__u32 pid = 0;
+
+char num_out[64] = {};
+long num_ret = 0;
+
+char ip_out[64] = {};
+long ip_ret = 0;
+
+char sym_out[64] = {};
+long sym_ret = 0;
+
+char addr_out[64] = {};
+long addr_ret = 0;
+
+char str_out[64] = {};
+long str_ret = 0;
+
+char over_out[6] = {};
+long over_ret = 0;
+
+char pad_out[10] = {};
+long pad_ret = 0;
+
+char noarg_out[64] = {};
+long noarg_ret = 0;
+
+long nobuf_ret = 0;
+
+extern const void schedule __ksym;
+
+SEC("raw_tp/sys_enter")
+int handler(const void *ctx)
+{
+ /* Convenient values to pretty-print */
+ const __u8 ex_ipv4[] = {127, 0, 0, 1};
+ const __u8 ex_ipv6[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1};
+ static const char str1[] = "str1";
+ static const char longstr[] = "longstr";
+
+ if ((int)bpf_get_current_pid_tgid() != pid)
+ return 0;
+
+ /* Integer types */
+ num_ret = BPF_SNPRINTF(num_out, sizeof(num_out),
+ "%d %u %x %li %llu %lX",
+ -8, 9, 150, -424242, 1337, 0xDABBAD00);
+ /* IP addresses */
+ ip_ret = BPF_SNPRINTF(ip_out, sizeof(ip_out), "%pi4 %pI6",
+ &ex_ipv4, &ex_ipv6);
+ /* Symbol lookup formatting */
+ sym_ret = BPF_SNPRINTF(sym_out, sizeof(sym_out), "%ps %pS %pB",
+ &schedule, &schedule, &schedule);
+ /* Kernel pointers */
+ addr_ret = BPF_SNPRINTF(addr_out, sizeof(addr_out), "%pK %px %p",
+ 0, 0xFFFF00000ADD4E55, 0xFFFF00000ADD4E55);
+ /* Strings and single-byte character embedding */
+ str_ret = BPF_SNPRINTF(str_out, sizeof(str_out), "%s % 9c %+2c %-3c %04c %0c %+05s",
+ str1, 'a', 'b', 'c', 'd', 'e', longstr);
+ /* Overflow */
+ over_ret = BPF_SNPRINTF(over_out, sizeof(over_out), "%%overflow");
+ /* Padding of fixed width numbers */
+ pad_ret = BPF_SNPRINTF(pad_out, sizeof(pad_out), "%5d %0900000X", 4, 4);
+ /* No args */
+ noarg_ret = BPF_SNPRINTF(noarg_out, sizeof(noarg_out), "simple case");
+ /* No buffer */
+ nobuf_ret = BPF_SNPRINTF(NULL, 0, "only interested in length %d", 60);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
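For the integer case above, the rendering is deterministic: 150 prints as hex 96 and 0xDABBAD00 as upper-case hex. A hedged sketch of what the companion user-space checker would assert:

    ASSERT_STREQ(skel->bss->num_out, "-8 9 96 -424242 1337 DABBAD00", "num_out");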
diff --git a/tools/testing/selftests/bpf/progs/test_snprintf_single.c b/tools/testing/selftests/bpf/progs/test_snprintf_single.c
new file mode 100644
index 000000000000..3095837334d3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_snprintf_single.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google LLC. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* The format string is filled from user space so that loading fails */
+const char fmt[10];
+
+SEC("raw_tp/sys_enter")
+int handler(const void *ctx)
+{
+ unsigned long long arg = 42;
+
+ bpf_snprintf(NULL, 0, fmt, &arg, sizeof(arg));
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c
index 81b57b9aaaea..196844be349c 100644
--- a/tools/testing/selftests/bpf/progs/test_sock_fields.c
+++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c
@@ -7,11 +7,11 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-#include "bpf_tcp_helpers.h"
enum bpf_linum_array_idx {
EGRESS_LINUM_IDX,
INGRESS_LINUM_IDX,
+ READ_SK_DST_PORT_LINUM_IDX,
__NR_BPF_LINUM_ARRAY_IDX,
};
@@ -41,6 +41,10 @@ struct {
__type(value, struct bpf_spinlock_cnt);
} sk_pkt_out_cnt10 SEC(".maps");
+struct tcp_sock {
+ __u32 lsndtime;
+} __attribute__((preserve_access_index));
+
struct bpf_tcp_sock listen_tp = {};
struct sockaddr_in6 srv_sa6 = {};
struct bpf_tcp_sock cli_tp = {};
@@ -113,14 +117,14 @@ static void tpcpy(struct bpf_tcp_sock *dst,
#define RET_LOG() ({ \
linum = __LINE__; \
- bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_NOEXIST); \
+ bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_ANY); \
return CG_OK; \
})
SEC("cgroup_skb/egress")
int egress_read_sock_fields(struct __sk_buff *skb)
{
- struct bpf_spinlock_cnt cli_cnt_init = { .lock = 0, .cnt = 0xeB9F };
+ struct bpf_spinlock_cnt cli_cnt_init = { .lock = {}, .cnt = 0xeB9F };
struct bpf_spinlock_cnt *pkt_out_cnt, *pkt_out_cnt10;
struct bpf_tcp_sock *tp, *tp_ret;
struct bpf_sock *sk, *sk_ret;
@@ -133,11 +137,11 @@ int egress_read_sock_fields(struct __sk_buff *skb)
if (!sk)
RET_LOG();
- /* Not the testing egress traffic or
- * TCP_LISTEN (10) socket will be copied at the ingress side.
+ /* Not testing the egress traffic or the listening socket,
+ * which are covered by the cgroup_skb/ingress test program.
*/
if (sk->family != AF_INET6 || !is_loopback6(sk->src_ip6) ||
- sk->state == 10)
+ sk->state == BPF_TCP_LISTEN)
return CG_OK;
if (sk->src_port == bpf_ntohs(srv_sa6.sin6_port)) {
@@ -231,8 +235,8 @@ int ingress_read_sock_fields(struct __sk_buff *skb)
sk->src_port != bpf_ntohs(srv_sa6.sin6_port))
return CG_OK;
- /* Only interested in TCP_LISTEN */
- if (sk->state != 10)
+ /* Only interested in the listening socket */
+ if (sk->state != BPF_TCP_LISTEN)
return CG_OK;
/* It must be a fullsock for cgroup_skb/ingress prog */
@@ -250,4 +254,57 @@ int ingress_read_sock_fields(struct __sk_buff *skb)
return CG_OK;
}
+/*
+ * NOTE: 4-byte load from bpf_sock at dst_port offset is quirky. It
+ * gets rewritten by the access converter to a 2-byte load for
+ * backward compatibility. Treating the load result as a be16 value
+ * makes the code portable across little- and big-endian platforms.
+ */
+static __noinline bool sk_dst_port__load_word(struct bpf_sock *sk)
+{
+ __u32 *word = (__u32 *)&sk->dst_port;
+ return word[0] == bpf_htons(0xcafe);
+}
+
+static __noinline bool sk_dst_port__load_half(struct bpf_sock *sk)
+{
+ __u16 *half;
+
+ asm volatile ("");
+ half = (__u16 *)&sk->dst_port;
+ return half[0] == bpf_htons(0xcafe);
+}
+
+static __noinline bool sk_dst_port__load_byte(struct bpf_sock *sk)
+{
+ __u8 *byte = (__u8 *)&sk->dst_port;
+ return byte[0] == 0xca && byte[1] == 0xfe;
+}
+
+SEC("cgroup_skb/egress")
+int read_sk_dst_port(struct __sk_buff *skb)
+{
+ __u32 linum, linum_idx;
+ struct bpf_sock *sk;
+
+ linum_idx = READ_SK_DST_PORT_LINUM_IDX;
+
+ sk = skb->sk;
+ if (!sk)
+ RET_LOG();
+
+ /* Ignore everything but the SYN from the client socket */
+ if (sk->state != BPF_TCP_SYN_SENT)
+ return CG_OK;
+
+ if (!sk_dst_port__load_word(sk))
+ RET_LOG();
+ if (!sk_dst_port__load_half(sk))
+ RET_LOG();
+ if (!sk_dst_port__load_byte(sk))
+ RET_LOG();
+
+ return CG_OK;
+}
+
char _license[] SEC("license") = "GPL";
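A note on the empty asm in sk_dst_port__load_half(): it presumably acts as a compiler barrier so clang keeps a genuine 2-byte access rather than widening it, since the point of the three helpers is to hit the context-access converter with three distinct load sizes:

    /* barrier between pointer formation and dereference keeps the
     * half-word (BPF_H) load from being folded into a wider access */
    asm volatile ("");
    half = (__u16 *)&sk->dst_port;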
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c b/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c
new file mode 100644
index 000000000000..1c7941a4ad00
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 ByteDance */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE __PAGE_SIZE
+#endif
+#define BPF_SKB_MAX_LEN (PAGE_SIZE << 2)
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} sock_map_rx SEC(".maps");
+
+long change_tail_ret = 1;
+
+SEC("sk_skb")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ char *data, *data_end;
+
+ bpf_skb_pull_data(skb, 1);
+ data = (char *)(unsigned long)skb->data;
+ data_end = (char *)(unsigned long)skb->data_end;
+
+ if (data + 1 > data_end)
+ return SK_PASS;
+
+ if (data[0] == 'T') { /* Trim the packet */
+ change_tail_ret = bpf_skb_change_tail(skb, skb->len - 1, 0);
+ return SK_PASS;
+ } else if (data[0] == 'G') { /* Grow the packet */
+ change_tail_ret = bpf_skb_change_tail(skb, skb->len + 1, 0);
+ return SK_PASS;
+ } else if (data[0] == 'E') { /* Error */
+ change_tail_ret = bpf_skb_change_tail(skb, BPF_SKB_MAX_LEN, 0);
+ return SK_PASS;
+ }
+ return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
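A hypothetical user-space loop for this verdict program: push a one-byte command through the sockmap-attached socket pair and check the recorded helper result (socket setup and assertion style are illustrative):

    send(cli, "T", 1, 0);                       /* trim one byte */
    recv(srv, buf, sizeof(buf), 0);
    ASSERT_EQ(skel->bss->change_tail_ret, 0, "trim");

    send(cli, "E", 1, 0);                       /* oversized grow */
    recv(srv, buf, sizeof(buf), 0);
    ASSERT_EQ(skel->bss->change_tail_ret, -EINVAL, "oversize rejected");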
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c b/tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c
new file mode 100644
index 000000000000..29314805ce42
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c
@@ -0,0 +1,32 @@
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_rx SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_tx SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_msg SEC(".maps");
+
+SEC("sk_skb")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ return SK_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
index 1858435de7aa..f48f85f1bd70 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
@@ -14,6 +14,7 @@
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_misc.h"
/* Sockmap sample program connects a client and a backend together
* using cgroups.
@@ -91,7 +92,7 @@ struct {
__uint(value_size, sizeof(int));
} tls_sock_map SEC(".maps");
-SEC("sk_skb1")
+SEC("sk_skb/stream_parser")
int bpf_prog1(struct __sk_buff *skb)
{
int *f, two = 2;
@@ -103,7 +104,7 @@ int bpf_prog1(struct __sk_buff *skb)
return skb->len;
}
-SEC("sk_skb2")
+SEC("sk_skb/stream_verdict")
int bpf_prog2(struct __sk_buff *skb)
{
__u32 lport = skb->local_port;
@@ -111,12 +112,15 @@ int bpf_prog2(struct __sk_buff *skb)
int len, *f, ret, zero = 0;
__u64 flags = 0;
+ __sink(rport);
if (lport == 10000)
ret = 10;
else
ret = 1;
len = (__u32)skb->data_end - (__u32)skb->data;
+ __sink(len);
+
f = bpf_map_lookup_elem(&sock_skb_opts, &zero);
if (f && *f) {
ret = 3;
@@ -147,7 +151,7 @@ static inline void bpf_write_pass(struct __sk_buff *skb, int offset)
memcpy(c + offset, "PASS", 4);
}
-SEC("sk_skb3")
+SEC("sk_skb/stream_verdict")
int bpf_prog3(struct __sk_buff *skb)
{
int err, *f, ret = SK_PASS;
@@ -173,14 +177,10 @@ int bpf_prog3(struct __sk_buff *skb)
return bpf_sk_redirect_hash(skb, &tls_sock_map, &ret, flags);
#endif
}
- f = bpf_map_lookup_elem(&sock_skb_opts, &one);
- if (f && *f)
- ret = SK_DROP;
err = bpf_skb_adjust_room(skb, 4, 0, 0);
if (err)
return SK_DROP;
bpf_write_pass(skb, 13);
-tls_out:
return ret;
}
@@ -188,8 +188,7 @@ SEC("sockops")
int bpf_sockmap(struct bpf_sock_ops *skops)
{
__u32 lport, rport;
- int op, err = 0, index, key, ret;
-
+ int op, ret;
op = (int) skops->op;
@@ -201,10 +200,10 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
if (lport == 10000) {
ret = 1;
#ifdef SOCKMAP
- err = bpf_sock_map_update(skops, &sock_map, &ret,
+ bpf_sock_map_update(skops, &sock_map, &ret,
BPF_NOEXIST);
#else
- err = bpf_sock_hash_update(skops, &sock_map, &ret,
+ bpf_sock_hash_update(skops, &sock_map, &ret,
BPF_NOEXIST);
#endif
}
@@ -216,10 +215,10 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
if (bpf_ntohl(rport) == 10001) {
ret = 10;
#ifdef SOCKMAP
- err = bpf_sock_map_update(skops, &sock_map, &ret,
+ bpf_sock_map_update(skops, &sock_map, &ret,
BPF_NOEXIST);
#else
- err = bpf_sock_hash_update(skops, &sock_map, &ret,
+ bpf_sock_hash_update(skops, &sock_map, &ret,
BPF_NOEXIST);
#endif
}
@@ -231,11 +230,11 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
return 0;
}
-SEC("sk_msg1")
+SEC("sk_msg")
int bpf_prog4(struct sk_msg_md *msg)
{
int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
- int *start, *end, *start_push, *end_push, *start_pop, *pop;
+ int *start, *end, *start_push, *end_push, *start_pop, *pop, err = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
@@ -249,8 +248,11 @@ int bpf_prog4(struct sk_msg_md *msg)
bpf_msg_pull_data(msg, *start, *end, 0);
start_push = bpf_map_lookup_elem(&sock_bytes, &two);
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
- if (start_push && end_push)
- bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (start_push && end_push) {
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ return SK_DROP;
+ }
start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
pop = bpf_map_lookup_elem(&sock_bytes, &five);
if (start_pop && pop)
@@ -258,11 +260,12 @@ int bpf_prog4(struct sk_msg_md *msg)
return SK_PASS;
}
-SEC("sk_msg2")
+SEC("sk_msg")
int bpf_prog6(struct sk_msg_md *msg)
{
int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0;
int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f;
+ int err = 0;
__u64 flags = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
@@ -279,8 +282,11 @@ int bpf_prog6(struct sk_msg_md *msg)
start_push = bpf_map_lookup_elem(&sock_bytes, &two);
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
- if (start_push && end_push)
- bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (start_push && end_push) {
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ return SK_DROP;
+ }
start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
pop = bpf_map_lookup_elem(&sock_bytes, &five);
@@ -299,7 +305,7 @@ int bpf_prog6(struct sk_msg_md *msg)
#endif
}
-SEC("sk_msg3")
+SEC("sk_msg")
int bpf_prog8(struct sk_msg_md *msg)
{
void *data_end = (void *)(long) msg->data_end;
@@ -314,9 +320,14 @@ int bpf_prog8(struct sk_msg_md *msg)
} else {
return SK_DROP;
}
+
+ __sink(data_end);
+ __sink(data);
+
return SK_PASS;
}
-SEC("sk_msg4")
+
+SEC("sk_msg")
int bpf_prog9(struct sk_msg_md *msg)
{
void *data_end = (void *)(long) msg->data_end;
@@ -334,11 +345,11 @@ int bpf_prog9(struct sk_msg_md *msg)
return SK_PASS;
}
-SEC("sk_msg5")
+SEC("sk_msg")
int bpf_prog10(struct sk_msg_md *msg)
{
int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
- int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
+ int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, err = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
@@ -352,8 +363,11 @@ int bpf_prog10(struct sk_msg_md *msg)
bpf_msg_pull_data(msg, *start, *end, 0);
start_push = bpf_map_lookup_elem(&sock_bytes, &two);
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
- if (start_push && end_push)
- bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (start_push && end_push) {
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ return SK_PASS;
+ }
start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
pop = bpf_map_lookup_elem(&sock_bytes, &five);
if (start_pop && pop)
@@ -361,5 +375,4 @@ int bpf_prog10(struct sk_msg_md *msg)
return SK_DROP;
}
-int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c
new file mode 100644
index 000000000000..83df4919c224
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+int cork_byte;
+int push_start;
+int push_end;
+int apply_bytes;
+int pop_start;
+int pop_end;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map SEC(".maps");
+
+SEC("sk_msg")
+int prog_sk_policy(struct sk_msg_md *msg)
+{
+ if (cork_byte > 0)
+ bpf_msg_cork_bytes(msg, cork_byte);
+ if (push_start > 0 && push_end > 0)
+ bpf_msg_push_data(msg, push_start, push_end, 0);
+ if (pop_start >= 0 && pop_end > 0)
+ bpf_msg_pop_data(msg, pop_start, pop_end, 0);
+
+ return SK_PASS;
+}
+
+SEC("sk_msg")
+int prog_sk_policy_redir(struct sk_msg_md *msg)
+{
+ int two = 2;
+
+ bpf_msg_apply_bytes(msg, apply_bytes);
+ return bpf_msg_redirect_map(msg, &sock_map, two, 0);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
index a3a366c57ce1..b7250eb9c30c 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
@@ -15,6 +15,13 @@ struct {
} sock_map SEC(".maps");
struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u64);
+} nop_map SEC(".maps");
+
+struct {
__uint(type, BPF_MAP_TYPE_SOCKHASH);
__uint(max_entries, 2);
__type(key, __u32);
@@ -28,16 +35,31 @@ struct {
__type(value, unsigned int);
} verdict_map SEC(".maps");
-static volatile bool test_sockmap; /* toggled by user-space */
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} parser_map SEC(".maps");
+
+bool test_sockmap = false; /* toggled by user-space */
+bool test_ingress = false; /* toggled by user-space */
SEC("sk_skb/stream_parser")
-int prog_skb_parser(struct __sk_buff *skb)
+int prog_stream_parser(struct __sk_buff *skb)
{
+ int *value;
+ __u32 key = 0;
+
+ value = bpf_map_lookup_elem(&parser_map, &key);
+ if (value && *value)
+ return *value;
+
return skb->len;
}
SEC("sk_skb/stream_verdict")
-int prog_skb_verdict(struct __sk_buff *skb)
+int prog_stream_verdict(struct __sk_buff *skb)
{
unsigned int *count;
__u32 zero = 0;
@@ -55,6 +77,27 @@ int prog_skb_verdict(struct __sk_buff *skb)
return verdict;
}
+SEC("sk_skb")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ unsigned int *count;
+ __u32 zero = 0;
+ int verdict;
+
+ if (test_sockmap)
+ verdict = bpf_sk_redirect_map(skb, &sock_map, zero,
+ test_ingress ? BPF_F_INGRESS : 0);
+ else
+ verdict = bpf_sk_redirect_hash(skb, &sock_hash, &zero,
+ test_ingress ? BPF_F_INGRESS : 0);
+
+ count = bpf_map_lookup_elem(&verdict_map, &verdict);
+ if (count)
+ (*count)++;
+
+ return verdict;
+}
+
SEC("sk_msg")
int prog_msg_verdict(struct sk_msg_md *msg)
{
@@ -94,5 +137,4 @@ int prog_reuseport(struct sk_reuseport_md *reuse)
return verdict;
}
-int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c b/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c
new file mode 100644
index 000000000000..69aacc96db36
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c
@@ -0,0 +1,47 @@
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_rx SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_tx SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map_msg SEC(".maps");
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ return SK_PASS;
+}
+
+int clone_called;
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_verdict_clone(struct __sk_buff *skb)
+{
+ clone_called = 1;
+ return SK_PASS;
+}
+
+SEC("sk_skb/stream_parser")
+int prog_skb_parser(struct __sk_buff *skb)
+{
+ return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_progs_query.c b/tools/testing/selftests/bpf/progs/test_sockmap_progs_query.c
new file mode 100644
index 000000000000..9d58d61c0dee
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_progs_query.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} sock_map SEC(".maps");
+
+SEC("sk_skb")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ return SK_PASS;
+}
+
+SEC("sk_msg")
+int prog_skmsg_verdict(struct sk_msg_md *msg)
+{
+ return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_redir.c b/tools/testing/selftests/bpf/progs/test_sockmap_redir.c
new file mode 100644
index 000000000000..34d9f4f2f0a2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_redir.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC(".maps") struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} nop_map, sock_map;
+
+SEC(".maps") struct {
+ __uint(type, BPF_MAP_TYPE_SOCKHASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} nop_hash, sock_hash;
+
+SEC(".maps") struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, int);
+ __type(value, unsigned int);
+} verdict_map;
+
+/* Set by user space */
+int redirect_type;
+int redirect_flags;
+
+#define redirect_map(__data) \
+ _Generic((__data), \
+ struct __sk_buff * : bpf_sk_redirect_map, \
+ struct sk_msg_md * : bpf_msg_redirect_map \
+ )((__data), &sock_map, (__u32){0}, redirect_flags)
+
+#define redirect_hash(__data) \
+ _Generic((__data), \
+ struct __sk_buff * : bpf_sk_redirect_hash, \
+ struct sk_msg_md * : bpf_msg_redirect_hash \
+ )((__data), &sock_hash, &(__u32){0}, redirect_flags)
+
+#define DEFINE_PROG(__type, __param) \
+SEC("sk_" XSTR(__type)) \
+int prog_ ## __type ## _verdict(__param data) \
+{ \
+ unsigned int *count; \
+ int verdict; \
+ \
+ if (redirect_type == BPF_MAP_TYPE_SOCKMAP) \
+ verdict = redirect_map(data); \
+ else if (redirect_type == BPF_MAP_TYPE_SOCKHASH) \
+ verdict = redirect_hash(data); \
+ else \
+ verdict = redirect_type - __MAX_BPF_MAP_TYPE; \
+ \
+ count = bpf_map_lookup_elem(&verdict_map, &verdict); \
+ if (count) \
+ (*count)++; \
+ \
+ return verdict; \
+}
+
+DEFINE_PROG(skb, struct __sk_buff *);
+DEFINE_PROG(msg, struct sk_msg_md *);
+
+char _license[] SEC("license") = "GPL";
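
The redirect_map()/redirect_hash() macros above use C11 _Generic selection: the helper is chosen at compile time from the static type of the context argument, so a single DEFINE_PROG body serves both sk_skb and sk_msg programs at no run-time cost. A standalone illustration of the mechanism:

#include <stdio.h>

static void handle_int(int v)     { printf("int: %d\n", v); }
static void handle_float(float v) { printf("float: %f\n", v); }

/* resolves to one of the two handlers purely from the argument type */
#define handle(x) _Generic((x), int: handle_int, float: handle_float)(x)

int main(void)
{
	handle(1);	/* picks handle_int */
	handle(2.0f);	/* picks handle_float */
	return 0;
}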
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c b/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c
new file mode 100644
index 000000000000..d25b0bb30fc0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u64);
+} sock_map SEC(".maps");
+
+SEC("sk_skb/verdict")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ return SK_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_strp.c b/tools/testing/selftests/bpf/progs/test_sockmap_strp.c
new file mode 100644
index 000000000000..dde3d5bec515
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_strp.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+int verdict_max_size = 10000;
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 20);
+ __type(key, int);
+ __type(value, int);
+} sock_map SEC(".maps");
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ __u32 one = 1;
+
+ if (skb->len > verdict_max_size)
+ return SK_PASS;
+
+ return bpf_sk_redirect_map(skb, &sock_map, one, 0);
+}
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_verdict_pass(struct __sk_buff *skb)
+{
+ return SK_PASS;
+}
+
+SEC("sk_skb/stream_parser")
+int prog_skb_parser(struct __sk_buff *skb)
+{
+ return skb->len;
+}
+
+SEC("sk_skb/stream_parser")
+int prog_skb_parser_partial(struct __sk_buff *skb)
+{
+ /* agreement with the test program on a 4-byte size header
+ * and 6-byte body.
+ */
+ if (skb->len < 4) {
+ /* need more header to determine full length */
+ return 0;
+ }
+ /* return the full message length decoded from the header.
+ * the return value may be larger than skb->len, which tells
+ * the strparser to wait for the rest of the body to arrive.
+ */
+ return 10;
+}
+
+char _license[] SEC("license") = "GPL";
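
prog_skb_parser_partial() above hard-codes the 10-byte message the test sends. For illustration, a parser that actually decodes a big-endian length header could look as follows (a sketch reusing this file's includes; the 4-byte-header wire format is an assumption carried over from the comment above):

SEC("sk_skb/stream_parser")
int prog_skb_parser_decode(struct __sk_buff *skb)
{
	__u32 body_len;

	if (skb->len < 4)
		return 0; /* incomplete header, wait for more data */
	if (bpf_skb_load_bytes(skb, 0, &body_len, sizeof(body_len)))
		return 0; /* treat a failed read as incomplete */
	return 4 + bpf_ntohl(body_len); /* header plus decoded body */
}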
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_update.c b/tools/testing/selftests/bpf/progs/test_sockmap_update.c
index 9d0c9f28cab2..6d64ea536e3d 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_update.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_update.c
@@ -24,7 +24,7 @@ struct {
__type(value, __u64);
} dst_sock_hash SEC(".maps");
-SEC("classifier/copy_sock_map")
+SEC("tc")
int copy_sock_map(void *ctx)
{
struct bpf_sock *sk;
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c
index 0d31a3b3505f..d8d77bdffd3d 100644
--- a/tools/testing/selftests/bpf/progs/test_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c
@@ -3,6 +3,7 @@
#include <linux/bpf.h>
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
struct hmap_elem {
volatile int cnt;
@@ -45,8 +46,8 @@ struct {
#define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20)
-SEC("spin_lock_demo")
-int bpf_sping_lock_test(struct __sk_buff *skb)
+SEC("cgroup_skb/ingress")
+int bpf_spin_lock_test(struct __sk_buff *skb)
{
volatile int credit = 0, max_credit = 100, pkt_len = 64;
struct hmap_elem zero = {}, *val;
@@ -89,6 +90,8 @@ int bpf_sping_lock_test(struct __sk_buff *skb)
credit = q->credit;
bpf_spin_unlock(&q->lock);
+ __sink(credit);
+
/* spin_lock in cgroup local storage */
cls = bpf_get_local_storage(&cls_map, 0);
bpf_spin_lock(&cls->lock);
@@ -98,4 +101,69 @@ int bpf_sping_lock_test(struct __sk_buff *skb)
err:
return err;
}
+
+struct bpf_spin_lock lockA __hidden SEC(".data.A");
+
+__noinline
+static int static_subprog(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ if (ctx->protocol)
+ return ret;
+ return ret + ctx->len;
+}
+
+__noinline
+static int static_subprog_lock(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ ret = static_subprog(ctx);
+ bpf_spin_lock(&lockA);
+ return ret + ctx->len;
+}
+
+__noinline
+static int static_subprog_unlock(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ ret = static_subprog(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret + ctx->len;
+}
+
+SEC("tc")
+int lock_static_subprog_call(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = static_subprog(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("tc")
+int lock_static_subprog_lock(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ ret = static_subprog_lock(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("tc")
+int lock_static_subprog_unlock(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ ret = static_subprog_unlock(ctx);
+ return ret;
+}
+
char _license[] SEC("license") = "GPL";
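
The lock_static_subprog_* programs above deliberately split bpf_spin_lock()/bpf_spin_unlock() across static subprograms to probe the verifier's lock tracking. For contrast, the canonical balanced pattern on a map value, sketched here against this file's hmap/hmap_elem definitions:

SEC("tc")
int lock_balanced_example(struct __sk_buff *skb)
{
	struct hmap_elem *val;
	int cpu = bpf_get_smp_processor_id();

	val = bpf_map_lookup_elem(&hmap, &cpu);
	if (!val)
		return 0;
	bpf_spin_lock(&val->lock);
	val->cnt++; /* short critical section, no calls while locked */
	bpf_spin_unlock(&val->lock);
	return 0;
}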
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
new file mode 100644
index 000000000000..f678ee6bd7ea
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_experimental.h"
+
+struct foo {
+ struct bpf_spin_lock lock;
+ int data;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct foo);
+ __uint(max_entries, 1);
+} array_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct array_map);
+} map_of_maps SEC(".maps") = {
+ .values = {
+ [0] = &array_map,
+ },
+};
+
+static struct bpf_spin_lock lockA SEC(".data.A");
+static struct bpf_spin_lock lockB SEC(".data.B");
+
+SEC("?tc")
+int lock_id_kptr_preserve(void *ctx)
+{
+ struct foo *f;
+
+ f = bpf_obj_new(typeof(*f));
+ if (!f)
+ return 0;
+ bpf_this_cpu_ptr(f);
+ return 0;
+}
+
+SEC("?tc")
+int lock_id_global_zero(void *ctx)
+{
+ bpf_this_cpu_ptr(&lockA);
+ return 0;
+}
+
+SEC("?tc")
+int lock_id_mapval_preserve(void *ctx)
+{
+ struct foo *f;
+ int key = 0;
+
+ f = bpf_map_lookup_elem(&array_map, &key);
+ if (!f)
+ return 0;
+ bpf_this_cpu_ptr(f);
+ return 0;
+}
+
+SEC("?tc")
+int lock_id_innermapval_preserve(void *ctx)
+{
+ struct foo *f;
+ int key = 0;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &key);
+ if (!map)
+ return 0;
+ f = bpf_map_lookup_elem(map, &key);
+ if (!f)
+ return 0;
+ bpf_this_cpu_ptr(f);
+ return 0;
+}
+
+#define CHECK(test, A, B) \
+ SEC("?tc") \
+ int lock_id_mismatch_##test(void *ctx) \
+ { \
+ struct foo *f1, *f2, *v, *iv; \
+ int key = 0; \
+ void *map; \
+ \
+ map = bpf_map_lookup_elem(&map_of_maps, &key); \
+ if (!map) \
+ return 0; \
+ iv = bpf_map_lookup_elem(map, &key); \
+ if (!iv) \
+ return 0; \
+ v = bpf_map_lookup_elem(&array_map, &key); \
+ if (!v) \
+ return 0; \
+ f1 = bpf_obj_new(typeof(*f1)); \
+ if (!f1) \
+ return 0; \
+ f2 = bpf_obj_new(typeof(*f2)); \
+ if (!f2) { \
+ bpf_obj_drop(f1); \
+ return 0; \
+ } \
+ bpf_spin_lock(A); \
+ bpf_spin_unlock(B); \
+ return 0; \
+ }
+
+CHECK(kptr_kptr, &f1->lock, &f2->lock);
+CHECK(kptr_global, &f1->lock, &lockA);
+CHECK(kptr_mapval, &f1->lock, &v->lock);
+CHECK(kptr_innermapval, &f1->lock, &iv->lock);
+
+CHECK(global_global, &lockA, &lockB);
+CHECK(global_kptr, &lockA, &f1->lock);
+CHECK(global_mapval, &lockA, &v->lock);
+CHECK(global_innermapval, &lockA, &iv->lock);
+
+SEC("?tc")
+int lock_id_mismatch_mapval_mapval(void *ctx)
+{
+ struct foo *f1, *f2;
+ int key = 0;
+
+ f1 = bpf_map_lookup_elem(&array_map, &key);
+ if (!f1)
+ return 0;
+ f2 = bpf_map_lookup_elem(&array_map, &key);
+ if (!f2)
+ return 0;
+
+ bpf_spin_lock(&f1->lock);
+ f1->data = 42;
+ bpf_spin_unlock(&f2->lock);
+
+ return 0;
+}
+
+CHECK(mapval_kptr, &v->lock, &f1->lock);
+CHECK(mapval_global, &v->lock, &lockB);
+CHECK(mapval_innermapval, &v->lock, &iv->lock);
+
+SEC("?tc")
+int lock_id_mismatch_innermapval_innermapval1(void *ctx)
+{
+ struct foo *f1, *f2;
+ int key = 0;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &key);
+ if (!map)
+ return 0;
+ f1 = bpf_map_lookup_elem(map, &key);
+ if (!f1)
+ return 0;
+ f2 = bpf_map_lookup_elem(map, &key);
+ if (!f2)
+ return 0;
+
+ bpf_spin_lock(&f1->lock);
+ f1->data = 42;
+ bpf_spin_unlock(&f2->lock);
+
+ return 0;
+}
+
+SEC("?tc")
+int lock_id_mismatch_innermapval_innermapval2(void *ctx)
+{
+ struct foo *f1, *f2;
+ int key = 0;
+ void *map;
+
+ map = bpf_map_lookup_elem(&map_of_maps, &key);
+ if (!map)
+ return 0;
+ f1 = bpf_map_lookup_elem(map, &key);
+ if (!f1)
+ return 0;
+ map = bpf_map_lookup_elem(&map_of_maps, &key);
+ if (!map)
+ return 0;
+ f2 = bpf_map_lookup_elem(map, &key);
+ if (!f2)
+ return 0;
+
+ bpf_spin_lock(&f1->lock);
+ f1->data = 42;
+ bpf_spin_unlock(&f2->lock);
+
+ return 0;
+}
+
+CHECK(innermapval_kptr, &iv->lock, &f1->lock);
+CHECK(innermapval_global, &iv->lock, &lockA);
+CHECK(innermapval_mapval, &iv->lock, &v->lock);
+
+#undef CHECK
+
+__noinline
+int global_subprog(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ if (ctx->protocol)
+ ret += ctx->protocol;
+ return ret + ctx->mark;
+}
+
+__noinline
+static int static_subprog_call_global(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ if (ctx->protocol)
+ return ret;
+ return ret + ctx->len + global_subprog(ctx);
+}
+
+SEC("?tc")
+int lock_global_subprog_call1(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = global_subprog(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("?tc")
+int lock_global_subprog_call2(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = static_subprog_call_global(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+int __noinline
+global_subprog_int(int i)
+{
+ if (i)
+ bpf_printk("%p", &i);
+ return i;
+}
+
+int __noinline
+global_sleepable_helper_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user(&i, sizeof(i), NULL);
+ return i;
+}
+
+int __noinline
+global_sleepable_kfunc_subprog(int i)
+{
+ if (i)
+ bpf_copy_from_user_str(&i, sizeof(i), NULL, 0);
+ global_subprog_int(i);
+ return i;
+}
+
+int __noinline
+global_subprog_calling_sleepable_global(int i)
+{
+ if (!i)
+ global_sleepable_kfunc_subprog(i);
+ return i;
+}
+
+SEC("?syscall")
+int lock_global_sleepable_helper_subprog(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = global_sleepable_helper_subprog(ctx->mark);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("?syscall")
+int lock_global_sleepable_kfunc_subprog(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = global_sleepable_kfunc_subprog(ctx->mark);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("?syscall")
+int lock_global_sleepable_subprog_indirect(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = global_subprog_calling_sleepable_global(ctx->mark);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_stack_var_off.c b/tools/testing/selftests/bpf/progs/test_stack_var_off.c
new file mode 100644
index 000000000000..665e6ae09d37
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_stack_var_off.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+int probe_res;
+
+char input[4] = {};
+int test_pid;
+
+SEC("tracepoint/syscalls/sys_enter_nanosleep")
+int probe(void *ctx)
+{
+ /* This BPF program performs variable-offset reads and writes on a
+ * stack-allocated buffer.
+ */
+ char stack_buf[16];
+ unsigned long len;
+ unsigned long last;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != test_pid)
+ return 0;
+
+ /* Copy the input to the stack. */
+ __builtin_memcpy(stack_buf, input, 4);
+
+ /* The first byte in the buffer indicates the length. */
+ len = stack_buf[0] & 0xf;
+ last = (len - 1) & 0xf;
+
+ /* Append something to the buffer. The offset where we write is not
+ * statically known; this is a variable-offset stack write.
+ */
+ stack_buf[len] = 42;
+
+ /* Index into the buffer at an unknown offset. This is a
+ * variable-offset stack read.
+ *
+ * Note that if it wasn't for the preceding variable-offset write, this
+ * read would be rejected because the stack slot cannot be verified as
+ * being initialized. With the preceding variable-offset write, the
+ * stack slot still cannot be verified, but the write inhibits the
+ * respective check on the reasoning that, if there was a
+ * variable-offset write to a higher-or-equal spot, we're probably reading
+ * what we just wrote.
+ */
+ probe_res = stack_buf[last];
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
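
User space drives this probe by writing the length byte into input[] and triggering the nanosleep tracepoint. A driver sketch, assuming the standard skeleton bpftool generates from this file:

#include <time.h>
#include <unistd.h>
#include "test_stack_var_off.skel.h"

int main(void)
{
	struct test_stack_var_off *skel = test_stack_var_off__open_and_load();
	struct timespec ts = { .tv_nsec = 1 };

	if (!skel)
		return 1;
	skel->bss->test_pid = getpid();
	skel->bss->input[0] = 4;	/* low nibble selects the length */
	if (!test_stack_var_off__attach(skel))
		nanosleep(&ts, NULL);	/* fires the tracepoint */
	/* skel->bss->probe_res now holds stack_buf[last] */
	test_stack_var_off__destroy(skel);
	return 0;
}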
diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
index 0cf0134631b4..0c4426592a26 100644
--- a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
@@ -28,8 +28,8 @@ struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, 128);
__uint(map_flags, BPF_F_STACK_BUILD_ID);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(stack_trace_t));
+ __type(key, __u32);
+ __type(value, stack_trace_t);
} stackmap SEC(".maps");
struct {
@@ -39,16 +39,8 @@ struct {
__type(value, stack_trace_t);
} stack_amap SEC(".maps");
-/* taken from /sys/kernel/debug/tracing/events/random/urandom_read/format */
-struct random_urandom_args {
- unsigned long long pad;
- int got_bits;
- int pool_left;
- int input_left;
-};
-
-SEC("tracepoint/random/urandom_read")
-int oncpu(struct random_urandom_args *args)
+SEC("kprobe/urandom_read_iter")
+int oncpu(struct pt_regs *args)
{
__u32 max_len = sizeof(struct bpf_stack_build_id)
* PERF_MAX_STACK_DEPTH;
@@ -73,4 +65,3 @@ int oncpu(struct random_urandom_args *args)
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/progs/test_static_linked1.c b/tools/testing/selftests/bpf/progs/test_static_linked1.c
new file mode 100644
index 000000000000..4f0b612e1661
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_static_linked1.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* 8-byte aligned .data */
+static volatile long static_var1 = 2;
+static volatile int static_var2 = 3;
+int var1 = -1;
+/* 4-byte aligned .rodata */
+const volatile int rovar1;
+
+/* same "subprog" name in both files */
+static __noinline int subprog(int x)
+{
+ /* but different formula */
+ return x * 2;
+}
+
+SEC("raw_tp/sys_enter")
+int handler1(const void *ctx)
+{
+ var1 = subprog(rovar1) + static_var1 + static_var2;
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
+int VERSION SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_static_linked2.c b/tools/testing/selftests/bpf/progs/test_static_linked2.c
new file mode 100644
index 000000000000..766ebd502a60
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_static_linked2.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* 4-byte aligned .data */
+static volatile int static_var1 = 5;
+static volatile int static_var2 = 6;
+int var2 = -1;
+/* 8-byte aligned .rodata */
+const volatile long rovar2;
+
+/* same "subprog" name in both files */
+static __noinline int subprog(int x)
+{
+ /* but different formula */
+ return x * 3;
+}
+
+SEC("raw_tp/sys_enter")
+int handler2(const void *ctx)
+{
+ var2 = subprog(rovar2) + static_var1 + static_var2;
+
+ return 0;
+}
+
+/* different name and/or type of the variable doesn't matter */
+char _license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
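
The two files above deliberately collide on the static subprog() and static_var* names; static linking must keep each copy file-local while merging everything else. A sketch of linking them with libbpf's bpf_linker, the same mechanism behind `bpftool gen object` (file names are assumptions):

#include <bpf/libbpf.h>

static int link_objects(void)
{
	struct bpf_linker *linker;
	int err = 0;

	linker = bpf_linker__new("test_static_linked.linked.o", NULL);
	if (!linker)
		return -1;
	if (bpf_linker__add_file(linker, "test_static_linked1.bpf.o", NULL) ||
	    bpf_linker__add_file(linker, "test_static_linked2.bpf.o", NULL) ||
	    bpf_linker__finalize(linker))
		err = -1;
	bpf_linker__free(linker);
	return err;
}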
diff --git a/tools/testing/selftests/bpf/progs/test_subprogs.c b/tools/testing/selftests/bpf/progs/test_subprogs.c
index d3c5673c0218..a8d602d7c88a 100644
--- a/tools/testing/selftests/bpf/progs/test_subprogs.c
+++ b/tools/testing/selftests/bpf/progs/test_subprogs.c
@@ -4,8 +4,18 @@
const char LICENSE[] SEC("license") = "GPL";
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} array SEC(".maps");
+
__noinline int sub1(int x)
{
+ int key = 0;
+
+ bpf_map_lookup_elem(&array, &key);
return x + 1;
}
@@ -23,6 +33,9 @@ static __noinline int sub3(int z)
static __noinline int sub4(int w)
{
+ int key = 0;
+
+ bpf_map_lookup_elem(&array, &key);
return w + sub3(5) + sub1(6);
}
@@ -34,7 +47,7 @@ static __noinline int sub5(int v)
return sub1(v) - 1; /* compensates sub1()'s + 1 */
}
-/* unfortunately verifier rejects `struct task_struct *t` as an unkown pointer
+/* unfortunately verifier rejects `struct task_struct *t` as an unknown pointer
* type, so we need to accept pointer as integer and then cast it inside the
* function
*/
@@ -76,6 +89,11 @@ int prog2(void *ctx)
return 0;
}
+static int empty_callback(__u32 index, void *data)
+{
+ return 0;
+}
+
/* prog3 has the same section name as prog1 */
SEC("raw_tp/sys_enter")
int prog3(void *ctx)
@@ -85,6 +103,9 @@ int prog3(void *ctx)
if (!BPF_CORE_READ(t, pid) || !get_task_tgid((uintptr_t)t))
return 1;
+ /* test that ld_imm64 with BPF_PSEUDO_FUNC doesn't get blinded */
+ bpf_loop(1, empty_callback, NULL, 0);
+
res3 = sub3(5) + 6; /* (5 + 3 + (4 + 1)) + 6 = 19 */
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_subprogs_extable.c b/tools/testing/selftests/bpf/progs/test_subprogs_extable.c
new file mode 100644
index 000000000000..dcac69f5928a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_subprogs_extable.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 8);
+ __type(key, __u32);
+ __type(value, __u64);
+} test_array SEC(".maps");
+
+unsigned int triggered;
+
+static __u64 test_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
+{
+ return 1;
+}
+
+SEC("fexit/bpf_testmod_return_ptr")
+int BPF_PROG(handle_fexit_ret_subprogs, int arg, struct file *ret)
+{
+ *(volatile int *)ret;
+ *(volatile int *)&ret->f_mode;
+ bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
+ triggered++;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_return_ptr")
+int BPF_PROG(handle_fexit_ret_subprogs2, int arg, struct file *ret)
+{
+ *(volatile int *)ret;
+ *(volatile int *)&ret->f_mode;
+ bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
+ triggered++;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_return_ptr")
+int BPF_PROG(handle_fexit_ret_subprogs3, int arg, struct file *ret)
+{
+ *(volatile int *)ret;
+ *(volatile int *)&ret->f_mode;
+ bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
+ triggered++;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_subskeleton.c b/tools/testing/selftests/bpf/progs/test_subskeleton.c
new file mode 100644
index 000000000000..006417974372
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_subskeleton.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* volatile to force a read, compiler may assume 0 otherwise */
+const volatile int rovar1;
+int out1;
+
+/* Override weak symbol in test_subskeleton_lib */
+int var5 = 5;
+
+extern volatile bool CONFIG_BPF_SYSCALL __kconfig;
+
+extern int lib_routine(void);
+
+SEC("raw_tp/sys_enter")
+int handler1(const void *ctx)
+{
+ (void) CONFIG_BPF_SYSCALL;
+
+ out1 = lib_routine() * rovar1;
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_subskeleton_lib.c b/tools/testing/selftests/bpf/progs/test_subskeleton_lib.c
new file mode 100644
index 000000000000..ecfafe812c36
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_subskeleton_lib.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* volatile to force a read */
+const volatile int var1;
+volatile int var2 = 1;
+struct {
+ int var3_1;
+ __s64 var3_2;
+} var3;
+int libout1;
+
+extern volatile bool CONFIG_BPF_SYSCALL __kconfig;
+
+int var4[4];
+
+__weak int var5 SEC(".data");
+
+/* Fully contained within library extern-and-definition */
+extern int var6;
+
+int var7 SEC(".data.custom");
+
+int (*fn_ptr)(void);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 16);
+} map1 SEC(".maps");
+
+extern struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 16);
+} map2 SEC(".maps");
+
+int lib_routine(void)
+{
+ __u32 key = 1, value = 2;
+
+ (void) CONFIG_BPF_SYSCALL;
+ bpf_map_update_elem(&map2, &key, &value, BPF_ANY);
+
+ libout1 = var1 + var2 + var3.var3_1 + var3.var3_2 + var5 + var6;
+ return libout1;
+}
+
+SEC("perf_event")
+int lib_perf_handler(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c b/tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c
new file mode 100644
index 000000000000..80238486b7ce
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+int var6 = 6;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 16);
+} map2 SEC(".maps");
+
+char LICENSE[] SEC("license") = "GPL";
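
test_subskeleton.c and the two lib objects resolve each other's externs (var6, map2) at static-link time, with the main object overriding the weak var5. `bpftool gen subskeleton` gives user space typed access into the linked library; the sketch below reaches the same map with plain libbpf lookups instead (the linked object path is an assumption):

#include <bpf/libbpf.h>

static int inspect_linked(void)
{
	struct bpf_object *obj = bpf_object__open("test_subskeleton.linked.o");
	struct bpf_map *map2;

	if (!obj)
		return -1;
	/* the library's map survives linking under its original name */
	map2 = bpf_object__find_map_by_name(obj, "map2");
	if (!map2 || bpf_object__load(obj)) {
		bpf_object__close(obj);
		return -1;
	}
	/* ... use bpf_map__fd(map2) for lookups ... */
	bpf_object__close(obj);
	return 0;
}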
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
index 553a282d816a..548660e299a5 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
@@ -9,9 +9,8 @@
#include <bpf/bpf_helpers.h>
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
+#include "bpf_compiler.h"
+#include "bpf_misc.h"
/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */
#define TCP_MEM_LOOPS 28 /* because 30 doesn't fit into 512 bytes of stack */
@@ -30,7 +29,7 @@ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
@@ -59,7 +58,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
index 2b64bc563a12..81249d119a8b 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
@@ -9,9 +9,8 @@
#include <bpf/bpf_helpers.h>
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
+#include "bpf_compiler.h"
+#include "bpf_misc.h"
/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */
#define TCP_MEM_LOOPS 20 /* because 30 doesn't fit into 512 bytes of stack */
@@ -30,7 +29,7 @@ static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
@@ -57,7 +56,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
index 5489823c83fc..bbdd08764789 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
@@ -9,16 +9,15 @@
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+#include "bpf_misc.h"
+
/* Max supported length of a string with unsigned long in base 10 (pow2 - 1). */
#define MAX_ULONG_STR_LEN 0xF
/* Max supported length of sysctl value string (pow2). */
#define MAX_VALUE_STR_LEN 0x40
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
const char tcp_mem_name[] = "net/ipv4/tcp_mem";
static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
{
@@ -31,7 +30,7 @@ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
@@ -57,7 +56,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
diff --git a/tools/testing/selftests/bpf/progs/test_task_local_data.c b/tools/testing/selftests/bpf/progs/test_task_local_data.c
new file mode 100644
index 000000000000..fffafc013044
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_task_local_data.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+
+#include "task_local_data.bpf.h"
+
+struct tld_keys {
+ tld_key_t value0;
+ tld_key_t value1;
+ tld_key_t value2;
+ tld_key_t value_not_exist;
+};
+
+struct test_tld_struct {
+ __u64 a;
+ __u64 b;
+ __u64 c;
+ __u64 d;
+};
+
+int test_value0;
+int test_value1;
+struct test_tld_struct test_value2;
+
+SEC("syscall")
+int task_main(void *ctx)
+{
+ struct tld_object tld_obj;
+ struct test_tld_struct *struct_p;
+ struct task_struct *task;
+ int err, *int_p;
+
+ task = bpf_get_current_task_btf();
+ err = tld_object_init(task, &tld_obj);
+ if (err)
+ return 1;
+
+ int_p = tld_get_data(&tld_obj, value0, "value0", sizeof(int));
+ if (int_p)
+ test_value0 = *int_p;
+ else
+ return 2;
+
+ int_p = tld_get_data(&tld_obj, value1, "value1", sizeof(int));
+ if (int_p)
+ test_value1 = *int_p;
+ else
+ return 3;
+
+ struct_p = tld_get_data(&tld_obj, value2, "value2", sizeof(struct test_tld_struct));
+ if (struct_p)
+ test_value2 = *struct_p;
+ else
+ return 4;
+
+ int_p = tld_get_data(&tld_obj, value_not_exist, "value_not_exist", sizeof(int));
+ if (int_p)
+ return 5;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_task_pt_regs.c b/tools/testing/selftests/bpf/progs/test_task_pt_regs.c
new file mode 100644
index 000000000000..1926facba122
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_task_pt_regs.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define PT_REGS_SIZE sizeof(struct pt_regs)
+
+/*
+ * The kernel struct pt_regs isn't exported in its entirety to userspace.
+ * Pass it as an array to task_pt_regs.c
+ */
+char current_regs[PT_REGS_SIZE] = {};
+char ctx_regs[PT_REGS_SIZE] = {};
+int uprobe_res = 0;
+
+SEC("uprobe")
+int handle_uprobe(struct pt_regs *ctx)
+{
+ struct task_struct *current;
+ struct pt_regs *regs;
+
+ current = bpf_get_current_task_btf();
+ regs = (struct pt_regs *) bpf_task_pt_regs(current);
+ if (bpf_probe_read_kernel(current_regs, PT_REGS_SIZE, regs))
+ return 0;
+ if (bpf_probe_read_kernel(ctx_regs, PT_REGS_SIZE, ctx))
+ return 0;
+
+ /* Prove that uprobe was run */
+ uprobe_res = 1;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
new file mode 100644
index 000000000000..0b74b8bd22e8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Bytedance */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+const volatile int local_pid;
+const volatile __u64 cgid;
+int remote_pid;
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(tp_btf_run, struct task_struct *task, u64 clone_flags)
+{
+ struct cgroup *cgrp = NULL;
+ struct task_struct *acquired;
+
+ if (local_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ acquired = bpf_task_acquire(task);
+ if (!acquired)
+ return 0;
+
+ if (local_pid == acquired->tgid)
+ goto out;
+
+ cgrp = bpf_cgroup_from_id(cgid);
+ if (!cgrp)
+ goto out;
+
+ if (bpf_task_under_cgroup(acquired, cgrp))
+ remote_pid = acquired->tgid;
+
+out:
+ if (cgrp)
+ bpf_cgroup_release(cgrp);
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+SEC("lsm.s/bpf")
+int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
+{
+ struct cgroup *cgrp = NULL;
+ struct task_struct *task;
+ int ret = 0;
+
+ task = bpf_get_current_task_btf();
+ if (local_pid != task->pid)
+ return 0;
+
+ if (cmd != BPF_LINK_CREATE)
+ return 0;
+
+ /* 1 is the root cgroup */
+ cgrp = bpf_cgroup_from_id(1);
+ if (!cgrp)
+ goto out;
+ if (!bpf_task_under_cgroup(task, cgrp))
+ ret = -1;
+ bpf_cgroup_release(cgrp);
+
+out:
+ return ret;
+}
+
+char _license[] SEC("license") = "GPL";
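
The cgid input here is a cgroup v2 id, which user space obtains from the cgroup directory through name_to_handle_at(); the handle payload is the 64-bit id that bpf_cgroup_from_id() looks up. A sketch (the cgroup path is an assumption):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <string.h>

static uint64_t get_cgroup_id(const char *path)
{
	char buf[sizeof(struct file_handle) + sizeof(uint64_t)] = {};
	struct file_handle *fh = (struct file_handle *)buf;
	int mount_id;
	uint64_t id;

	fh->handle_bytes = sizeof(uint64_t);
	if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0))
		return 0;
	memcpy(&id, fh->f_handle, sizeof(id));	/* kernfs id of the cgroup */
	return id;
}

/* e.g. get_cgroup_id("/sys/fs/cgroup/test") */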
diff --git a/tools/testing/selftests/bpf/progs/test_tc_bpf.c b/tools/testing/selftests/bpf/progs/test_tc_bpf.c
new file mode 100644
index 000000000000..ef7da419632a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_bpf.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+
+/* Dummy prog to test TC-BPF API */
+
+SEC("tc")
+int cls(struct __sk_buff *skb)
+{
+ return 0;
+}
+
+/* Prog to verify tc-bpf without cap_sys_admin and cap_perfmon */
+SEC("tcx/ingress")
+int pkt_ptr(struct __sk_buff *skb)
+{
+ struct iphdr *iph = (void *)(long)skb->data + sizeof(struct ethhdr);
+
+ if ((long)(iph + 1) > (long)skb->data_end)
+ return 1;
+ return 0;
+}
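
The dummy cls() program above is exercised through libbpf's TC wrappers rather than the tc(8) binary. A sketch of the attach path (ifindex and prog fd are assumptions; error handling trimmed):

#include <bpf/libbpf.h>

static int attach_cls(int ifindex, int prog_fd)
{
	LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = ifindex,
		    .attach_point = BPF_TC_INGRESS);
	LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1,
		    .prog_fd = prog_fd);

	/* creates the clsact qdisc; real code tolerates -EEXIST */
	if (bpf_tc_hook_create(&hook))
		return -1;
	return bpf_tc_attach(&hook, &opts);
}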
diff --git a/tools/testing/selftests/bpf/progs/test_tc_change_tail.c b/tools/testing/selftests/bpf/progs/test_tc_change_tail.c
new file mode 100644
index 000000000000..fcba8299f0bc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_change_tail.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE __PAGE_SIZE
+#endif
+#define BPF_SKB_MAX_LEN (PAGE_SIZE << 2)
+
+long change_tail_ret = 1;
+
+static __always_inline struct iphdr *parse_ip_header(struct __sk_buff *skb, int *ip_proto)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+ struct ethhdr *eth = data;
+ struct iphdr *iph;
+
+ /* Verify Ethernet header */
+ if ((void *)(data + sizeof(*eth)) > data_end)
+ return NULL;
+
+ /* Skip Ethernet header to get to IP header */
+ iph = (void *)(data + sizeof(struct ethhdr));
+
+ /* Verify IP header */
+ if ((void *)(data + sizeof(struct ethhdr) + sizeof(*iph)) > data_end)
+ return NULL;
+
+ /* Basic IP header validation */
+ if (iph->version != 4) /* Only support IPv4 */
+ return NULL;
+
+ if (iph->ihl < 5) /* Minimum IP header length */
+ return NULL;
+
+ *ip_proto = iph->protocol;
+ return iph;
+}
+
+static __always_inline struct udphdr *parse_udp_header(struct __sk_buff *skb, struct iphdr *iph)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *hdr = (void *)iph;
+ struct udphdr *udp;
+
+ /* Calculate UDP header position */
+ udp = hdr + (iph->ihl * 4);
+ hdr = (void *)udp;
+
+ /* Verify UDP header bounds */
+ if ((void *)(hdr + sizeof(*udp)) > data_end)
+ return NULL;
+
+ return udp;
+}
+
+SEC("tc/ingress")
+int change_tail(struct __sk_buff *skb)
+{
+ int len = skb->len;
+ struct udphdr *udp;
+ struct iphdr *iph;
+ void *data_end;
+ char *payload;
+ int ip_proto;
+
+ bpf_skb_pull_data(skb, len);
+
+ data_end = (void *)(long)skb->data_end;
+ iph = parse_ip_header(skb, &ip_proto);
+ if (!iph)
+ return TCX_PASS;
+
+ if (ip_proto != IPPROTO_UDP)
+ return TCX_PASS;
+
+ udp = parse_udp_header(skb, iph);
+ if (!udp)
+ return TCX_PASS;
+
+ payload = (char *)udp + (sizeof(struct udphdr));
+ if (payload + 1 > (char *)data_end)
+ return TCX_PASS;
+
+ if (payload[0] == 'T') { /* Trim the packet */
+ change_tail_ret = bpf_skb_change_tail(skb, len - 1, 0);
+ if (!change_tail_ret)
+ bpf_skb_change_tail(skb, len, 0);
+ return TCX_PASS;
+ } else if (payload[0] == 'G') { /* Grow the packet */
+ change_tail_ret = bpf_skb_change_tail(skb, len + 1, 0);
+ if (!change_tail_ret)
+ bpf_skb_change_tail(skb, len, 0);
+ return TCX_PASS;
+ } else if (payload[0] == 'E') { /* Error */
+ change_tail_ret = bpf_skb_change_tail(skb, BPF_SKB_MAX_LEN, 0);
+ return TCX_PASS;
+ } else if (payload[0] == 'Z') { /* Zero */
+ change_tail_ret = bpf_skb_change_tail(skb, 0, 0);
+ return TCX_PASS;
+ }
+ return TCX_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
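
The verdict in change_tail() is selected by the first payload byte ('T' trim, 'G' grow, 'E' oversized request, 'Z' truncate to zero). A minimal UDP trigger from user space (destination address and port are assumptions of the sketch):

#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

static int send_probe(const char *dst_ip, int port, char action)
{
	struct sockaddr_in sa = { .sin_family = AF_INET,
				  .sin_port = htons(port) };
	char buf[8] = { action };	/* first byte picks the case */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	inet_pton(AF_INET, dst_ip, &sa.sin_addr);
	sendto(fd, buf, sizeof(buf), 0, (struct sockaddr *)&sa, sizeof(sa));
	close(fd);
	return 0;
}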
diff --git a/tools/testing/selftests/bpf/progs/test_tc_dtime.c b/tools/testing/selftests/bpf/progs/test_tc_dtime.c
new file mode 100644
index 000000000000..ca8e8734d901
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_dtime.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022 Meta
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <linux/stddef.h>
+#include <linux/pkt_cls.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+/* veth_src --- veth_src_fwd --- veth_dst_fwd --- veth_dst
+ *           |                                  |
+ *  ns_src   |              ns_fwd              |  ns_dst
+ *
+ * ns_src and ns_dst: ENDHOST namespace
+ *            ns_fwd: Forwarding namespace
+ */
+
+#define ctx_ptr(field) (void *)(long)(field)
+
+#define ip4_src __bpf_htonl(0xac100164) /* 172.16.1.100 */
+#define ip4_dst __bpf_htonl(0xac100264) /* 172.16.2.100 */
+
+#define ip6_src { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x01, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe }
+#define ip6_dst { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x02, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe }
+
+#define v6_equal(a, b) (a.s6_addr32[0] == b.s6_addr32[0] && \
+ a.s6_addr32[1] == b.s6_addr32[1] && \
+ a.s6_addr32[2] == b.s6_addr32[2] && \
+ a.s6_addr32[3] == b.s6_addr32[3])
+
+volatile const __u32 IFINDEX_SRC;
+volatile const __u32 IFINDEX_DST;
+
+#define EGRESS_ENDHOST_MAGIC 0x0b9fbeef
+#define INGRESS_FWDNS_MAGIC 0x1b9fbeef
+#define EGRESS_FWDNS_MAGIC 0x2b9fbeef
+
+enum {
+ INGRESS_FWDNS_P100,
+ INGRESS_FWDNS_P101,
+ EGRESS_FWDNS_P100,
+ EGRESS_FWDNS_P101,
+ INGRESS_ENDHOST,
+ EGRESS_ENDHOST,
+ SET_DTIME,
+ __MAX_CNT,
+};
+
+enum {
+ TCP_IP6_CLEAR_DTIME,
+ TCP_IP4,
+ TCP_IP6,
+ UDP_IP4,
+ UDP_IP6,
+ TCP_IP4_RT_FWD,
+ TCP_IP6_RT_FWD,
+ UDP_IP4_RT_FWD,
+ UDP_IP6_RT_FWD,
+ UKN_TEST,
+ __NR_TESTS,
+};
+
+enum {
+ SRC_NS = 1,
+ DST_NS,
+};
+
+__u32 dtimes[__NR_TESTS][__MAX_CNT] = {};
+__u32 errs[__NR_TESTS][__MAX_CNT] = {};
+__u32 test = 0;
+
+static void inc_dtimes(__u32 idx)
+{
+ if (test < __NR_TESTS)
+ dtimes[test][idx]++;
+ else
+ dtimes[UKN_TEST][idx]++;
+}
+
+static void inc_errs(__u32 idx)
+{
+ if (test < __NR_TESTS)
+ errs[test][idx]++;
+ else
+ errs[UKN_TEST][idx]++;
+}
+
+static int skb_proto(int type)
+{
+ return type & 0xff;
+}
+
+static int skb_ns(int type)
+{
+ return (type >> 8) & 0xff;
+}
+
+static bool fwdns_clear_dtime(void)
+{
+ return test == TCP_IP6_CLEAR_DTIME;
+}
+
+static bool bpf_fwd(void)
+{
+ return test < TCP_IP4_RT_FWD;
+}
+
+static __u8 get_proto(void)
+{
+ switch (test) {
+ case UDP_IP4:
+ case UDP_IP6:
+ case UDP_IP4_RT_FWD:
+ case UDP_IP6_RT_FWD:
+ return IPPROTO_UDP;
+ default:
+ return IPPROTO_TCP;
+ }
+}
+
+/* -1: parse error: TC_ACT_SHOT
+ * 0: not testing traffic: TC_ACT_OK
+ * >0: first byte is the inet_proto, second byte has the netns
+ * of the sender
+ */
+static int skb_get_type(struct __sk_buff *skb)
+{
+ __u16 dst_ns_port = __bpf_htons(50000 + test);
+ void *data_end = ctx_ptr(skb->data_end);
+ void *data = ctx_ptr(skb->data);
+ __u8 inet_proto = 0, ns = 0;
+ struct ipv6hdr *ip6h;
+ __u16 sport, dport;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ struct udphdr *uh;
+ void *trans;
+
+ switch (skb->protocol) {
+ case __bpf_htons(ETH_P_IP):
+ iph = data + sizeof(struct ethhdr);
+ if (iph + 1 > data_end)
+ return -1;
+ if (iph->saddr == ip4_src)
+ ns = SRC_NS;
+ else if (iph->saddr == ip4_dst)
+ ns = DST_NS;
+ inet_proto = iph->protocol;
+ trans = iph + 1;
+ break;
+ case __bpf_htons(ETH_P_IPV6):
+ ip6h = data + sizeof(struct ethhdr);
+ if (ip6h + 1 > data_end)
+ return -1;
+ if (v6_equal(ip6h->saddr, (struct in6_addr){{ip6_src}}))
+ ns = SRC_NS;
+ else if (v6_equal(ip6h->saddr, (struct in6_addr){{ip6_dst}}))
+ ns = DST_NS;
+ inet_proto = ip6h->nexthdr;
+ trans = ip6h + 1;
+ break;
+ default:
+ return 0;
+ }
+
+ /* skb is not from src_ns or dst_ns, or it does not carry
+ * the IPPROTO under test.
+ */
+ if (!ns || inet_proto != get_proto())
+ return 0;
+
+ switch (inet_proto) {
+ case IPPROTO_TCP:
+ th = trans;
+ if (th + 1 > data_end)
+ return -1;
+ sport = th->source;
+ dport = th->dest;
+ break;
+ case IPPROTO_UDP:
+ uh = trans;
+ if (uh + 1 > data_end)
+ return -1;
+ sport = uh->source;
+ dport = uh->dest;
+ break;
+ default:
+ return 0;
+ }
+
+ /* The skb is the testing traffic */
+ if ((ns == SRC_NS && dport == dst_ns_port) ||
+ (ns == DST_NS && sport == dst_ns_port))
+ return (ns << 8 | inet_proto);
+
+ return 0;
+}
+
+/* format: direction@iface@netns
+ * egress@veth_(src|dst)@ns_(src|dst)
+ */
+SEC("tc")
+int egress_host(struct __sk_buff *skb)
+{
+ int skb_type;
+
+ skb_type = skb_get_type(skb);
+ if (skb_type == -1)
+ return TC_ACT_SHOT;
+ if (!skb_type)
+ return TC_ACT_OK;
+
+ if (skb_proto(skb_type) == IPPROTO_TCP) {
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC &&
+ skb->tstamp)
+ inc_dtimes(EGRESS_ENDHOST);
+ else
+ inc_errs(EGRESS_ENDHOST);
+ } else if (skb_proto(skb_type) == IPPROTO_UDP) {
+ if (skb->tstamp_type == BPF_SKB_CLOCK_TAI &&
+ skb->tstamp)
+ inc_dtimes(EGRESS_ENDHOST);
+ else
+ inc_errs(EGRESS_ENDHOST);
+ } else {
+ if (skb->tstamp_type == BPF_SKB_CLOCK_REALTIME &&
+ skb->tstamp)
+ inc_errs(EGRESS_ENDHOST);
+ }
+
+ skb->tstamp = EGRESS_ENDHOST_MAGIC;
+
+ return TC_ACT_OK;
+}
+
+/* ingress@veth_(src|dst)@ns_(src|dst) */
+SEC("tc")
+int ingress_host(struct __sk_buff *skb)
+{
+ int skb_type;
+
+ skb_type = skb_get_type(skb);
+ if (skb_type == -1)
+ return TC_ACT_SHOT;
+ if (!skb_type)
+ return TC_ACT_OK;
+
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC &&
+ skb->tstamp == EGRESS_FWDNS_MAGIC)
+ inc_dtimes(INGRESS_ENDHOST);
+ else
+ inc_errs(INGRESS_ENDHOST);
+
+ return TC_ACT_OK;
+}
+
+/* ingress@veth_(src|dst)_fwd@ns_fwd priority 100 */
+SEC("tc")
+int ingress_fwdns_prio100(struct __sk_buff *skb)
+{
+ int skb_type;
+
+ skb_type = skb_get_type(skb);
+ if (skb_type == -1)
+ return TC_ACT_SHOT;
+ if (!skb_type)
+ return TC_ACT_OK;
+
+ /* delivery_time is only available at ingress if the tc-bpf
+ * reads the skb->tstamp_type.
+ */
+ if (skb->tstamp == EGRESS_ENDHOST_MAGIC)
+ inc_errs(INGRESS_FWDNS_P100);
+
+ if (fwdns_clear_dtime())
+ skb->tstamp = 0;
+
+ return TC_ACT_UNSPEC;
+}
+
+/* egress@veth_(src|dst)_fwd@ns_fwd priority 100 */
+SEC("tc")
+int egress_fwdns_prio100(struct __sk_buff *skb)
+{
+ int skb_type;
+
+ skb_type = skb_get_type(skb);
+ if (skb_type == -1)
+ return TC_ACT_SHOT;
+ if (!skb_type)
+ return TC_ACT_OK;
+
+ /* delivery_time is always available at egress, even if the
+ * tc-bpf did not read the tstamp_type.
+ */
+ if (skb->tstamp == INGRESS_FWDNS_MAGIC)
+ inc_dtimes(EGRESS_FWDNS_P100);
+ else
+ inc_errs(EGRESS_FWDNS_P100);
+
+ if (fwdns_clear_dtime())
+ skb->tstamp = 0;
+
+ return TC_ACT_UNSPEC;
+}
+
+/* ingress@veth_(src|dst)_fwd@ns_fwd priority 101 */
+SEC("tc")
+int ingress_fwdns_prio101(struct __sk_buff *skb)
+{
+ int skb_type;
+
+ skb_type = skb_get_type(skb);
+ if (skb_type == -1 || !skb_type)
+ /* Should have been handled in prio100 */
+ return TC_ACT_SHOT;
+
+ if (skb->tstamp_type) {
+ if (fwdns_clear_dtime() ||
+ (skb->tstamp_type != BPF_SKB_CLOCK_MONOTONIC &&
+ skb->tstamp_type != BPF_SKB_CLOCK_TAI) ||
+ skb->tstamp != EGRESS_ENDHOST_MAGIC)
+ inc_errs(INGRESS_FWDNS_P101);
+ else
+ inc_dtimes(INGRESS_FWDNS_P101);
+ } else {
+ if (!fwdns_clear_dtime())
+ inc_errs(INGRESS_FWDNS_P101);
+ }
+
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC) {
+ skb->tstamp = INGRESS_FWDNS_MAGIC;
+ } else {
+ if (bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
+ BPF_SKB_CLOCK_MONOTONIC))
+ inc_errs(SET_DTIME);
+ }
+
+ if (skb_ns(skb_type) == SRC_NS)
+ return bpf_fwd() ?
+ bpf_redirect_neigh(IFINDEX_DST, NULL, 0, 0) : TC_ACT_OK;
+ else
+ return bpf_fwd() ?
+ bpf_redirect_neigh(IFINDEX_SRC, NULL, 0, 0) : TC_ACT_OK;
+}
+
+/* egress@veth_(src|dst)_fwd@ns_fwd priority 101 */
+SEC("tc")
+int egress_fwdns_prio101(struct __sk_buff *skb)
+{
+ int skb_type;
+
+ skb_type = skb_get_type(skb);
+ if (skb_type == -1 || !skb_type)
+ /* Should have been handled in prio100 */
+ return TC_ACT_SHOT;
+
+ if (skb->tstamp_type) {
+ if (fwdns_clear_dtime() ||
+ skb->tstamp_type != BPF_SKB_CLOCK_MONOTONIC ||
+ skb->tstamp != INGRESS_FWDNS_MAGIC)
+ inc_errs(EGRESS_FWDNS_P101);
+ else
+ inc_dtimes(EGRESS_FWDNS_P101);
+ } else {
+ if (!fwdns_clear_dtime())
+ inc_errs(EGRESS_FWDNS_P101);
+ }
+
+ if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC) {
+ skb->tstamp = EGRESS_FWDNS_MAGIC;
+ } else {
+ if (bpf_skb_set_tstamp(skb, EGRESS_FWDNS_MAGIC,
+ BPF_SKB_CLOCK_MONOTONIC))
+ inc_errs(SET_DTIME);
+ }
+
+ return TC_ACT_OK;
+}
+
+char __license[] SEC("license") = "GPL";
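
As the priority-100/101 split above demonstrates, a forwarding-path program only sees a meaningful skb->tstamp at ingress when it also checks the clock base. A minimal consumer sketch (reusing this file's includes):

SEC("tc")
int read_dtime(struct __sk_buff *skb)
{
	/* mono/tai delivery times survive forwarding; anything else
	 * at ingress is not a delivery time.
	 */
	if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC && skb->tstamp)
		bpf_printk("mono delivery time: %llu", skb->tstamp);
	return TC_ACT_OK;
}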
diff --git a/tools/testing/selftests/bpf/progs/test_tc_edt.c b/tools/testing/selftests/bpf/progs/test_tc_edt.c
index bf28814bfde5..4f6f03122d61 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_edt.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_edt.c
@@ -14,22 +14,22 @@
#define TIME_HORIZON_NS (2000 * 1000 * 1000)
#define NS_PER_SEC 1000000000
#define ECN_HORIZON_NS 5000000
-#define THROTTLE_RATE_BPS (5 * 1000 * 1000)
/* flow_key => last_tstamp timestamp used */
-struct bpf_map_def SEC("maps") flow_map = {
- .type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(uint32_t),
- .value_size = sizeof(uint64_t),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, uint32_t);
+ __type(value, uint64_t);
+ __uint(max_entries, 1);
+} flow_map SEC(".maps");
+
+/* throttle rate in bytes per second, set by user space */
+uint64_t target_rate;
static inline int throttle_flow(struct __sk_buff *skb)
{
int key = 0;
uint64_t *last_tstamp = bpf_map_lookup_elem(&flow_map, &key);
- uint64_t delay_ns = ((uint64_t)skb->len) * NS_PER_SEC /
- THROTTLE_RATE_BPS;
+ uint64_t delay_ns = ((uint64_t)skb->len) * NS_PER_SEC / target_rate;
uint64_t now = bpf_ktime_get_ns();
uint64_t tstamp, next_tstamp = 0;
@@ -70,7 +70,7 @@ static inline int handle_tcp(struct __sk_buff *skb, struct tcphdr *tcp)
if ((void *)(tcp + 1) > data_end)
return TC_ACT_SHOT;
if (tcp->dest == bpf_htons(9000))
return throttle_flow(skb);
return TC_ACT_OK;
@@ -99,7 +99,8 @@ static inline int handle_ipv4(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("cls_test") int tc_prog(struct __sk_buff *skb)
+SEC("tc")
+int tc_prog(struct __sk_buff *skb)
{
if (skb->protocol == bpf_htons(ETH_P_IP))
return handle_ipv4(skb);
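
A worked check of throttle_flow()'s arithmetic, assuming target_rate is in bytes per second like the THROTTLE_RATE_BPS constant it replaces:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t rate = 5 * 1000 * 1000;	/* 5,000,000 bytes/s */
	uint64_t len = 1500;			/* one full-size packet */
	uint64_t delay_ns = len * 1000000000ULL / rate;

	/* 300 us of spacing per 1500-byte packet: skb->tstamp is pushed
	 * that far beyond the flow's previous timestamp.
	 */
	assert(delay_ns == 300000);
	return 0;
}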
diff --git a/tools/testing/selftests/bpf/progs/test_tc_link.c b/tools/testing/selftests/bpf/progs/test_tc_link.c
new file mode 100644
index 000000000000..630f12e51b07
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_link.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Isovalent */
+#include <stdbool.h>
+
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/stddef.h>
+#include <linux/if_packet.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+char LICENSE[] SEC("license") = "GPL";
+
+bool seen_tc1;
+bool seen_tc2;
+bool seen_tc3;
+bool seen_tc4;
+bool seen_tc5;
+bool seen_tc6;
+bool seen_tc7;
+bool seen_tc8;
+
+bool set_type;
+
+bool seen_eth;
+bool seen_host;
+bool seen_mcast;
+
+int mark, prio;
+unsigned short headroom, tailroom;
+
+SEC("tc/ingress")
+int tc1(struct __sk_buff *skb)
+{
+ struct ethhdr eth = {};
+
+ if (skb->protocol != __bpf_constant_htons(ETH_P_IP))
+ goto out;
+ if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)))
+ goto out;
+ seen_eth = eth.h_proto == bpf_htons(ETH_P_IP);
+ seen_host = skb->pkt_type == PACKET_HOST;
+ if (seen_host && set_type) {
+ eth.h_dest[0] = 4;
+ if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0))
+ goto fail;
+ bpf_skb_change_type(skb, PACKET_MULTICAST);
+ }
+out:
+ seen_tc1 = true;
+fail:
+ return TCX_NEXT;
+}
+
+SEC("tc/egress")
+int tc2(struct __sk_buff *skb)
+{
+ seen_tc2 = true;
+ return TCX_NEXT;
+}
+
+SEC("tc/egress")
+int tc3(struct __sk_buff *skb)
+{
+ seen_tc3 = true;
+ return TCX_NEXT;
+}
+
+SEC("tc/egress")
+int tc4(struct __sk_buff *skb)
+{
+ seen_tc4 = true;
+ return TCX_NEXT;
+}
+
+SEC("tc/egress")
+int tc5(struct __sk_buff *skb)
+{
+ seen_tc5 = true;
+ return TCX_PASS;
+}
+
+SEC("tc/egress")
+int tc6(struct __sk_buff *skb)
+{
+ seen_tc6 = true;
+ return TCX_PASS;
+}
+
+SEC("tc/ingress")
+int tc7(struct __sk_buff *skb)
+{
+ struct ethhdr eth = {};
+
+ if (skb->protocol != __bpf_constant_htons(ETH_P_IP))
+ goto out;
+ if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)))
+ goto out;
+ if (eth.h_dest[0] == 4 && set_type) {
+ seen_mcast = skb->pkt_type == PACKET_MULTICAST;
+ bpf_skb_change_type(skb, PACKET_HOST);
+ }
+out:
+ seen_tc7 = true;
+ return TCX_PASS;
+}
+
+struct sk_buff {
+ struct net_device *dev;
+};
+
+struct net_device {
+ unsigned short needed_headroom;
+ unsigned short needed_tailroom;
+};
+
+SEC("tc/egress")
+int tc8(struct __sk_buff *skb)
+{
+ struct net_device *dev = BPF_CORE_READ((struct sk_buff *)skb, dev);
+
+ seen_tc8 = true;
+ mark = skb->mark;
+ prio = skb->priority;
+ headroom = BPF_CORE_READ(dev, needed_headroom);
+ tailroom = BPF_CORE_READ(dev, needed_tailroom);
+ return TCX_PASS;
+}
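
tc1..tc8 above target the tcx attach API, where programs are held by bpf_link rather than by a cls_bpf filter, so ordering and replacement are managed by the kernel. A sketch with libbpf 1.3+ (ifindex is an assumption):

#include <bpf/libbpf.h>

static struct bpf_link *attach_tc1(struct bpf_object *obj, int ifindex)
{
	struct bpf_program *prog =
		bpf_object__find_program_by_name(obj, "tc1");

	/* NULL opts: append at the default position */
	return prog ? bpf_program__attach_tcx(prog, ifindex, NULL) : NULL;
}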
diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh.c b/tools/testing/selftests/bpf/progs/test_tc_neigh.c
index b985ac4e7a81..de15155f2609 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_neigh.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_neigh.c
@@ -33,17 +33,8 @@
a.s6_addr32[3] == b.s6_addr32[3])
#endif
-enum {
- dev_src,
- dev_dst,
-};
-
-struct bpf_map_def SEC("maps") ifindex_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 2,
-};
+volatile const __u32 IFINDEX_SRC;
+volatile const __u32 IFINDEX_DST;
static __always_inline bool is_remote_ep_v4(struct __sk_buff *skb,
__be32 addr)
@@ -79,14 +70,8 @@ static __always_inline bool is_remote_ep_v6(struct __sk_buff *skb,
return v6_equal(ip6h->daddr, addr);
}
-static __always_inline int get_dev_ifindex(int which)
-{
- int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which);
-
- return ifindex ? *ifindex : 0;
-}
-
-SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+SEC("tc")
+int tc_chk(struct __sk_buff *skb)
{
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
@@ -98,7 +83,8 @@ SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
return !raw[0] && !raw[1] && !raw[2] ? TC_ACT_SHOT : TC_ACT_OK;
}
-SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+SEC("tc")
+int tc_dst(struct __sk_buff *skb)
{
__u8 zero[ETH_ALEN * 2];
bool redirect = false;
@@ -108,7 +94,7 @@ SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_src));
break;
case __bpf_constant_htons(ETH_P_IPV6):
- redirect = is_remote_ep_v6(skb, (struct in6_addr)ip6_src);
+ redirect = is_remote_ep_v6(skb, (struct in6_addr){{ip6_src}});
break;
}
@@ -119,10 +105,11 @@ SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0)
return TC_ACT_SHOT;
- return bpf_redirect_neigh(get_dev_ifindex(dev_src), NULL, 0, 0);
+ return bpf_redirect_neigh(IFINDEX_SRC, NULL, 0, 0);
}
-SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+SEC("tc")
+int tc_src(struct __sk_buff *skb)
{
__u8 zero[ETH_ALEN * 2];
bool redirect = false;
@@ -132,7 +119,7 @@ SEC("src_ingress") int tc_src(struct __sk_buff *skb)
redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_dst));
break;
case __bpf_constant_htons(ETH_P_IPV6):
- redirect = is_remote_ep_v6(skb, (struct in6_addr)ip6_dst);
+ redirect = is_remote_ep_v6(skb, (struct in6_addr){{ip6_dst}});
break;
}
@@ -143,7 +130,7 @@ SEC("src_ingress") int tc_src(struct __sk_buff *skb)
if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0)
return TC_ACT_SHOT;
- return bpf_redirect_neigh(get_dev_ifindex(dev_dst), NULL, 0, 0);
+ return bpf_redirect_neigh(IFINDEX_DST, NULL, 0, 0);
}
char __license[] SEC("license") = "GPL";
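
With the ifindex map replaced by volatile const globals, user space now fills the values in between skeleton open and load, and the verifier sees them as constants. A sketch following the usual bpftool skeleton naming:

#include "test_tc_neigh.skel.h"

static struct test_tc_neigh *open_load(int ifindex_src, int ifindex_dst)
{
	struct test_tc_neigh *skel = test_tc_neigh__open();

	if (!skel)
		return NULL;
	skel->rodata->IFINDEX_SRC = ifindex_src;	/* before load */
	skel->rodata->IFINDEX_DST = ifindex_dst;
	if (test_tc_neigh__load(skel)) {
		test_tc_neigh__destroy(skel);
		return NULL;
	}
	return skel;
}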
diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
index d82ed3457030..ec4cce19362d 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
@@ -75,7 +75,8 @@ static __always_inline int fill_fib_params_v6(struct __sk_buff *skb,
return 0;
}
-SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+SEC("tc")
+int tc_chk(struct __sk_buff *skb)
{
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
@@ -142,12 +143,14 @@ static __always_inline int tc_redir(struct __sk_buff *skb)
/* these are identical, but keep them separate for compatibility with the
* section names expected by test_tc_redirect.sh
*/
-SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+SEC("tc")
+int tc_dst(struct __sk_buff *skb)
{
return tc_redir(skb);
}
-SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+SEC("tc")
+int tc_src(struct __sk_buff *skb)
{
return tc_redir(skb);
}
diff --git a/tools/testing/selftests/bpf/progs/test_tc_peer.c b/tools/testing/selftests/bpf/progs/test_tc_peer.c
index fc84a7685aa2..365eacb5dc34 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_peer.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_peer.c
@@ -5,41 +5,59 @@
#include <linux/bpf.h>
#include <linux/stddef.h>
#include <linux/pkt_cls.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
#include <bpf/bpf_helpers.h>
-enum {
- dev_src,
- dev_dst,
-};
+volatile const __u32 IFINDEX_SRC;
+volatile const __u32 IFINDEX_DST;
-struct bpf_map_def SEC("maps") ifindex_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 2,
-};
+static const __u8 src_mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
+static const __u8 dst_mac[] = {0x00, 0x22, 0x33, 0x44, 0x55, 0x66};
-static __always_inline int get_dev_ifindex(int which)
+SEC("tc")
+int tc_chk(struct __sk_buff *skb)
{
- int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which);
+ return TC_ACT_SHOT;
+}
- return ifindex ? *ifindex : 0;
+SEC("tc")
+int tc_dst(struct __sk_buff *skb)
+{
+ return bpf_redirect_peer(IFINDEX_SRC, 0);
}
-SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+SEC("tc")
+int tc_src(struct __sk_buff *skb)
{
- return TC_ACT_SHOT;
+ return bpf_redirect_peer(IFINDEX_DST, 0);
}
-SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+SEC("tc")
+int tc_dst_l3(struct __sk_buff *skb)
{
- return bpf_redirect_peer(get_dev_ifindex(dev_src), 0);
+ return bpf_redirect(IFINDEX_SRC, 0);
}
-SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+SEC("tc")
+int tc_src_l3(struct __sk_buff *skb)
{
- return bpf_redirect_peer(get_dev_ifindex(dev_dst), 0);
+ __u16 proto = skb->protocol;
+
+ if (bpf_skb_change_head(skb, ETH_HLEN, 0) != 0)
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_store_bytes(skb, 0, &src_mac, ETH_ALEN, 0) != 0)
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_store_bytes(skb, ETH_ALEN, &dst_mac, ETH_ALEN, 0) != 0)
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_store_bytes(skb, ETH_ALEN + ETH_ALEN, &proto, sizeof(__u16), 0) != 0)
+ return TC_ACT_SHOT;
+
+ return bpf_redirect_peer(IFINDEX_DST, 0);
}
char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
index 37bce7a7c394..7330c61b5730 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
@@ -2,36 +2,47 @@
/* In-place tunneling */
-#include <stdbool.h>
-#include <string.h>
-
-#include <linux/stddef.h>
-#include <linux/bpf.h>
-#include <linux/if_ether.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/mpls.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <linux/pkt_cls.h>
-#include <linux/types.h>
+#include <vmlinux.h>
-#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_tracing_net.h"
+#include "bpf_compiler.h"
+
+#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
static const int cfg_port = 8000;
static const int cfg_udp_src = 20000;
+#define ETH_P_MPLS_UC 0x8847
+#define ETH_P_TEB 0x6558
+
+#define MPLS_LS_S_MASK 0x00000100
+#define BPF_F_ADJ_ROOM_ENCAP_L2(len) \
+ (((__u64)len & BPF_ADJ_ROOM_ENCAP_L2_MASK) \
+ << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
+
+#define L2_PAD_SZ (sizeof(struct vxlanhdr) + ETH_HLEN)
+
#define UDP_PORT 5555
#define MPLS_OVER_UDP_PORT 6635
#define ETH_OVER_UDP_PORT 7777
+#define VXLAN_UDP_PORT 8472
+
+#define EXTPROTO_VXLAN 0x1
+
+#define VXLAN_FLAGS bpf_htonl(1<<27)
+#define VNI_ID 1
+#define VXLAN_VNI bpf_htonl(VNI_ID << 8)
+
+#ifndef NEXTHDR_DEST
+#define NEXTHDR_DEST 60
+#endif
/* MPLS label 1000 with S bit (last label) set and ttl of 255. */
static const __u32 mpls_label = __bpf_constant_htonl(1000 << 12 |
MPLS_LS_S_MASK | 0xff);
-
struct gre_hdr {
__be16 flags;
__be16 protocol;
@@ -45,13 +56,13 @@ union l4hdr {
struct v4hdr {
struct iphdr ip;
union l4hdr l4hdr;
- __u8 pad[16]; /* enough space for L2 header */
+ __u8 pad[L2_PAD_SZ]; /* space for L2 header / vxlan header ... */
} __attribute__((packed));
struct v6hdr {
struct ipv6hdr ip;
union l4hdr l4hdr;
- __u8 pad[16]; /* enough space for L2 header */
+ __u8 pad[L2_PAD_SZ]; /* space for L2 header / vxlan header ... */
} __attribute__((packed));
static __always_inline void set_ipv4_csum(struct iphdr *iph)
@@ -62,21 +73,22 @@ static __always_inline void set_ipv4_csum(struct iphdr *iph)
iph->check = 0;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0, csum = 0; i < sizeof(*iph) >> 1; i++)
csum += *iph16++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
}
-static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
- __u16 l2_proto)
+static __always_inline int __encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
+ __u16 l2_proto, __u16 ext_proto)
{
+ struct iphdr iph_inner = {0};
__u16 udp_dst = UDP_PORT;
- struct iphdr iph_inner;
struct v4hdr h_outer;
struct tcphdr tcph;
int olen, l2_len;
+ __u8 *l2_hdr = NULL;
int tcp_off;
__u64 flags;
@@ -99,7 +111,6 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
return TC_ACT_OK;
/* Derive the IPv4 header fields from the IPv6 header */
- memset(&iph_inner, 0, sizeof(iph_inner));
iph_inner.version = 4;
iph_inner.ihl = 5;
iph_inner.tot_len = bpf_htons(sizeof(iph6_inner) +
@@ -141,7 +152,11 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
break;
case ETH_P_TEB:
l2_len = ETH_HLEN;
- udp_dst = ETH_OVER_UDP_PORT;
+ if (ext_proto & EXTPROTO_VXLAN) {
+ udp_dst = VXLAN_UDP_PORT;
+ l2_len += sizeof(struct vxlanhdr);
+ } else
+ udp_dst = ETH_OVER_UDP_PORT;
break;
}
flags |= BPF_F_ADJ_ROOM_ENCAP_L2(l2_len);
@@ -171,14 +186,26 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
}
/* add L2 encap (if specified) */
+ l2_hdr = (__u8 *)&h_outer + olen;
switch (l2_proto) {
case ETH_P_MPLS_UC:
- *((__u32 *)((__u8 *)&h_outer + olen)) = mpls_label;
+ *(__u32 *)l2_hdr = mpls_label;
break;
case ETH_P_TEB:
- if (bpf_skb_load_bytes(skb, 0, (__u8 *)&h_outer + olen,
- ETH_HLEN))
+ flags |= BPF_F_ADJ_ROOM_ENCAP_L2_ETH;
+
+ if (ext_proto & EXTPROTO_VXLAN) {
+ struct vxlanhdr *vxlan_hdr = (struct vxlanhdr *)l2_hdr;
+
+ vxlan_hdr->vx_flags = VXLAN_FLAGS;
+ vxlan_hdr->vx_vni = VXLAN_VNI;
+
+ l2_hdr += sizeof(struct vxlanhdr);
+ }
+
+ if (bpf_skb_load_bytes(skb, 0, l2_hdr, ETH_HLEN))
return TC_ACT_SHOT;
+
break;
}
olen += l2_len;
@@ -214,14 +241,21 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
return TC_ACT_OK;
}
-static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
+static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
__u16 l2_proto)
{
+ return __encap_ipv4(skb, encap_proto, l2_proto, 0);
+}
+
+static __always_inline int __encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
+ __u16 l2_proto, __u16 ext_proto)
+{
__u16 udp_dst = UDP_PORT;
struct ipv6hdr iph_inner;
struct v6hdr h_outer;
struct tcphdr tcph;
int olen, l2_len;
+ __u8 *l2_hdr = NULL;
__u16 tot_len;
__u64 flags;
@@ -249,7 +283,11 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
break;
case ETH_P_TEB:
l2_len = ETH_HLEN;
- udp_dst = ETH_OVER_UDP_PORT;
+ if (ext_proto & EXTPROTO_VXLAN) {
+ udp_dst = VXLAN_UDP_PORT;
+ l2_len += sizeof(struct vxlanhdr);
+		} else {
+			udp_dst = ETH_OVER_UDP_PORT;
+		}
break;
}
flags |= BPF_F_ADJ_ROOM_ENCAP_L2(l2_len);
@@ -267,7 +305,7 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src);
h_outer.l4hdr.udp.dest = bpf_htons(udp_dst);
tot_len = bpf_ntohs(iph_inner.payload_len) + sizeof(iph_inner) +
- sizeof(h_outer.l4hdr.udp);
+ sizeof(h_outer.l4hdr.udp) + l2_len;
h_outer.l4hdr.udp.check = 0;
h_outer.l4hdr.udp.len = bpf_htons(tot_len);
break;
@@ -278,13 +316,24 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
}
/* add L2 encap (if specified) */
+ l2_hdr = (__u8 *)&h_outer + olen;
switch (l2_proto) {
case ETH_P_MPLS_UC:
- *((__u32 *)((__u8 *)&h_outer + olen)) = mpls_label;
+ *(__u32 *)l2_hdr = mpls_label;
break;
case ETH_P_TEB:
- if (bpf_skb_load_bytes(skb, 0, (__u8 *)&h_outer + olen,
- ETH_HLEN))
+ flags |= BPF_F_ADJ_ROOM_ENCAP_L2_ETH;
+
+ if (ext_proto & EXTPROTO_VXLAN) {
+ struct vxlanhdr *vxlan_hdr = (struct vxlanhdr *)l2_hdr;
+
+ vxlan_hdr->vx_flags = VXLAN_FLAGS;
+ vxlan_hdr->vx_vni = VXLAN_VNI;
+
+ l2_hdr += sizeof(struct vxlanhdr);
+ }
+
+ if (bpf_skb_load_bytes(skb, 0, l2_hdr, ETH_HLEN))
return TC_ACT_SHOT;
break;
}
@@ -309,7 +358,67 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
return TC_ACT_OK;
}
-SEC("encap_ipip_none")
+static int encap_ipv6_ipip6(struct __sk_buff *skb)
+{
+ struct v6hdr h_outer = {0};
+ struct iphdr iph_inner;
+ struct tcphdr tcph;
+ struct ethhdr eth;
+ __u64 flags;
+ int olen;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
+ sizeof(iph_inner)) < 0)
+ return TC_ACT_OK;
+
+ /* filter only packets we want */
+ if (bpf_skb_load_bytes(skb, ETH_HLEN + (iph_inner.ihl << 2),
+ &tcph, sizeof(tcph)) < 0)
+ return TC_ACT_OK;
+
+ if (tcph.dest != __bpf_constant_htons(cfg_port))
+ return TC_ACT_OK;
+
+ olen = sizeof(h_outer.ip);
+
+ flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV6;
+
+ /* add room between mac and network header */
+ if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags))
+ return TC_ACT_SHOT;
+
+ /* prepare new outer network header */
+ h_outer.ip.version = 6;
+ h_outer.ip.hop_limit = iph_inner.ttl;
+ h_outer.ip.saddr.in6_u.u6_addr8[1] = 0xfd;
+ h_outer.ip.saddr.in6_u.u6_addr8[15] = 1;
+ h_outer.ip.daddr.in6_u.u6_addr8[1] = 0xfd;
+ h_outer.ip.daddr.in6_u.u6_addr8[15] = 2;
+ h_outer.ip.payload_len = iph_inner.tot_len;
+ h_outer.ip.nexthdr = IPPROTO_IPIP;
+
+ /* store new outer network header */
+ if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen,
+ BPF_F_INVALIDATE_HASH) < 0)
+ return TC_ACT_SHOT;
+
+ /* update eth->h_proto */
+ if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)) < 0)
+ return TC_ACT_SHOT;
+ eth.h_proto = bpf_htons(ETH_P_IPV6);
+ if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0) < 0)
+ return TC_ACT_SHOT;
+
+ return TC_ACT_OK;
+}
+
+static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
+ __u16 l2_proto)
+{
+ return __encap_ipv6(skb, encap_proto, l2_proto, 0);
+}
+
+SEC("tc")
int __encap_ipip_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -318,7 +427,7 @@ int __encap_ipip_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_gre_none")
+SEC("tc")
int __encap_gre_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -327,7 +436,7 @@ int __encap_gre_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_gre_mpls")
+SEC("tc")
int __encap_gre_mpls(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -336,7 +445,7 @@ int __encap_gre_mpls(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_gre_eth")
+SEC("tc")
int __encap_gre_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -345,7 +454,7 @@ int __encap_gre_eth(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_udp_none")
+SEC("tc")
int __encap_udp_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -354,7 +463,7 @@ int __encap_udp_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_udp_mpls")
+SEC("tc")
int __encap_udp_mpls(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -363,7 +472,7 @@ int __encap_udp_mpls(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_udp_eth")
+SEC("tc")
int __encap_udp_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -372,7 +481,18 @@ int __encap_udp_eth(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_sit_none")
+SEC("tc")
+int __encap_vxlan_eth(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+ return __encap_ipv4(skb, IPPROTO_UDP,
+ ETH_P_TEB,
+ EXTPROTO_VXLAN);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("tc")
int __encap_sit_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -381,7 +501,7 @@ int __encap_sit_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ip6tnl_none")
+SEC("tc")
int __encap_ip6tnl_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -390,7 +510,16 @@ int __encap_ip6tnl_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ip6gre_none")
+SEC("tc")
+int __encap_ipip6_none(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+ return encap_ipv6_ipip6(skb);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("tc")
int __encap_ip6gre_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -399,7 +528,7 @@ int __encap_ip6gre_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ip6gre_mpls")
+SEC("tc")
int __encap_ip6gre_mpls(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -408,7 +537,7 @@ int __encap_ip6gre_mpls(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ip6gre_eth")
+SEC("tc")
int __encap_ip6gre_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -417,7 +546,7 @@ int __encap_ip6gre_eth(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ip6udp_none")
+SEC("tc")
int __encap_ip6udp_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -426,7 +555,7 @@ int __encap_ip6udp_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ip6udp_mpls")
+SEC("tc")
int __encap_ip6udp_mpls(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -435,7 +564,7 @@ int __encap_ip6udp_mpls(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ip6udp_eth")
+SEC("tc")
int __encap_ip6udp_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -444,16 +573,46 @@ int __encap_ip6udp_eth(struct __sk_buff *skb)
return TC_ACT_OK;
}
+SEC("tc")
+int __encap_ip6vxlan_eth(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+ return __encap_ipv6(skb, IPPROTO_UDP,
+ ETH_P_TEB,
+ EXTPROTO_VXLAN);
+ else
+ return TC_ACT_OK;
+}
+
static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
{
- char buf[sizeof(struct v6hdr)];
+ __u64 flags = BPF_F_ADJ_ROOM_FIXED_GSO;
+ struct ipv6_opt_hdr ip6_opt_hdr;
struct gre_hdr greh;
struct udphdr udph;
int olen = len;
switch (proto) {
case IPPROTO_IPIP:
+ flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV4;
+ break;
case IPPROTO_IPV6:
+ flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV6;
+ break;
+ case NEXTHDR_DEST:
+ if (bpf_skb_load_bytes(skb, off + len, &ip6_opt_hdr,
+ sizeof(ip6_opt_hdr)) < 0)
+ return TC_ACT_OK;
+ switch (ip6_opt_hdr.nexthdr) {
+ case IPPROTO_IPIP:
+ flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV4;
+ break;
+ case IPPROTO_IPV6:
+ flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV6;
+ break;
+ default:
+ return TC_ACT_OK;
+ }
break;
case IPPROTO_GRE:
olen += sizeof(struct gre_hdr);
@@ -479,14 +638,16 @@ static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
case ETH_OVER_UDP_PORT:
olen += ETH_HLEN;
break;
+ case VXLAN_UDP_PORT:
+ olen += ETH_HLEN + sizeof(struct vxlanhdr);
+ break;
}
break;
default:
return TC_ACT_OK;
}
- if (bpf_skb_adjust_room(skb, -olen, BPF_ADJ_ROOM_MAC,
- BPF_F_ADJ_ROOM_FIXED_GSO))
+ if (bpf_skb_adjust_room(skb, -olen, BPF_ADJ_ROOM_MAC, flags))
return TC_ACT_SHOT;
return TC_ACT_OK;
@@ -519,7 +680,7 @@ static int decap_ipv6(struct __sk_buff *skb)
iph_outer.nexthdr);
}
-SEC("decap")
+SEC("tc")
int decap_f(struct __sk_buff *skb)
{
switch (skb->protocol) {
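The VXLAN paths above hinge on BPF_F_ADJ_ROOM_ENCAP_L2() folding the L2 header length into the upper byte of the bpf_skb_adjust_room() flags word, so the kernel knows how much of the reserved room is inner L2 rather than L3/L4 encap. A minimal host-side sketch of that packing (not part of the diff; the mask/shift values are mirrored from include/uapi/linux/bpf.h):

/* Demonstrates the flags packing used by the ETH_P_TEB + EXTPROTO_VXLAN
 * branch: l2_len = inner Ethernet header plus a VXLAN header.
 */
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN                     14
#define VXLAN_HLEN                   8     /* sizeof(struct vxlanhdr) */
#define BPF_ADJ_ROOM_ENCAP_L2_MASK   0xff  /* mirrored from uapi bpf.h */
#define BPF_ADJ_ROOM_ENCAP_L2_SHIFT  56
#define BPF_F_ADJ_ROOM_ENCAP_L2(len) \
	(((uint64_t)(len) & BPF_ADJ_ROOM_ENCAP_L2_MASK) << \
	 BPF_ADJ_ROOM_ENCAP_L2_SHIFT)

int main(void)
{
	int l2_len = ETH_HLEN + VXLAN_HLEN;
	uint64_t flags = BPF_F_ADJ_ROOM_ENCAP_L2(l2_len);

	/* prints l2_len=22 flags=0x1600000000000000 */
	printf("l2_len=%d flags=0x%016llx\n", l2_len,
	       (unsigned long long)flags);
	return 0;
}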
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c b/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c
deleted file mode 100644
index 47cbe2eeae43..000000000000
--- a/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c
+++ /dev/null
@@ -1,167 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018 Facebook
-// Copyright (c) 2019 Cloudflare
-
-#include <string.h>
-
-#include <linux/bpf.h>
-#include <linux/pkt_cls.h>
-#include <linux/if_ether.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <sys/socket.h>
-#include <linux/tcp.h>
-
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_endian.h>
-
-struct bpf_map_def SEC("maps") results = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u32),
- .max_entries = 3,
-};
-
-static __always_inline __s64 gen_syncookie(void *data_end, struct bpf_sock *sk,
- void *iph, __u32 ip_size,
- struct tcphdr *tcph)
-{
- __u32 thlen = tcph->doff * 4;
-
- if (tcph->syn && !tcph->ack) {
- // packet should only have an MSS option
- if (thlen != 24)
- return 0;
-
- if ((void *)tcph + thlen > data_end)
- return 0;
-
- return bpf_tcp_gen_syncookie(sk, iph, ip_size, tcph, thlen);
- }
- return 0;
-}
-
-static __always_inline void check_syncookie(void *ctx, void *data,
- void *data_end)
-{
- struct bpf_sock_tuple tup;
- struct bpf_sock *sk;
- struct ethhdr *ethh;
- struct iphdr *ipv4h;
- struct ipv6hdr *ipv6h;
- struct tcphdr *tcph;
- int ret;
- __u32 key_mss = 2;
- __u32 key_gen = 1;
- __u32 key = 0;
- __s64 seq_mss;
-
- ethh = data;
- if (ethh + 1 > data_end)
- return;
-
- switch (bpf_ntohs(ethh->h_proto)) {
- case ETH_P_IP:
- ipv4h = data + sizeof(struct ethhdr);
- if (ipv4h + 1 > data_end)
- return;
-
- if (ipv4h->ihl != 5)
- return;
-
- tcph = data + sizeof(struct ethhdr) + sizeof(struct iphdr);
- if (tcph + 1 > data_end)
- return;
-
- tup.ipv4.saddr = ipv4h->saddr;
- tup.ipv4.daddr = ipv4h->daddr;
- tup.ipv4.sport = tcph->source;
- tup.ipv4.dport = tcph->dest;
-
- sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv4),
- BPF_F_CURRENT_NETNS, 0);
- if (!sk)
- return;
-
- if (sk->state != BPF_TCP_LISTEN)
- goto release;
-
- seq_mss = gen_syncookie(data_end, sk, ipv4h, sizeof(*ipv4h),
- tcph);
-
- ret = bpf_tcp_check_syncookie(sk, ipv4h, sizeof(*ipv4h),
- tcph, sizeof(*tcph));
- break;
-
- case ETH_P_IPV6:
- ipv6h = data + sizeof(struct ethhdr);
- if (ipv6h + 1 > data_end)
- return;
-
- if (ipv6h->nexthdr != IPPROTO_TCP)
- return;
-
- tcph = data + sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
- if (tcph + 1 > data_end)
- return;
-
- memcpy(tup.ipv6.saddr, &ipv6h->saddr, sizeof(tup.ipv6.saddr));
- memcpy(tup.ipv6.daddr, &ipv6h->daddr, sizeof(tup.ipv6.daddr));
- tup.ipv6.sport = tcph->source;
- tup.ipv6.dport = tcph->dest;
-
- sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv6),
- BPF_F_CURRENT_NETNS, 0);
- if (!sk)
- return;
-
- if (sk->state != BPF_TCP_LISTEN)
- goto release;
-
- seq_mss = gen_syncookie(data_end, sk, ipv6h, sizeof(*ipv6h),
- tcph);
-
- ret = bpf_tcp_check_syncookie(sk, ipv6h, sizeof(*ipv6h),
- tcph, sizeof(*tcph));
- break;
-
- default:
- return;
- }
-
- if (seq_mss > 0) {
- __u32 cookie = (__u32)seq_mss;
- __u32 mss = seq_mss >> 32;
-
- bpf_map_update_elem(&results, &key_gen, &cookie, 0);
- bpf_map_update_elem(&results, &key_mss, &mss, 0);
- }
-
- if (ret == 0) {
- __u32 cookie = bpf_ntohl(tcph->ack_seq) - 1;
-
- bpf_map_update_elem(&results, &key, &cookie, 0);
- }
-
-release:
- bpf_sk_release(sk);
-}
-
-SEC("clsact/check_syncookie")
-int check_syncookie_clsact(struct __sk_buff *skb)
-{
- check_syncookie(skb, (void *)(long)skb->data,
- (void *)(long)skb->data_end);
- return TC_ACT_OK;
-}
-
-SEC("xdp/check_syncookie")
-int check_syncookie_xdp(struct xdp_md *ctx)
-{
- check_syncookie(ctx, (void *)(long)ctx->data,
- (void *)(long)ctx->data_end);
- return XDP_PASS;
-}
-
-char _license[] SEC("license") = "GPL";
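For context on the deletion: the removed program unpacked the __s64 returned by bpf_tcp_gen_syncookie() as the cookie in the low 32 bits and the MSS in the high 32 bits before storing both into the results map. A standalone sketch of that unpacking (sample value only, not part of the diff):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical helper return: MSS 1460 in the high half,
	 * cookie 0x9fa1b2c3 in the low half; <= 0 would mean error
	 * or "no cookie generated".
	 */
	int64_t seq_mss = ((int64_t)1460 << 32) | 0x9fa1b2c3;

	if (seq_mss > 0) {
		uint32_t cookie = (uint32_t)seq_mss;
		uint32_t mss = seq_mss >> 32;

		printf("cookie=0x%08x mss=%u\n", cookie, mss);
	}
	return 0;
}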
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c
new file mode 100644
index 000000000000..7d5293de1952
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c
@@ -0,0 +1,591 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_tracing_net.h"
+#include "bpf_kfuncs.h"
+#include "test_siphash.h"
+#include "test_tcp_custom_syncookie.h"
+#include "bpf_misc.h"
+
+#define MAX_PACKET_OFF 0xffff
+
+/* Hash is calculated for each client and split into ISN and TS.
+ *
+ * MSB LSB
+ * ISN: | 31 ... 8 | 7 6 | 5 | 4 | 3 2 1 0 |
+ * | Hash_1 | MSS | ECN | SACK | WScale |
+ *
+ * TS: | 31 ... 8 | 7 ... 0 |
+ * | Random | Hash_2 |
+ */
+#define COOKIE_BITS 8
+#define COOKIE_MASK (((__u32)1 << COOKIE_BITS) - 1)
+
+enum {
+	/* 0xf is invalid and thus means that the SYN did not have WScale. */
+ BPF_SYNCOOKIE_WSCALE_MASK = (1 << 4) - 1,
+ BPF_SYNCOOKIE_SACK = (1 << 4),
+ BPF_SYNCOOKIE_ECN = (1 << 5),
+};
+
+#define MSS_LOCAL_IPV4 65495
+#define MSS_LOCAL_IPV6 65476
+
+const __u16 msstab4[] = {
+ 536,
+ 1300,
+ 1460,
+ MSS_LOCAL_IPV4,
+};
+
+const __u16 msstab6[] = {
+ 1280 - 60, /* IPV6_MIN_MTU - 60 */
+ 1480 - 60,
+ 9000 - 60,
+ MSS_LOCAL_IPV6,
+};
+
+static siphash_key_t test_key_siphash = {
+ { 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }
+};
+
+struct tcp_syncookie {
+ struct __sk_buff *skb;
+ void *data;
+ void *data_end;
+ struct ethhdr *eth;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ struct tcphdr *tcp;
+ __be32 *ptr32;
+ struct bpf_tcp_req_attrs attrs;
+ u32 off;
+ u32 cookie;
+ u64 first;
+};
+
+bool handled_syn, handled_ack;
+
+static int tcp_load_headers(struct tcp_syncookie *ctx)
+{
+ ctx->data = (void *)(long)ctx->skb->data;
+ ctx->data_end = (void *)(long)ctx->skb->data_end;
+ ctx->eth = (struct ethhdr *)(long)ctx->skb->data;
+
+ if (ctx->eth + 1 > ctx->data_end)
+ goto err;
+
+ switch (bpf_ntohs(ctx->eth->h_proto)) {
+ case ETH_P_IP:
+ ctx->ipv4 = (struct iphdr *)(ctx->eth + 1);
+
+ if (ctx->ipv4 + 1 > ctx->data_end)
+ goto err;
+
+ if (ctx->ipv4->ihl != sizeof(*ctx->ipv4) / 4)
+ goto err;
+
+ if (ctx->ipv4->version != 4)
+ goto err;
+
+ if (ctx->ipv4->protocol != IPPROTO_TCP)
+ goto err;
+
+ ctx->tcp = (struct tcphdr *)(ctx->ipv4 + 1);
+ break;
+ case ETH_P_IPV6:
+ ctx->ipv6 = (struct ipv6hdr *)(ctx->eth + 1);
+
+ if (ctx->ipv6 + 1 > ctx->data_end)
+ goto err;
+
+ if (ctx->ipv6->version != 6)
+ goto err;
+
+ if (ctx->ipv6->nexthdr != NEXTHDR_TCP)
+ goto err;
+
+ ctx->tcp = (struct tcphdr *)(ctx->ipv6 + 1);
+ break;
+ default:
+ goto err;
+ }
+
+ if (ctx->tcp + 1 > ctx->data_end)
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static int tcp_reload_headers(struct tcp_syncookie *ctx)
+{
+	/* Without volatile, the verifier rejects the program with
+	 * "R3 32-bit pointer arithmetic prohibited".
+	 */
+ volatile u64 data_len = ctx->skb->data_end - ctx->skb->data;
+
+ if (ctx->tcp->doff < sizeof(*ctx->tcp) / 4)
+ goto err;
+
+ /* Needed to calculate csum and parse TCP options. */
+ if (bpf_skb_change_tail(ctx->skb, data_len + 60 - ctx->tcp->doff * 4, 0))
+ goto err;
+
+ ctx->data = (void *)(long)ctx->skb->data;
+ ctx->data_end = (void *)(long)ctx->skb->data_end;
+ ctx->eth = (struct ethhdr *)(long)ctx->skb->data;
+ if (ctx->ipv4) {
+ ctx->ipv4 = (struct iphdr *)(ctx->eth + 1);
+ ctx->ipv6 = NULL;
+ ctx->tcp = (struct tcphdr *)(ctx->ipv4 + 1);
+ } else {
+ ctx->ipv4 = NULL;
+ ctx->ipv6 = (struct ipv6hdr *)(ctx->eth + 1);
+ ctx->tcp = (struct tcphdr *)(ctx->ipv6 + 1);
+ }
+
+ if ((void *)ctx->tcp + 60 > ctx->data_end)
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static __sum16 tcp_v4_csum(struct tcp_syncookie *ctx, __wsum csum)
+{
+ return csum_tcpudp_magic(ctx->ipv4->saddr, ctx->ipv4->daddr,
+ ctx->tcp->doff * 4, IPPROTO_TCP, csum);
+}
+
+static __sum16 tcp_v6_csum(struct tcp_syncookie *ctx, __wsum csum)
+{
+ return csum_ipv6_magic(&ctx->ipv6->saddr, &ctx->ipv6->daddr,
+ ctx->tcp->doff * 4, IPPROTO_TCP, csum);
+}
+
+static int tcp_validate_header(struct tcp_syncookie *ctx)
+{
+ s64 csum;
+
+ if (tcp_reload_headers(ctx))
+ goto err;
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->tcp, ctx->tcp->doff * 4, 0);
+ if (csum < 0)
+ goto err;
+
+ if (ctx->ipv4) {
+		/* tcp_v4_csum(csum) would be 0 here if not on lo, which skips TCP checksums. */
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->ipv4, ctx->ipv4->ihl * 4, 0);
+ if (csum < 0)
+ goto err;
+
+ if (csum_fold(csum) != 0)
+ goto err;
+ } else if (ctx->ipv6) {
+		/* likewise, tcp_v6_csum(csum) would be 0 if not on lo. */
+ }
+
+ return 0;
+err:
+ return -1;
+}
+
+static __always_inline void *next(struct tcp_syncookie *ctx, __u32 sz)
+{
+ __u64 off = ctx->off;
+ __u8 *data;
+
+ /* Verifier forbids access to packet when offset exceeds MAX_PACKET_OFF */
+ if (off > MAX_PACKET_OFF - sz)
+ return NULL;
+
+ data = ctx->data + off;
+ barrier_var(data);
+ if (data + sz >= ctx->data_end)
+ return NULL;
+
+ ctx->off += sz;
+ return data;
+}
+
+static int tcp_parse_option(__u32 index, struct tcp_syncookie *ctx)
+{
+ __u8 *opcode, *opsize, *wscale;
+ __u32 *tsval, *tsecr;
+ __u16 *mss;
+ __u32 off;
+
+ off = ctx->off;
+ opcode = next(ctx, 1);
+ if (!opcode)
+ goto stop;
+
+ if (*opcode == TCPOPT_EOL)
+ goto stop;
+
+ if (*opcode == TCPOPT_NOP)
+ goto next;
+
+ opsize = next(ctx, 1);
+ if (!opsize)
+ goto stop;
+
+ if (*opsize < 2)
+ goto stop;
+
+ switch (*opcode) {
+ case TCPOPT_MSS:
+ mss = next(ctx, 2);
+ if (*opsize == TCPOLEN_MSS && ctx->tcp->syn && mss)
+ ctx->attrs.mss = get_unaligned_be16(mss);
+ break;
+ case TCPOPT_WINDOW:
+ wscale = next(ctx, 1);
+ if (*opsize == TCPOLEN_WINDOW && ctx->tcp->syn && wscale) {
+ ctx->attrs.wscale_ok = 1;
+ ctx->attrs.snd_wscale = *wscale;
+ }
+ break;
+ case TCPOPT_TIMESTAMP:
+ tsval = next(ctx, 4);
+ tsecr = next(ctx, 4);
+ if (*opsize == TCPOLEN_TIMESTAMP && tsval && tsecr) {
+ ctx->attrs.rcv_tsval = get_unaligned_be32(tsval);
+ ctx->attrs.rcv_tsecr = get_unaligned_be32(tsecr);
+
+ if (ctx->tcp->syn && ctx->attrs.rcv_tsecr)
+ ctx->attrs.tstamp_ok = 0;
+ else
+ ctx->attrs.tstamp_ok = 1;
+ }
+ break;
+ case TCPOPT_SACK_PERM:
+ if (*opsize == TCPOLEN_SACK_PERM && ctx->tcp->syn)
+ ctx->attrs.sack_ok = 1;
+ break;
+ }
+
+ ctx->off = off + *opsize;
+next:
+ return 0;
+stop:
+ return 1;
+}
+
+static void tcp_parse_options(struct tcp_syncookie *ctx)
+{
+	ctx->off = (__u8 *)(ctx->tcp + 1) - (__u8 *)ctx->data;
+
+ bpf_loop(40, tcp_parse_option, ctx, 0);
+}
+
+static int tcp_validate_sysctl(struct tcp_syncookie *ctx)
+{
+ if ((ctx->ipv4 && ctx->attrs.mss != MSS_LOCAL_IPV4) ||
+ (ctx->ipv6 && ctx->attrs.mss != MSS_LOCAL_IPV6))
+ goto err;
+
+ if (!ctx->attrs.wscale_ok ||
+ !ctx->attrs.snd_wscale ||
+ ctx->attrs.snd_wscale >= BPF_SYNCOOKIE_WSCALE_MASK)
+ goto err;
+
+ if (!ctx->attrs.tstamp_ok)
+ goto err;
+
+ if (!ctx->attrs.sack_ok)
+ goto err;
+
+ if (!ctx->tcp->ece || !ctx->tcp->cwr)
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static void tcp_prepare_cookie(struct tcp_syncookie *ctx)
+{
+ u32 seq = bpf_ntohl(ctx->tcp->seq);
+ u64 first = 0, second;
+ int mssind = 0;
+ u32 hash;
+
+ if (ctx->ipv4) {
+ for (mssind = ARRAY_SIZE(msstab4) - 1; mssind; mssind--)
+ if (ctx->attrs.mss >= msstab4[mssind])
+ break;
+
+ ctx->attrs.mss = msstab4[mssind];
+
+ first = (u64)ctx->ipv4->saddr << 32 | ctx->ipv4->daddr;
+ } else if (ctx->ipv6) {
+ for (mssind = ARRAY_SIZE(msstab6) - 1; mssind; mssind--)
+ if (ctx->attrs.mss >= msstab6[mssind])
+ break;
+
+ ctx->attrs.mss = msstab6[mssind];
+
+ first = (u64)ctx->ipv6->saddr.in6_u.u6_addr8[0] << 32 |
+ ctx->ipv6->daddr.in6_u.u6_addr32[0];
+ }
+
+ second = (u64)seq << 32 | ctx->tcp->source << 16 | ctx->tcp->dest;
+ hash = siphash_2u64(first, second, &test_key_siphash);
+
+ if (ctx->attrs.tstamp_ok) {
+ ctx->attrs.rcv_tsecr = bpf_get_prandom_u32();
+ ctx->attrs.rcv_tsecr &= ~COOKIE_MASK;
+ ctx->attrs.rcv_tsecr |= hash & COOKIE_MASK;
+ }
+
+ hash &= ~COOKIE_MASK;
+ hash |= mssind << 6;
+
+ if (ctx->attrs.wscale_ok)
+ hash |= ctx->attrs.snd_wscale & BPF_SYNCOOKIE_WSCALE_MASK;
+
+ if (ctx->attrs.sack_ok)
+ hash |= BPF_SYNCOOKIE_SACK;
+
+ if (ctx->attrs.tstamp_ok && ctx->tcp->ece && ctx->tcp->cwr)
+ hash |= BPF_SYNCOOKIE_ECN;
+
+ ctx->cookie = hash;
+}
+
+static void tcp_write_options(struct tcp_syncookie *ctx)
+{
+ ctx->ptr32 = (__be32 *)(ctx->tcp + 1);
+
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_MSS << 24 | TCPOLEN_MSS << 16 |
+ ctx->attrs.mss);
+
+ if (ctx->attrs.wscale_ok)
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 |
+ TCPOPT_WINDOW << 16 |
+ TCPOLEN_WINDOW << 8 |
+ ctx->attrs.snd_wscale);
+
+ if (ctx->attrs.tstamp_ok) {
+ if (ctx->attrs.sack_ok)
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_SACK_PERM << 24 |
+ TCPOLEN_SACK_PERM << 16 |
+ TCPOPT_TIMESTAMP << 8 |
+ TCPOLEN_TIMESTAMP);
+ else
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 |
+ TCPOPT_NOP << 16 |
+ TCPOPT_TIMESTAMP << 8 |
+ TCPOLEN_TIMESTAMP);
+
+ *ctx->ptr32++ = bpf_htonl(ctx->attrs.rcv_tsecr);
+ *ctx->ptr32++ = bpf_htonl(ctx->attrs.rcv_tsval);
+ } else if (ctx->attrs.sack_ok) {
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 |
+ TCPOPT_NOP << 16 |
+ TCPOPT_SACK_PERM << 8 |
+ TCPOLEN_SACK_PERM);
+ }
+}
+
+static int tcp_handle_syn(struct tcp_syncookie *ctx)
+{
+ s64 csum;
+
+ if (tcp_validate_header(ctx))
+ goto err;
+
+ tcp_parse_options(ctx);
+
+ if (tcp_validate_sysctl(ctx))
+ goto err;
+
+ tcp_prepare_cookie(ctx);
+ tcp_write_options(ctx);
+
+ swap(ctx->tcp->source, ctx->tcp->dest);
+ ctx->tcp->check = 0;
+ ctx->tcp->ack_seq = bpf_htonl(bpf_ntohl(ctx->tcp->seq) + 1);
+ ctx->tcp->seq = bpf_htonl(ctx->cookie);
+ ctx->tcp->doff = ((long)ctx->ptr32 - (long)ctx->tcp) >> 2;
+ ctx->tcp->ack = 1;
+ if (!ctx->attrs.tstamp_ok || !ctx->tcp->ece || !ctx->tcp->cwr)
+ ctx->tcp->ece = 0;
+ ctx->tcp->cwr = 0;
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->tcp, ctx->tcp->doff * 4, 0);
+ if (csum < 0)
+ goto err;
+
+ if (ctx->ipv4) {
+ swap(ctx->ipv4->saddr, ctx->ipv4->daddr);
+ ctx->tcp->check = tcp_v4_csum(ctx, csum);
+
+ ctx->ipv4->check = 0;
+ ctx->ipv4->tos = 0;
+ ctx->ipv4->tot_len = bpf_htons((long)ctx->ptr32 - (long)ctx->ipv4);
+ ctx->ipv4->id = 0;
+ ctx->ipv4->ttl = 64;
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->ipv4, sizeof(*ctx->ipv4), 0);
+ if (csum < 0)
+ goto err;
+
+ ctx->ipv4->check = csum_fold(csum);
+ } else if (ctx->ipv6) {
+ swap(ctx->ipv6->saddr, ctx->ipv6->daddr);
+ ctx->tcp->check = tcp_v6_csum(ctx, csum);
+
+ *(__be32 *)ctx->ipv6 = bpf_htonl(0x60000000);
+ ctx->ipv6->payload_len = bpf_htons((long)ctx->ptr32 - (long)ctx->tcp);
+ ctx->ipv6->hop_limit = 64;
+ }
+
+ swap_array(ctx->eth->h_source, ctx->eth->h_dest);
+
+ if (bpf_skb_change_tail(ctx->skb, (long)ctx->ptr32 - (long)ctx->eth, 0))
+ goto err;
+
+ return bpf_redirect(ctx->skb->ifindex, 0);
+err:
+ return TC_ACT_SHOT;
+}
+
+static int tcp_validate_cookie(struct tcp_syncookie *ctx)
+{
+ u32 cookie = bpf_ntohl(ctx->tcp->ack_seq) - 1;
+ u32 seq = bpf_ntohl(ctx->tcp->seq) - 1;
+ u64 first = 0, second;
+ int mssind;
+ u32 hash;
+
+ if (ctx->ipv4)
+ first = (u64)ctx->ipv4->saddr << 32 | ctx->ipv4->daddr;
+ else if (ctx->ipv6)
+ first = (u64)ctx->ipv6->saddr.in6_u.u6_addr8[0] << 32 |
+ ctx->ipv6->daddr.in6_u.u6_addr32[0];
+
+ second = (u64)seq << 32 | ctx->tcp->source << 16 | ctx->tcp->dest;
+ hash = siphash_2u64(first, second, &test_key_siphash);
+
+ if (ctx->attrs.tstamp_ok)
+ hash -= ctx->attrs.rcv_tsecr & COOKIE_MASK;
+ else
+ hash &= ~COOKIE_MASK;
+
+ hash -= cookie & ~COOKIE_MASK;
+ if (hash)
+ goto err;
+
+ mssind = (cookie & (3 << 6)) >> 6;
+ if (ctx->ipv4)
+ ctx->attrs.mss = msstab4[mssind];
+ else
+ ctx->attrs.mss = msstab6[mssind];
+
+ ctx->attrs.snd_wscale = cookie & BPF_SYNCOOKIE_WSCALE_MASK;
+ ctx->attrs.rcv_wscale = ctx->attrs.snd_wscale;
+	ctx->attrs.wscale_ok = ctx->attrs.snd_wscale != BPF_SYNCOOKIE_WSCALE_MASK;
+ ctx->attrs.sack_ok = cookie & BPF_SYNCOOKIE_SACK;
+ ctx->attrs.ecn_ok = cookie & BPF_SYNCOOKIE_ECN;
+
+ return 0;
+err:
+ return -1;
+}
+
+static int tcp_handle_ack(struct tcp_syncookie *ctx)
+{
+ struct bpf_sock_tuple tuple;
+ struct bpf_sock *skc;
+ int ret = TC_ACT_OK;
+ struct sock *sk;
+ u32 tuple_size;
+
+ if (ctx->ipv4) {
+ tuple.ipv4.saddr = ctx->ipv4->saddr;
+ tuple.ipv4.daddr = ctx->ipv4->daddr;
+ tuple.ipv4.sport = ctx->tcp->source;
+ tuple.ipv4.dport = ctx->tcp->dest;
+ tuple_size = sizeof(tuple.ipv4);
+ } else if (ctx->ipv6) {
+ __builtin_memcpy(tuple.ipv6.saddr, &ctx->ipv6->saddr, sizeof(tuple.ipv6.saddr));
+ __builtin_memcpy(tuple.ipv6.daddr, &ctx->ipv6->daddr, sizeof(tuple.ipv6.daddr));
+ tuple.ipv6.sport = ctx->tcp->source;
+ tuple.ipv6.dport = ctx->tcp->dest;
+ tuple_size = sizeof(tuple.ipv6);
+ } else {
+ goto out;
+ }
+
+ skc = bpf_skc_lookup_tcp(ctx->skb, &tuple, tuple_size, -1, 0);
+ if (!skc)
+ goto out;
+
+ if (skc->state != TCP_LISTEN)
+ goto release;
+
+ sk = (struct sock *)bpf_skc_to_tcp_sock(skc);
+ if (!sk)
+ goto err;
+
+ if (tcp_validate_header(ctx))
+ goto err;
+
+ tcp_parse_options(ctx);
+
+ if (tcp_validate_cookie(ctx))
+ goto err;
+
+ ret = bpf_sk_assign_tcp_reqsk(ctx->skb, sk, &ctx->attrs, sizeof(ctx->attrs));
+ if (ret < 0)
+ goto err;
+
+release:
+ bpf_sk_release(skc);
+out:
+ return ret;
+
+err:
+ ret = TC_ACT_SHOT;
+ goto release;
+}
+
+SEC("tc")
+int tcp_custom_syncookie(struct __sk_buff *skb)
+{
+ struct tcp_syncookie ctx = {
+ .skb = skb,
+ };
+
+ if (tcp_load_headers(&ctx))
+ return TC_ACT_OK;
+
+ if (ctx.tcp->rst)
+ return TC_ACT_OK;
+
+ if (ctx.tcp->syn) {
+ if (ctx.tcp->ack)
+ return TC_ACT_OK;
+
+ handled_syn = true;
+
+ return tcp_handle_syn(&ctx);
+ }
+
+ handled_ack = true;
+
+ return tcp_handle_ack(&ctx);
+}
+
+char _license[] SEC("license") = "GPL";
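The cookie produced by tcp_prepare_cookie() and checked by tcp_validate_cookie() follows the ISN layout from the header comment: bits 31..8 keep the upper hash bits, 7..6 the MSS table index, 5 ECN, 4 SACK, and 3..0 the window scale (0xf meaning the SYN had no WScale). A host-side round-trip sketch of that layout (illustrative values, not part of the diff):

#include <stdint.h>
#include <stdio.h>

#define COOKIE_MASK  (((uint32_t)1 << 8) - 1)
#define WSCALE_MASK  ((1 << 4) - 1)
#define SACK_BIT     (1 << 4)
#define ECN_BIT      (1 << 5)

static uint32_t pack(uint32_t hash, unsigned int mssind,
		     unsigned int wscale, int sack, int ecn)
{
	uint32_t c = hash & ~COOKIE_MASK;	/* keep bits 31..8 */

	c |= mssind << 6;			/* bits 7..6 */
	c |= wscale & WSCALE_MASK;		/* bits 3..0 */
	if (sack)
		c |= SACK_BIT;			/* bit 4 */
	if (ecn)
		c |= ECN_BIT;			/* bit 5 */
	return c;
}

int main(void)
{
	uint32_t c = pack(0xabcdef00, 2, 7, 1, 0);

	/* prints cookie=0xabcdef97 mssind=2 wscale=7 sack=1 ecn=0 */
	printf("cookie=0x%08x mssind=%u wscale=%u sack=%d ecn=%d\n", c,
	       (c & (3 << 6)) >> 6, c & WSCALE_MASK,
	       !!(c & SACK_BIT), !!(c & ECN_BIT));
	return 0;
}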
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h
new file mode 100644
index 000000000000..34024de6337e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#ifndef _TEST_TCP_SYNCOOKIE_H
+#define _TEST_TCP_SYNCOOKIE_H
+
+#define __packed __attribute__((__packed__))
+#define __force
+
+#define swap(a, b) \
+ do { \
+ typeof(a) __tmp = (a); \
+ (a) = (b); \
+ (b) = __tmp; \
+ } while (0)
+
+#define swap_array(a, b) \
+ do { \
+ typeof(a) __tmp[sizeof(a)]; \
+ __builtin_memcpy(__tmp, a, sizeof(a)); \
+ __builtin_memcpy(a, b, sizeof(a)); \
+ __builtin_memcpy(b, __tmp, sizeof(a)); \
+ } while (0)
+
+/* linux/unaligned.h */
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed * __pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return bpf_ntohs(__get_unaligned_t(__be16, p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return bpf_ntohl(__get_unaligned_t(__be32, p));
+}
+
+/* lib/checksum.c */
+static inline u32 from64to32(u64 x)
+{
+ /* add up 32-bit and 32-bit for 32+c bit */
+ x = (x & 0xffffffff) + (x >> 32);
+ /* add up carry.. */
+ x = (x & 0xffffffff) + (x >> 32);
+ return (u32)x;
+}
+
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto, __wsum sum)
+{
+ unsigned long long s = (__force u32)sum;
+
+ s += (__force u32)saddr;
+ s += (__force u32)daddr;
+#ifdef __BIG_ENDIAN
+ s += proto + len;
+#else
+ s += (proto + len) << 8;
+#endif
+ return (__force __wsum)from64to32(s);
+}
+
+/* asm-generic/checksum.h */
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+
+ sum = (sum & 0xffff) + (sum >> 16);
+ sum = (sum & 0xffff) + (sum >> 16);
+ return (__force __sum16)~sum;
+}
+
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+/* net/ipv6/ip6_checksum.c */
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto, __wsum csum)
+{
+ int carry;
+ __u32 ulen;
+ __u32 uproto;
+ __u32 sum = (__force u32)csum;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[0];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[0]);
+ sum += carry;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[1];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[1]);
+ sum += carry;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[2];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[2]);
+ sum += carry;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[3];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[3]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[0];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[0]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[1];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[1]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[2];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[2]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[3];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[3]);
+ sum += carry;
+
+ ulen = (__force u32)bpf_htonl((__u32)len);
+ sum += ulen;
+ carry = (sum < ulen);
+ sum += carry;
+
+ uproto = (__force u32)bpf_htonl(proto);
+ sum += uproto;
+ carry = (sum < uproto);
+ sum += carry;
+
+ return csum_fold((__force __wsum)sum);
+}
+#endif
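csum_fold() above collapses a 32-bit one's-complement accumulator into the final 16-bit Internet checksum by folding the carry back in twice and inverting; csum_tcpudp_magic() just feeds it the pseudo-header sum. A tiny host-side check of the fold (toy accumulator value, not part of the diff):

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries once */
	sum = (sum & 0xffff) + (sum >> 16);	/* and the final carry */
	return (uint16_t)~sum;
}

int main(void)
{
	/* 0x0001fffe folds to 0xffff, so the checksum is 0x0000 */
	printf("folded=0x%04x\n", csum_fold(0x0001fffe));
	return 0;
}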
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
index adc83a54c352..e2ae049c2f85 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_estats.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
@@ -244,7 +244,7 @@ static __always_inline void send_basic_event(struct sock *sk,
bpf_map_update_elem(&ev_record_map, &key, &ev, BPF_ANY);
}
-SEC("dummy_tracepoint")
+SEC("tp/dummy/tracepoint")
int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
{
if (!arg->sock)
@@ -255,4 +255,3 @@ int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
index 678bd0fad29e..1ecdf4c54de4 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
@@ -14,10 +14,7 @@
#include <bpf/bpf_endian.h>
#define BPF_PROG_TEST_TCP_HDR_OPTIONS
#include "test_tcp_hdr_options.h"
-
-#ifndef sizeof_field
-#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
-#endif
+#include "bpf_misc.h"
__u8 test_kind = TCPOPT_EXP;
__u16 test_magic = 0xeB9F;
@@ -594,7 +591,7 @@ static int handle_parse_hdr(struct bpf_sock_ops *skops)
return CG_OK;
}
-SEC("sockops/estab")
+SEC("sockops")
int estab(struct bpf_sock_ops *skops)
{
int true_val = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
index 94f50f7e94d6..6935f32eeb8f 100644
--- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
@@ -1,22 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
-#include <stddef.h>
-#include <string.h>
-#include <netinet/in.h>
-#include <linux/bpf.h>
-#include <linux/if_ether.h>
-#include <linux/if_packet.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/types.h>
-#include <linux/socket.h>
-#include <linux/tcp.h>
+#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-#include "bpf_tcp_helpers.h"
#include "test_tcpbpf.h"
struct tcpbpf_globals global = {};
-int _version SEC("version") = 1;
/**
* SOL_TCP is defined in <netinet/tcp.h> while
@@ -47,8 +35,6 @@ int bpf_testcb(struct bpf_sock_ops *skops)
struct bpf_sock_ops *reuse = skops;
struct tcphdr *thdr;
int window_clamp = 9216;
- int good_call_rv = 0;
- int bad_call_rv = 0;
int save_syn = 1;
int rv = -1;
int v = 0;
@@ -62,7 +48,7 @@ int bpf_testcb(struct bpf_sock_ops *skops)
asm volatile (
"%[op] = *(u32 *)(%[skops] +96)"
- : [op] "+r"(op)
+ : [op] "=r"(op)
: [skops] "r"(skops)
:);
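The constraint change is the substance of this hunk: "+r" declares a read-write operand, "=r" a write-only one, and the load above only ever writes op. A standalone sketch of the distinction (x86-64 assumed, not part of the diff):

#include <stdio.h>

int main(void)
{
	int op;

	/* "=r": write-only output, the old value of 'op' is never read */
	__asm__ volatile("movl $42, %[op]" : [op] "=r"(op));
	printf("op=%d\n", op);	/* 42 */
	return 0;
}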
diff --git a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
index ac63410bb541..ef00d38b0a8d 100644
--- a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
@@ -23,13 +23,10 @@ struct {
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
- __uint(max_entries, 2);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(__u32));
+ __type(key, int);
+ __type(value, __u32);
} perf_event_map SEC(".maps");
-int _version SEC("version") = 1;
-
SEC("sockops")
int bpf_testcb(struct bpf_sock_ops *skops)
{
diff --git a/tools/testing/selftests/bpf/progs/test_time_tai.c b/tools/testing/selftests/bpf/progs/test_time_tai.c
new file mode 100644
index 000000000000..7ea0863f3ddb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_time_tai.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Linutronix GmbH */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("tc")
+int time_tai(struct __sk_buff *skb)
+{
+ __u64 ts1, ts2;
+
+ /* Get TAI timestamps */
+ ts1 = bpf_ktime_get_tai_ns();
+ ts2 = bpf_ktime_get_tai_ns();
+
+ /* Save TAI timestamps (Note: skb->hwtstamp is read-only) */
+ skb->tstamp = ts1;
+ skb->cb[0] = ts2 & 0xffffffff;
+ skb->cb[1] = ts2 >> 32;
+
+ return 0;
+}
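A plausible harness-side counterpart (an assumption, not shown in this diff) would bracket the program run with CLOCK_TAI readings and verify that skb->tstamp and the cb[] words land inside the window:

/* Hypothetical userspace check; CLOCK_TAI is Linux-specific. */
#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec before, after;

	clock_gettime(CLOCK_TAI, &before);
	/* ... trigger the BPF program, read back ts1/ts2 ... */
	clock_gettime(CLOCK_TAI, &after);

	printf("TAI window: %lld.%09ld .. %lld.%09ld\n",
	       (long long)before.tv_sec, before.tv_nsec,
	       (long long)after.tv_sec, after.tv_nsec);
	return 0;
}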
diff --git a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
new file mode 100644
index 000000000000..cf0547a613ff
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../test_kmods/bpf_testmod.h"
+#include "bpf_misc.h"
+
+SEC("tp_btf/bpf_testmod_test_nullable_bare_tp")
+__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'")
+int BPF_PROG(handle_tp_btf_nullable_bare1, struct bpf_testmod_test_read_ctx *nullable_ctx)
+{
+ return nullable_ctx->len;
+}
+
+SEC("tp_btf/bpf_testmod_test_nullable_bare_tp")
+int BPF_PROG(handle_tp_btf_nullable_bare2, struct bpf_testmod_test_read_ctx *nullable_ctx)
+{
+ if (nullable_ctx)
+ return nullable_ctx->len;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tracepoint.c b/tools/testing/selftests/bpf/progs/test_tracepoint.c
index 4b825ee122cf..4cb8bbb6a320 100644
--- a/tools/testing/selftests/bpf/progs/test_tracepoint.c
+++ b/tools/testing/selftests/bpf/progs/test_tracepoint.c
@@ -1,17 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
-#include <linux/bpf.h>
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
-/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+/* taken from /sys/kernel/tracing/events/sched/sched_switch/format */
struct sched_switch_args {
unsigned long long pad;
- char prev_comm[16];
+ char prev_comm[TASK_COMM_LEN];
int prev_pid;
int prev_prio;
long long prev_state;
- char next_comm[16];
+ char next_comm[TASK_COMM_LEN];
int next_pid;
int next_prio;
};
@@ -23,4 +23,3 @@ int oncpu(struct sched_switch_args *ctx)
}
char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/progs/test_trampoline_count.c b/tools/testing/selftests/bpf/progs/test_trampoline_count.c
index f030e469d05b..7765720da7d5 100644
--- a/tools/testing/selftests/bpf/progs/test_trampoline_count.c
+++ b/tools/testing/selftests/bpf/progs/test_trampoline_count.c
@@ -1,20 +1,22 @@
// SPDX-License-Identifier: GPL-2.0
-#include <stdbool.h>
-#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-struct task_struct;
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(fentry_test, int a, int *b)
+{
+ return 0;
+}
-SEC("fentry/__set_task_comm")
-int BPF_PROG(prog1, struct task_struct *tsk, const char *buf, bool exec)
+SEC("fmod_ret/bpf_modify_return_test")
+int BPF_PROG(fmod_ret_test, int a, int *b, int ret)
{
return 0;
}
-SEC("fexit/__set_task_comm")
-int BPF_PROG(prog2, struct task_struct *tsk, const char *buf, bool exec)
+SEC("fexit/bpf_modify_return_test")
+int BPF_PROG(fexit_test, int a, int *b, int ret)
{
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
index a621b58ab079..32127f1cd687 100644
--- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
@@ -6,44 +6,56 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
-#include <stddef.h>
-#include <string.h>
-#include <arpa/inet.h>
-#include <linux/bpf.h>
-#include <linux/if_ether.h>
-#include <linux/if_packet.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/types.h>
-#include <linux/socket.h>
-#include <linux/pkt_cls.h>
-#include <linux/erspan.h>
+#include "vmlinux.h"
+#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_kfuncs.h"
+#include "bpf_tracing_net.h"
-#define ERROR(ret) do {\
- char fmt[] = "ERROR line:%d ret:%d\n";\
- bpf_trace_printk(fmt, sizeof(fmt), __LINE__, ret); \
- } while (0)
-
-int _version SEC("version") = 1;
-
-struct geneve_opt {
- __be16 opt_class;
- __u8 type;
- __u8 length:5;
- __u8 r3:1;
- __u8 r2:1;
- __u8 r1:1;
- __u8 opt_data[8]; /* hard-coded to 8 byte */
-};
+#define log_err(__ret) bpf_printk("ERROR line:%d ret:%d\n", __LINE__, __ret)
+
+#define VXLAN_UDP_PORT 4789
+#define ETH_P_IP 0x0800
+#define PACKET_HOST 0
+#define TUNNEL_CSUM bpf_htons(0x01)
+#define TUNNEL_KEY bpf_htons(0x04)
+
+/* The only IPv4 address assigned to veth1:
+ * 172.16.1.200
+ */
+#define ASSIGNED_ADDR_VETH1 0xac1001c8
-struct vxlan_metadata {
- __u32 gbp;
+struct bpf_fou_encap___local {
+ __be16 sport;
+ __be16 dport;
+} __attribute__((preserve_access_index));
+
+enum bpf_fou_encap_type___local {
+ FOU_BPF_ENCAP_FOU___local,
+ FOU_BPF_ENCAP_GUE___local,
};
-SEC("gre_set_tunnel")
-int _gre_set_tunnel(struct __sk_buff *skb)
+struct bpf_fou_encap;
+
+int bpf_skb_set_fou_encap(struct __sk_buff *skb_ctx,
+ struct bpf_fou_encap *encap, int type) __ksym;
+int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx,
+ struct bpf_fou_encap *encap) __ksym;
+struct xfrm_state *
+bpf_xdp_get_xfrm_state(struct xdp_md *ctx, struct bpf_xfrm_state_opts *opts,
+ u32 opts__sz) __ksym;
+void bpf_xdp_xfrm_state_release(struct xfrm_state *x) __ksym;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} local_ip_map SEC(".maps");
+
+SEC("tc")
+int gre_set_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
@@ -57,32 +69,52 @@ int _gre_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX | BPF_F_SEQ_NUMBER);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("tc")
+int gre_set_tunnel_no_key(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ key.tunnel_ttl = 64;
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
+ BPF_F_ZERO_CSUM_TX | BPF_F_SEQ_NUMBER |
+ BPF_F_NO_TUNNEL_KEY);
+ if (ret < 0) {
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("gre_get_tunnel")
-int _gre_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int gre_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
- char fmt[] = "key %d remote ip 0x%x\n";
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt), key.tunnel_id, key.remote_ipv4);
+ bpf_printk("key %d remote ip 0x%x\n", key.tunnel_id, key.remote_ipv4);
return TC_ACT_OK;
}
-SEC("ip6gretap_set_tunnel")
-int _ip6gretap_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6gretap_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
int ret;
@@ -98,35 +130,34 @@ int _ip6gretap_set_tunnel(struct __sk_buff *skb)
BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
BPF_F_SEQ_NUMBER);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("ip6gretap_get_tunnel")
-int _ip6gretap_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6gretap_get_tunnel(struct __sk_buff *skb)
{
- char fmt[] = "key %d remote ip6 ::%x label %x\n";
struct bpf_tunnel_key key;
int ret;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv6[3], key.tunnel_label);
+ bpf_printk("key %d remote ip6 ::%x label %x\n",
+ key.tunnel_id, key.remote_ipv6[3], key.tunnel_label);
return TC_ACT_OK;
}
-SEC("erspan_set_tunnel")
-int _erspan_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int erspan_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
struct erspan_metadata md;
@@ -141,7 +172,7 @@ int _erspan_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
@@ -154,63 +185,58 @@ int _erspan_set_tunnel(struct __sk_buff *skb)
__u8 hwid = 7;
md.version = 2;
- md.u.md2.dir = direction;
- md.u.md2.hwid = hwid & 0xf;
- md.u.md2.hwid_upper = (hwid >> 4) & 0x3;
+ BPF_CORE_WRITE_BITFIELD(&md.u.md2, dir, direction);
+ BPF_CORE_WRITE_BITFIELD(&md.u.md2, hwid, (hwid & 0xf));
+ BPF_CORE_WRITE_BITFIELD(&md.u.md2, hwid_upper, (hwid >> 4) & 0x3);
#endif
ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("erspan_get_tunnel")
-int _erspan_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int erspan_get_tunnel(struct __sk_buff *skb)
{
- char fmt[] = "key %d remote ip 0x%x erspan version %d\n";
struct bpf_tunnel_key key;
struct erspan_metadata md;
- __u32 index;
int ret;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv4, md.version);
+ bpf_printk("key %d remote ip 0x%x erspan version %d\n",
+ key.tunnel_id, key.remote_ipv4, md.version);
#ifdef ERSPAN_V1
- char fmt2[] = "\tindex %x\n";
-
index = bpf_ntohl(md.u.index);
- bpf_trace_printk(fmt2, sizeof(fmt2), index);
+ bpf_printk("\tindex %x\n", index);
#else
- char fmt2[] = "\tdirection %d hwid %x timestamp %u\n";
-
- bpf_trace_printk(fmt2, sizeof(fmt2),
- md.u.md2.dir,
- (md.u.md2.hwid_upper << 4) + md.u.md2.hwid,
- bpf_ntohl(md.u.md2.timestamp));
+ bpf_printk("\tdirection %d hwid %x timestamp %u\n",
+ BPF_CORE_READ_BITFIELD(&md.u.md2, dir),
+ (BPF_CORE_READ_BITFIELD(&md.u.md2, hwid_upper) << 4) +
+ BPF_CORE_READ_BITFIELD(&md.u.md2, hwid),
+ bpf_ntohl(md.u.md2.timestamp));
#endif
return TC_ACT_OK;
}
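The erspan v2 hwid handled above is a 6-bit value split across two bitfields: the set side stores (hwid & 0xf) and ((hwid >> 4) & 0x3), and the get side reassembles (hwid_upper << 4) + hwid via the CO-RE bitfield macros. A toy round trip of that split (host-side sketch, not part of the diff):

#include <stdio.h>

int main(void)
{
	unsigned int hwid = 17;			/* 6-bit value, 0b010001 */
	unsigned int lo = hwid & 0xf;		/* -> md2.hwid */
	unsigned int hi = (hwid >> 4) & 0x3;	/* -> md2.hwid_upper */

	printf("reassembled hwid = %u\n", (hi << 4) | lo);	/* 17 */
	return 0;
}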
-SEC("ip4ip6erspan_set_tunnel")
-int _ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
struct erspan_metadata md;
@@ -225,7 +251,7 @@ int _ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
@@ -239,70 +265,113 @@ int _ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
__u8 hwid = 17;